Contents of /trunk/kernel-alx/patches-4.19/0102-4.19.3-all-fixes.patch
Parent Directory | Revision Log
Revision 3381 -
(show annotations)
(download)
Fri Aug 2 11:47:03 2019 UTC (5 years, 1 month ago) by niro
File size: 220765 byte(s)
-linux-4.19.3
1 | diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt |
2 | index 51c136c821bf..eef7d9d259e8 100644 |
3 | --- a/Documentation/filesystems/overlayfs.txt |
4 | +++ b/Documentation/filesystems/overlayfs.txt |
5 | @@ -286,6 +286,12 @@ pointed by REDIRECT. This should not be possible on local system as setting |
6 | "trusted." xattrs will require CAP_SYS_ADMIN. But it should be possible |
7 | for untrusted layers like from a pen drive. |
8 | |
9 | +Note: redirect_dir={off|nofollow|follow(*)} conflicts with metacopy=on, and |
10 | +results in an error. |
11 | + |
12 | +(*) redirect_dir=follow only conflicts with metacopy=on if upperdir=... is |
13 | +given. |
14 | + |
15 | Sharing and copying layers |
16 | -------------------------- |
17 | |
18 | diff --git a/Makefile b/Makefile |
19 | index c8fe567f18ab..e4064fa16f11 100644 |
20 | --- a/Makefile |
21 | +++ b/Makefile |
22 | @@ -1,7 +1,7 @@ |
23 | # SPDX-License-Identifier: GPL-2.0 |
24 | VERSION = 4 |
25 | PATCHLEVEL = 19 |
26 | -SUBLEVEL = 2 |
27 | +SUBLEVEL = 3 |
28 | EXTRAVERSION = |
29 | NAME = "People's Front" |
30 | |
31 | diff --git a/arch/alpha/include/asm/termios.h b/arch/alpha/include/asm/termios.h |
32 | index 6a8c53dec57e..b7c77bb1bfd2 100644 |
33 | --- a/arch/alpha/include/asm/termios.h |
34 | +++ b/arch/alpha/include/asm/termios.h |
35 | @@ -73,9 +73,15 @@ |
36 | }) |
37 | |
38 | #define user_termios_to_kernel_termios(k, u) \ |
39 | - copy_from_user(k, u, sizeof(struct termios)) |
40 | + copy_from_user(k, u, sizeof(struct termios2)) |
41 | |
42 | #define kernel_termios_to_user_termios(u, k) \ |
43 | + copy_to_user(u, k, sizeof(struct termios2)) |
44 | + |
45 | +#define user_termios_to_kernel_termios_1(k, u) \ |
46 | + copy_from_user(k, u, sizeof(struct termios)) |
47 | + |
48 | +#define kernel_termios_to_user_termios_1(u, k) \ |
49 | copy_to_user(u, k, sizeof(struct termios)) |
50 | |
51 | #endif /* _ALPHA_TERMIOS_H */ |
52 | diff --git a/arch/alpha/include/uapi/asm/ioctls.h b/arch/alpha/include/uapi/asm/ioctls.h |
53 | index 3729d92d3fa8..dc8c20ac7191 100644 |
54 | --- a/arch/alpha/include/uapi/asm/ioctls.h |
55 | +++ b/arch/alpha/include/uapi/asm/ioctls.h |
56 | @@ -32,6 +32,11 @@ |
57 | #define TCXONC _IO('t', 30) |
58 | #define TCFLSH _IO('t', 31) |
59 | |
60 | +#define TCGETS2 _IOR('T', 42, struct termios2) |
61 | +#define TCSETS2 _IOW('T', 43, struct termios2) |
62 | +#define TCSETSW2 _IOW('T', 44, struct termios2) |
63 | +#define TCSETSF2 _IOW('T', 45, struct termios2) |
64 | + |
65 | #define TIOCSWINSZ _IOW('t', 103, struct winsize) |
66 | #define TIOCGWINSZ _IOR('t', 104, struct winsize) |
67 | #define TIOCSTART _IO('t', 110) /* start output, like ^Q */ |
68 | diff --git a/arch/alpha/include/uapi/asm/termbits.h b/arch/alpha/include/uapi/asm/termbits.h |
69 | index de6c8360fbe3..4575ba34a0ea 100644 |
70 | --- a/arch/alpha/include/uapi/asm/termbits.h |
71 | +++ b/arch/alpha/include/uapi/asm/termbits.h |
72 | @@ -26,6 +26,19 @@ struct termios { |
73 | speed_t c_ospeed; /* output speed */ |
74 | }; |
75 | |
76 | +/* Alpha has identical termios and termios2 */ |
77 | + |
78 | +struct termios2 { |
79 | + tcflag_t c_iflag; /* input mode flags */ |
80 | + tcflag_t c_oflag; /* output mode flags */ |
81 | + tcflag_t c_cflag; /* control mode flags */ |
82 | + tcflag_t c_lflag; /* local mode flags */ |
83 | + cc_t c_cc[NCCS]; /* control characters */ |
84 | + cc_t c_line; /* line discipline (== c_cc[19]) */ |
85 | + speed_t c_ispeed; /* input speed */ |
86 | + speed_t c_ospeed; /* output speed */ |
87 | +}; |
88 | + |
89 | /* Alpha has matching termios and ktermios */ |
90 | |
91 | struct ktermios { |
92 | @@ -152,6 +165,7 @@ struct ktermios { |
93 | #define B3000000 00034 |
94 | #define B3500000 00035 |
95 | #define B4000000 00036 |
96 | +#define BOTHER 00037 |
97 | |
98 | #define CSIZE 00001400 |
99 | #define CS5 00000000 |
100 | @@ -169,6 +183,9 @@ struct ktermios { |
101 | #define CMSPAR 010000000000 /* mark or space (stick) parity */ |
102 | #define CRTSCTS 020000000000 /* flow control */ |
103 | |
104 | +#define CIBAUD 07600000 |
105 | +#define IBSHIFT 16 |
106 | + |
107 | /* c_lflag bits */ |
108 | #define ISIG 0x00000080 |
109 | #define ICANON 0x00000100 |
110 | diff --git a/arch/arm/boot/dts/imx6ull-pinfunc.h b/arch/arm/boot/dts/imx6ull-pinfunc.h |
111 | index fdc46bb09cc1..3c12a6fb0b61 100644 |
112 | --- a/arch/arm/boot/dts/imx6ull-pinfunc.h |
113 | +++ b/arch/arm/boot/dts/imx6ull-pinfunc.h |
114 | @@ -14,14 +14,23 @@ |
115 | * The pin function ID is a tuple of |
116 | * <mux_reg conf_reg input_reg mux_mode input_val> |
117 | */ |
118 | +/* signals common for i.MX6UL and i.MX6ULL */ |
119 | +#undef MX6UL_PAD_UART5_TX_DATA__UART5_DTE_RX |
120 | +#define MX6UL_PAD_UART5_TX_DATA__UART5_DTE_RX 0x00BC 0x0348 0x0644 0x0 0x6 |
121 | +#undef MX6UL_PAD_UART5_RX_DATA__UART5_DCE_RX |
122 | +#define MX6UL_PAD_UART5_RX_DATA__UART5_DCE_RX 0x00C0 0x034C 0x0644 0x0 0x7 |
123 | +#undef MX6UL_PAD_ENET1_RX_EN__UART5_DCE_RTS |
124 | +#define MX6UL_PAD_ENET1_RX_EN__UART5_DCE_RTS 0x00CC 0x0358 0x0640 0x1 0x5 |
125 | +#undef MX6UL_PAD_ENET1_TX_DATA0__UART5_DTE_RTS |
126 | +#define MX6UL_PAD_ENET1_TX_DATA0__UART5_DTE_RTS 0x00D0 0x035C 0x0640 0x1 0x6 |
127 | +#undef MX6UL_PAD_CSI_DATA02__UART5_DCE_RTS |
128 | +#define MX6UL_PAD_CSI_DATA02__UART5_DCE_RTS 0x01EC 0x0478 0x0640 0x8 0x7 |
129 | + |
130 | +/* signals for i.MX6ULL only */ |
131 | #define MX6ULL_PAD_UART1_TX_DATA__UART5_DTE_RX 0x0084 0x0310 0x0644 0x9 0x4 |
132 | #define MX6ULL_PAD_UART1_RX_DATA__UART5_DCE_RX 0x0088 0x0314 0x0644 0x9 0x5 |
133 | #define MX6ULL_PAD_UART1_CTS_B__UART5_DCE_RTS 0x008C 0x0318 0x0640 0x9 0x3 |
134 | #define MX6ULL_PAD_UART1_RTS_B__UART5_DTE_RTS 0x0090 0x031C 0x0640 0x9 0x4 |
135 | -#define MX6ULL_PAD_UART5_TX_DATA__UART5_DTE_RX 0x00BC 0x0348 0x0644 0x0 0x6 |
136 | -#define MX6ULL_PAD_UART5_RX_DATA__UART5_DCE_RX 0x00C0 0x034C 0x0644 0x0 0x7 |
137 | -#define MX6ULL_PAD_ENET1_RX_EN__UART5_DCE_RTS 0x00CC 0x0358 0x0640 0x1 0x5 |
138 | -#define MX6ULL_PAD_ENET1_TX_DATA0__UART5_DTE_RTS 0x00D0 0x035C 0x0640 0x1 0x6 |
139 | #define MX6ULL_PAD_ENET2_RX_DATA0__EPDC_SDDO08 0x00E4 0x0370 0x0000 0x9 0x0 |
140 | #define MX6ULL_PAD_ENET2_RX_DATA1__EPDC_SDDO09 0x00E8 0x0374 0x0000 0x9 0x0 |
141 | #define MX6ULL_PAD_ENET2_RX_EN__EPDC_SDDO10 0x00EC 0x0378 0x0000 0x9 0x0 |
142 | @@ -55,7 +64,6 @@ |
143 | #define MX6ULL_PAD_CSI_DATA00__ESAI_TX_HF_CLK 0x01E4 0x0470 0x0000 0x9 0x0 |
144 | #define MX6ULL_PAD_CSI_DATA01__ESAI_RX_HF_CLK 0x01E8 0x0474 0x0000 0x9 0x0 |
145 | #define MX6ULL_PAD_CSI_DATA02__ESAI_RX_FS 0x01EC 0x0478 0x0000 0x9 0x0 |
146 | -#define MX6ULL_PAD_CSI_DATA02__UART5_DCE_RTS 0x01EC 0x0478 0x0640 0x8 0x7 |
147 | #define MX6ULL_PAD_CSI_DATA03__ESAI_RX_CLK 0x01F0 0x047C 0x0000 0x9 0x0 |
148 | #define MX6ULL_PAD_CSI_DATA04__ESAI_TX_FS 0x01F4 0x0480 0x0000 0x9 0x0 |
149 | #define MX6ULL_PAD_CSI_DATA05__ESAI_TX_CLK 0x01F8 0x0484 0x0000 0x9 0x0 |
150 | diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig |
151 | index 7eca43ff69bb..f4c2e993bba3 100644 |
152 | --- a/arch/arm/configs/imx_v6_v7_defconfig |
153 | +++ b/arch/arm/configs/imx_v6_v7_defconfig |
154 | @@ -409,6 +409,7 @@ CONFIG_ZISOFS=y |
155 | CONFIG_UDF_FS=m |
156 | CONFIG_MSDOS_FS=m |
157 | CONFIG_VFAT_FS=y |
158 | +CONFIG_TMPFS_POSIX_ACL=y |
159 | CONFIG_JFFS2_FS=y |
160 | CONFIG_UBIFS_FS=y |
161 | CONFIG_NFS_FS=y |
162 | diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S |
163 | index 6fe52819e014..339eb17c9808 100644 |
164 | --- a/arch/arm/mm/proc-v7.S |
165 | +++ b/arch/arm/mm/proc-v7.S |
166 | @@ -112,7 +112,7 @@ ENTRY(cpu_v7_hvc_switch_mm) |
167 | hvc #0 |
168 | ldmfd sp!, {r0 - r3} |
169 | b cpu_v7_switch_mm |
170 | -ENDPROC(cpu_v7_smc_switch_mm) |
171 | +ENDPROC(cpu_v7_hvc_switch_mm) |
172 | #endif |
173 | ENTRY(cpu_v7_iciallu_switch_mm) |
174 | mov r3, #0 |
175 | diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi |
176 | index bc6c141d7372..5089aa64088f 100644 |
177 | --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi |
178 | +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi |
179 | @@ -137,6 +137,9 @@ |
180 | reset-names = "stmmaceth", "stmmaceth-ocp"; |
181 | clocks = <&clkmgr STRATIX10_EMAC0_CLK>; |
182 | clock-names = "stmmaceth"; |
183 | + tx-fifo-depth = <16384>; |
184 | + rx-fifo-depth = <16384>; |
185 | + snps,multicast-filter-bins = <256>; |
186 | status = "disabled"; |
187 | }; |
188 | |
189 | @@ -150,6 +153,9 @@ |
190 | reset-names = "stmmaceth", "stmmaceth-ocp"; |
191 | clocks = <&clkmgr STRATIX10_EMAC1_CLK>; |
192 | clock-names = "stmmaceth"; |
193 | + tx-fifo-depth = <16384>; |
194 | + rx-fifo-depth = <16384>; |
195 | + snps,multicast-filter-bins = <256>; |
196 | status = "disabled"; |
197 | }; |
198 | |
199 | @@ -163,6 +169,9 @@ |
200 | reset-names = "stmmaceth", "stmmaceth-ocp"; |
201 | clocks = <&clkmgr STRATIX10_EMAC2_CLK>; |
202 | clock-names = "stmmaceth"; |
203 | + tx-fifo-depth = <16384>; |
204 | + rx-fifo-depth = <16384>; |
205 | + snps,multicast-filter-bins = <256>; |
206 | status = "disabled"; |
207 | }; |
208 | |
209 | diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts |
210 | index 6edc4fa9fd42..7c661753bfaf 100644 |
211 | --- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts |
212 | +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts |
213 | @@ -76,7 +76,7 @@ |
214 | phy-mode = "rgmii"; |
215 | phy-handle = <&phy0>; |
216 | |
217 | - max-frame-size = <3800>; |
218 | + max-frame-size = <9000>; |
219 | |
220 | mdio0 { |
221 | #address-cells = <1>; |
222 | diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h |
223 | index 3644b68c0ccc..be9f727a9328 100644 |
224 | --- a/arch/mips/include/asm/mach-loongson64/irq.h |
225 | +++ b/arch/mips/include/asm/mach-loongson64/irq.h |
226 | @@ -10,7 +10,7 @@ |
227 | #define MIPS_CPU_IRQ_BASE 56 |
228 | |
229 | #define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 2) /* UART */ |
230 | -#define LOONGSON_HT1_IRQ (MIPS_CPU_IRQ_BASE + 3) /* HT1 */ |
231 | +#define LOONGSON_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 3) /* CASCADE */ |
232 | #define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* CPU Timer */ |
233 | |
234 | #define LOONGSON_HT1_CFG_BASE loongson_sysconf.ht_control_base |
235 | diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c |
236 | index d455363d51c3..4c07a43a3242 100644 |
237 | --- a/arch/mips/kernel/crash.c |
238 | +++ b/arch/mips/kernel/crash.c |
239 | @@ -36,6 +36,9 @@ static void crash_shutdown_secondary(void *passed_regs) |
240 | if (!cpu_online(cpu)) |
241 | return; |
242 | |
243 | + /* We won't be sent IPIs any more. */ |
244 | + set_cpu_online(cpu, false); |
245 | + |
246 | local_irq_disable(); |
247 | if (!cpumask_test_cpu(cpu, &cpus_in_crash)) |
248 | crash_save_cpu(regs, cpu); |
249 | diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c |
250 | index 8b574bcd39ba..4b3726e4fe3a 100644 |
251 | --- a/arch/mips/kernel/machine_kexec.c |
252 | +++ b/arch/mips/kernel/machine_kexec.c |
253 | @@ -118,6 +118,9 @@ machine_kexec(struct kimage *image) |
254 | *ptr = (unsigned long) phys_to_virt(*ptr); |
255 | } |
256 | |
257 | + /* Mark offline BEFORE disabling local irq. */ |
258 | + set_cpu_online(smp_processor_id(), false); |
259 | + |
260 | /* |
261 | * we do not want to be bothered. |
262 | */ |
263 | diff --git a/arch/mips/loongson64/loongson-3/irq.c b/arch/mips/loongson64/loongson-3/irq.c |
264 | index cbeb20f9fc95..5605061f5f98 100644 |
265 | --- a/arch/mips/loongson64/loongson-3/irq.c |
266 | +++ b/arch/mips/loongson64/loongson-3/irq.c |
267 | @@ -96,51 +96,8 @@ void mach_irq_dispatch(unsigned int pending) |
268 | } |
269 | } |
270 | |
271 | -static struct irqaction cascade_irqaction = { |
272 | - .handler = no_action, |
273 | - .flags = IRQF_NO_SUSPEND, |
274 | - .name = "cascade", |
275 | -}; |
276 | - |
277 | -static inline void mask_loongson_irq(struct irq_data *d) |
278 | -{ |
279 | - clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); |
280 | - irq_disable_hazard(); |
281 | - |
282 | - /* Workaround: UART IRQ may deliver to any core */ |
283 | - if (d->irq == LOONGSON_UART_IRQ) { |
284 | - int cpu = smp_processor_id(); |
285 | - int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node; |
286 | - int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node; |
287 | - u64 intenclr_addr = smp_group[node_id] | |
288 | - (u64)(&LOONGSON_INT_ROUTER_INTENCLR); |
289 | - u64 introuter_lpc_addr = smp_group[node_id] | |
290 | - (u64)(&LOONGSON_INT_ROUTER_LPC); |
291 | - |
292 | - *(volatile u32 *)intenclr_addr = 1 << 10; |
293 | - *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id); |
294 | - } |
295 | -} |
296 | - |
297 | -static inline void unmask_loongson_irq(struct irq_data *d) |
298 | -{ |
299 | - /* Workaround: UART IRQ may deliver to any core */ |
300 | - if (d->irq == LOONGSON_UART_IRQ) { |
301 | - int cpu = smp_processor_id(); |
302 | - int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node; |
303 | - int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node; |
304 | - u64 intenset_addr = smp_group[node_id] | |
305 | - (u64)(&LOONGSON_INT_ROUTER_INTENSET); |
306 | - u64 introuter_lpc_addr = smp_group[node_id] | |
307 | - (u64)(&LOONGSON_INT_ROUTER_LPC); |
308 | - |
309 | - *(volatile u32 *)intenset_addr = 1 << 10; |
310 | - *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id); |
311 | - } |
312 | - |
313 | - set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); |
314 | - irq_enable_hazard(); |
315 | -} |
316 | +static inline void mask_loongson_irq(struct irq_data *d) { } |
317 | +static inline void unmask_loongson_irq(struct irq_data *d) { } |
318 | |
319 | /* For MIPS IRQs which shared by all cores */ |
320 | static struct irq_chip loongson_irq_chip = { |
321 | @@ -183,12 +140,11 @@ void __init mach_init_irq(void) |
322 | chip->irq_set_affinity = plat_set_irq_affinity; |
323 | |
324 | irq_set_chip_and_handler(LOONGSON_UART_IRQ, |
325 | - &loongson_irq_chip, handle_level_irq); |
326 | - |
327 | - /* setup HT1 irq */ |
328 | - setup_irq(LOONGSON_HT1_IRQ, &cascade_irqaction); |
329 | + &loongson_irq_chip, handle_percpu_irq); |
330 | + irq_set_chip_and_handler(LOONGSON_BRIDGE_IRQ, |
331 | + &loongson_irq_chip, handle_percpu_irq); |
332 | |
333 | - set_c0_status(STATUSF_IP2 | STATUSF_IP6); |
334 | + set_c0_status(STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP6); |
335 | } |
336 | |
337 | #ifdef CONFIG_HOTPLUG_CPU |
338 | diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c |
339 | index f1e92bf743c2..3c3b1e6abb53 100644 |
340 | --- a/arch/mips/pci/pci-legacy.c |
341 | +++ b/arch/mips/pci/pci-legacy.c |
342 | @@ -127,8 +127,12 @@ static void pcibios_scanbus(struct pci_controller *hose) |
343 | if (pci_has_flag(PCI_PROBE_ONLY)) { |
344 | pci_bus_claim_resources(bus); |
345 | } else { |
346 | + struct pci_bus *child; |
347 | + |
348 | pci_bus_size_bridges(bus); |
349 | pci_bus_assign_resources(bus); |
350 | + list_for_each_entry(child, &bus->children, node) |
351 | + pcie_bus_configure_settings(child); |
352 | } |
353 | pci_bus_add_devices(bus); |
354 | } |
355 | diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile |
356 | index 11a1acba164a..d2824b0cc142 100644 |
357 | --- a/arch/powerpc/Makefile |
358 | +++ b/arch/powerpc/Makefile |
359 | @@ -238,7 +238,11 @@ cpu-as-$(CONFIG_4xx) += -Wa,-m405 |
360 | cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) |
361 | cpu-as-$(CONFIG_E200) += -Wa,-me200 |
362 | cpu-as-$(CONFIG_E500) += -Wa,-me500 |
363 | -cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 |
364 | + |
365 | +# When using '-many -mpower4' gas will first try and find a matching power4 |
366 | +# mnemonic and failing that it will allow any valid mnemonic that GAS knows |
367 | +# about. GCC will pass -many to GAS when assembling, clang does not. |
368 | +cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 -Wa,-many |
369 | cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc) |
370 | |
371 | KBUILD_AFLAGS += $(cpu-as-y) |
372 | diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S |
373 | index dcf2f15e6797..32dfe6d083f3 100644 |
374 | --- a/arch/powerpc/boot/crt0.S |
375 | +++ b/arch/powerpc/boot/crt0.S |
376 | @@ -47,8 +47,10 @@ p_end: .long _end |
377 | p_pstack: .long _platform_stack_top |
378 | #endif |
379 | |
380 | - .weak _zimage_start |
381 | .globl _zimage_start |
382 | + /* Clang appears to require the .weak directive to be after the symbol |
383 | + * is defined. See https://bugs.llvm.org/show_bug.cgi?id=38921 */ |
384 | + .weak _zimage_start |
385 | _zimage_start: |
386 | .globl _zimage_start_lib |
387 | _zimage_start_lib: |
388 | diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h |
389 | index 4f547752ae79..193f53116c7a 100644 |
390 | --- a/arch/powerpc/include/asm/mmu-8xx.h |
391 | +++ b/arch/powerpc/include/asm/mmu-8xx.h |
392 | @@ -34,20 +34,12 @@ |
393 | * respectively NA for All or X for Supervisor and no access for User. |
394 | * Then we use the APG to say whether accesses are according to Page rules or |
395 | * "all Supervisor" rules (Access to all) |
396 | - * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP: |
397 | - * When that bit is not set access is done iaw "all user" |
398 | - * which means no access iaw page rules. |
399 | - * Therefore, we define 4 APG groups. lsb is _PMD_USER, 2nd is _PAGE_ACCESSED |
400 | - * 0x => No access => 11 (all accesses performed as user iaw page definition) |
401 | - * 10 => No user => 01 (all accesses performed according to page definition) |
402 | - * 11 => User => 00 (all accesses performed as supervisor iaw page definition) |
403 | + * Therefore, we define 2 APG groups. lsb is _PMD_USER |
404 | + * 0 => No user => 01 (all accesses performed according to page definition) |
405 | + * 1 => User => 00 (all accesses performed as supervisor iaw page definition) |
406 | * We define all 16 groups so that all other bits of APG can take any value |
407 | */ |
408 | -#ifdef CONFIG_SWAP |
409 | -#define MI_APG_INIT 0xf4f4f4f4 |
410 | -#else |
411 | #define MI_APG_INIT 0x44444444 |
412 | -#endif |
413 | |
414 | /* The effective page number register. When read, contains the information |
415 | * about the last instruction TLB miss. When MI_RPN is written, bits in |
416 | @@ -115,20 +107,12 @@ |
417 | * Supervisor and no access for user and NA for ALL. |
418 | * Then we use the APG to say whether accesses are according to Page rules or |
419 | * "all Supervisor" rules (Access to all) |
420 | - * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP: |
421 | - * When that bit is not set access is done iaw "all user" |
422 | - * which means no access iaw page rules. |
423 | - * Therefore, we define 4 APG groups. lsb is _PMD_USER, 2nd is _PAGE_ACCESSED |
424 | - * 0x => No access => 11 (all accesses performed as user iaw page definition) |
425 | - * 10 => No user => 01 (all accesses performed according to page definition) |
426 | - * 11 => User => 00 (all accesses performed as supervisor iaw page definition) |
427 | + * Therefore, we define 2 APG groups. lsb is _PMD_USER |
428 | + * 0 => No user => 01 (all accesses performed according to page definition) |
429 | + * 1 => User => 00 (all accesses performed as supervisor iaw page definition) |
430 | * We define all 16 groups so that all other bits of APG can take any value |
431 | */ |
432 | -#ifdef CONFIG_SWAP |
433 | -#define MD_APG_INIT 0xf4f4f4f4 |
434 | -#else |
435 | #define MD_APG_INIT 0x44444444 |
436 | -#endif |
437 | |
438 | /* The effective page number register. When read, contains the information |
439 | * about the last instruction TLB miss. When MD_RPN is written, bits in |
440 | @@ -180,12 +164,6 @@ |
441 | */ |
442 | #define SPRN_M_TW 799 |
443 | |
444 | -/* APGs */ |
445 | -#define M_APG0 0x00000000 |
446 | -#define M_APG1 0x00000020 |
447 | -#define M_APG2 0x00000040 |
448 | -#define M_APG3 0x00000060 |
449 | - |
450 | #ifdef CONFIG_PPC_MM_SLICES |
451 | #include <asm/nohash/32/slice.h> |
452 | #define SLICE_ARRAY_SIZE (1 << (32 - SLICE_LOW_SHIFT - 1)) |
453 | diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c |
454 | index 6ebba3e48b01..c72767a5327a 100644 |
455 | --- a/arch/powerpc/kernel/eeh.c |
456 | +++ b/arch/powerpc/kernel/eeh.c |
457 | @@ -169,6 +169,11 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) |
458 | int n = 0, l = 0; |
459 | char buffer[128]; |
460 | |
461 | + if (!pdn) { |
462 | + pr_warn("EEH: Note: No error log for absent device.\n"); |
463 | + return 0; |
464 | + } |
465 | + |
466 | n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n", |
467 | pdn->phb->global_number, pdn->busno, |
468 | PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); |
469 | diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S |
470 | index 6582f824d620..81d4574d1f37 100644 |
471 | --- a/arch/powerpc/kernel/head_8xx.S |
472 | +++ b/arch/powerpc/kernel/head_8xx.S |
473 | @@ -353,13 +353,14 @@ _ENTRY(ITLBMiss_cmp) |
474 | #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE) |
475 | mtcr r12 |
476 | #endif |
477 | - |
478 | -#ifdef CONFIG_SWAP |
479 | - rlwinm r11, r10, 31, _PAGE_ACCESSED >> 1 |
480 | -#endif |
481 | /* Load the MI_TWC with the attributes for this "segment." */ |
482 | mtspr SPRN_MI_TWC, r11 /* Set segment attributes */ |
483 | |
484 | +#ifdef CONFIG_SWAP |
485 | + rlwinm r11, r10, 32-5, _PAGE_PRESENT |
486 | + and r11, r11, r10 |
487 | + rlwimi r10, r11, 0, _PAGE_PRESENT |
488 | +#endif |
489 | li r11, RPN_PATTERN | 0x200 |
490 | /* The Linux PTE won't go exactly into the MMU TLB. |
491 | * Software indicator bits 20 and 23 must be clear. |
492 | @@ -470,14 +471,22 @@ _ENTRY(DTLBMiss_jmp) |
493 | * above. |
494 | */ |
495 | rlwimi r11, r10, 0, _PAGE_GUARDED |
496 | -#ifdef CONFIG_SWAP |
497 | - /* _PAGE_ACCESSED has to be set. We use second APG bit for that, 0 |
498 | - * on that bit will represent a Non Access group |
499 | - */ |
500 | - rlwinm r11, r10, 31, _PAGE_ACCESSED >> 1 |
501 | -#endif |
502 | mtspr SPRN_MD_TWC, r11 |
503 | |
504 | + /* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set. |
505 | + * We also need to know if the insn is a load/store, so: |
506 | + * Clear _PAGE_PRESENT and load that which will |
507 | + * trap into DTLB Error with store bit set accordinly. |
508 | + */ |
509 | + /* PRESENT=0x1, ACCESSED=0x20 |
510 | + * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5)); |
511 | + * r10 = (r10 & ~PRESENT) | r11; |
512 | + */ |
513 | +#ifdef CONFIG_SWAP |
514 | + rlwinm r11, r10, 32-5, _PAGE_PRESENT |
515 | + and r11, r11, r10 |
516 | + rlwimi r10, r11, 0, _PAGE_PRESENT |
517 | +#endif |
518 | /* The Linux PTE won't go exactly into the MMU TLB. |
519 | * Software indicator bits 24, 25, 26, and 27 must be |
520 | * set. All other Linux PTE bits control the behavior |
521 | @@ -637,8 +646,8 @@ InstructionBreakpoint: |
522 | */ |
523 | DTLBMissIMMR: |
524 | mtcr r12 |
525 | - /* Set 512k byte guarded page and mark it valid and accessed */ |
526 | - li r10, MD_PS512K | MD_GUARDED | MD_SVALID | M_APG2 |
527 | + /* Set 512k byte guarded page and mark it valid */ |
528 | + li r10, MD_PS512K | MD_GUARDED | MD_SVALID |
529 | mtspr SPRN_MD_TWC, r10 |
530 | mfspr r10, SPRN_IMMR /* Get current IMMR */ |
531 | rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */ |
532 | @@ -656,8 +665,8 @@ _ENTRY(dtlb_miss_exit_2) |
533 | |
534 | DTLBMissLinear: |
535 | mtcr r12 |
536 | - /* Set 8M byte page and mark it valid and accessed */ |
537 | - li r11, MD_PS8MEG | MD_SVALID | M_APG2 |
538 | + /* Set 8M byte page and mark it valid */ |
539 | + li r11, MD_PS8MEG | MD_SVALID |
540 | mtspr SPRN_MD_TWC, r11 |
541 | rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */ |
542 | ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \ |
543 | @@ -675,8 +684,8 @@ _ENTRY(dtlb_miss_exit_3) |
544 | #ifndef CONFIG_PIN_TLB_TEXT |
545 | ITLBMissLinear: |
546 | mtcr r12 |
547 | - /* Set 8M byte page and mark it valid,accessed */ |
548 | - li r11, MI_PS8MEG | MI_SVALID | M_APG2 |
549 | + /* Set 8M byte page and mark it valid */ |
550 | + li r11, MI_PS8MEG | MI_SVALID |
551 | mtspr SPRN_MI_TWC, r11 |
552 | rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */ |
553 | ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \ |
554 | @@ -960,7 +969,7 @@ initial_mmu: |
555 | ori r8, r8, MI_EVALID /* Mark it valid */ |
556 | mtspr SPRN_MI_EPN, r8 |
557 | li r8, MI_PS8MEG /* Set 8M byte page */ |
558 | - ori r8, r8, MI_SVALID | M_APG2 /* Make it valid, APG 2 */ |
559 | + ori r8, r8, MI_SVALID /* Make it valid */ |
560 | mtspr SPRN_MI_TWC, r8 |
561 | li r8, MI_BOOTINIT /* Create RPN for address 0 */ |
562 | mtspr SPRN_MI_RPN, r8 /* Store TLB entry */ |
563 | @@ -987,7 +996,7 @@ initial_mmu: |
564 | ori r8, r8, MD_EVALID /* Mark it valid */ |
565 | mtspr SPRN_MD_EPN, r8 |
566 | li r8, MD_PS512K | MD_GUARDED /* Set 512k byte page */ |
567 | - ori r8, r8, MD_SVALID | M_APG2 /* Make it valid and accessed */ |
568 | + ori r8, r8, MD_SVALID /* Make it valid */ |
569 | mtspr SPRN_MD_TWC, r8 |
570 | mr r8, r9 /* Create paddr for TLB */ |
571 | ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */ |
572 | diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c |
573 | index 2c53de9f3b6a..8661eea78503 100644 |
574 | --- a/arch/powerpc/kernel/module_64.c |
575 | +++ b/arch/powerpc/kernel/module_64.c |
576 | @@ -680,7 +680,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, |
577 | |
578 | case R_PPC64_REL32: |
579 | /* 32 bits relative (used by relative exception tables) */ |
580 | - *(u32 *)location = value - (unsigned long)location; |
581 | + /* Convert value to relative */ |
582 | + value -= (unsigned long)location; |
583 | + if (value + 0x80000000 > 0xffffffff) { |
584 | + pr_err("%s: REL32 %li out of range!\n", |
585 | + me->name, (long int)value); |
586 | + return -ENOEXEC; |
587 | + } |
588 | + *(u32 *)location = value; |
589 | break; |
590 | |
591 | case R_PPC64_TOCSAVE: |
592 | diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c |
593 | index c85adb858271..8689a02b7df8 100644 |
594 | --- a/arch/powerpc/kernel/traps.c |
595 | +++ b/arch/powerpc/kernel/traps.c |
596 | @@ -767,12 +767,17 @@ void machine_check_exception(struct pt_regs *regs) |
597 | if (check_io_access(regs)) |
598 | goto bail; |
599 | |
600 | - die("Machine check", regs, SIGBUS); |
601 | - |
602 | /* Must die if the interrupt is not recoverable */ |
603 | if (!(regs->msr & MSR_RI)) |
604 | nmi_panic(regs, "Unrecoverable Machine check"); |
605 | |
606 | + if (!nested) |
607 | + nmi_exit(); |
608 | + |
609 | + die("Machine check", regs, SIGBUS); |
610 | + |
611 | + return; |
612 | + |
613 | bail: |
614 | if (!nested) |
615 | nmi_exit(); |
616 | diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c |
617 | index cf77d755246d..5d53684c2ebd 100644 |
618 | --- a/arch/powerpc/mm/8xx_mmu.c |
619 | +++ b/arch/powerpc/mm/8xx_mmu.c |
620 | @@ -79,7 +79,7 @@ void __init MMU_init_hw(void) |
621 | for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) { |
622 | mtspr(SPRN_MD_CTR, ctr | (i << 8)); |
623 | mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID); |
624 | - mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID | M_APG2); |
625 | + mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID); |
626 | mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT); |
627 | addr += LARGE_PAGE_SIZE_8M; |
628 | mem -= LARGE_PAGE_SIZE_8M; |
629 | diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c |
630 | index 876e2a3c79f2..bdf33b989f98 100644 |
631 | --- a/arch/powerpc/mm/dump_linuxpagetables.c |
632 | +++ b/arch/powerpc/mm/dump_linuxpagetables.c |
633 | @@ -418,12 +418,13 @@ static void walk_pagetables(struct pg_state *st) |
634 | unsigned int i; |
635 | unsigned long addr; |
636 | |
637 | + addr = st->start_address; |
638 | + |
639 | /* |
640 | * Traverse the linux pagetable structure and dump pages that are in |
641 | * the hash pagetable. |
642 | */ |
643 | - for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { |
644 | - addr = KERN_VIRT_START + i * PGDIR_SIZE; |
645 | + for (i = 0; i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) { |
646 | if (!pgd_none(*pgd) && !pgd_huge(*pgd)) |
647 | /* pgd exists */ |
648 | walk_pud(st, pgd, addr); |
649 | @@ -472,9 +473,14 @@ static int ptdump_show(struct seq_file *m, void *v) |
650 | { |
651 | struct pg_state st = { |
652 | .seq = m, |
653 | - .start_address = KERN_VIRT_START, |
654 | .marker = address_markers, |
655 | }; |
656 | + |
657 | + if (radix_enabled()) |
658 | + st.start_address = PAGE_OFFSET; |
659 | + else |
660 | + st.start_address = KERN_VIRT_START; |
661 | + |
662 | /* Traverse kernel page tables */ |
663 | walk_pagetables(&st); |
664 | note_page(&st, 0, 0, 0); |
665 | diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c |
666 | index e87f9ef9115b..7296a42eb62e 100644 |
667 | --- a/arch/powerpc/mm/hugetlbpage.c |
668 | +++ b/arch/powerpc/mm/hugetlbpage.c |
669 | @@ -19,6 +19,7 @@ |
670 | #include <linux/moduleparam.h> |
671 | #include <linux/swap.h> |
672 | #include <linux/swapops.h> |
673 | +#include <linux/kmemleak.h> |
674 | #include <asm/pgtable.h> |
675 | #include <asm/pgalloc.h> |
676 | #include <asm/tlb.h> |
677 | @@ -112,6 +113,8 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, |
678 | for (i = i - 1 ; i >= 0; i--, hpdp--) |
679 | *hpdp = __hugepd(0); |
680 | kmem_cache_free(cachep, new); |
681 | + } else { |
682 | + kmemleak_ignore(new); |
683 | } |
684 | spin_unlock(ptl); |
685 | return 0; |
686 | diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c |
687 | index 205fe557ca10..4f213ba33491 100644 |
688 | --- a/arch/powerpc/mm/slice.c |
689 | +++ b/arch/powerpc/mm/slice.c |
690 | @@ -61,6 +61,13 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) { |
691 | |
692 | #endif |
693 | |
694 | +static inline bool slice_addr_is_low(unsigned long addr) |
695 | +{ |
696 | + u64 tmp = (u64)addr; |
697 | + |
698 | + return tmp < SLICE_LOW_TOP; |
699 | +} |
700 | + |
701 | static void slice_range_to_mask(unsigned long start, unsigned long len, |
702 | struct slice_mask *ret) |
703 | { |
704 | @@ -70,7 +77,7 @@ static void slice_range_to_mask(unsigned long start, unsigned long len, |
705 | if (SLICE_NUM_HIGH) |
706 | bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); |
707 | |
708 | - if (start < SLICE_LOW_TOP) { |
709 | + if (slice_addr_is_low(start)) { |
710 | unsigned long mend = min(end, |
711 | (unsigned long)(SLICE_LOW_TOP - 1)); |
712 | |
713 | @@ -78,7 +85,7 @@ static void slice_range_to_mask(unsigned long start, unsigned long len, |
714 | - (1u << GET_LOW_SLICE_INDEX(start)); |
715 | } |
716 | |
717 | - if ((start + len) > SLICE_LOW_TOP) { |
718 | + if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) { |
719 | unsigned long start_index = GET_HIGH_SLICE_INDEX(start); |
720 | unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT)); |
721 | unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index; |
722 | @@ -133,7 +140,7 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret, |
723 | if (!slice_low_has_vma(mm, i)) |
724 | ret->low_slices |= 1u << i; |
725 | |
726 | - if (high_limit <= SLICE_LOW_TOP) |
727 | + if (slice_addr_is_low(high_limit - 1)) |
728 | return; |
729 | |
730 | for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++) |
731 | @@ -182,7 +189,7 @@ static bool slice_check_range_fits(struct mm_struct *mm, |
732 | unsigned long end = start + len - 1; |
733 | u64 low_slices = 0; |
734 | |
735 | - if (start < SLICE_LOW_TOP) { |
736 | + if (slice_addr_is_low(start)) { |
737 | unsigned long mend = min(end, |
738 | (unsigned long)(SLICE_LOW_TOP - 1)); |
739 | |
740 | @@ -192,7 +199,7 @@ static bool slice_check_range_fits(struct mm_struct *mm, |
741 | if ((low_slices & available->low_slices) != low_slices) |
742 | return false; |
743 | |
744 | - if (SLICE_NUM_HIGH && ((start + len) > SLICE_LOW_TOP)) { |
745 | + if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) { |
746 | unsigned long start_index = GET_HIGH_SLICE_INDEX(start); |
747 | unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT)); |
748 | unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index; |
749 | @@ -303,7 +310,7 @@ static bool slice_scan_available(unsigned long addr, |
750 | int end, unsigned long *boundary_addr) |
751 | { |
752 | unsigned long slice; |
753 | - if (addr < SLICE_LOW_TOP) { |
754 | + if (slice_addr_is_low(addr)) { |
755 | slice = GET_LOW_SLICE_INDEX(addr); |
756 | *boundary_addr = (slice + end) << SLICE_LOW_SHIFT; |
757 | return !!(available->low_slices & (1u << slice)); |
758 | @@ -706,7 +713,7 @@ unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) |
759 | |
760 | VM_BUG_ON(radix_enabled()); |
761 | |
762 | - if (addr < SLICE_LOW_TOP) { |
763 | + if (slice_addr_is_low(addr)) { |
764 | psizes = mm->context.low_slices_psize; |
765 | index = GET_LOW_SLICE_INDEX(addr); |
766 | } else { |
767 | diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c |
768 | index 15fe5f0c8665..ae5d568e267f 100644 |
769 | --- a/arch/powerpc/mm/tlb_nohash.c |
770 | +++ b/arch/powerpc/mm/tlb_nohash.c |
771 | @@ -503,6 +503,9 @@ static void setup_page_sizes(void) |
772 | for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { |
773 | struct mmu_psize_def *def = &mmu_psize_defs[psize]; |
774 | |
775 | + if (!def->shift) |
776 | + continue; |
777 | + |
778 | if (tlb1ps & (1U << (def->shift - 10))) { |
779 | def->flags |= MMU_PAGE_SIZE_DIRECT; |
780 | |
781 | diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c |
782 | index 51dc398ae3f7..a29fdf8a2e56 100644 |
783 | --- a/arch/powerpc/platforms/powernv/memtrace.c |
784 | +++ b/arch/powerpc/platforms/powernv/memtrace.c |
785 | @@ -90,17 +90,15 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages) |
786 | walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE, |
787 | change_memblock_state); |
788 | |
789 | - lock_device_hotplug(); |
790 | - remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); |
791 | - unlock_device_hotplug(); |
792 | |
793 | return true; |
794 | } |
795 | |
796 | static u64 memtrace_alloc_node(u32 nid, u64 size) |
797 | { |
798 | - u64 start_pfn, end_pfn, nr_pages; |
799 | + u64 start_pfn, end_pfn, nr_pages, pfn; |
800 | u64 base_pfn; |
801 | + u64 bytes = memory_block_size_bytes(); |
802 | |
803 | if (!node_spanned_pages(nid)) |
804 | return 0; |
805 | @@ -113,8 +111,21 @@ static u64 memtrace_alloc_node(u32 nid, u64 size) |
806 | end_pfn = round_down(end_pfn - nr_pages, nr_pages); |
807 | |
808 | for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) { |
809 | - if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) |
810 | + if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) { |
811 | + /* |
812 | + * Remove memory in memory block size chunks so that |
813 | + * iomem resources are always split to the same size and |
814 | + * we never try to remove memory that spans two iomem |
815 | + * resources. |
816 | + */ |
817 | + lock_device_hotplug(); |
818 | + end_pfn = base_pfn + nr_pages; |
819 | + for (pfn = base_pfn; pfn < end_pfn; pfn += bytes>> PAGE_SHIFT) { |
820 | + remove_memory(nid, pfn << PAGE_SHIFT, bytes); |
821 | + } |
822 | + unlock_device_hotplug(); |
823 | return base_pfn << PAGE_SHIFT; |
824 | + } |
825 | } |
826 | |
827 | return 0; |
828 | diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h |
829 | index 3a17107594c8..eb786f90f2d3 100644 |
830 | --- a/arch/x86/include/asm/mce.h |
831 | +++ b/arch/x86/include/asm/mce.h |
832 | @@ -216,6 +216,8 @@ static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *s |
833 | |
834 | int mce_available(struct cpuinfo_x86 *c); |
835 | bool mce_is_memory_error(struct mce *m); |
836 | +bool mce_is_correctable(struct mce *m); |
837 | +int mce_usable_address(struct mce *m); |
838 | |
839 | DECLARE_PER_CPU(unsigned, mce_exception_count); |
840 | DECLARE_PER_CPU(unsigned, mce_poll_count); |
841 | diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c |
842 | index 953b3ce92dcc..cdbedeb3f3db 100644 |
843 | --- a/arch/x86/kernel/cpu/mcheck/mce.c |
844 | +++ b/arch/x86/kernel/cpu/mcheck/mce.c |
845 | @@ -485,7 +485,7 @@ static void mce_report_event(struct pt_regs *regs) |
846 | * be somewhat complicated (e.g. segment offset would require an instruction |
847 | * parser). So only support physical addresses up to page granuality for now. |
848 | */ |
849 | -static int mce_usable_address(struct mce *m) |
850 | +int mce_usable_address(struct mce *m) |
851 | { |
852 | if (!(m->status & MCI_STATUS_ADDRV)) |
853 | return 0; |
854 | @@ -505,6 +505,7 @@ static int mce_usable_address(struct mce *m) |
855 | |
856 | return 1; |
857 | } |
858 | +EXPORT_SYMBOL_GPL(mce_usable_address); |
859 | |
860 | bool mce_is_memory_error(struct mce *m) |
861 | { |
862 | @@ -534,7 +535,7 @@ bool mce_is_memory_error(struct mce *m) |
863 | } |
864 | EXPORT_SYMBOL_GPL(mce_is_memory_error); |
865 | |
866 | -static bool mce_is_correctable(struct mce *m) |
867 | +bool mce_is_correctable(struct mce *m) |
868 | { |
869 | if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED) |
870 | return false; |
871 | @@ -544,6 +545,7 @@ static bool mce_is_correctable(struct mce *m) |
872 | |
873 | return true; |
874 | } |
875 | +EXPORT_SYMBOL_GPL(mce_is_correctable); |
876 | |
877 | static bool cec_add_mce(struct mce *m) |
878 | { |
879 | diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c |
880 | index ad12733f6058..852e74e48890 100644 |
881 | --- a/arch/x86/kernel/cpu/mshyperv.c |
882 | +++ b/arch/x86/kernel/cpu/mshyperv.c |
883 | @@ -20,6 +20,7 @@ |
884 | #include <linux/interrupt.h> |
885 | #include <linux/irq.h> |
886 | #include <linux/kexec.h> |
887 | +#include <linux/i8253.h> |
888 | #include <asm/processor.h> |
889 | #include <asm/hypervisor.h> |
890 | #include <asm/hyperv-tlfs.h> |
891 | @@ -285,6 +286,16 @@ static void __init ms_hyperv_init_platform(void) |
892 | if (efi_enabled(EFI_BOOT)) |
893 | x86_platform.get_nmi_reason = hv_get_nmi_reason; |
894 | |
895 | + /* |
896 | + * Hyper-V VMs have a PIT emulation quirk such that zeroing the |
897 | + * counter register during PIT shutdown restarts the PIT. So it |
898 | + * continues to interrupt @18.2 HZ. Setting i8253_clear_counter |
899 | + * to false tells pit_shutdown() not to zero the counter so that |
900 | + * the PIT really is shutdown. Generation 2 VMs don't have a PIT, |
901 | + * and setting this value has no effect. |
902 | + */ |
903 | + i8253_clear_counter_on_shutdown = false; |
904 | + |
905 | #if IS_ENABLED(CONFIG_HYPERV) |
906 | /* |
907 | * Setup the hook to get control post apic initialization. |
908 | diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c |
909 | index 8e005329648b..d805202c63cd 100644 |
910 | --- a/arch/x86/kernel/cpu/vmware.c |
911 | +++ b/arch/x86/kernel/cpu/vmware.c |
912 | @@ -77,7 +77,7 @@ static __init int setup_vmw_sched_clock(char *s) |
913 | } |
914 | early_param("no-vmw-sched-clock", setup_vmw_sched_clock); |
915 | |
916 | -static unsigned long long vmware_sched_clock(void) |
917 | +static unsigned long long notrace vmware_sched_clock(void) |
918 | { |
919 | unsigned long long ns; |
920 | |
921 | diff --git a/arch/x86/um/shared/sysdep/ptrace_32.h b/arch/x86/um/shared/sysdep/ptrace_32.h |
922 | index b94a108de1dc..ae00d22bce02 100644 |
923 | --- a/arch/x86/um/shared/sysdep/ptrace_32.h |
924 | +++ b/arch/x86/um/shared/sysdep/ptrace_32.h |
925 | @@ -10,20 +10,10 @@ |
926 | |
927 | static inline void update_debugregs(int seq) {} |
928 | |
929 | -/* syscall emulation path in ptrace */ |
930 | - |
931 | -#ifndef PTRACE_SYSEMU |
932 | -#define PTRACE_SYSEMU 31 |
933 | -#endif |
934 | - |
935 | void set_using_sysemu(int value); |
936 | int get_using_sysemu(void); |
937 | extern int sysemu_supported; |
938 | |
939 | -#ifndef PTRACE_SYSEMU_SINGLESTEP |
940 | -#define PTRACE_SYSEMU_SINGLESTEP 32 |
941 | -#endif |
942 | - |
943 | #define UPT_SYSCALL_ARG1(r) UPT_BX(r) |
944 | #define UPT_SYSCALL_ARG2(r) UPT_CX(r) |
945 | #define UPT_SYSCALL_ARG3(r) UPT_DX(r) |
946 | diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile |
947 | index dc9e0ba7122c..294846117fc2 100644 |
948 | --- a/arch/xtensa/boot/Makefile |
949 | +++ b/arch/xtensa/boot/Makefile |
950 | @@ -33,7 +33,7 @@ uImage: $(obj)/uImage |
951 | boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y)) |
952 | $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS) |
953 | |
954 | -OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary |
955 | +OBJCOPYFLAGS = --strip-all -R .comment -R .notes -O binary |
956 | |
957 | vmlinux.bin: vmlinux FORCE |
958 | $(call if_changed,objcopy) |
959 | diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h |
960 | index e4ccb88b7996..677bc76c1d70 100644 |
961 | --- a/arch/xtensa/include/asm/processor.h |
962 | +++ b/arch/xtensa/include/asm/processor.h |
963 | @@ -23,7 +23,11 @@ |
964 | # error Linux requires the Xtensa Windowed Registers Option. |
965 | #endif |
966 | |
967 | -#define ARCH_SLAB_MINALIGN XCHAL_DATA_WIDTH |
968 | +/* Xtensa ABI requires stack alignment to be at least 16 */ |
969 | + |
970 | +#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16) |
971 | + |
972 | +#define ARCH_SLAB_MINALIGN STACK_ALIGN |
973 | |
974 | /* |
975 | * User space process size: 1 GB. |
976 | diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S |
977 | index 2f76118ecf62..9053a5622d2c 100644 |
978 | --- a/arch/xtensa/kernel/head.S |
979 | +++ b/arch/xtensa/kernel/head.S |
980 | @@ -88,9 +88,12 @@ _SetupMMU: |
981 | initialize_mmu |
982 | #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY |
983 | rsr a2, excsave1 |
984 | - movi a3, 0x08000000 |
985 | + movi a3, XCHAL_KSEG_PADDR |
986 | + bltu a2, a3, 1f |
987 | + sub a2, a2, a3 |
988 | + movi a3, XCHAL_KSEG_SIZE |
989 | bgeu a2, a3, 1f |
990 | - movi a3, 0xd0000000 |
991 | + movi a3, XCHAL_KSEG_CACHED_VADDR |
992 | add a2, a2, a3 |
993 | wsr a2, excsave1 |
994 | 1: |
995 | diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S |
996 | index a1c3edb8ad56..fa926995d2a3 100644 |
997 | --- a/arch/xtensa/kernel/vmlinux.lds.S |
998 | +++ b/arch/xtensa/kernel/vmlinux.lds.S |
999 | @@ -131,6 +131,7 @@ SECTIONS |
1000 | .fixup : { *(.fixup) } |
1001 | |
1002 | EXCEPTION_TABLE(16) |
1003 | + NOTES |
1004 | /* Data section */ |
1005 | |
1006 | _sdata = .; |
1007 | diff --git a/block/blk-core.c b/block/blk-core.c |
1008 | index cff0a60ee200..eb8b52241453 100644 |
1009 | --- a/block/blk-core.c |
1010 | +++ b/block/blk-core.c |
1011 | @@ -793,9 +793,8 @@ void blk_cleanup_queue(struct request_queue *q) |
1012 | * dispatch may still be in-progress since we dispatch requests |
1013 | * from more than one contexts. |
1014 | * |
1015 | - * No need to quiesce queue if it isn't initialized yet since |
1016 | - * blk_freeze_queue() should be enough for cases of passthrough |
1017 | - * request. |
1018 | + * We rely on driver to deal with the race in case that queue |
1019 | + * initialization isn't done. |
1020 | */ |
1021 | if (q->mq_ops && blk_queue_init_done(q)) |
1022 | blk_mq_quiesce_queue(q); |
1023 | diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c |
1024 | index 0e89b5457cab..ceeb2eaf28cf 100644 |
1025 | --- a/crypto/crypto_user.c |
1026 | +++ b/crypto/crypto_user.c |
1027 | @@ -83,7 +83,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) |
1028 | { |
1029 | struct crypto_report_cipher rcipher; |
1030 | |
1031 | - strlcpy(rcipher.type, "cipher", sizeof(rcipher.type)); |
1032 | + strncpy(rcipher.type, "cipher", sizeof(rcipher.type)); |
1033 | |
1034 | rcipher.blocksize = alg->cra_blocksize; |
1035 | rcipher.min_keysize = alg->cra_cipher.cia_min_keysize; |
1036 | @@ -102,7 +102,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) |
1037 | { |
1038 | struct crypto_report_comp rcomp; |
1039 | |
1040 | - strlcpy(rcomp.type, "compression", sizeof(rcomp.type)); |
1041 | + strncpy(rcomp.type, "compression", sizeof(rcomp.type)); |
1042 | if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, |
1043 | sizeof(struct crypto_report_comp), &rcomp)) |
1044 | goto nla_put_failure; |
1045 | @@ -116,7 +116,7 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg) |
1046 | { |
1047 | struct crypto_report_acomp racomp; |
1048 | |
1049 | - strlcpy(racomp.type, "acomp", sizeof(racomp.type)); |
1050 | + strncpy(racomp.type, "acomp", sizeof(racomp.type)); |
1051 | |
1052 | if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, |
1053 | sizeof(struct crypto_report_acomp), &racomp)) |
1054 | @@ -131,7 +131,7 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) |
1055 | { |
1056 | struct crypto_report_akcipher rakcipher; |
1057 | |
1058 | - strlcpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); |
1059 | + strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); |
1060 | |
1061 | if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, |
1062 | sizeof(struct crypto_report_akcipher), &rakcipher)) |
1063 | @@ -146,7 +146,7 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg) |
1064 | { |
1065 | struct crypto_report_kpp rkpp; |
1066 | |
1067 | - strlcpy(rkpp.type, "kpp", sizeof(rkpp.type)); |
1068 | + strncpy(rkpp.type, "kpp", sizeof(rkpp.type)); |
1069 | |
1070 | if (nla_put(skb, CRYPTOCFGA_REPORT_KPP, |
1071 | sizeof(struct crypto_report_kpp), &rkpp)) |
1072 | @@ -160,10 +160,10 @@ nla_put_failure: |
1073 | static int crypto_report_one(struct crypto_alg *alg, |
1074 | struct crypto_user_alg *ualg, struct sk_buff *skb) |
1075 | { |
1076 | - strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); |
1077 | - strlcpy(ualg->cru_driver_name, alg->cra_driver_name, |
1078 | + strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); |
1079 | + strncpy(ualg->cru_driver_name, alg->cra_driver_name, |
1080 | sizeof(ualg->cru_driver_name)); |
1081 | - strlcpy(ualg->cru_module_name, module_name(alg->cra_module), |
1082 | + strncpy(ualg->cru_module_name, module_name(alg->cra_module), |
1083 | sizeof(ualg->cru_module_name)); |
1084 | |
1085 | ualg->cru_type = 0; |
1086 | @@ -176,7 +176,7 @@ static int crypto_report_one(struct crypto_alg *alg, |
1087 | if (alg->cra_flags & CRYPTO_ALG_LARVAL) { |
1088 | struct crypto_report_larval rl; |
1089 | |
1090 | - strlcpy(rl.type, "larval", sizeof(rl.type)); |
1091 | + strncpy(rl.type, "larval", sizeof(rl.type)); |
1092 | if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, |
1093 | sizeof(struct crypto_report_larval), &rl)) |
1094 | goto nla_put_failure; |
1095 | diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c |
1096 | index 78f9de260d5f..e9fb0bf3c8d2 100644 |
1097 | --- a/drivers/acpi/acpica/dsopcode.c |
1098 | +++ b/drivers/acpi/acpica/dsopcode.c |
1099 | @@ -417,10 +417,6 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state, |
1100 | ACPI_FORMAT_UINT64(obj_desc->region.address), |
1101 | obj_desc->region.length)); |
1102 | |
1103 | - status = acpi_ut_add_address_range(obj_desc->region.space_id, |
1104 | - obj_desc->region.address, |
1105 | - obj_desc->region.length, node); |
1106 | - |
1107 | /* Now the address and length are valid for this opregion */ |
1108 | |
1109 | obj_desc->region.flags |= AOPOBJ_DATA_VALID; |
1110 | diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c |
1111 | index 19b641208d86..c5d15752dfb3 100644 |
1112 | --- a/drivers/acpi/nfit/core.c |
1113 | +++ b/drivers/acpi/nfit/core.c |
1114 | @@ -2845,9 +2845,9 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc) |
1115 | return rc; |
1116 | |
1117 | if (ars_status_process_records(acpi_desc)) |
1118 | - return -ENOMEM; |
1119 | + dev_err(acpi_desc->dev, "Failed to process ARS records\n"); |
1120 | |
1121 | - return 0; |
1122 | + return rc; |
1123 | } |
1124 | |
1125 | static int ars_register(struct acpi_nfit_desc *acpi_desc, |
1126 | diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c |
1127 | index e9626bf6ca29..d6c1b10f6c25 100644 |
1128 | --- a/drivers/acpi/nfit/mce.c |
1129 | +++ b/drivers/acpi/nfit/mce.c |
1130 | @@ -25,8 +25,12 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, |
1131 | struct acpi_nfit_desc *acpi_desc; |
1132 | struct nfit_spa *nfit_spa; |
1133 | |
1134 | - /* We only care about memory errors */ |
1135 | - if (!mce_is_memory_error(mce)) |
1136 | + /* We only care about uncorrectable memory errors */ |
1137 | + if (!mce_is_memory_error(mce) || mce_is_correctable(mce)) |
1138 | + return NOTIFY_DONE; |
1139 | + |
1140 | + /* Verify the address reported in the MCE is valid. */ |
1141 | + if (!mce_usable_address(mce)) |
1142 | return NOTIFY_DONE; |
1143 | |
1144 | /* |
1145 | diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c |
1146 | index 6e594644cb1d..a7f5202a4815 100644 |
1147 | --- a/drivers/ata/libata-core.c |
1148 | +++ b/drivers/ata/libata-core.c |
1149 | @@ -4553,7 +4553,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { |
1150 | /* These specific Samsung models/firmware-revs do not handle LPM well */ |
1151 | { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, |
1152 | { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, }, |
1153 | - { "SAMSUNG MZ7TD256HAFV-000L9", "DXT02L5Q", ATA_HORKAGE_NOLPM, }, |
1154 | + { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, }, |
1155 | |
1156 | /* devices that don't properly handle queued TRIM commands */ |
1157 | { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | |
1158 | diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c |
1159 | index a1d6b5597c17..66921427d109 100644 |
1160 | --- a/drivers/block/zram/zram_drv.c |
1161 | +++ b/drivers/block/zram/zram_drv.c |
1162 | @@ -1636,6 +1636,11 @@ static const struct attribute_group zram_disk_attr_group = { |
1163 | .attrs = zram_disk_attrs, |
1164 | }; |
1165 | |
1166 | +static const struct attribute_group *zram_disk_attr_groups[] = { |
1167 | + &zram_disk_attr_group, |
1168 | + NULL, |
1169 | +}; |
1170 | + |
1171 | /* |
1172 | * Allocate and initialize new zram device. the function returns |
1173 | * '>= 0' device_id upon success, and negative value otherwise. |
1174 | @@ -1716,24 +1721,15 @@ static int zram_add(void) |
1175 | |
1176 | zram->disk->queue->backing_dev_info->capabilities |= |
1177 | (BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO); |
1178 | + disk_to_dev(zram->disk)->groups = zram_disk_attr_groups; |
1179 | add_disk(zram->disk); |
1180 | |
1181 | - ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj, |
1182 | - &zram_disk_attr_group); |
1183 | - if (ret < 0) { |
1184 | - pr_err("Error creating sysfs group for device %d\n", |
1185 | - device_id); |
1186 | - goto out_free_disk; |
1187 | - } |
1188 | strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor)); |
1189 | |
1190 | zram_debugfs_register(zram); |
1191 | pr_info("Added device: %s\n", zram->disk->disk_name); |
1192 | return device_id; |
1193 | |
1194 | -out_free_disk: |
1195 | - del_gendisk(zram->disk); |
1196 | - put_disk(zram->disk); |
1197 | out_free_queue: |
1198 | blk_cleanup_queue(queue); |
1199 | out_free_idr: |
1200 | @@ -1762,16 +1758,6 @@ static int zram_remove(struct zram *zram) |
1201 | mutex_unlock(&bdev->bd_mutex); |
1202 | |
1203 | zram_debugfs_unregister(zram); |
1204 | - /* |
1205 | - * Remove sysfs first, so no one will perform a disksize |
1206 | - * store while we destroy the devices. This also helps during |
1207 | - * hot_remove -- zram_reset_device() is the last holder of |
1208 | - * ->init_lock, no later/concurrent disksize_store() or any |
1209 | - * other sysfs handlers are possible. |
1210 | - */ |
1211 | - sysfs_remove_group(&disk_to_dev(zram->disk)->kobj, |
1212 | - &zram_disk_attr_group); |
1213 | - |
1214 | /* Make sure all the pending I/O are finished */ |
1215 | fsync_bdev(bdev); |
1216 | zram_reset_device(zram); |
1217 | diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c |
1218 | index a5d5a96479bf..10802d1fc554 100644 |
1219 | --- a/drivers/cdrom/cdrom.c |
1220 | +++ b/drivers/cdrom/cdrom.c |
1221 | @@ -2445,7 +2445,7 @@ static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi, |
1222 | return -ENOSYS; |
1223 | |
1224 | if (arg != CDSL_CURRENT && arg != CDSL_NONE) { |
1225 | - if ((int)arg >= cdi->capacity) |
1226 | + if (arg >= cdi->capacity) |
1227 | return -EINVAL; |
1228 | } |
1229 | |
1230 | diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c |
1231 | index 72b6091eb7b9..dc7fbc796cb6 100644 |
1232 | --- a/drivers/clk/at91/clk-pll.c |
1233 | +++ b/drivers/clk/at91/clk-pll.c |
1234 | @@ -133,6 +133,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, |
1235 | { |
1236 | struct clk_pll *pll = to_clk_pll(hw); |
1237 | |
1238 | + if (!pll->div || !pll->mul) |
1239 | + return 0; |
1240 | + |
1241 | return (parent_rate / pll->div) * (pll->mul + 1); |
1242 | } |
1243 | |
1244 | diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c |
1245 | index d44e0eea31ec..0934d3724495 100644 |
1246 | --- a/drivers/clk/clk-s2mps11.c |
1247 | +++ b/drivers/clk/clk-s2mps11.c |
1248 | @@ -245,6 +245,36 @@ static const struct platform_device_id s2mps11_clk_id[] = { |
1249 | }; |
1250 | MODULE_DEVICE_TABLE(platform, s2mps11_clk_id); |
1251 | |
1252 | +#ifdef CONFIG_OF |
1253 | +/* |
1254 | + * Device is instantiated through parent MFD device and device matching is done |
1255 | + * through platform_device_id. |
1256 | + * |
1257 | + * However if device's DT node contains proper clock compatible and driver is |
1258 | + * built as a module, then the *module* matching will be done trough DT aliases. |
1259 | + * This requires of_device_id table. In the same time this will not change the |
1260 | + * actual *device* matching so do not add .of_match_table. |
1261 | + */ |
1262 | +static const struct of_device_id s2mps11_dt_match[] = { |
1263 | + { |
1264 | + .compatible = "samsung,s2mps11-clk", |
1265 | + .data = (void *)S2MPS11X, |
1266 | + }, { |
1267 | + .compatible = "samsung,s2mps13-clk", |
1268 | + .data = (void *)S2MPS13X, |
1269 | + }, { |
1270 | + .compatible = "samsung,s2mps14-clk", |
1271 | + .data = (void *)S2MPS14X, |
1272 | + }, { |
1273 | + .compatible = "samsung,s5m8767-clk", |
1274 | + .data = (void *)S5M8767X, |
1275 | + }, { |
1276 | + /* Sentinel */ |
1277 | + }, |
1278 | +}; |
1279 | +MODULE_DEVICE_TABLE(of, s2mps11_dt_match); |
1280 | +#endif |
1281 | + |
1282 | static struct platform_driver s2mps11_clk_driver = { |
1283 | .driver = { |
1284 | .name = "s2mps11-clk", |
1285 | diff --git a/drivers/clk/hisilicon/reset.c b/drivers/clk/hisilicon/reset.c |
1286 | index 2a5015c736ce..43e82fa64422 100644 |
1287 | --- a/drivers/clk/hisilicon/reset.c |
1288 | +++ b/drivers/clk/hisilicon/reset.c |
1289 | @@ -109,9 +109,8 @@ struct hisi_reset_controller *hisi_reset_init(struct platform_device *pdev) |
1290 | return NULL; |
1291 | |
1292 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1293 | - rstc->membase = devm_ioremap(&pdev->dev, |
1294 | - res->start, resource_size(res)); |
1295 | - if (!rstc->membase) |
1296 | + rstc->membase = devm_ioremap_resource(&pdev->dev, res); |
1297 | + if (IS_ERR(rstc->membase)) |
1298 | return NULL; |
1299 | |
1300 | spin_lock_init(&rstc->lock); |
1301 | diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c |
1302 | index 00ce62ad6416..8cf74fc423e6 100644 |
1303 | --- a/drivers/clk/meson/axg.c |
1304 | +++ b/drivers/clk/meson/axg.c |
1305 | @@ -319,6 +319,7 @@ static struct clk_regmap axg_fclk_div2 = { |
1306 | .ops = &clk_regmap_gate_ops, |
1307 | .parent_names = (const char *[]){ "fclk_div2_div" }, |
1308 | .num_parents = 1, |
1309 | + .flags = CLK_IS_CRITICAL, |
1310 | }, |
1311 | }; |
1312 | |
1313 | @@ -343,6 +344,18 @@ static struct clk_regmap axg_fclk_div3 = { |
1314 | .ops = &clk_regmap_gate_ops, |
1315 | .parent_names = (const char *[]){ "fclk_div3_div" }, |
1316 | .num_parents = 1, |
1317 | + /* |
1318 | + * FIXME: |
1319 | + * This clock, as fdiv2, is used by the SCPI FW and is required |
1320 | + * by the platform to operate correctly. |
1321 | + * Until the following condition are met, we need this clock to |
1322 | + * be marked as critical: |
1323 | + * a) The SCPI generic driver claims and enable all the clocks |
1324 | + * it needs |
1325 | + * b) CCF has a clock hand-off mechanism to make the sure the |
1326 | + * clock stays on until the proper driver comes along |
1327 | + */ |
1328 | + .flags = CLK_IS_CRITICAL, |
1329 | }, |
1330 | }; |
1331 | |
1332 | diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c |
1333 | index 86d3ae58e84c..6435d86118f1 100644 |
1334 | --- a/drivers/clk/meson/gxbb.c |
1335 | +++ b/drivers/clk/meson/gxbb.c |
1336 | @@ -522,6 +522,18 @@ static struct clk_regmap gxbb_fclk_div3 = { |
1337 | .ops = &clk_regmap_gate_ops, |
1338 | .parent_names = (const char *[]){ "fclk_div3_div" }, |
1339 | .num_parents = 1, |
1340 | + /* |
1341 | + * FIXME: |
1342 | + * This clock, as fdiv2, is used by the SCPI FW and is required |
1343 | + * by the platform to operate correctly. |
1344 | + * Until the following condition are met, we need this clock to |
1345 | + * be marked as critical: |
1346 | + * a) The SCPI generic driver claims and enable all the clocks |
1347 | + * it needs |
1348 | + * b) CCF has a clock hand-off mechanism to make the sure the |
1349 | + * clock stays on until the proper driver comes along |
1350 | + */ |
1351 | + .flags = CLK_IS_CRITICAL, |
1352 | }, |
1353 | }; |
1354 | |
1355 | diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c |
1356 | index e8075359366b..ebce5260068b 100644 |
1357 | --- a/drivers/clk/rockchip/clk-ddr.c |
1358 | +++ b/drivers/clk/rockchip/clk-ddr.c |
1359 | @@ -80,16 +80,12 @@ static long rockchip_ddrclk_sip_round_rate(struct clk_hw *hw, |
1360 | static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw) |
1361 | { |
1362 | struct rockchip_ddrclk *ddrclk = to_rockchip_ddrclk_hw(hw); |
1363 | - int num_parents = clk_hw_get_num_parents(hw); |
1364 | u32 val; |
1365 | |
1366 | val = clk_readl(ddrclk->reg_base + |
1367 | ddrclk->mux_offset) >> ddrclk->mux_shift; |
1368 | val &= GENMASK(ddrclk->mux_width - 1, 0); |
1369 | |
1370 | - if (val >= num_parents) |
1371 | - return -EINVAL; |
1372 | - |
1373 | return val; |
1374 | } |
1375 | |
1376 | diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c |
1377 | index 252366a5231f..2c5426607790 100644 |
1378 | --- a/drivers/clk/rockchip/clk-rk3328.c |
1379 | +++ b/drivers/clk/rockchip/clk-rk3328.c |
1380 | @@ -813,22 +813,22 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { |
1381 | MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc", |
1382 | RK3328_SDMMC_CON0, 1), |
1383 | MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc", |
1384 | - RK3328_SDMMC_CON1, 1), |
1385 | + RK3328_SDMMC_CON1, 0), |
1386 | |
1387 | MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio", |
1388 | RK3328_SDIO_CON0, 1), |
1389 | MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio", |
1390 | - RK3328_SDIO_CON1, 1), |
1391 | + RK3328_SDIO_CON1, 0), |
1392 | |
1393 | MMC(SCLK_EMMC_DRV, "emmc_drv", "clk_emmc", |
1394 | RK3328_EMMC_CON0, 1), |
1395 | MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "clk_emmc", |
1396 | - RK3328_EMMC_CON1, 1), |
1397 | + RK3328_EMMC_CON1, 0), |
1398 | |
1399 | MMC(SCLK_SDMMC_EXT_DRV, "sdmmc_ext_drv", "clk_sdmmc_ext", |
1400 | RK3328_SDMMC_EXT_CON0, 1), |
1401 | MMC(SCLK_SDMMC_EXT_SAMPLE, "sdmmc_ext_sample", "clk_sdmmc_ext", |
1402 | - RK3328_SDMMC_EXT_CON1, 1), |
1403 | + RK3328_SDMMC_EXT_CON1, 0), |
1404 | }; |
1405 | |
1406 | static const char *const rk3328_critical_clocks[] __initconst = { |
1407 | diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c |
1408 | index bdbfe78fe133..0f7a0ffd3f70 100644 |
1409 | --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c |
1410 | +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c |
1411 | @@ -224,7 +224,7 @@ static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2", |
1412 | psi_ahb1_ahb2_parents, |
1413 | 0x510, |
1414 | 0, 5, /* M */ |
1415 | - 16, 2, /* P */ |
1416 | + 8, 2, /* P */ |
1417 | 24, 2, /* mux */ |
1418 | 0); |
1419 | |
1420 | @@ -233,19 +233,19 @@ static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k", |
1421 | "pll-periph0" }; |
1422 | static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c, |
1423 | 0, 5, /* M */ |
1424 | - 16, 2, /* P */ |
1425 | + 8, 2, /* P */ |
1426 | 24, 2, /* mux */ |
1427 | 0); |
1428 | |
1429 | static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520, |
1430 | 0, 5, /* M */ |
1431 | - 16, 2, /* P */ |
1432 | + 8, 2, /* P */ |
1433 | 24, 2, /* mux */ |
1434 | 0); |
1435 | |
1436 | static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524, |
1437 | 0, 5, /* M */ |
1438 | - 16, 2, /* P */ |
1439 | + 8, 2, /* P */ |
1440 | 24, 2, /* mux */ |
1441 | 0); |
1442 | |
1443 | diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c |
1444 | index 9c38895542f4..d4350bb10b83 100644 |
1445 | --- a/drivers/clocksource/i8253.c |
1446 | +++ b/drivers/clocksource/i8253.c |
1447 | @@ -20,6 +20,13 @@ |
1448 | DEFINE_RAW_SPINLOCK(i8253_lock); |
1449 | EXPORT_SYMBOL(i8253_lock); |
1450 | |
1451 | +/* |
1452 | + * Handle PIT quirk in pit_shutdown() where zeroing the counter register |
1453 | + * restarts the PIT, negating the shutdown. On platforms with the quirk, |
1454 | + * platform specific code can set this to false. |
1455 | + */ |
1456 | +bool i8253_clear_counter_on_shutdown __ro_after_init = true; |
1457 | + |
1458 | #ifdef CONFIG_CLKSRC_I8253 |
1459 | /* |
1460 | * Since the PIT overflows every tick, its not very useful |
1461 | @@ -109,8 +116,11 @@ static int pit_shutdown(struct clock_event_device *evt) |
1462 | raw_spin_lock(&i8253_lock); |
1463 | |
1464 | outb_p(0x30, PIT_MODE); |
1465 | - outb_p(0, PIT_CH0); |
1466 | - outb_p(0, PIT_CH0); |
1467 | + |
1468 | + if (i8253_clear_counter_on_shutdown) { |
1469 | + outb_p(0, PIT_CH0); |
1470 | + outb_p(0, PIT_CH0); |
1471 | + } |
1472 | |
1473 | raw_spin_unlock(&i8253_lock); |
1474 | return 0; |
1475 | diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c |
1476 | index 073557f433eb..df564d783216 100644 |
1477 | --- a/drivers/cpuidle/cpuidle-arm.c |
1478 | +++ b/drivers/cpuidle/cpuidle-arm.c |
1479 | @@ -103,13 +103,6 @@ static int __init arm_idle_init_cpu(int cpu) |
1480 | goto out_kfree_drv; |
1481 | } |
1482 | |
1483 | - ret = cpuidle_register_driver(drv); |
1484 | - if (ret) { |
1485 | - if (ret != -EBUSY) |
1486 | - pr_err("Failed to register cpuidle driver\n"); |
1487 | - goto out_kfree_drv; |
1488 | - } |
1489 | - |
1490 | /* |
1491 | * Call arch CPU operations in order to initialize |
1492 | * idle states suspend back-end specific data |
1493 | @@ -117,15 +110,20 @@ static int __init arm_idle_init_cpu(int cpu) |
1494 | ret = arm_cpuidle_init(cpu); |
1495 | |
1496 | /* |
1497 | - * Skip the cpuidle device initialization if the reported |
1498 | + * Allow the initialization to continue for other CPUs, if the reported |
1499 | * failure is a HW misconfiguration/breakage (-ENXIO). |
1500 | */ |
1501 | - if (ret == -ENXIO) |
1502 | - return 0; |
1503 | - |
1504 | if (ret) { |
1505 | pr_err("CPU %d failed to init idle CPU ops\n", cpu); |
1506 | - goto out_unregister_drv; |
1507 | + ret = ret == -ENXIO ? 0 : ret; |
1508 | + goto out_kfree_drv; |
1509 | + } |
1510 | + |
1511 | + ret = cpuidle_register_driver(drv); |
1512 | + if (ret) { |
1513 | + if (ret != -EBUSY) |
1514 | + pr_err("Failed to register cpuidle driver\n"); |
1515 | + goto out_kfree_drv; |
1516 | } |
1517 | |
1518 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
1519 | diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c |
1520 | index f7d6d690116e..cdc4f9a171d9 100644 |
1521 | --- a/drivers/crypto/hisilicon/sec/sec_algs.c |
1522 | +++ b/drivers/crypto/hisilicon/sec/sec_algs.c |
1523 | @@ -732,6 +732,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, |
1524 | int *splits_in_nents; |
1525 | int *splits_out_nents = NULL; |
1526 | struct sec_request_el *el, *temp; |
1527 | + bool split = skreq->src != skreq->dst; |
1528 | |
1529 | mutex_init(&sec_req->lock); |
1530 | sec_req->req_base = &skreq->base; |
1531 | @@ -750,7 +751,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, |
1532 | if (ret) |
1533 | goto err_free_split_sizes; |
1534 | |
1535 | - if (skreq->src != skreq->dst) { |
1536 | + if (split) { |
1537 | sec_req->len_out = sg_nents(skreq->dst); |
1538 | ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps, |
1539 | &splits_out, &splits_out_nents, |
1540 | @@ -785,8 +786,9 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, |
1541 | split_sizes[i], |
1542 | skreq->src != skreq->dst, |
1543 | splits_in[i], splits_in_nents[i], |
1544 | - splits_out[i], |
1545 | - splits_out_nents[i], info); |
1546 | + split ? splits_out[i] : NULL, |
1547 | + split ? splits_out_nents[i] : 0, |
1548 | + info); |
1549 | if (IS_ERR(el)) { |
1550 | ret = PTR_ERR(el); |
1551 | goto err_free_elements; |
1552 | @@ -806,13 +808,6 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, |
1553 | * more refined but this is unlikely to happen so no need. |
1554 | */ |
1555 | |
1556 | - /* Cleanup - all elements in pointer arrays have been coppied */ |
1557 | - kfree(splits_in_nents); |
1558 | - kfree(splits_in); |
1559 | - kfree(splits_out_nents); |
1560 | - kfree(splits_out); |
1561 | - kfree(split_sizes); |
1562 | - |
1563 | /* Grab a big lock for a long time to avoid concurrency issues */ |
1564 | mutex_lock(&queue->queuelock); |
1565 | |
1566 | @@ -827,13 +822,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, |
1567 | (!queue->havesoftqueue || |
1568 | kfifo_avail(&queue->softqueue) > steps)) || |
1569 | !list_empty(&ctx->backlog)) { |
1570 | + ret = -EBUSY; |
1571 | if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { |
1572 | list_add_tail(&sec_req->backlog_head, &ctx->backlog); |
1573 | mutex_unlock(&queue->queuelock); |
1574 | - return -EBUSY; |
1575 | + goto out; |
1576 | } |
1577 | |
1578 | - ret = -EBUSY; |
1579 | mutex_unlock(&queue->queuelock); |
1580 | goto err_free_elements; |
1581 | } |
1582 | @@ -842,7 +837,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, |
1583 | if (ret) |
1584 | goto err_free_elements; |
1585 | |
1586 | - return -EINPROGRESS; |
1587 | + ret = -EINPROGRESS; |
1588 | +out: |
1589 | + /* Cleanup - all elements in pointer arrays have been copied */ |
1590 | + kfree(splits_in_nents); |
1591 | + kfree(splits_in); |
1592 | + kfree(splits_out_nents); |
1593 | + kfree(splits_out); |
1594 | + kfree(split_sizes); |
1595 | + return ret; |
1596 | |
1597 | err_free_elements: |
1598 | list_for_each_entry_safe(el, temp, &sec_req->elements, head) { |
1599 | @@ -854,7 +857,7 @@ err_free_elements: |
1600 | crypto_skcipher_ivsize(atfm), |
1601 | DMA_BIDIRECTIONAL); |
1602 | err_unmap_out_sg: |
1603 | - if (skreq->src != skreq->dst) |
1604 | + if (split) |
1605 | sec_unmap_sg_on_err(skreq->dst, steps, splits_out, |
1606 | splits_out_nents, sec_req->len_out, |
1607 | info->dev); |
1608 | diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c |
1609 | index 8830fa601e45..0c0d2312f4a8 100644 |
1610 | --- a/drivers/firmware/efi/libstub/fdt.c |
1611 | +++ b/drivers/firmware/efi/libstub/fdt.c |
1612 | @@ -158,6 +158,10 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, |
1613 | return efi_status; |
1614 | } |
1615 | } |
1616 | + |
1617 | + /* shrink the FDT back to its minimum size */ |
1618 | + fdt_pack(fdt); |
1619 | + |
1620 | return EFI_SUCCESS; |
1621 | |
1622 | fdt_set_fail: |
1623 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c |
1624 | index 353993218f21..f008804f0b97 100644 |
1625 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c |
1626 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c |
1627 | @@ -358,7 +358,9 @@ out: |
1628 | * |
1629 | * Checks the acpi event and if it matches an atif event, |
1630 | * handles it. |
1631 | - * Returns NOTIFY code |
1632 | + * |
1633 | + * Returns: |
1634 | + * NOTIFY_BAD or NOTIFY_DONE, depending on the event. |
1635 | */ |
1636 | static int amdgpu_atif_handler(struct amdgpu_device *adev, |
1637 | struct acpi_bus_event *event) |
1638 | @@ -372,11 +374,16 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, |
1639 | if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) |
1640 | return NOTIFY_DONE; |
1641 | |
1642 | + /* Is this actually our event? */ |
1643 | if (!atif || |
1644 | !atif->notification_cfg.enabled || |
1645 | - event->type != atif->notification_cfg.command_code) |
1646 | - /* Not our event */ |
1647 | - return NOTIFY_DONE; |
1648 | + event->type != atif->notification_cfg.command_code) { |
1649 | + /* These events will generate keypresses otherwise */ |
1650 | + if (event->type == ACPI_VIDEO_NOTIFY_PROBE) |
1651 | + return NOTIFY_BAD; |
1652 | + else |
1653 | + return NOTIFY_DONE; |
1654 | + } |
1655 | |
1656 | if (atif->functions.sbios_requests) { |
1657 | struct atif_sbios_requests req; |
1658 | @@ -385,7 +392,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, |
1659 | count = amdgpu_atif_get_sbios_requests(atif, &req); |
1660 | |
1661 | if (count <= 0) |
1662 | - return NOTIFY_DONE; |
1663 | + return NOTIFY_BAD; |
1664 | |
1665 | DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count); |
1666 | |
1667 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c |
1668 | index d472a2c8399f..b80243d3972e 100644 |
1669 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c |
1670 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c |
1671 | @@ -67,7 +67,8 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, |
1672 | unsigned i; |
1673 | int r; |
1674 | |
1675 | - if (num_entries > SIZE_MAX / sizeof(struct amdgpu_bo_list_entry)) |
1676 | + if (num_entries > (SIZE_MAX - sizeof(struct amdgpu_bo_list)) |
1677 | + / sizeof(struct amdgpu_bo_list_entry)) |
1678 | return -EINVAL; |
1679 | |
1680 | size = sizeof(struct amdgpu_bo_list); |
1681 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c |
1682 | index 3a072a7a39f0..df9b173c3d0b 100644 |
1683 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c |
1684 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c |
1685 | @@ -574,7 +574,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev) |
1686 | /* skip over VMID 0, since it is the system VM */ |
1687 | for (j = 1; j < id_mgr->num_ids; ++j) { |
1688 | amdgpu_vmid_reset(adev, i, j); |
1689 | - amdgpu_sync_create(&id_mgr->ids[i].active); |
1690 | + amdgpu_sync_create(&id_mgr->ids[j].active); |
1691 | list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru); |
1692 | } |
1693 | } |
1694 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c |
1695 | index 391e2f7c03aa..f823d4baf044 100644 |
1696 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c |
1697 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c |
1698 | @@ -66,6 +66,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, |
1699 | amdgpu_sync_create(&(*job)->sync); |
1700 | amdgpu_sync_create(&(*job)->sched_sync); |
1701 | (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter); |
1702 | + (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET; |
1703 | |
1704 | return 0; |
1705 | } |
1706 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c |
1707 | index f55f72a37ca8..c29d519fa381 100644 |
1708 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c |
1709 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c |
1710 | @@ -277,6 +277,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) |
1711 | case CHIP_PITCAIRN: |
1712 | case CHIP_VERDE: |
1713 | case CHIP_OLAND: |
1714 | + case CHIP_HAINAN: |
1715 | return AMDGPU_FW_LOAD_DIRECT; |
1716 | #endif |
1717 | #ifdef CONFIG_DRM_AMDGPU_CIK |
1718 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |
1719 | index b17771dd5ce7..6a84526e20e0 100644 |
1720 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |
1721 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |
1722 | @@ -714,7 +714,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ |
1723 | } |
1724 | |
1725 | gds_switch_needed &= !!ring->funcs->emit_gds_switch; |
1726 | - vm_flush_needed &= !!ring->funcs->emit_vm_flush; |
1727 | + vm_flush_needed &= !!ring->funcs->emit_vm_flush && |
1728 | + job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET; |
1729 | pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && |
1730 | ring->funcs->emit_wreg; |
1731 | |
1732 | diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c |
1733 | index 6ae050dc3220..9045e6fa0780 100644 |
1734 | --- a/drivers/gpu/drm/amd/display/dc/core/dc.c |
1735 | +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c |
1736 | @@ -1120,9 +1120,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa |
1737 | */ |
1738 | update_flags->bits.bpp_change = 1; |
1739 | |
1740 | - if (u->gamma && dce_use_lut(u->plane_info->format)) |
1741 | - update_flags->bits.gamma_change = 1; |
1742 | - |
1743 | if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, |
1744 | sizeof(union dc_tiling_info)) != 0) { |
1745 | update_flags->bits.swizzle_change = 1; |
1746 | @@ -1139,7 +1136,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa |
1747 | if (update_flags->bits.rotation_change |
1748 | || update_flags->bits.stereo_format_change |
1749 | || update_flags->bits.pixel_format_change |
1750 | - || update_flags->bits.gamma_change |
1751 | || update_flags->bits.bpp_change |
1752 | || update_flags->bits.bandwidth_change |
1753 | || update_flags->bits.output_tf_change) |
1754 | @@ -1229,13 +1225,26 @@ static enum surface_update_type det_surface_update(const struct dc *dc, |
1755 | if (u->coeff_reduction_factor) |
1756 | update_flags->bits.coeff_reduction_change = 1; |
1757 | |
1758 | + if (u->gamma) { |
1759 | + enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; |
1760 | + |
1761 | + if (u->plane_info) |
1762 | + format = u->plane_info->format; |
1763 | + else if (u->surface) |
1764 | + format = u->surface->format; |
1765 | + |
1766 | + if (dce_use_lut(format)) |
1767 | + update_flags->bits.gamma_change = 1; |
1768 | + } |
1769 | + |
1770 | if (update_flags->bits.in_transfer_func_change) { |
1771 | type = UPDATE_TYPE_MED; |
1772 | elevate_update_type(&overall_type, type); |
1773 | } |
1774 | |
1775 | if (update_flags->bits.input_csc_change |
1776 | - || update_flags->bits.coeff_reduction_change) { |
1777 | + || update_flags->bits.coeff_reduction_change |
1778 | + || update_flags->bits.gamma_change) { |
1779 | type = UPDATE_TYPE_FULL; |
1780 | elevate_update_type(&overall_type, type); |
1781 | } |
1782 | diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c |
1783 | index fb1f373d08a1..e798241fae37 100644 |
1784 | --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c |
1785 | +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c |
1786 | @@ -466,6 +466,9 @@ static void dce12_update_clocks(struct dccg *dccg, |
1787 | { |
1788 | struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; |
1789 | |
1790 | + /* TODO: Investigate why this is needed to fix display corruption. */ |
1791 | + new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100; |
1792 | + |
1793 | if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) { |
1794 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK; |
1795 | clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz; |
1796 | diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c |
1797 | index bf29733958c3..962900932bee 100644 |
1798 | --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c |
1799 | +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c |
1800 | @@ -1069,10 +1069,14 @@ static void build_evenly_distributed_points( |
1801 | struct dividers dividers) |
1802 | { |
1803 | struct gamma_pixel *p = points; |
1804 | - struct gamma_pixel *p_last = p + numberof_points - 1; |
1805 | + struct gamma_pixel *p_last; |
1806 | |
1807 | uint32_t i = 0; |
1808 | |
1809 | + // This function should not gets called with 0 as a parameter |
1810 | + ASSERT(numberof_points > 0); |
1811 | + p_last = p + numberof_points - 1; |
1812 | + |
1813 | do { |
1814 | struct fixed31_32 value = dc_fixpt_from_fraction(i, |
1815 | numberof_points - 1); |
1816 | @@ -1083,7 +1087,7 @@ static void build_evenly_distributed_points( |
1817 | |
1818 | ++p; |
1819 | ++i; |
1820 | - } while (i != numberof_points); |
1821 | + } while (i < numberof_points); |
1822 | |
1823 | p->r = dc_fixpt_div(p_last->r, dividers.divider1); |
1824 | p->g = dc_fixpt_div(p_last->g, dividers.divider1); |
1825 | diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c |
1826 | index 0adfc5392cd3..c9a15baf2c10 100644 |
1827 | --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c |
1828 | +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c |
1829 | @@ -1222,14 +1222,17 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, |
1830 | |
1831 | static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr) |
1832 | { |
1833 | - if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) |
1834 | + if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) { |
1835 | + smu8_nbdpm_pstate_enable_disable(hwmgr, true, true); |
1836 | return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF); |
1837 | + } |
1838 | return 0; |
1839 | } |
1840 | |
1841 | static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr) |
1842 | { |
1843 | if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) { |
1844 | + smu8_nbdpm_pstate_enable_disable(hwmgr, false, true); |
1845 | return smum_send_msg_to_smc_with_parameter( |
1846 | hwmgr, |
1847 | PPSMC_MSG_UVDPowerON, |
1848 | diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c |
1849 | index fbe3ef4ee45c..924788772b07 100644 |
1850 | --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c |
1851 | +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c |
1852 | @@ -2268,11 +2268,13 @@ static uint32_t ci_get_offsetof(uint32_t type, uint32_t member) |
1853 | case DRAM_LOG_BUFF_SIZE: |
1854 | return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE); |
1855 | } |
1856 | + break; |
1857 | case SMU_Discrete_DpmTable: |
1858 | switch (member) { |
1859 | case LowSclkInterruptThreshold: |
1860 | return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT); |
1861 | } |
1862 | + break; |
1863 | } |
1864 | pr_debug("can't get the offset of type %x member %x\n", type, member); |
1865 | return 0; |
1866 | diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c |
1867 | index 18048f8e2f13..40df5c2706cc 100644 |
1868 | --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c |
1869 | +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c |
1870 | @@ -2330,6 +2330,7 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) |
1871 | case DRAM_LOG_BUFF_SIZE: |
1872 | return offsetof(SMU73_SoftRegisters, DRAM_LOG_BUFF_SIZE); |
1873 | } |
1874 | + break; |
1875 | case SMU_Discrete_DpmTable: |
1876 | switch (member) { |
1877 | case UvdBootLevel: |
1878 | @@ -2339,6 +2340,7 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) |
1879 | case LowSclkInterruptThreshold: |
1880 | return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold); |
1881 | } |
1882 | + break; |
1883 | } |
1884 | pr_warn("can't get the offset of type %x member %x\n", type, member); |
1885 | return 0; |
1886 | diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c |
1887 | index 9299b93aa09a..302ca7745723 100644 |
1888 | --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c |
1889 | +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c |
1890 | @@ -2236,11 +2236,13 @@ static uint32_t iceland_get_offsetof(uint32_t type, uint32_t member) |
1891 | case DRAM_LOG_BUFF_SIZE: |
1892 | return offsetof(SMU71_SoftRegisters, DRAM_LOG_BUFF_SIZE); |
1893 | } |
1894 | + break; |
1895 | case SMU_Discrete_DpmTable: |
1896 | switch (member) { |
1897 | case LowSclkInterruptThreshold: |
1898 | return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold); |
1899 | } |
1900 | + break; |
1901 | } |
1902 | pr_warn("can't get the offset of type %x member %x\n", type, member); |
1903 | return 0; |
1904 | diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c |
1905 | index 7dabc6c456e1..697c8d92bd53 100644 |
1906 | --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c |
1907 | +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c |
1908 | @@ -2618,6 +2618,7 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member) |
1909 | case DRAM_LOG_BUFF_SIZE: |
1910 | return offsetof(SMU72_SoftRegisters, DRAM_LOG_BUFF_SIZE); |
1911 | } |
1912 | + break; |
1913 | case SMU_Discrete_DpmTable: |
1914 | switch (member) { |
1915 | case UvdBootLevel: |
1916 | @@ -2627,6 +2628,7 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member) |
1917 | case LowSclkInterruptThreshold: |
1918 | return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold); |
1919 | } |
1920 | + break; |
1921 | } |
1922 | pr_warn("can't get the offset of type %x member %x\n", type, member); |
1923 | return 0; |
1924 | diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c |
1925 | index 57420d7caa4e..59113fdd1c1c 100644 |
1926 | --- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c |
1927 | +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c |
1928 | @@ -2184,6 +2184,7 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member) |
1929 | case DRAM_LOG_BUFF_SIZE: |
1930 | return offsetof(SMU75_SoftRegisters, DRAM_LOG_BUFF_SIZE); |
1931 | } |
1932 | + break; |
1933 | case SMU_Discrete_DpmTable: |
1934 | switch (member) { |
1935 | case UvdBootLevel: |
1936 | @@ -2193,6 +2194,7 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member) |
1937 | case LowSclkInterruptThreshold: |
1938 | return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold); |
1939 | } |
1940 | + break; |
1941 | } |
1942 | pr_warn("can't get the offset of type %x member %x\n", type, member); |
1943 | return 0; |
1944 | diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c |
1945 | index 7780567aa669..d708472d93c4 100644 |
1946 | --- a/drivers/gpu/drm/drm_dp_mst_topology.c |
1947 | +++ b/drivers/gpu/drm/drm_dp_mst_topology.c |
1948 | @@ -1274,6 +1274,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ |
1949 | mutex_lock(&mgr->lock); |
1950 | mstb = mgr->mst_primary; |
1951 | |
1952 | + if (!mstb) |
1953 | + goto out; |
1954 | + |
1955 | for (i = 0; i < lct - 1; i++) { |
1956 | int shift = (i % 2) ? 0 : 4; |
1957 | int port_num = (rad[i / 2] >> shift) & 0xf; |
1958 | diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c |
1959 | index fe9c6c731e87..ee4a5e1221f1 100644 |
1960 | --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c |
1961 | +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c |
1962 | @@ -30,6 +30,12 @@ struct drm_dmi_panel_orientation_data { |
1963 | int orientation; |
1964 | }; |
1965 | |
1966 | +static const struct drm_dmi_panel_orientation_data acer_s1003 = { |
1967 | + .width = 800, |
1968 | + .height = 1280, |
1969 | + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, |
1970 | +}; |
1971 | + |
1972 | static const struct drm_dmi_panel_orientation_data asus_t100ha = { |
1973 | .width = 800, |
1974 | .height = 1280, |
1975 | @@ -67,7 +73,13 @@ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = { |
1976 | }; |
1977 | |
1978 | static const struct dmi_system_id orientation_data[] = { |
1979 | - { /* Asus T100HA */ |
1980 | + { /* Acer One 10 (S1003) */ |
1981 | + .matches = { |
1982 | + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), |
1983 | + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"), |
1984 | + }, |
1985 | + .driver_data = (void *)&acer_s1003, |
1986 | + }, { /* Asus T100HA */ |
1987 | .matches = { |
1988 | DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |
1989 | DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"), |
1990 | diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c |
1991 | index 69e9b431bf1f..e5a9fae31ab7 100644 |
1992 | --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c |
1993 | +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c |
1994 | @@ -93,7 +93,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) |
1995 | * If the GPU managed to complete this jobs fence, the timout is |
1996 | * spurious. Bail out. |
1997 | */ |
1998 | - if (fence_completed(gpu, submit->out_fence->seqno)) |
1999 | + if (dma_fence_is_signaled(submit->out_fence)) |
2000 | return; |
2001 | |
2002 | /* |
2003 | diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c |
2004 | index b92595c477ef..8bd29075ae4e 100644 |
2005 | --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c |
2006 | +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c |
2007 | @@ -122,6 +122,7 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper, |
2008 | hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj); |
2009 | if (IS_ERR(hi_fbdev->fb)) { |
2010 | ret = PTR_ERR(hi_fbdev->fb); |
2011 | + hi_fbdev->fb = NULL; |
2012 | DRM_ERROR("failed to initialize framebuffer: %d\n", ret); |
2013 | goto out_release_fbi; |
2014 | } |
2015 | diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h |
2016 | index 7a9b36176efb..bfb6f652b09f 100644 |
2017 | --- a/drivers/gpu/drm/i915/gvt/gtt.h |
2018 | +++ b/drivers/gpu/drm/i915/gvt/gtt.h |
2019 | @@ -35,7 +35,6 @@ |
2020 | #define _GVT_GTT_H_ |
2021 | |
2022 | #define I915_GTT_PAGE_SHIFT 12 |
2023 | -#define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1)) |
2024 | |
2025 | struct intel_vgpu_mm; |
2026 | |
2027 | diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c |
2028 | index fcc73a6ab503..47cc932e23a7 100644 |
2029 | --- a/drivers/gpu/drm/i915/i915_gem.c |
2030 | +++ b/drivers/gpu/drm/i915/i915_gem.c |
2031 | @@ -1122,11 +1122,7 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj, |
2032 | offset = offset_in_page(args->offset); |
2033 | for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { |
2034 | struct page *page = i915_gem_object_get_page(obj, idx); |
2035 | - int length; |
2036 | - |
2037 | - length = remain; |
2038 | - if (offset + length > PAGE_SIZE) |
2039 | - length = PAGE_SIZE - offset; |
2040 | + unsigned int length = min_t(u64, remain, PAGE_SIZE - offset); |
2041 | |
2042 | ret = shmem_pread(page, offset, length, user_data, |
2043 | page_to_phys(page) & obj_do_bit17_swizzling, |
2044 | @@ -1570,11 +1566,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, |
2045 | offset = offset_in_page(args->offset); |
2046 | for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { |
2047 | struct page *page = i915_gem_object_get_page(obj, idx); |
2048 | - int length; |
2049 | - |
2050 | - length = remain; |
2051 | - if (offset + length > PAGE_SIZE) |
2052 | - length = PAGE_SIZE - offset; |
2053 | + unsigned int length = min_t(u64, remain, PAGE_SIZE - offset); |
2054 | |
2055 | ret = shmem_pwrite(page, offset, length, user_data, |
2056 | page_to_phys(page) & obj_do_bit17_swizzling, |
2057 | diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c |
2058 | index 3f0c612d42e7..679bbae52945 100644 |
2059 | --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c |
2060 | +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c |
2061 | @@ -458,7 +458,7 @@ eb_validate_vma(struct i915_execbuffer *eb, |
2062 | * any non-page-aligned or non-canonical addresses. |
2063 | */ |
2064 | if (unlikely(entry->flags & EXEC_OBJECT_PINNED && |
2065 | - entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK))) |
2066 | + entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK))) |
2067 | return -EINVAL; |
2068 | |
2069 | /* pad_to_size was once a reserved field, so sanitize it */ |
2070 | diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c |
2071 | index f00c7fbef79e..294a143b85f5 100644 |
2072 | --- a/drivers/gpu/drm/i915/i915_gem_gtt.c |
2073 | +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c |
2074 | @@ -1768,7 +1768,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m) |
2075 | if (i == 4) |
2076 | continue; |
2077 | |
2078 | - seq_printf(m, "\t\t(%03d, %04d) %08lx: ", |
2079 | + seq_printf(m, "\t\t(%03d, %04d) %08llx: ", |
2080 | pde, pte, |
2081 | (pde * GEN6_PTES + pte) * PAGE_SIZE); |
2082 | for (i = 0; i < 4; i++) { |
2083 | diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h |
2084 | index 2a116a91420b..680e0dc5db4b 100644 |
2085 | --- a/drivers/gpu/drm/i915/i915_gem_gtt.h |
2086 | +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h |
2087 | @@ -42,13 +42,15 @@ |
2088 | #include "i915_selftest.h" |
2089 | #include "i915_timeline.h" |
2090 | |
2091 | -#define I915_GTT_PAGE_SIZE_4K BIT(12) |
2092 | -#define I915_GTT_PAGE_SIZE_64K BIT(16) |
2093 | -#define I915_GTT_PAGE_SIZE_2M BIT(21) |
2094 | +#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12) |
2095 | +#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16) |
2096 | +#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21) |
2097 | |
2098 | #define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K |
2099 | #define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M |
2100 | |
2101 | +#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE |
2102 | + |
2103 | #define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE |
2104 | |
2105 | #define I915_FENCE_REG_NONE -1 |
2106 | @@ -662,20 +664,20 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, |
2107 | u64 start, u64 end, unsigned int flags); |
2108 | |
2109 | /* Flags used by pin/bind&friends. */ |
2110 | -#define PIN_NONBLOCK BIT(0) |
2111 | -#define PIN_MAPPABLE BIT(1) |
2112 | -#define PIN_ZONE_4G BIT(2) |
2113 | -#define PIN_NONFAULT BIT(3) |
2114 | -#define PIN_NOEVICT BIT(4) |
2115 | - |
2116 | -#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */ |
2117 | -#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */ |
2118 | -#define PIN_USER BIT(7) /* I915_VMA_LOCAL_BIND */ |
2119 | -#define PIN_UPDATE BIT(8) |
2120 | - |
2121 | -#define PIN_HIGH BIT(9) |
2122 | -#define PIN_OFFSET_BIAS BIT(10) |
2123 | -#define PIN_OFFSET_FIXED BIT(11) |
2124 | +#define PIN_NONBLOCK BIT_ULL(0) |
2125 | +#define PIN_MAPPABLE BIT_ULL(1) |
2126 | +#define PIN_ZONE_4G BIT_ULL(2) |
2127 | +#define PIN_NONFAULT BIT_ULL(3) |
2128 | +#define PIN_NOEVICT BIT_ULL(4) |
2129 | + |
2130 | +#define PIN_MBZ BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */ |
2131 | +#define PIN_GLOBAL BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */ |
2132 | +#define PIN_USER BIT_ULL(7) /* I915_VMA_LOCAL_BIND */ |
2133 | +#define PIN_UPDATE BIT_ULL(8) |
2134 | + |
2135 | +#define PIN_HIGH BIT_ULL(9) |
2136 | +#define PIN_OFFSET_BIAS BIT_ULL(10) |
2137 | +#define PIN_OFFSET_FIXED BIT_ULL(11) |
2138 | #define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE) |
2139 | |
2140 | #endif |
2141 | diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h |
2142 | index 9e63cd47b60f..5b544cb38148 100644 |
2143 | --- a/drivers/gpu/drm/i915/i915_reg.h |
2144 | +++ b/drivers/gpu/drm/i915/i915_reg.h |
2145 | @@ -2097,8 +2097,12 @@ enum i915_power_well_id { |
2146 | |
2147 | /* ICL PHY DFLEX registers */ |
2148 | #define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0) |
2149 | -#define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n))) |
2150 | -#define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n))) |
2151 | +#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port))) |
2152 | +#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port))) |
2153 | +#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port))) |
2154 | +#define DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port))) |
2155 | +#define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port))) |
2156 | +#define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port))) |
2157 | |
2158 | /* BXT PHY Ref registers */ |
2159 | #define _PORT_REF_DW3_A 0x16218C |
2160 | diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c |
2161 | index 769f3f586661..ee3ca2de983b 100644 |
2162 | --- a/drivers/gpu/drm/i915/intel_audio.c |
2163 | +++ b/drivers/gpu/drm/i915/intel_audio.c |
2164 | @@ -144,6 +144,9 @@ static const struct { |
2165 | /* HDMI N/CTS table */ |
2166 | #define TMDS_297M 297000 |
2167 | #define TMDS_296M 296703 |
2168 | +#define TMDS_594M 594000 |
2169 | +#define TMDS_593M 593407 |
2170 | + |
2171 | static const struct { |
2172 | int sample_rate; |
2173 | int clock; |
2174 | @@ -164,6 +167,20 @@ static const struct { |
2175 | { 176400, TMDS_297M, 18816, 247500 }, |
2176 | { 192000, TMDS_296M, 23296, 281250 }, |
2177 | { 192000, TMDS_297M, 20480, 247500 }, |
2178 | + { 44100, TMDS_593M, 8918, 937500 }, |
2179 | + { 44100, TMDS_594M, 9408, 990000 }, |
2180 | + { 48000, TMDS_593M, 5824, 562500 }, |
2181 | + { 48000, TMDS_594M, 6144, 594000 }, |
2182 | + { 32000, TMDS_593M, 5824, 843750 }, |
2183 | + { 32000, TMDS_594M, 3072, 445500 }, |
2184 | + { 88200, TMDS_593M, 17836, 937500 }, |
2185 | + { 88200, TMDS_594M, 18816, 990000 }, |
2186 | + { 96000, TMDS_593M, 11648, 562500 }, |
2187 | + { 96000, TMDS_594M, 12288, 594000 }, |
2188 | + { 176400, TMDS_593M, 35672, 937500 }, |
2189 | + { 176400, TMDS_594M, 37632, 990000 }, |
2190 | + { 192000, TMDS_593M, 23296, 562500 }, |
2191 | + { 192000, TMDS_594M, 24576, 594000 }, |
2192 | }; |
2193 | |
2194 | /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ |
2195 | diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c |
2196 | index d2951096bca0..3bd44d042a1d 100644 |
2197 | --- a/drivers/gpu/drm/i915/intel_display.c |
2198 | +++ b/drivers/gpu/drm/i915/intel_display.c |
2199 | @@ -2754,20 +2754,33 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state, |
2200 | |
2201 | plane_state->base.visible = visible; |
2202 | |
2203 | - /* FIXME pre-g4x don't work like this */ |
2204 | - if (visible) { |
2205 | + if (visible) |
2206 | crtc_state->base.plane_mask |= drm_plane_mask(&plane->base); |
2207 | - crtc_state->active_planes |= BIT(plane->id); |
2208 | - } else { |
2209 | + else |
2210 | crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base); |
2211 | - crtc_state->active_planes &= ~BIT(plane->id); |
2212 | - } |
2213 | |
2214 | DRM_DEBUG_KMS("%s active planes 0x%x\n", |
2215 | crtc_state->base.crtc->name, |
2216 | crtc_state->active_planes); |
2217 | } |
2218 | |
2219 | +static void fixup_active_planes(struct intel_crtc_state *crtc_state) |
2220 | +{ |
2221 | + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); |
2222 | + struct drm_plane *plane; |
2223 | + |
2224 | + /* |
2225 | + * Active_planes aliases if multiple "primary" or cursor planes |
2226 | + * have been used on the same (or wrong) pipe. plane_mask uses |
2227 | + * unique ids, hence we can use that to reconstruct active_planes. |
2228 | + */ |
2229 | + crtc_state->active_planes = 0; |
2230 | + |
2231 | + drm_for_each_plane_mask(plane, &dev_priv->drm, |
2232 | + crtc_state->base.plane_mask) |
2233 | + crtc_state->active_planes |= BIT(to_intel_plane(plane)->id); |
2234 | +} |
2235 | + |
2236 | static void intel_plane_disable_noatomic(struct intel_crtc *crtc, |
2237 | struct intel_plane *plane) |
2238 | { |
2239 | @@ -2777,6 +2790,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, |
2240 | to_intel_plane_state(plane->base.state); |
2241 | |
2242 | intel_set_plane_visible(crtc_state, plane_state, false); |
2243 | + fixup_active_planes(crtc_state); |
2244 | |
2245 | if (plane->id == PLANE_PRIMARY) |
2246 | intel_pre_disable_primary_noatomic(&crtc->base); |
2247 | @@ -2795,7 +2809,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, |
2248 | struct drm_i915_gem_object *obj; |
2249 | struct drm_plane *primary = intel_crtc->base.primary; |
2250 | struct drm_plane_state *plane_state = primary->state; |
2251 | - struct drm_crtc_state *crtc_state = intel_crtc->base.state; |
2252 | struct intel_plane *intel_plane = to_intel_plane(primary); |
2253 | struct intel_plane_state *intel_state = |
2254 | to_intel_plane_state(plane_state); |
2255 | @@ -2885,10 +2898,6 @@ valid_fb: |
2256 | plane_state->fb = fb; |
2257 | plane_state->crtc = &intel_crtc->base; |
2258 | |
2259 | - intel_set_plane_visible(to_intel_crtc_state(crtc_state), |
2260 | - to_intel_plane_state(plane_state), |
2261 | - true); |
2262 | - |
2263 | atomic_or(to_intel_plane(primary)->frontbuffer_bit, |
2264 | &obj->frontbuffer_bits); |
2265 | } |
2266 | @@ -12630,17 +12639,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) |
2267 | intel_check_cpu_fifo_underruns(dev_priv); |
2268 | intel_check_pch_fifo_underruns(dev_priv); |
2269 | |
2270 | - if (!new_crtc_state->active) { |
2271 | - /* |
2272 | - * Make sure we don't call initial_watermarks |
2273 | - * for ILK-style watermark updates. |
2274 | - * |
2275 | - * No clue what this is supposed to achieve. |
2276 | - */ |
2277 | - if (INTEL_GEN(dev_priv) >= 9) |
2278 | - dev_priv->display.initial_watermarks(intel_state, |
2279 | - to_intel_crtc_state(new_crtc_state)); |
2280 | - } |
2281 | + /* FIXME unify this for all platforms */ |
2282 | + if (!new_crtc_state->active && |
2283 | + !HAS_GMCH_DISPLAY(dev_priv) && |
2284 | + dev_priv->display.initial_watermarks) |
2285 | + dev_priv->display.initial_watermarks(intel_state, |
2286 | + to_intel_crtc_state(new_crtc_state)); |
2287 | } |
2288 | } |
2289 | |
2290 | @@ -14573,7 +14577,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, |
2291 | fb->height < SKL_MIN_YUV_420_SRC_H || |
2292 | (fb->width % 4) != 0 || (fb->height % 4) != 0)) { |
2293 | DRM_DEBUG_KMS("src dimensions not correct for NV12\n"); |
2294 | - return -EINVAL; |
2295 | + goto err; |
2296 | } |
2297 | |
2298 | for (i = 0; i < fb->format->num_planes; i++) { |
2299 | @@ -15365,17 +15369,6 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) |
2300 | POSTING_READ(DPLL(pipe)); |
2301 | } |
2302 | |
2303 | -static bool intel_plane_mapping_ok(struct intel_crtc *crtc, |
2304 | - struct intel_plane *plane) |
2305 | -{ |
2306 | - enum pipe pipe; |
2307 | - |
2308 | - if (!plane->get_hw_state(plane, &pipe)) |
2309 | - return true; |
2310 | - |
2311 | - return pipe == crtc->pipe; |
2312 | -} |
2313 | - |
2314 | static void |
2315 | intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) |
2316 | { |
2317 | @@ -15387,13 +15380,20 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) |
2318 | for_each_intel_crtc(&dev_priv->drm, crtc) { |
2319 | struct intel_plane *plane = |
2320 | to_intel_plane(crtc->base.primary); |
2321 | + struct intel_crtc *plane_crtc; |
2322 | + enum pipe pipe; |
2323 | |
2324 | - if (intel_plane_mapping_ok(crtc, plane)) |
2325 | + if (!plane->get_hw_state(plane, &pipe)) |
2326 | + continue; |
2327 | + |
2328 | + if (pipe == crtc->pipe) |
2329 | continue; |
2330 | |
2331 | DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n", |
2332 | plane->base.name); |
2333 | - intel_plane_disable_noatomic(crtc, plane); |
2334 | + |
2335 | + plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); |
2336 | + intel_plane_disable_noatomic(plane_crtc, plane); |
2337 | } |
2338 | } |
2339 | |
2340 | @@ -15441,13 +15441,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, |
2341 | I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); |
2342 | } |
2343 | |
2344 | - /* restore vblank interrupts to correct state */ |
2345 | - drm_crtc_vblank_reset(&crtc->base); |
2346 | if (crtc->active) { |
2347 | struct intel_plane *plane; |
2348 | |
2349 | - drm_crtc_vblank_on(&crtc->base); |
2350 | - |
2351 | /* Disable everything but the primary plane */ |
2352 | for_each_intel_plane_on_crtc(dev, crtc, plane) { |
2353 | const struct intel_plane_state *plane_state = |
2354 | @@ -15565,23 +15561,32 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv) |
2355 | } |
2356 | |
2357 | /* FIXME read out full plane state for all planes */ |
2358 | -static void readout_plane_state(struct intel_crtc *crtc) |
2359 | +static void readout_plane_state(struct drm_i915_private *dev_priv) |
2360 | { |
2361 | - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
2362 | - struct intel_crtc_state *crtc_state = |
2363 | - to_intel_crtc_state(crtc->base.state); |
2364 | struct intel_plane *plane; |
2365 | + struct intel_crtc *crtc; |
2366 | |
2367 | - for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { |
2368 | + for_each_intel_plane(&dev_priv->drm, plane) { |
2369 | struct intel_plane_state *plane_state = |
2370 | to_intel_plane_state(plane->base.state); |
2371 | - enum pipe pipe; |
2372 | + struct intel_crtc_state *crtc_state; |
2373 | + enum pipe pipe = PIPE_A; |
2374 | bool visible; |
2375 | |
2376 | visible = plane->get_hw_state(plane, &pipe); |
2377 | |
2378 | + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); |
2379 | + crtc_state = to_intel_crtc_state(crtc->base.state); |
2380 | + |
2381 | intel_set_plane_visible(crtc_state, plane_state, visible); |
2382 | } |
2383 | + |
2384 | + for_each_intel_crtc(&dev_priv->drm, crtc) { |
2385 | + struct intel_crtc_state *crtc_state = |
2386 | + to_intel_crtc_state(crtc->base.state); |
2387 | + |
2388 | + fixup_active_planes(crtc_state); |
2389 | + } |
2390 | } |
2391 | |
2392 | static void intel_modeset_readout_hw_state(struct drm_device *dev) |
2393 | @@ -15613,13 +15618,13 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) |
2394 | if (crtc_state->base.active) |
2395 | dev_priv->active_crtcs |= 1 << crtc->pipe; |
2396 | |
2397 | - readout_plane_state(crtc); |
2398 | - |
2399 | DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", |
2400 | crtc->base.base.id, crtc->base.name, |
2401 | enableddisabled(crtc_state->base.active)); |
2402 | } |
2403 | |
2404 | + readout_plane_state(dev_priv); |
2405 | + |
2406 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { |
2407 | struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; |
2408 | |
2409 | @@ -15789,7 +15794,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev, |
2410 | struct drm_modeset_acquire_ctx *ctx) |
2411 | { |
2412 | struct drm_i915_private *dev_priv = to_i915(dev); |
2413 | - enum pipe pipe; |
2414 | struct intel_crtc *crtc; |
2415 | struct intel_encoder *encoder; |
2416 | int i; |
2417 | @@ -15800,15 +15804,23 @@ intel_modeset_setup_hw_state(struct drm_device *dev, |
2418 | /* HW state is read out, now we need to sanitize this mess. */ |
2419 | get_encoder_power_domains(dev_priv); |
2420 | |
2421 | - intel_sanitize_plane_mapping(dev_priv); |
2422 | + /* |
2423 | + * intel_sanitize_plane_mapping() may need to do vblank |
2424 | + * waits, so we need vblank interrupts restored beforehand. |
2425 | + */ |
2426 | + for_each_intel_crtc(&dev_priv->drm, crtc) { |
2427 | + drm_crtc_vblank_reset(&crtc->base); |
2428 | |
2429 | - for_each_intel_encoder(dev, encoder) { |
2430 | - intel_sanitize_encoder(encoder); |
2431 | + if (crtc->active) |
2432 | + drm_crtc_vblank_on(&crtc->base); |
2433 | } |
2434 | |
2435 | - for_each_pipe(dev_priv, pipe) { |
2436 | - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); |
2437 | + intel_sanitize_plane_mapping(dev_priv); |
2438 | |
2439 | + for_each_intel_encoder(dev, encoder) |
2440 | + intel_sanitize_encoder(encoder); |
2441 | + |
2442 | + for_each_intel_crtc(&dev_priv->drm, crtc) { |
2443 | intel_sanitize_crtc(crtc, ctx); |
2444 | intel_dump_pipe_config(crtc, crtc->config, |
2445 | "[setup_hw_state]"); |
2446 | diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c |
2447 | index 1193202766a2..f92079e19de8 100644 |
2448 | --- a/drivers/gpu/drm/i915/intel_dp.c |
2449 | +++ b/drivers/gpu/drm/i915/intel_dp.c |
2450 | @@ -401,6 +401,22 @@ static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, |
2451 | return true; |
2452 | } |
2453 | |
2454 | +static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, |
2455 | + int link_rate, |
2456 | + uint8_t lane_count) |
2457 | +{ |
2458 | + const struct drm_display_mode *fixed_mode = |
2459 | + intel_dp->attached_connector->panel.fixed_mode; |
2460 | + int mode_rate, max_rate; |
2461 | + |
2462 | + mode_rate = intel_dp_link_required(fixed_mode->clock, 18); |
2463 | + max_rate = intel_dp_max_data_rate(link_rate, lane_count); |
2464 | + if (mode_rate > max_rate) |
2465 | + return false; |
2466 | + |
2467 | + return true; |
2468 | +} |
2469 | + |
2470 | int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, |
2471 | int link_rate, uint8_t lane_count) |
2472 | { |
2473 | @@ -410,9 +426,23 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, |
2474 | intel_dp->num_common_rates, |
2475 | link_rate); |
2476 | if (index > 0) { |
2477 | + if (intel_dp_is_edp(intel_dp) && |
2478 | + !intel_dp_can_link_train_fallback_for_edp(intel_dp, |
2479 | + intel_dp->common_rates[index - 1], |
2480 | + lane_count)) { |
2481 | + DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n"); |
2482 | + return 0; |
2483 | + } |
2484 | intel_dp->max_link_rate = intel_dp->common_rates[index - 1]; |
2485 | intel_dp->max_link_lane_count = lane_count; |
2486 | } else if (lane_count > 1) { |
2487 | + if (intel_dp_is_edp(intel_dp) && |
2488 | + !intel_dp_can_link_train_fallback_for_edp(intel_dp, |
2489 | + intel_dp_max_common_rate(intel_dp), |
2490 | + lane_count >> 1)) { |
2491 | + DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n"); |
2492 | + return 0; |
2493 | + } |
2494 | intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); |
2495 | intel_dp->max_link_lane_count = lane_count >> 1; |
2496 | } else { |
2497 | @@ -4709,19 +4739,13 @@ intel_dp_long_pulse(struct intel_connector *connector, |
2498 | */ |
2499 | status = connector_status_disconnected; |
2500 | goto out; |
2501 | - } else { |
2502 | - /* |
2503 | - * If display is now connected check links status, |
2504 | - * there has been known issues of link loss triggering |
2505 | - * long pulse. |
2506 | - * |
2507 | - * Some sinks (eg. ASUS PB287Q) seem to perform some |
2508 | - * weird HPD ping pong during modesets. So we can apparently |
2509 | - * end up with HPD going low during a modeset, and then |
2510 | - * going back up soon after. And once that happens we must |
2511 | - * retrain the link to get a picture. That's in case no |
2512 | - * userspace component reacted to intermittent HPD dip. |
2513 | - */ |
2514 | + } |
2515 | + |
2516 | + /* |
2517 | + * Some external monitors do not signal loss of link synchronization |
2518 | + * with an IRQ_HPD, so force a link status check. |
2519 | + */ |
2520 | + if (!intel_dp_is_edp(intel_dp)) { |
2521 | struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; |
2522 | |
2523 | intel_dp_retrain_link(encoder, ctx); |
2524 | diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c |
2525 | index 4da6e33c7fa1..329309a085cb 100644 |
2526 | --- a/drivers/gpu/drm/i915/intel_dp_link_training.c |
2527 | +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c |
2528 | @@ -352,22 +352,14 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) |
2529 | return; |
2530 | |
2531 | failure_handling: |
2532 | - /* Dont fallback and prune modes if its eDP */ |
2533 | - if (!intel_dp_is_edp(intel_dp)) { |
2534 | - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d", |
2535 | - intel_connector->base.base.id, |
2536 | - intel_connector->base.name, |
2537 | - intel_dp->link_rate, intel_dp->lane_count); |
2538 | - if (!intel_dp_get_link_train_fallback_values(intel_dp, |
2539 | - intel_dp->link_rate, |
2540 | - intel_dp->lane_count)) |
2541 | - /* Schedule a Hotplug Uevent to userspace to start modeset */ |
2542 | - schedule_work(&intel_connector->modeset_retry_work); |
2543 | - } else { |
2544 | - DRM_ERROR("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d", |
2545 | - intel_connector->base.base.id, |
2546 | - intel_connector->base.name, |
2547 | - intel_dp->link_rate, intel_dp->lane_count); |
2548 | - } |
2549 | + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d", |
2550 | + intel_connector->base.base.id, |
2551 | + intel_connector->base.name, |
2552 | + intel_dp->link_rate, intel_dp->lane_count); |
2553 | + if (!intel_dp_get_link_train_fallback_values(intel_dp, |
2554 | + intel_dp->link_rate, |
2555 | + intel_dp->lane_count)) |
2556 | + /* Schedule a Hotplug Uevent to userspace to start modeset */ |
2557 | + schedule_work(&intel_connector->modeset_retry_work); |
2558 | return; |
2559 | } |
2560 | diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c |
2561 | index 4ecd65375603..1fec0c71b4d9 100644 |
2562 | --- a/drivers/gpu/drm/i915/intel_dp_mst.c |
2563 | +++ b/drivers/gpu/drm/i915/intel_dp_mst.c |
2564 | @@ -38,11 +38,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, |
2565 | struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); |
2566 | struct intel_digital_port *intel_dig_port = intel_mst->primary; |
2567 | struct intel_dp *intel_dp = &intel_dig_port->dp; |
2568 | - struct intel_connector *connector = |
2569 | - to_intel_connector(conn_state->connector); |
2570 | + struct drm_connector *connector = conn_state->connector; |
2571 | + void *port = to_intel_connector(connector)->port; |
2572 | struct drm_atomic_state *state = pipe_config->base.state; |
2573 | int bpp; |
2574 | - int lane_count, slots; |
2575 | + int lane_count, slots = 0; |
2576 | const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; |
2577 | int mst_pbn; |
2578 | bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, |
2579 | @@ -70,17 +70,23 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, |
2580 | |
2581 | pipe_config->port_clock = intel_dp_max_link_rate(intel_dp); |
2582 | |
2583 | - if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, connector->port)) |
2584 | + if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port)) |
2585 | pipe_config->has_audio = true; |
2586 | |
2587 | mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp); |
2588 | pipe_config->pbn = mst_pbn; |
2589 | |
2590 | - slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, |
2591 | - connector->port, mst_pbn); |
2592 | - if (slots < 0) { |
2593 | - DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots); |
2594 | - return false; |
2595 | + /* Zombie connectors can't have VCPI slots */ |
2596 | + if (READ_ONCE(connector->registered)) { |
2597 | + slots = drm_dp_atomic_find_vcpi_slots(state, |
2598 | + &intel_dp->mst_mgr, |
2599 | + port, |
2600 | + mst_pbn); |
2601 | + if (slots < 0) { |
2602 | + DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", |
2603 | + slots); |
2604 | + return false; |
2605 | + } |
2606 | } |
2607 | |
2608 | intel_link_compute_m_n(bpp, lane_count, |
2609 | @@ -311,9 +317,8 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) |
2610 | struct edid *edid; |
2611 | int ret; |
2612 | |
2613 | - if (!intel_dp) { |
2614 | + if (!READ_ONCE(connector->registered)) |
2615 | return intel_connector_update_modes(connector, NULL); |
2616 | - } |
2617 | |
2618 | edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port); |
2619 | ret = intel_connector_update_modes(connector, edid); |
2620 | @@ -328,9 +333,10 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force) |
2621 | struct intel_connector *intel_connector = to_intel_connector(connector); |
2622 | struct intel_dp *intel_dp = intel_connector->mst_port; |
2623 | |
2624 | - if (!intel_dp) |
2625 | + if (!READ_ONCE(connector->registered)) |
2626 | return connector_status_disconnected; |
2627 | - return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port); |
2628 | + return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, |
2629 | + intel_connector->port); |
2630 | } |
2631 | |
2632 | static void |
2633 | @@ -370,7 +376,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, |
2634 | int bpp = 24; /* MST uses fixed bpp */ |
2635 | int max_rate, mode_rate, max_lanes, max_link_clock; |
2636 | |
2637 | - if (!intel_dp) |
2638 | + if (!READ_ONCE(connector->registered)) |
2639 | return MODE_ERROR; |
2640 | |
2641 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
2642 | @@ -402,7 +408,7 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c |
2643 | struct intel_dp *intel_dp = intel_connector->mst_port; |
2644 | struct intel_crtc *crtc = to_intel_crtc(state->crtc); |
2645 | |
2646 | - if (!intel_dp) |
2647 | + if (!READ_ONCE(connector->registered)) |
2648 | return NULL; |
2649 | return &intel_dp->mst_encoders[crtc->pipe]->base.base; |
2650 | } |
2651 | @@ -452,6 +458,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo |
2652 | if (!intel_connector) |
2653 | return NULL; |
2654 | |
2655 | + intel_connector->get_hw_state = intel_dp_mst_get_hw_state; |
2656 | + intel_connector->mst_port = intel_dp; |
2657 | + intel_connector->port = port; |
2658 | + |
2659 | connector = &intel_connector->base; |
2660 | ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, |
2661 | DRM_MODE_CONNECTOR_DisplayPort); |
2662 | @@ -462,10 +472,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo |
2663 | |
2664 | drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); |
2665 | |
2666 | - intel_connector->get_hw_state = intel_dp_mst_get_hw_state; |
2667 | - intel_connector->mst_port = intel_dp; |
2668 | - intel_connector->port = port; |
2669 | - |
2670 | for_each_pipe(dev_priv, pipe) { |
2671 | struct drm_encoder *enc = |
2672 | &intel_dp->mst_encoders[pipe]->base.base; |
2673 | @@ -503,7 +509,6 @@ static void intel_dp_register_mst_connector(struct drm_connector *connector) |
2674 | static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, |
2675 | struct drm_connector *connector) |
2676 | { |
2677 | - struct intel_connector *intel_connector = to_intel_connector(connector); |
2678 | struct drm_i915_private *dev_priv = to_i915(connector->dev); |
2679 | |
2680 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); |
2681 | @@ -512,10 +517,6 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, |
2682 | if (dev_priv->fbdev) |
2683 | drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, |
2684 | connector); |
2685 | - /* prevent race with the check in ->detect */ |
2686 | - drm_modeset_lock(&connector->dev->mode_config.connection_mutex, NULL); |
2687 | - intel_connector->mst_port = NULL; |
2688 | - drm_modeset_unlock(&connector->dev->mode_config.connection_mutex); |
2689 | |
2690 | drm_connector_put(connector); |
2691 | } |
2692 | diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c |
2693 | index 648a13c6043c..9a8018130237 100644 |
2694 | --- a/drivers/gpu/drm/i915/intel_hotplug.c |
2695 | +++ b/drivers/gpu/drm/i915/intel_hotplug.c |
2696 | @@ -228,7 +228,9 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) |
2697 | drm_for_each_connector_iter(connector, &conn_iter) { |
2698 | struct intel_connector *intel_connector = to_intel_connector(connector); |
2699 | |
2700 | - if (intel_connector->encoder->hpd_pin == pin) { |
2701 | + /* Don't check MST ports, they don't have pins */ |
2702 | + if (!intel_connector->mst_port && |
2703 | + intel_connector->encoder->hpd_pin == pin) { |
2704 | if (connector->polled != intel_connector->polled) |
2705 | DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", |
2706 | connector->name); |
2707 | @@ -395,37 +397,54 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, |
2708 | struct intel_encoder *encoder; |
2709 | bool storm_detected = false; |
2710 | bool queue_dig = false, queue_hp = false; |
2711 | + u32 long_hpd_pulse_mask = 0; |
2712 | + u32 short_hpd_pulse_mask = 0; |
2713 | + enum hpd_pin pin; |
2714 | |
2715 | if (!pin_mask) |
2716 | return; |
2717 | |
2718 | spin_lock(&dev_priv->irq_lock); |
2719 | + |
2720 | + /* |
2721 | + * Determine whether ->hpd_pulse() exists for each pin, and |
2722 | + * whether we have a short or a long pulse. This is needed |
2723 | + * as each pin may have up to two encoders (HDMI and DP) and |
2724 | + * only the one of them (DP) will have ->hpd_pulse(). |
2725 | + */ |
2726 | for_each_intel_encoder(&dev_priv->drm, encoder) { |
2727 | - enum hpd_pin pin = encoder->hpd_pin; |
2728 | bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder); |
2729 | + enum port port = encoder->port; |
2730 | + bool long_hpd; |
2731 | |
2732 | + pin = encoder->hpd_pin; |
2733 | if (!(BIT(pin) & pin_mask)) |
2734 | continue; |
2735 | |
2736 | - if (has_hpd_pulse) { |
2737 | - bool long_hpd = long_mask & BIT(pin); |
2738 | - enum port port = encoder->port; |
2739 | + if (!has_hpd_pulse) |
2740 | + continue; |
2741 | |
2742 | - DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), |
2743 | - long_hpd ? "long" : "short"); |
2744 | - /* |
2745 | - * For long HPD pulses we want to have the digital queue happen, |
2746 | - * but we still want HPD storm detection to function. |
2747 | - */ |
2748 | - queue_dig = true; |
2749 | - if (long_hpd) { |
2750 | - dev_priv->hotplug.long_port_mask |= (1 << port); |
2751 | - } else { |
2752 | - /* for short HPD just trigger the digital queue */ |
2753 | - dev_priv->hotplug.short_port_mask |= (1 << port); |
2754 | - continue; |
2755 | - } |
2756 | + long_hpd = long_mask & BIT(pin); |
2757 | + |
2758 | + DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), |
2759 | + long_hpd ? "long" : "short"); |
2760 | + queue_dig = true; |
2761 | + |
2762 | + if (long_hpd) { |
2763 | + long_hpd_pulse_mask |= BIT(pin); |
2764 | + dev_priv->hotplug.long_port_mask |= BIT(port); |
2765 | + } else { |
2766 | + short_hpd_pulse_mask |= BIT(pin); |
2767 | + dev_priv->hotplug.short_port_mask |= BIT(port); |
2768 | } |
2769 | + } |
2770 | + |
2771 | + /* Now process each pin just once */ |
2772 | + for_each_hpd_pin(pin) { |
2773 | + bool long_hpd; |
2774 | + |
2775 | + if (!(BIT(pin) & pin_mask)) |
2776 | + continue; |
2777 | |
2778 | if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) { |
2779 | /* |
2780 | @@ -442,11 +461,22 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, |
2781 | if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED) |
2782 | continue; |
2783 | |
2784 | - if (!has_hpd_pulse) { |
2785 | + /* |
2786 | + * Delegate to ->hpd_pulse() if one of the encoders for this |
2787 | + * pin has it, otherwise let the hotplug_work deal with this |
2788 | + * pin directly. |
2789 | + */ |
2790 | + if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) { |
2791 | + long_hpd = long_hpd_pulse_mask & BIT(pin); |
2792 | + } else { |
2793 | dev_priv->hotplug.event_bits |= BIT(pin); |
2794 | + long_hpd = true; |
2795 | queue_hp = true; |
2796 | } |
2797 | |
2798 | + if (!long_hpd) |
2799 | + continue; |
2800 | + |
2801 | if (intel_hpd_irq_storm_detect(dev_priv, pin)) { |
2802 | dev_priv->hotplug.event_bits &= ~BIT(pin); |
2803 | storm_detected = true; |
2804 | diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c |
2805 | index cdf19553ffac..5d5336fbe7b0 100644 |
2806 | --- a/drivers/gpu/drm/i915/intel_lpe_audio.c |
2807 | +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c |
2808 | @@ -297,8 +297,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) |
2809 | lpe_audio_platdev_destroy(dev_priv); |
2810 | |
2811 | irq_free_desc(dev_priv->lpe_audio.irq); |
2812 | -} |
2813 | |
2814 | + dev_priv->lpe_audio.irq = -1; |
2815 | + dev_priv->lpe_audio.platdev = NULL; |
2816 | +} |
2817 | |
2818 | /** |
2819 | * intel_lpe_audio_notify() - notify lpe audio event |
2820 | diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c |
2821 | index 174479232e94..75ea87ebf9b0 100644 |
2822 | --- a/drivers/gpu/drm/i915/intel_lrc.c |
2823 | +++ b/drivers/gpu/drm/i915/intel_lrc.c |
2824 | @@ -424,7 +424,8 @@ static u64 execlists_update_context(struct i915_request *rq) |
2825 | |
2826 | reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); |
2827 | |
2828 | - /* True 32b PPGTT with dynamic page allocation: update PDP |
2829 | + /* |
2830 | + * True 32b PPGTT with dynamic page allocation: update PDP |
2831 | * registers and point the unallocated PDPs to scratch page. |
2832 | * PML4 is allocated during ppgtt init, so this is not needed |
2833 | * in 48-bit mode. |
2834 | @@ -432,6 +433,17 @@ static u64 execlists_update_context(struct i915_request *rq) |
2835 | if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm)) |
2836 | execlists_update_context_pdps(ppgtt, reg_state); |
2837 | |
2838 | + /* |
2839 | + * Make sure the context image is complete before we submit it to HW. |
2840 | + * |
2841 | + * Ostensibly, writes (including the WCB) should be flushed prior to |
2842 | + * an uncached write such as our mmio register access, the empirical |
2843 | + * evidence (esp. on Braswell) suggests that the WC write into memory |
2844 | + * may not be visible to the HW prior to the completion of the UC |
2845 | + * register write and that we may begin execution from the context |
2846 | + * before its image is complete leading to invalid PD chasing. |
2847 | + */ |
2848 | + wmb(); |
2849 | return ce->lrc_desc; |
2850 | } |
2851 | |
2852 | diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c |
2853 | index 6a8f27d0a742..3b8218dd9bb1 100644 |
2854 | --- a/drivers/gpu/drm/i915/intel_ringbuffer.c |
2855 | +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c |
2856 | @@ -91,6 +91,7 @@ static int |
2857 | gen4_render_ring_flush(struct i915_request *rq, u32 mode) |
2858 | { |
2859 | u32 cmd, *cs; |
2860 | + int i; |
2861 | |
2862 | /* |
2863 | * read/write caches: |
2864 | @@ -127,12 +128,45 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) |
2865 | cmd |= MI_INVALIDATE_ISP; |
2866 | } |
2867 | |
2868 | - cs = intel_ring_begin(rq, 2); |
2869 | + i = 2; |
2870 | + if (mode & EMIT_INVALIDATE) |
2871 | + i += 20; |
2872 | + |
2873 | + cs = intel_ring_begin(rq, i); |
2874 | if (IS_ERR(cs)) |
2875 | return PTR_ERR(cs); |
2876 | |
2877 | *cs++ = cmd; |
2878 | - *cs++ = MI_NOOP; |
2879 | + |
2880 | + /* |
2881 | + * A random delay to let the CS invalidate take effect? Without this |
2882 | + * delay, the GPU relocation path fails as the CS does not see |
2883 | + * the updated contents. Just as important, if we apply the flushes |
2884 | + * to the EMIT_FLUSH branch (i.e. immediately after the relocation |
2885 | + * write and before the invalidate on the next batch), the relocations |
2886 | + * still fail. This implies that is a delay following invalidation |
2887 | + * that is required to reset the caches as opposed to a delay to |
2888 | + * ensure the memory is written. |
2889 | + */ |
2890 | + if (mode & EMIT_INVALIDATE) { |
2891 | + *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; |
2892 | + *cs++ = i915_ggtt_offset(rq->engine->scratch) | |
2893 | + PIPE_CONTROL_GLOBAL_GTT; |
2894 | + *cs++ = 0; |
2895 | + *cs++ = 0; |
2896 | + |
2897 | + for (i = 0; i < 12; i++) |
2898 | + *cs++ = MI_FLUSH; |
2899 | + |
2900 | + *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; |
2901 | + *cs++ = i915_ggtt_offset(rq->engine->scratch) | |
2902 | + PIPE_CONTROL_GLOBAL_GTT; |
2903 | + *cs++ = 0; |
2904 | + *cs++ = 0; |
2905 | + } |
2906 | + |
2907 | + *cs++ = cmd; |
2908 | + |
2909 | intel_ring_advance(rq, cs); |
2910 | |
2911 | return 0; |
2912 | diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c |
2913 | index 7efb326badcd..704572c2e6a2 100644 |
2914 | --- a/drivers/gpu/drm/i915/selftests/huge_pages.c |
2915 | +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c |
2916 | @@ -549,7 +549,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) |
2917 | err = igt_check_page_sizes(vma); |
2918 | |
2919 | if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) { |
2920 | - pr_err("page_sizes.gtt=%u, expected %lu\n", |
2921 | + pr_err("page_sizes.gtt=%u, expected %llu\n", |
2922 | vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K); |
2923 | err = -EINVAL; |
2924 | } |
2925 | diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |
2926 | index 8e2e269db97e..127d81513671 100644 |
2927 | --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |
2928 | +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |
2929 | @@ -1337,7 +1337,7 @@ static int igt_gtt_reserve(void *arg) |
2930 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
2931 | if (vma->node.start != total || |
2932 | vma->node.size != 2*I915_GTT_PAGE_SIZE) { |
2933 | - pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", |
2934 | + pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", |
2935 | vma->node.start, vma->node.size, |
2936 | total, 2*I915_GTT_PAGE_SIZE); |
2937 | err = -EINVAL; |
2938 | @@ -1386,7 +1386,7 @@ static int igt_gtt_reserve(void *arg) |
2939 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
2940 | if (vma->node.start != total || |
2941 | vma->node.size != 2*I915_GTT_PAGE_SIZE) { |
2942 | - pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", |
2943 | + pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", |
2944 | vma->node.start, vma->node.size, |
2945 | total, 2*I915_GTT_PAGE_SIZE); |
2946 | err = -EINVAL; |
2947 | @@ -1430,7 +1430,7 @@ static int igt_gtt_reserve(void *arg) |
2948 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
2949 | if (vma->node.start != offset || |
2950 | vma->node.size != 2*I915_GTT_PAGE_SIZE) { |
2951 | - pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", |
2952 | + pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", |
2953 | vma->node.start, vma->node.size, |
2954 | offset, 2*I915_GTT_PAGE_SIZE); |
2955 | err = -EINVAL; |
2956 | diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c |
2957 | index da1363a0c54d..93d70f4a2154 100644 |
2958 | --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c |
2959 | +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c |
2960 | @@ -633,8 +633,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev) |
2961 | struct device_node *child, *node; |
2962 | int ret; |
2963 | |
2964 | - node = of_find_compatible_node(dev->of_node, NULL, |
2965 | - "qcom,gpu-pwrlevels"); |
2966 | + node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels"); |
2967 | if (!node) { |
2968 | dev_err(dev, "Could not find the GPU powerlevels\n"); |
2969 | return -ENXIO; |
2970 | @@ -655,6 +654,8 @@ static int adreno_get_legacy_pwrlevels(struct device *dev) |
2971 | dev_pm_opp_add(dev, val, 0); |
2972 | } |
2973 | |
2974 | + of_node_put(node); |
2975 | + |
2976 | return 0; |
2977 | } |
2978 | |
2979 | diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c |
2980 | index 80cbf75bc2ff..cd02eae884cc 100644 |
2981 | --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c |
2982 | +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c |
2983 | @@ -1535,8 +1535,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, |
2984 | cnt++; |
2985 | |
2986 | dst = drm_plane_state_dest(pstate); |
2987 | - if (!drm_rect_intersect(&clip, &dst) || |
2988 | - !drm_rect_equals(&clip, &dst)) { |
2989 | + if (!drm_rect_intersect(&clip, &dst)) { |
2990 | DPU_ERROR("invalid vertical/horizontal destination\n"); |
2991 | DPU_ERROR("display: " DRM_RECT_FMT " plane: " |
2992 | DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect), |
2993 | diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c |
2994 | index 7dd6bd2d6d37..74cc204b07e8 100644 |
2995 | --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c |
2996 | +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c |
2997 | @@ -450,7 +450,7 @@ static void _dpu_kms_initialize_dsi(struct drm_device *dev, |
2998 | int i, rc; |
2999 | |
3000 | /*TODO: Support two independent DSI connectors */ |
3001 | - encoder = dpu_encoder_init(dev, DRM_MODE_CONNECTOR_DSI); |
3002 | + encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI); |
3003 | if (IS_ERR_OR_NULL(encoder)) { |
3004 | DPU_ERROR("encoder init failed for dsi display\n"); |
3005 | return; |
3006 | diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c |
3007 | index b640e39ebaca..4ac2b0c669b7 100644 |
3008 | --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c |
3009 | +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c |
3010 | @@ -1254,7 +1254,7 @@ static int dpu_plane_sspp_atomic_update(struct drm_plane *plane, |
3011 | const struct dpu_format *fmt; |
3012 | struct drm_crtc *crtc; |
3013 | struct drm_framebuffer *fb; |
3014 | - struct drm_rect src, dst; |
3015 | + int ret, min_scale; |
3016 | |
3017 | if (!plane) { |
3018 | DPU_ERROR("invalid plane\n"); |
3019 | @@ -1293,21 +1293,29 @@ static int dpu_plane_sspp_atomic_update(struct drm_plane *plane, |
3020 | pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT); |
3021 | _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL); |
3022 | |
3023 | - src.x1 = state->src_x >> 16; |
3024 | - src.y1 = state->src_y >> 16; |
3025 | - src.x2 = src.x1 + (state->src_w >> 16); |
3026 | - src.y2 = src.y1 + (state->src_h >> 16); |
3027 | + min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale); |
3028 | + ret = drm_atomic_helper_check_plane_state(state, crtc->state, min_scale, |
3029 | + pdpu->pipe_sblk->maxupscale << 16, |
3030 | + true, false); |
3031 | + if (ret) { |
3032 | + DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret); |
3033 | + return ret; |
3034 | + } |
3035 | |
3036 | - dst = drm_plane_state_dest(state); |
3037 | + DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT |
3038 | + ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src), |
3039 | + crtc->base.id, DRM_RECT_ARG(&state->dst), |
3040 | + (char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt)); |
3041 | |
3042 | - DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FMT "->crtc%u " DRM_RECT_FMT |
3043 | - ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_ARG(&src), |
3044 | - crtc->base.id, DRM_RECT_ARG(&dst), |
3045 | - (char *)&fmt->base.pixel_format, |
3046 | - DPU_FORMAT_IS_UBWC(fmt)); |
3047 | + pdpu->pipe_cfg.src_rect = state->src; |
3048 | + |
3049 | + /* state->src is 16.16, src_rect is not */ |
3050 | + pdpu->pipe_cfg.src_rect.x1 >>= 16; |
3051 | + pdpu->pipe_cfg.src_rect.x2 >>= 16; |
3052 | + pdpu->pipe_cfg.src_rect.y1 >>= 16; |
3053 | + pdpu->pipe_cfg.src_rect.y2 >>= 16; |
3054 | |
3055 | - pdpu->pipe_cfg.src_rect = src; |
3056 | - pdpu->pipe_cfg.dst_rect = dst; |
3057 | + pdpu->pipe_cfg.dst_rect = state->dst; |
3058 | |
3059 | _dpu_plane_setup_scaler(pdpu, pstate, fmt, false); |
3060 | |
3061 | diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c |
3062 | index 7d306c5acd09..273cbbe27c2e 100644 |
3063 | --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c |
3064 | +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c |
3065 | @@ -259,7 +259,6 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane, |
3066 | msm_framebuffer_cleanup(fb, kms->aspace); |
3067 | } |
3068 | |
3069 | -#define FRAC_16_16(mult, div) (((mult) << 16) / (div)) |
3070 | static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, |
3071 | struct drm_plane_state *state) |
3072 | { |
3073 | diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h |
3074 | index 8e510d5c758a..9d11f321f5a9 100644 |
3075 | --- a/drivers/gpu/drm/msm/msm_drv.h |
3076 | +++ b/drivers/gpu/drm/msm/msm_drv.h |
3077 | @@ -62,6 +62,8 @@ struct msm_gem_vma; |
3078 | #define MAX_BRIDGES 8 |
3079 | #define MAX_CONNECTORS 8 |
3080 | |
3081 | +#define FRAC_16_16(mult, div) (((mult) << 16) / (div)) |
3082 | + |
3083 | struct msm_file_private { |
3084 | rwlock_t queuelock; |
3085 | struct list_head submitqueues; |
3086 | diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c |
3087 | index 5e808cfec345..46e6b82f7b66 100644 |
3088 | --- a/drivers/gpu/drm/msm/msm_gpu.c |
3089 | +++ b/drivers/gpu/drm/msm/msm_gpu.c |
3090 | @@ -367,8 +367,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, |
3091 | msm_gpu_devcoredump_read, msm_gpu_devcoredump_free); |
3092 | } |
3093 | #else |
3094 | -static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, char *comm, |
3095 | - char *cmd) |
3096 | +static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, |
3097 | + struct msm_gem_submit *submit, char *comm, char *cmd) |
3098 | { |
3099 | } |
3100 | #endif |
3101 | diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c |
3102 | index 041e7daf8a33..faf7009c0a3c 100644 |
3103 | --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c |
3104 | +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c |
3105 | @@ -843,22 +843,16 @@ nv50_mstc_atomic_best_encoder(struct drm_connector *connector, |
3106 | { |
3107 | struct nv50_head *head = nv50_head(connector_state->crtc); |
3108 | struct nv50_mstc *mstc = nv50_mstc(connector); |
3109 | - if (mstc->port) { |
3110 | - struct nv50_mstm *mstm = mstc->mstm; |
3111 | - return &mstm->msto[head->base.index]->encoder; |
3112 | - } |
3113 | - return NULL; |
3114 | + |
3115 | + return &mstc->mstm->msto[head->base.index]->encoder; |
3116 | } |
3117 | |
3118 | static struct drm_encoder * |
3119 | nv50_mstc_best_encoder(struct drm_connector *connector) |
3120 | { |
3121 | struct nv50_mstc *mstc = nv50_mstc(connector); |
3122 | - if (mstc->port) { |
3123 | - struct nv50_mstm *mstm = mstc->mstm; |
3124 | - return &mstm->msto[0]->encoder; |
3125 | - } |
3126 | - return NULL; |
3127 | + |
3128 | + return &mstc->mstm->msto[0]->encoder; |
3129 | } |
3130 | |
3131 | static enum drm_mode_status |
3132 | diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c |
3133 | index 408b955e5c39..6dd72bc32897 100644 |
3134 | --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c |
3135 | +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c |
3136 | @@ -116,7 +116,7 @@ nv40_backlight_init(struct drm_connector *connector) |
3137 | &nv40_bl_ops, &props); |
3138 | |
3139 | if (IS_ERR(bd)) { |
3140 | - if (bl_connector.id > 0) |
3141 | + if (bl_connector.id >= 0) |
3142 | ida_simple_remove(&bl_ida, bl_connector.id); |
3143 | return PTR_ERR(bd); |
3144 | } |
3145 | @@ -249,7 +249,7 @@ nv50_backlight_init(struct drm_connector *connector) |
3146 | nv_encoder, ops, &props); |
3147 | |
3148 | if (IS_ERR(bd)) { |
3149 | - if (bl_connector.id > 0) |
3150 | + if (bl_connector.id >= 0) |
3151 | ida_simple_remove(&bl_ida, bl_connector.id); |
3152 | return PTR_ERR(bd); |
3153 | } |
3154 | diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c |
3155 | index d02e183717dc..5c14d6ac855d 100644 |
3156 | --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c |
3157 | +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c |
3158 | @@ -801,6 +801,7 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon, |
3159 | bl = acr->hsbl_unload_blob; |
3160 | } else { |
3161 | nvkm_error(_acr->subdev, "invalid secure boot blob!\n"); |
3162 | + kfree(bl_desc); |
3163 | return -EINVAL; |
3164 | } |
3165 | |
3166 | diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c |
3167 | index f92fe205550b..e884183c018a 100644 |
3168 | --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c |
3169 | +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c |
3170 | @@ -285,6 +285,17 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait) |
3171 | } |
3172 | |
3173 | txn->last_pat->next_pa = 0; |
3174 | + /* ensure that the written descriptors are visible to DMM */ |
3175 | + wmb(); |
3176 | + |
3177 | + /* |
3178 | + * NOTE: the wmb() above should be enough, but there seems to be a bug |
3179 | + * in OMAP's memory barrier implementation, which in some rare cases may |
3180 | + * cause the writes not to be observable after wmb(). |
3181 | + */ |
3182 | + |
3183 | + /* read back to ensure the data is in RAM */ |
3184 | + readl(&txn->last_pat->next_pa); |
3185 | |
3186 | /* write to PAT_DESCR to clear out any pending transaction */ |
3187 | dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]); |
3188 | diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c |
3189 | index f0bc7cc0e913..fb46df56f0c4 100644 |
3190 | --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c |
3191 | +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c |
3192 | @@ -516,12 +516,22 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) |
3193 | |
3194 | dev->mode_config.min_width = 0; |
3195 | dev->mode_config.min_height = 0; |
3196 | - dev->mode_config.max_width = 4095; |
3197 | - dev->mode_config.max_height = 2047; |
3198 | dev->mode_config.normalize_zpos = true; |
3199 | dev->mode_config.funcs = &rcar_du_mode_config_funcs; |
3200 | dev->mode_config.helper_private = &rcar_du_mode_config_helper; |
3201 | |
3202 | + if (rcdu->info->gen < 3) { |
3203 | + dev->mode_config.max_width = 4095; |
3204 | + dev->mode_config.max_height = 2047; |
3205 | + } else { |
3206 | + /* |
3207 | + * The Gen3 DU uses the VSP1 for memory access, and is limited |
3208 | + * to frame sizes of 8190x8190. |
3209 | + */ |
3210 | + dev->mode_config.max_width = 8190; |
3211 | + dev->mode_config.max_height = 8190; |
3212 | + } |
3213 | + |
3214 | rcdu->num_crtcs = hweight8(rcdu->info->channels_mask); |
3215 | |
3216 | ret = rcar_du_properties_init(rcdu); |
3217 | diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c |
3218 | index f814d37b1db2..05368fa4f956 100644 |
3219 | --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c |
3220 | +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c |
3221 | @@ -442,6 +442,11 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev) |
3222 | return 0; |
3223 | } |
3224 | |
3225 | +static void rockchip_drm_platform_shutdown(struct platform_device *pdev) |
3226 | +{ |
3227 | + rockchip_drm_platform_remove(pdev); |
3228 | +} |
3229 | + |
3230 | static const struct of_device_id rockchip_drm_dt_ids[] = { |
3231 | { .compatible = "rockchip,display-subsystem", }, |
3232 | { /* sentinel */ }, |
3233 | @@ -451,6 +456,7 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids); |
3234 | static struct platform_driver rockchip_drm_platform_driver = { |
3235 | .probe = rockchip_drm_platform_probe, |
3236 | .remove = rockchip_drm_platform_remove, |
3237 | + .shutdown = rockchip_drm_platform_shutdown, |
3238 | .driver = { |
3239 | .name = "rockchip-drm", |
3240 | .of_match_table = rockchip_drm_dt_ids, |
3241 | diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c |
3242 | index 33d51281272b..fcdbac4a56e3 100644 |
3243 | --- a/drivers/hwmon/hwmon.c |
3244 | +++ b/drivers/hwmon/hwmon.c |
3245 | @@ -635,8 +635,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, |
3246 | if (info[i]->config[j] & HWMON_T_INPUT) { |
3247 | err = hwmon_thermal_add_sensor(dev, |
3248 | hwdev, j); |
3249 | - if (err) |
3250 | - goto free_device; |
3251 | + if (err) { |
3252 | + device_unregister(hdev); |
3253 | + goto ida_remove; |
3254 | + } |
3255 | } |
3256 | } |
3257 | } |
3258 | @@ -644,8 +646,6 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, |
3259 | |
3260 | return hdev; |
3261 | |
3262 | -free_device: |
3263 | - device_unregister(hdev); |
3264 | free_hwmon: |
3265 | kfree(hwdev); |
3266 | ida_remove: |
3267 | diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c |
3268 | index 2566b4d8b342..73856c2a8ac0 100644 |
3269 | --- a/drivers/input/touchscreen/wm97xx-core.c |
3270 | +++ b/drivers/input/touchscreen/wm97xx-core.c |
3271 | @@ -929,7 +929,8 @@ static int __init wm97xx_init(void) |
3272 | |
3273 | static void __exit wm97xx_exit(void) |
3274 | { |
3275 | - driver_unregister(&wm97xx_driver); |
3276 | + if (IS_BUILTIN(CONFIG_AC97_BUS)) |
3277 | + driver_unregister(&wm97xx_driver); |
3278 | platform_driver_unregister(&wm97xx_mfd_driver); |
3279 | } |
3280 | |
3281 | diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c |
3282 | index 071f4bc240ca..7b662bd1c7a0 100644 |
3283 | --- a/drivers/media/i2c/ov5640.c |
3284 | +++ b/drivers/media/i2c/ov5640.c |
3285 | @@ -223,8 +223,10 @@ struct ov5640_dev { |
3286 | int power_count; |
3287 | |
3288 | struct v4l2_mbus_framefmt fmt; |
3289 | + bool pending_fmt_change; |
3290 | |
3291 | const struct ov5640_mode_info *current_mode; |
3292 | + const struct ov5640_mode_info *last_mode; |
3293 | enum ov5640_frame_rate current_fr; |
3294 | struct v4l2_fract frame_interval; |
3295 | |
3296 | @@ -255,7 +257,7 @@ static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl) |
3297 | * should be identified and removed to speed register load time |
3298 | * over i2c. |
3299 | */ |
3300 | - |
3301 | +/* YUV422 UYVY VGA@30fps */ |
3302 | static const struct reg_value ov5640_init_setting_30fps_VGA[] = { |
3303 | {0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0}, |
3304 | {0x3103, 0x03, 0, 0}, {0x3017, 0x00, 0, 0}, {0x3018, 0x00, 0, 0}, |
3305 | @@ -1613,10 +1615,10 @@ static int ov5640_set_mode_direct(struct ov5640_dev *sensor, |
3306 | return __v4l2_ctrl_s_ctrl(sensor->ctrls.auto_exp, exposure); |
3307 | } |
3308 | |
3309 | -static int ov5640_set_mode(struct ov5640_dev *sensor, |
3310 | - const struct ov5640_mode_info *orig_mode) |
3311 | +static int ov5640_set_mode(struct ov5640_dev *sensor) |
3312 | { |
3313 | const struct ov5640_mode_info *mode = sensor->current_mode; |
3314 | + const struct ov5640_mode_info *orig_mode = sensor->last_mode; |
3315 | enum ov5640_downsize_mode dn_mode, orig_dn_mode; |
3316 | s32 exposure; |
3317 | int ret; |
3318 | @@ -1673,6 +1675,7 @@ static int ov5640_set_mode(struct ov5640_dev *sensor, |
3319 | return ret; |
3320 | |
3321 | sensor->pending_mode_change = false; |
3322 | + sensor->last_mode = mode; |
3323 | |
3324 | return 0; |
3325 | } |
3326 | @@ -1689,6 +1692,7 @@ static int ov5640_restore_mode(struct ov5640_dev *sensor) |
3327 | ret = ov5640_load_regs(sensor, &ov5640_mode_init_data); |
3328 | if (ret < 0) |
3329 | return ret; |
3330 | + sensor->last_mode = &ov5640_mode_init_data; |
3331 | |
3332 | ret = ov5640_mod_reg(sensor, OV5640_REG_SYS_ROOT_DIVIDER, 0x3f, |
3333 | (ilog2(OV5640_SCLK2X_ROOT_DIVIDER_DEFAULT) << 2) | |
3334 | @@ -1697,7 +1701,7 @@ static int ov5640_restore_mode(struct ov5640_dev *sensor) |
3335 | return ret; |
3336 | |
3337 | /* now restore the last capture mode */ |
3338 | - ret = ov5640_set_mode(sensor, &ov5640_mode_init_data); |
3339 | + ret = ov5640_set_mode(sensor); |
3340 | if (ret < 0) |
3341 | return ret; |
3342 | |
3343 | @@ -1968,9 +1972,12 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd, |
3344 | |
3345 | if (new_mode != sensor->current_mode) { |
3346 | sensor->current_mode = new_mode; |
3347 | - sensor->fmt = *mbus_fmt; |
3348 | sensor->pending_mode_change = true; |
3349 | } |
3350 | + if (mbus_fmt->code != sensor->fmt.code) { |
3351 | + sensor->fmt = *mbus_fmt; |
3352 | + sensor->pending_fmt_change = true; |
3353 | + } |
3354 | out: |
3355 | mutex_unlock(&sensor->lock); |
3356 | return ret; |
3357 | @@ -2541,13 +2548,16 @@ static int ov5640_s_stream(struct v4l2_subdev *sd, int enable) |
3358 | |
3359 | if (sensor->streaming == !enable) { |
3360 | if (enable && sensor->pending_mode_change) { |
3361 | - ret = ov5640_set_mode(sensor, sensor->current_mode); |
3362 | + ret = ov5640_set_mode(sensor); |
3363 | if (ret) |
3364 | goto out; |
3365 | + } |
3366 | |
3367 | + if (enable && sensor->pending_fmt_change) { |
3368 | ret = ov5640_set_framefmt(sensor, &sensor->fmt); |
3369 | if (ret) |
3370 | goto out; |
3371 | + sensor->pending_fmt_change = false; |
3372 | } |
3373 | |
3374 | if (sensor->ep.bus_type == V4L2_MBUS_CSI2) |
3375 | @@ -2642,9 +2652,14 @@ static int ov5640_probe(struct i2c_client *client, |
3376 | return -ENOMEM; |
3377 | |
3378 | sensor->i2c_client = client; |
3379 | + |
3380 | + /* |
3381 | + * default init sequence initialize sensor to |
3382 | + * YUV422 UYVY VGA@30fps |
3383 | + */ |
3384 | fmt = &sensor->fmt; |
3385 | - fmt->code = ov5640_formats[0].code; |
3386 | - fmt->colorspace = ov5640_formats[0].colorspace; |
3387 | + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; |
3388 | + fmt->colorspace = V4L2_COLORSPACE_SRGB; |
3389 | fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace); |
3390 | fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE; |
3391 | fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace); |
3392 | @@ -2656,7 +2671,7 @@ static int ov5640_probe(struct i2c_client *client, |
3393 | sensor->current_fr = OV5640_30_FPS; |
3394 | sensor->current_mode = |
3395 | &ov5640_mode_data[OV5640_30_FPS][OV5640_MODE_VGA_640_480]; |
3396 | - sensor->pending_mode_change = true; |
3397 | + sensor->last_mode = sensor->current_mode; |
3398 | |
3399 | sensor->ae_target = 52; |
3400 | |
3401 | diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c |
3402 | index 805bd9c65940..8b450fc53202 100644 |
3403 | --- a/drivers/media/i2c/tvp5150.c |
3404 | +++ b/drivers/media/i2c/tvp5150.c |
3405 | @@ -901,9 +901,6 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd, |
3406 | |
3407 | /* tvp5150 has some special limits */ |
3408 | rect.left = clamp(rect.left, 0, TVP5150_MAX_CROP_LEFT); |
3409 | - rect.width = clamp_t(unsigned int, rect.width, |
3410 | - TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left, |
3411 | - TVP5150_H_MAX - rect.left); |
3412 | rect.top = clamp(rect.top, 0, TVP5150_MAX_CROP_TOP); |
3413 | |
3414 | /* Calculate height based on current standard */ |
3415 | @@ -917,9 +914,16 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd, |
3416 | else |
3417 | hmax = TVP5150_V_MAX_OTHERS; |
3418 | |
3419 | - rect.height = clamp_t(unsigned int, rect.height, |
3420 | + /* |
3421 | + * alignments: |
3422 | + * - width = 2 due to UYVY colorspace |
3423 | + * - height, image = no special alignment |
3424 | + */ |
3425 | + v4l_bound_align_image(&rect.width, |
3426 | + TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left, |
3427 | + TVP5150_H_MAX - rect.left, 1, &rect.height, |
3428 | hmax - TVP5150_MAX_CROP_TOP - rect.top, |
3429 | - hmax - rect.top); |
3430 | + hmax - rect.top, 0, 0); |
3431 | |
3432 | tvp5150_write(sd, TVP5150_VERT_BLANKING_START, rect.top); |
3433 | tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP, |
3434 | diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c |
3435 | index 62bc8049b320..198c05e83f5c 100644 |
3436 | --- a/drivers/media/pci/cx23885/altera-ci.c |
3437 | +++ b/drivers/media/pci/cx23885/altera-ci.c |
3438 | @@ -665,6 +665,10 @@ static int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr) |
3439 | } |
3440 | |
3441 | temp_int = append_internal(inter); |
3442 | + if (!temp_int) { |
3443 | + ret = -ENOMEM; |
3444 | + goto err; |
3445 | + } |
3446 | inter->filts_used = 1; |
3447 | inter->dev = config->dev; |
3448 | inter->fpga_rw = config->fpga_rw; |
3449 | @@ -699,6 +703,7 @@ err: |
3450 | __func__, ret); |
3451 | |
3452 | kfree(pid_filt); |
3453 | + kfree(inter); |
3454 | |
3455 | return ret; |
3456 | } |
3457 | @@ -733,6 +738,10 @@ int altera_ci_init(struct altera_ci_config *config, int ci_nr) |
3458 | } |
3459 | |
3460 | temp_int = append_internal(inter); |
3461 | + if (!temp_int) { |
3462 | + ret = -ENOMEM; |
3463 | + goto err; |
3464 | + } |
3465 | inter->cis_used = 1; |
3466 | inter->dev = config->dev; |
3467 | inter->fpga_rw = config->fpga_rw; |
3468 | @@ -801,6 +810,7 @@ err: |
3469 | ci_dbg_print("%s: Cannot initialize CI: Error %d.\n", __func__, ret); |
3470 | |
3471 | kfree(state); |
3472 | + kfree(inter); |
3473 | |
3474 | return ret; |
3475 | } |
3476 | diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c |
3477 | index 726b3b93a486..bf7b8417c27f 100644 |
3478 | --- a/drivers/media/platform/coda/coda-common.c |
3479 | +++ b/drivers/media/platform/coda/coda-common.c |
3480 | @@ -1804,7 +1804,8 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl) |
3481 | break; |
3482 | case V4L2_CID_MPEG_VIDEO_H264_PROFILE: |
3483 | /* TODO: switch between baseline and constrained baseline */ |
3484 | - ctx->params.h264_profile_idc = 66; |
3485 | + if (ctx->inst_type == CODA_INST_ENCODER) |
3486 | + ctx->params.h264_profile_idc = 66; |
3487 | break; |
3488 | case V4L2_CID_MPEG_VIDEO_H264_LEVEL: |
3489 | /* nothing to do, this is set by the encoder */ |
3490 | diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig |
3491 | index e514d57a0419..aa983422aa97 100644 |
3492 | --- a/drivers/mtd/devices/Kconfig |
3493 | +++ b/drivers/mtd/devices/Kconfig |
3494 | @@ -207,7 +207,7 @@ comment "Disk-On-Chip Device Drivers" |
3495 | config MTD_DOCG3 |
3496 | tristate "M-Systems Disk-On-Chip G3" |
3497 | select BCH |
3498 | - select BCH_CONST_PARAMS |
3499 | + select BCH_CONST_PARAMS if !MTD_NAND_BCH |
3500 | select BITREVERSE |
3501 | help |
3502 | This provides an MTD device driver for the M-Systems DiskOnChip |
3503 | diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c |
3504 | index 8e714fbfa521..6e9cbd1a0b6d 100644 |
3505 | --- a/drivers/mtd/spi-nor/cadence-quadspi.c |
3506 | +++ b/drivers/mtd/spi-nor/cadence-quadspi.c |
3507 | @@ -996,7 +996,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf, |
3508 | err_unmap: |
3509 | dma_unmap_single(nor->dev, dma_dst, len, DMA_DEV_TO_MEM); |
3510 | |
3511 | - return 0; |
3512 | + return ret; |
3513 | } |
3514 | |
3515 | static ssize_t cqspi_read(struct spi_nor *nor, loff_t from, |
3516 | diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c |
3517 | index ee28ec9e0aba..3c597569cfae 100644 |
3518 | --- a/drivers/net/bonding/bond_main.c |
3519 | +++ b/drivers/net/bonding/bond_main.c |
3520 | @@ -3111,13 +3111,13 @@ static int bond_slave_netdev_event(unsigned long event, |
3521 | case NETDEV_CHANGE: |
3522 | /* For 802.3ad mode only: |
3523 | * Getting invalid Speed/Duplex values here will put slave |
3524 | - * in weird state. So mark it as link-down for the time |
3525 | + * in weird state. So mark it as link-fail for the time |
3526 | * being and let link-monitoring (miimon) set it right when |
3527 | * correct speeds/duplex are available. |
3528 | */ |
3529 | if (bond_update_speed_duplex(slave) && |
3530 | BOND_MODE(bond) == BOND_MODE_8023AD) |
3531 | - slave->link = BOND_LINK_DOWN; |
3532 | + slave->link = BOND_LINK_FAIL; |
3533 | |
3534 | if (BOND_MODE(bond) == BOND_MODE_8023AD) |
3535 | bond_3ad_adapter_speed_duplex_changed(slave); |
3536 | diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c |
3537 | index 27d9b4bba535..2411ed3c7303 100644 |
3538 | --- a/drivers/of/of_numa.c |
3539 | +++ b/drivers/of/of_numa.c |
3540 | @@ -115,9 +115,14 @@ static int __init of_numa_parse_distance_map_v1(struct device_node *map) |
3541 | distance = of_read_number(matrix, 1); |
3542 | matrix++; |
3543 | |
3544 | + if ((nodea == nodeb && distance != LOCAL_DISTANCE) || |
3545 | + (nodea != nodeb && distance <= LOCAL_DISTANCE)) { |
3546 | + pr_err("Invalid distance[node%d -> node%d] = %d\n", |
3547 | + nodea, nodeb, distance); |
3548 | + return -EINVAL; |
3549 | + } |
3550 | + |
3551 | numa_set_distance(nodea, nodeb, distance); |
3552 | - pr_debug("distance[node%d -> node%d] = %d\n", |
3553 | - nodea, nodeb, distance); |
3554 | |
3555 | /* Set default distance of node B->A same as A->B */ |
3556 | if (nodeb > nodea) |
3557 | diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c |
3558 | index e79f2a181ad2..b9ec4a16db1f 100644 |
3559 | --- a/drivers/rtc/hctosys.c |
3560 | +++ b/drivers/rtc/hctosys.c |
3561 | @@ -50,8 +50,10 @@ static int __init rtc_hctosys(void) |
3562 | tv64.tv_sec = rtc_tm_to_time64(&tm); |
3563 | |
3564 | #if BITS_PER_LONG == 32 |
3565 | - if (tv64.tv_sec > INT_MAX) |
3566 | + if (tv64.tv_sec > INT_MAX) { |
3567 | + err = -ERANGE; |
3568 | goto err_read; |
3569 | + } |
3570 | #endif |
3571 | |
3572 | err = do_settimeofday64(&tv64); |
3573 | diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c |
3574 | index c11a89be292c..4a9fd8d944d6 100644 |
3575 | --- a/drivers/scsi/qla2xxx/qla_bsg.c |
3576 | +++ b/drivers/scsi/qla2xxx/qla_bsg.c |
3577 | @@ -2487,7 +2487,7 @@ qla24xx_bsg_request(struct bsg_job *bsg_job) |
3578 | vha = shost_priv(host); |
3579 | } |
3580 | |
3581 | - if (qla2x00_reset_active(vha)) { |
3582 | + if (qla2x00_chip_is_down(vha)) { |
3583 | ql_dbg(ql_dbg_user, vha, 0x709f, |
3584 | "BSG: ISP abort active/needed -- cmd=%d.\n", |
3585 | bsg_request->msgcode); |
3586 | diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c |
3587 | index a0038d879b9d..de3f2a097451 100644 |
3588 | --- a/drivers/scsi/qla2xxx/qla_gs.c |
3589 | +++ b/drivers/scsi/qla2xxx/qla_gs.c |
3590 | @@ -3261,6 +3261,9 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res) |
3591 | "Async done-%s res %x, WWPN %8phC \n", |
3592 | sp->name, res, fcport->port_name); |
3593 | |
3594 | + if (res == QLA_FUNCTION_TIMEOUT) |
3595 | + return; |
3596 | + |
3597 | if (res == (DID_ERROR << 16)) { |
3598 | /* entry status error */ |
3599 | goto done; |
3600 | @@ -4444,9 +4447,9 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) |
3601 | sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; |
3602 | qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); |
3603 | |
3604 | - rspsz = sizeof(struct ct_sns_gpnft_rsp) + |
3605 | - ((vha->hw->max_fibre_devices - 1) * |
3606 | - sizeof(struct ct_sns_gpn_ft_data)); |
3607 | + rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size; |
3608 | + memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); |
3609 | + memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); |
3610 | |
3611 | ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; |
3612 | /* CT_IU preamble */ |
3613 | diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c |
3614 | index b934977c5c26..5352c9bbcaf7 100644 |
3615 | --- a/drivers/scsi/qla2xxx/qla_init.c |
3616 | +++ b/drivers/scsi/qla2xxx/qla_init.c |
3617 | @@ -52,12 +52,14 @@ qla2x00_sp_timeout(struct timer_list *t) |
3618 | struct srb_iocb *iocb; |
3619 | struct req_que *req; |
3620 | unsigned long flags; |
3621 | + struct qla_hw_data *ha = sp->vha->hw; |
3622 | |
3623 | - spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); |
3624 | + WARN_ON(irqs_disabled()); |
3625 | + spin_lock_irqsave(&ha->hardware_lock, flags); |
3626 | req = sp->qpair->req; |
3627 | req->outstanding_cmds[sp->handle] = NULL; |
3628 | iocb = &sp->u.iocb_cmd; |
3629 | - spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); |
3630 | + spin_unlock_irqrestore(&ha->hardware_lock, flags); |
3631 | iocb->timeout(sp); |
3632 | } |
3633 | |
3634 | @@ -972,6 +974,15 @@ void qla24xx_async_gpdb_sp_done(void *s, int res) |
3635 | |
3636 | fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); |
3637 | |
3638 | + if (res == QLA_FUNCTION_TIMEOUT) |
3639 | + return; |
3640 | + |
3641 | + if (res == QLA_FUNCTION_TIMEOUT) { |
3642 | + dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, |
3643 | + sp->u.iocb_cmd.u.mbx.in_dma); |
3644 | + return; |
3645 | + } |
3646 | + |
3647 | memset(&ea, 0, sizeof(ea)); |
3648 | ea.event = FCME_GPDB_DONE; |
3649 | ea.fcport = fcport; |
3650 | @@ -1788,6 +1799,8 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) |
3651 | wait_for_completion(&abt_iocb->u.abt.comp); |
3652 | rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? |
3653 | QLA_SUCCESS : QLA_FUNCTION_FAILED; |
3654 | + } else { |
3655 | + goto done; |
3656 | } |
3657 | |
3658 | done_free_sp: |
3659 | @@ -1952,25 +1965,15 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) |
3660 | cid.b.rsvd_1 = 0; |
3661 | |
3662 | ql_dbg(ql_dbg_disc, vha, 0x20ec, |
3663 | - "%s %d %8phC LoopID 0x%x in use post gnl\n", |
3664 | + "%s %d %8phC lid %#x in use with pid %06x post gnl\n", |
3665 | __func__, __LINE__, ea->fcport->port_name, |
3666 | - ea->fcport->loop_id); |
3667 | + ea->fcport->loop_id, cid.b24); |
3668 | |
3669 | - if (IS_SW_RESV_ADDR(cid)) { |
3670 | - set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); |
3671 | - ea->fcport->loop_id = FC_NO_LOOP_ID; |
3672 | - } else { |
3673 | - qla2x00_clear_loop_id(ea->fcport); |
3674 | - } |
3675 | + set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); |
3676 | + ea->fcport->loop_id = FC_NO_LOOP_ID; |
3677 | qla24xx_post_gnl_work(vha, ea->fcport); |
3678 | break; |
3679 | case MBS_PORT_ID_USED: |
3680 | - ql_dbg(ql_dbg_disc, vha, 0x20ed, |
3681 | - "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n", |
3682 | - __func__, __LINE__, ea->fcport->port_name, |
3683 | - ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area, |
3684 | - ea->fcport->d_id.b.al_pa); |
3685 | - |
3686 | lid = ea->iop[1] & 0xffff; |
3687 | qlt_find_sess_invalidate_other(vha, |
3688 | wwn_to_u64(ea->fcport->port_name), |
3689 | @@ -4711,6 +4714,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) |
3690 | fcport->loop_id = FC_NO_LOOP_ID; |
3691 | qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); |
3692 | fcport->supported_classes = FC_COS_UNSPECIFIED; |
3693 | + fcport->fp_speed = PORT_SPEED_UNKNOWN; |
3694 | |
3695 | fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev, |
3696 | sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma, |
3697 | @@ -6682,7 +6686,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) |
3698 | * The next call disables the board |
3699 | * completely. |
3700 | */ |
3701 | - ha->isp_ops->reset_adapter(vha); |
3702 | + qla2x00_abort_isp_cleanup(vha); |
3703 | vha->flags.online = 0; |
3704 | clear_bit(ISP_ABORT_RETRY, |
3705 | &vha->dpc_flags); |
3706 | @@ -7142,7 +7146,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) |
3707 | } |
3708 | icb->firmware_options_2 &= cpu_to_le32( |
3709 | ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); |
3710 | - vha->flags.process_response_queue = 0; |
3711 | if (ha->zio_mode != QLA_ZIO_DISABLED) { |
3712 | ha->zio_mode = QLA_ZIO_MODE_6; |
3713 | |
3714 | @@ -7153,7 +7156,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) |
3715 | icb->firmware_options_2 |= cpu_to_le32( |
3716 | (uint32_t)ha->zio_mode); |
3717 | icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); |
3718 | - vha->flags.process_response_queue = 1; |
3719 | } |
3720 | |
3721 | if (rval) { |
3722 | diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c |
3723 | index 42ac8e097419..119927220299 100644 |
3724 | --- a/drivers/scsi/qla2xxx/qla_iocb.c |
3725 | +++ b/drivers/scsi/qla2xxx/qla_iocb.c |
3726 | @@ -1526,12 +1526,6 @@ qla24xx_start_scsi(srb_t *sp) |
3727 | |
3728 | /* Set chip new ring index. */ |
3729 | WRT_REG_DWORD(req->req_q_in, req->ring_index); |
3730 | - RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); |
3731 | - |
3732 | - /* Manage unprocessed RIO/ZIO commands in response queue. */ |
3733 | - if (vha->flags.process_response_queue && |
3734 | - rsp->ring_ptr->signature != RESPONSE_PROCESSED) |
3735 | - qla24xx_process_response_queue(vha, rsp); |
3736 | |
3737 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
3738 | return QLA_SUCCESS; |
3739 | @@ -1725,12 +1719,6 @@ qla24xx_dif_start_scsi(srb_t *sp) |
3740 | |
3741 | /* Set chip new ring index. */ |
3742 | WRT_REG_DWORD(req->req_q_in, req->ring_index); |
3743 | - RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); |
3744 | - |
3745 | - /* Manage unprocessed RIO/ZIO commands in response queue. */ |
3746 | - if (vha->flags.process_response_queue && |
3747 | - rsp->ring_ptr->signature != RESPONSE_PROCESSED) |
3748 | - qla24xx_process_response_queue(vha, rsp); |
3749 | |
3750 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
3751 | |
3752 | @@ -1880,11 +1868,6 @@ qla2xxx_start_scsi_mq(srb_t *sp) |
3753 | /* Set chip new ring index. */ |
3754 | WRT_REG_DWORD(req->req_q_in, req->ring_index); |
3755 | |
3756 | - /* Manage unprocessed RIO/ZIO commands in response queue. */ |
3757 | - if (vha->flags.process_response_queue && |
3758 | - rsp->ring_ptr->signature != RESPONSE_PROCESSED) |
3759 | - qla24xx_process_response_queue(vha, rsp); |
3760 | - |
3761 | spin_unlock_irqrestore(&qpair->qp_lock, flags); |
3762 | return QLA_SUCCESS; |
3763 | |
3764 | diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c |
3765 | index 596a9b214df1..84f57f075455 100644 |
3766 | --- a/drivers/scsi/qla2xxx/qla_mbx.c |
3767 | +++ b/drivers/scsi/qla2xxx/qla_mbx.c |
3768 | @@ -3762,10 +3762,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, |
3769 | mcp->mb[0] = MBC_PORT_PARAMS; |
3770 | mcp->mb[1] = loop_id; |
3771 | mcp->mb[2] = BIT_0; |
3772 | - if (IS_CNA_CAPABLE(vha->hw)) |
3773 | - mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); |
3774 | - else |
3775 | - mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); |
3776 | + mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); |
3777 | mcp->mb[9] = vha->vp_idx; |
3778 | mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; |
3779 | mcp->in_mb = MBX_3|MBX_1|MBX_0; |
3780 | diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c |
3781 | index 20d9dc39f0fb..e6545cb9a2c1 100644 |
3782 | --- a/drivers/scsi/qla2xxx/qla_nvme.c |
3783 | +++ b/drivers/scsi/qla2xxx/qla_nvme.c |
3784 | @@ -607,7 +607,7 @@ void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res) |
3785 | { |
3786 | int rval; |
3787 | |
3788 | - if (!test_bit(ABORT_ISP_ACTIVE, &sp->vha->dpc_flags)) { |
3789 | + if (ha->flags.fw_started) { |
3790 | rval = ha->isp_ops->abort_command(sp); |
3791 | if (!rval && !qla_nvme_wait_on_command(sp)) |
3792 | ql_log(ql_log_warn, NULL, 0x2112, |
3793 | @@ -660,9 +660,6 @@ void qla_nvme_delete(struct scsi_qla_host *vha) |
3794 | __func__, fcport); |
3795 | |
3796 | nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0); |
3797 | - init_completion(&fcport->nvme_del_done); |
3798 | - nvme_fc_unregister_remoteport(fcport->nvme_remote_port); |
3799 | - wait_for_completion(&fcport->nvme_del_done); |
3800 | } |
3801 | |
3802 | if (vha->nvme_local_port) { |
3803 | diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c |
3804 | index 42b8f0d3e580..ae9fd2d01004 100644 |
3805 | --- a/drivers/scsi/qla2xxx/qla_os.c |
3806 | +++ b/drivers/scsi/qla2xxx/qla_os.c |
3807 | @@ -4808,10 +4808,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) |
3808 | fcport->d_id = e->u.new_sess.id; |
3809 | fcport->flags |= FCF_FABRIC_DEVICE; |
3810 | fcport->fw_login_state = DSC_LS_PLOGI_PEND; |
3811 | - if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP) |
3812 | + if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP) |
3813 | fcport->fc4_type = FC4_TYPE_FCP_SCSI; |
3814 | |
3815 | - if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) { |
3816 | + if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) { |
3817 | fcport->fc4_type = FC4_TYPE_OTHER; |
3818 | fcport->fc4f_nvme = FC4_TYPE_NVME; |
3819 | } |
3820 | diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c |
3821 | index 8c811b251d42..d2888b30a8a3 100644 |
3822 | --- a/drivers/scsi/qla2xxx/qla_target.c |
3823 | +++ b/drivers/scsi/qla2xxx/qla_target.c |
3824 | @@ -1261,7 +1261,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) |
3825 | qla24xx_chk_fcp_state(sess); |
3826 | |
3827 | ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, |
3828 | - "Scheduling sess %p for deletion\n", sess); |
3829 | + "Scheduling sess %p for deletion %8phC\n", |
3830 | + sess, sess->port_name); |
3831 | |
3832 | INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); |
3833 | WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); |
3834 | diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c |
3835 | index e03d12a5f986..64e2d859f633 100644 |
3836 | --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c |
3837 | +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c |
3838 | @@ -718,10 +718,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) |
3839 | cmd->sg_cnt = 0; |
3840 | cmd->offset = 0; |
3841 | cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); |
3842 | - if (cmd->trc_flags & TRC_XMIT_STATUS) { |
3843 | - pr_crit("Multiple calls for status = %p.\n", cmd); |
3844 | - dump_stack(); |
3845 | - } |
3846 | cmd->trc_flags |= TRC_XMIT_STATUS; |
3847 | |
3848 | if (se_cmd->data_direction == DMA_FROM_DEVICE) { |
3849 | diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c |
3850 | index eb97d2dd3651..b5f638286037 100644 |
3851 | --- a/drivers/scsi/scsi_lib.c |
3852 | +++ b/drivers/scsi/scsi_lib.c |
3853 | @@ -697,6 +697,12 @@ static bool scsi_end_request(struct request *req, blk_status_t error, |
3854 | */ |
3855 | scsi_mq_uninit_cmd(cmd); |
3856 | |
3857 | + /* |
3858 | + * queue is still alive, so grab the ref for preventing it |
3859 | + * from being cleaned up during running queue. |
3860 | + */ |
3861 | + percpu_ref_get(&q->q_usage_counter); |
3862 | + |
3863 | __blk_mq_end_request(req, error); |
3864 | |
3865 | if (scsi_target(sdev)->single_lun || |
3866 | @@ -704,6 +710,8 @@ static bool scsi_end_request(struct request *req, blk_status_t error, |
3867 | kblockd_schedule_work(&sdev->requeue_work); |
3868 | else |
3869 | blk_mq_run_hw_queues(q, true); |
3870 | + |
3871 | + percpu_ref_put(&q->q_usage_counter); |
3872 | } else { |
3873 | unsigned long flags; |
3874 | |
3875 | diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h |
3876 | index 3efc47e82973..bd040c29c4bf 100644 |
3877 | --- a/drivers/soc/ti/knav_qmss.h |
3878 | +++ b/drivers/soc/ti/knav_qmss.h |
3879 | @@ -329,8 +329,8 @@ struct knav_range_ops { |
3880 | }; |
3881 | |
3882 | struct knav_irq_info { |
3883 | - int irq; |
3884 | - u32 cpu_map; |
3885 | + int irq; |
3886 | + struct cpumask *cpu_mask; |
3887 | }; |
3888 | |
3889 | struct knav_range_info { |
3890 | diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c |
3891 | index 316e82e46f6c..2f7fb2dcc1d6 100644 |
3892 | --- a/drivers/soc/ti/knav_qmss_acc.c |
3893 | +++ b/drivers/soc/ti/knav_qmss_acc.c |
3894 | @@ -205,18 +205,18 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range, |
3895 | { |
3896 | struct knav_device *kdev = range->kdev; |
3897 | struct knav_acc_channel *acc; |
3898 | - unsigned long cpu_map; |
3899 | + struct cpumask *cpu_mask; |
3900 | int ret = 0, irq; |
3901 | u32 old, new; |
3902 | |
3903 | if (range->flags & RANGE_MULTI_QUEUE) { |
3904 | acc = range->acc; |
3905 | irq = range->irqs[0].irq; |
3906 | - cpu_map = range->irqs[0].cpu_map; |
3907 | + cpu_mask = range->irqs[0].cpu_mask; |
3908 | } else { |
3909 | acc = range->acc + queue; |
3910 | irq = range->irqs[queue].irq; |
3911 | - cpu_map = range->irqs[queue].cpu_map; |
3912 | + cpu_mask = range->irqs[queue].cpu_mask; |
3913 | } |
3914 | |
3915 | old = acc->open_mask; |
3916 | @@ -239,8 +239,8 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range, |
3917 | acc->name, acc->name); |
3918 | ret = request_irq(irq, knav_acc_int_handler, 0, acc->name, |
3919 | range); |
3920 | - if (!ret && cpu_map) { |
3921 | - ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map)); |
3922 | + if (!ret && cpu_mask) { |
3923 | + ret = irq_set_affinity_hint(irq, cpu_mask); |
3924 | if (ret) { |
3925 | dev_warn(range->kdev->dev, |
3926 | "Failed to set IRQ affinity\n"); |
3927 | diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c |
3928 | index 6755f2af5619..ef36acc0e708 100644 |
3929 | --- a/drivers/soc/ti/knav_qmss_queue.c |
3930 | +++ b/drivers/soc/ti/knav_qmss_queue.c |
3931 | @@ -118,19 +118,17 @@ static int knav_queue_setup_irq(struct knav_range_info *range, |
3932 | struct knav_queue_inst *inst) |
3933 | { |
3934 | unsigned queue = inst->id - range->queue_base; |
3935 | - unsigned long cpu_map; |
3936 | int ret = 0, irq; |
3937 | |
3938 | if (range->flags & RANGE_HAS_IRQ) { |
3939 | irq = range->irqs[queue].irq; |
3940 | - cpu_map = range->irqs[queue].cpu_map; |
3941 | ret = request_irq(irq, knav_queue_int_handler, 0, |
3942 | inst->irq_name, inst); |
3943 | if (ret) |
3944 | return ret; |
3945 | disable_irq(irq); |
3946 | - if (cpu_map) { |
3947 | - ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map)); |
3948 | + if (range->irqs[queue].cpu_mask) { |
3949 | + ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask); |
3950 | if (ret) { |
3951 | dev_warn(range->kdev->dev, |
3952 | "Failed to set IRQ affinity\n"); |
3953 | @@ -1262,9 +1260,19 @@ static int knav_setup_queue_range(struct knav_device *kdev, |
3954 | |
3955 | range->num_irqs++; |
3956 | |
3957 | - if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) |
3958 | - range->irqs[i].cpu_map = |
3959 | - (oirq.args[2] & 0x0000ff00) >> 8; |
3960 | + if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) { |
3961 | + unsigned long mask; |
3962 | + int bit; |
3963 | + |
3964 | + range->irqs[i].cpu_mask = devm_kzalloc(dev, |
3965 | + cpumask_size(), GFP_KERNEL); |
3966 | + if (!range->irqs[i].cpu_mask) |
3967 | + return -ENOMEM; |
3968 | + |
3969 | + mask = (oirq.args[2] & 0x0000ff00) >> 8; |
3970 | + for_each_set_bit(bit, &mask, BITS_PER_LONG) |
3971 | + cpumask_set_cpu(bit, range->irqs[i].cpu_mask); |
3972 | + } |
3973 | } |
3974 | |
3975 | range->num_irqs = min(range->num_irqs, range->num_queues); |
3976 | diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c |
3977 | index 8721f0a41d15..14da8cc2246a 100644 |
3978 | --- a/drivers/staging/erofs/unzip_vle.c |
3979 | +++ b/drivers/staging/erofs/unzip_vle.c |
3980 | @@ -1490,6 +1490,7 @@ static erofs_off_t vle_get_logical_extent_head( |
3981 | unsigned long long ofs; |
3982 | const unsigned int clusterbits = EROFS_SB(inode->i_sb)->clusterbits; |
3983 | const unsigned int clustersize = 1 << clusterbits; |
3984 | + unsigned int delta0; |
3985 | |
3986 | if (page->index != blkaddr) { |
3987 | kunmap_atomic(*kaddr_iter); |
3988 | @@ -1504,12 +1505,13 @@ static erofs_off_t vle_get_logical_extent_head( |
3989 | di = *kaddr_iter + vle_extent_blkoff(inode, lcn); |
3990 | switch (vle_cluster_type(di)) { |
3991 | case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: |
3992 | - BUG_ON(!di->di_u.delta[0]); |
3993 | - BUG_ON(lcn < di->di_u.delta[0]); |
3994 | + delta0 = le16_to_cpu(di->di_u.delta[0]); |
3995 | + DBG_BUGON(!delta0); |
3996 | + DBG_BUGON(lcn < delta0); |
3997 | |
3998 | ofs = vle_get_logical_extent_head(inode, |
3999 | page_iter, kaddr_iter, |
4000 | - lcn - di->di_u.delta[0], pcn, flags); |
4001 | + lcn - delta0, pcn, flags); |
4002 | break; |
4003 | case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: |
4004 | *flags ^= EROFS_MAP_ZIPPED; |
4005 | diff --git a/drivers/staging/iio/adc/ad7606.c b/drivers/staging/iio/adc/ad7606.c |
4006 | index 25b9fcd5e3a4..ce3351832fb1 100644 |
4007 | --- a/drivers/staging/iio/adc/ad7606.c |
4008 | +++ b/drivers/staging/iio/adc/ad7606.c |
4009 | @@ -26,9 +26,12 @@ |
4010 | |
4011 | #include "ad7606.h" |
4012 | |
4013 | -/* Scales are computed as 2.5/2**16 and 5/2**16 respectively */ |
4014 | +/* |
4015 | + * Scales are computed as 5000/32768 and 10000/32768 respectively, |
4016 | + * so that when applied to the raw values they provide mV values |
4017 | + */ |
4018 | static const unsigned int scale_avail[2][2] = { |
4019 | - {0, 38147}, {0, 76294} |
4020 | + {0, 152588}, {0, 305176} |
4021 | }; |
4022 | |
4023 | static int ad7606_reset(struct ad7606_state *st) |
4024 | diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c |
4025 | index cf342eb58e10..ad7e28ab9a4f 100644 |
4026 | --- a/drivers/staging/most/video/video.c |
4027 | +++ b/drivers/staging/most/video/video.c |
4028 | @@ -530,7 +530,7 @@ static int comp_disconnect_channel(struct most_interface *iface, |
4029 | return 0; |
4030 | } |
4031 | |
4032 | -static struct core_component comp_info = { |
4033 | +static struct core_component comp = { |
4034 | .name = "video", |
4035 | .probe_channel = comp_probe_channel, |
4036 | .disconnect_channel = comp_disconnect_channel, |
4037 | @@ -565,7 +565,7 @@ static void __exit comp_exit(void) |
4038 | } |
4039 | spin_unlock_irq(&list_lock); |
4040 | |
4041 | - most_deregister_component(&comp_info); |
4042 | + most_deregister_component(&comp); |
4043 | BUG_ON(!list_empty(&video_devices)); |
4044 | } |
4045 | |
4046 | diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c |
4047 | index 6ab982309e6a..441778100887 100644 |
4048 | --- a/drivers/thermal/thermal_core.c |
4049 | +++ b/drivers/thermal/thermal_core.c |
4050 | @@ -1102,8 +1102,9 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) |
4051 | mutex_unlock(&thermal_list_lock); |
4052 | |
4053 | ida_simple_remove(&thermal_cdev_ida, cdev->id); |
4054 | - device_unregister(&cdev->device); |
4055 | + device_del(&cdev->device); |
4056 | thermal_cooling_device_destroy_sysfs(cdev); |
4057 | + put_device(&cdev->device); |
4058 | } |
4059 | EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister); |
4060 | |
4061 | diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c |
4062 | index af8beefe9b5c..877fd7f8a8ed 100644 |
4063 | --- a/drivers/tty/serial/8250/8250_of.c |
4064 | +++ b/drivers/tty/serial/8250/8250_of.c |
4065 | @@ -58,7 +58,7 @@ static int of_platform_serial_setup(struct platform_device *ofdev, |
4066 | struct resource resource; |
4067 | struct device_node *np = ofdev->dev.of_node; |
4068 | u32 clk, spd, prop; |
4069 | - int ret; |
4070 | + int ret, irq; |
4071 | |
4072 | memset(port, 0, sizeof *port); |
4073 | |
4074 | @@ -143,21 +143,27 @@ static int of_platform_serial_setup(struct platform_device *ofdev, |
4075 | if (ret >= 0) |
4076 | port->line = ret; |
4077 | |
4078 | - port->irq = irq_of_parse_and_map(np, 0); |
4079 | - if (!port->irq) { |
4080 | - ret = -EPROBE_DEFER; |
4081 | - goto err_unprepare; |
4082 | + irq = of_irq_get(np, 0); |
4083 | + if (irq < 0) { |
4084 | + if (irq == -EPROBE_DEFER) { |
4085 | + ret = -EPROBE_DEFER; |
4086 | + goto err_unprepare; |
4087 | + } |
4088 | + /* IRQ support not mandatory */ |
4089 | + irq = 0; |
4090 | } |
4091 | |
4092 | + port->irq = irq; |
4093 | + |
4094 | info->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL); |
4095 | if (IS_ERR(info->rst)) { |
4096 | ret = PTR_ERR(info->rst); |
4097 | - goto err_dispose; |
4098 | + goto err_unprepare; |
4099 | } |
4100 | |
4101 | ret = reset_control_deassert(info->rst); |
4102 | if (ret) |
4103 | - goto err_dispose; |
4104 | + goto err_unprepare; |
4105 | |
4106 | port->type = type; |
4107 | port->uartclk = clk; |
4108 | @@ -184,8 +190,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev, |
4109 | port->handle_irq = fsl8250_handle_irq; |
4110 | |
4111 | return 0; |
4112 | -err_dispose: |
4113 | - irq_dispose_mapping(port->irq); |
4114 | err_unprepare: |
4115 | clk_disable_unprepare(info->clk); |
4116 | err_pmruntime: |
4117 | diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c |
4118 | index 243c96025053..47b41159a8bc 100644 |
4119 | --- a/drivers/tty/serial/sc16is7xx.c |
4120 | +++ b/drivers/tty/serial/sc16is7xx.c |
4121 | @@ -657,7 +657,7 @@ static void sc16is7xx_handle_tx(struct uart_port *port) |
4122 | uart_write_wakeup(port); |
4123 | } |
4124 | |
4125 | -static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno) |
4126 | +static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno) |
4127 | { |
4128 | struct uart_port *port = &s->p[portno].port; |
4129 | |
4130 | @@ -666,7 +666,7 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno) |
4131 | |
4132 | iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG); |
4133 | if (iir & SC16IS7XX_IIR_NO_INT_BIT) |
4134 | - break; |
4135 | + return false; |
4136 | |
4137 | iir &= SC16IS7XX_IIR_ID_MASK; |
4138 | |
4139 | @@ -688,16 +688,23 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno) |
4140 | port->line, iir); |
4141 | break; |
4142 | } |
4143 | - } while (1); |
4144 | + } while (0); |
4145 | + return true; |
4146 | } |
4147 | |
4148 | static void sc16is7xx_ist(struct kthread_work *ws) |
4149 | { |
4150 | struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work); |
4151 | - int i; |
4152 | |
4153 | - for (i = 0; i < s->devtype->nr_uart; ++i) |
4154 | - sc16is7xx_port_irq(s, i); |
4155 | + while (1) { |
4156 | + bool keep_polling = false; |
4157 | + int i; |
4158 | + |
4159 | + for (i = 0; i < s->devtype->nr_uart; ++i) |
4160 | + keep_polling |= sc16is7xx_port_irq(s, i); |
4161 | + if (!keep_polling) |
4162 | + break; |
4163 | + } |
4164 | } |
4165 | |
4166 | static irqreturn_t sc16is7xx_irq(int irq, void *dev_id) |
4167 | diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c |
4168 | index ab3f6e91853d..3649b83ef863 100644 |
4169 | --- a/drivers/tty/serial/sh-sci.c |
4170 | +++ b/drivers/tty/serial/sh-sci.c |
4171 | @@ -3102,6 +3102,7 @@ static struct uart_driver sci_uart_driver = { |
4172 | static int sci_remove(struct platform_device *dev) |
4173 | { |
4174 | struct sci_port *port = platform_get_drvdata(dev); |
4175 | + unsigned int type = port->port.type; /* uart_remove_... clears it */ |
4176 | |
4177 | sci_ports_in_use &= ~BIT(port->port.line); |
4178 | uart_remove_one_port(&sci_uart_driver, &port->port); |
4179 | @@ -3112,8 +3113,7 @@ static int sci_remove(struct platform_device *dev) |
4180 | sysfs_remove_file(&dev->dev.kobj, |
4181 | &dev_attr_rx_fifo_trigger.attr); |
4182 | } |
4183 | - if (port->port.type == PORT_SCIFA || port->port.type == PORT_SCIFB || |
4184 | - port->port.type == PORT_HSCIF) { |
4185 | + if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF) { |
4186 | sysfs_remove_file(&dev->dev.kobj, |
4187 | &dev_attr_rx_fifo_timeout.attr); |
4188 | } |
4189 | diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c |
4190 | index 7576ceace571..f438eaa68246 100644 |
4191 | --- a/drivers/tty/tty_baudrate.c |
4192 | +++ b/drivers/tty/tty_baudrate.c |
4193 | @@ -77,7 +77,7 @@ speed_t tty_termios_baud_rate(struct ktermios *termios) |
4194 | else |
4195 | cbaud += 15; |
4196 | } |
4197 | - return baud_table[cbaud]; |
4198 | + return cbaud >= n_baud_table ? 0 : baud_table[cbaud]; |
4199 | } |
4200 | EXPORT_SYMBOL(tty_termios_baud_rate); |
4201 | |
4202 | @@ -113,7 +113,7 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios) |
4203 | else |
4204 | cbaud += 15; |
4205 | } |
4206 | - return baud_table[cbaud]; |
4207 | + return cbaud >= n_baud_table ? 0 : baud_table[cbaud]; |
4208 | #else /* IBSHIFT */ |
4209 | return tty_termios_baud_rate(termios); |
4210 | #endif /* IBSHIFT */ |
4211 | diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c |
4212 | index 5e5da9acaf0a..252eef2c32f9 100644 |
4213 | --- a/drivers/tty/tty_io.c |
4214 | +++ b/drivers/tty/tty_io.c |
4215 | @@ -408,7 +408,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line) |
4216 | mutex_lock(&tty_mutex); |
4217 | /* Search through the tty devices to look for a match */ |
4218 | list_for_each_entry(p, &tty_drivers, tty_drivers) { |
4219 | - if (strncmp(name, p->name, len) != 0) |
4220 | + if (!len || strncmp(name, p->name, len) != 0) |
4221 | continue; |
4222 | stp = str; |
4223 | if (*stp == ',') |
4224 | diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c |
4225 | index c24bb690680b..e7e3ae13516d 100644 |
4226 | --- a/drivers/vhost/scsi.c |
4227 | +++ b/drivers/vhost/scsi.c |
4228 | @@ -964,7 +964,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) |
4229 | prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin); |
4230 | } |
4231 | /* |
4232 | - * Set prot_iter to data_iter, and advance past any |
4233 | + * Set prot_iter to data_iter and truncate it to |
4234 | + * prot_bytes, and advance data_iter past any |
4235 | * preceeding prot_bytes that may be present. |
4236 | * |
4237 | * Also fix up the exp_data_len to reflect only the |
4238 | @@ -973,6 +974,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) |
4239 | if (prot_bytes) { |
4240 | exp_data_len -= prot_bytes; |
4241 | prot_iter = data_iter; |
4242 | + iov_iter_truncate(&prot_iter, prot_bytes); |
4243 | iov_iter_advance(&data_iter, prot_bytes); |
4244 | } |
4245 | tag = vhost64_to_cpu(vq, v_req_pi.tag); |
4246 | diff --git a/drivers/video/fbdev/aty/mach64_accel.c b/drivers/video/fbdev/aty/mach64_accel.c |
4247 | index 2541a0e0de76..3ad46255f990 100644 |
4248 | --- a/drivers/video/fbdev/aty/mach64_accel.c |
4249 | +++ b/drivers/video/fbdev/aty/mach64_accel.c |
4250 | @@ -127,7 +127,7 @@ void aty_init_engine(struct atyfb_par *par, struct fb_info *info) |
4251 | |
4252 | /* set host attributes */ |
4253 | wait_for_fifo(13, par); |
4254 | - aty_st_le32(HOST_CNTL, 0, par); |
4255 | + aty_st_le32(HOST_CNTL, HOST_BYTE_ALIGN, par); |
4256 | |
4257 | /* set pattern attributes */ |
4258 | aty_st_le32(PAT_REG0, 0, par); |
4259 | @@ -233,7 +233,8 @@ void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area) |
4260 | rotation = rotation24bpp(dx, direction); |
4261 | } |
4262 | |
4263 | - wait_for_fifo(4, par); |
4264 | + wait_for_fifo(5, par); |
4265 | + aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par); |
4266 | aty_st_le32(DP_SRC, FRGD_SRC_BLIT, par); |
4267 | aty_st_le32(SRC_Y_X, (sx << 16) | sy, par); |
4268 | aty_st_le32(SRC_HEIGHT1_WIDTH1, (width << 16) | area->height, par); |
4269 | @@ -269,7 +270,8 @@ void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) |
4270 | rotation = rotation24bpp(dx, DST_X_LEFT_TO_RIGHT); |
4271 | } |
4272 | |
4273 | - wait_for_fifo(3, par); |
4274 | + wait_for_fifo(4, par); |
4275 | + aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par); |
4276 | aty_st_le32(DP_FRGD_CLR, color, par); |
4277 | aty_st_le32(DP_SRC, |
4278 | BKGD_SRC_BKGD_CLR | FRGD_SRC_FRGD_CLR | MONO_SRC_ONE, |
4279 | @@ -284,7 +286,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image) |
4280 | { |
4281 | struct atyfb_par *par = (struct atyfb_par *) info->par; |
4282 | u32 src_bytes, dx = image->dx, dy = image->dy, width = image->width; |
4283 | - u32 pix_width_save, pix_width, host_cntl, rotation = 0, src, mix; |
4284 | + u32 pix_width, rotation = 0, src, mix; |
4285 | |
4286 | if (par->asleep) |
4287 | return; |
4288 | @@ -296,8 +298,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image) |
4289 | return; |
4290 | } |
4291 | |
4292 | - pix_width = pix_width_save = aty_ld_le32(DP_PIX_WIDTH, par); |
4293 | - host_cntl = aty_ld_le32(HOST_CNTL, par) | HOST_BYTE_ALIGN; |
4294 | + pix_width = par->crtc.dp_pix_width; |
4295 | |
4296 | switch (image->depth) { |
4297 | case 1: |
4298 | @@ -345,7 +346,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image) |
4299 | * since Rage 3D IIc we have DP_HOST_TRIPLE_EN bit |
4300 | * this hwaccelerated triple has an issue with not aligned data |
4301 | */ |
4302 | - if (M64_HAS(HW_TRIPLE) && image->width % 8 == 0) |
4303 | + if (image->depth == 1 && M64_HAS(HW_TRIPLE) && image->width % 8 == 0) |
4304 | pix_width |= DP_HOST_TRIPLE_EN; |
4305 | } |
4306 | |
4307 | @@ -370,19 +371,18 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image) |
4308 | mix = FRGD_MIX_D_XOR_S | BKGD_MIX_D; |
4309 | } |
4310 | |
4311 | - wait_for_fifo(6, par); |
4312 | - aty_st_le32(DP_WRITE_MASK, 0xFFFFFFFF, par); |
4313 | + wait_for_fifo(5, par); |
4314 | aty_st_le32(DP_PIX_WIDTH, pix_width, par); |
4315 | aty_st_le32(DP_MIX, mix, par); |
4316 | aty_st_le32(DP_SRC, src, par); |
4317 | - aty_st_le32(HOST_CNTL, host_cntl, par); |
4318 | + aty_st_le32(HOST_CNTL, HOST_BYTE_ALIGN, par); |
4319 | aty_st_le32(DST_CNTL, DST_Y_TOP_TO_BOTTOM | DST_X_LEFT_TO_RIGHT | rotation, par); |
4320 | |
4321 | draw_rect(dx, dy, width, image->height, par); |
4322 | src_bytes = (((image->width * image->depth) + 7) / 8) * image->height; |
4323 | |
4324 | /* manual triple each pixel */ |
4325 | - if (info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) { |
4326 | + if (image->depth == 1 && info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) { |
4327 | int inbit, outbit, mult24, byte_id_in_dword, width; |
4328 | u8 *pbitmapin = (u8*)image->data, *pbitmapout; |
4329 | u32 hostdword; |
4330 | @@ -415,7 +415,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image) |
4331 | } |
4332 | } |
4333 | wait_for_fifo(1, par); |
4334 | - aty_st_le32(HOST_DATA0, hostdword, par); |
4335 | + aty_st_le32(HOST_DATA0, le32_to_cpu(hostdword), par); |
4336 | } |
4337 | } else { |
4338 | u32 *pbitmap, dwords = (src_bytes + 3) / 4; |
4339 | @@ -424,8 +424,4 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image) |
4340 | aty_st_le32(HOST_DATA0, get_unaligned_le32(pbitmap), par); |
4341 | } |
4342 | } |
4343 | - |
4344 | - /* restore pix_width */ |
4345 | - wait_for_fifo(1, par); |
4346 | - aty_st_le32(DP_PIX_WIDTH, pix_width_save, par); |
4347 | } |
4348 | diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c |
4349 | index 5f2e48d41d72..ab3d5f5dbb00 100644 |
4350 | --- a/fs/9p/vfs_file.c |
4351 | +++ b/fs/9p/vfs_file.c |
4352 | @@ -204,6 +204,14 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl) |
4353 | break; |
4354 | if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0) |
4355 | break; |
4356 | + /* |
4357 | + * p9_client_lock_dotl overwrites flock.client_id with the |
4358 | + * server message, free and reuse the client name |
4359 | + */ |
4360 | + if (flock.client_id != fid->clnt->name) { |
4361 | + kfree(flock.client_id); |
4362 | + flock.client_id = fid->clnt->name; |
4363 | + } |
4364 | } |
4365 | |
4366 | /* map 9p status to VFS status */ |
4367 | @@ -235,6 +243,8 @@ out_unlock: |
4368 | locks_lock_file_wait(filp, fl); |
4369 | fl->fl_type = fl_type; |
4370 | } |
4371 | + if (flock.client_id != fid->clnt->name) |
4372 | + kfree(flock.client_id); |
4373 | out: |
4374 | return res; |
4375 | } |
4376 | @@ -269,7 +279,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl) |
4377 | |
4378 | res = p9_client_getlock_dotl(fid, &glock); |
4379 | if (res < 0) |
4380 | - return res; |
4381 | + goto out; |
4382 | /* map 9p lock type to os lock type */ |
4383 | switch (glock.type) { |
4384 | case P9_LOCK_TYPE_RDLCK: |
4385 | @@ -290,7 +300,9 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl) |
4386 | fl->fl_end = glock.start + glock.length - 1; |
4387 | fl->fl_pid = -glock.proc_id; |
4388 | } |
4389 | - kfree(glock.client_id); |
4390 | +out: |
4391 | + if (glock.client_id != fid->clnt->name) |
4392 | + kfree(glock.client_id); |
4393 | return res; |
4394 | } |
4395 | |
4396 | diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c |
4397 | index 05dc3c17cb62..dc52ce5e495f 100644 |
4398 | --- a/fs/btrfs/disk-io.c |
4399 | +++ b/fs/btrfs/disk-io.c |
4400 | @@ -4359,13 +4359,23 @@ static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info, |
4401 | unpin = pinned_extents; |
4402 | again: |
4403 | while (1) { |
4404 | + /* |
4405 | + * The btrfs_finish_extent_commit() may get the same range as |
4406 | + * ours between find_first_extent_bit and clear_extent_dirty. |
4407 | + * Hence, hold the unused_bg_unpin_mutex to avoid double unpin |
4408 | + * the same extent range. |
4409 | + */ |
4410 | + mutex_lock(&fs_info->unused_bg_unpin_mutex); |
4411 | ret = find_first_extent_bit(unpin, 0, &start, &end, |
4412 | EXTENT_DIRTY, NULL); |
4413 | - if (ret) |
4414 | + if (ret) { |
4415 | + mutex_unlock(&fs_info->unused_bg_unpin_mutex); |
4416 | break; |
4417 | + } |
4418 | |
4419 | clear_extent_dirty(unpin, start, end); |
4420 | btrfs_error_unpin_extent_range(fs_info, start, end); |
4421 | + mutex_unlock(&fs_info->unused_bg_unpin_mutex); |
4422 | cond_resched(); |
4423 | } |
4424 | |
4425 | diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c |
4426 | index 83268d8f48c4..7158b5b77c9d 100644 |
4427 | --- a/fs/btrfs/inode.c |
4428 | +++ b/fs/btrfs/inode.c |
4429 | @@ -1532,12 +1532,11 @@ out_check: |
4430 | } |
4431 | btrfs_release_path(path); |
4432 | |
4433 | - if (cur_offset <= end && cow_start == (u64)-1) { |
4434 | + if (cur_offset <= end && cow_start == (u64)-1) |
4435 | cow_start = cur_offset; |
4436 | - cur_offset = end; |
4437 | - } |
4438 | |
4439 | if (cow_start != (u64)-1) { |
4440 | + cur_offset = end; |
4441 | ret = cow_file_range(inode, locked_page, cow_start, end, end, |
4442 | page_started, nr_written, 1, NULL); |
4443 | if (ret) |
4444 | diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c |
4445 | index bd4767f562cd..c9152155fcbf 100644 |
4446 | --- a/fs/btrfs/ioctl.c |
4447 | +++ b/fs/btrfs/ioctl.c |
4448 | @@ -3489,6 +3489,8 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen, |
4449 | const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize; |
4450 | |
4451 | len = round_down(i_size_read(src), sz) - loff; |
4452 | + if (len == 0) |
4453 | + return 0; |
4454 | olen = len; |
4455 | } |
4456 | } |
4457 | @@ -4278,9 +4280,17 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src, |
4458 | goto out_unlock; |
4459 | if (len == 0) |
4460 | olen = len = src->i_size - off; |
4461 | - /* if we extend to eof, continue to block boundary */ |
4462 | - if (off + len == src->i_size) |
4463 | + /* |
4464 | + * If we extend to eof, continue to block boundary if and only if the |
4465 | + * destination end offset matches the destination file's size, otherwise |
4466 | + * we would be corrupting data by placing the eof block into the middle |
4467 | + * of a file. |
4468 | + */ |
4469 | + if (off + len == src->i_size) { |
4470 | + if (!IS_ALIGNED(len, bs) && destoff + len < inode->i_size) |
4471 | + goto out_unlock; |
4472 | len = ALIGN(src->i_size, bs) - off; |
4473 | + } |
4474 | |
4475 | if (len == 0) { |
4476 | ret = 0; |
4477 | diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c |
4478 | index db835635372f..d385778b628c 100644 |
4479 | --- a/fs/btrfs/tree-checker.c |
4480 | +++ b/fs/btrfs/tree-checker.c |
4481 | @@ -440,7 +440,7 @@ static int check_block_group_item(struct btrfs_fs_info *fs_info, |
4482 | type != (BTRFS_BLOCK_GROUP_METADATA | |
4483 | BTRFS_BLOCK_GROUP_DATA)) { |
4484 | block_group_err(fs_info, leaf, slot, |
4485 | -"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llu or 0x%llx", |
4486 | +"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx", |
4487 | type, hweight64(type), |
4488 | BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA, |
4489 | BTRFS_BLOCK_GROUP_SYSTEM, |
4490 | diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c |
4491 | index d0bcfbfc0e3a..16ecb76fa53c 100644 |
4492 | --- a/fs/btrfs/tree-log.c |
4493 | +++ b/fs/btrfs/tree-log.c |
4494 | @@ -4399,6 +4399,23 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, |
4495 | logged_end = end; |
4496 | |
4497 | list_for_each_entry_safe(em, n, &tree->modified_extents, list) { |
4498 | + /* |
4499 | + * Skip extents outside our logging range. It's important to do |
4500 | + * it for correctness because if we don't ignore them, we may |
4501 | + * log them before their ordered extent completes, and therefore |
4502 | + * we could log them without logging their respective checksums |
4503 | + * (the checksum items are added to the csum tree at the very |
4504 | + * end of btrfs_finish_ordered_io()). Also leave such extents |
4505 | + * outside of our range in the list, since we may have another |
4506 | + * ranged fsync in the near future that needs them. If an extent |
4507 | + * outside our range corresponds to a hole, log it to avoid |
4508 | + * leaving gaps between extents (fsck will complain when we are |
4509 | + * not using the NO_HOLES feature). |
4510 | + */ |
4511 | + if ((em->start > end || em->start + em->len <= start) && |
4512 | + em->block_start != EXTENT_MAP_HOLE) |
4513 | + continue; |
4514 | + |
4515 | list_del_init(&em->list); |
4516 | /* |
4517 | * Just an arbitrary number, this can be really CPU intensive |
4518 | diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c |
4519 | index ebc7bdaed2d0..4055ab4d5c52 100644 |
4520 | --- a/fs/ceph/inode.c |
4521 | +++ b/fs/ceph/inode.c |
4522 | @@ -1132,8 +1132,12 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in) |
4523 | if (IS_ERR(realdn)) { |
4524 | pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n", |
4525 | PTR_ERR(realdn), dn, in, ceph_vinop(in)); |
4526 | - dput(dn); |
4527 | - dn = realdn; /* note realdn contains the error */ |
4528 | + dn = realdn; |
4529 | + /* |
4530 | + * Caller should release 'dn' in the case of error. |
4531 | + * If 'req->r_dentry' is passed to this function, |
4532 | + * caller should leave 'req->r_dentry' untouched. |
4533 | + */ |
4534 | goto out; |
4535 | } else if (realdn) { |
4536 | dout("dn %p (%d) spliced with %p (%d) " |
4537 | diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c |
4538 | index d767e993591d..244531d3065a 100644 |
4539 | --- a/fs/ext4/inode.c |
4540 | +++ b/fs/ext4/inode.c |
4541 | @@ -5804,9 +5804,10 @@ int ext4_mark_iloc_dirty(handle_t *handle, |
4542 | { |
4543 | int err = 0; |
4544 | |
4545 | - if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) |
4546 | + if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) { |
4547 | + put_bh(iloc->bh); |
4548 | return -EIO; |
4549 | - |
4550 | + } |
4551 | if (IS_I_VERSION(inode)) |
4552 | inode_inc_iversion(inode); |
4553 | |
4554 | diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c |
4555 | index 377d516c475f..ffa25753e929 100644 |
4556 | --- a/fs/ext4/namei.c |
4557 | +++ b/fs/ext4/namei.c |
4558 | @@ -126,6 +126,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode, |
4559 | if (!is_dx_block && type == INDEX) { |
4560 | ext4_error_inode(inode, func, line, block, |
4561 | "directory leaf block found instead of index block"); |
4562 | + brelse(bh); |
4563 | return ERR_PTR(-EFSCORRUPTED); |
4564 | } |
4565 | if (!ext4_has_metadata_csum(inode->i_sb) || |
4566 | @@ -2811,7 +2812,9 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode) |
4567 | list_del_init(&EXT4_I(inode)->i_orphan); |
4568 | mutex_unlock(&sbi->s_orphan_lock); |
4569 | } |
4570 | - } |
4571 | + } else |
4572 | + brelse(iloc.bh); |
4573 | + |
4574 | jbd_debug(4, "superblock will point to %lu\n", inode->i_ino); |
4575 | jbd_debug(4, "orphan inode %lu will point to %d\n", |
4576 | inode->i_ino, NEXT_ORPHAN(inode)); |
4577 | diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c |
4578 | index ebbc663d0798..a5efee34415f 100644 |
4579 | --- a/fs/ext4/resize.c |
4580 | +++ b/fs/ext4/resize.c |
4581 | @@ -459,16 +459,18 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle, |
4582 | |
4583 | BUFFER_TRACE(bh, "get_write_access"); |
4584 | err = ext4_journal_get_write_access(handle, bh); |
4585 | - if (err) |
4586 | + if (err) { |
4587 | + brelse(bh); |
4588 | return err; |
4589 | + } |
4590 | ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", |
4591 | first_cluster, first_cluster - start, count2); |
4592 | ext4_set_bits(bh->b_data, first_cluster - start, count2); |
4593 | |
4594 | err = ext4_handle_dirty_metadata(handle, NULL, bh); |
4595 | + brelse(bh); |
4596 | if (unlikely(err)) |
4597 | return err; |
4598 | - brelse(bh); |
4599 | } |
4600 | |
4601 | return 0; |
4602 | @@ -605,7 +607,6 @@ handle_bb: |
4603 | bh = bclean(handle, sb, block); |
4604 | if (IS_ERR(bh)) { |
4605 | err = PTR_ERR(bh); |
4606 | - bh = NULL; |
4607 | goto out; |
4608 | } |
4609 | overhead = ext4_group_overhead_blocks(sb, group); |
4610 | @@ -618,9 +619,9 @@ handle_bb: |
4611 | ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count), |
4612 | sb->s_blocksize * 8, bh->b_data); |
4613 | err = ext4_handle_dirty_metadata(handle, NULL, bh); |
4614 | + brelse(bh); |
4615 | if (err) |
4616 | goto out; |
4617 | - brelse(bh); |
4618 | |
4619 | handle_ib: |
4620 | if (bg_flags[i] & EXT4_BG_INODE_UNINIT) |
4621 | @@ -635,18 +636,16 @@ handle_ib: |
4622 | bh = bclean(handle, sb, block); |
4623 | if (IS_ERR(bh)) { |
4624 | err = PTR_ERR(bh); |
4625 | - bh = NULL; |
4626 | goto out; |
4627 | } |
4628 | |
4629 | ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), |
4630 | sb->s_blocksize * 8, bh->b_data); |
4631 | err = ext4_handle_dirty_metadata(handle, NULL, bh); |
4632 | + brelse(bh); |
4633 | if (err) |
4634 | goto out; |
4635 | - brelse(bh); |
4636 | } |
4637 | - bh = NULL; |
4638 | |
4639 | /* Mark group tables in block bitmap */ |
4640 | for (j = 0; j < GROUP_TABLE_COUNT; j++) { |
4641 | @@ -685,7 +684,6 @@ handle_ib: |
4642 | } |
4643 | |
4644 | out: |
4645 | - brelse(bh); |
4646 | err2 = ext4_journal_stop(handle); |
4647 | if (err2 && !err) |
4648 | err = err2; |
4649 | @@ -873,6 +871,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, |
4650 | err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); |
4651 | if (unlikely(err)) { |
4652 | ext4_std_error(sb, err); |
4653 | + iloc.bh = NULL; |
4654 | goto exit_inode; |
4655 | } |
4656 | brelse(dind); |
4657 | @@ -924,6 +923,7 @@ static int add_new_gdb_meta_bg(struct super_block *sb, |
4658 | sizeof(struct buffer_head *), |
4659 | GFP_NOFS); |
4660 | if (!n_group_desc) { |
4661 | + brelse(gdb_bh); |
4662 | err = -ENOMEM; |
4663 | ext4_warning(sb, "not enough memory for %lu groups", |
4664 | gdb_num + 1); |
4665 | @@ -939,8 +939,6 @@ static int add_new_gdb_meta_bg(struct super_block *sb, |
4666 | kvfree(o_group_desc); |
4667 | BUFFER_TRACE(gdb_bh, "get_write_access"); |
4668 | err = ext4_journal_get_write_access(handle, gdb_bh); |
4669 | - if (unlikely(err)) |
4670 | - brelse(gdb_bh); |
4671 | return err; |
4672 | } |
4673 | |
4674 | @@ -1124,8 +1122,10 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data, |
4675 | backup_block, backup_block - |
4676 | ext4_group_first_block_no(sb, group)); |
4677 | BUFFER_TRACE(bh, "get_write_access"); |
4678 | - if ((err = ext4_journal_get_write_access(handle, bh))) |
4679 | + if ((err = ext4_journal_get_write_access(handle, bh))) { |
4680 | + brelse(bh); |
4681 | break; |
4682 | + } |
4683 | lock_buffer(bh); |
4684 | memcpy(bh->b_data, data, size); |
4685 | if (rest) |
4686 | @@ -2023,7 +2023,7 @@ retry: |
4687 | |
4688 | err = ext4_alloc_flex_bg_array(sb, n_group + 1); |
4689 | if (err) |
4690 | - return err; |
4691 | + goto out; |
4692 | |
4693 | err = ext4_mb_alloc_groupinfo(sb, n_group + 1); |
4694 | if (err) |
4695 | @@ -2059,6 +2059,10 @@ retry: |
4696 | n_blocks_count_retry = 0; |
4697 | free_flex_gd(flex_gd); |
4698 | flex_gd = NULL; |
4699 | + if (resize_inode) { |
4700 | + iput(resize_inode); |
4701 | + resize_inode = NULL; |
4702 | + } |
4703 | goto retry; |
4704 | } |
4705 | |
4706 | diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
4707 | index d3d4643ab79b..8a149df1c6a1 100644 |
4708 | --- a/fs/ext4/super.c |
4709 | +++ b/fs/ext4/super.c |
4710 | @@ -4074,6 +4074,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
4711 | sbi->s_groups_count = blocks_count; |
4712 | sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, |
4713 | (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); |
4714 | + if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) != |
4715 | + le32_to_cpu(es->s_inodes_count)) { |
4716 | + ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu", |
4717 | + le32_to_cpu(es->s_inodes_count), |
4718 | + ((u64)sbi->s_groups_count * sbi->s_inodes_per_group)); |
4719 | + ret = -EINVAL; |
4720 | + goto failed_mount; |
4721 | + } |
4722 | db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / |
4723 | EXT4_DESC_PER_BLOCK(sb); |
4724 | if (ext4_has_feature_meta_bg(sb)) { |
4725 | @@ -4093,14 +4101,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
4726 | ret = -ENOMEM; |
4727 | goto failed_mount; |
4728 | } |
4729 | - if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) != |
4730 | - le32_to_cpu(es->s_inodes_count)) { |
4731 | - ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu", |
4732 | - le32_to_cpu(es->s_inodes_count), |
4733 | - ((u64)sbi->s_groups_count * sbi->s_inodes_per_group)); |
4734 | - ret = -EINVAL; |
4735 | - goto failed_mount; |
4736 | - } |
4737 | |
4738 | bgl_lock_init(sbi->s_blockgroup_lock); |
4739 | |
4740 | @@ -4509,6 +4509,7 @@ failed_mount6: |
4741 | percpu_counter_destroy(&sbi->s_freeinodes_counter); |
4742 | percpu_counter_destroy(&sbi->s_dirs_counter); |
4743 | percpu_counter_destroy(&sbi->s_dirtyclusters_counter); |
4744 | + percpu_free_rwsem(&sbi->s_journal_flag_rwsem); |
4745 | failed_mount5: |
4746 | ext4_ext_release(sb); |
4747 | ext4_release_system_zone(sb); |
4748 | diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c |
4749 | index f36fc5d5b257..4380c8630539 100644 |
4750 | --- a/fs/ext4/xattr.c |
4751 | +++ b/fs/ext4/xattr.c |
4752 | @@ -1388,6 +1388,12 @@ retry: |
4753 | bh = ext4_getblk(handle, ea_inode, block, 0); |
4754 | if (IS_ERR(bh)) |
4755 | return PTR_ERR(bh); |
4756 | + if (!bh) { |
4757 | + WARN_ON_ONCE(1); |
4758 | + EXT4_ERROR_INODE(ea_inode, |
4759 | + "ext4_getblk() return bh = NULL"); |
4760 | + return -EFSCORRUPTED; |
4761 | + } |
4762 | ret = ext4_journal_get_write_access(handle, bh); |
4763 | if (ret) |
4764 | goto out; |
4765 | @@ -2276,8 +2282,10 @@ static struct buffer_head *ext4_xattr_get_block(struct inode *inode) |
4766 | if (!bh) |
4767 | return ERR_PTR(-EIO); |
4768 | error = ext4_xattr_check_block(inode, bh); |
4769 | - if (error) |
4770 | + if (error) { |
4771 | + brelse(bh); |
4772 | return ERR_PTR(error); |
4773 | + } |
4774 | return bh; |
4775 | } |
4776 | |
4777 | @@ -2397,6 +2405,8 @@ retry_inode: |
4778 | error = ext4_xattr_block_set(handle, inode, &i, &bs); |
4779 | } else if (error == -ENOSPC) { |
4780 | if (EXT4_I(inode)->i_file_acl && !bs.s.base) { |
4781 | + brelse(bs.bh); |
4782 | + bs.bh = NULL; |
4783 | error = ext4_xattr_block_find(inode, &i, &bs); |
4784 | if (error) |
4785 | goto cleanup; |
4786 | @@ -2617,6 +2627,8 @@ out: |
4787 | kfree(buffer); |
4788 | if (is) |
4789 | brelse(is->iloc.bh); |
4790 | + if (bs) |
4791 | + brelse(bs->bh); |
4792 | kfree(is); |
4793 | kfree(bs); |
4794 | |
4795 | @@ -2696,7 +2708,6 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize, |
4796 | struct ext4_inode *raw_inode, handle_t *handle) |
4797 | { |
4798 | struct ext4_xattr_ibody_header *header; |
4799 | - struct buffer_head *bh; |
4800 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
4801 | static unsigned int mnt_count; |
4802 | size_t min_offs; |
4803 | @@ -2737,13 +2748,17 @@ retry: |
4804 | * EA block can hold new_extra_isize bytes. |
4805 | */ |
4806 | if (EXT4_I(inode)->i_file_acl) { |
4807 | + struct buffer_head *bh; |
4808 | + |
4809 | bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); |
4810 | error = -EIO; |
4811 | if (!bh) |
4812 | goto cleanup; |
4813 | error = ext4_xattr_check_block(inode, bh); |
4814 | - if (error) |
4815 | + if (error) { |
4816 | + brelse(bh); |
4817 | goto cleanup; |
4818 | + } |
4819 | base = BHDR(bh); |
4820 | end = bh->b_data + bh->b_size; |
4821 | min_offs = end - base; |
4822 | diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c |
4823 | index 11ea2c4a38ab..bf0da0382c9e 100644 |
4824 | --- a/fs/fuse/dev.c |
4825 | +++ b/fs/fuse/dev.c |
4826 | @@ -129,9 +129,13 @@ static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) |
4827 | |
4828 | static void fuse_drop_waiting(struct fuse_conn *fc) |
4829 | { |
4830 | - if (fc->connected) { |
4831 | - atomic_dec(&fc->num_waiting); |
4832 | - } else if (atomic_dec_and_test(&fc->num_waiting)) { |
4833 | + /* |
4834 | + * lockess check of fc->connected is okay, because atomic_dec_and_test() |
4835 | + * provides a memory barrier mached with the one in fuse_wait_aborted() |
4836 | + * to ensure no wake-up is missed. |
4837 | + */ |
4838 | + if (atomic_dec_and_test(&fc->num_waiting) && |
4839 | + !READ_ONCE(fc->connected)) { |
4840 | /* wake up aborters */ |
4841 | wake_up_all(&fc->blocked_waitq); |
4842 | } |
4843 | @@ -391,12 +395,19 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) |
4844 | if (test_bit(FR_BACKGROUND, &req->flags)) { |
4845 | spin_lock(&fc->lock); |
4846 | clear_bit(FR_BACKGROUND, &req->flags); |
4847 | - if (fc->num_background == fc->max_background) |
4848 | + if (fc->num_background == fc->max_background) { |
4849 | fc->blocked = 0; |
4850 | - |
4851 | - /* Wake up next waiter, if any */ |
4852 | - if (!fc->blocked && waitqueue_active(&fc->blocked_waitq)) |
4853 | wake_up(&fc->blocked_waitq); |
4854 | + } else if (!fc->blocked) { |
4855 | + /* |
4856 | + * Wake up next waiter, if any. It's okay to use |
4857 | + * waitqueue_active(), as we've already synced up |
4858 | + * fc->blocked with waiters with the wake_up() call |
4859 | + * above. |
4860 | + */ |
4861 | + if (waitqueue_active(&fc->blocked_waitq)) |
4862 | + wake_up(&fc->blocked_waitq); |
4863 | + } |
4864 | |
4865 | if (fc->num_background == fc->congestion_threshold && fc->sb) { |
4866 | clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC); |
4867 | @@ -1311,12 +1322,14 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file, |
4868 | goto out_end; |
4869 | } |
4870 | list_move_tail(&req->list, &fpq->processing); |
4871 | - spin_unlock(&fpq->lock); |
4872 | + __fuse_get_request(req); |
4873 | set_bit(FR_SENT, &req->flags); |
4874 | + spin_unlock(&fpq->lock); |
4875 | /* matches barrier in request_wait_answer() */ |
4876 | smp_mb__after_atomic(); |
4877 | if (test_bit(FR_INTERRUPTED, &req->flags)) |
4878 | queue_interrupt(fiq, req); |
4879 | + fuse_put_request(fc, req); |
4880 | |
4881 | return reqsize; |
4882 | |
4883 | @@ -1715,8 +1728,10 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, |
4884 | req->in.args[1].size = total_len; |
4885 | |
4886 | err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique); |
4887 | - if (err) |
4888 | + if (err) { |
4889 | fuse_retrieve_end(fc, req); |
4890 | + fuse_put_request(fc, req); |
4891 | + } |
4892 | |
4893 | return err; |
4894 | } |
4895 | @@ -1875,16 +1890,20 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud, |
4896 | |
4897 | /* Is it an interrupt reply? */ |
4898 | if (req->intr_unique == oh.unique) { |
4899 | + __fuse_get_request(req); |
4900 | spin_unlock(&fpq->lock); |
4901 | |
4902 | err = -EINVAL; |
4903 | - if (nbytes != sizeof(struct fuse_out_header)) |
4904 | + if (nbytes != sizeof(struct fuse_out_header)) { |
4905 | + fuse_put_request(fc, req); |
4906 | goto err_finish; |
4907 | + } |
4908 | |
4909 | if (oh.error == -ENOSYS) |
4910 | fc->no_interrupt = 1; |
4911 | else if (oh.error == -EAGAIN) |
4912 | queue_interrupt(&fc->iq, req); |
4913 | + fuse_put_request(fc, req); |
4914 | |
4915 | fuse_copy_finish(cs); |
4916 | return nbytes; |
4917 | @@ -2152,6 +2171,8 @@ EXPORT_SYMBOL_GPL(fuse_abort_conn); |
4918 | |
4919 | void fuse_wait_aborted(struct fuse_conn *fc) |
4920 | { |
4921 | + /* matches implicit memory barrier in fuse_drop_waiting() */ |
4922 | + smp_mb(); |
4923 | wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0); |
4924 | } |
4925 | |
4926 | diff --git a/fs/fuse/file.c b/fs/fuse/file.c |
4927 | index 32d0b883e74f..a0ffed34b85d 100644 |
4928 | --- a/fs/fuse/file.c |
4929 | +++ b/fs/fuse/file.c |
4930 | @@ -2913,10 +2913,12 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) |
4931 | } |
4932 | |
4933 | if (io->async) { |
4934 | + bool blocking = io->blocking; |
4935 | + |
4936 | fuse_aio_complete(io, ret < 0 ? ret : 0, -1); |
4937 | |
4938 | /* we have a non-extending, async request, so return */ |
4939 | - if (!io->blocking) |
4940 | + if (!blocking) |
4941 | return -EIOCBQUEUED; |
4942 | |
4943 | wait_for_completion(&wait); |
4944 | diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c |
4945 | index 84544a4f012d..8748539c04ed 100644 |
4946 | --- a/fs/gfs2/bmap.c |
4947 | +++ b/fs/gfs2/bmap.c |
4948 | @@ -1908,10 +1908,16 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length) |
4949 | if (ret < 0) |
4950 | goto out; |
4951 | |
4952 | - /* issue read-ahead on metadata */ |
4953 | - if (mp.mp_aheight > 1) { |
4954 | - for (; ret > 1; ret--) { |
4955 | - metapointer_range(&mp, mp.mp_aheight - ret, |
4956 | + /* On the first pass, issue read-ahead on metadata. */ |
4957 | + if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) { |
4958 | + unsigned int height = mp.mp_aheight - 1; |
4959 | + |
4960 | + /* No read-ahead for data blocks. */ |
4961 | + if (mp.mp_aheight - 1 == strip_h) |
4962 | + height--; |
4963 | + |
4964 | + for (; height >= mp.mp_aheight - ret; height--) { |
4965 | + metapointer_range(&mp, height, |
4966 | start_list, start_aligned, |
4967 | end_list, end_aligned, |
4968 | &start, &end); |
4969 | diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c |
4970 | index 1ad3256b9cbc..449d0cb45a84 100644 |
4971 | --- a/fs/gfs2/rgrp.c |
4972 | +++ b/fs/gfs2/rgrp.c |
4973 | @@ -725,6 +725,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp) |
4974 | |
4975 | if (gl) { |
4976 | glock_clear_object(gl, rgd); |
4977 | + gfs2_rgrp_brelse(rgd); |
4978 | gfs2_glock_put(gl); |
4979 | } |
4980 | |
4981 | @@ -1141,7 +1142,7 @@ static u32 count_unlinked(struct gfs2_rgrpd *rgd) |
4982 | * @rgd: the struct gfs2_rgrpd describing the RG to read in |
4983 | * |
4984 | * Read in all of a Resource Group's header and bitmap blocks. |
4985 | - * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps. |
4986 | + * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps. |
4987 | * |
4988 | * Returns: errno |
4989 | */ |
4990 | diff --git a/fs/inode.c b/fs/inode.c |
4991 | index 42f6d25f32a5..65ae154df760 100644 |
4992 | --- a/fs/inode.c |
4993 | +++ b/fs/inode.c |
4994 | @@ -730,8 +730,11 @@ static enum lru_status inode_lru_isolate(struct list_head *item, |
4995 | return LRU_REMOVED; |
4996 | } |
4997 | |
4998 | - /* recently referenced inodes get one more pass */ |
4999 | - if (inode->i_state & I_REFERENCED) { |
5000 | + /* |
5001 | + * Recently referenced inodes and inodes with many attached pages |
5002 | + * get one more pass. |
5003 | + */ |
5004 | + if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) { |
5005 | inode->i_state &= ~I_REFERENCED; |
5006 | spin_unlock(&inode->i_lock); |
5007 | return LRU_ROTATE; |
5008 | diff --git a/fs/ioctl.c b/fs/ioctl.c |
5009 | index 2005529af560..0400297c8d72 100644 |
5010 | --- a/fs/ioctl.c |
5011 | +++ b/fs/ioctl.c |
5012 | @@ -669,6 +669,9 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, |
5013 | return ioctl_fiemap(filp, arg); |
5014 | |
5015 | case FIGETBSZ: |
5016 | + /* anon_bdev filesystems may not have a block size */ |
5017 | + if (!inode->i_sb->s_blocksize) |
5018 | + return -EINVAL; |
5019 | return put_user(inode->i_sb->s_blocksize, argp); |
5020 | |
5021 | case FICLONE: |
5022 | diff --git a/fs/namespace.c b/fs/namespace.c |
5023 | index 99186556f8d3..1fce41ba3535 100644 |
5024 | --- a/fs/namespace.c |
5025 | +++ b/fs/namespace.c |
5026 | @@ -695,9 +695,6 @@ static struct mountpoint *lookup_mountpoint(struct dentry *dentry) |
5027 | |
5028 | hlist_for_each_entry(mp, chain, m_hash) { |
5029 | if (mp->m_dentry == dentry) { |
5030 | - /* might be worth a WARN_ON() */ |
5031 | - if (d_unlinked(dentry)) |
5032 | - return ERR_PTR(-ENOENT); |
5033 | mp->m_count++; |
5034 | return mp; |
5035 | } |
5036 | @@ -711,6 +708,9 @@ static struct mountpoint *get_mountpoint(struct dentry *dentry) |
5037 | int ret; |
5038 | |
5039 | if (d_mountpoint(dentry)) { |
5040 | + /* might be worth a WARN_ON() */ |
5041 | + if (d_unlinked(dentry)) |
5042 | + return ERR_PTR(-ENOENT); |
5043 | mountpoint: |
5044 | read_seqlock_excl(&mount_lock); |
5045 | mp = lookup_mountpoint(dentry); |
5046 | @@ -1540,8 +1540,13 @@ static int do_umount(struct mount *mnt, int flags) |
5047 | |
5048 | namespace_lock(); |
5049 | lock_mount_hash(); |
5050 | - event++; |
5051 | |
5052 | + /* Recheck MNT_LOCKED with the locks held */ |
5053 | + retval = -EINVAL; |
5054 | + if (mnt->mnt.mnt_flags & MNT_LOCKED) |
5055 | + goto out; |
5056 | + |
5057 | + event++; |
5058 | if (flags & MNT_DETACH) { |
5059 | if (!list_empty(&mnt->mnt_list)) |
5060 | umount_tree(mnt, UMOUNT_PROPAGATE); |
5061 | @@ -1555,6 +1560,7 @@ static int do_umount(struct mount *mnt, int flags) |
5062 | retval = 0; |
5063 | } |
5064 | } |
5065 | +out: |
5066 | unlock_mount_hash(); |
5067 | namespace_unlock(); |
5068 | return retval; |
5069 | @@ -1645,7 +1651,7 @@ int ksys_umount(char __user *name, int flags) |
5070 | goto dput_and_out; |
5071 | if (!check_mnt(mnt)) |
5072 | goto dput_and_out; |
5073 | - if (mnt->mnt.mnt_flags & MNT_LOCKED) |
5074 | + if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */ |
5075 | goto dput_and_out; |
5076 | retval = -EPERM; |
5077 | if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN)) |
5078 | @@ -1728,8 +1734,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, |
5079 | for (s = r; s; s = next_mnt(s, r)) { |
5080 | if (!(flag & CL_COPY_UNBINDABLE) && |
5081 | IS_MNT_UNBINDABLE(s)) { |
5082 | - s = skip_mnt_tree(s); |
5083 | - continue; |
5084 | + if (s->mnt.mnt_flags & MNT_LOCKED) { |
5085 | + /* Both unbindable and locked. */ |
5086 | + q = ERR_PTR(-EPERM); |
5087 | + goto out; |
5088 | + } else { |
5089 | + s = skip_mnt_tree(s); |
5090 | + continue; |
5091 | + } |
5092 | } |
5093 | if (!(flag & CL_COPY_MNT_NS_FILE) && |
5094 | is_mnt_ns_file(s->mnt.mnt_root)) { |
5095 | @@ -1782,7 +1794,7 @@ void drop_collected_mounts(struct vfsmount *mnt) |
5096 | { |
5097 | namespace_lock(); |
5098 | lock_mount_hash(); |
5099 | - umount_tree(real_mount(mnt), UMOUNT_SYNC); |
5100 | + umount_tree(real_mount(mnt), 0); |
5101 | unlock_mount_hash(); |
5102 | namespace_unlock(); |
5103 | } |
5104 | diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c |
5105 | index 40a08cd483f0..18920152da14 100644 |
5106 | --- a/fs/nfs/nfs4state.c |
5107 | +++ b/fs/nfs/nfs4state.c |
5108 | @@ -2583,11 +2583,12 @@ static void nfs4_state_manager(struct nfs_client *clp) |
5109 | nfs4_clear_state_manager_bit(clp); |
5110 | /* Did we race with an attempt to give us more work? */ |
5111 | if (clp->cl_state == 0) |
5112 | - break; |
5113 | + return; |
5114 | if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) |
5115 | - break; |
5116 | + return; |
5117 | } while (refcount_read(&clp->cl_count) > 1); |
5118 | - return; |
5119 | + goto out_drain; |
5120 | + |
5121 | out_error: |
5122 | if (strlen(section)) |
5123 | section_sep = ": "; |
5124 | @@ -2595,6 +2596,7 @@ out_error: |
5125 | " with error %d\n", section_sep, section, |
5126 | clp->cl_hostname, -status); |
5127 | ssleep(1); |
5128 | +out_drain: |
5129 | nfs4_end_drain_session(clp); |
5130 | nfs4_clear_state_manager_bit(clp); |
5131 | } |
5132 | diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c |
5133 | index b7bc6e1a85ac..9d6b4f0f1a25 100644 |
5134 | --- a/fs/nfsd/nfs4proc.c |
5135 | +++ b/fs/nfsd/nfs4proc.c |
5136 | @@ -1037,6 +1037,9 @@ nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
5137 | { |
5138 | __be32 status; |
5139 | |
5140 | + if (!cstate->save_fh.fh_dentry) |
5141 | + return nfserr_nofilehandle; |
5142 | + |
5143 | status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh, |
5144 | src_stateid, RD_STATE, src, NULL); |
5145 | if (status) { |
5146 | diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c |
5147 | index 302cd7caa4a7..7578bd507c70 100644 |
5148 | --- a/fs/ocfs2/aops.c |
5149 | +++ b/fs/ocfs2/aops.c |
5150 | @@ -2412,8 +2412,16 @@ static int ocfs2_dio_end_io(struct kiocb *iocb, |
5151 | /* this io's submitter should not have unlocked this before we could */ |
5152 | BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); |
5153 | |
5154 | - if (bytes > 0 && private) |
5155 | - ret = ocfs2_dio_end_io_write(inode, private, offset, bytes); |
5156 | + if (bytes <= 0) |
5157 | + mlog_ratelimited(ML_ERROR, "Direct IO failed, bytes = %lld", |
5158 | + (long long)bytes); |
5159 | + if (private) { |
5160 | + if (bytes > 0) |
5161 | + ret = ocfs2_dio_end_io_write(inode, private, offset, |
5162 | + bytes); |
5163 | + else |
5164 | + ocfs2_dio_free_write_ctx(inode, private); |
5165 | + } |
5166 | |
5167 | ocfs2_iocb_clear_rw_locked(iocb); |
5168 | |
5169 | diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h |
5170 | index 308ea0eb35fd..a396096a5099 100644 |
5171 | --- a/fs/ocfs2/cluster/masklog.h |
5172 | +++ b/fs/ocfs2/cluster/masklog.h |
5173 | @@ -178,6 +178,15 @@ do { \ |
5174 | ##__VA_ARGS__); \ |
5175 | } while (0) |
5176 | |
5177 | +#define mlog_ratelimited(mask, fmt, ...) \ |
5178 | +do { \ |
5179 | + static DEFINE_RATELIMIT_STATE(_rs, \ |
5180 | + DEFAULT_RATELIMIT_INTERVAL, \ |
5181 | + DEFAULT_RATELIMIT_BURST); \ |
5182 | + if (__ratelimit(&_rs)) \ |
5183 | + mlog(mask, fmt, ##__VA_ARGS__); \ |
5184 | +} while (0) |
5185 | + |
5186 | #define mlog_errno(st) ({ \ |
5187 | int _st = (st); \ |
5188 | if (_st != -ERESTARTSYS && _st != -EINTR && \ |
5189 | diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c |
5190 | index b048d4fa3959..c121abbdfc7d 100644 |
5191 | --- a/fs/ocfs2/dir.c |
5192 | +++ b/fs/ocfs2/dir.c |
5193 | @@ -1897,8 +1897,7 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode, |
5194 | /* On error, skip the f_pos to the |
5195 | next block. */ |
5196 | ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1; |
5197 | - brelse(bh); |
5198 | - continue; |
5199 | + break; |
5200 | } |
5201 | if (le64_to_cpu(de->inode)) { |
5202 | unsigned char d_type = DT_UNKNOWN; |
5203 | diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c |
5204 | index 276914ae3c60..3bbde0a9f48f 100644 |
5205 | --- a/fs/overlayfs/dir.c |
5206 | +++ b/fs/overlayfs/dir.c |
5207 | @@ -463,6 +463,10 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, |
5208 | if (IS_ERR(upper)) |
5209 | goto out_unlock; |
5210 | |
5211 | + err = -ESTALE; |
5212 | + if (d_is_negative(upper) || !IS_WHITEOUT(d_inode(upper))) |
5213 | + goto out_dput; |
5214 | + |
5215 | newdentry = ovl_create_temp(workdir, cattr); |
5216 | err = PTR_ERR(newdentry); |
5217 | if (IS_ERR(newdentry)) |
5218 | @@ -663,6 +667,10 @@ static int ovl_link(struct dentry *old, struct inode *newdir, |
5219 | if (err) |
5220 | goto out_drop_write; |
5221 | |
5222 | + err = ovl_copy_up(new->d_parent); |
5223 | + if (err) |
5224 | + goto out_drop_write; |
5225 | + |
5226 | if (ovl_is_metacopy_dentry(old)) { |
5227 | err = ovl_set_redirect(old, false); |
5228 | if (err) |
5229 | diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c |
5230 | index 9c0ca6a7becf..efd372312ef1 100644 |
5231 | --- a/fs/overlayfs/namei.c |
5232 | +++ b/fs/overlayfs/namei.c |
5233 | @@ -422,8 +422,10 @@ int ovl_verify_set_fh(struct dentry *dentry, const char *name, |
5234 | |
5235 | fh = ovl_encode_real_fh(real, is_upper); |
5236 | err = PTR_ERR(fh); |
5237 | - if (IS_ERR(fh)) |
5238 | + if (IS_ERR(fh)) { |
5239 | + fh = NULL; |
5240 | goto fail; |
5241 | + } |
5242 | |
5243 | err = ovl_verify_fh(dentry, name, fh); |
5244 | if (set && err == -ENODATA) |
5245 | diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c |
5246 | index 30adc9d408a0..0fb0a59a5e5c 100644 |
5247 | --- a/fs/overlayfs/super.c |
5248 | +++ b/fs/overlayfs/super.c |
5249 | @@ -472,6 +472,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) |
5250 | { |
5251 | char *p; |
5252 | int err; |
5253 | + bool metacopy_opt = false, redirect_opt = false; |
5254 | |
5255 | config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL); |
5256 | if (!config->redirect_mode) |
5257 | @@ -516,6 +517,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) |
5258 | config->redirect_mode = match_strdup(&args[0]); |
5259 | if (!config->redirect_mode) |
5260 | return -ENOMEM; |
5261 | + redirect_opt = true; |
5262 | break; |
5263 | |
5264 | case OPT_INDEX_ON: |
5265 | @@ -548,6 +550,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) |
5266 | |
5267 | case OPT_METACOPY_ON: |
5268 | config->metacopy = true; |
5269 | + metacopy_opt = true; |
5270 | break; |
5271 | |
5272 | case OPT_METACOPY_OFF: |
5273 | @@ -572,13 +575,32 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) |
5274 | if (err) |
5275 | return err; |
5276 | |
5277 | - /* metacopy feature with upper requires redirect_dir=on */ |
5278 | - if (config->upperdir && config->metacopy && !config->redirect_dir) { |
5279 | - pr_warn("overlayfs: metadata only copy up requires \"redirect_dir=on\", falling back to metacopy=off.\n"); |
5280 | - config->metacopy = false; |
5281 | - } else if (config->metacopy && !config->redirect_follow) { |
5282 | - pr_warn("overlayfs: metadata only copy up requires \"redirect_dir=follow\" on non-upper mount, falling back to metacopy=off.\n"); |
5283 | - config->metacopy = false; |
5284 | + /* |
5285 | + * This is to make the logic below simpler. It doesn't make any other |
5286 | + * difference, since config->redirect_dir is only used for upper. |
5287 | + */ |
5288 | + if (!config->upperdir && config->redirect_follow) |
5289 | + config->redirect_dir = true; |
5290 | + |
5291 | + /* Resolve metacopy -> redirect_dir dependency */ |
5292 | + if (config->metacopy && !config->redirect_dir) { |
5293 | + if (metacopy_opt && redirect_opt) { |
5294 | + pr_err("overlayfs: conflicting options: metacopy=on,redirect_dir=%s\n", |
5295 | + config->redirect_mode); |
5296 | + return -EINVAL; |
5297 | + } |
5298 | + if (redirect_opt) { |
5299 | + /* |
5300 | + * There was an explicit redirect_dir=... that resulted |
5301 | + * in this conflict. |
5302 | + */ |
5303 | + pr_info("overlayfs: disabling metacopy due to redirect_dir=%s\n", |
5304 | + config->redirect_mode); |
5305 | + config->metacopy = false; |
5306 | + } else { |
5307 | + /* Automatically enable redirect otherwise. */ |
5308 | + config->redirect_follow = config->redirect_dir = true; |
5309 | + } |
5310 | } |
5311 | |
5312 | return 0; |
5313 | diff --git a/fs/udf/super.c b/fs/udf/super.c |
5314 | index 6f515651a2c2..b997e3116e37 100644 |
5315 | --- a/fs/udf/super.c |
5316 | +++ b/fs/udf/super.c |
5317 | @@ -613,14 +613,11 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options) |
5318 | struct udf_options uopt; |
5319 | struct udf_sb_info *sbi = UDF_SB(sb); |
5320 | int error = 0; |
5321 | - struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb); |
5322 | + |
5323 | + if (!(*flags & SB_RDONLY) && UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT)) |
5324 | + return -EACCES; |
5325 | |
5326 | sync_filesystem(sb); |
5327 | - if (lvidiu) { |
5328 | - int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev); |
5329 | - if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & SB_RDONLY)) |
5330 | - return -EACCES; |
5331 | - } |
5332 | |
5333 | uopt.flags = sbi->s_flags; |
5334 | uopt.uid = sbi->s_uid; |
5335 | @@ -1257,6 +1254,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block) |
5336 | ret = -EACCES; |
5337 | goto out_bh; |
5338 | } |
5339 | + UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT); |
5340 | ret = udf_load_vat(sb, i, type1_idx); |
5341 | if (ret < 0) |
5342 | goto out_bh; |
5343 | @@ -2155,10 +2153,12 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) |
5344 | UDF_MAX_READ_VERSION); |
5345 | ret = -EINVAL; |
5346 | goto error_out; |
5347 | - } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION && |
5348 | - !sb_rdonly(sb)) { |
5349 | - ret = -EACCES; |
5350 | - goto error_out; |
5351 | + } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) { |
5352 | + if (!sb_rdonly(sb)) { |
5353 | + ret = -EACCES; |
5354 | + goto error_out; |
5355 | + } |
5356 | + UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT); |
5357 | } |
5358 | |
5359 | sbi->s_udfrev = minUDFWriteRev; |
5360 | @@ -2176,10 +2176,12 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) |
5361 | } |
5362 | |
5363 | if (sbi->s_partmaps[sbi->s_partition].s_partition_flags & |
5364 | - UDF_PART_FLAG_READ_ONLY && |
5365 | - !sb_rdonly(sb)) { |
5366 | - ret = -EACCES; |
5367 | - goto error_out; |
5368 | + UDF_PART_FLAG_READ_ONLY) { |
5369 | + if (!sb_rdonly(sb)) { |
5370 | + ret = -EACCES; |
5371 | + goto error_out; |
5372 | + } |
5373 | + UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT); |
5374 | } |
5375 | |
5376 | if (udf_find_fileset(sb, &fileset, &rootdir)) { |
5377 | diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h |
5378 | index 9424d7cab790..d12e507e9eb2 100644 |
5379 | --- a/fs/udf/udf_sb.h |
5380 | +++ b/fs/udf/udf_sb.h |
5381 | @@ -30,6 +30,8 @@ |
5382 | #define UDF_FLAG_LASTBLOCK_SET 16 |
5383 | #define UDF_FLAG_BLOCKSIZE_SET 17 |
5384 | #define UDF_FLAG_INCONSISTENT 18 |
5385 | +#define UDF_FLAG_RW_INCOMPAT 19 /* Set when we find RW incompatible |
5386 | + * feature */ |
5387 | |
5388 | #define UDF_PART_FLAG_UNALLOC_BITMAP 0x0001 |
5389 | #define UDF_PART_FLAG_UNALLOC_TABLE 0x0002 |
5390 | diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h |
5391 | index 49c93b9308d7..68bb09c29ce8 100644 |
5392 | --- a/include/linux/ceph/libceph.h |
5393 | +++ b/include/linux/ceph/libceph.h |
5394 | @@ -81,7 +81,13 @@ struct ceph_options { |
5395 | |
5396 | #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) |
5397 | #define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) |
5398 | -#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024) |
5399 | + |
5400 | +/* |
5401 | + * Handle the largest possible rbd object in one message. |
5402 | + * There is no limit on the size of cephfs objects, but it has to obey |
5403 | + * rsize and wsize mount options anyway. |
5404 | + */ |
5405 | +#define CEPH_MSG_MAX_DATA_LEN (32*1024*1024) |
5406 | |
5407 | #define CEPH_AUTH_NAME_DEFAULT "guest" |
5408 | |
5409 | diff --git a/include/linux/i8253.h b/include/linux/i8253.h |
5410 | index e6bb36a97519..8336b2f6f834 100644 |
5411 | --- a/include/linux/i8253.h |
5412 | +++ b/include/linux/i8253.h |
5413 | @@ -21,6 +21,7 @@ |
5414 | #define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ) |
5415 | |
5416 | extern raw_spinlock_t i8253_lock; |
5417 | +extern bool i8253_clear_counter_on_shutdown; |
5418 | extern struct clock_event_device i8253_clockevent; |
5419 | extern void clockevent_i8253_init(bool oneshot); |
5420 | |
5421 | diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h |
5422 | index abe975c87b90..78b86dea2f29 100644 |
5423 | --- a/include/linux/mtd/nand.h |
5424 | +++ b/include/linux/mtd/nand.h |
5425 | @@ -324,9 +324,8 @@ static inline unsigned int nanddev_ntargets(const struct nand_device *nand) |
5426 | */ |
5427 | static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand) |
5428 | { |
5429 | - return (u64)nand->memorg.luns_per_target * |
5430 | - nand->memorg.eraseblocks_per_lun * |
5431 | - nand->memorg.pages_per_eraseblock; |
5432 | + return nand->memorg.ntargets * nand->memorg.luns_per_target * |
5433 | + nand->memorg.eraseblocks_per_lun; |
5434 | } |
5435 | |
5436 | /** |
5437 | diff --git a/include/linux/nmi.h b/include/linux/nmi.h |
5438 | index 08f9247e9827..9003e29cde46 100644 |
5439 | --- a/include/linux/nmi.h |
5440 | +++ b/include/linux/nmi.h |
5441 | @@ -119,6 +119,8 @@ static inline int hardlockup_detector_perf_init(void) { return 0; } |
5442 | void watchdog_nmi_stop(void); |
5443 | void watchdog_nmi_start(void); |
5444 | int watchdog_nmi_probe(void); |
5445 | +int watchdog_nmi_enable(unsigned int cpu); |
5446 | +void watchdog_nmi_disable(unsigned int cpu); |
5447 | |
5448 | /** |
5449 | * touch_nmi_watchdog - restart NMI watchdog timeout. |
5450 | diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h |
5451 | index 01674b56e14f..71a87c36e2b6 100644 |
5452 | --- a/include/uapi/linux/kfd_ioctl.h |
5453 | +++ b/include/uapi/linux/kfd_ioctl.h |
5454 | @@ -247,10 +247,10 @@ struct kfd_hsa_memory_exception_data { |
5455 | |
5456 | /* hw exception data */ |
5457 | struct kfd_hsa_hw_exception_data { |
5458 | - uint32_t reset_type; |
5459 | - uint32_t reset_cause; |
5460 | - uint32_t memory_lost; |
5461 | - uint32_t gpu_id; |
5462 | + __u32 reset_type; |
5463 | + __u32 reset_cause; |
5464 | + __u32 memory_lost; |
5465 | + __u32 gpu_id; |
5466 | }; |
5467 | |
5468 | /* Event data */ |
5469 | diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h |
5470 | index fd18c974a619..f6e798d42069 100644 |
5471 | --- a/include/xen/xen-ops.h |
5472 | +++ b/include/xen/xen-ops.h |
5473 | @@ -41,7 +41,7 @@ int xen_setup_shutdown_event(void); |
5474 | |
5475 | extern unsigned long *xen_contiguous_bitmap; |
5476 | |
5477 | -#ifdef CONFIG_XEN_PV |
5478 | +#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) |
5479 | int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, |
5480 | unsigned int address_bits, |
5481 | dma_addr_t *dma_handle); |
5482 | diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c |
5483 | index 6ad4a9fcbd6f..7921ae4fca8d 100644 |
5484 | --- a/kernel/debug/kdb/kdb_bt.c |
5485 | +++ b/kernel/debug/kdb/kdb_bt.c |
5486 | @@ -179,14 +179,14 @@ kdb_bt(int argc, const char **argv) |
5487 | kdb_printf("no process for cpu %ld\n", cpu); |
5488 | return 0; |
5489 | } |
5490 | - sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu)); |
5491 | + sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu)); |
5492 | kdb_parse(buf); |
5493 | return 0; |
5494 | } |
5495 | kdb_printf("btc: cpu status: "); |
5496 | kdb_parse("cpu\n"); |
5497 | for_each_online_cpu(cpu) { |
5498 | - sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu)); |
5499 | + sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu)); |
5500 | kdb_parse(buf); |
5501 | touch_nmi_watchdog(); |
5502 | } |
5503 | diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c |
5504 | index 2ddfce8f1e8f..f338d23b112b 100644 |
5505 | --- a/kernel/debug/kdb/kdb_main.c |
5506 | +++ b/kernel/debug/kdb/kdb_main.c |
5507 | @@ -1192,7 +1192,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, |
5508 | if (reason == KDB_REASON_DEBUG) { |
5509 | /* special case below */ |
5510 | } else { |
5511 | - kdb_printf("\nEntering kdb (current=0x%p, pid %d) ", |
5512 | + kdb_printf("\nEntering kdb (current=0x%px, pid %d) ", |
5513 | kdb_current, kdb_current ? kdb_current->pid : 0); |
5514 | #if defined(CONFIG_SMP) |
5515 | kdb_printf("on processor %d ", raw_smp_processor_id()); |
5516 | @@ -1208,7 +1208,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, |
5517 | */ |
5518 | switch (db_result) { |
5519 | case KDB_DB_BPT: |
5520 | - kdb_printf("\nEntering kdb (0x%p, pid %d) ", |
5521 | + kdb_printf("\nEntering kdb (0x%px, pid %d) ", |
5522 | kdb_current, kdb_current->pid); |
5523 | #if defined(CONFIG_SMP) |
5524 | kdb_printf("on processor %d ", raw_smp_processor_id()); |
5525 | @@ -2048,7 +2048,7 @@ static int kdb_lsmod(int argc, const char **argv) |
5526 | if (mod->state == MODULE_STATE_UNFORMED) |
5527 | continue; |
5528 | |
5529 | - kdb_printf("%-20s%8u 0x%p ", mod->name, |
5530 | + kdb_printf("%-20s%8u 0x%px ", mod->name, |
5531 | mod->core_layout.size, (void *)mod); |
5532 | #ifdef CONFIG_MODULE_UNLOAD |
5533 | kdb_printf("%4d ", module_refcount(mod)); |
5534 | @@ -2059,7 +2059,7 @@ static int kdb_lsmod(int argc, const char **argv) |
5535 | kdb_printf(" (Loading)"); |
5536 | else |
5537 | kdb_printf(" (Live)"); |
5538 | - kdb_printf(" 0x%p", mod->core_layout.base); |
5539 | + kdb_printf(" 0x%px", mod->core_layout.base); |
5540 | |
5541 | #ifdef CONFIG_MODULE_UNLOAD |
5542 | { |
5543 | @@ -2341,7 +2341,7 @@ void kdb_ps1(const struct task_struct *p) |
5544 | return; |
5545 | |
5546 | cpu = kdb_process_cpu(p); |
5547 | - kdb_printf("0x%p %8d %8d %d %4d %c 0x%p %c%s\n", |
5548 | + kdb_printf("0x%px %8d %8d %d %4d %c 0x%px %c%s\n", |
5549 | (void *)p, p->pid, p->parent->pid, |
5550 | kdb_task_has_cpu(p), kdb_process_cpu(p), |
5551 | kdb_task_state_char(p), |
5552 | @@ -2354,7 +2354,7 @@ void kdb_ps1(const struct task_struct *p) |
5553 | } else { |
5554 | if (KDB_TSK(cpu) != p) |
5555 | kdb_printf(" Error: does not match running " |
5556 | - "process table (0x%p)\n", KDB_TSK(cpu)); |
5557 | + "process table (0x%px)\n", KDB_TSK(cpu)); |
5558 | } |
5559 | } |
5560 | } |
5561 | @@ -2692,7 +2692,7 @@ int kdb_register_flags(char *cmd, |
5562 | for_each_kdbcmd(kp, i) { |
5563 | if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) { |
5564 | kdb_printf("Duplicate kdb command registered: " |
5565 | - "%s, func %p help %s\n", cmd, func, help); |
5566 | + "%s, func %px help %s\n", cmd, func, help); |
5567 | return 1; |
5568 | } |
5569 | } |
5570 | diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c |
5571 | index 990b3cc526c8..987eb73284d2 100644 |
5572 | --- a/kernel/debug/kdb/kdb_support.c |
5573 | +++ b/kernel/debug/kdb/kdb_support.c |
5574 | @@ -40,7 +40,7 @@ |
5575 | int kdbgetsymval(const char *symname, kdb_symtab_t *symtab) |
5576 | { |
5577 | if (KDB_DEBUG(AR)) |
5578 | - kdb_printf("kdbgetsymval: symname=%s, symtab=%p\n", symname, |
5579 | + kdb_printf("kdbgetsymval: symname=%s, symtab=%px\n", symname, |
5580 | symtab); |
5581 | memset(symtab, 0, sizeof(*symtab)); |
5582 | symtab->sym_start = kallsyms_lookup_name(symname); |
5583 | @@ -88,7 +88,7 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab) |
5584 | char *knt1 = NULL; |
5585 | |
5586 | if (KDB_DEBUG(AR)) |
5587 | - kdb_printf("kdbnearsym: addr=0x%lx, symtab=%p\n", addr, symtab); |
5588 | + kdb_printf("kdbnearsym: addr=0x%lx, symtab=%px\n", addr, symtab); |
5589 | memset(symtab, 0, sizeof(*symtab)); |
5590 | |
5591 | if (addr < 4096) |
5592 | @@ -149,7 +149,7 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab) |
5593 | symtab->mod_name = "kernel"; |
5594 | if (KDB_DEBUG(AR)) |
5595 | kdb_printf("kdbnearsym: returns %d symtab->sym_start=0x%lx, " |
5596 | - "symtab->mod_name=%p, symtab->sym_name=%p (%s)\n", ret, |
5597 | + "symtab->mod_name=%px, symtab->sym_name=%px (%s)\n", ret, |
5598 | symtab->sym_start, symtab->mod_name, symtab->sym_name, |
5599 | symtab->sym_name); |
5600 | |
5601 | @@ -887,13 +887,13 @@ void debug_kusage(void) |
5602 | __func__, dah_first); |
5603 | if (dah_first) { |
5604 | h_used = (struct debug_alloc_header *)debug_alloc_pool; |
5605 | - kdb_printf("%s: h_used %p size %d\n", __func__, h_used, |
5606 | + kdb_printf("%s: h_used %px size %d\n", __func__, h_used, |
5607 | h_used->size); |
5608 | } |
5609 | do { |
5610 | h_used = (struct debug_alloc_header *) |
5611 | ((char *)h_free + dah_overhead + h_free->size); |
5612 | - kdb_printf("%s: h_used %p size %d caller %p\n", |
5613 | + kdb_printf("%s: h_used %px size %d caller %px\n", |
5614 | __func__, h_used, h_used->size, h_used->caller); |
5615 | h_free = (struct debug_alloc_header *) |
5616 | (debug_alloc_pool + h_free->next); |
5617 | @@ -902,7 +902,7 @@ void debug_kusage(void) |
5618 | ((char *)h_free + dah_overhead + h_free->size); |
5619 | if ((char *)h_used - debug_alloc_pool != |
5620 | sizeof(debug_alloc_pool_aligned)) |
5621 | - kdb_printf("%s: h_used %p size %d caller %p\n", |
5622 | + kdb_printf("%s: h_used %px size %d caller %px\n", |
5623 | __func__, h_used, h_used->size, h_used->caller); |
5624 | out: |
5625 | spin_unlock(&dap_lock); |
5626 | diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c |
5627 | index c30032367aab..f9a0cd094b81 100644 |
5628 | --- a/kernel/trace/trace_kprobe.c |
5629 | +++ b/kernel/trace/trace_kprobe.c |
5630 | @@ -61,9 +61,23 @@ static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk, |
5631 | return strncmp(mod->name, name, len) == 0 && name[len] == ':'; |
5632 | } |
5633 | |
5634 | -static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk) |
5635 | +static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk) |
5636 | { |
5637 | - return !!strchr(trace_kprobe_symbol(tk), ':'); |
5638 | + char *p; |
5639 | + bool ret; |
5640 | + |
5641 | + if (!tk->symbol) |
5642 | + return false; |
5643 | + p = strchr(tk->symbol, ':'); |
5644 | + if (!p) |
5645 | + return true; |
5646 | + *p = '\0'; |
5647 | + mutex_lock(&module_mutex); |
5648 | + ret = !!find_module(tk->symbol); |
5649 | + mutex_unlock(&module_mutex); |
5650 | + *p = ':'; |
5651 | + |
5652 | + return ret; |
5653 | } |
5654 | |
5655 | static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk) |
5656 | @@ -554,19 +568,13 @@ static int __register_trace_kprobe(struct trace_kprobe *tk) |
5657 | else |
5658 | ret = register_kprobe(&tk->rp.kp); |
5659 | |
5660 | - if (ret == 0) |
5661 | + if (ret == 0) { |
5662 | tk->tp.flags |= TP_FLAG_REGISTERED; |
5663 | - else { |
5664 | - if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) { |
5665 | - pr_warn("This probe might be able to register after target module is loaded. Continue.\n"); |
5666 | - ret = 0; |
5667 | - } else if (ret == -EILSEQ) { |
5668 | - pr_warn("Probing address(0x%p) is not an instruction boundary.\n", |
5669 | - tk->rp.kp.addr); |
5670 | - ret = -EINVAL; |
5671 | - } |
5672 | + } else if (ret == -EILSEQ) { |
5673 | + pr_warn("Probing address(0x%p) is not an instruction boundary.\n", |
5674 | + tk->rp.kp.addr); |
5675 | + ret = -EINVAL; |
5676 | } |
5677 | - |
5678 | return ret; |
5679 | } |
5680 | |
5681 | @@ -629,6 +637,11 @@ static int register_trace_kprobe(struct trace_kprobe *tk) |
5682 | |
5683 | /* Register k*probe */ |
5684 | ret = __register_trace_kprobe(tk); |
5685 | + if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) { |
5686 | + pr_warn("This probe might be able to register after target module is loaded. Continue.\n"); |
5687 | + ret = 0; |
5688 | + } |
5689 | + |
5690 | if (ret < 0) |
5691 | unregister_kprobe_event(tk); |
5692 | else |
5693 | diff --git a/lib/ubsan.c b/lib/ubsan.c |
5694 | index 59fee96c29a0..e4162f59a81c 100644 |
5695 | --- a/lib/ubsan.c |
5696 | +++ b/lib/ubsan.c |
5697 | @@ -427,8 +427,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, |
5698 | EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); |
5699 | |
5700 | |
5701 | -void __noreturn |
5702 | -__ubsan_handle_builtin_unreachable(struct unreachable_data *data) |
5703 | +void __ubsan_handle_builtin_unreachable(struct unreachable_data *data) |
5704 | { |
5705 | unsigned long flags; |
5706 | |
5707 | diff --git a/mm/hugetlb.c b/mm/hugetlb.c |
5708 | index 7b5c0ad9a6bd..1931a3d9b282 100644 |
5709 | --- a/mm/hugetlb.c |
5710 | +++ b/mm/hugetlb.c |
5711 | @@ -3233,7 +3233,7 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte) |
5712 | int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, |
5713 | struct vm_area_struct *vma) |
5714 | { |
5715 | - pte_t *src_pte, *dst_pte, entry; |
5716 | + pte_t *src_pte, *dst_pte, entry, dst_entry; |
5717 | struct page *ptepage; |
5718 | unsigned long addr; |
5719 | int cow; |
5720 | @@ -3261,15 +3261,30 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, |
5721 | break; |
5722 | } |
5723 | |
5724 | - /* If the pagetables are shared don't copy or take references */ |
5725 | - if (dst_pte == src_pte) |
5726 | + /* |
5727 | + * If the pagetables are shared don't copy or take references. |
5728 | + * dst_pte == src_pte is the common case of src/dest sharing. |
5729 | + * |
5730 | + * However, src could have 'unshared' and dst shares with |
5731 | + * another vma. If dst_pte !none, this implies sharing. |
5732 | + * Check here before taking page table lock, and once again |
5733 | + * after taking the lock below. |
5734 | + */ |
5735 | + dst_entry = huge_ptep_get(dst_pte); |
5736 | + if ((dst_pte == src_pte) || !huge_pte_none(dst_entry)) |
5737 | continue; |
5738 | |
5739 | dst_ptl = huge_pte_lock(h, dst, dst_pte); |
5740 | src_ptl = huge_pte_lockptr(h, src, src_pte); |
5741 | spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); |
5742 | entry = huge_ptep_get(src_pte); |
5743 | - if (huge_pte_none(entry)) { /* skip none entry */ |
5744 | + dst_entry = huge_ptep_get(dst_pte); |
5745 | + if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) { |
5746 | + /* |
5747 | + * Skip if src entry none. Also, skip in the |
5748 | + * unlikely case dst entry !none as this implies |
5749 | + * sharing with another vma. |
5750 | + */ |
5751 | ; |
5752 | } else if (unlikely(is_hugetlb_entry_migration(entry) || |
5753 | is_hugetlb_entry_hwpoisoned(entry))) { |
5754 | diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c |
5755 | index 38d94b703e9d..f3f919728f5c 100644 |
5756 | --- a/mm/memory_hotplug.c |
5757 | +++ b/mm/memory_hotplug.c |
5758 | @@ -587,6 +587,7 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn, |
5759 | for (i = 0; i < sections_to_remove; i++) { |
5760 | unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION; |
5761 | |
5762 | + cond_resched(); |
5763 | ret = __remove_section(zone, __pfn_to_section(pfn), map_offset, |
5764 | altmap); |
5765 | map_offset = 0; |
5766 | diff --git a/mm/mempolicy.c b/mm/mempolicy.c |
5767 | index da858f794eb6..149b6f4cf023 100644 |
5768 | --- a/mm/mempolicy.c |
5769 | +++ b/mm/mempolicy.c |
5770 | @@ -2046,8 +2046,36 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, |
5771 | nmask = policy_nodemask(gfp, pol); |
5772 | if (!nmask || node_isset(hpage_node, *nmask)) { |
5773 | mpol_cond_put(pol); |
5774 | - page = __alloc_pages_node(hpage_node, |
5775 | - gfp | __GFP_THISNODE, order); |
5776 | + /* |
5777 | + * We cannot invoke reclaim if __GFP_THISNODE |
5778 | + * is set. Invoking reclaim with |
5779 | + * __GFP_THISNODE set, would cause THP |
5780 | + * allocations to trigger heavy swapping |
5781 | + * despite there may be tons of free memory |
5782 | + * (including potentially plenty of THP |
5783 | + * already available in the buddy) on all the |
5784 | + * other NUMA nodes. |
5785 | + * |
5786 | + * At most we could invoke compaction when |
5787 | + * __GFP_THISNODE is set (but we would need to |
5788 | + * refrain from invoking reclaim even if |
5789 | + * compaction returned COMPACT_SKIPPED because |
5790 | + * there wasn't not enough memory to succeed |
5791 | + * compaction). For now just avoid |
5792 | + * __GFP_THISNODE instead of limiting the |
5793 | + * allocation path to a strict and single |
5794 | + * compaction invocation. |
5795 | + * |
5796 | + * Supposedly if direct reclaim was enabled by |
5797 | + * the caller, the app prefers THP regardless |
5798 | + * of the node it comes from so this would be |
5799 | + * more desiderable behavior than only |
5800 | + * providing THP originated from the local |
5801 | + * node in such case. |
5802 | + */ |
5803 | + if (!(gfp & __GFP_DIRECT_RECLAIM)) |
5804 | + gfp |= __GFP_THISNODE; |
5805 | + page = __alloc_pages_node(hpage_node, gfp, order); |
5806 | goto out; |
5807 | } |
5808 | } |
5809 | diff --git a/mm/swapfile.c b/mm/swapfile.c |
5810 | index d954b71c4f9c..8810a6d7d67f 100644 |
5811 | --- a/mm/swapfile.c |
5812 | +++ b/mm/swapfile.c |
5813 | @@ -2820,7 +2820,7 @@ static struct swap_info_struct *alloc_swap_info(void) |
5814 | unsigned int type; |
5815 | int i; |
5816 | |
5817 | - p = kzalloc(sizeof(*p), GFP_KERNEL); |
5818 | + p = kvzalloc(sizeof(*p), GFP_KERNEL); |
5819 | if (!p) |
5820 | return ERR_PTR(-ENOMEM); |
5821 | |
5822 | @@ -2831,7 +2831,7 @@ static struct swap_info_struct *alloc_swap_info(void) |
5823 | } |
5824 | if (type >= MAX_SWAPFILES) { |
5825 | spin_unlock(&swap_lock); |
5826 | - kfree(p); |
5827 | + kvfree(p); |
5828 | return ERR_PTR(-EPERM); |
5829 | } |
5830 | if (type >= nr_swapfiles) { |
5831 | @@ -2845,7 +2845,7 @@ static struct swap_info_struct *alloc_swap_info(void) |
5832 | smp_wmb(); |
5833 | nr_swapfiles++; |
5834 | } else { |
5835 | - kfree(p); |
5836 | + kvfree(p); |
5837 | p = swap_info[type]; |
5838 | /* |
5839 | * Do not memset this entry: a racing procfs swap_next() |
5840 | diff --git a/net/9p/protocol.c b/net/9p/protocol.c |
5841 | index 4a1e1dd30b52..ee32bbf12675 100644 |
5842 | --- a/net/9p/protocol.c |
5843 | +++ b/net/9p/protocol.c |
5844 | @@ -46,10 +46,15 @@ p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...); |
5845 | void p9stat_free(struct p9_wstat *stbuf) |
5846 | { |
5847 | kfree(stbuf->name); |
5848 | + stbuf->name = NULL; |
5849 | kfree(stbuf->uid); |
5850 | + stbuf->uid = NULL; |
5851 | kfree(stbuf->gid); |
5852 | + stbuf->gid = NULL; |
5853 | kfree(stbuf->muid); |
5854 | + stbuf->muid = NULL; |
5855 | kfree(stbuf->extension); |
5856 | + stbuf->extension = NULL; |
5857 | } |
5858 | EXPORT_SYMBOL(p9stat_free); |
5859 | |
5860 | diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c |
5861 | index a676d5f76bdc..277d02a8cac8 100644 |
5862 | --- a/net/netfilter/nf_conntrack_core.c |
5863 | +++ b/net/netfilter/nf_conntrack_core.c |
5864 | @@ -1073,19 +1073,22 @@ static unsigned int early_drop_list(struct net *net, |
5865 | return drops; |
5866 | } |
5867 | |
5868 | -static noinline int early_drop(struct net *net, unsigned int _hash) |
5869 | +static noinline int early_drop(struct net *net, unsigned int hash) |
5870 | { |
5871 | - unsigned int i; |
5872 | + unsigned int i, bucket; |
5873 | |
5874 | for (i = 0; i < NF_CT_EVICTION_RANGE; i++) { |
5875 | struct hlist_nulls_head *ct_hash; |
5876 | - unsigned int hash, hsize, drops; |
5877 | + unsigned int hsize, drops; |
5878 | |
5879 | rcu_read_lock(); |
5880 | nf_conntrack_get_ht(&ct_hash, &hsize); |
5881 | - hash = reciprocal_scale(_hash++, hsize); |
5882 | + if (!i) |
5883 | + bucket = reciprocal_scale(hash, hsize); |
5884 | + else |
5885 | + bucket = (bucket + 1) % hsize; |
5886 | |
5887 | - drops = early_drop_list(net, &ct_hash[hash]); |
5888 | + drops = early_drop_list(net, &ct_hash[bucket]); |
5889 | rcu_read_unlock(); |
5890 | |
5891 | if (drops) { |
5892 | diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c |
5893 | index 30afbd236656..b53cc0960b5d 100644 |
5894 | --- a/net/sunrpc/xdr.c |
5895 | +++ b/net/sunrpc/xdr.c |
5896 | @@ -639,11 +639,10 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len) |
5897 | WARN_ON_ONCE(xdr->iov); |
5898 | return; |
5899 | } |
5900 | - if (fraglen) { |
5901 | + if (fraglen) |
5902 | xdr->end = head->iov_base + head->iov_len; |
5903 | - xdr->page_ptr--; |
5904 | - } |
5905 | /* (otherwise assume xdr->end is already set) */ |
5906 | + xdr->page_ptr--; |
5907 | head->iov_len = len; |
5908 | buf->len = len; |
5909 | xdr->p = head->iov_base + head->iov_len; |
5910 | diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py |
5911 | index 839e190bbd7a..5056fb3b897d 100755 |
5912 | --- a/scripts/spdxcheck.py |
5913 | +++ b/scripts/spdxcheck.py |
5914 | @@ -168,7 +168,6 @@ class id_parser(object): |
5915 | self.curline = 0 |
5916 | try: |
5917 | for line in fd: |
5918 | - line = line.decode(locale.getpreferredencoding(False), errors='ignore') |
5919 | self.curline += 1 |
5920 | if self.curline > maxlines: |
5921 | break |
5922 | diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c |
5923 | index 18b98b5e1e3c..fe251c6f09f1 100644 |
5924 | --- a/security/selinux/hooks.c |
5925 | +++ b/security/selinux/hooks.c |
5926 | @@ -5318,6 +5318,9 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname, |
5927 | addr_buf = address; |
5928 | |
5929 | while (walk_size < addrlen) { |
5930 | + if (walk_size + sizeof(sa_family_t) > addrlen) |
5931 | + return -EINVAL; |
5932 | + |
5933 | addr = addr_buf; |
5934 | switch (addr->sa_family) { |
5935 | case AF_UNSPEC: |
5936 | diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c |
5937 | index 2ae640257fdb..ca577658e890 100644 |
5938 | --- a/tools/perf/util/cs-etm.c |
5939 | +++ b/tools/perf/util/cs-etm.c |
5940 | @@ -244,6 +244,27 @@ static void cs_etm__free(struct perf_session *session) |
5941 | zfree(&aux); |
5942 | } |
5943 | |
5944 | +static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address) |
5945 | +{ |
5946 | + struct machine *machine; |
5947 | + |
5948 | + machine = etmq->etm->machine; |
5949 | + |
5950 | + if (address >= etmq->etm->kernel_start) { |
5951 | + if (machine__is_host(machine)) |
5952 | + return PERF_RECORD_MISC_KERNEL; |
5953 | + else |
5954 | + return PERF_RECORD_MISC_GUEST_KERNEL; |
5955 | + } else { |
5956 | + if (machine__is_host(machine)) |
5957 | + return PERF_RECORD_MISC_USER; |
5958 | + else if (perf_guest) |
5959 | + return PERF_RECORD_MISC_GUEST_USER; |
5960 | + else |
5961 | + return PERF_RECORD_MISC_HYPERVISOR; |
5962 | + } |
5963 | +} |
5964 | + |
5965 | static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address, |
5966 | size_t size, u8 *buffer) |
5967 | { |
5968 | @@ -258,10 +279,7 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address, |
5969 | return -1; |
5970 | |
5971 | machine = etmq->etm->machine; |
5972 | - if (address >= etmq->etm->kernel_start) |
5973 | - cpumode = PERF_RECORD_MISC_KERNEL; |
5974 | - else |
5975 | - cpumode = PERF_RECORD_MISC_USER; |
5976 | + cpumode = cs_etm__cpu_mode(etmq, address); |
5977 | |
5978 | thread = etmq->thread; |
5979 | if (!thread) { |
5980 | @@ -653,7 +671,7 @@ static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq, |
5981 | struct perf_sample sample = {.ip = 0,}; |
5982 | |
5983 | event->sample.header.type = PERF_RECORD_SAMPLE; |
5984 | - event->sample.header.misc = PERF_RECORD_MISC_USER; |
5985 | + event->sample.header.misc = cs_etm__cpu_mode(etmq, addr); |
5986 | event->sample.header.size = sizeof(struct perf_event_header); |
5987 | |
5988 | sample.ip = addr; |
5989 | @@ -665,7 +683,7 @@ static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq, |
5990 | sample.cpu = etmq->packet->cpu; |
5991 | sample.flags = 0; |
5992 | sample.insn_len = 1; |
5993 | - sample.cpumode = event->header.misc; |
5994 | + sample.cpumode = event->sample.header.misc; |
5995 | |
5996 | if (etm->synth_opts.last_branch) { |
5997 | cs_etm__copy_last_branch_rb(etmq); |
5998 | @@ -706,12 +724,15 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq) |
5999 | u64 nr; |
6000 | struct branch_entry entries; |
6001 | } dummy_bs; |
6002 | + u64 ip; |
6003 | + |
6004 | + ip = cs_etm__last_executed_instr(etmq->prev_packet); |
6005 | |
6006 | event->sample.header.type = PERF_RECORD_SAMPLE; |
6007 | - event->sample.header.misc = PERF_RECORD_MISC_USER; |
6008 | + event->sample.header.misc = cs_etm__cpu_mode(etmq, ip); |
6009 | event->sample.header.size = sizeof(struct perf_event_header); |
6010 | |
6011 | - sample.ip = cs_etm__last_executed_instr(etmq->prev_packet); |
6012 | + sample.ip = ip; |
6013 | sample.pid = etmq->pid; |
6014 | sample.tid = etmq->tid; |
6015 | sample.addr = cs_etm__first_executed_instr(etmq->packet); |
6016 | @@ -720,7 +741,7 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq) |
6017 | sample.period = 1; |
6018 | sample.cpu = etmq->packet->cpu; |
6019 | sample.flags = 0; |
6020 | - sample.cpumode = PERF_RECORD_MISC_USER; |
6021 | + sample.cpumode = event->sample.header.misc; |
6022 | |
6023 | /* |
6024 | * perf report cannot handle events without a branch stack |
6025 | diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c |
6026 | index 7f0c83b6332b..7127bc917fc5 100644 |
6027 | --- a/tools/perf/util/intel-bts.c |
6028 | +++ b/tools/perf/util/intel-bts.c |
6029 | @@ -269,6 +269,13 @@ static int intel_bts_do_fix_overlap(struct auxtrace_queue *queue, |
6030 | return 0; |
6031 | } |
6032 | |
6033 | +static inline u8 intel_bts_cpumode(struct intel_bts *bts, uint64_t ip) |
6034 | +{ |
6035 | + return machine__kernel_ip(bts->machine, ip) ? |
6036 | + PERF_RECORD_MISC_KERNEL : |
6037 | + PERF_RECORD_MISC_USER; |
6038 | +} |
6039 | + |
6040 | static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq, |
6041 | struct branch *branch) |
6042 | { |
6043 | @@ -281,12 +288,8 @@ static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq, |
6044 | bts->num_events++ <= bts->synth_opts.initial_skip) |
6045 | return 0; |
6046 | |
6047 | - event.sample.header.type = PERF_RECORD_SAMPLE; |
6048 | - event.sample.header.misc = PERF_RECORD_MISC_USER; |
6049 | - event.sample.header.size = sizeof(struct perf_event_header); |
6050 | - |
6051 | - sample.cpumode = PERF_RECORD_MISC_USER; |
6052 | sample.ip = le64_to_cpu(branch->from); |
6053 | + sample.cpumode = intel_bts_cpumode(bts, sample.ip); |
6054 | sample.pid = btsq->pid; |
6055 | sample.tid = btsq->tid; |
6056 | sample.addr = le64_to_cpu(branch->to); |
6057 | @@ -298,6 +301,10 @@ static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq, |
6058 | sample.insn_len = btsq->intel_pt_insn.length; |
6059 | memcpy(sample.insn, btsq->intel_pt_insn.buf, INTEL_PT_INSN_BUF_SZ); |
6060 | |
6061 | + event.sample.header.type = PERF_RECORD_SAMPLE; |
6062 | + event.sample.header.misc = sample.cpumode; |
6063 | + event.sample.header.size = sizeof(struct perf_event_header); |
6064 | + |
6065 | if (bts->synth_opts.inject) { |
6066 | event.sample.header.size = bts->branches_event_size; |
6067 | ret = perf_event__synthesize_sample(&event, |
6068 | diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c |
6069 | index aec68908d604..4f48bc11a29c 100644 |
6070 | --- a/tools/perf/util/intel-pt.c |
6071 | +++ b/tools/perf/util/intel-pt.c |
6072 | @@ -407,6 +407,13 @@ intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset) |
6073 | return auxtrace_cache__lookup(dso->auxtrace_cache, offset); |
6074 | } |
6075 | |
6076 | +static inline u8 intel_pt_cpumode(struct intel_pt *pt, uint64_t ip) |
6077 | +{ |
6078 | + return ip >= pt->kernel_start ? |
6079 | + PERF_RECORD_MISC_KERNEL : |
6080 | + PERF_RECORD_MISC_USER; |
6081 | +} |
6082 | + |
6083 | static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn, |
6084 | uint64_t *insn_cnt_ptr, uint64_t *ip, |
6085 | uint64_t to_ip, uint64_t max_insn_cnt, |
6086 | @@ -429,10 +436,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn, |
6087 | if (to_ip && *ip == to_ip) |
6088 | goto out_no_cache; |
6089 | |
6090 | - if (*ip >= ptq->pt->kernel_start) |
6091 | - cpumode = PERF_RECORD_MISC_KERNEL; |
6092 | - else |
6093 | - cpumode = PERF_RECORD_MISC_USER; |
6094 | + cpumode = intel_pt_cpumode(ptq->pt, *ip); |
6095 | |
6096 | thread = ptq->thread; |
6097 | if (!thread) { |
6098 | @@ -759,7 +763,8 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt, |
6099 | if (pt->synth_opts.callchain) { |
6100 | size_t sz = sizeof(struct ip_callchain); |
6101 | |
6102 | - sz += pt->synth_opts.callchain_sz * sizeof(u64); |
6103 | + /* Add 1 to callchain_sz for callchain context */ |
6104 | + sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64); |
6105 | ptq->chain = zalloc(sz); |
6106 | if (!ptq->chain) |
6107 | goto out_free; |
6108 | @@ -1053,15 +1058,11 @@ static void intel_pt_prep_b_sample(struct intel_pt *pt, |
6109 | union perf_event *event, |
6110 | struct perf_sample *sample) |
6111 | { |
6112 | - event->sample.header.type = PERF_RECORD_SAMPLE; |
6113 | - event->sample.header.misc = PERF_RECORD_MISC_USER; |
6114 | - event->sample.header.size = sizeof(struct perf_event_header); |
6115 | - |
6116 | if (!pt->timeless_decoding) |
6117 | sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc); |
6118 | |
6119 | - sample->cpumode = PERF_RECORD_MISC_USER; |
6120 | sample->ip = ptq->state->from_ip; |
6121 | + sample->cpumode = intel_pt_cpumode(pt, sample->ip); |
6122 | sample->pid = ptq->pid; |
6123 | sample->tid = ptq->tid; |
6124 | sample->addr = ptq->state->to_ip; |
6125 | @@ -1070,6 +1071,10 @@ static void intel_pt_prep_b_sample(struct intel_pt *pt, |
6126 | sample->flags = ptq->flags; |
6127 | sample->insn_len = ptq->insn_len; |
6128 | memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ); |
6129 | + |
6130 | + event->sample.header.type = PERF_RECORD_SAMPLE; |
6131 | + event->sample.header.misc = sample->cpumode; |
6132 | + event->sample.header.size = sizeof(struct perf_event_header); |
6133 | } |
6134 | |
6135 | static int intel_pt_inject_event(union perf_event *event, |
6136 | @@ -1155,7 +1160,8 @@ static void intel_pt_prep_sample(struct intel_pt *pt, |
6137 | |
6138 | if (pt->synth_opts.callchain) { |
6139 | thread_stack__sample(ptq->thread, ptq->chain, |
6140 | - pt->synth_opts.callchain_sz, sample->ip); |
6141 | + pt->synth_opts.callchain_sz + 1, |
6142 | + sample->ip, pt->kernel_start); |
6143 | sample->callchain = ptq->chain; |
6144 | } |
6145 | |
6146 | diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c |
6147 | index 111ae858cbcb..8ee8ab39d8ac 100644 |
6148 | --- a/tools/perf/util/machine.c |
6149 | +++ b/tools/perf/util/machine.c |
6150 | @@ -2140,6 +2140,27 @@ static int resolve_lbr_callchain_sample(struct thread *thread, |
6151 | return 0; |
6152 | } |
6153 | |
6154 | +static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread, |
6155 | + struct callchain_cursor *cursor, |
6156 | + struct symbol **parent, |
6157 | + struct addr_location *root_al, |
6158 | + u8 *cpumode, int ent) |
6159 | +{ |
6160 | + int err = 0; |
6161 | + |
6162 | + while (--ent >= 0) { |
6163 | + u64 ip = chain->ips[ent]; |
6164 | + |
6165 | + if (ip >= PERF_CONTEXT_MAX) { |
6166 | + err = add_callchain_ip(thread, cursor, parent, |
6167 | + root_al, cpumode, ip, |
6168 | + false, NULL, NULL, 0); |
6169 | + break; |
6170 | + } |
6171 | + } |
6172 | + return err; |
6173 | +} |
6174 | + |
6175 | static int thread__resolve_callchain_sample(struct thread *thread, |
6176 | struct callchain_cursor *cursor, |
6177 | struct perf_evsel *evsel, |
6178 | @@ -2246,6 +2267,12 @@ static int thread__resolve_callchain_sample(struct thread *thread, |
6179 | } |
6180 | |
6181 | check_calls: |
6182 | + if (callchain_param.order != ORDER_CALLEE) { |
6183 | + err = find_prev_cpumode(chain, thread, cursor, parent, root_al, |
6184 | + &cpumode, chain->nr - first_call); |
6185 | + if (err) |
6186 | + return (err < 0) ? err : 0; |
6187 | + } |
6188 | for (i = first_call, nr_entries = 0; |
6189 | i < chain_nr && nr_entries < max_stack; i++) { |
6190 | u64 ip; |
6191 | @@ -2260,9 +2287,15 @@ check_calls: |
6192 | continue; |
6193 | #endif |
6194 | ip = chain->ips[j]; |
6195 | - |
6196 | if (ip < PERF_CONTEXT_MAX) |
6197 | ++nr_entries; |
6198 | + else if (callchain_param.order != ORDER_CALLEE) { |
6199 | + err = find_prev_cpumode(chain, thread, cursor, parent, |
6200 | + root_al, &cpumode, j); |
6201 | + if (err) |
6202 | + return (err < 0) ? err : 0; |
6203 | + continue; |
6204 | + } |
6205 | |
6206 | err = add_callchain_ip(thread, cursor, parent, |
6207 | root_al, &cpumode, ip, |
6208 | diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c |
6209 | index 7799788f662f..7e49baad304d 100644 |
6210 | --- a/tools/perf/util/pmu.c |
6211 | +++ b/tools/perf/util/pmu.c |
6212 | @@ -773,7 +773,7 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu) |
6213 | |
6214 | if (!is_arm_pmu_core(name)) { |
6215 | pname = pe->pmu ? pe->pmu : "cpu"; |
6216 | - if (strncmp(pname, name, strlen(pname))) |
6217 | + if (strcmp(pname, name)) |
6218 | continue; |
6219 | } |
6220 | |
6221 | diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c |
6222 | index dd17d6a38d3a..a5669d05e91f 100644 |
6223 | --- a/tools/perf/util/thread-stack.c |
6224 | +++ b/tools/perf/util/thread-stack.c |
6225 | @@ -285,20 +285,46 @@ void thread_stack__free(struct thread *thread) |
6226 | } |
6227 | } |
6228 | |
6229 | +static inline u64 callchain_context(u64 ip, u64 kernel_start) |
6230 | +{ |
6231 | + return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL; |
6232 | +} |
6233 | + |
6234 | void thread_stack__sample(struct thread *thread, struct ip_callchain *chain, |
6235 | - size_t sz, u64 ip) |
6236 | + size_t sz, u64 ip, u64 kernel_start) |
6237 | { |
6238 | - size_t i; |
6239 | + u64 context = callchain_context(ip, kernel_start); |
6240 | + u64 last_context; |
6241 | + size_t i, j; |
6242 | |
6243 | - if (!thread || !thread->ts) |
6244 | - chain->nr = 1; |
6245 | - else |
6246 | - chain->nr = min(sz, thread->ts->cnt + 1); |
6247 | + if (sz < 2) { |
6248 | + chain->nr = 0; |
6249 | + return; |
6250 | + } |
6251 | |
6252 | - chain->ips[0] = ip; |
6253 | + chain->ips[0] = context; |
6254 | + chain->ips[1] = ip; |
6255 | + |
6256 | + if (!thread || !thread->ts) { |
6257 | + chain->nr = 2; |
6258 | + return; |
6259 | + } |
6260 | + |
6261 | + last_context = context; |
6262 | + |
6263 | + for (i = 2, j = 1; i < sz && j <= thread->ts->cnt; i++, j++) { |
6264 | + ip = thread->ts->stack[thread->ts->cnt - j].ret_addr; |
6265 | + context = callchain_context(ip, kernel_start); |
6266 | + if (context != last_context) { |
6267 | + if (i >= sz - 1) |
6268 | + break; |
6269 | + chain->ips[i++] = context; |
6270 | + last_context = context; |
6271 | + } |
6272 | + chain->ips[i] = ip; |
6273 | + } |
6274 | |
6275 | - for (i = 1; i < chain->nr; i++) |
6276 | - chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr; |
6277 | + chain->nr = i; |
6278 | } |
6279 | |
6280 | struct call_return_processor * |
6281 | diff --git a/tools/perf/util/thread-stack.h b/tools/perf/util/thread-stack.h |
6282 | index b7e41c4ebfdd..f97c00a8c251 100644 |
6283 | --- a/tools/perf/util/thread-stack.h |
6284 | +++ b/tools/perf/util/thread-stack.h |
6285 | @@ -84,7 +84,7 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip, |
6286 | u64 to_ip, u16 insn_len, u64 trace_nr); |
6287 | void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr); |
6288 | void thread_stack__sample(struct thread *thread, struct ip_callchain *chain, |
6289 | - size_t sz, u64 ip); |
6290 | + size_t sz, u64 ip, u64 kernel_start); |
6291 | int thread_stack__flush(struct thread *thread); |
6292 | void thread_stack__free(struct thread *thread); |
6293 | size_t thread_stack__depth(struct thread *thread); |
6294 | diff --git a/tools/testing/selftests/powerpc/tm/tm-tmspr.c b/tools/testing/selftests/powerpc/tm/tm-tmspr.c |
6295 | index 2bda81c7bf23..df1d7d4b1c89 100644 |
6296 | --- a/tools/testing/selftests/powerpc/tm/tm-tmspr.c |
6297 | +++ b/tools/testing/selftests/powerpc/tm/tm-tmspr.c |
6298 | @@ -98,7 +98,7 @@ void texasr(void *in) |
6299 | |
6300 | int test_tmspr() |
6301 | { |
6302 | - pthread_t thread; |
6303 | + pthread_t *thread; |
6304 | int thread_num; |
6305 | unsigned long i; |
6306 | |
6307 | @@ -107,21 +107,28 @@ int test_tmspr() |
6308 | /* To cause some context switching */ |
6309 | thread_num = 10 * sysconf(_SC_NPROCESSORS_ONLN); |
6310 | |
6311 | + thread = malloc(thread_num * sizeof(pthread_t)); |
6312 | + if (thread == NULL) |
6313 | + return EXIT_FAILURE; |
6314 | + |
6315 | /* Test TFIAR and TFHAR */ |
6316 | - for (i = 0 ; i < thread_num ; i += 2){ |
6317 | - if (pthread_create(&thread, NULL, (void*)tfiar_tfhar, (void *)i)) |
6318 | + for (i = 0; i < thread_num; i += 2) { |
6319 | + if (pthread_create(&thread[i], NULL, (void *)tfiar_tfhar, |
6320 | + (void *)i)) |
6321 | return EXIT_FAILURE; |
6322 | } |
6323 | - if (pthread_join(thread, NULL) != 0) |
6324 | - return EXIT_FAILURE; |
6325 | - |
6326 | /* Test TEXASR */ |
6327 | - for (i = 0 ; i < thread_num ; i++){ |
6328 | - if (pthread_create(&thread, NULL, (void*)texasr, (void *)i)) |
6329 | + for (i = 1; i < thread_num; i += 2) { |
6330 | + if (pthread_create(&thread[i], NULL, (void *)texasr, (void *)i)) |
6331 | return EXIT_FAILURE; |
6332 | } |
6333 | - if (pthread_join(thread, NULL) != 0) |
6334 | - return EXIT_FAILURE; |
6335 | + |
6336 | + for (i = 0; i < thread_num; i++) { |
6337 | + if (pthread_join(thread[i], NULL) != 0) |
6338 | + return EXIT_FAILURE; |
6339 | + } |
6340 | + |
6341 | + free(thread); |
6342 | |
6343 | if (passed) |
6344 | return 0; |