Contents of /trunk/kernel-magellan/patches-4.15/0104-4.15.5-all-fixes.patch
Parent Directory | Revision Log
Revision 3088 -
(show annotations)
(download)
Wed Mar 21 14:52:30 2018 UTC (6 years, 6 months ago) by niro
File size: 222027 byte(s)
-linux-4.15.5
1 | diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt |
2 | index a122723907ac..99acc712f83a 100644 |
3 | --- a/Documentation/devicetree/bindings/dma/snps-dma.txt |
4 | +++ b/Documentation/devicetree/bindings/dma/snps-dma.txt |
5 | @@ -64,6 +64,6 @@ Example: |
6 | reg = <0xe0000000 0x1000>; |
7 | interrupts = <0 35 0x4>; |
8 | dmas = <&dmahost 12 0 1>, |
9 | - <&dmahost 13 0 1 0>; |
10 | + <&dmahost 13 1 0>; |
11 | dma-names = "rx", "rx"; |
12 | }; |
13 | diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt |
14 | index 75236c0c2ac2..d081ce0482cc 100644 |
15 | --- a/Documentation/filesystems/ext4.txt |
16 | +++ b/Documentation/filesystems/ext4.txt |
17 | @@ -233,7 +233,7 @@ data_err=ignore(*) Just print an error message if an error occurs |
18 | data_err=abort Abort the journal if an error occurs in a file |
19 | data buffer in ordered mode. |
20 | |
21 | -grpid Give objects the same group ID as their creator. |
22 | +grpid New objects have the group ID of their parent. |
23 | bsdgroups |
24 | |
25 | nogrpid (*) New objects have the group ID of their creator. |
26 | diff --git a/Makefile b/Makefile |
27 | index 8495e1ca052e..28c537fbe328 100644 |
28 | --- a/Makefile |
29 | +++ b/Makefile |
30 | @@ -1,7 +1,7 @@ |
31 | # SPDX-License-Identifier: GPL-2.0 |
32 | VERSION = 4 |
33 | PATCHLEVEL = 15 |
34 | -SUBLEVEL = 4 |
35 | +SUBLEVEL = 5 |
36 | EXTRAVERSION = |
37 | NAME = Fearless Coyote |
38 | |
39 | diff --git a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi |
40 | index 7b8d90b7aeea..29b636fce23f 100644 |
41 | --- a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi |
42 | +++ b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi |
43 | @@ -150,11 +150,6 @@ |
44 | interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>; |
45 | }; |
46 | |
47 | -&charlcd { |
48 | - interrupt-parent = <&intc>; |
49 | - interrupts = <0 IRQ_TYPE_LEVEL_HIGH>; |
50 | -}; |
51 | - |
52 | &serial0 { |
53 | interrupt-parent = <&intc>; |
54 | interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>; |
55 | diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi |
56 | index 06713ec86f0d..d2174727df9a 100644 |
57 | --- a/arch/arm/boot/dts/exynos5410.dtsi |
58 | +++ b/arch/arm/boot/dts/exynos5410.dtsi |
59 | @@ -333,7 +333,6 @@ |
60 | &rtc { |
61 | clocks = <&clock CLK_RTC>; |
62 | clock-names = "rtc"; |
63 | - interrupt-parent = <&pmu_system_controller>; |
64 | status = "disabled"; |
65 | }; |
66 | |
67 | diff --git a/arch/arm/boot/dts/lpc3250-ea3250.dts b/arch/arm/boot/dts/lpc3250-ea3250.dts |
68 | index c43adb7b4d7c..58ea0a4e7afa 100644 |
69 | --- a/arch/arm/boot/dts/lpc3250-ea3250.dts |
70 | +++ b/arch/arm/boot/dts/lpc3250-ea3250.dts |
71 | @@ -156,8 +156,8 @@ |
72 | uda1380: uda1380@18 { |
73 | compatible = "nxp,uda1380"; |
74 | reg = <0x18>; |
75 | - power-gpio = <&gpio 0x59 0>; |
76 | - reset-gpio = <&gpio 0x51 0>; |
77 | + power-gpio = <&gpio 3 10 0>; |
78 | + reset-gpio = <&gpio 3 2 0>; |
79 | dac-clk = "wspll"; |
80 | }; |
81 | |
82 | diff --git a/arch/arm/boot/dts/lpc3250-phy3250.dts b/arch/arm/boot/dts/lpc3250-phy3250.dts |
83 | index c72eb9845603..1e1c2f517a82 100644 |
84 | --- a/arch/arm/boot/dts/lpc3250-phy3250.dts |
85 | +++ b/arch/arm/boot/dts/lpc3250-phy3250.dts |
86 | @@ -81,8 +81,8 @@ |
87 | uda1380: uda1380@18 { |
88 | compatible = "nxp,uda1380"; |
89 | reg = <0x18>; |
90 | - power-gpio = <&gpio 0x59 0>; |
91 | - reset-gpio = <&gpio 0x51 0>; |
92 | + power-gpio = <&gpio 3 10 0>; |
93 | + reset-gpio = <&gpio 3 2 0>; |
94 | dac-clk = "wspll"; |
95 | }; |
96 | |
97 | diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi |
98 | index 965ddfbc9953..05557fce0f1d 100644 |
99 | --- a/arch/arm/boot/dts/mt2701.dtsi |
100 | +++ b/arch/arm/boot/dts/mt2701.dtsi |
101 | @@ -604,6 +604,7 @@ |
102 | compatible = "mediatek,mt2701-hifsys", "syscon"; |
103 | reg = <0 0x1a000000 0 0x1000>; |
104 | #clock-cells = <1>; |
105 | + #reset-cells = <1>; |
106 | }; |
107 | |
108 | usb0: usb@1a1c0000 { |
109 | @@ -688,6 +689,7 @@ |
110 | compatible = "mediatek,mt2701-ethsys", "syscon"; |
111 | reg = <0 0x1b000000 0 0x1000>; |
112 | #clock-cells = <1>; |
113 | + #reset-cells = <1>; |
114 | }; |
115 | |
116 | eth: ethernet@1b100000 { |
117 | diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi |
118 | index 0640fb75bf59..3a442a16ea06 100644 |
119 | --- a/arch/arm/boot/dts/mt7623.dtsi |
120 | +++ b/arch/arm/boot/dts/mt7623.dtsi |
121 | @@ -758,6 +758,7 @@ |
122 | "syscon"; |
123 | reg = <0 0x1b000000 0 0x1000>; |
124 | #clock-cells = <1>; |
125 | + #reset-cells = <1>; |
126 | }; |
127 | |
128 | eth: ethernet@1b100000 { |
129 | diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts |
130 | index 688a86378cee..7bf5aa2237c9 100644 |
131 | --- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts |
132 | +++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts |
133 | @@ -204,7 +204,7 @@ |
134 | bus-width = <4>; |
135 | max-frequency = <50000000>; |
136 | cap-sd-highspeed; |
137 | - cd-gpios = <&pio 261 0>; |
138 | + cd-gpios = <&pio 261 GPIO_ACTIVE_LOW>; |
139 | vmmc-supply = <&mt6323_vmch_reg>; |
140 | vqmmc-supply = <&mt6323_vio18_reg>; |
141 | }; |
142 | diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi |
143 | index 726c5d0dbd5b..b290a5abb901 100644 |
144 | --- a/arch/arm/boot/dts/s5pv210.dtsi |
145 | +++ b/arch/arm/boot/dts/s5pv210.dtsi |
146 | @@ -463,6 +463,7 @@ |
147 | compatible = "samsung,exynos4210-ohci"; |
148 | reg = <0xec300000 0x100>; |
149 | interrupts = <23>; |
150 | + interrupt-parent = <&vic1>; |
151 | clocks = <&clocks CLK_USB_HOST>; |
152 | clock-names = "usbhost"; |
153 | #address-cells = <1>; |
154 | diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts |
155 | index 84101e4eebbf..0f5f379323a8 100644 |
156 | --- a/arch/arm/boot/dts/spear1310-evb.dts |
157 | +++ b/arch/arm/boot/dts/spear1310-evb.dts |
158 | @@ -349,7 +349,7 @@ |
159 | spi0: spi@e0100000 { |
160 | status = "okay"; |
161 | num-cs = <3>; |
162 | - cs-gpios = <&gpio1 7 0>, <&spics 0>, <&spics 1>; |
163 | + cs-gpios = <&gpio1 7 0>, <&spics 0 0>, <&spics 1 0>; |
164 | |
165 | stmpe610@0 { |
166 | compatible = "st,stmpe610"; |
167 | diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi |
168 | index 5f347054527d..d4dbc4098653 100644 |
169 | --- a/arch/arm/boot/dts/spear1340.dtsi |
170 | +++ b/arch/arm/boot/dts/spear1340.dtsi |
171 | @@ -142,8 +142,8 @@ |
172 | reg = <0xb4100000 0x1000>; |
173 | interrupts = <0 105 0x4>; |
174 | status = "disabled"; |
175 | - dmas = <&dwdma0 0x600 0 0 1>, /* 0xC << 11 */ |
176 | - <&dwdma0 0x680 0 1 0>; /* 0xD << 7 */ |
177 | + dmas = <&dwdma0 12 0 1>, |
178 | + <&dwdma0 13 1 0>; |
179 | dma-names = "tx", "rx"; |
180 | }; |
181 | |
182 | diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi |
183 | index 17ea0abcdbd7..086b4b333249 100644 |
184 | --- a/arch/arm/boot/dts/spear13xx.dtsi |
185 | +++ b/arch/arm/boot/dts/spear13xx.dtsi |
186 | @@ -100,7 +100,7 @@ |
187 | reg = <0xb2800000 0x1000>; |
188 | interrupts = <0 29 0x4>; |
189 | status = "disabled"; |
190 | - dmas = <&dwdma0 0 0 0 0>; |
191 | + dmas = <&dwdma0 0 0 0>; |
192 | dma-names = "data"; |
193 | }; |
194 | |
195 | @@ -290,8 +290,8 @@ |
196 | #size-cells = <0>; |
197 | interrupts = <0 31 0x4>; |
198 | status = "disabled"; |
199 | - dmas = <&dwdma0 0x2000 0 0 0>, /* 0x4 << 11 */ |
200 | - <&dwdma0 0x0280 0 0 0>; /* 0x5 << 7 */ |
201 | + dmas = <&dwdma0 4 0 0>, |
202 | + <&dwdma0 5 0 0>; |
203 | dma-names = "tx", "rx"; |
204 | }; |
205 | |
206 | diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi |
207 | index 6b32d20acc9f..00166eb9be86 100644 |
208 | --- a/arch/arm/boot/dts/spear600.dtsi |
209 | +++ b/arch/arm/boot/dts/spear600.dtsi |
210 | @@ -194,6 +194,7 @@ |
211 | rtc: rtc@fc900000 { |
212 | compatible = "st,spear600-rtc"; |
213 | reg = <0xfc900000 0x1000>; |
214 | + interrupt-parent = <&vic0>; |
215 | interrupts = <10>; |
216 | status = "disabled"; |
217 | }; |
218 | diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi |
219 | index 68aab50a73ab..733678b75b88 100644 |
220 | --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi |
221 | +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi |
222 | @@ -750,6 +750,7 @@ |
223 | reg = <0x10120000 0x1000>; |
224 | interrupt-names = "combined"; |
225 | interrupts = <14>; |
226 | + interrupt-parent = <&vica>; |
227 | clocks = <&clcdclk>, <&hclkclcd>; |
228 | clock-names = "clcdclk", "apb_pclk"; |
229 | status = "disabled"; |
230 | diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi |
231 | index fa149837df14..11fdecd9312e 100644 |
232 | --- a/arch/arm/boot/dts/stih407.dtsi |
233 | +++ b/arch/arm/boot/dts/stih407.dtsi |
234 | @@ -8,6 +8,7 @@ |
235 | */ |
236 | #include "stih407-clock.dtsi" |
237 | #include "stih407-family.dtsi" |
238 | +#include <dt-bindings/gpio/gpio.h> |
239 | / { |
240 | soc { |
241 | sti-display-subsystem { |
242 | @@ -122,7 +123,7 @@ |
243 | <&clk_s_d2_quadfs 0>, |
244 | <&clk_s_d2_quadfs 1>; |
245 | |
246 | - hdmi,hpd-gpio = <&pio5 3>; |
247 | + hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>; |
248 | reset-names = "hdmi"; |
249 | resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; |
250 | ddc = <&hdmiddc>; |
251 | diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi |
252 | index cffa50db5d72..68b5ff91d6a7 100644 |
253 | --- a/arch/arm/boot/dts/stih410.dtsi |
254 | +++ b/arch/arm/boot/dts/stih410.dtsi |
255 | @@ -9,6 +9,7 @@ |
256 | #include "stih410-clock.dtsi" |
257 | #include "stih407-family.dtsi" |
258 | #include "stih410-pinctrl.dtsi" |
259 | +#include <dt-bindings/gpio/gpio.h> |
260 | / { |
261 | aliases { |
262 | bdisp0 = &bdisp0; |
263 | @@ -213,7 +214,7 @@ |
264 | <&clk_s_d2_quadfs 0>, |
265 | <&clk_s_d2_quadfs 1>; |
266 | |
267 | - hdmi,hpd-gpio = <&pio5 3>; |
268 | + hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>; |
269 | reset-names = "hdmi"; |
270 | resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; |
271 | ddc = <&hdmiddc>; |
272 | diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c |
273 | index 107f37210fb9..83606087edc7 100644 |
274 | --- a/arch/arm/mach-pxa/tosa-bt.c |
275 | +++ b/arch/arm/mach-pxa/tosa-bt.c |
276 | @@ -132,3 +132,7 @@ static struct platform_driver tosa_bt_driver = { |
277 | }, |
278 | }; |
279 | module_platform_driver(tosa_bt_driver); |
280 | + |
281 | +MODULE_LICENSE("GPL"); |
282 | +MODULE_AUTHOR("Dmitry Baryshkov"); |
283 | +MODULE_DESCRIPTION("Bluetooth built-in chip control"); |
284 | diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi |
285 | index 6b2127a6ced1..b84c0ca4f84a 100644 |
286 | --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi |
287 | +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi |
288 | @@ -906,6 +906,7 @@ |
289 | "dsi_phy_regulator"; |
290 | |
291 | #clock-cells = <1>; |
292 | + #phy-cells = <0>; |
293 | |
294 | clocks = <&gcc GCC_MDSS_AHB_CLK>; |
295 | clock-names = "iface_clk"; |
296 | @@ -1435,8 +1436,8 @@ |
297 | #address-cells = <1>; |
298 | #size-cells = <0>; |
299 | |
300 | - qcom,ipc-1 = <&apcs 0 13>; |
301 | - qcom,ipc-6 = <&apcs 0 19>; |
302 | + qcom,ipc-1 = <&apcs 8 13>; |
303 | + qcom,ipc-3 = <&apcs 8 19>; |
304 | |
305 | apps_smsm: apps@0 { |
306 | reg = <0>; |
307 | diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c |
308 | index 07823595b7f0..52f15cd896e1 100644 |
309 | --- a/arch/arm64/kernel/cpu_errata.c |
310 | +++ b/arch/arm64/kernel/cpu_errata.c |
311 | @@ -406,6 +406,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = { |
312 | .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, |
313 | MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), |
314 | }, |
315 | + { |
316 | + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
317 | + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), |
318 | + .enable = qcom_enable_link_stack_sanitization, |
319 | + }, |
320 | + { |
321 | + .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, |
322 | + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), |
323 | + }, |
324 | { |
325 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
326 | MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), |
327 | diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c |
328 | index 0b5ab4d8b57d..30b5495b82b5 100644 |
329 | --- a/arch/arm64/kvm/hyp/switch.c |
330 | +++ b/arch/arm64/kvm/hyp/switch.c |
331 | @@ -400,8 +400,10 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) |
332 | u32 midr = read_cpuid_id(); |
333 | |
334 | /* Apply BTAC predictors mitigation to all Falkor chips */ |
335 | - if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1) |
336 | + if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) || |
337 | + ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) { |
338 | __qcom_hyp_sanitize_btac_predictors(); |
339 | + } |
340 | } |
341 | |
342 | fp_enabled = __fpsimd_enabled(); |
343 | diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S |
344 | index 08572f95bd8a..248f2e7b24ab 100644 |
345 | --- a/arch/arm64/mm/proc.S |
346 | +++ b/arch/arm64/mm/proc.S |
347 | @@ -189,7 +189,8 @@ ENDPROC(idmap_cpu_replace_ttbr1) |
348 | dc cvac, cur_\()\type\()p // Ensure any existing dirty |
349 | dmb sy // lines are written back before |
350 | ldr \type, [cur_\()\type\()p] // loading the entry |
351 | - tbz \type, #0, next_\()\type // Skip invalid entries |
352 | + tbz \type, #0, skip_\()\type // Skip invalid and |
353 | + tbnz \type, #11, skip_\()\type // non-global entries |
354 | .endm |
355 | |
356 | .macro __idmap_kpti_put_pgtable_ent_ng, type |
357 | @@ -249,8 +250,9 @@ ENTRY(idmap_kpti_install_ng_mappings) |
358 | add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8) |
359 | do_pgd: __idmap_kpti_get_pgtable_ent pgd |
360 | tbnz pgd, #1, walk_puds |
361 | - __idmap_kpti_put_pgtable_ent_ng pgd |
362 | next_pgd: |
363 | + __idmap_kpti_put_pgtable_ent_ng pgd |
364 | +skip_pgd: |
365 | add cur_pgdp, cur_pgdp, #8 |
366 | cmp cur_pgdp, end_pgdp |
367 | b.ne do_pgd |
368 | @@ -278,8 +280,9 @@ walk_puds: |
369 | add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8) |
370 | do_pud: __idmap_kpti_get_pgtable_ent pud |
371 | tbnz pud, #1, walk_pmds |
372 | - __idmap_kpti_put_pgtable_ent_ng pud |
373 | next_pud: |
374 | + __idmap_kpti_put_pgtable_ent_ng pud |
375 | +skip_pud: |
376 | add cur_pudp, cur_pudp, 8 |
377 | cmp cur_pudp, end_pudp |
378 | b.ne do_pud |
379 | @@ -298,8 +301,9 @@ walk_pmds: |
380 | add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8) |
381 | do_pmd: __idmap_kpti_get_pgtable_ent pmd |
382 | tbnz pmd, #1, walk_ptes |
383 | - __idmap_kpti_put_pgtable_ent_ng pmd |
384 | next_pmd: |
385 | + __idmap_kpti_put_pgtable_ent_ng pmd |
386 | +skip_pmd: |
387 | add cur_pmdp, cur_pmdp, #8 |
388 | cmp cur_pmdp, end_pmdp |
389 | b.ne do_pmd |
390 | @@ -317,7 +321,7 @@ walk_ptes: |
391 | add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8) |
392 | do_pte: __idmap_kpti_get_pgtable_ent pte |
393 | __idmap_kpti_put_pgtable_ent_ng pte |
394 | -next_pte: |
395 | +skip_pte: |
396 | add cur_ptep, cur_ptep, #8 |
397 | cmp cur_ptep, end_ptep |
398 | b.ne do_pte |
399 | diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig |
400 | index 8e0b3702f1c0..efaa3b130f4d 100644 |
401 | --- a/arch/mips/Kconfig |
402 | +++ b/arch/mips/Kconfig |
403 | @@ -119,12 +119,12 @@ config MIPS_GENERIC |
404 | select SYS_SUPPORTS_MULTITHREADING |
405 | select SYS_SUPPORTS_RELOCATABLE |
406 | select SYS_SUPPORTS_SMARTMIPS |
407 | - select USB_EHCI_BIG_ENDIAN_DESC if BIG_ENDIAN |
408 | - select USB_EHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN |
409 | - select USB_OHCI_BIG_ENDIAN_DESC if BIG_ENDIAN |
410 | - select USB_OHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN |
411 | - select USB_UHCI_BIG_ENDIAN_DESC if BIG_ENDIAN |
412 | - select USB_UHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN |
413 | + select USB_EHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN |
414 | + select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN |
415 | + select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN |
416 | + select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN |
417 | + select USB_UHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN |
418 | + select USB_UHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN |
419 | select USE_OF |
420 | help |
421 | Select this to build a kernel which aims to support multiple boards, |
422 | diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S |
423 | index e68e6e04063a..1025f937ab0e 100644 |
424 | --- a/arch/mips/kernel/cps-vec.S |
425 | +++ b/arch/mips/kernel/cps-vec.S |
426 | @@ -388,15 +388,16 @@ LEAF(mips_cps_boot_vpes) |
427 | |
428 | #elif defined(CONFIG_MIPS_MT) |
429 | |
430 | - .set push |
431 | - .set MIPS_ISA_LEVEL_RAW |
432 | - .set mt |
433 | - |
434 | /* If the core doesn't support MT then return */ |
435 | has_mt t0, 5f |
436 | |
437 | /* Enter VPE configuration state */ |
438 | + .set push |
439 | + .set MIPS_ISA_LEVEL_RAW |
440 | + .set mt |
441 | dvpe |
442 | + .set pop |
443 | + |
444 | PTR_LA t1, 1f |
445 | jr.hb t1 |
446 | nop |
447 | @@ -422,6 +423,10 @@ LEAF(mips_cps_boot_vpes) |
448 | mtc0 t0, CP0_VPECONTROL |
449 | ehb |
450 | |
451 | + .set push |
452 | + .set MIPS_ISA_LEVEL_RAW |
453 | + .set mt |
454 | + |
455 | /* Skip the VPE if its TC is not halted */ |
456 | mftc0 t0, CP0_TCHALT |
457 | beqz t0, 2f |
458 | @@ -495,6 +500,8 @@ LEAF(mips_cps_boot_vpes) |
459 | ehb |
460 | evpe |
461 | |
462 | + .set pop |
463 | + |
464 | /* Check whether this VPE is meant to be running */ |
465 | li t0, 1 |
466 | sll t0, t0, a1 |
467 | @@ -509,7 +516,7 @@ LEAF(mips_cps_boot_vpes) |
468 | 1: jr.hb t0 |
469 | nop |
470 | |
471 | -2: .set pop |
472 | +2: |
473 | |
474 | #endif /* CONFIG_MIPS_MT_SMP */ |
475 | |
476 | diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c |
477 | index 702c678de116..e4a1581ce822 100644 |
478 | --- a/arch/mips/kernel/setup.c |
479 | +++ b/arch/mips/kernel/setup.c |
480 | @@ -375,6 +375,7 @@ static void __init bootmem_init(void) |
481 | unsigned long reserved_end; |
482 | unsigned long mapstart = ~0UL; |
483 | unsigned long bootmap_size; |
484 | + phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX; |
485 | bool bootmap_valid = false; |
486 | int i; |
487 | |
488 | @@ -395,7 +396,8 @@ static void __init bootmem_init(void) |
489 | max_low_pfn = 0; |
490 | |
491 | /* |
492 | - * Find the highest page frame number we have available. |
493 | + * Find the highest page frame number we have available |
494 | + * and the lowest used RAM address |
495 | */ |
496 | for (i = 0; i < boot_mem_map.nr_map; i++) { |
497 | unsigned long start, end; |
498 | @@ -407,6 +409,8 @@ static void __init bootmem_init(void) |
499 | end = PFN_DOWN(boot_mem_map.map[i].addr |
500 | + boot_mem_map.map[i].size); |
501 | |
502 | + ramstart = min(ramstart, boot_mem_map.map[i].addr); |
503 | + |
504 | #ifndef CONFIG_HIGHMEM |
505 | /* |
506 | * Skip highmem here so we get an accurate max_low_pfn if low |
507 | @@ -436,6 +440,13 @@ static void __init bootmem_init(void) |
508 | mapstart = max(reserved_end, start); |
509 | } |
510 | |
511 | + /* |
512 | + * Reserve any memory between the start of RAM and PHYS_OFFSET |
513 | + */ |
514 | + if (ramstart > PHYS_OFFSET) |
515 | + add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET, |
516 | + BOOT_MEM_RESERVED); |
517 | + |
518 | if (min_low_pfn >= max_low_pfn) |
519 | panic("Incorrect memory mapping !!!"); |
520 | if (min_low_pfn > ARCH_PFN_OFFSET) { |
521 | @@ -664,9 +675,6 @@ static int __init early_parse_mem(char *p) |
522 | |
523 | add_memory_region(start, size, BOOT_MEM_RAM); |
524 | |
525 | - if (start && start > PHYS_OFFSET) |
526 | - add_memory_region(PHYS_OFFSET, start - PHYS_OFFSET, |
527 | - BOOT_MEM_RESERVED); |
528 | return 0; |
529 | } |
530 | early_param("mem", early_parse_mem); |
531 | diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h |
532 | index 88187c285c70..1c02e6900f78 100644 |
533 | --- a/arch/powerpc/include/asm/topology.h |
534 | +++ b/arch/powerpc/include/asm/topology.h |
535 | @@ -44,6 +44,11 @@ extern int sysfs_add_device_to_node(struct device *dev, int nid); |
536 | extern void sysfs_remove_device_from_node(struct device *dev, int nid); |
537 | extern int numa_update_cpu_topology(bool cpus_locked); |
538 | |
539 | +static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) |
540 | +{ |
541 | + numa_cpu_lookup_table[cpu] = node; |
542 | +} |
543 | + |
544 | static inline int early_cpu_to_node(int cpu) |
545 | { |
546 | int nid; |
547 | diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c |
548 | index 72be0c32e902..2010e4c827b7 100644 |
549 | --- a/arch/powerpc/kernel/process.c |
550 | +++ b/arch/powerpc/kernel/process.c |
551 | @@ -1509,14 +1509,15 @@ static int assign_thread_tidr(void) |
552 | { |
553 | int index; |
554 | int err; |
555 | + unsigned long flags; |
556 | |
557 | again: |
558 | if (!ida_pre_get(&vas_thread_ida, GFP_KERNEL)) |
559 | return -ENOMEM; |
560 | |
561 | - spin_lock(&vas_thread_id_lock); |
562 | + spin_lock_irqsave(&vas_thread_id_lock, flags); |
563 | err = ida_get_new_above(&vas_thread_ida, 1, &index); |
564 | - spin_unlock(&vas_thread_id_lock); |
565 | + spin_unlock_irqrestore(&vas_thread_id_lock, flags); |
566 | |
567 | if (err == -EAGAIN) |
568 | goto again; |
569 | @@ -1524,9 +1525,9 @@ static int assign_thread_tidr(void) |
570 | return err; |
571 | |
572 | if (index > MAX_THREAD_CONTEXT) { |
573 | - spin_lock(&vas_thread_id_lock); |
574 | + spin_lock_irqsave(&vas_thread_id_lock, flags); |
575 | ida_remove(&vas_thread_ida, index); |
576 | - spin_unlock(&vas_thread_id_lock); |
577 | + spin_unlock_irqrestore(&vas_thread_id_lock, flags); |
578 | return -ENOMEM; |
579 | } |
580 | |
581 | @@ -1535,9 +1536,11 @@ static int assign_thread_tidr(void) |
582 | |
583 | static void free_thread_tidr(int id) |
584 | { |
585 | - spin_lock(&vas_thread_id_lock); |
586 | + unsigned long flags; |
587 | + |
588 | + spin_lock_irqsave(&vas_thread_id_lock, flags); |
589 | ida_remove(&vas_thread_ida, id); |
590 | - spin_unlock(&vas_thread_id_lock); |
591 | + spin_unlock_irqrestore(&vas_thread_id_lock, flags); |
592 | } |
593 | |
594 | /* |
595 | diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c |
596 | index adb6364f4091..09be66fcea68 100644 |
597 | --- a/arch/powerpc/mm/numa.c |
598 | +++ b/arch/powerpc/mm/numa.c |
599 | @@ -142,11 +142,6 @@ static void reset_numa_cpu_lookup_table(void) |
600 | numa_cpu_lookup_table[cpu] = -1; |
601 | } |
602 | |
603 | -static void update_numa_cpu_lookup_table(unsigned int cpu, int node) |
604 | -{ |
605 | - numa_cpu_lookup_table[cpu] = node; |
606 | -} |
607 | - |
608 | static void map_cpu_to_node(int cpu, int node) |
609 | { |
610 | update_numa_cpu_lookup_table(cpu, node); |
611 | diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c |
612 | index cfbbee941a76..17ae5c15a9e0 100644 |
613 | --- a/arch/powerpc/mm/pgtable-radix.c |
614 | +++ b/arch/powerpc/mm/pgtable-radix.c |
615 | @@ -17,6 +17,7 @@ |
616 | #include <linux/of_fdt.h> |
617 | #include <linux/mm.h> |
618 | #include <linux/string_helpers.h> |
619 | +#include <linux/stop_machine.h> |
620 | |
621 | #include <asm/pgtable.h> |
622 | #include <asm/pgalloc.h> |
623 | @@ -671,6 +672,30 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud) |
624 | pud_clear(pud); |
625 | } |
626 | |
627 | +struct change_mapping_params { |
628 | + pte_t *pte; |
629 | + unsigned long start; |
630 | + unsigned long end; |
631 | + unsigned long aligned_start; |
632 | + unsigned long aligned_end; |
633 | +}; |
634 | + |
635 | +static int stop_machine_change_mapping(void *data) |
636 | +{ |
637 | + struct change_mapping_params *params = |
638 | + (struct change_mapping_params *)data; |
639 | + |
640 | + if (!data) |
641 | + return -1; |
642 | + |
643 | + spin_unlock(&init_mm.page_table_lock); |
644 | + pte_clear(&init_mm, params->aligned_start, params->pte); |
645 | + create_physical_mapping(params->aligned_start, params->start); |
646 | + create_physical_mapping(params->end, params->aligned_end); |
647 | + spin_lock(&init_mm.page_table_lock); |
648 | + return 0; |
649 | +} |
650 | + |
651 | static void remove_pte_table(pte_t *pte_start, unsigned long addr, |
652 | unsigned long end) |
653 | { |
654 | @@ -699,6 +724,52 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr, |
655 | } |
656 | } |
657 | |
658 | +/* |
659 | + * clear the pte and potentially split the mapping helper |
660 | + */ |
661 | +static void split_kernel_mapping(unsigned long addr, unsigned long end, |
662 | + unsigned long size, pte_t *pte) |
663 | +{ |
664 | + unsigned long mask = ~(size - 1); |
665 | + unsigned long aligned_start = addr & mask; |
666 | + unsigned long aligned_end = addr + size; |
667 | + struct change_mapping_params params; |
668 | + bool split_region = false; |
669 | + |
670 | + if ((end - addr) < size) { |
671 | + /* |
672 | + * We're going to clear the PTE, but not flushed |
673 | + * the mapping, time to remap and flush. The |
674 | + * effects if visible outside the processor or |
675 | + * if we are running in code close to the |
676 | + * mapping we cleared, we are in trouble. |
677 | + */ |
678 | + if (overlaps_kernel_text(aligned_start, addr) || |
679 | + overlaps_kernel_text(end, aligned_end)) { |
680 | + /* |
681 | + * Hack, just return, don't pte_clear |
682 | + */ |
683 | + WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel " |
684 | + "text, not splitting\n", addr, end); |
685 | + return; |
686 | + } |
687 | + split_region = true; |
688 | + } |
689 | + |
690 | + if (split_region) { |
691 | + params.pte = pte; |
692 | + params.start = addr; |
693 | + params.end = end; |
694 | + params.aligned_start = addr & ~(size - 1); |
695 | + params.aligned_end = min_t(unsigned long, aligned_end, |
696 | + (unsigned long)__va(memblock_end_of_DRAM())); |
697 | + stop_machine(stop_machine_change_mapping, ¶ms, NULL); |
698 | + return; |
699 | + } |
700 | + |
701 | + pte_clear(&init_mm, addr, pte); |
702 | +} |
703 | + |
704 | static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr, |
705 | unsigned long end) |
706 | { |
707 | @@ -714,13 +785,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr, |
708 | continue; |
709 | |
710 | if (pmd_huge(*pmd)) { |
711 | - if (!IS_ALIGNED(addr, PMD_SIZE) || |
712 | - !IS_ALIGNED(next, PMD_SIZE)) { |
713 | - WARN_ONCE(1, "%s: unaligned range\n", __func__); |
714 | - continue; |
715 | - } |
716 | - |
717 | - pte_clear(&init_mm, addr, (pte_t *)pmd); |
718 | + split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd); |
719 | continue; |
720 | } |
721 | |
722 | @@ -745,13 +810,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr, |
723 | continue; |
724 | |
725 | if (pud_huge(*pud)) { |
726 | - if (!IS_ALIGNED(addr, PUD_SIZE) || |
727 | - !IS_ALIGNED(next, PUD_SIZE)) { |
728 | - WARN_ONCE(1, "%s: unaligned range\n", __func__); |
729 | - continue; |
730 | - } |
731 | - |
732 | - pte_clear(&init_mm, addr, (pte_t *)pud); |
733 | + split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud); |
734 | continue; |
735 | } |
736 | |
737 | @@ -777,13 +836,7 @@ static void remove_pagetable(unsigned long start, unsigned long end) |
738 | continue; |
739 | |
740 | if (pgd_huge(*pgd)) { |
741 | - if (!IS_ALIGNED(addr, PGDIR_SIZE) || |
742 | - !IS_ALIGNED(next, PGDIR_SIZE)) { |
743 | - WARN_ONCE(1, "%s: unaligned range\n", __func__); |
744 | - continue; |
745 | - } |
746 | - |
747 | - pte_clear(&init_mm, addr, (pte_t *)pgd); |
748 | + split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd); |
749 | continue; |
750 | } |
751 | |
752 | diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c |
753 | index 813ea22c3e00..eec1367c2f32 100644 |
754 | --- a/arch/powerpc/mm/pgtable_64.c |
755 | +++ b/arch/powerpc/mm/pgtable_64.c |
756 | @@ -483,6 +483,8 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0, |
757 | if (old & PATB_HR) { |
758 | asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : : |
759 | "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); |
760 | + asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : : |
761 | + "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); |
762 | trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1); |
763 | } else { |
764 | asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : : |
765 | diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c |
766 | index 884f4b705b57..913a2b81b177 100644 |
767 | --- a/arch/powerpc/mm/tlb-radix.c |
768 | +++ b/arch/powerpc/mm/tlb-radix.c |
769 | @@ -600,14 +600,12 @@ void radix__flush_tlb_all(void) |
770 | */ |
771 | asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) |
772 | : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory"); |
773 | - trace_tlbie(0, 0, rb, rs, ric, prs, r); |
774 | /* |
775 | * now flush host entires by passing PRS = 0 and LPID == 0 |
776 | */ |
777 | asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) |
778 | : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory"); |
779 | asm volatile("eieio; tlbsync; ptesync": : :"memory"); |
780 | - trace_tlbie(0, 0, rb, 0, ric, prs, r); |
781 | } |
782 | |
783 | void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm, |
784 | diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c |
785 | index 2b3eb01ab110..b7c53a51c31b 100644 |
786 | --- a/arch/powerpc/platforms/powernv/vas-window.c |
787 | +++ b/arch/powerpc/platforms/powernv/vas-window.c |
788 | @@ -1063,16 +1063,16 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop, |
789 | rc = PTR_ERR(txwin->paste_kaddr); |
790 | goto free_window; |
791 | } |
792 | + } else { |
793 | + /* |
794 | + * A user mapping must ensure that context switch issues |
795 | + * CP_ABORT for this thread. |
796 | + */ |
797 | + rc = set_thread_uses_vas(); |
798 | + if (rc) |
799 | + goto free_window; |
800 | } |
801 | |
802 | - /* |
803 | - * Now that we have a send window, ensure context switch issues |
804 | - * CP_ABORT for this thread. |
805 | - */ |
806 | - rc = -EINVAL; |
807 | - if (set_thread_uses_vas() < 0) |
808 | - goto free_window; |
809 | - |
810 | set_vinst_win(vinst, txwin); |
811 | |
812 | return txwin; |
813 | diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c |
814 | index a7d14aa7bb7c..09083ad82f7a 100644 |
815 | --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c |
816 | +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c |
817 | @@ -36,6 +36,7 @@ |
818 | #include <asm/xics.h> |
819 | #include <asm/xive.h> |
820 | #include <asm/plpar_wrappers.h> |
821 | +#include <asm/topology.h> |
822 | |
823 | #include "pseries.h" |
824 | #include "offline_states.h" |
825 | @@ -331,6 +332,7 @@ static void pseries_remove_processor(struct device_node *np) |
826 | BUG_ON(cpu_online(cpu)); |
827 | set_cpu_present(cpu, false); |
828 | set_hard_smp_processor_id(cpu, -1); |
829 | + update_numa_cpu_lookup_table(cpu, -1); |
830 | break; |
831 | } |
832 | if (cpu >= nr_cpu_ids) |
833 | diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c |
834 | index d9c4c9366049..091f1d0d0af1 100644 |
835 | --- a/arch/powerpc/sysdev/xive/spapr.c |
836 | +++ b/arch/powerpc/sysdev/xive/spapr.c |
837 | @@ -356,7 +356,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio, |
838 | |
839 | rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size); |
840 | if (rc) { |
841 | - pr_err("Error %lld getting queue info prio %d\n", rc, prio); |
842 | + pr_err("Error %lld getting queue info CPU %d prio %d\n", rc, |
843 | + target, prio); |
844 | rc = -EIO; |
845 | goto fail; |
846 | } |
847 | @@ -370,7 +371,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio, |
848 | /* Configure and enable the queue in HW */ |
849 | rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order); |
850 | if (rc) { |
851 | - pr_err("Error %lld setting queue for prio %d\n", rc, prio); |
852 | + pr_err("Error %lld setting queue for CPU %d prio %d\n", rc, |
853 | + target, prio); |
854 | rc = -EIO; |
855 | } else { |
856 | q->qpage = qpage; |
857 | @@ -389,8 +391,8 @@ static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc, |
858 | if (IS_ERR(qpage)) |
859 | return PTR_ERR(qpage); |
860 | |
861 | - return xive_spapr_configure_queue(cpu, q, prio, qpage, |
862 | - xive_queue_shift); |
863 | + return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu), |
864 | + q, prio, qpage, xive_queue_shift); |
865 | } |
866 | |
867 | static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, |
868 | @@ -399,10 +401,12 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, |
869 | struct xive_q *q = &xc->queue[prio]; |
870 | unsigned int alloc_order; |
871 | long rc; |
872 | + int hw_cpu = get_hard_smp_processor_id(cpu); |
873 | |
874 | - rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0); |
875 | + rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0); |
876 | if (rc) |
877 | - pr_err("Error %ld setting queue for prio %d\n", rc, prio); |
878 | + pr_err("Error %ld setting queue for CPU %d prio %d\n", rc, |
879 | + hw_cpu, prio); |
880 | |
881 | alloc_order = xive_alloc_order(xive_queue_shift); |
882 | free_pages((unsigned long)q->qpage, alloc_order); |
883 | diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c |
884 | index 59eea9c65d3e..79b7a3438d54 100644 |
885 | --- a/arch/s390/kernel/compat_linux.c |
886 | +++ b/arch/s390/kernel/compat_linux.c |
887 | @@ -110,7 +110,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid) |
888 | |
889 | COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid) |
890 | { |
891 | - return sys_setgid((gid_t)gid); |
892 | + return sys_setgid(low2highgid(gid)); |
893 | } |
894 | |
895 | COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid) |
896 | @@ -120,7 +120,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid) |
897 | |
898 | COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid) |
899 | { |
900 | - return sys_setuid((uid_t)uid); |
901 | + return sys_setuid(low2highuid(uid)); |
902 | } |
903 | |
904 | COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid) |
905 | @@ -173,12 +173,12 @@ COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp, |
906 | |
907 | COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid) |
908 | { |
909 | - return sys_setfsuid((uid_t)uid); |
910 | + return sys_setfsuid(low2highuid(uid)); |
911 | } |
912 | |
913 | COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid) |
914 | { |
915 | - return sys_setfsgid((gid_t)gid); |
916 | + return sys_setfsgid(low2highgid(gid)); |
917 | } |
918 | |
919 | static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info) |
920 | diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h |
921 | index 3f48f695d5e6..dce7092ab24a 100644 |
922 | --- a/arch/x86/entry/calling.h |
923 | +++ b/arch/x86/entry/calling.h |
924 | @@ -97,80 +97,69 @@ For 32-bit we have the following conventions - kernel is built with |
925 | |
926 | #define SIZEOF_PTREGS 21*8 |
927 | |
928 | - .macro ALLOC_PT_GPREGS_ON_STACK |
929 | - addq $-(15*8), %rsp |
930 | - .endm |
931 | +.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax |
932 | + /* |
933 | + * Push registers and sanitize registers of values that a |
934 | + * speculation attack might otherwise want to exploit. The |
935 | + * lower registers are likely clobbered well before they |
936 | + * could be put to use in a speculative execution gadget. |
937 | + * Interleave XOR with PUSH for better uop scheduling: |
938 | + */ |
939 | + pushq %rdi /* pt_regs->di */ |
940 | + pushq %rsi /* pt_regs->si */ |
941 | + pushq \rdx /* pt_regs->dx */ |
942 | + pushq %rcx /* pt_regs->cx */ |
943 | + pushq \rax /* pt_regs->ax */ |
944 | + pushq %r8 /* pt_regs->r8 */ |
945 | + xorq %r8, %r8 /* nospec r8 */ |
946 | + pushq %r9 /* pt_regs->r9 */ |
947 | + xorq %r9, %r9 /* nospec r9 */ |
948 | + pushq %r10 /* pt_regs->r10 */ |
949 | + xorq %r10, %r10 /* nospec r10 */ |
950 | + pushq %r11 /* pt_regs->r11 */ |
951 | + xorq %r11, %r11 /* nospec r11*/ |
952 | + pushq %rbx /* pt_regs->rbx */ |
953 | + xorl %ebx, %ebx /* nospec rbx*/ |
954 | + pushq %rbp /* pt_regs->rbp */ |
955 | + xorl %ebp, %ebp /* nospec rbp*/ |
956 | + pushq %r12 /* pt_regs->r12 */ |
957 | + xorq %r12, %r12 /* nospec r12*/ |
958 | + pushq %r13 /* pt_regs->r13 */ |
959 | + xorq %r13, %r13 /* nospec r13*/ |
960 | + pushq %r14 /* pt_regs->r14 */ |
961 | + xorq %r14, %r14 /* nospec r14*/ |
962 | + pushq %r15 /* pt_regs->r15 */ |
963 | + xorq %r15, %r15 /* nospec r15*/ |
964 | + UNWIND_HINT_REGS |
965 | +.endm |
966 | |
967 | - .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1 |
968 | - .if \r11 |
969 | - movq %r11, 6*8+\offset(%rsp) |
970 | - .endif |
971 | - .if \r8910 |
972 | - movq %r10, 7*8+\offset(%rsp) |
973 | - movq %r9, 8*8+\offset(%rsp) |
974 | - movq %r8, 9*8+\offset(%rsp) |
975 | - .endif |
976 | - .if \rax |
977 | - movq %rax, 10*8+\offset(%rsp) |
978 | - .endif |
979 | - .if \rcx |
980 | - movq %rcx, 11*8+\offset(%rsp) |
981 | - .endif |
982 | - movq %rdx, 12*8+\offset(%rsp) |
983 | - movq %rsi, 13*8+\offset(%rsp) |
984 | - movq %rdi, 14*8+\offset(%rsp) |
985 | - UNWIND_HINT_REGS offset=\offset extra=0 |
986 | - .endm |
987 | - .macro SAVE_C_REGS offset=0 |
988 | - SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1 |
989 | - .endm |
990 | - .macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0 |
991 | - SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1 |
992 | - .endm |
993 | - .macro SAVE_C_REGS_EXCEPT_R891011 |
994 | - SAVE_C_REGS_HELPER 0, 1, 1, 0, 0 |
995 | - .endm |
996 | - .macro SAVE_C_REGS_EXCEPT_RCX_R891011 |
997 | - SAVE_C_REGS_HELPER 0, 1, 0, 0, 0 |
998 | - .endm |
999 | - .macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11 |
1000 | - SAVE_C_REGS_HELPER 0, 0, 0, 1, 0 |
1001 | - .endm |
1002 | - |
1003 | - .macro SAVE_EXTRA_REGS offset=0 |
1004 | - movq %r15, 0*8+\offset(%rsp) |
1005 | - movq %r14, 1*8+\offset(%rsp) |
1006 | - movq %r13, 2*8+\offset(%rsp) |
1007 | - movq %r12, 3*8+\offset(%rsp) |
1008 | - movq %rbp, 4*8+\offset(%rsp) |
1009 | - movq %rbx, 5*8+\offset(%rsp) |
1010 | - UNWIND_HINT_REGS offset=\offset |
1011 | - .endm |
1012 | - |
1013 | - .macro POP_EXTRA_REGS |
1014 | +.macro POP_REGS pop_rdi=1 skip_r11rcx=0 |
1015 | popq %r15 |
1016 | popq %r14 |
1017 | popq %r13 |
1018 | popq %r12 |
1019 | popq %rbp |
1020 | popq %rbx |
1021 | - .endm |
1022 | - |
1023 | - .macro POP_C_REGS |
1024 | + .if \skip_r11rcx |
1025 | + popq %rsi |
1026 | + .else |
1027 | popq %r11 |
1028 | + .endif |
1029 | popq %r10 |
1030 | popq %r9 |
1031 | popq %r8 |
1032 | popq %rax |
1033 | + .if \skip_r11rcx |
1034 | + popq %rsi |
1035 | + .else |
1036 | popq %rcx |
1037 | + .endif |
1038 | popq %rdx |
1039 | popq %rsi |
1040 | + .if \pop_rdi |
1041 | popq %rdi |
1042 | - .endm |
1043 | - |
1044 | - .macro icebp |
1045 | - .byte 0xf1 |
1046 | - .endm |
1047 | + .endif |
1048 | +.endm |
1049 | |
1050 | /* |
1051 | * This is a sneaky trick to help the unwinder find pt_regs on the stack. The |
1052 | @@ -178,7 +167,7 @@ For 32-bit we have the following conventions - kernel is built with |
1053 | * is just setting the LSB, which makes it an invalid stack address and is also |
1054 | * a signal to the unwinder that it's a pt_regs pointer in disguise. |
1055 | * |
1056 | - * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts |
1057 | + * NOTE: This macro must be used *after* PUSH_AND_CLEAR_REGS because it corrupts |
1058 | * the original rbp. |
1059 | */ |
1060 | .macro ENCODE_FRAME_POINTER ptregs_offset=0 |
1061 | diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S |
1062 | index c752abe89d80..4fd9044e72e7 100644 |
1063 | --- a/arch/x86/entry/entry_64.S |
1064 | +++ b/arch/x86/entry/entry_64.S |
1065 | @@ -213,7 +213,7 @@ ENTRY(entry_SYSCALL_64) |
1066 | |
1067 | swapgs |
1068 | /* |
1069 | - * This path is not taken when PAGE_TABLE_ISOLATION is disabled so it |
1070 | + * This path is only taken when PAGE_TABLE_ISOLATION is disabled so it |
1071 | * is not required to switch CR3. |
1072 | */ |
1073 | movq %rsp, PER_CPU_VAR(rsp_scratch) |
1074 | @@ -227,22 +227,8 @@ ENTRY(entry_SYSCALL_64) |
1075 | pushq %rcx /* pt_regs->ip */ |
1076 | GLOBAL(entry_SYSCALL_64_after_hwframe) |
1077 | pushq %rax /* pt_regs->orig_ax */ |
1078 | - pushq %rdi /* pt_regs->di */ |
1079 | - pushq %rsi /* pt_regs->si */ |
1080 | - pushq %rdx /* pt_regs->dx */ |
1081 | - pushq %rcx /* pt_regs->cx */ |
1082 | - pushq $-ENOSYS /* pt_regs->ax */ |
1083 | - pushq %r8 /* pt_regs->r8 */ |
1084 | - pushq %r9 /* pt_regs->r9 */ |
1085 | - pushq %r10 /* pt_regs->r10 */ |
1086 | - pushq %r11 /* pt_regs->r11 */ |
1087 | - pushq %rbx /* pt_regs->rbx */ |
1088 | - pushq %rbp /* pt_regs->rbp */ |
1089 | - pushq %r12 /* pt_regs->r12 */ |
1090 | - pushq %r13 /* pt_regs->r13 */ |
1091 | - pushq %r14 /* pt_regs->r14 */ |
1092 | - pushq %r15 /* pt_regs->r15 */ |
1093 | - UNWIND_HINT_REGS |
1094 | + |
1095 | + PUSH_AND_CLEAR_REGS rax=$-ENOSYS |
1096 | |
1097 | TRACE_IRQS_OFF |
1098 | |
1099 | @@ -321,15 +307,7 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) |
1100 | syscall_return_via_sysret: |
1101 | /* rcx and r11 are already restored (see code above) */ |
1102 | UNWIND_HINT_EMPTY |
1103 | - POP_EXTRA_REGS |
1104 | - popq %rsi /* skip r11 */ |
1105 | - popq %r10 |
1106 | - popq %r9 |
1107 | - popq %r8 |
1108 | - popq %rax |
1109 | - popq %rsi /* skip rcx */ |
1110 | - popq %rdx |
1111 | - popq %rsi |
1112 | + POP_REGS pop_rdi=0 skip_r11rcx=1 |
1113 | |
1114 | /* |
1115 | * Now all regs are restored except RSP and RDI. |
1116 | @@ -559,9 +537,7 @@ END(irq_entries_start) |
1117 | call switch_to_thread_stack |
1118 | 1: |
1119 | |
1120 | - ALLOC_PT_GPREGS_ON_STACK |
1121 | - SAVE_C_REGS |
1122 | - SAVE_EXTRA_REGS |
1123 | + PUSH_AND_CLEAR_REGS |
1124 | ENCODE_FRAME_POINTER |
1125 | |
1126 | testb $3, CS(%rsp) |
1127 | @@ -622,15 +598,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode) |
1128 | ud2 |
1129 | 1: |
1130 | #endif |
1131 | - POP_EXTRA_REGS |
1132 | - popq %r11 |
1133 | - popq %r10 |
1134 | - popq %r9 |
1135 | - popq %r8 |
1136 | - popq %rax |
1137 | - popq %rcx |
1138 | - popq %rdx |
1139 | - popq %rsi |
1140 | + POP_REGS pop_rdi=0 |
1141 | |
1142 | /* |
1143 | * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS. |
1144 | @@ -688,8 +656,7 @@ GLOBAL(restore_regs_and_return_to_kernel) |
1145 | ud2 |
1146 | 1: |
1147 | #endif |
1148 | - POP_EXTRA_REGS |
1149 | - POP_C_REGS |
1150 | + POP_REGS |
1151 | addq $8, %rsp /* skip regs->orig_ax */ |
1152 | INTERRUPT_RETURN |
1153 | |
1154 | @@ -904,7 +871,9 @@ ENTRY(\sym) |
1155 | pushq $-1 /* ORIG_RAX: no syscall to restart */ |
1156 | .endif |
1157 | |
1158 | - ALLOC_PT_GPREGS_ON_STACK |
1159 | + /* Save all registers in pt_regs */ |
1160 | + PUSH_AND_CLEAR_REGS |
1161 | + ENCODE_FRAME_POINTER |
1162 | |
1163 | .if \paranoid < 2 |
1164 | testb $3, CS(%rsp) /* If coming from userspace, switch stacks */ |
1165 | @@ -1117,9 +1086,7 @@ ENTRY(xen_failsafe_callback) |
1166 | addq $0x30, %rsp |
1167 | UNWIND_HINT_IRET_REGS |
1168 | pushq $-1 /* orig_ax = -1 => not a system call */ |
1169 | - ALLOC_PT_GPREGS_ON_STACK |
1170 | - SAVE_C_REGS |
1171 | - SAVE_EXTRA_REGS |
1172 | + PUSH_AND_CLEAR_REGS |
1173 | ENCODE_FRAME_POINTER |
1174 | jmp error_exit |
1175 | END(xen_failsafe_callback) |
1176 | @@ -1156,16 +1123,13 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1 |
1177 | #endif |
1178 | |
1179 | /* |
1180 | - * Save all registers in pt_regs, and switch gs if needed. |
1181 | + * Switch gs if needed. |
1182 | * Use slow, but surefire "are we in kernel?" check. |
1183 | * Return: ebx=0: need swapgs on exit, ebx=1: otherwise |
1184 | */ |
1185 | ENTRY(paranoid_entry) |
1186 | UNWIND_HINT_FUNC |
1187 | cld |
1188 | - SAVE_C_REGS 8 |
1189 | - SAVE_EXTRA_REGS 8 |
1190 | - ENCODE_FRAME_POINTER 8 |
1191 | movl $1, %ebx |
1192 | movl $MSR_GS_BASE, %ecx |
1193 | rdmsr |
1194 | @@ -1204,21 +1168,18 @@ ENTRY(paranoid_exit) |
1195 | jmp .Lparanoid_exit_restore |
1196 | .Lparanoid_exit_no_swapgs: |
1197 | TRACE_IRQS_IRETQ_DEBUG |
1198 | + RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 |
1199 | .Lparanoid_exit_restore: |
1200 | jmp restore_regs_and_return_to_kernel |
1201 | END(paranoid_exit) |
1202 | |
1203 | /* |
1204 | - * Save all registers in pt_regs, and switch gs if needed. |
1205 | + * Switch gs if needed. |
1206 | * Return: EBX=0: came from user mode; EBX=1: otherwise |
1207 | */ |
1208 | ENTRY(error_entry) |
1209 | - UNWIND_HINT_FUNC |
1210 | + UNWIND_HINT_REGS offset=8 |
1211 | cld |
1212 | - SAVE_C_REGS 8 |
1213 | - SAVE_EXTRA_REGS 8 |
1214 | - ENCODE_FRAME_POINTER 8 |
1215 | - xorl %ebx, %ebx |
1216 | testb $3, CS+8(%rsp) |
1217 | jz .Lerror_kernelspace |
1218 | |
1219 | @@ -1399,22 +1360,7 @@ ENTRY(nmi) |
1220 | pushq 1*8(%rdx) /* pt_regs->rip */ |
1221 | UNWIND_HINT_IRET_REGS |
1222 | pushq $-1 /* pt_regs->orig_ax */ |
1223 | - pushq %rdi /* pt_regs->di */ |
1224 | - pushq %rsi /* pt_regs->si */ |
1225 | - pushq (%rdx) /* pt_regs->dx */ |
1226 | - pushq %rcx /* pt_regs->cx */ |
1227 | - pushq %rax /* pt_regs->ax */ |
1228 | - pushq %r8 /* pt_regs->r8 */ |
1229 | - pushq %r9 /* pt_regs->r9 */ |
1230 | - pushq %r10 /* pt_regs->r10 */ |
1231 | - pushq %r11 /* pt_regs->r11 */ |
1232 | - pushq %rbx /* pt_regs->rbx */ |
1233 | - pushq %rbp /* pt_regs->rbp */ |
1234 | - pushq %r12 /* pt_regs->r12 */ |
1235 | - pushq %r13 /* pt_regs->r13 */ |
1236 | - pushq %r14 /* pt_regs->r14 */ |
1237 | - pushq %r15 /* pt_regs->r15 */ |
1238 | - UNWIND_HINT_REGS |
1239 | + PUSH_AND_CLEAR_REGS rdx=(%rdx) |
1240 | ENCODE_FRAME_POINTER |
1241 | |
1242 | /* |
1243 | @@ -1624,7 +1570,8 @@ end_repeat_nmi: |
1244 | * frame to point back to repeat_nmi. |
1245 | */ |
1246 | pushq $-1 /* ORIG_RAX: no syscall to restart */ |
1247 | - ALLOC_PT_GPREGS_ON_STACK |
1248 | + PUSH_AND_CLEAR_REGS |
1249 | + ENCODE_FRAME_POINTER |
1250 | |
1251 | /* |
1252 | * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit |
1253 | @@ -1648,8 +1595,7 @@ end_repeat_nmi: |
1254 | nmi_swapgs: |
1255 | SWAPGS_UNSAFE_STACK |
1256 | nmi_restore: |
1257 | - POP_EXTRA_REGS |
1258 | - POP_C_REGS |
1259 | + POP_REGS |
1260 | |
1261 | /* |
1262 | * Skip orig_ax and the "outermost" frame to point RSP at the "iret" |
1263 | diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S |
1264 | index 98d5358e4041..fd65e016e413 100644 |
1265 | --- a/arch/x86/entry/entry_64_compat.S |
1266 | +++ b/arch/x86/entry/entry_64_compat.S |
1267 | @@ -85,15 +85,25 @@ ENTRY(entry_SYSENTER_compat) |
1268 | pushq %rcx /* pt_regs->cx */ |
1269 | pushq $-ENOSYS /* pt_regs->ax */ |
1270 | pushq $0 /* pt_regs->r8 = 0 */ |
1271 | + xorq %r8, %r8 /* nospec r8 */ |
1272 | pushq $0 /* pt_regs->r9 = 0 */ |
1273 | + xorq %r9, %r9 /* nospec r9 */ |
1274 | pushq $0 /* pt_regs->r10 = 0 */ |
1275 | + xorq %r10, %r10 /* nospec r10 */ |
1276 | pushq $0 /* pt_regs->r11 = 0 */ |
1277 | + xorq %r11, %r11 /* nospec r11 */ |
1278 | pushq %rbx /* pt_regs->rbx */ |
1279 | + xorl %ebx, %ebx /* nospec rbx */ |
1280 | pushq %rbp /* pt_regs->rbp (will be overwritten) */ |
1281 | + xorl %ebp, %ebp /* nospec rbp */ |
1282 | pushq $0 /* pt_regs->r12 = 0 */ |
1283 | + xorq %r12, %r12 /* nospec r12 */ |
1284 | pushq $0 /* pt_regs->r13 = 0 */ |
1285 | + xorq %r13, %r13 /* nospec r13 */ |
1286 | pushq $0 /* pt_regs->r14 = 0 */ |
1287 | + xorq %r14, %r14 /* nospec r14 */ |
1288 | pushq $0 /* pt_regs->r15 = 0 */ |
1289 | + xorq %r15, %r15 /* nospec r15 */ |
1290 | cld |
1291 | |
1292 | /* |
1293 | @@ -214,15 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe) |
1294 | pushq %rbp /* pt_regs->cx (stashed in bp) */ |
1295 | pushq $-ENOSYS /* pt_regs->ax */ |
1296 | pushq $0 /* pt_regs->r8 = 0 */ |
1297 | + xorq %r8, %r8 /* nospec r8 */ |
1298 | pushq $0 /* pt_regs->r9 = 0 */ |
1299 | + xorq %r9, %r9 /* nospec r9 */ |
1300 | pushq $0 /* pt_regs->r10 = 0 */ |
1301 | + xorq %r10, %r10 /* nospec r10 */ |
1302 | pushq $0 /* pt_regs->r11 = 0 */ |
1303 | + xorq %r11, %r11 /* nospec r11 */ |
1304 | pushq %rbx /* pt_regs->rbx */ |
1305 | + xorl %ebx, %ebx /* nospec rbx */ |
1306 | pushq %rbp /* pt_regs->rbp (will be overwritten) */ |
1307 | + xorl %ebp, %ebp /* nospec rbp */ |
1308 | pushq $0 /* pt_regs->r12 = 0 */ |
1309 | + xorq %r12, %r12 /* nospec r12 */ |
1310 | pushq $0 /* pt_regs->r13 = 0 */ |
1311 | + xorq %r13, %r13 /* nospec r13 */ |
1312 | pushq $0 /* pt_regs->r14 = 0 */ |
1313 | + xorq %r14, %r14 /* nospec r14 */ |
1314 | pushq $0 /* pt_regs->r15 = 0 */ |
1315 | + xorq %r15, %r15 /* nospec r15 */ |
1316 | |
1317 | /* |
1318 | * User mode is traced as though IRQs are on, and SYSENTER |
1319 | @@ -338,15 +358,25 @@ ENTRY(entry_INT80_compat) |
1320 | pushq %rcx /* pt_regs->cx */ |
1321 | pushq $-ENOSYS /* pt_regs->ax */ |
1322 | pushq $0 /* pt_regs->r8 = 0 */ |
1323 | + xorq %r8, %r8 /* nospec r8 */ |
1324 | pushq $0 /* pt_regs->r9 = 0 */ |
1325 | + xorq %r9, %r9 /* nospec r9 */ |
1326 | pushq $0 /* pt_regs->r10 = 0 */ |
1327 | + xorq %r10, %r10 /* nospec r10 */ |
1328 | pushq $0 /* pt_regs->r11 = 0 */ |
1329 | + xorq %r11, %r11 /* nospec r11 */ |
1330 | pushq %rbx /* pt_regs->rbx */ |
1331 | + xorl %ebx, %ebx /* nospec rbx */ |
1332 | pushq %rbp /* pt_regs->rbp */ |
1333 | + xorl %ebp, %ebp /* nospec rbp */ |
1334 | pushq %r12 /* pt_regs->r12 */ |
1335 | + xorq %r12, %r12 /* nospec r12 */ |
1336 | pushq %r13 /* pt_regs->r13 */ |
1337 | + xorq %r13, %r13 /* nospec r13 */ |
1338 | pushq %r14 /* pt_regs->r14 */ |
1339 | + xorq %r14, %r14 /* nospec r14 */ |
1340 | pushq %r15 /* pt_regs->r15 */ |
1341 | + xorq %r15, %r15 /* nospec r15 */ |
1342 | cld |
1343 | |
1344 | /* |
1345 | diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c |
1346 | index 731153a4681e..56457cb73448 100644 |
1347 | --- a/arch/x86/events/intel/core.c |
1348 | +++ b/arch/x86/events/intel/core.c |
1349 | @@ -3559,7 +3559,7 @@ static int intel_snb_pebs_broken(int cpu) |
1350 | break; |
1351 | |
1352 | case INTEL_FAM6_SANDYBRIDGE_X: |
1353 | - switch (cpu_data(cpu).x86_mask) { |
1354 | + switch (cpu_data(cpu).x86_stepping) { |
1355 | case 6: rev = 0x618; break; |
1356 | case 7: rev = 0x70c; break; |
1357 | } |
1358 | diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c |
1359 | index ae64d0b69729..cf372b90557e 100644 |
1360 | --- a/arch/x86/events/intel/lbr.c |
1361 | +++ b/arch/x86/events/intel/lbr.c |
1362 | @@ -1186,7 +1186,7 @@ void __init intel_pmu_lbr_init_atom(void) |
1363 | * on PMU interrupt |
1364 | */ |
1365 | if (boot_cpu_data.x86_model == 28 |
1366 | - && boot_cpu_data.x86_mask < 10) { |
1367 | + && boot_cpu_data.x86_stepping < 10) { |
1368 | pr_cont("LBR disabled due to erratum"); |
1369 | return; |
1370 | } |
1371 | diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c |
1372 | index a5604c352930..408879b0c0d4 100644 |
1373 | --- a/arch/x86/events/intel/p6.c |
1374 | +++ b/arch/x86/events/intel/p6.c |
1375 | @@ -234,7 +234,7 @@ static __initconst const struct x86_pmu p6_pmu = { |
1376 | |
1377 | static __init void p6_pmu_rdpmc_quirk(void) |
1378 | { |
1379 | - if (boot_cpu_data.x86_mask < 9) { |
1380 | + if (boot_cpu_data.x86_stepping < 9) { |
1381 | /* |
1382 | * PPro erratum 26; fixed in stepping 9 and above. |
1383 | */ |
1384 | diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h |
1385 | index 8d0ec9df1cbe..f077401869ee 100644 |
1386 | --- a/arch/x86/include/asm/acpi.h |
1387 | +++ b/arch/x86/include/asm/acpi.h |
1388 | @@ -94,7 +94,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) |
1389 | if (boot_cpu_data.x86 == 0x0F && |
1390 | boot_cpu_data.x86_vendor == X86_VENDOR_AMD && |
1391 | boot_cpu_data.x86_model <= 0x05 && |
1392 | - boot_cpu_data.x86_mask < 0x0A) |
1393 | + boot_cpu_data.x86_stepping < 0x0A) |
1394 | return 1; |
1395 | else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E)) |
1396 | return 1; |
1397 | diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h |
1398 | index 30d406146016..e1259f043ae9 100644 |
1399 | --- a/arch/x86/include/asm/barrier.h |
1400 | +++ b/arch/x86/include/asm/barrier.h |
1401 | @@ -40,7 +40,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, |
1402 | |
1403 | asm ("cmp %1,%2; sbb %0,%0;" |
1404 | :"=r" (mask) |
1405 | - :"r"(size),"r" (index) |
1406 | + :"g"(size),"r" (index) |
1407 | :"cc"); |
1408 | return mask; |
1409 | } |
1410 | diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h |
1411 | index 34d99af43994..6804d6642767 100644 |
1412 | --- a/arch/x86/include/asm/bug.h |
1413 | +++ b/arch/x86/include/asm/bug.h |
1414 | @@ -5,23 +5,20 @@ |
1415 | #include <linux/stringify.h> |
1416 | |
1417 | /* |
1418 | - * Since some emulators terminate on UD2, we cannot use it for WARN. |
1419 | - * Since various instruction decoders disagree on the length of UD1, |
1420 | - * we cannot use it either. So use UD0 for WARN. |
1421 | + * Despite that some emulators terminate on UD2, we use it for WARN(). |
1422 | * |
1423 | - * (binutils knows about "ud1" but {en,de}codes it as 2 bytes, whereas |
1424 | - * our kernel decoder thinks it takes a ModRM byte, which seems consistent |
1425 | - * with various things like the Intel SDM instruction encoding rules) |
1426 | + * Since various instruction decoders/specs disagree on the encoding of |
1427 | + * UD0/UD1. |
1428 | */ |
1429 | |
1430 | -#define ASM_UD0 ".byte 0x0f, 0xff" |
1431 | +#define ASM_UD0 ".byte 0x0f, 0xff" /* + ModRM (for Intel) */ |
1432 | #define ASM_UD1 ".byte 0x0f, 0xb9" /* + ModRM */ |
1433 | #define ASM_UD2 ".byte 0x0f, 0x0b" |
1434 | |
1435 | #define INSN_UD0 0xff0f |
1436 | #define INSN_UD2 0x0b0f |
1437 | |
1438 | -#define LEN_UD0 2 |
1439 | +#define LEN_UD2 2 |
1440 | |
1441 | #ifdef CONFIG_GENERIC_BUG |
1442 | |
1443 | @@ -77,7 +74,11 @@ do { \ |
1444 | unreachable(); \ |
1445 | } while (0) |
1446 | |
1447 | -#define __WARN_FLAGS(flags) _BUG_FLAGS(ASM_UD0, BUGFLAG_WARNING|(flags)) |
1448 | +#define __WARN_FLAGS(flags) \ |
1449 | +do { \ |
1450 | + _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \ |
1451 | + annotate_reachable(); \ |
1452 | +} while (0) |
1453 | |
1454 | #include <asm-generic/bug.h> |
1455 | |
1456 | diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h |
1457 | index 4d57894635f2..76b058533e47 100644 |
1458 | --- a/arch/x86/include/asm/nospec-branch.h |
1459 | +++ b/arch/x86/include/asm/nospec-branch.h |
1460 | @@ -6,6 +6,7 @@ |
1461 | #include <asm/alternative.h> |
1462 | #include <asm/alternative-asm.h> |
1463 | #include <asm/cpufeatures.h> |
1464 | +#include <asm/msr-index.h> |
1465 | |
1466 | #ifdef __ASSEMBLY__ |
1467 | |
1468 | @@ -164,10 +165,15 @@ static inline void vmexit_fill_RSB(void) |
1469 | |
1470 | static inline void indirect_branch_prediction_barrier(void) |
1471 | { |
1472 | - alternative_input("", |
1473 | - "call __ibp_barrier", |
1474 | - X86_FEATURE_USE_IBPB, |
1475 | - ASM_NO_INPUT_CLOBBER("eax", "ecx", "edx", "memory")); |
1476 | + asm volatile(ALTERNATIVE("", |
1477 | + "movl %[msr], %%ecx\n\t" |
1478 | + "movl %[val], %%eax\n\t" |
1479 | + "movl $0, %%edx\n\t" |
1480 | + "wrmsr", |
1481 | + X86_FEATURE_USE_IBPB) |
1482 | + : : [msr] "i" (MSR_IA32_PRED_CMD), |
1483 | + [val] "i" (PRED_CMD_IBPB) |
1484 | + : "eax", "ecx", "edx", "memory"); |
1485 | } |
1486 | |
1487 | #endif /* __ASSEMBLY__ */ |
1488 | diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h |
1489 | index 4baa6bceb232..d652a3808065 100644 |
1490 | --- a/arch/x86/include/asm/page_64.h |
1491 | +++ b/arch/x86/include/asm/page_64.h |
1492 | @@ -52,10 +52,6 @@ static inline void clear_page(void *page) |
1493 | |
1494 | void copy_page(void *to, void *from); |
1495 | |
1496 | -#ifdef CONFIG_X86_MCE |
1497 | -#define arch_unmap_kpfn arch_unmap_kpfn |
1498 | -#endif |
1499 | - |
1500 | #endif /* !__ASSEMBLY__ */ |
1501 | |
1502 | #ifdef CONFIG_X86_VSYSCALL_EMULATION |
1503 | diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h |
1504 | index 892df375b615..554841fab717 100644 |
1505 | --- a/arch/x86/include/asm/paravirt.h |
1506 | +++ b/arch/x86/include/asm/paravirt.h |
1507 | @@ -297,9 +297,9 @@ static inline void __flush_tlb_global(void) |
1508 | { |
1509 | PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel); |
1510 | } |
1511 | -static inline void __flush_tlb_single(unsigned long addr) |
1512 | +static inline void __flush_tlb_one_user(unsigned long addr) |
1513 | { |
1514 | - PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr); |
1515 | + PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr); |
1516 | } |
1517 | |
1518 | static inline void flush_tlb_others(const struct cpumask *cpumask, |
1519 | diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h |
1520 | index 6ec54d01972d..f624f1f10316 100644 |
1521 | --- a/arch/x86/include/asm/paravirt_types.h |
1522 | +++ b/arch/x86/include/asm/paravirt_types.h |
1523 | @@ -217,7 +217,7 @@ struct pv_mmu_ops { |
1524 | /* TLB operations */ |
1525 | void (*flush_tlb_user)(void); |
1526 | void (*flush_tlb_kernel)(void); |
1527 | - void (*flush_tlb_single)(unsigned long addr); |
1528 | + void (*flush_tlb_one_user)(unsigned long addr); |
1529 | void (*flush_tlb_others)(const struct cpumask *cpus, |
1530 | const struct flush_tlb_info *info); |
1531 | |
1532 | diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h |
1533 | index e67c0620aec2..e55466760ff8 100644 |
1534 | --- a/arch/x86/include/asm/pgtable_32.h |
1535 | +++ b/arch/x86/include/asm/pgtable_32.h |
1536 | @@ -61,7 +61,7 @@ void paging_init(void); |
1537 | #define kpte_clear_flush(ptep, vaddr) \ |
1538 | do { \ |
1539 | pte_clear(&init_mm, (vaddr), (ptep)); \ |
1540 | - __flush_tlb_one((vaddr)); \ |
1541 | + __flush_tlb_one_kernel((vaddr)); \ |
1542 | } while (0) |
1543 | |
1544 | #endif /* !__ASSEMBLY__ */ |
1545 | diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h |
1546 | index 513f9604c192..44c2c4ec6d60 100644 |
1547 | --- a/arch/x86/include/asm/processor.h |
1548 | +++ b/arch/x86/include/asm/processor.h |
1549 | @@ -91,7 +91,7 @@ struct cpuinfo_x86 { |
1550 | __u8 x86; /* CPU family */ |
1551 | __u8 x86_vendor; /* CPU vendor */ |
1552 | __u8 x86_model; |
1553 | - __u8 x86_mask; |
1554 | + __u8 x86_stepping; |
1555 | #ifdef CONFIG_X86_64 |
1556 | /* Number of 4K pages in DTLB/ITLB combined(in pages): */ |
1557 | int x86_tlbsize; |
1558 | @@ -109,7 +109,7 @@ struct cpuinfo_x86 { |
1559 | char x86_vendor_id[16]; |
1560 | char x86_model_id[64]; |
1561 | /* in KB - valid for CPUS which support this call: */ |
1562 | - int x86_cache_size; |
1563 | + unsigned int x86_cache_size; |
1564 | int x86_cache_alignment; /* In bytes */ |
1565 | /* Cache QoS architectural values: */ |
1566 | int x86_cache_max_rmid; /* max index */ |
1567 | @@ -969,7 +969,4 @@ bool xen_set_default_idle(void); |
1568 | |
1569 | void stop_this_cpu(void *dummy); |
1570 | void df_debug(struct pt_regs *regs, long error_code); |
1571 | - |
1572 | -void __ibp_barrier(void); |
1573 | - |
1574 | #endif /* _ASM_X86_PROCESSOR_H */ |
1575 | diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h |
1576 | index 2b8f18ca5874..84137c22fdfa 100644 |
1577 | --- a/arch/x86/include/asm/tlbflush.h |
1578 | +++ b/arch/x86/include/asm/tlbflush.h |
1579 | @@ -140,7 +140,7 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid) |
1580 | #else |
1581 | #define __flush_tlb() __native_flush_tlb() |
1582 | #define __flush_tlb_global() __native_flush_tlb_global() |
1583 | -#define __flush_tlb_single(addr) __native_flush_tlb_single(addr) |
1584 | +#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr) |
1585 | #endif |
1586 | |
1587 | static inline bool tlb_defer_switch_to_init_mm(void) |
1588 | @@ -400,7 +400,7 @@ static inline void __native_flush_tlb_global(void) |
1589 | /* |
1590 | * flush one page in the user mapping |
1591 | */ |
1592 | -static inline void __native_flush_tlb_single(unsigned long addr) |
1593 | +static inline void __native_flush_tlb_one_user(unsigned long addr) |
1594 | { |
1595 | u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); |
1596 | |
1597 | @@ -437,18 +437,31 @@ static inline void __flush_tlb_all(void) |
1598 | /* |
1599 | * flush one page in the kernel mapping |
1600 | */ |
1601 | -static inline void __flush_tlb_one(unsigned long addr) |
1602 | +static inline void __flush_tlb_one_kernel(unsigned long addr) |
1603 | { |
1604 | count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); |
1605 | - __flush_tlb_single(addr); |
1606 | + |
1607 | + /* |
1608 | + * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its |
1609 | + * paravirt equivalent. Even with PCID, this is sufficient: we only |
1610 | + * use PCID if we also use global PTEs for the kernel mapping, and |
1611 | + * INVLPG flushes global translations across all address spaces. |
1612 | + * |
1613 | + * If PTI is on, then the kernel is mapped with non-global PTEs, and |
1614 | + * __flush_tlb_one_user() will flush the given address for the current |
1615 | + * kernel address space and for its usermode counterpart, but it does |
1616 | + * not flush it for other address spaces. |
1617 | + */ |
1618 | + __flush_tlb_one_user(addr); |
1619 | |
1620 | if (!static_cpu_has(X86_FEATURE_PTI)) |
1621 | return; |
1622 | |
1623 | /* |
1624 | - * __flush_tlb_single() will have cleared the TLB entry for this ASID, |
1625 | - * but since kernel space is replicated across all, we must also |
1626 | - * invalidate all others. |
1627 | + * See above. We need to propagate the flush to all other address |
1628 | + * spaces. In principle, we only need to propagate it to kernelmode |
1629 | + * address spaces, but the extra bookkeeping we would need is not |
1630 | + * worth it. |
1631 | */ |
1632 | invalidate_other_asid(); |
1633 | } |
1634 | diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c |
1635 | index 6db28f17ff28..c88e0b127810 100644 |
1636 | --- a/arch/x86/kernel/amd_nb.c |
1637 | +++ b/arch/x86/kernel/amd_nb.c |
1638 | @@ -235,7 +235,7 @@ int amd_cache_northbridges(void) |
1639 | if (boot_cpu_data.x86 == 0x10 && |
1640 | boot_cpu_data.x86_model >= 0x8 && |
1641 | (boot_cpu_data.x86_model > 0x9 || |
1642 | - boot_cpu_data.x86_mask >= 0x1)) |
1643 | + boot_cpu_data.x86_stepping >= 0x1)) |
1644 | amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; |
1645 | |
1646 | if (boot_cpu_data.x86 == 0x15) |
1647 | diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c |
1648 | index 25ddf02598d2..b203af0855b5 100644 |
1649 | --- a/arch/x86/kernel/apic/apic.c |
1650 | +++ b/arch/x86/kernel/apic/apic.c |
1651 | @@ -546,7 +546,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events); |
1652 | |
1653 | static u32 hsx_deadline_rev(void) |
1654 | { |
1655 | - switch (boot_cpu_data.x86_mask) { |
1656 | + switch (boot_cpu_data.x86_stepping) { |
1657 | case 0x02: return 0x3a; /* EP */ |
1658 | case 0x04: return 0x0f; /* EX */ |
1659 | } |
1660 | @@ -556,7 +556,7 @@ static u32 hsx_deadline_rev(void) |
1661 | |
1662 | static u32 bdx_deadline_rev(void) |
1663 | { |
1664 | - switch (boot_cpu_data.x86_mask) { |
1665 | + switch (boot_cpu_data.x86_stepping) { |
1666 | case 0x02: return 0x00000011; |
1667 | case 0x03: return 0x0700000e; |
1668 | case 0x04: return 0x0f00000c; |
1669 | @@ -568,7 +568,7 @@ static u32 bdx_deadline_rev(void) |
1670 | |
1671 | static u32 skx_deadline_rev(void) |
1672 | { |
1673 | - switch (boot_cpu_data.x86_mask) { |
1674 | + switch (boot_cpu_data.x86_stepping) { |
1675 | case 0x03: return 0x01000136; |
1676 | case 0x04: return 0x02000014; |
1677 | } |
1678 | diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c |
1679 | index e4b0d92b3ae0..2a7fd56e67b3 100644 |
1680 | --- a/arch/x86/kernel/apm_32.c |
1681 | +++ b/arch/x86/kernel/apm_32.c |
1682 | @@ -2389,6 +2389,7 @@ static int __init apm_init(void) |
1683 | if (HZ != 100) |
1684 | idle_period = (idle_period * HZ) / 100; |
1685 | if (idle_threshold < 100) { |
1686 | + cpuidle_poll_state_init(&apm_idle_driver); |
1687 | if (!cpuidle_register_driver(&apm_idle_driver)) |
1688 | if (cpuidle_register_device(&apm_cpuidle_device)) |
1689 | cpuidle_unregister_driver(&apm_idle_driver); |
1690 | diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c |
1691 | index fa1261eefa16..f91ba53e06c8 100644 |
1692 | --- a/arch/x86/kernel/asm-offsets_32.c |
1693 | +++ b/arch/x86/kernel/asm-offsets_32.c |
1694 | @@ -18,7 +18,7 @@ void foo(void) |
1695 | OFFSET(CPUINFO_x86, cpuinfo_x86, x86); |
1696 | OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor); |
1697 | OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model); |
1698 | - OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask); |
1699 | + OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping); |
1700 | OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level); |
1701 | OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability); |
1702 | OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id); |
1703 | diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c |
1704 | index ea831c858195..e7d5a7883632 100644 |
1705 | --- a/arch/x86/kernel/cpu/amd.c |
1706 | +++ b/arch/x86/kernel/cpu/amd.c |
1707 | @@ -119,7 +119,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) |
1708 | return; |
1709 | } |
1710 | |
1711 | - if (c->x86_model == 6 && c->x86_mask == 1) { |
1712 | + if (c->x86_model == 6 && c->x86_stepping == 1) { |
1713 | const int K6_BUG_LOOP = 1000000; |
1714 | int n; |
1715 | void (*f_vide)(void); |
1716 | @@ -149,7 +149,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) |
1717 | |
1718 | /* K6 with old style WHCR */ |
1719 | if (c->x86_model < 8 || |
1720 | - (c->x86_model == 8 && c->x86_mask < 8)) { |
1721 | + (c->x86_model == 8 && c->x86_stepping < 8)) { |
1722 | /* We can only write allocate on the low 508Mb */ |
1723 | if (mbytes > 508) |
1724 | mbytes = 508; |
1725 | @@ -168,7 +168,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) |
1726 | return; |
1727 | } |
1728 | |
1729 | - if ((c->x86_model == 8 && c->x86_mask > 7) || |
1730 | + if ((c->x86_model == 8 && c->x86_stepping > 7) || |
1731 | c->x86_model == 9 || c->x86_model == 13) { |
1732 | /* The more serious chips .. */ |
1733 | |
1734 | @@ -221,7 +221,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c) |
1735 | * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx |
1736 | * As per AMD technical note 27212 0.2 |
1737 | */ |
1738 | - if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { |
1739 | + if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) { |
1740 | rdmsr(MSR_K7_CLK_CTL, l, h); |
1741 | if ((l & 0xfff00000) != 0x20000000) { |
1742 | pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", |
1743 | @@ -241,12 +241,12 @@ static void init_amd_k7(struct cpuinfo_x86 *c) |
1744 | * but they are not certified as MP capable. |
1745 | */ |
1746 | /* Athlon 660/661 is valid. */ |
1747 | - if ((c->x86_model == 6) && ((c->x86_mask == 0) || |
1748 | - (c->x86_mask == 1))) |
1749 | + if ((c->x86_model == 6) && ((c->x86_stepping == 0) || |
1750 | + (c->x86_stepping == 1))) |
1751 | return; |
1752 | |
1753 | /* Duron 670 is valid */ |
1754 | - if ((c->x86_model == 7) && (c->x86_mask == 0)) |
1755 | + if ((c->x86_model == 7) && (c->x86_stepping == 0)) |
1756 | return; |
1757 | |
1758 | /* |
1759 | @@ -256,8 +256,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c) |
1760 | * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for |
1761 | * more. |
1762 | */ |
1763 | - if (((c->x86_model == 6) && (c->x86_mask >= 2)) || |
1764 | - ((c->x86_model == 7) && (c->x86_mask >= 1)) || |
1765 | + if (((c->x86_model == 6) && (c->x86_stepping >= 2)) || |
1766 | + ((c->x86_model == 7) && (c->x86_stepping >= 1)) || |
1767 | (c->x86_model > 7)) |
1768 | if (cpu_has(c, X86_FEATURE_MP)) |
1769 | return; |
1770 | @@ -583,7 +583,7 @@ static void early_init_amd(struct cpuinfo_x86 *c) |
1771 | /* Set MTRR capability flag if appropriate */ |
1772 | if (c->x86 == 5) |
1773 | if (c->x86_model == 13 || c->x86_model == 9 || |
1774 | - (c->x86_model == 8 && c->x86_mask >= 8)) |
1775 | + (c->x86_model == 8 && c->x86_stepping >= 8)) |
1776 | set_cpu_cap(c, X86_FEATURE_K6_MTRR); |
1777 | #endif |
1778 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI) |
1779 | @@ -769,7 +769,7 @@ static void init_amd_zn(struct cpuinfo_x86 *c) |
1780 | * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects |
1781 | * all up to and including B1. |
1782 | */ |
1783 | - if (c->x86_model <= 1 && c->x86_mask <= 1) |
1784 | + if (c->x86_model <= 1 && c->x86_stepping <= 1) |
1785 | set_cpu_cap(c, X86_FEATURE_CPB); |
1786 | } |
1787 | |
1788 | @@ -880,11 +880,11 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) |
1789 | /* AMD errata T13 (order #21922) */ |
1790 | if ((c->x86 == 6)) { |
1791 | /* Duron Rev A0 */ |
1792 | - if (c->x86_model == 3 && c->x86_mask == 0) |
1793 | + if (c->x86_model == 3 && c->x86_stepping == 0) |
1794 | size = 64; |
1795 | /* Tbird rev A1/A2 */ |
1796 | if (c->x86_model == 4 && |
1797 | - (c->x86_mask == 0 || c->x86_mask == 1)) |
1798 | + (c->x86_stepping == 0 || c->x86_stepping == 1)) |
1799 | size = 256; |
1800 | } |
1801 | return size; |
1802 | @@ -1021,7 +1021,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) |
1803 | } |
1804 | |
1805 | /* OSVW unavailable or ID unknown, match family-model-stepping range */ |
1806 | - ms = (cpu->x86_model << 4) | cpu->x86_mask; |
1807 | + ms = (cpu->x86_model << 4) | cpu->x86_stepping; |
1808 | while ((range = *erratum++)) |
1809 | if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && |
1810 | (ms >= AMD_MODEL_RANGE_START(range)) && |
1811 | diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
1812 | index 71949bf2de5a..d71c8b54b696 100644 |
1813 | --- a/arch/x86/kernel/cpu/bugs.c |
1814 | +++ b/arch/x86/kernel/cpu/bugs.c |
1815 | @@ -162,8 +162,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) |
1816 | if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) |
1817 | return SPECTRE_V2_CMD_NONE; |
1818 | else { |
1819 | - ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, |
1820 | - sizeof(arg)); |
1821 | + ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); |
1822 | if (ret < 0) |
1823 | return SPECTRE_V2_CMD_AUTO; |
1824 | |
1825 | @@ -175,8 +174,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) |
1826 | } |
1827 | |
1828 | if (i >= ARRAY_SIZE(mitigation_options)) { |
1829 | - pr_err("unknown option (%s). Switching to AUTO select\n", |
1830 | - mitigation_options[i].option); |
1831 | + pr_err("unknown option (%s). Switching to AUTO select\n", arg); |
1832 | return SPECTRE_V2_CMD_AUTO; |
1833 | } |
1834 | } |
1835 | @@ -185,8 +183,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) |
1836 | cmd == SPECTRE_V2_CMD_RETPOLINE_AMD || |
1837 | cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) && |
1838 | !IS_ENABLED(CONFIG_RETPOLINE)) { |
1839 | - pr_err("%s selected but not compiled in. Switching to AUTO select\n", |
1840 | - mitigation_options[i].option); |
1841 | + pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option); |
1842 | return SPECTRE_V2_CMD_AUTO; |
1843 | } |
1844 | |
1845 | @@ -256,14 +253,14 @@ static void __init spectre_v2_select_mitigation(void) |
1846 | goto retpoline_auto; |
1847 | break; |
1848 | } |
1849 | - pr_err("kernel not compiled with retpoline; no mitigation available!"); |
1850 | + pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!"); |
1851 | return; |
1852 | |
1853 | retpoline_auto: |
1854 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { |
1855 | retpoline_amd: |
1856 | if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { |
1857 | - pr_err("LFENCE not serializing. Switching to generic retpoline\n"); |
1858 | + pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n"); |
1859 | goto retpoline_generic; |
1860 | } |
1861 | mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : |
1862 | @@ -281,7 +278,7 @@ static void __init spectre_v2_select_mitigation(void) |
1863 | pr_info("%s\n", spectre_v2_strings[mode]); |
1864 | |
1865 | /* |
1866 | - * If neither SMEP or KPTI are available, there is a risk of |
1867 | + * If neither SMEP nor PTI are available, there is a risk of |
1868 | * hitting userspace addresses in the RSB after a context switch |
1869 | * from a shallow call stack to a deeper one. To prevent this fill |
1870 | * the entire RSB, even when using IBRS. |
1871 | @@ -295,21 +292,20 @@ static void __init spectre_v2_select_mitigation(void) |
1872 | if ((!boot_cpu_has(X86_FEATURE_PTI) && |
1873 | !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) { |
1874 | setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); |
1875 | - pr_info("Filling RSB on context switch\n"); |
1876 | + pr_info("Spectre v2 mitigation: Filling RSB on context switch\n"); |
1877 | } |
1878 | |
1879 | /* Initialize Indirect Branch Prediction Barrier if supported */ |
1880 | if (boot_cpu_has(X86_FEATURE_IBPB)) { |
1881 | setup_force_cpu_cap(X86_FEATURE_USE_IBPB); |
1882 | - pr_info("Enabling Indirect Branch Prediction Barrier\n"); |
1883 | + pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); |
1884 | } |
1885 | } |
1886 | |
1887 | #undef pr_fmt |
1888 | |
1889 | #ifdef CONFIG_SYSFS |
1890 | -ssize_t cpu_show_meltdown(struct device *dev, |
1891 | - struct device_attribute *attr, char *buf) |
1892 | +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) |
1893 | { |
1894 | if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) |
1895 | return sprintf(buf, "Not affected\n"); |
1896 | @@ -318,16 +314,14 @@ ssize_t cpu_show_meltdown(struct device *dev, |
1897 | return sprintf(buf, "Vulnerable\n"); |
1898 | } |
1899 | |
1900 | -ssize_t cpu_show_spectre_v1(struct device *dev, |
1901 | - struct device_attribute *attr, char *buf) |
1902 | +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) |
1903 | { |
1904 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) |
1905 | return sprintf(buf, "Not affected\n"); |
1906 | return sprintf(buf, "Mitigation: __user pointer sanitization\n"); |
1907 | } |
1908 | |
1909 | -ssize_t cpu_show_spectre_v2(struct device *dev, |
1910 | - struct device_attribute *attr, char *buf) |
1911 | +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) |
1912 | { |
1913 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) |
1914 | return sprintf(buf, "Not affected\n"); |
1915 | @@ -337,9 +331,3 @@ ssize_t cpu_show_spectre_v2(struct device *dev, |
1916 | spectre_v2_module_string()); |
1917 | } |
1918 | #endif |
1919 | - |
1920 | -void __ibp_barrier(void) |
1921 | -{ |
1922 | - __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0); |
1923 | -} |
1924 | -EXPORT_SYMBOL_GPL(__ibp_barrier); |
1925 | diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c |
1926 | index 68bc6d9b3132..595be776727d 100644 |
1927 | --- a/arch/x86/kernel/cpu/centaur.c |
1928 | +++ b/arch/x86/kernel/cpu/centaur.c |
1929 | @@ -136,7 +136,7 @@ static void init_centaur(struct cpuinfo_x86 *c) |
1930 | clear_cpu_cap(c, X86_FEATURE_TSC); |
1931 | break; |
1932 | case 8: |
1933 | - switch (c->x86_mask) { |
1934 | + switch (c->x86_stepping) { |
1935 | default: |
1936 | name = "2"; |
1937 | break; |
1938 | @@ -211,7 +211,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) |
1939 | * - Note, it seems this may only be in engineering samples. |
1940 | */ |
1941 | if ((c->x86 == 6) && (c->x86_model == 9) && |
1942 | - (c->x86_mask == 1) && (size == 65)) |
1943 | + (c->x86_stepping == 1) && (size == 65)) |
1944 | size -= 1; |
1945 | return size; |
1946 | } |
1947 | diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c |
1948 | index d63f4b5706e4..824aee0117bb 100644 |
1949 | --- a/arch/x86/kernel/cpu/common.c |
1950 | +++ b/arch/x86/kernel/cpu/common.c |
1951 | @@ -731,7 +731,7 @@ void cpu_detect(struct cpuinfo_x86 *c) |
1952 | cpuid(0x00000001, &tfms, &misc, &junk, &cap0); |
1953 | c->x86 = x86_family(tfms); |
1954 | c->x86_model = x86_model(tfms); |
1955 | - c->x86_mask = x86_stepping(tfms); |
1956 | + c->x86_stepping = x86_stepping(tfms); |
1957 | |
1958 | if (cap0 & (1<<19)) { |
1959 | c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; |
1960 | @@ -1184,9 +1184,9 @@ static void identify_cpu(struct cpuinfo_x86 *c) |
1961 | int i; |
1962 | |
1963 | c->loops_per_jiffy = loops_per_jiffy; |
1964 | - c->x86_cache_size = -1; |
1965 | + c->x86_cache_size = 0; |
1966 | c->x86_vendor = X86_VENDOR_UNKNOWN; |
1967 | - c->x86_model = c->x86_mask = 0; /* So far unknown... */ |
1968 | + c->x86_model = c->x86_stepping = 0; /* So far unknown... */ |
1969 | c->x86_vendor_id[0] = '\0'; /* Unset */ |
1970 | c->x86_model_id[0] = '\0'; /* Unset */ |
1971 | c->x86_max_cores = 1; |
1972 | @@ -1378,8 +1378,8 @@ void print_cpu_info(struct cpuinfo_x86 *c) |
1973 | |
1974 | pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); |
1975 | |
1976 | - if (c->x86_mask || c->cpuid_level >= 0) |
1977 | - pr_cont(", stepping: 0x%x)\n", c->x86_mask); |
1978 | + if (c->x86_stepping || c->cpuid_level >= 0) |
1979 | + pr_cont(", stepping: 0x%x)\n", c->x86_stepping); |
1980 | else |
1981 | pr_cont(")\n"); |
1982 | } |
1983 | diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c |
1984 | index 6b4bb335641f..8949b7ae6d92 100644 |
1985 | --- a/arch/x86/kernel/cpu/cyrix.c |
1986 | +++ b/arch/x86/kernel/cpu/cyrix.c |
1987 | @@ -215,7 +215,7 @@ static void init_cyrix(struct cpuinfo_x86 *c) |
1988 | |
1989 | /* common case step number/rev -- exceptions handled below */ |
1990 | c->x86_model = (dir1 >> 4) + 1; |
1991 | - c->x86_mask = dir1 & 0xf; |
1992 | + c->x86_stepping = dir1 & 0xf; |
1993 | |
1994 | /* Now cook; the original recipe is by Channing Corn, from Cyrix. |
1995 | * We do the same thing for each generation: we work out |
1996 | diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c |
1997 | index 319bf989fad1..d19e903214b4 100644 |
1998 | --- a/arch/x86/kernel/cpu/intel.c |
1999 | +++ b/arch/x86/kernel/cpu/intel.c |
2000 | @@ -116,14 +116,13 @@ struct sku_microcode { |
2001 | u32 microcode; |
2002 | }; |
2003 | static const struct sku_microcode spectre_bad_microcodes[] = { |
2004 | - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x84 }, |
2005 | - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x84 }, |
2006 | - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x84 }, |
2007 | - { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x84 }, |
2008 | - { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 }, |
2009 | + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 }, |
2010 | + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 }, |
2011 | + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 }, |
2012 | + { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 }, |
2013 | + { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 }, |
2014 | { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, |
2015 | { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, |
2016 | - { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0xc2 }, |
2017 | { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 }, |
2018 | { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, |
2019 | { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, |
2020 | @@ -136,8 +135,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = { |
2021 | { INTEL_FAM6_HASWELL_X, 0x02, 0x3b }, |
2022 | { INTEL_FAM6_HASWELL_X, 0x04, 0x10 }, |
2023 | { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a }, |
2024 | - /* Updated in the 20180108 release; blacklist until we know otherwise */ |
2025 | - { INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 }, |
2026 | /* Observed in the wild */ |
2027 | { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b }, |
2028 | { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 }, |
2029 | @@ -149,7 +146,7 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c) |
2030 | |
2031 | for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { |
2032 | if (c->x86_model == spectre_bad_microcodes[i].model && |
2033 | - c->x86_mask == spectre_bad_microcodes[i].stepping) |
2034 | + c->x86_stepping == spectre_bad_microcodes[i].stepping) |
2035 | return (c->microcode <= spectre_bad_microcodes[i].microcode); |
2036 | } |
2037 | return false; |
2038 | @@ -196,7 +193,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) |
2039 | * need the microcode to have already been loaded... so if it is |
2040 | * not, recommend a BIOS update and disable large pages. |
2041 | */ |
2042 | - if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 && |
2043 | + if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 && |
2044 | c->microcode < 0x20e) { |
2045 | pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n"); |
2046 | clear_cpu_cap(c, X86_FEATURE_PSE); |
2047 | @@ -212,7 +209,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) |
2048 | |
2049 | /* CPUID workaround for 0F33/0F34 CPU */ |
2050 | if (c->x86 == 0xF && c->x86_model == 0x3 |
2051 | - && (c->x86_mask == 0x3 || c->x86_mask == 0x4)) |
2052 | + && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4)) |
2053 | c->x86_phys_bits = 36; |
2054 | |
2055 | /* |
2056 | @@ -310,7 +307,7 @@ int ppro_with_ram_bug(void) |
2057 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && |
2058 | boot_cpu_data.x86 == 6 && |
2059 | boot_cpu_data.x86_model == 1 && |
2060 | - boot_cpu_data.x86_mask < 8) { |
2061 | + boot_cpu_data.x86_stepping < 8) { |
2062 | pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n"); |
2063 | return 1; |
2064 | } |
2065 | @@ -327,7 +324,7 @@ static void intel_smp_check(struct cpuinfo_x86 *c) |
2066 | * Mask B, Pentium, but not Pentium MMX |
2067 | */ |
2068 | if (c->x86 == 5 && |
2069 | - c->x86_mask >= 1 && c->x86_mask <= 4 && |
2070 | + c->x86_stepping >= 1 && c->x86_stepping <= 4 && |
2071 | c->x86_model <= 3) { |
2072 | /* |
2073 | * Remember we have B step Pentia with bugs |
2074 | @@ -370,7 +367,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) |
2075 | * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until |
2076 | * model 3 mask 3 |
2077 | */ |
2078 | - if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) |
2079 | + if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633) |
2080 | clear_cpu_cap(c, X86_FEATURE_SEP); |
2081 | |
2082 | /* |
2083 | @@ -388,7 +385,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) |
2084 | * P4 Xeon erratum 037 workaround. |
2085 | * Hardware prefetcher may cause stale data to be loaded into the cache. |
2086 | */ |
2087 | - if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { |
2088 | + if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) { |
2089 | if (msr_set_bit(MSR_IA32_MISC_ENABLE, |
2090 | MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) { |
2091 | pr_info("CPU: C0 stepping P4 Xeon detected.\n"); |
2092 | @@ -403,7 +400,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) |
2093 | * Specification Update"). |
2094 | */ |
2095 | if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 && |
2096 | - (c->x86_mask < 0x6 || c->x86_mask == 0xb)) |
2097 | + (c->x86_stepping < 0x6 || c->x86_stepping == 0xb)) |
2098 | set_cpu_bug(c, X86_BUG_11AP); |
2099 | |
2100 | |
2101 | @@ -650,7 +647,7 @@ static void init_intel(struct cpuinfo_x86 *c) |
2102 | case 6: |
2103 | if (l2 == 128) |
2104 | p = "Celeron (Mendocino)"; |
2105 | - else if (c->x86_mask == 0 || c->x86_mask == 5) |
2106 | + else if (c->x86_stepping == 0 || c->x86_stepping == 5) |
2107 | p = "Celeron-A"; |
2108 | break; |
2109 | |
2110 | diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c |
2111 | index 99442370de40..18dd8f22e353 100644 |
2112 | --- a/arch/x86/kernel/cpu/intel_rdt.c |
2113 | +++ b/arch/x86/kernel/cpu/intel_rdt.c |
2114 | @@ -771,7 +771,7 @@ static __init void rdt_quirks(void) |
2115 | cache_alloc_hsw_probe(); |
2116 | break; |
2117 | case INTEL_FAM6_SKYLAKE_X: |
2118 | - if (boot_cpu_data.x86_mask <= 4) |
2119 | + if (boot_cpu_data.x86_stepping <= 4) |
2120 | set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat"); |
2121 | } |
2122 | } |
2123 | diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h |
2124 | index aa0d5df9dc60..e956eb267061 100644 |
2125 | --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h |
2126 | +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h |
2127 | @@ -115,4 +115,19 @@ static inline void mce_unregister_injector_chain(struct notifier_block *nb) { } |
2128 | |
2129 | extern struct mca_config mca_cfg; |
2130 | |
2131 | +#ifndef CONFIG_X86_64 |
2132 | +/* |
2133 | + * On 32-bit systems it would be difficult to safely unmap a poison page |
2134 | + * from the kernel 1:1 map because there are no non-canonical addresses that |
2135 | + * we can use to refer to the address without risking a speculative access. |
2136 | + * However, this isn't much of an issue because: |
2137 | + * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which |
2138 | + * are only mapped into the kernel as needed |
2139 | + * 2) Few people would run a 32-bit kernel on a machine that supports |
2140 | + * recoverable errors because they have too much memory to boot 32-bit. |
2141 | + */ |
2142 | +static inline void mce_unmap_kpfn(unsigned long pfn) {} |
2143 | +#define mce_unmap_kpfn mce_unmap_kpfn |
2144 | +#endif |
2145 | + |
2146 | #endif /* __X86_MCE_INTERNAL_H__ */ |
2147 | diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c |
2148 | index 868e412b4f0c..2fe482f6ecd8 100644 |
2149 | --- a/arch/x86/kernel/cpu/mcheck/mce.c |
2150 | +++ b/arch/x86/kernel/cpu/mcheck/mce.c |
2151 | @@ -106,6 +106,10 @@ static struct irq_work mce_irq_work; |
2152 | |
2153 | static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs); |
2154 | |
2155 | +#ifndef mce_unmap_kpfn |
2156 | +static void mce_unmap_kpfn(unsigned long pfn); |
2157 | +#endif |
2158 | + |
2159 | /* |
2160 | * CPU/chipset specific EDAC code can register a notifier call here to print |
2161 | * MCE errors in a human-readable form. |
2162 | @@ -582,7 +586,8 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val, |
2163 | |
2164 | if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) { |
2165 | pfn = mce->addr >> PAGE_SHIFT; |
2166 | - memory_failure(pfn, MCE_VECTOR, 0); |
2167 | + if (memory_failure(pfn, MCE_VECTOR, 0)) |
2168 | + mce_unmap_kpfn(pfn); |
2169 | } |
2170 | |
2171 | return NOTIFY_OK; |
2172 | @@ -1049,12 +1054,13 @@ static int do_memory_failure(struct mce *m) |
2173 | ret = memory_failure(m->addr >> PAGE_SHIFT, MCE_VECTOR, flags); |
2174 | if (ret) |
2175 | pr_err("Memory error not recovered"); |
2176 | + else |
2177 | + mce_unmap_kpfn(m->addr >> PAGE_SHIFT); |
2178 | return ret; |
2179 | } |
2180 | |
2181 | -#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE) |
2182 | - |
2183 | -void arch_unmap_kpfn(unsigned long pfn) |
2184 | +#ifndef mce_unmap_kpfn |
2185 | +static void mce_unmap_kpfn(unsigned long pfn) |
2186 | { |
2187 | unsigned long decoy_addr; |
2188 | |
2189 | @@ -1065,7 +1071,7 @@ void arch_unmap_kpfn(unsigned long pfn) |
2190 | * We would like to just call: |
2191 | * set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1); |
2192 | * but doing that would radically increase the odds of a |
2193 | - * speculative access to the posion page because we'd have |
2194 | + * speculative access to the poison page because we'd have |
2195 | * the virtual address of the kernel 1:1 mapping sitting |
2196 | * around in registers. |
2197 | * Instead we get tricky. We create a non-canonical address |
2198 | @@ -1090,7 +1096,6 @@ void arch_unmap_kpfn(unsigned long pfn) |
2199 | |
2200 | if (set_memory_np(decoy_addr, 1)) |
2201 | pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn); |
2202 | - |
2203 | } |
2204 | #endif |
2205 | |
2206 | diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c |
2207 | index f7c55b0e753a..a15db2b4e0d6 100644 |
2208 | --- a/arch/x86/kernel/cpu/microcode/intel.c |
2209 | +++ b/arch/x86/kernel/cpu/microcode/intel.c |
2210 | @@ -921,7 +921,7 @@ static bool is_blacklisted(unsigned int cpu) |
2211 | */ |
2212 | if (c->x86 == 6 && |
2213 | c->x86_model == INTEL_FAM6_BROADWELL_X && |
2214 | - c->x86_mask == 0x01 && |
2215 | + c->x86_stepping == 0x01 && |
2216 | llc_size_per_core > 2621440 && |
2217 | c->microcode < 0x0b000021) { |
2218 | pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode); |
2219 | @@ -944,7 +944,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device, |
2220 | return UCODE_NFOUND; |
2221 | |
2222 | sprintf(name, "intel-ucode/%02x-%02x-%02x", |
2223 | - c->x86, c->x86_model, c->x86_mask); |
2224 | + c->x86, c->x86_model, c->x86_stepping); |
2225 | |
2226 | if (request_firmware_direct(&firmware, name, device)) { |
2227 | pr_debug("data file %s load failed\n", name); |
2228 | @@ -982,7 +982,7 @@ static struct microcode_ops microcode_intel_ops = { |
2229 | |
2230 | static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c) |
2231 | { |
2232 | - u64 llc_size = c->x86_cache_size * 1024; |
2233 | + u64 llc_size = c->x86_cache_size * 1024ULL; |
2234 | |
2235 | do_div(llc_size, c->x86_max_cores); |
2236 | |
2237 | diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c |
2238 | index fdc55215d44d..e12ee86906c6 100644 |
2239 | --- a/arch/x86/kernel/cpu/mtrr/generic.c |
2240 | +++ b/arch/x86/kernel/cpu/mtrr/generic.c |
2241 | @@ -859,7 +859,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size, |
2242 | */ |
2243 | if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 && |
2244 | boot_cpu_data.x86_model == 1 && |
2245 | - boot_cpu_data.x86_mask <= 7) { |
2246 | + boot_cpu_data.x86_stepping <= 7) { |
2247 | if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { |
2248 | pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); |
2249 | return -EINVAL; |
2250 | diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c |
2251 | index 40d5a8a75212..7468de429087 100644 |
2252 | --- a/arch/x86/kernel/cpu/mtrr/main.c |
2253 | +++ b/arch/x86/kernel/cpu/mtrr/main.c |
2254 | @@ -711,8 +711,8 @@ void __init mtrr_bp_init(void) |
2255 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && |
2256 | boot_cpu_data.x86 == 0xF && |
2257 | boot_cpu_data.x86_model == 0x3 && |
2258 | - (boot_cpu_data.x86_mask == 0x3 || |
2259 | - boot_cpu_data.x86_mask == 0x4)) |
2260 | + (boot_cpu_data.x86_stepping == 0x3 || |
2261 | + boot_cpu_data.x86_stepping == 0x4)) |
2262 | phys_addr = 36; |
2263 | |
2264 | size_or_mask = SIZE_OR_MASK_BITS(phys_addr); |
2265 | diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c |
2266 | index e7ecedafa1c8..2c8522a39ed5 100644 |
2267 | --- a/arch/x86/kernel/cpu/proc.c |
2268 | +++ b/arch/x86/kernel/cpu/proc.c |
2269 | @@ -72,8 +72,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) |
2270 | c->x86_model, |
2271 | c->x86_model_id[0] ? c->x86_model_id : "unknown"); |
2272 | |
2273 | - if (c->x86_mask || c->cpuid_level >= 0) |
2274 | - seq_printf(m, "stepping\t: %d\n", c->x86_mask); |
2275 | + if (c->x86_stepping || c->cpuid_level >= 0) |
2276 | + seq_printf(m, "stepping\t: %d\n", c->x86_stepping); |
2277 | else |
2278 | seq_puts(m, "stepping\t: unknown\n"); |
2279 | if (c->microcode) |
2280 | @@ -91,8 +91,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) |
2281 | } |
2282 | |
2283 | /* Cache size */ |
2284 | - if (c->x86_cache_size >= 0) |
2285 | - seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); |
2286 | + if (c->x86_cache_size) |
2287 | + seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size); |
2288 | |
2289 | show_cpuinfo_core(m, c, cpu); |
2290 | show_cpuinfo_misc(m, c); |
2291 | diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c |
2292 | index 1e82f787c160..c87560e1e3ef 100644 |
2293 | --- a/arch/x86/kernel/early-quirks.c |
2294 | +++ b/arch/x86/kernel/early-quirks.c |
2295 | @@ -527,6 +527,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = { |
2296 | INTEL_SKL_IDS(&gen9_early_ops), |
2297 | INTEL_BXT_IDS(&gen9_early_ops), |
2298 | INTEL_KBL_IDS(&gen9_early_ops), |
2299 | + INTEL_CFL_IDS(&gen9_early_ops), |
2300 | INTEL_GLK_IDS(&gen9_early_ops), |
2301 | INTEL_CNL_IDS(&gen9_early_ops), |
2302 | }; |
2303 | diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S |
2304 | index c29020907886..b59e4fb40fd9 100644 |
2305 | --- a/arch/x86/kernel/head_32.S |
2306 | +++ b/arch/x86/kernel/head_32.S |
2307 | @@ -37,7 +37,7 @@ |
2308 | #define X86 new_cpu_data+CPUINFO_x86 |
2309 | #define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor |
2310 | #define X86_MODEL new_cpu_data+CPUINFO_x86_model |
2311 | -#define X86_MASK new_cpu_data+CPUINFO_x86_mask |
2312 | +#define X86_STEPPING new_cpu_data+CPUINFO_x86_stepping |
2313 | #define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math |
2314 | #define X86_CPUID new_cpu_data+CPUINFO_cpuid_level |
2315 | #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability |
2316 | @@ -332,7 +332,7 @@ ENTRY(startup_32_smp) |
2317 | shrb $4,%al |
2318 | movb %al,X86_MODEL |
2319 | andb $0x0f,%cl # mask mask revision |
2320 | - movb %cl,X86_MASK |
2321 | + movb %cl,X86_STEPPING |
2322 | movl %edx,X86_CAPABILITY |
2323 | |
2324 | .Lis486: |
2325 | diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c |
2326 | index 3a4b12809ab5..bc6bc6689e68 100644 |
2327 | --- a/arch/x86/kernel/mpparse.c |
2328 | +++ b/arch/x86/kernel/mpparse.c |
2329 | @@ -407,7 +407,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) |
2330 | processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01; |
2331 | processor.cpuflag = CPU_ENABLED; |
2332 | processor.cpufeature = (boot_cpu_data.x86 << 8) | |
2333 | - (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; |
2334 | + (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping; |
2335 | processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX]; |
2336 | processor.reserved[0] = 0; |
2337 | processor.reserved[1] = 0; |
2338 | diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c |
2339 | index 041096bdef86..99dc79e76bdc 100644 |
2340 | --- a/arch/x86/kernel/paravirt.c |
2341 | +++ b/arch/x86/kernel/paravirt.c |
2342 | @@ -200,9 +200,9 @@ static void native_flush_tlb_global(void) |
2343 | __native_flush_tlb_global(); |
2344 | } |
2345 | |
2346 | -static void native_flush_tlb_single(unsigned long addr) |
2347 | +static void native_flush_tlb_one_user(unsigned long addr) |
2348 | { |
2349 | - __native_flush_tlb_single(addr); |
2350 | + __native_flush_tlb_one_user(addr); |
2351 | } |
2352 | |
2353 | struct static_key paravirt_steal_enabled; |
2354 | @@ -401,7 +401,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = { |
2355 | |
2356 | .flush_tlb_user = native_flush_tlb, |
2357 | .flush_tlb_kernel = native_flush_tlb_global, |
2358 | - .flush_tlb_single = native_flush_tlb_single, |
2359 | + .flush_tlb_one_user = native_flush_tlb_one_user, |
2360 | .flush_tlb_others = native_flush_tlb_others, |
2361 | |
2362 | .pgd_alloc = __paravirt_pgd_alloc, |
2363 | diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S |
2364 | index 307d3bac5f04..11eda21eb697 100644 |
2365 | --- a/arch/x86/kernel/relocate_kernel_64.S |
2366 | +++ b/arch/x86/kernel/relocate_kernel_64.S |
2367 | @@ -68,6 +68,9 @@ relocate_kernel: |
2368 | movq %cr4, %rax |
2369 | movq %rax, CR4(%r11) |
2370 | |
2371 | + /* Save CR4. Required to enable the right paging mode later. */ |
2372 | + movq %rax, %r13 |
2373 | + |
2374 | /* zero out flags, and disable interrupts */ |
2375 | pushq $0 |
2376 | popfq |
2377 | @@ -126,8 +129,13 @@ identity_mapped: |
2378 | /* |
2379 | * Set cr4 to a known state: |
2380 | * - physical address extension enabled |
2381 | + * - 5-level paging, if it was enabled before |
2382 | */ |
2383 | movl $X86_CR4_PAE, %eax |
2384 | + testq $X86_CR4_LA57, %r13 |
2385 | + jz 1f |
2386 | + orl $X86_CR4_LA57, %eax |
2387 | +1: |
2388 | movq %rax, %cr4 |
2389 | |
2390 | jmp 1f |
2391 | diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c |
2392 | index ed556d50d7ed..844279c3ff4a 100644 |
2393 | --- a/arch/x86/kernel/smpboot.c |
2394 | +++ b/arch/x86/kernel/smpboot.c |
2395 | @@ -1431,7 +1431,6 @@ static void remove_siblinginfo(int cpu) |
2396 | cpumask_clear(cpu_llc_shared_mask(cpu)); |
2397 | cpumask_clear(topology_sibling_cpumask(cpu)); |
2398 | cpumask_clear(topology_core_cpumask(cpu)); |
2399 | - c->phys_proc_id = 0; |
2400 | c->cpu_core_id = 0; |
2401 | cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); |
2402 | recompute_smt_state(); |
2403 | diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c |
2404 | index 446c9ef8cfc3..3d9b2308e7fa 100644 |
2405 | --- a/arch/x86/kernel/traps.c |
2406 | +++ b/arch/x86/kernel/traps.c |
2407 | @@ -181,7 +181,7 @@ int fixup_bug(struct pt_regs *regs, int trapnr) |
2408 | break; |
2409 | |
2410 | case BUG_TRAP_TYPE_WARN: |
2411 | - regs->ip += LEN_UD0; |
2412 | + regs->ip += LEN_UD2; |
2413 | return 1; |
2414 | } |
2415 | |
2416 | diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c |
2417 | index 2b8eb4da4d08..cc83bdcb65d1 100644 |
2418 | --- a/arch/x86/kvm/mmu.c |
2419 | +++ b/arch/x86/kvm/mmu.c |
2420 | @@ -5058,7 +5058,7 @@ void kvm_mmu_uninit_vm(struct kvm *kvm) |
2421 | typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head); |
2422 | |
2423 | /* The caller should hold mmu-lock before calling this function. */ |
2424 | -static bool |
2425 | +static __always_inline bool |
2426 | slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, |
2427 | slot_level_handler fn, int start_level, int end_level, |
2428 | gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb) |
2429 | @@ -5088,7 +5088,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, |
2430 | return flush; |
2431 | } |
2432 | |
2433 | -static bool |
2434 | +static __always_inline bool |
2435 | slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, |
2436 | slot_level_handler fn, int start_level, int end_level, |
2437 | bool lock_flush_tlb) |
2438 | @@ -5099,7 +5099,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, |
2439 | lock_flush_tlb); |
2440 | } |
2441 | |
2442 | -static bool |
2443 | +static __always_inline bool |
2444 | slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, |
2445 | slot_level_handler fn, bool lock_flush_tlb) |
2446 | { |
2447 | @@ -5107,7 +5107,7 @@ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, |
2448 | PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); |
2449 | } |
2450 | |
2451 | -static bool |
2452 | +static __always_inline bool |
2453 | slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, |
2454 | slot_level_handler fn, bool lock_flush_tlb) |
2455 | { |
2456 | @@ -5115,7 +5115,7 @@ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, |
2457 | PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); |
2458 | } |
2459 | |
2460 | -static bool |
2461 | +static __always_inline bool |
2462 | slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, |
2463 | slot_level_handler fn, bool lock_flush_tlb) |
2464 | { |
2465 | diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
2466 | index 6f623848260f..561d8937fac5 100644 |
2467 | --- a/arch/x86/kvm/vmx.c |
2468 | +++ b/arch/x86/kvm/vmx.c |
2469 | @@ -10131,7 +10131,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, |
2470 | if (cpu_has_vmx_msr_bitmap() && |
2471 | nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS) && |
2472 | nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) |
2473 | - ; |
2474 | + vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, |
2475 | + CPU_BASED_USE_MSR_BITMAPS); |
2476 | else |
2477 | vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, |
2478 | CPU_BASED_USE_MSR_BITMAPS); |
2479 | @@ -10220,8 +10221,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, |
2480 | * updated to reflect this when L1 (or its L2s) actually write to |
2481 | * the MSR. |
2482 | */ |
2483 | - bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD); |
2484 | - bool spec_ctrl = msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL); |
2485 | + bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD); |
2486 | + bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL); |
2487 | |
2488 | if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && |
2489 | !pred_cmd && !spec_ctrl) |
2490 | diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c |
2491 | index d6f848d1211d..2dd1fe13a37b 100644 |
2492 | --- a/arch/x86/lib/cpu.c |
2493 | +++ b/arch/x86/lib/cpu.c |
2494 | @@ -18,7 +18,7 @@ unsigned int x86_model(unsigned int sig) |
2495 | { |
2496 | unsigned int fam, model; |
2497 | |
2498 | - fam = x86_family(sig); |
2499 | + fam = x86_family(sig); |
2500 | |
2501 | model = (sig >> 4) & 0xf; |
2502 | |
2503 | diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c |
2504 | index 4a837289f2ad..60ae1fe3609f 100644 |
2505 | --- a/arch/x86/mm/init_64.c |
2506 | +++ b/arch/x86/mm/init_64.c |
2507 | @@ -256,7 +256,7 @@ static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte) |
2508 | * It's enough to flush this one mapping. |
2509 | * (PGE mappings get flushed as well) |
2510 | */ |
2511 | - __flush_tlb_one(vaddr); |
2512 | + __flush_tlb_one_kernel(vaddr); |
2513 | } |
2514 | |
2515 | void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte) |
2516 | diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c |
2517 | index c45b6ec5357b..e2db83bebc3b 100644 |
2518 | --- a/arch/x86/mm/ioremap.c |
2519 | +++ b/arch/x86/mm/ioremap.c |
2520 | @@ -820,5 +820,5 @@ void __init __early_set_fixmap(enum fixed_addresses idx, |
2521 | set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); |
2522 | else |
2523 | pte_clear(&init_mm, addr, pte); |
2524 | - __flush_tlb_one(addr); |
2525 | + __flush_tlb_one_kernel(addr); |
2526 | } |
2527 | diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c |
2528 | index 58477ec3d66d..7c8686709636 100644 |
2529 | --- a/arch/x86/mm/kmmio.c |
2530 | +++ b/arch/x86/mm/kmmio.c |
2531 | @@ -168,7 +168,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear) |
2532 | return -1; |
2533 | } |
2534 | |
2535 | - __flush_tlb_one(f->addr); |
2536 | + __flush_tlb_one_kernel(f->addr); |
2537 | return 0; |
2538 | } |
2539 | |
2540 | diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c |
2541 | index c3c5274410a9..9bb7f0ab9fe6 100644 |
2542 | --- a/arch/x86/mm/pgtable_32.c |
2543 | +++ b/arch/x86/mm/pgtable_32.c |
2544 | @@ -63,7 +63,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval) |
2545 | * It's enough to flush this one mapping. |
2546 | * (PGE mappings get flushed as well) |
2547 | */ |
2548 | - __flush_tlb_one(vaddr); |
2549 | + __flush_tlb_one_kernel(vaddr); |
2550 | } |
2551 | |
2552 | unsigned long __FIXADDR_TOP = 0xfffff000; |
2553 | diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c |
2554 | index 012d02624848..0c936435ea93 100644 |
2555 | --- a/arch/x86/mm/tlb.c |
2556 | +++ b/arch/x86/mm/tlb.c |
2557 | @@ -492,7 +492,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, |
2558 | * flush that changes context.tlb_gen from 2 to 3. If they get |
2559 | * processed on this CPU in reverse order, we'll see |
2560 | * local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL. |
2561 | - * If we were to use __flush_tlb_single() and set local_tlb_gen to |
2562 | + * If we were to use __flush_tlb_one_user() and set local_tlb_gen to |
2563 | * 3, we'd be break the invariant: we'd update local_tlb_gen above |
2564 | * 1 without the full flush that's needed for tlb_gen 2. |
2565 | * |
2566 | @@ -513,7 +513,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, |
2567 | |
2568 | addr = f->start; |
2569 | while (addr < f->end) { |
2570 | - __flush_tlb_single(addr); |
2571 | + __flush_tlb_one_user(addr); |
2572 | addr += PAGE_SIZE; |
2573 | } |
2574 | if (local) |
2575 | @@ -660,7 +660,7 @@ static void do_kernel_range_flush(void *info) |
2576 | |
2577 | /* flush range by one by one 'invlpg' */ |
2578 | for (addr = f->start; addr < f->end; addr += PAGE_SIZE) |
2579 | - __flush_tlb_one(addr); |
2580 | + __flush_tlb_one_kernel(addr); |
2581 | } |
2582 | |
2583 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) |
2584 | diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c |
2585 | index 8538a6723171..7d5d53f36a7a 100644 |
2586 | --- a/arch/x86/platform/uv/tlb_uv.c |
2587 | +++ b/arch/x86/platform/uv/tlb_uv.c |
2588 | @@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp, |
2589 | local_flush_tlb(); |
2590 | stat->d_alltlb++; |
2591 | } else { |
2592 | - __flush_tlb_single(msg->address); |
2593 | + __flush_tlb_one_user(msg->address); |
2594 | stat->d_onetlb++; |
2595 | } |
2596 | stat->d_requestee++; |
2597 | diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c |
2598 | index d85076223a69..aae88fec9941 100644 |
2599 | --- a/arch/x86/xen/mmu_pv.c |
2600 | +++ b/arch/x86/xen/mmu_pv.c |
2601 | @@ -1300,12 +1300,12 @@ static void xen_flush_tlb(void) |
2602 | preempt_enable(); |
2603 | } |
2604 | |
2605 | -static void xen_flush_tlb_single(unsigned long addr) |
2606 | +static void xen_flush_tlb_one_user(unsigned long addr) |
2607 | { |
2608 | struct mmuext_op *op; |
2609 | struct multicall_space mcs; |
2610 | |
2611 | - trace_xen_mmu_flush_tlb_single(addr); |
2612 | + trace_xen_mmu_flush_tlb_one_user(addr); |
2613 | |
2614 | preempt_disable(); |
2615 | |
2616 | @@ -2370,7 +2370,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { |
2617 | |
2618 | .flush_tlb_user = xen_flush_tlb, |
2619 | .flush_tlb_kernel = xen_flush_tlb, |
2620 | - .flush_tlb_single = xen_flush_tlb_single, |
2621 | + .flush_tlb_one_user = xen_flush_tlb_one_user, |
2622 | .flush_tlb_others = xen_flush_tlb_others, |
2623 | |
2624 | .pgd_alloc = xen_pgd_alloc, |
2625 | diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c |
2626 | index 13b4f19b9131..159a897151d6 100644 |
2627 | --- a/arch/x86/xen/p2m.c |
2628 | +++ b/arch/x86/xen/p2m.c |
2629 | @@ -694,6 +694,9 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, |
2630 | int i, ret = 0; |
2631 | pte_t *pte; |
2632 | |
2633 | + if (xen_feature(XENFEAT_auto_translated_physmap)) |
2634 | + return 0; |
2635 | + |
2636 | if (kmap_ops) { |
2637 | ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, |
2638 | kmap_ops, count); |
2639 | @@ -736,6 +739,9 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, |
2640 | { |
2641 | int i, ret = 0; |
2642 | |
2643 | + if (xen_feature(XENFEAT_auto_translated_physmap)) |
2644 | + return 0; |
2645 | + |
2646 | for (i = 0; i < count; i++) { |
2647 | unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); |
2648 | unsigned long pfn = page_to_pfn(pages[i]); |
2649 | diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S |
2650 | index 497cc55a0c16..96f26e026783 100644 |
2651 | --- a/arch/x86/xen/xen-head.S |
2652 | +++ b/arch/x86/xen/xen-head.S |
2653 | @@ -9,7 +9,9 @@ |
2654 | |
2655 | #include <asm/boot.h> |
2656 | #include <asm/asm.h> |
2657 | +#include <asm/msr.h> |
2658 | #include <asm/page_types.h> |
2659 | +#include <asm/percpu.h> |
2660 | #include <asm/unwind_hints.h> |
2661 | |
2662 | #include <xen/interface/elfnote.h> |
2663 | @@ -35,6 +37,20 @@ ENTRY(startup_xen) |
2664 | mov %_ASM_SI, xen_start_info |
2665 | mov $init_thread_union+THREAD_SIZE, %_ASM_SP |
2666 | |
2667 | +#ifdef CONFIG_X86_64 |
2668 | + /* Set up %gs. |
2669 | + * |
2670 | + * The base of %gs always points to the bottom of the irqstack |
2671 | + * union. If the stack protector canary is enabled, it is |
2672 | + * located at %gs:40. Note that, on SMP, the boot cpu uses |
2673 | + * init data section till per cpu areas are set up. |
2674 | + */ |
2675 | + movl $MSR_GS_BASE,%ecx |
2676 | + movq $INIT_PER_CPU_VAR(irq_stack_union),%rax |
2677 | + cdq |
2678 | + wrmsr |
2679 | +#endif |
2680 | + |
2681 | jmp xen_start_kernel |
2682 | END(startup_xen) |
2683 | __FINIT |
2684 | diff --git a/block/blk-wbt.c b/block/blk-wbt.c |
2685 | index ae8de9780085..f92fc84b5e2c 100644 |
2686 | --- a/block/blk-wbt.c |
2687 | +++ b/block/blk-wbt.c |
2688 | @@ -697,7 +697,15 @@ u64 wbt_default_latency_nsec(struct request_queue *q) |
2689 | |
2690 | static int wbt_data_dir(const struct request *rq) |
2691 | { |
2692 | - return rq_data_dir(rq); |
2693 | + const int op = req_op(rq); |
2694 | + |
2695 | + if (op == REQ_OP_READ) |
2696 | + return READ; |
2697 | + else if (op == REQ_OP_WRITE || op == REQ_OP_FLUSH) |
2698 | + return WRITE; |
2699 | + |
2700 | + /* don't account */ |
2701 | + return -1; |
2702 | } |
2703 | |
2704 | int wbt_init(struct request_queue *q) |
2705 | diff --git a/drivers/base/core.c b/drivers/base/core.c |
2706 | index 110230d86527..6835736daf2d 100644 |
2707 | --- a/drivers/base/core.c |
2708 | +++ b/drivers/base/core.c |
2709 | @@ -313,6 +313,9 @@ static void __device_link_del(struct device_link *link) |
2710 | dev_info(link->consumer, "Dropping the link to %s\n", |
2711 | dev_name(link->supplier)); |
2712 | |
2713 | + if (link->flags & DL_FLAG_PM_RUNTIME) |
2714 | + pm_runtime_drop_link(link->consumer); |
2715 | + |
2716 | list_del(&link->s_node); |
2717 | list_del(&link->c_node); |
2718 | device_link_free(link); |
2719 | diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c |
2720 | index cc93522a6d41..1bbf14338bdb 100644 |
2721 | --- a/drivers/block/rbd.c |
2722 | +++ b/drivers/block/rbd.c |
2723 | @@ -124,11 +124,13 @@ static int atomic_dec_return_safe(atomic_t *v) |
2724 | #define RBD_FEATURE_STRIPINGV2 (1ULL<<1) |
2725 | #define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2) |
2726 | #define RBD_FEATURE_DATA_POOL (1ULL<<7) |
2727 | +#define RBD_FEATURE_OPERATIONS (1ULL<<8) |
2728 | |
2729 | #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \ |
2730 | RBD_FEATURE_STRIPINGV2 | \ |
2731 | RBD_FEATURE_EXCLUSIVE_LOCK | \ |
2732 | - RBD_FEATURE_DATA_POOL) |
2733 | + RBD_FEATURE_DATA_POOL | \ |
2734 | + RBD_FEATURE_OPERATIONS) |
2735 | |
2736 | /* Features supported by this (client software) implementation. */ |
2737 | |
2738 | diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c |
2739 | index d1f5bb534e0e..6e9df558325b 100644 |
2740 | --- a/drivers/char/hw_random/via-rng.c |
2741 | +++ b/drivers/char/hw_random/via-rng.c |
2742 | @@ -162,7 +162,7 @@ static int via_rng_init(struct hwrng *rng) |
2743 | /* Enable secondary noise source on CPUs where it is present. */ |
2744 | |
2745 | /* Nehemiah stepping 8 and higher */ |
2746 | - if ((c->x86_model == 9) && (c->x86_mask > 7)) |
2747 | + if ((c->x86_model == 9) && (c->x86_stepping > 7)) |
2748 | lo |= VIA_NOISESRC2; |
2749 | |
2750 | /* Esther */ |
2751 | diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c |
2752 | index 3a2ca0f79daf..d0c34df0529c 100644 |
2753 | --- a/drivers/cpufreq/acpi-cpufreq.c |
2754 | +++ b/drivers/cpufreq/acpi-cpufreq.c |
2755 | @@ -629,7 +629,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) |
2756 | if (c->x86_vendor == X86_VENDOR_INTEL) { |
2757 | if ((c->x86 == 15) && |
2758 | (c->x86_model == 6) && |
2759 | - (c->x86_mask == 8)) { |
2760 | + (c->x86_stepping == 8)) { |
2761 | pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n"); |
2762 | return -ENODEV; |
2763 | } |
2764 | diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c |
2765 | index c46a12df40dd..d5e27bc7585a 100644 |
2766 | --- a/drivers/cpufreq/longhaul.c |
2767 | +++ b/drivers/cpufreq/longhaul.c |
2768 | @@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) |
2769 | break; |
2770 | |
2771 | case 7: |
2772 | - switch (c->x86_mask) { |
2773 | + switch (c->x86_stepping) { |
2774 | case 0: |
2775 | longhaul_version = TYPE_LONGHAUL_V1; |
2776 | cpu_model = CPU_SAMUEL2; |
2777 | @@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) |
2778 | break; |
2779 | case 1 ... 15: |
2780 | longhaul_version = TYPE_LONGHAUL_V2; |
2781 | - if (c->x86_mask < 8) { |
2782 | + if (c->x86_stepping < 8) { |
2783 | cpu_model = CPU_SAMUEL2; |
2784 | cpuname = "C3 'Samuel 2' [C5B]"; |
2785 | } else { |
2786 | @@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) |
2787 | numscales = 32; |
2788 | memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults)); |
2789 | memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr)); |
2790 | - switch (c->x86_mask) { |
2791 | + switch (c->x86_stepping) { |
2792 | case 0 ... 1: |
2793 | cpu_model = CPU_NEHEMIAH; |
2794 | cpuname = "C3 'Nehemiah A' [C5XLOE]"; |
2795 | diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c |
2796 | index fd77812313f3..a25741b1281b 100644 |
2797 | --- a/drivers/cpufreq/p4-clockmod.c |
2798 | +++ b/drivers/cpufreq/p4-clockmod.c |
2799 | @@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) |
2800 | #endif |
2801 | |
2802 | /* Errata workaround */ |
2803 | - cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask; |
2804 | + cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping; |
2805 | switch (cpuid) { |
2806 | case 0x0f07: |
2807 | case 0x0f0a: |
2808 | diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c |
2809 | index 80ac313e6c59..302e9ce793a0 100644 |
2810 | --- a/drivers/cpufreq/powernow-k7.c |
2811 | +++ b/drivers/cpufreq/powernow-k7.c |
2812 | @@ -131,7 +131,7 @@ static int check_powernow(void) |
2813 | return 0; |
2814 | } |
2815 | |
2816 | - if ((c->x86_model == 6) && (c->x86_mask == 0)) { |
2817 | + if ((c->x86_model == 6) && (c->x86_stepping == 0)) { |
2818 | pr_info("K7 660[A0] core detected, enabling errata workarounds\n"); |
2819 | have_a0 = 1; |
2820 | } |
2821 | diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c |
2822 | index b6d7c4c98d0a..da7fdb4b661a 100644 |
2823 | --- a/drivers/cpufreq/powernv-cpufreq.c |
2824 | +++ b/drivers/cpufreq/powernv-cpufreq.c |
2825 | @@ -288,9 +288,9 @@ static int init_powernv_pstates(void) |
2826 | |
2827 | if (id == pstate_max) |
2828 | powernv_pstate_info.max = i; |
2829 | - else if (id == pstate_nominal) |
2830 | + if (id == pstate_nominal) |
2831 | powernv_pstate_info.nominal = i; |
2832 | - else if (id == pstate_min) |
2833 | + if (id == pstate_min) |
2834 | powernv_pstate_info.min = i; |
2835 | |
2836 | if (powernv_pstate_info.wof_enabled && id == pstate_turbo) { |
2837 | diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c |
2838 | index 41bc5397f4bb..4fa5adf16c70 100644 |
2839 | --- a/drivers/cpufreq/speedstep-centrino.c |
2840 | +++ b/drivers/cpufreq/speedstep-centrino.c |
2841 | @@ -37,7 +37,7 @@ struct cpu_id |
2842 | { |
2843 | __u8 x86; /* CPU family */ |
2844 | __u8 x86_model; /* model */ |
2845 | - __u8 x86_mask; /* stepping */ |
2846 | + __u8 x86_stepping; /* stepping */ |
2847 | }; |
2848 | |
2849 | enum { |
2850 | @@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, |
2851 | { |
2852 | if ((c->x86 == x->x86) && |
2853 | (c->x86_model == x->x86_model) && |
2854 | - (c->x86_mask == x->x86_mask)) |
2855 | + (c->x86_stepping == x->x86_stepping)) |
2856 | return 1; |
2857 | return 0; |
2858 | } |
2859 | diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c |
2860 | index 8085ec9000d1..e3a9962ee410 100644 |
2861 | --- a/drivers/cpufreq/speedstep-lib.c |
2862 | +++ b/drivers/cpufreq/speedstep-lib.c |
2863 | @@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(void) |
2864 | ebx = cpuid_ebx(0x00000001); |
2865 | ebx &= 0x000000FF; |
2866 | |
2867 | - pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); |
2868 | + pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping); |
2869 | |
2870 | - switch (c->x86_mask) { |
2871 | + switch (c->x86_stepping) { |
2872 | case 4: |
2873 | /* |
2874 | * B-stepping [M-P4-M] |
2875 | @@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(void) |
2876 | msr_lo, msr_hi); |
2877 | if ((msr_hi & (1<<18)) && |
2878 | (relaxed_check ? 1 : (msr_hi & (3<<24)))) { |
2879 | - if (c->x86_mask == 0x01) { |
2880 | + if (c->x86_stepping == 0x01) { |
2881 | pr_debug("early PIII version\n"); |
2882 | return SPEEDSTEP_CPU_PIII_C_EARLY; |
2883 | } else |
2884 | diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c |
2885 | index 4b6642a25df5..1c6cbda56afe 100644 |
2886 | --- a/drivers/crypto/padlock-aes.c |
2887 | +++ b/drivers/crypto/padlock-aes.c |
2888 | @@ -512,7 +512,7 @@ static int __init padlock_init(void) |
2889 | |
2890 | printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); |
2891 | |
2892 | - if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) { |
2893 | + if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) { |
2894 | ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS; |
2895 | cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS; |
2896 | printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n"); |
2897 | diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c |
2898 | index 0d01d1624252..63d636424161 100644 |
2899 | --- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c |
2900 | +++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c |
2901 | @@ -28,7 +28,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, |
2902 | algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); |
2903 | ss = algt->ss; |
2904 | |
2905 | - spin_lock(&ss->slock); |
2906 | + spin_lock_bh(&ss->slock); |
2907 | |
2908 | writel(mode, ss->base + SS_CTL); |
2909 | |
2910 | @@ -51,6 +51,6 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, |
2911 | } |
2912 | |
2913 | writel(0, ss->base + SS_CTL); |
2914 | - spin_unlock(&ss->slock); |
2915 | - return dlen; |
2916 | + spin_unlock_bh(&ss->slock); |
2917 | + return 0; |
2918 | } |
2919 | diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c |
2920 | index 78fb496ecb4e..99c4021fc33b 100644 |
2921 | --- a/drivers/devfreq/devfreq.c |
2922 | +++ b/drivers/devfreq/devfreq.c |
2923 | @@ -737,7 +737,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev, |
2924 | devfreq = devfreq_add_device(dev, profile, governor_name, data); |
2925 | if (IS_ERR(devfreq)) { |
2926 | devres_free(ptr); |
2927 | - return ERR_PTR(-ENOMEM); |
2928 | + return devfreq; |
2929 | } |
2930 | |
2931 | *ptr = devfreq; |
2932 | diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c |
2933 | index b44d9d7db347..012fa3d1f407 100644 |
2934 | --- a/drivers/dma-buf/reservation.c |
2935 | +++ b/drivers/dma-buf/reservation.c |
2936 | @@ -455,13 +455,15 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, |
2937 | unsigned long timeout) |
2938 | { |
2939 | struct dma_fence *fence; |
2940 | - unsigned seq, shared_count, i = 0; |
2941 | + unsigned seq, shared_count; |
2942 | long ret = timeout ? timeout : 1; |
2943 | + int i; |
2944 | |
2945 | retry: |
2946 | shared_count = 0; |
2947 | seq = read_seqcount_begin(&obj->seq); |
2948 | rcu_read_lock(); |
2949 | + i = -1; |
2950 | |
2951 | fence = rcu_dereference(obj->fence_excl); |
2952 | if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { |
2953 | @@ -477,14 +479,14 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, |
2954 | fence = NULL; |
2955 | } |
2956 | |
2957 | - if (!fence && wait_all) { |
2958 | + if (wait_all) { |
2959 | struct reservation_object_list *fobj = |
2960 | rcu_dereference(obj->fence); |
2961 | |
2962 | if (fobj) |
2963 | shared_count = fobj->shared_count; |
2964 | |
2965 | - for (i = 0; i < shared_count; ++i) { |
2966 | + for (i = 0; !fence && i < shared_count; ++i) { |
2967 | struct dma_fence *lfence = rcu_dereference(fobj->shared[i]); |
2968 | |
2969 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, |
2970 | diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c |
2971 | index 8b16ec595fa7..329cb96f886f 100644 |
2972 | --- a/drivers/edac/amd64_edac.c |
2973 | +++ b/drivers/edac/amd64_edac.c |
2974 | @@ -3147,7 +3147,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) |
2975 | struct amd64_family_type *fam_type = NULL; |
2976 | |
2977 | pvt->ext_model = boot_cpu_data.x86_model >> 4; |
2978 | - pvt->stepping = boot_cpu_data.x86_mask; |
2979 | + pvt->stepping = boot_cpu_data.x86_stepping; |
2980 | pvt->model = boot_cpu_data.x86_model; |
2981 | pvt->fam = boot_cpu_data.x86; |
2982 | |
2983 | diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h |
2984 | index 58888400f1b8..caebdbebdcd8 100644 |
2985 | --- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h |
2986 | +++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h |
2987 | @@ -40,7 +40,7 @@ struct smu_table_entry { |
2988 | uint32_t table_addr_high; |
2989 | uint32_t table_addr_low; |
2990 | uint8_t *table; |
2991 | - uint32_t handle; |
2992 | + unsigned long handle; |
2993 | }; |
2994 | |
2995 | struct smu_table_array { |
2996 | diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c |
2997 | index 9555a3542022..831b73392d82 100644 |
2998 | --- a/drivers/gpu/drm/ast/ast_mode.c |
2999 | +++ b/drivers/gpu/drm/ast/ast_mode.c |
3000 | @@ -644,6 +644,7 @@ static void ast_crtc_commit(struct drm_crtc *crtc) |
3001 | { |
3002 | struct ast_private *ast = crtc->dev->dev_private; |
3003 | ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0); |
3004 | + ast_crtc_load_lut(crtc); |
3005 | } |
3006 | |
3007 | |
3008 | diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c |
3009 | index aad468d170a7..d9c0f7573905 100644 |
3010 | --- a/drivers/gpu/drm/drm_auth.c |
3011 | +++ b/drivers/gpu/drm/drm_auth.c |
3012 | @@ -230,6 +230,12 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data, |
3013 | if (!dev->master) |
3014 | goto out_unlock; |
3015 | |
3016 | + if (file_priv->master->lessor != NULL) { |
3017 | + DRM_DEBUG_LEASE("Attempt to drop lessee %d as master\n", file_priv->master->lessee_id); |
3018 | + ret = -EINVAL; |
3019 | + goto out_unlock; |
3020 | + } |
3021 | + |
3022 | ret = 0; |
3023 | drm_drop_master(dev, file_priv); |
3024 | out_unlock: |
3025 | diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c |
3026 | index 4756b3c9bf2c..9a9214ae0fb5 100644 |
3027 | --- a/drivers/gpu/drm/qxl/qxl_display.c |
3028 | +++ b/drivers/gpu/drm/qxl/qxl_display.c |
3029 | @@ -289,6 +289,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc) |
3030 | { |
3031 | struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc); |
3032 | |
3033 | + qxl_bo_unref(&qxl_crtc->cursor_bo); |
3034 | drm_crtc_cleanup(crtc); |
3035 | kfree(qxl_crtc); |
3036 | } |
3037 | @@ -495,6 +496,53 @@ static int qxl_primary_atomic_check(struct drm_plane *plane, |
3038 | return 0; |
3039 | } |
3040 | |
3041 | +static int qxl_primary_apply_cursor(struct drm_plane *plane) |
3042 | +{ |
3043 | + struct drm_device *dev = plane->dev; |
3044 | + struct qxl_device *qdev = dev->dev_private; |
3045 | + struct drm_framebuffer *fb = plane->state->fb; |
3046 | + struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc); |
3047 | + struct qxl_cursor_cmd *cmd; |
3048 | + struct qxl_release *release; |
3049 | + int ret = 0; |
3050 | + |
3051 | + if (!qcrtc->cursor_bo) |
3052 | + return 0; |
3053 | + |
3054 | + ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), |
3055 | + QXL_RELEASE_CURSOR_CMD, |
3056 | + &release, NULL); |
3057 | + if (ret) |
3058 | + return ret; |
3059 | + |
3060 | + ret = qxl_release_list_add(release, qcrtc->cursor_bo); |
3061 | + if (ret) |
3062 | + goto out_free_release; |
3063 | + |
3064 | + ret = qxl_release_reserve_list(release, false); |
3065 | + if (ret) |
3066 | + goto out_free_release; |
3067 | + |
3068 | + cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); |
3069 | + cmd->type = QXL_CURSOR_SET; |
3070 | + cmd->u.set.position.x = plane->state->crtc_x + fb->hot_x; |
3071 | + cmd->u.set.position.y = plane->state->crtc_y + fb->hot_y; |
3072 | + |
3073 | + cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0); |
3074 | + |
3075 | + cmd->u.set.visible = 1; |
3076 | + qxl_release_unmap(qdev, release, &cmd->release_info); |
3077 | + |
3078 | + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); |
3079 | + qxl_release_fence_buffer_objects(release); |
3080 | + |
3081 | + return ret; |
3082 | + |
3083 | +out_free_release: |
3084 | + qxl_release_free(qdev, release); |
3085 | + return ret; |
3086 | +} |
3087 | + |
3088 | static void qxl_primary_atomic_update(struct drm_plane *plane, |
3089 | struct drm_plane_state *old_state) |
3090 | { |
3091 | @@ -510,6 +558,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, |
3092 | .x2 = qfb->base.width, |
3093 | .y2 = qfb->base.height |
3094 | }; |
3095 | + int ret; |
3096 | bool same_shadow = false; |
3097 | |
3098 | if (old_state->fb) { |
3099 | @@ -531,6 +580,11 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, |
3100 | if (!same_shadow) |
3101 | qxl_io_destroy_primary(qdev); |
3102 | bo_old->is_primary = false; |
3103 | + |
3104 | + ret = qxl_primary_apply_cursor(plane); |
3105 | + if (ret) |
3106 | + DRM_ERROR( |
3107 | + "could not set cursor after creating primary"); |
3108 | } |
3109 | |
3110 | if (!bo->is_primary) { |
3111 | @@ -571,11 +625,12 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, |
3112 | struct drm_device *dev = plane->dev; |
3113 | struct qxl_device *qdev = dev->dev_private; |
3114 | struct drm_framebuffer *fb = plane->state->fb; |
3115 | + struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc); |
3116 | struct qxl_release *release; |
3117 | struct qxl_cursor_cmd *cmd; |
3118 | struct qxl_cursor *cursor; |
3119 | struct drm_gem_object *obj; |
3120 | - struct qxl_bo *cursor_bo, *user_bo = NULL; |
3121 | + struct qxl_bo *cursor_bo = NULL, *user_bo = NULL; |
3122 | int ret; |
3123 | void *user_ptr; |
3124 | int size = 64*64*4; |
3125 | @@ -628,6 +683,10 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, |
3126 | cmd->u.set.shape = qxl_bo_physical_address(qdev, |
3127 | cursor_bo, 0); |
3128 | cmd->type = QXL_CURSOR_SET; |
3129 | + |
3130 | + qxl_bo_unref(&qcrtc->cursor_bo); |
3131 | + qcrtc->cursor_bo = cursor_bo; |
3132 | + cursor_bo = NULL; |
3133 | } else { |
3134 | |
3135 | ret = qxl_release_reserve_list(release, true); |
3136 | @@ -645,6 +704,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, |
3137 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); |
3138 | qxl_release_fence_buffer_objects(release); |
3139 | |
3140 | + qxl_bo_unref(&cursor_bo); |
3141 | + |
3142 | return; |
3143 | |
3144 | out_backoff: |
3145 | diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h |
3146 | index 08752c0ffb35..00a1a66b052a 100644 |
3147 | --- a/drivers/gpu/drm/qxl/qxl_drv.h |
3148 | +++ b/drivers/gpu/drm/qxl/qxl_drv.h |
3149 | @@ -111,6 +111,8 @@ struct qxl_bo_list { |
3150 | struct qxl_crtc { |
3151 | struct drm_crtc base; |
3152 | int index; |
3153 | + |
3154 | + struct qxl_bo *cursor_bo; |
3155 | }; |
3156 | |
3157 | struct qxl_output { |
3158 | diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c |
3159 | index d34d1cf33895..95f4db70dd22 100644 |
3160 | --- a/drivers/gpu/drm/radeon/radeon_uvd.c |
3161 | +++ b/drivers/gpu/drm/radeon/radeon_uvd.c |
3162 | @@ -995,7 +995,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev, |
3163 | /* calc dclk divider with current vco freq */ |
3164 | dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk, |
3165 | pd_min, pd_even); |
3166 | - if (vclk_div > pd_max) |
3167 | + if (dclk_div > pd_max) |
3168 | break; /* vco is too big, it has to stop */ |
3169 | |
3170 | /* calc score with current vco freq */ |
3171 | diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c |
3172 | index ee3e74266a13..97a0a639dad9 100644 |
3173 | --- a/drivers/gpu/drm/radeon/si_dpm.c |
3174 | +++ b/drivers/gpu/drm/radeon/si_dpm.c |
3175 | @@ -2984,6 +2984,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, |
3176 | (rdev->pdev->device == 0x6667)) { |
3177 | max_sclk = 75000; |
3178 | } |
3179 | + if ((rdev->pdev->revision == 0xC3) || |
3180 | + (rdev->pdev->device == 0x6665)) { |
3181 | + max_sclk = 60000; |
3182 | + max_mclk = 80000; |
3183 | + } |
3184 | } else if (rdev->family == CHIP_OLAND) { |
3185 | if ((rdev->pdev->revision == 0xC7) || |
3186 | (rdev->pdev->revision == 0x80) || |
3187 | diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c |
3188 | index c088703777e2..68eed684dff5 100644 |
3189 | --- a/drivers/gpu/drm/ttm/ttm_bo.c |
3190 | +++ b/drivers/gpu/drm/ttm/ttm_bo.c |
3191 | @@ -175,7 +175,8 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) |
3192 | list_add_tail(&bo->lru, &man->lru[bo->priority]); |
3193 | kref_get(&bo->list_kref); |
3194 | |
3195 | - if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) { |
3196 | + if (bo->ttm && !(bo->ttm->page_flags & |
3197 | + (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) { |
3198 | list_add_tail(&bo->swap, |
3199 | &bo->glob->swap_lru[bo->priority]); |
3200 | kref_get(&bo->list_kref); |
3201 | diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c |
3202 | index c8ebb757e36b..b17d0d38f290 100644 |
3203 | --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c |
3204 | +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c |
3205 | @@ -299,7 +299,7 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma) |
3206 | |
3207 | static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, |
3208 | unsigned long offset, |
3209 | - void *buf, int len, int write) |
3210 | + uint8_t *buf, int len, int write) |
3211 | { |
3212 | unsigned long page = offset >> PAGE_SHIFT; |
3213 | unsigned long bytes_left = len; |
3214 | @@ -328,6 +328,7 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, |
3215 | ttm_bo_kunmap(&map); |
3216 | |
3217 | page++; |
3218 | + buf += bytes; |
3219 | bytes_left -= bytes; |
3220 | offset = 0; |
3221 | } while (bytes_left); |
3222 | diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c |
3223 | index c13a4fd86b3c..a42744c7665b 100644 |
3224 | --- a/drivers/hwmon/coretemp.c |
3225 | +++ b/drivers/hwmon/coretemp.c |
3226 | @@ -268,13 +268,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) |
3227 | for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) { |
3228 | const struct tjmax_model *tm = &tjmax_model_table[i]; |
3229 | if (c->x86_model == tm->model && |
3230 | - (tm->mask == ANY || c->x86_mask == tm->mask)) |
3231 | + (tm->mask == ANY || c->x86_stepping == tm->mask)) |
3232 | return tm->tjmax; |
3233 | } |
3234 | |
3235 | /* Early chips have no MSR for TjMax */ |
3236 | |
3237 | - if (c->x86_model == 0xf && c->x86_mask < 4) |
3238 | + if (c->x86_model == 0xf && c->x86_stepping < 4) |
3239 | usemsr_ee = 0; |
3240 | |
3241 | if (c->x86_model > 0xe && usemsr_ee) { |
3242 | @@ -425,7 +425,7 @@ static int chk_ucode_version(unsigned int cpu) |
3243 | * Readings might stop update when processor visited too deep sleep, |
3244 | * fixed for stepping D0 (6EC). |
3245 | */ |
3246 | - if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) { |
3247 | + if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) { |
3248 | pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n"); |
3249 | return -ENODEV; |
3250 | } |
3251 | diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c |
3252 | index ef91b8a67549..84e91286fc4f 100644 |
3253 | --- a/drivers/hwmon/hwmon-vid.c |
3254 | +++ b/drivers/hwmon/hwmon-vid.c |
3255 | @@ -293,7 +293,7 @@ u8 vid_which_vrm(void) |
3256 | if (c->x86 < 6) /* Any CPU with family lower than 6 */ |
3257 | return 0; /* doesn't have VID */ |
3258 | |
3259 | - vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor); |
3260 | + vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor); |
3261 | if (vrm_ret == 134) |
3262 | vrm_ret = get_via_model_d_vrm(); |
3263 | if (vrm_ret == 0) |
3264 | diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c |
3265 | index 0721e175664a..b960015cb073 100644 |
3266 | --- a/drivers/hwmon/k10temp.c |
3267 | +++ b/drivers/hwmon/k10temp.c |
3268 | @@ -226,7 +226,7 @@ static bool has_erratum_319(struct pci_dev *pdev) |
3269 | * and AM3 formats, but that's the best we can do. |
3270 | */ |
3271 | return boot_cpu_data.x86_model < 4 || |
3272 | - (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2); |
3273 | + (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2); |
3274 | } |
3275 | |
3276 | static int k10temp_probe(struct pci_dev *pdev, |
3277 | diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c |
3278 | index 5a632bcf869b..e59f9113fb93 100644 |
3279 | --- a/drivers/hwmon/k8temp.c |
3280 | +++ b/drivers/hwmon/k8temp.c |
3281 | @@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *pdev, |
3282 | return -ENOMEM; |
3283 | |
3284 | model = boot_cpu_data.x86_model; |
3285 | - stepping = boot_cpu_data.x86_mask; |
3286 | + stepping = boot_cpu_data.x86_stepping; |
3287 | |
3288 | /* feature available since SH-C0, exclude older revisions */ |
3289 | if ((model == 4 && stepping == 0) || |
3290 | diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c |
3291 | index 465520627e4b..d7d042a20ab4 100644 |
3292 | --- a/drivers/infiniband/core/device.c |
3293 | +++ b/drivers/infiniband/core/device.c |
3294 | @@ -462,7 +462,6 @@ int ib_register_device(struct ib_device *device, |
3295 | struct ib_udata uhw = {.outlen = 0, .inlen = 0}; |
3296 | struct device *parent = device->dev.parent; |
3297 | |
3298 | - WARN_ON_ONCE(!parent); |
3299 | WARN_ON_ONCE(device->dma_device); |
3300 | if (device->dev.dma_ops) { |
3301 | /* |
3302 | @@ -471,16 +470,25 @@ int ib_register_device(struct ib_device *device, |
3303 | * into device->dev. |
3304 | */ |
3305 | device->dma_device = &device->dev; |
3306 | - if (!device->dev.dma_mask) |
3307 | - device->dev.dma_mask = parent->dma_mask; |
3308 | - if (!device->dev.coherent_dma_mask) |
3309 | - device->dev.coherent_dma_mask = |
3310 | - parent->coherent_dma_mask; |
3311 | + if (!device->dev.dma_mask) { |
3312 | + if (parent) |
3313 | + device->dev.dma_mask = parent->dma_mask; |
3314 | + else |
3315 | + WARN_ON_ONCE(true); |
3316 | + } |
3317 | + if (!device->dev.coherent_dma_mask) { |
3318 | + if (parent) |
3319 | + device->dev.coherent_dma_mask = |
3320 | + parent->coherent_dma_mask; |
3321 | + else |
3322 | + WARN_ON_ONCE(true); |
3323 | + } |
3324 | } else { |
3325 | /* |
3326 | * The caller did not provide custom DMA operations. Use the |
3327 | * DMA mapping operations of the parent device. |
3328 | */ |
3329 | + WARN_ON_ONCE(!parent); |
3330 | device->dma_device = parent; |
3331 | } |
3332 | |
3333 | diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c |
3334 | index e30d86fa1855..8ae1308eecc7 100644 |
3335 | --- a/drivers/infiniband/core/sysfs.c |
3336 | +++ b/drivers/infiniband/core/sysfs.c |
3337 | @@ -1276,7 +1276,6 @@ int ib_device_register_sysfs(struct ib_device *device, |
3338 | int ret; |
3339 | int i; |
3340 | |
3341 | - WARN_ON_ONCE(!device->dev.parent); |
3342 | ret = dev_set_name(class_dev, "%s", device->name); |
3343 | if (ret) |
3344 | return ret; |
3345 | diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c |
3346 | index 4b64dd02e090..3205800f9579 100644 |
3347 | --- a/drivers/infiniband/core/user_mad.c |
3348 | +++ b/drivers/infiniband/core/user_mad.c |
3349 | @@ -500,7 +500,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, |
3350 | } |
3351 | |
3352 | memset(&ah_attr, 0, sizeof ah_attr); |
3353 | - ah_attr.type = rdma_ah_find_type(file->port->ib_dev, |
3354 | + ah_attr.type = rdma_ah_find_type(agent->device, |
3355 | file->port->port_num); |
3356 | rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid)); |
3357 | rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl); |
3358 | diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c |
3359 | index c3ee5d9b336d..cca70d36ee15 100644 |
3360 | --- a/drivers/infiniband/core/uverbs_std_types.c |
3361 | +++ b/drivers/infiniband/core/uverbs_std_types.c |
3362 | @@ -315,7 +315,7 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev, |
3363 | cq->uobject = &obj->uobject; |
3364 | cq->comp_handler = ib_uverbs_comp_handler; |
3365 | cq->event_handler = ib_uverbs_cq_event_handler; |
3366 | - cq->cq_context = &ev_file->ev_queue; |
3367 | + cq->cq_context = ev_file ? &ev_file->ev_queue : NULL; |
3368 | obj->uobject.object = cq; |
3369 | obj->uobject.user_handle = user_handle; |
3370 | atomic_set(&cq->usecnt, 0); |
3371 | diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c |
3372 | index 8c8a16791a3f..5caf37ba7fff 100644 |
3373 | --- a/drivers/infiniband/hw/mlx4/main.c |
3374 | +++ b/drivers/infiniband/hw/mlx4/main.c |
3375 | @@ -2995,9 +2995,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) |
3376 | kfree(ibdev->ib_uc_qpns_bitmap); |
3377 | |
3378 | err_steer_qp_release: |
3379 | - if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) |
3380 | - mlx4_qp_release_range(dev, ibdev->steer_qpn_base, |
3381 | - ibdev->steer_qpn_count); |
3382 | + mlx4_qp_release_range(dev, ibdev->steer_qpn_base, |
3383 | + ibdev->steer_qpn_count); |
3384 | err_counter: |
3385 | for (i = 0; i < ibdev->num_ports; ++i) |
3386 | mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]); |
3387 | @@ -3102,11 +3101,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) |
3388 | ibdev->iboe.nb.notifier_call = NULL; |
3389 | } |
3390 | |
3391 | - if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) { |
3392 | - mlx4_qp_release_range(dev, ibdev->steer_qpn_base, |
3393 | - ibdev->steer_qpn_count); |
3394 | - kfree(ibdev->ib_uc_qpns_bitmap); |
3395 | - } |
3396 | + mlx4_qp_release_range(dev, ibdev->steer_qpn_base, |
3397 | + ibdev->steer_qpn_count); |
3398 | + kfree(ibdev->ib_uc_qpns_bitmap); |
3399 | |
3400 | iounmap(ibdev->uar_map); |
3401 | for (p = 0; p < ibdev->num_ports; ++p) |
3402 | diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c |
3403 | index 8f5754fb8579..e4a9ba1dd9ba 100644 |
3404 | --- a/drivers/infiniband/hw/qib/qib_rc.c |
3405 | +++ b/drivers/infiniband/hw/qib/qib_rc.c |
3406 | @@ -434,13 +434,13 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags) |
3407 | qp->s_state = OP(COMPARE_SWAP); |
3408 | put_ib_ateth_swap(wqe->atomic_wr.swap, |
3409 | &ohdr->u.atomic_eth); |
3410 | - put_ib_ateth_swap(wqe->atomic_wr.compare_add, |
3411 | - &ohdr->u.atomic_eth); |
3412 | + put_ib_ateth_compare(wqe->atomic_wr.compare_add, |
3413 | + &ohdr->u.atomic_eth); |
3414 | } else { |
3415 | qp->s_state = OP(FETCH_ADD); |
3416 | put_ib_ateth_swap(wqe->atomic_wr.compare_add, |
3417 | &ohdr->u.atomic_eth); |
3418 | - put_ib_ateth_swap(0, &ohdr->u.atomic_eth); |
3419 | + put_ib_ateth_compare(0, &ohdr->u.atomic_eth); |
3420 | } |
3421 | put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr, |
3422 | &ohdr->u.atomic_eth); |
3423 | diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h |
3424 | index d7472a442a2c..96c3a6c5c4b5 100644 |
3425 | --- a/drivers/infiniband/sw/rxe/rxe_loc.h |
3426 | +++ b/drivers/infiniband/sw/rxe/rxe_loc.h |
3427 | @@ -237,7 +237,6 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, |
3428 | |
3429 | void rxe_release(struct kref *kref); |
3430 | |
3431 | -void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify); |
3432 | int rxe_completer(void *arg); |
3433 | int rxe_requester(void *arg); |
3434 | int rxe_responder(void *arg); |
3435 | diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c |
3436 | index 4469592b839d..137d6c0c49d4 100644 |
3437 | --- a/drivers/infiniband/sw/rxe/rxe_qp.c |
3438 | +++ b/drivers/infiniband/sw/rxe/rxe_qp.c |
3439 | @@ -824,9 +824,9 @@ void rxe_qp_destroy(struct rxe_qp *qp) |
3440 | } |
3441 | |
3442 | /* called when the last reference to the qp is dropped */ |
3443 | -void rxe_qp_cleanup(struct rxe_pool_entry *arg) |
3444 | +static void rxe_qp_do_cleanup(struct work_struct *work) |
3445 | { |
3446 | - struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem); |
3447 | + struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work); |
3448 | |
3449 | rxe_drop_all_mcast_groups(qp); |
3450 | |
3451 | @@ -859,3 +859,11 @@ void rxe_qp_cleanup(struct rxe_pool_entry *arg) |
3452 | kernel_sock_shutdown(qp->sk, SHUT_RDWR); |
3453 | sock_release(qp->sk); |
3454 | } |
3455 | + |
3456 | +/* called when the last reference to the qp is dropped */ |
3457 | +void rxe_qp_cleanup(struct rxe_pool_entry *arg) |
3458 | +{ |
3459 | + struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem); |
3460 | + |
3461 | + execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work); |
3462 | +} |
3463 | diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c |
3464 | index 26a7f923045b..7bdaf71b8221 100644 |
3465 | --- a/drivers/infiniband/sw/rxe/rxe_req.c |
3466 | +++ b/drivers/infiniband/sw/rxe/rxe_req.c |
3467 | @@ -594,15 +594,8 @@ int rxe_requester(void *arg) |
3468 | rxe_add_ref(qp); |
3469 | |
3470 | next_wqe: |
3471 | - if (unlikely(!qp->valid)) { |
3472 | - rxe_drain_req_pkts(qp, true); |
3473 | + if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR)) |
3474 | goto exit; |
3475 | - } |
3476 | - |
3477 | - if (unlikely(qp->req.state == QP_STATE_ERROR)) { |
3478 | - rxe_drain_req_pkts(qp, true); |
3479 | - goto exit; |
3480 | - } |
3481 | |
3482 | if (unlikely(qp->req.state == QP_STATE_RESET)) { |
3483 | qp->req.wqe_index = consumer_index(qp->sq.queue); |
3484 | diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c |
3485 | index 4240866a5331..01f926fd9029 100644 |
3486 | --- a/drivers/infiniband/sw/rxe/rxe_resp.c |
3487 | +++ b/drivers/infiniband/sw/rxe/rxe_resp.c |
3488 | @@ -1210,7 +1210,7 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp) |
3489 | } |
3490 | } |
3491 | |
3492 | -void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify) |
3493 | +static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify) |
3494 | { |
3495 | struct sk_buff *skb; |
3496 | |
3497 | diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c |
3498 | index d03002b9d84d..7210a784abb4 100644 |
3499 | --- a/drivers/infiniband/sw/rxe/rxe_verbs.c |
3500 | +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c |
3501 | @@ -814,6 +814,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr, |
3502 | (queue_count(qp->sq.queue) > 1); |
3503 | |
3504 | rxe_run_task(&qp->req.task, must_sched); |
3505 | + if (unlikely(qp->req.state == QP_STATE_ERROR)) |
3506 | + rxe_run_task(&qp->comp.task, 1); |
3507 | |
3508 | return err; |
3509 | } |
3510 | diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h |
3511 | index 0c2dbe45c729..1019f5e7dbdd 100644 |
3512 | --- a/drivers/infiniband/sw/rxe/rxe_verbs.h |
3513 | +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h |
3514 | @@ -35,6 +35,7 @@ |
3515 | #define RXE_VERBS_H |
3516 | |
3517 | #include <linux/interrupt.h> |
3518 | +#include <linux/workqueue.h> |
3519 | #include <rdma/rdma_user_rxe.h> |
3520 | #include "rxe_pool.h" |
3521 | #include "rxe_task.h" |
3522 | @@ -281,6 +282,8 @@ struct rxe_qp { |
3523 | struct timer_list rnr_nak_timer; |
3524 | |
3525 | spinlock_t state_lock; /* guard requester and completer */ |
3526 | + |
3527 | + struct execute_work cleanup_work; |
3528 | }; |
3529 | |
3530 | enum rxe_mem_state { |
3531 | diff --git a/drivers/md/dm.c b/drivers/md/dm.c |
3532 | index de17b7193299..1c42b00d3be2 100644 |
3533 | --- a/drivers/md/dm.c |
3534 | +++ b/drivers/md/dm.c |
3535 | @@ -817,7 +817,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error) |
3536 | queue_io(md, bio); |
3537 | } else { |
3538 | /* done with normal IO or empty flush */ |
3539 | - bio->bi_status = io_error; |
3540 | + if (io_error) |
3541 | + bio->bi_status = io_error; |
3542 | bio_endio(bio); |
3543 | } |
3544 | } |
3545 | diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c |
3546 | index ba80376a3b86..d097eb04a0e9 100644 |
3547 | --- a/drivers/media/tuners/r820t.c |
3548 | +++ b/drivers/media/tuners/r820t.c |
3549 | @@ -396,9 +396,11 @@ static int r820t_write(struct r820t_priv *priv, u8 reg, const u8 *val, |
3550 | return 0; |
3551 | } |
3552 | |
3553 | -static int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val) |
3554 | +static inline int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val) |
3555 | { |
3556 | - return r820t_write(priv, reg, &val, 1); |
3557 | + u8 tmp = val; /* work around GCC PR81715 with asan-stack=1 */ |
3558 | + |
3559 | + return r820t_write(priv, reg, &tmp, 1); |
3560 | } |
3561 | |
3562 | static int r820t_read_cache_reg(struct r820t_priv *priv, int reg) |
3563 | @@ -411,17 +413,18 @@ static int r820t_read_cache_reg(struct r820t_priv *priv, int reg) |
3564 | return -EINVAL; |
3565 | } |
3566 | |
3567 | -static int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val, |
3568 | +static inline int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val, |
3569 | u8 bit_mask) |
3570 | { |
3571 | + u8 tmp = val; |
3572 | int rc = r820t_read_cache_reg(priv, reg); |
3573 | |
3574 | if (rc < 0) |
3575 | return rc; |
3576 | |
3577 | - val = (rc & ~bit_mask) | (val & bit_mask); |
3578 | + tmp = (rc & ~bit_mask) | (tmp & bit_mask); |
3579 | |
3580 | - return r820t_write(priv, reg, &val, 1); |
3581 | + return r820t_write(priv, reg, &tmp, 1); |
3582 | } |
3583 | |
3584 | static int r820t_read(struct r820t_priv *priv, u8 reg, u8 *val, int len) |
3585 | diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c |
3586 | index 229dc18f0581..768972af8b85 100644 |
3587 | --- a/drivers/mmc/host/bcm2835.c |
3588 | +++ b/drivers/mmc/host/bcm2835.c |
3589 | @@ -1265,7 +1265,8 @@ static int bcm2835_add_host(struct bcm2835_host *host) |
3590 | char pio_limit_string[20]; |
3591 | int ret; |
3592 | |
3593 | - mmc->f_max = host->max_clk; |
3594 | + if (!mmc->f_max || mmc->f_max > host->max_clk) |
3595 | + mmc->f_max = host->max_clk; |
3596 | mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV; |
3597 | |
3598 | mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000); |
3599 | diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c |
3600 | index e0862d3f65b3..730fbe01726d 100644 |
3601 | --- a/drivers/mmc/host/meson-gx-mmc.c |
3602 | +++ b/drivers/mmc/host/meson-gx-mmc.c |
3603 | @@ -716,22 +716,6 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode, |
3604 | static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) |
3605 | { |
3606 | struct meson_host *host = mmc_priv(mmc); |
3607 | - int ret; |
3608 | - |
3609 | - /* |
3610 | - * If this is the initial tuning, try to get a sane Rx starting |
3611 | - * phase before doing the actual tuning. |
3612 | - */ |
3613 | - if (!mmc->doing_retune) { |
3614 | - ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); |
3615 | - |
3616 | - if (ret) |
3617 | - return ret; |
3618 | - } |
3619 | - |
3620 | - ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk); |
3621 | - if (ret) |
3622 | - return ret; |
3623 | |
3624 | return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); |
3625 | } |
3626 | @@ -762,9 +746,8 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
3627 | if (!IS_ERR(mmc->supply.vmmc)) |
3628 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); |
3629 | |
3630 | - /* Reset phases */ |
3631 | + /* Reset rx phase */ |
3632 | clk_set_phase(host->rx_clk, 0); |
3633 | - clk_set_phase(host->tx_clk, 270); |
3634 | |
3635 | break; |
3636 | |
3637 | diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c |
3638 | index 1f424374bbbb..4ffa6b173a21 100644 |
3639 | --- a/drivers/mmc/host/sdhci-of-esdhc.c |
3640 | +++ b/drivers/mmc/host/sdhci-of-esdhc.c |
3641 | @@ -589,10 +589,18 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) |
3642 | |
3643 | static void esdhc_reset(struct sdhci_host *host, u8 mask) |
3644 | { |
3645 | + u32 val; |
3646 | + |
3647 | sdhci_reset(host, mask); |
3648 | |
3649 | sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); |
3650 | sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); |
3651 | + |
3652 | + if (mask & SDHCI_RESET_ALL) { |
3653 | + val = sdhci_readl(host, ESDHC_TBCTL); |
3654 | + val &= ~ESDHC_TB_EN; |
3655 | + sdhci_writel(host, val, ESDHC_TBCTL); |
3656 | + } |
3657 | } |
3658 | |
3659 | /* The SCFG, Supplemental Configuration Unit, provides SoC specific |
3660 | diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c |
3661 | index e9290a3439d5..d24306b2b839 100644 |
3662 | --- a/drivers/mmc/host/sdhci.c |
3663 | +++ b/drivers/mmc/host/sdhci.c |
3664 | @@ -21,6 +21,7 @@ |
3665 | #include <linux/dma-mapping.h> |
3666 | #include <linux/slab.h> |
3667 | #include <linux/scatterlist.h> |
3668 | +#include <linux/sizes.h> |
3669 | #include <linux/swiotlb.h> |
3670 | #include <linux/regulator/consumer.h> |
3671 | #include <linux/pm_runtime.h> |
3672 | @@ -502,8 +503,35 @@ static int sdhci_pre_dma_transfer(struct sdhci_host *host, |
3673 | if (data->host_cookie == COOKIE_PRE_MAPPED) |
3674 | return data->sg_count; |
3675 | |
3676 | - sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, |
3677 | - mmc_get_dma_dir(data)); |
3678 | + /* Bounce write requests to the bounce buffer */ |
3679 | + if (host->bounce_buffer) { |
3680 | + unsigned int length = data->blksz * data->blocks; |
3681 | + |
3682 | + if (length > host->bounce_buffer_size) { |
3683 | + pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n", |
3684 | + mmc_hostname(host->mmc), length, |
3685 | + host->bounce_buffer_size); |
3686 | + return -EIO; |
3687 | + } |
3688 | + if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { |
3689 | + /* Copy the data to the bounce buffer */ |
3690 | + sg_copy_to_buffer(data->sg, data->sg_len, |
3691 | + host->bounce_buffer, |
3692 | + length); |
3693 | + } |
3694 | + /* Switch ownership to the DMA */ |
3695 | + dma_sync_single_for_device(host->mmc->parent, |
3696 | + host->bounce_addr, |
3697 | + host->bounce_buffer_size, |
3698 | + mmc_get_dma_dir(data)); |
3699 | + /* Just a dummy value */ |
3700 | + sg_count = 1; |
3701 | + } else { |
3702 | + /* Just access the data directly from memory */ |
3703 | + sg_count = dma_map_sg(mmc_dev(host->mmc), |
3704 | + data->sg, data->sg_len, |
3705 | + mmc_get_dma_dir(data)); |
3706 | + } |
3707 | |
3708 | if (sg_count == 0) |
3709 | return -ENOSPC; |
3710 | @@ -673,6 +701,14 @@ static void sdhci_adma_table_post(struct sdhci_host *host, |
3711 | } |
3712 | } |
3713 | |
3714 | +static u32 sdhci_sdma_address(struct sdhci_host *host) |
3715 | +{ |
3716 | + if (host->bounce_buffer) |
3717 | + return host->bounce_addr; |
3718 | + else |
3719 | + return sg_dma_address(host->data->sg); |
3720 | +} |
3721 | + |
3722 | static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) |
3723 | { |
3724 | u8 count; |
3725 | @@ -858,8 +894,8 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) |
3726 | SDHCI_ADMA_ADDRESS_HI); |
3727 | } else { |
3728 | WARN_ON(sg_cnt != 1); |
3729 | - sdhci_writel(host, sg_dma_address(data->sg), |
3730 | - SDHCI_DMA_ADDRESS); |
3731 | + sdhci_writel(host, sdhci_sdma_address(host), |
3732 | + SDHCI_DMA_ADDRESS); |
3733 | } |
3734 | } |
3735 | |
3736 | @@ -2248,7 +2284,12 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) |
3737 | |
3738 | mrq->data->host_cookie = COOKIE_UNMAPPED; |
3739 | |
3740 | - if (host->flags & SDHCI_REQ_USE_DMA) |
3741 | + /* |
3742 | + * No pre-mapping in the pre hook if we're using the bounce buffer, |
3743 | + * for that we would need two bounce buffers since one buffer is |
3744 | + * in flight when this is getting called. |
3745 | + */ |
3746 | + if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) |
3747 | sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); |
3748 | } |
3749 | |
3750 | @@ -2352,8 +2393,45 @@ static bool sdhci_request_done(struct sdhci_host *host) |
3751 | struct mmc_data *data = mrq->data; |
3752 | |
3753 | if (data && data->host_cookie == COOKIE_MAPPED) { |
3754 | - dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, |
3755 | - mmc_get_dma_dir(data)); |
3756 | + if (host->bounce_buffer) { |
3757 | + /* |
3758 | + * On reads, copy the bounced data into the |
3759 | + * sglist |
3760 | + */ |
3761 | + if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { |
3762 | + unsigned int length = data->bytes_xfered; |
3763 | + |
3764 | + if (length > host->bounce_buffer_size) { |
3765 | + pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", |
3766 | + mmc_hostname(host->mmc), |
3767 | + host->bounce_buffer_size, |
3768 | + data->bytes_xfered); |
3769 | + /* Cap it down and continue */ |
3770 | + length = host->bounce_buffer_size; |
3771 | + } |
3772 | + dma_sync_single_for_cpu( |
3773 | + host->mmc->parent, |
3774 | + host->bounce_addr, |
3775 | + host->bounce_buffer_size, |
3776 | + DMA_FROM_DEVICE); |
3777 | + sg_copy_from_buffer(data->sg, |
3778 | + data->sg_len, |
3779 | + host->bounce_buffer, |
3780 | + length); |
3781 | + } else { |
3782 | + /* No copying, just switch ownership */ |
3783 | + dma_sync_single_for_cpu( |
3784 | + host->mmc->parent, |
3785 | + host->bounce_addr, |
3786 | + host->bounce_buffer_size, |
3787 | + mmc_get_dma_dir(data)); |
3788 | + } |
3789 | + } else { |
3790 | + /* Unmap the raw data */ |
3791 | + dma_unmap_sg(mmc_dev(host->mmc), data->sg, |
3792 | + data->sg_len, |
3793 | + mmc_get_dma_dir(data)); |
3794 | + } |
3795 | data->host_cookie = COOKIE_UNMAPPED; |
3796 | } |
3797 | } |
3798 | @@ -2636,7 +2714,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) |
3799 | */ |
3800 | if (intmask & SDHCI_INT_DMA_END) { |
3801 | u32 dmastart, dmanow; |
3802 | - dmastart = sg_dma_address(host->data->sg); |
3803 | + |
3804 | + dmastart = sdhci_sdma_address(host); |
3805 | dmanow = dmastart + host->data->bytes_xfered; |
3806 | /* |
3807 | * Force update to the next DMA block boundary. |
3808 | @@ -3217,6 +3296,68 @@ void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1) |
3809 | } |
3810 | EXPORT_SYMBOL_GPL(__sdhci_read_caps); |
3811 | |
3812 | +static int sdhci_allocate_bounce_buffer(struct sdhci_host *host) |
3813 | +{ |
3814 | + struct mmc_host *mmc = host->mmc; |
3815 | + unsigned int max_blocks; |
3816 | + unsigned int bounce_size; |
3817 | + int ret; |
3818 | + |
3819 | + /* |
3820 | + * Cap the bounce buffer at 64KB. Using a bigger bounce buffer |
3821 | + * has diminishing returns, this is probably because SD/MMC |
3822 | + * cards are usually optimized to handle this size of requests. |
3823 | + */ |
3824 | + bounce_size = SZ_64K; |
3825 | + /* |
3826 | + * Adjust downwards to maximum request size if this is less |
3827 | + * than our segment size, else hammer down the maximum |
3828 | + * request size to the maximum buffer size. |
3829 | + */ |
3830 | + if (mmc->max_req_size < bounce_size) |
3831 | + bounce_size = mmc->max_req_size; |
3832 | + max_blocks = bounce_size / 512; |
3833 | + |
3834 | + /* |
3835 | + * When we just support one segment, we can get significant |
3836 | + * speedups by the help of a bounce buffer to group scattered |
3837 | + * reads/writes together. |
3838 | + */ |
3839 | + host->bounce_buffer = devm_kmalloc(mmc->parent, |
3840 | + bounce_size, |
3841 | + GFP_KERNEL); |
3842 | + if (!host->bounce_buffer) { |
3843 | + pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n", |
3844 | + mmc_hostname(mmc), |
3845 | + bounce_size); |
3846 | + /* |
3847 | + * Exiting with zero here makes sure we proceed with |
3848 | + * mmc->max_segs == 1. |
3849 | + */ |
3850 | + return 0; |
3851 | + } |
3852 | + |
3853 | + host->bounce_addr = dma_map_single(mmc->parent, |
3854 | + host->bounce_buffer, |
3855 | + bounce_size, |
3856 | + DMA_BIDIRECTIONAL); |
3857 | + ret = dma_mapping_error(mmc->parent, host->bounce_addr); |
3858 | + if (ret) |
3859 | + /* Again fall back to max_segs == 1 */ |
3860 | + return 0; |
3861 | + host->bounce_buffer_size = bounce_size; |
3862 | + |
3863 | + /* Lie about this since we're bouncing */ |
3864 | + mmc->max_segs = max_blocks; |
3865 | + mmc->max_seg_size = bounce_size; |
3866 | + mmc->max_req_size = bounce_size; |
3867 | + |
3868 | + pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n", |
3869 | + mmc_hostname(mmc), max_blocks, bounce_size); |
3870 | + |
3871 | + return 0; |
3872 | +} |
3873 | + |
3874 | int sdhci_setup_host(struct sdhci_host *host) |
3875 | { |
3876 | struct mmc_host *mmc; |
3877 | @@ -3713,6 +3854,13 @@ int sdhci_setup_host(struct sdhci_host *host) |
3878 | */ |
3879 | mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; |
3880 | |
3881 | + if (mmc->max_segs == 1) { |
3882 | + /* This may alter mmc->*_blk_* parameters */ |
3883 | + ret = sdhci_allocate_bounce_buffer(host); |
3884 | + if (ret) |
3885 | + return ret; |
3886 | + } |
3887 | + |
3888 | return 0; |
3889 | |
3890 | unreg: |
3891 | diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h |
3892 | index 54bc444c317f..1d7d61e25dbf 100644 |
3893 | --- a/drivers/mmc/host/sdhci.h |
3894 | +++ b/drivers/mmc/host/sdhci.h |
3895 | @@ -440,6 +440,9 @@ struct sdhci_host { |
3896 | |
3897 | int irq; /* Device IRQ */ |
3898 | void __iomem *ioaddr; /* Mapped address */ |
3899 | + char *bounce_buffer; /* For packing SDMA reads/writes */ |
3900 | + dma_addr_t bounce_addr; |
3901 | + unsigned int bounce_buffer_size; |
3902 | |
3903 | const struct sdhci_ops *ops; /* Low level hw interface */ |
3904 | |
3905 | diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c |
3906 | index 8037d4b48a05..e2583a539b41 100644 |
3907 | --- a/drivers/mtd/nand/vf610_nfc.c |
3908 | +++ b/drivers/mtd/nand/vf610_nfc.c |
3909 | @@ -752,10 +752,8 @@ static int vf610_nfc_probe(struct platform_device *pdev) |
3910 | if (mtd->oobsize > 64) |
3911 | mtd->oobsize = 64; |
3912 | |
3913 | - /* |
3914 | - * mtd->ecclayout is not specified here because we're using the |
3915 | - * default large page ECC layout defined in NAND core. |
3916 | - */ |
3917 | + /* Use default large page ECC layout defined in NAND core */ |
3918 | + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); |
3919 | if (chip->ecc.strength == 32) { |
3920 | nfc->ecc_mode = ECC_60_BYTE; |
3921 | chip->ecc.bytes = 60; |
3922 | diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c |
3923 | index 634b2f41cc9e..908acd4624e8 100644 |
3924 | --- a/drivers/net/ethernet/marvell/mvpp2.c |
3925 | +++ b/drivers/net/ethernet/marvell/mvpp2.c |
3926 | @@ -7127,6 +7127,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev) |
3927 | int id = port->id; |
3928 | bool allmulti = dev->flags & IFF_ALLMULTI; |
3929 | |
3930 | +retry: |
3931 | mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC); |
3932 | mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti); |
3933 | mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti); |
3934 | @@ -7134,9 +7135,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev) |
3935 | /* Remove all port->id's mcast enries */ |
3936 | mvpp2_prs_mcast_del_all(priv, id); |
3937 | |
3938 | - if (allmulti && !netdev_mc_empty(dev)) { |
3939 | - netdev_for_each_mc_addr(ha, dev) |
3940 | - mvpp2_prs_mac_da_accept(priv, id, ha->addr, true); |
3941 | + if (!allmulti) { |
3942 | + netdev_for_each_mc_addr(ha, dev) { |
3943 | + if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) { |
3944 | + allmulti = true; |
3945 | + goto retry; |
3946 | + } |
3947 | + } |
3948 | } |
3949 | } |
3950 | |
3951 | diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c |
3952 | index 769598f7b6c8..3aaf4bad6c5a 100644 |
3953 | --- a/drivers/net/ethernet/mellanox/mlx4/qp.c |
3954 | +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c |
3955 | @@ -287,6 +287,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) |
3956 | u64 in_param = 0; |
3957 | int err; |
3958 | |
3959 | + if (!cnt) |
3960 | + return; |
3961 | + |
3962 | if (mlx4_is_mfunc(dev)) { |
3963 | set_param_l(&in_param, base_qpn); |
3964 | set_param_h(&in_param, cnt); |
3965 | diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c |
3966 | index cd314946452c..9511f5fe62f4 100644 |
3967 | --- a/drivers/net/wireless/marvell/mwifiex/pcie.c |
3968 | +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c |
3969 | @@ -2781,7 +2781,10 @@ static void mwifiex_pcie_card_reset_work(struct mwifiex_adapter *adapter) |
3970 | { |
3971 | struct pcie_service_card *card = adapter->card; |
3972 | |
3973 | - pci_reset_function(card->dev); |
3974 | + /* We can't afford to wait here; remove() might be waiting on us. If we |
3975 | + * can't grab the device lock, maybe we'll get another chance later. |
3976 | + */ |
3977 | + pci_try_reset_function(card->dev); |
3978 | } |
3979 | |
3980 | static void mwifiex_pcie_work(struct work_struct *work) |
3981 | diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c |
3982 | index 43e18c4c1e68..999ddd947b2a 100644 |
3983 | --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c |
3984 | +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c |
3985 | @@ -1123,7 +1123,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr) |
3986 | } |
3987 | if (0 == tmp) { |
3988 | read_addr = REG_DBI_RDATA + addr % 4; |
3989 | - ret = rtl_read_word(rtlpriv, read_addr); |
3990 | + ret = rtl_read_byte(rtlpriv, read_addr); |
3991 | } |
3992 | return ret; |
3993 | } |
3994 | @@ -1165,7 +1165,8 @@ static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw) |
3995 | } |
3996 | |
3997 | tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f); |
3998 | - _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7)); |
3999 | + _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7) | |
4000 | + ASPM_L1_LATENCY << 3); |
4001 | |
4002 | tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719); |
4003 | _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4)); |
4004 | diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h |
4005 | index 92d4859ec906..2a37125b2ef5 100644 |
4006 | --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h |
4007 | +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h |
4008 | @@ -99,6 +99,7 @@ |
4009 | #define RTL_USB_MAX_RX_COUNT 100 |
4010 | #define QBSS_LOAD_SIZE 5 |
4011 | #define MAX_WMMELE_LENGTH 64 |
4012 | +#define ASPM_L1_LATENCY 7 |
4013 | |
4014 | #define TOTAL_CAM_ENTRY 32 |
4015 | |
4016 | diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c |
4017 | index 5bee3af47588..39405598b22d 100644 |
4018 | --- a/drivers/pci/dwc/pci-keystone.c |
4019 | +++ b/drivers/pci/dwc/pci-keystone.c |
4020 | @@ -178,7 +178,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, |
4021 | } |
4022 | |
4023 | /* interrupt controller is in a child node */ |
4024 | - *np_temp = of_find_node_by_name(np_pcie, controller); |
4025 | + *np_temp = of_get_child_by_name(np_pcie, controller); |
4026 | if (!(*np_temp)) { |
4027 | dev_err(dev, "Node for %s is absent\n", controller); |
4028 | return -EINVAL; |
4029 | @@ -187,6 +187,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, |
4030 | temp = of_irq_count(*np_temp); |
4031 | if (!temp) { |
4032 | dev_err(dev, "No IRQ entries in %s\n", controller); |
4033 | + of_node_put(*np_temp); |
4034 | return -EINVAL; |
4035 | } |
4036 | |
4037 | @@ -204,6 +205,8 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, |
4038 | break; |
4039 | } |
4040 | |
4041 | + of_node_put(*np_temp); |
4042 | + |
4043 | if (temp) { |
4044 | *num_irqs = temp; |
4045 | return 0; |
4046 | diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c |
4047 | index a5073a921a04..32228d41f746 100644 |
4048 | --- a/drivers/pci/host/pcie-iproc-platform.c |
4049 | +++ b/drivers/pci/host/pcie-iproc-platform.c |
4050 | @@ -92,6 +92,13 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) |
4051 | pcie->need_ob_cfg = true; |
4052 | } |
4053 | |
4054 | + /* |
4055 | + * DT nodes are not used by all platforms that use the iProc PCIe |
4056 | + * core driver. For platforms that require explict inbound mapping |
4057 | + * configuration, "dma-ranges" would have been present in DT |
4058 | + */ |
4059 | + pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges"); |
4060 | + |
4061 | /* PHY use is optional */ |
4062 | pcie->phy = devm_phy_get(dev, "pcie-phy"); |
4063 | if (IS_ERR(pcie->phy)) { |
4064 | diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c |
4065 | index 935909bbe5c4..75836067f538 100644 |
4066 | --- a/drivers/pci/host/pcie-iproc.c |
4067 | +++ b/drivers/pci/host/pcie-iproc.c |
4068 | @@ -1378,9 +1378,11 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) |
4069 | } |
4070 | } |
4071 | |
4072 | - ret = iproc_pcie_map_dma_ranges(pcie); |
4073 | - if (ret && ret != -ENOENT) |
4074 | - goto err_power_off_phy; |
4075 | + if (pcie->need_ib_cfg) { |
4076 | + ret = iproc_pcie_map_dma_ranges(pcie); |
4077 | + if (ret && ret != -ENOENT) |
4078 | + goto err_power_off_phy; |
4079 | + } |
4080 | |
4081 | #ifdef CONFIG_ARM |
4082 | pcie->sysdata.private_data = pcie; |
4083 | diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h |
4084 | index a6b55cec9a66..4ac6282f2bfd 100644 |
4085 | --- a/drivers/pci/host/pcie-iproc.h |
4086 | +++ b/drivers/pci/host/pcie-iproc.h |
4087 | @@ -74,6 +74,7 @@ struct iproc_msi; |
4088 | * @ob: outbound mapping related parameters |
4089 | * @ob_map: outbound mapping related parameters specific to the controller |
4090 | * |
4091 | + * @need_ib_cfg: indicates SW needs to configure the inbound mapping window |
4092 | * @ib: inbound mapping related parameters |
4093 | * @ib_map: outbound mapping region related parameters |
4094 | * |
4095 | @@ -101,6 +102,7 @@ struct iproc_pcie { |
4096 | struct iproc_pcie_ob ob; |
4097 | const struct iproc_pcie_ob_map *ob_map; |
4098 | |
4099 | + bool need_ib_cfg; |
4100 | struct iproc_pcie_ib ib; |
4101 | const struct iproc_pcie_ib_map *ib_map; |
4102 | |
4103 | diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c |
4104 | index 7bab0606f1a9..a89d8b990228 100644 |
4105 | --- a/drivers/pci/hotplug/pciehp_hpc.c |
4106 | +++ b/drivers/pci/hotplug/pciehp_hpc.c |
4107 | @@ -848,6 +848,13 @@ struct controller *pcie_init(struct pcie_device *dev) |
4108 | if (pdev->hotplug_user_indicators) |
4109 | slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP); |
4110 | |
4111 | + /* |
4112 | + * We assume no Thunderbolt controllers support Command Complete events, |
4113 | + * but some controllers falsely claim they do. |
4114 | + */ |
4115 | + if (pdev->is_thunderbolt) |
4116 | + slot_cap |= PCI_EXP_SLTCAP_NCCS; |
4117 | + |
4118 | ctrl->slot_cap = slot_cap; |
4119 | mutex_init(&ctrl->ctrl_lock); |
4120 | init_waitqueue_head(&ctrl->queue); |
4121 | diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c |
4122 | index 10684b17d0bd..d22750ea7444 100644 |
4123 | --- a/drivers/pci/quirks.c |
4124 | +++ b/drivers/pci/quirks.c |
4125 | @@ -1636,8 +1636,8 @@ static void quirk_pcie_mch(struct pci_dev *pdev) |
4126 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch); |
4127 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch); |
4128 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch); |
4129 | -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, quirk_pcie_mch); |
4130 | |
4131 | +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch); |
4132 | |
4133 | /* |
4134 | * It's possible for the MSI to get corrupted if shpc and acpi |
4135 | diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c |
4136 | index 623d322447a2..7c4eb86c851e 100644 |
4137 | --- a/drivers/platform/x86/apple-gmux.c |
4138 | +++ b/drivers/platform/x86/apple-gmux.c |
4139 | @@ -24,7 +24,6 @@ |
4140 | #include <linux/delay.h> |
4141 | #include <linux/pci.h> |
4142 | #include <linux/vga_switcheroo.h> |
4143 | -#include <linux/vgaarb.h> |
4144 | #include <acpi/video.h> |
4145 | #include <asm/io.h> |
4146 | |
4147 | @@ -54,7 +53,6 @@ struct apple_gmux_data { |
4148 | bool indexed; |
4149 | struct mutex index_lock; |
4150 | |
4151 | - struct pci_dev *pdev; |
4152 | struct backlight_device *bdev; |
4153 | |
4154 | /* switcheroo data */ |
4155 | @@ -599,23 +597,6 @@ static int gmux_resume(struct device *dev) |
4156 | return 0; |
4157 | } |
4158 | |
4159 | -static struct pci_dev *gmux_get_io_pdev(void) |
4160 | -{ |
4161 | - struct pci_dev *pdev = NULL; |
4162 | - |
4163 | - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev))) { |
4164 | - u16 cmd; |
4165 | - |
4166 | - pci_read_config_word(pdev, PCI_COMMAND, &cmd); |
4167 | - if (!(cmd & PCI_COMMAND_IO)) |
4168 | - continue; |
4169 | - |
4170 | - return pdev; |
4171 | - } |
4172 | - |
4173 | - return NULL; |
4174 | -} |
4175 | - |
4176 | static int is_thunderbolt(struct device *dev, void *data) |
4177 | { |
4178 | return to_pci_dev(dev)->is_thunderbolt; |
4179 | @@ -631,7 +612,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) |
4180 | int ret = -ENXIO; |
4181 | acpi_status status; |
4182 | unsigned long long gpe; |
4183 | - struct pci_dev *pdev = NULL; |
4184 | |
4185 | if (apple_gmux_data) |
4186 | return -EBUSY; |
4187 | @@ -682,7 +662,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) |
4188 | ver_minor = (version >> 16) & 0xff; |
4189 | ver_release = (version >> 8) & 0xff; |
4190 | } else { |
4191 | - pr_info("gmux device not present or IO disabled\n"); |
4192 | + pr_info("gmux device not present\n"); |
4193 | ret = -ENODEV; |
4194 | goto err_release; |
4195 | } |
4196 | @@ -690,23 +670,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) |
4197 | pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor, |
4198 | ver_release, (gmux_data->indexed ? "indexed" : "classic")); |
4199 | |
4200 | - /* |
4201 | - * Apple systems with gmux are EFI based and normally don't use |
4202 | - * VGA. In addition changing IO+MEM ownership between IGP and dGPU |
4203 | - * disables IO/MEM used for backlight control on some systems. |
4204 | - * Lock IO+MEM to GPU with active IO to prevent switch. |
4205 | - */ |
4206 | - pdev = gmux_get_io_pdev(); |
4207 | - if (pdev && vga_tryget(pdev, |
4208 | - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM)) { |
4209 | - pr_err("IO+MEM vgaarb-locking for PCI:%s failed\n", |
4210 | - pci_name(pdev)); |
4211 | - ret = -EBUSY; |
4212 | - goto err_release; |
4213 | - } else if (pdev) |
4214 | - pr_info("locked IO for PCI:%s\n", pci_name(pdev)); |
4215 | - gmux_data->pdev = pdev; |
4216 | - |
4217 | memset(&props, 0, sizeof(props)); |
4218 | props.type = BACKLIGHT_PLATFORM; |
4219 | props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS); |
4220 | @@ -822,10 +785,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) |
4221 | err_notify: |
4222 | backlight_device_unregister(bdev); |
4223 | err_release: |
4224 | - if (gmux_data->pdev) |
4225 | - vga_put(gmux_data->pdev, |
4226 | - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM); |
4227 | - pci_dev_put(pdev); |
4228 | release_region(gmux_data->iostart, gmux_data->iolen); |
4229 | err_free: |
4230 | kfree(gmux_data); |
4231 | @@ -845,11 +804,6 @@ static void gmux_remove(struct pnp_dev *pnp) |
4232 | &gmux_notify_handler); |
4233 | } |
4234 | |
4235 | - if (gmux_data->pdev) { |
4236 | - vga_put(gmux_data->pdev, |
4237 | - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM); |
4238 | - pci_dev_put(gmux_data->pdev); |
4239 | - } |
4240 | backlight_device_unregister(gmux_data->bdev); |
4241 | |
4242 | release_region(gmux_data->iostart, gmux_data->iolen); |
4243 | diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c |
4244 | index daa68acbc900..c0c8945603cb 100644 |
4245 | --- a/drivers/platform/x86/wmi.c |
4246 | +++ b/drivers/platform/x86/wmi.c |
4247 | @@ -933,7 +933,7 @@ static int wmi_dev_probe(struct device *dev) |
4248 | goto probe_failure; |
4249 | } |
4250 | |
4251 | - buf = kmalloc(strlen(wdriver->driver.name) + 4, GFP_KERNEL); |
4252 | + buf = kmalloc(strlen(wdriver->driver.name) + 5, GFP_KERNEL); |
4253 | if (!buf) { |
4254 | ret = -ENOMEM; |
4255 | goto probe_string_failure; |
4256 | diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c |
4257 | index e2a946c0e667..304e891e35fc 100644 |
4258 | --- a/drivers/rtc/rtc-opal.c |
4259 | +++ b/drivers/rtc/rtc-opal.c |
4260 | @@ -58,6 +58,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms) |
4261 | static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) |
4262 | { |
4263 | long rc = OPAL_BUSY; |
4264 | + int retries = 10; |
4265 | u32 y_m_d; |
4266 | u64 h_m_s_ms; |
4267 | __be32 __y_m_d; |
4268 | @@ -67,8 +68,11 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) |
4269 | rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); |
4270 | if (rc == OPAL_BUSY_EVENT) |
4271 | opal_poll_events(NULL); |
4272 | - else |
4273 | + else if (retries-- && (rc == OPAL_HARDWARE |
4274 | + || rc == OPAL_INTERNAL_ERROR)) |
4275 | msleep(10); |
4276 | + else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) |
4277 | + break; |
4278 | } |
4279 | |
4280 | if (rc != OPAL_SUCCESS) |
4281 | @@ -84,6 +88,7 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) |
4282 | static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm) |
4283 | { |
4284 | long rc = OPAL_BUSY; |
4285 | + int retries = 10; |
4286 | u32 y_m_d = 0; |
4287 | u64 h_m_s_ms = 0; |
4288 | |
4289 | @@ -92,8 +97,11 @@ static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm) |
4290 | rc = opal_rtc_write(y_m_d, h_m_s_ms); |
4291 | if (rc == OPAL_BUSY_EVENT) |
4292 | opal_poll_events(NULL); |
4293 | - else |
4294 | + else if (retries-- && (rc == OPAL_HARDWARE |
4295 | + || rc == OPAL_INTERNAL_ERROR)) |
4296 | msleep(10); |
4297 | + else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) |
4298 | + break; |
4299 | } |
4300 | |
4301 | return rc == OPAL_SUCCESS ? 0 : -EIO; |
4302 | diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile |
4303 | index 0f42a225a664..e6b779930230 100644 |
4304 | --- a/drivers/scsi/smartpqi/Makefile |
4305 | +++ b/drivers/scsi/smartpqi/Makefile |
4306 | @@ -1,3 +1,3 @@ |
4307 | ccflags-y += -I. |
4308 | -obj-m += smartpqi.o |
4309 | +obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o |
4310 | smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o |
4311 | diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c |
4312 | index f9bc8ec6fb6b..9518ffd8b8ba 100644 |
4313 | --- a/drivers/target/iscsi/iscsi_target_auth.c |
4314 | +++ b/drivers/target/iscsi/iscsi_target_auth.c |
4315 | @@ -421,7 +421,8 @@ static int chap_server_compute_md5( |
4316 | auth_ret = 0; |
4317 | out: |
4318 | kzfree(desc); |
4319 | - crypto_free_shash(tfm); |
4320 | + if (tfm) |
4321 | + crypto_free_shash(tfm); |
4322 | kfree(challenge); |
4323 | kfree(challenge_binhex); |
4324 | return auth_ret; |
4325 | diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c |
4326 | index b686e2ce9c0e..8a5e8d17a942 100644 |
4327 | --- a/drivers/target/iscsi/iscsi_target_nego.c |
4328 | +++ b/drivers/target/iscsi/iscsi_target_nego.c |
4329 | @@ -432,6 +432,9 @@ static void iscsi_target_sk_data_ready(struct sock *sk) |
4330 | if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { |
4331 | write_unlock_bh(&sk->sk_callback_lock); |
4332 | pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn); |
4333 | + if (iscsi_target_sk_data_ready == conn->orig_data_ready) |
4334 | + return; |
4335 | + conn->orig_data_ready(sk); |
4336 | return; |
4337 | } |
4338 | |
4339 | diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig |
4340 | index f699abab1787..65812a2f60b4 100644 |
4341 | --- a/drivers/usb/Kconfig |
4342 | +++ b/drivers/usb/Kconfig |
4343 | @@ -19,6 +19,14 @@ config USB_EHCI_BIG_ENDIAN_MMIO |
4344 | config USB_EHCI_BIG_ENDIAN_DESC |
4345 | bool |
4346 | |
4347 | +config USB_UHCI_BIG_ENDIAN_MMIO |
4348 | + bool |
4349 | + default y if SPARC_LEON |
4350 | + |
4351 | +config USB_UHCI_BIG_ENDIAN_DESC |
4352 | + bool |
4353 | + default y if SPARC_LEON |
4354 | + |
4355 | menuconfig USB_SUPPORT |
4356 | bool "USB support" |
4357 | depends on HAS_IOMEM |
4358 | diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig |
4359 | index b80a94e632af..2763a640359f 100644 |
4360 | --- a/drivers/usb/host/Kconfig |
4361 | +++ b/drivers/usb/host/Kconfig |
4362 | @@ -625,14 +625,6 @@ config USB_UHCI_ASPEED |
4363 | bool |
4364 | default y if ARCH_ASPEED |
4365 | |
4366 | -config USB_UHCI_BIG_ENDIAN_MMIO |
4367 | - bool |
4368 | - default y if SPARC_LEON |
4369 | - |
4370 | -config USB_UHCI_BIG_ENDIAN_DESC |
4371 | - bool |
4372 | - default y if SPARC_LEON |
4373 | - |
4374 | config USB_FHCI_HCD |
4375 | tristate "Freescale QE USB Host Controller support" |
4376 | depends on OF_GPIO && QE_GPIO && QUICC_ENGINE |
4377 | diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c |
4378 | index 9269d5685239..b90ef96e43d6 100644 |
4379 | --- a/drivers/video/console/dummycon.c |
4380 | +++ b/drivers/video/console/dummycon.c |
4381 | @@ -67,7 +67,6 @@ const struct consw dummy_con = { |
4382 | .con_switch = DUMMY, |
4383 | .con_blank = DUMMY, |
4384 | .con_font_set = DUMMY, |
4385 | - .con_font_get = DUMMY, |
4386 | .con_font_default = DUMMY, |
4387 | .con_font_copy = DUMMY, |
4388 | }; |
4389 | diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c |
4390 | index e06358da4b99..3dee267d7c75 100644 |
4391 | --- a/drivers/video/fbdev/atmel_lcdfb.c |
4392 | +++ b/drivers/video/fbdev/atmel_lcdfb.c |
4393 | @@ -1119,7 +1119,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) |
4394 | goto put_display_node; |
4395 | } |
4396 | |
4397 | - timings_np = of_find_node_by_name(display_np, "display-timings"); |
4398 | + timings_np = of_get_child_by_name(display_np, "display-timings"); |
4399 | if (!timings_np) { |
4400 | dev_err(dev, "failed to find display-timings node\n"); |
4401 | ret = -ENODEV; |
4402 | @@ -1140,6 +1140,12 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) |
4403 | fb_add_videomode(&fb_vm, &info->modelist); |
4404 | } |
4405 | |
4406 | + /* |
4407 | + * FIXME: Make sure we are not referencing any fields in display_np |
4408 | + * and timings_np and drop our references to them before returning to |
4409 | + * avoid leaking the nodes on probe deferral and driver unbind. |
4410 | + */ |
4411 | + |
4412 | return 0; |
4413 | |
4414 | put_timings_node: |
4415 | diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c |
4416 | index 6082f653c68a..67773e8bbb95 100644 |
4417 | --- a/drivers/video/fbdev/geode/video_gx.c |
4418 | +++ b/drivers/video/fbdev/geode/video_gx.c |
4419 | @@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_info *info) |
4420 | int timeout = 1000; |
4421 | |
4422 | /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */ |
4423 | - if (cpu_data(0).x86_mask == 1) { |
4424 | + if (cpu_data(0).x86_stepping == 1) { |
4425 | pll_table = gx_pll_table_14MHz; |
4426 | pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz); |
4427 | } else { |
4428 | diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h |
4429 | index 149c5e7efc89..092981171df1 100644 |
4430 | --- a/drivers/xen/xenbus/xenbus.h |
4431 | +++ b/drivers/xen/xenbus/xenbus.h |
4432 | @@ -76,6 +76,7 @@ struct xb_req_data { |
4433 | struct list_head list; |
4434 | wait_queue_head_t wq; |
4435 | struct xsd_sockmsg msg; |
4436 | + uint32_t caller_req_id; |
4437 | enum xsd_sockmsg_type type; |
4438 | char *body; |
4439 | const struct kvec *vec; |
4440 | diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c |
4441 | index 5b081a01779d..d239fc3c5e3d 100644 |
4442 | --- a/drivers/xen/xenbus/xenbus_comms.c |
4443 | +++ b/drivers/xen/xenbus/xenbus_comms.c |
4444 | @@ -309,6 +309,7 @@ static int process_msg(void) |
4445 | goto out; |
4446 | |
4447 | if (req->state == xb_req_state_wait_reply) { |
4448 | + req->msg.req_id = req->caller_req_id; |
4449 | req->msg.type = state.msg.type; |
4450 | req->msg.len = state.msg.len; |
4451 | req->body = state.body; |
4452 | diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c |
4453 | index 3e59590c7254..3f3b29398ab8 100644 |
4454 | --- a/drivers/xen/xenbus/xenbus_xs.c |
4455 | +++ b/drivers/xen/xenbus/xenbus_xs.c |
4456 | @@ -227,6 +227,8 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg) |
4457 | req->state = xb_req_state_queued; |
4458 | init_waitqueue_head(&req->wq); |
4459 | |
4460 | + /* Save the caller req_id and restore it later in the reply */ |
4461 | + req->caller_req_id = req->msg.req_id; |
4462 | req->msg.req_id = xs_request_enter(req); |
4463 | |
4464 | mutex_lock(&xb_write_mutex); |
4465 | @@ -310,6 +312,7 @@ static void *xs_talkv(struct xenbus_transaction t, |
4466 | req->num_vecs = num_vecs; |
4467 | req->cb = xs_wake_up; |
4468 | |
4469 | + msg.req_id = 0; |
4470 | msg.tx_id = t.id; |
4471 | msg.type = type; |
4472 | msg.len = 0; |
4473 | diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c |
4474 | index 0f57602092cf..c04183cc2117 100644 |
4475 | --- a/fs/btrfs/inode.c |
4476 | +++ b/fs/btrfs/inode.c |
4477 | @@ -1330,8 +1330,11 @@ static noinline int run_delalloc_nocow(struct inode *inode, |
4478 | leaf = path->nodes[0]; |
4479 | if (path->slots[0] >= btrfs_header_nritems(leaf)) { |
4480 | ret = btrfs_next_leaf(root, path); |
4481 | - if (ret < 0) |
4482 | + if (ret < 0) { |
4483 | + if (cow_start != (u64)-1) |
4484 | + cur_offset = cow_start; |
4485 | goto error; |
4486 | + } |
4487 | if (ret > 0) |
4488 | break; |
4489 | leaf = path->nodes[0]; |
4490 | @@ -3366,6 +3369,11 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, |
4491 | ret = btrfs_orphan_reserve_metadata(trans, inode); |
4492 | ASSERT(!ret); |
4493 | if (ret) { |
4494 | + /* |
4495 | + * dec doesn't need spin_lock as ->orphan_block_rsv |
4496 | + * would be released only if ->orphan_inodes is |
4497 | + * zero. |
4498 | + */ |
4499 | atomic_dec(&root->orphan_inodes); |
4500 | clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, |
4501 | &inode->runtime_flags); |
4502 | @@ -3380,12 +3388,17 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, |
4503 | if (insert >= 1) { |
4504 | ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); |
4505 | if (ret) { |
4506 | - atomic_dec(&root->orphan_inodes); |
4507 | if (reserve) { |
4508 | clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, |
4509 | &inode->runtime_flags); |
4510 | btrfs_orphan_release_metadata(inode); |
4511 | } |
4512 | + /* |
4513 | + * btrfs_orphan_commit_root may race with us and set |
4514 | + * ->orphan_block_rsv to zero, in order to avoid that, |
4515 | + * decrease ->orphan_inodes after everything is done. |
4516 | + */ |
4517 | + atomic_dec(&root->orphan_inodes); |
4518 | if (ret != -EEXIST) { |
4519 | clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, |
4520 | &inode->runtime_flags); |
4521 | @@ -3417,28 +3430,26 @@ static int btrfs_orphan_del(struct btrfs_trans_handle *trans, |
4522 | { |
4523 | struct btrfs_root *root = inode->root; |
4524 | int delete_item = 0; |
4525 | - int release_rsv = 0; |
4526 | int ret = 0; |
4527 | |
4528 | - spin_lock(&root->orphan_lock); |
4529 | if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, |
4530 | &inode->runtime_flags)) |
4531 | delete_item = 1; |
4532 | |
4533 | + if (delete_item && trans) |
4534 | + ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode)); |
4535 | + |
4536 | if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, |
4537 | &inode->runtime_flags)) |
4538 | - release_rsv = 1; |
4539 | - spin_unlock(&root->orphan_lock); |
4540 | + btrfs_orphan_release_metadata(inode); |
4541 | |
4542 | - if (delete_item) { |
4543 | + /* |
4544 | + * btrfs_orphan_commit_root may race with us and set ->orphan_block_rsv |
4545 | + * to zero, in order to avoid that, decrease ->orphan_inodes after |
4546 | + * everything is done. |
4547 | + */ |
4548 | + if (delete_item) |
4549 | atomic_dec(&root->orphan_inodes); |
4550 | - if (trans) |
4551 | - ret = btrfs_del_orphan_item(trans, root, |
4552 | - btrfs_ino(inode)); |
4553 | - } |
4554 | - |
4555 | - if (release_rsv) |
4556 | - btrfs_orphan_release_metadata(inode); |
4557 | |
4558 | return ret; |
4559 | } |
4560 | @@ -5263,7 +5274,7 @@ void btrfs_evict_inode(struct inode *inode) |
4561 | trace_btrfs_inode_evict(inode); |
4562 | |
4563 | if (!root) { |
4564 | - kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); |
4565 | + clear_inode(inode); |
4566 | return; |
4567 | } |
4568 | |
4569 | diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c |
4570 | index 7bf9b31561db..b5e1afb30f36 100644 |
4571 | --- a/fs/btrfs/tree-log.c |
4572 | +++ b/fs/btrfs/tree-log.c |
4573 | @@ -28,6 +28,7 @@ |
4574 | #include "hash.h" |
4575 | #include "compression.h" |
4576 | #include "qgroup.h" |
4577 | +#include "inode-map.h" |
4578 | |
4579 | /* magic values for the inode_only field in btrfs_log_inode: |
4580 | * |
4581 | @@ -2494,6 +2495,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, |
4582 | clean_tree_block(fs_info, next); |
4583 | btrfs_wait_tree_block_writeback(next); |
4584 | btrfs_tree_unlock(next); |
4585 | + } else { |
4586 | + if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) |
4587 | + clear_extent_buffer_dirty(next); |
4588 | } |
4589 | |
4590 | WARN_ON(root_owner != |
4591 | @@ -2574,6 +2578,9 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, |
4592 | clean_tree_block(fs_info, next); |
4593 | btrfs_wait_tree_block_writeback(next); |
4594 | btrfs_tree_unlock(next); |
4595 | + } else { |
4596 | + if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) |
4597 | + clear_extent_buffer_dirty(next); |
4598 | } |
4599 | |
4600 | WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID); |
4601 | @@ -2652,6 +2659,9 @@ static int walk_log_tree(struct btrfs_trans_handle *trans, |
4602 | clean_tree_block(fs_info, next); |
4603 | btrfs_wait_tree_block_writeback(next); |
4604 | btrfs_tree_unlock(next); |
4605 | + } else { |
4606 | + if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) |
4607 | + clear_extent_buffer_dirty(next); |
4608 | } |
4609 | |
4610 | WARN_ON(log->root_key.objectid != |
4611 | @@ -3040,13 +3050,14 @@ static void free_log_tree(struct btrfs_trans_handle *trans, |
4612 | |
4613 | while (1) { |
4614 | ret = find_first_extent_bit(&log->dirty_log_pages, |
4615 | - 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW, |
4616 | + 0, &start, &end, |
4617 | + EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT, |
4618 | NULL); |
4619 | if (ret) |
4620 | break; |
4621 | |
4622 | clear_extent_bits(&log->dirty_log_pages, start, end, |
4623 | - EXTENT_DIRTY | EXTENT_NEW); |
4624 | + EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT); |
4625 | } |
4626 | |
4627 | /* |
4628 | @@ -5705,6 +5716,23 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) |
4629 | path); |
4630 | } |
4631 | |
4632 | + if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) { |
4633 | + struct btrfs_root *root = wc.replay_dest; |
4634 | + |
4635 | + btrfs_release_path(path); |
4636 | + |
4637 | + /* |
4638 | + * We have just replayed everything, and the highest |
4639 | + * objectid of fs roots probably has changed in case |
4640 | + * some inode_item's got replayed. |
4641 | + * |
4642 | + * root->objectid_mutex is not acquired as log replay |
4643 | + * could only happen during mount. |
4644 | + */ |
4645 | + ret = btrfs_find_highest_objectid(root, |
4646 | + &root->highest_objectid); |
4647 | + } |
4648 | + |
4649 | key.offset = found_key.offset - 1; |
4650 | wc.replay_dest->log_root = NULL; |
4651 | free_extent_buffer(log->node); |
4652 | diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c |
4653 | index 534a9130f625..4c2f8b57bdc7 100644 |
4654 | --- a/fs/ext4/inode.c |
4655 | +++ b/fs/ext4/inode.c |
4656 | @@ -3767,10 +3767,18 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter) |
4657 | /* Credits for sb + inode write */ |
4658 | handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); |
4659 | if (IS_ERR(handle)) { |
4660 | - /* This is really bad luck. We've written the data |
4661 | - * but cannot extend i_size. Bail out and pretend |
4662 | - * the write failed... */ |
4663 | - ret = PTR_ERR(handle); |
4664 | + /* |
4665 | + * We wrote the data but cannot extend |
4666 | + * i_size. Bail out. In async io case, we do |
4667 | + * not return error here because we have |
4668 | + * already submmitted the corresponding |
4669 | + * bio. Returning error here makes the caller |
4670 | + * think that this IO is done and failed |
4671 | + * resulting in race with bio's completion |
4672 | + * handler. |
4673 | + */ |
4674 | + if (!ret) |
4675 | + ret = PTR_ERR(handle); |
4676 | if (inode->i_nlink) |
4677 | ext4_orphan_del(NULL, inode); |
4678 | |
4679 | diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
4680 | index 7c46693a14d7..71594382e195 100644 |
4681 | --- a/fs/ext4/super.c |
4682 | +++ b/fs/ext4/super.c |
4683 | @@ -742,6 +742,7 @@ __acquires(bitlock) |
4684 | } |
4685 | |
4686 | ext4_unlock_group(sb, grp); |
4687 | + ext4_commit_super(sb, 1); |
4688 | ext4_handle_error(sb); |
4689 | /* |
4690 | * We only get here in the ERRORS_RO case; relocking the group |
4691 | diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c |
4692 | index d5f0d96169c5..8c50d6878aa5 100644 |
4693 | --- a/fs/gfs2/bmap.c |
4694 | +++ b/fs/gfs2/bmap.c |
4695 | @@ -736,7 +736,7 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length, |
4696 | __be64 *ptr; |
4697 | sector_t lblock; |
4698 | sector_t lend; |
4699 | - int ret; |
4700 | + int ret = 0; |
4701 | int eob; |
4702 | unsigned int len; |
4703 | struct buffer_head *bh; |
4704 | @@ -748,12 +748,14 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length, |
4705 | goto out; |
4706 | } |
4707 | |
4708 | - if ((flags & IOMAP_REPORT) && gfs2_is_stuffed(ip)) { |
4709 | - gfs2_stuffed_iomap(inode, iomap); |
4710 | - if (pos >= iomap->length) |
4711 | - return -ENOENT; |
4712 | - ret = 0; |
4713 | - goto out; |
4714 | + if (gfs2_is_stuffed(ip)) { |
4715 | + if (flags & IOMAP_REPORT) { |
4716 | + gfs2_stuffed_iomap(inode, iomap); |
4717 | + if (pos >= iomap->length) |
4718 | + ret = -ENOENT; |
4719 | + goto out; |
4720 | + } |
4721 | + BUG_ON(!(flags & IOMAP_WRITE)); |
4722 | } |
4723 | |
4724 | lblock = pos >> inode->i_blkbits; |
4725 | @@ -764,7 +766,7 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length, |
4726 | iomap->type = IOMAP_HOLE; |
4727 | iomap->length = (u64)(lend - lblock) << inode->i_blkbits; |
4728 | iomap->flags = IOMAP_F_MERGED; |
4729 | - bmap_lock(ip, 0); |
4730 | + bmap_lock(ip, flags & IOMAP_WRITE); |
4731 | |
4732 | /* |
4733 | * Directory data blocks have a struct gfs2_meta_header header, so the |
4734 | @@ -807,27 +809,28 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length, |
4735 | iomap->flags |= IOMAP_F_BOUNDARY; |
4736 | iomap->length = (u64)len << inode->i_blkbits; |
4737 | |
4738 | - ret = 0; |
4739 | - |
4740 | out_release: |
4741 | release_metapath(&mp); |
4742 | - bmap_unlock(ip, 0); |
4743 | + bmap_unlock(ip, flags & IOMAP_WRITE); |
4744 | out: |
4745 | trace_gfs2_iomap_end(ip, iomap, ret); |
4746 | return ret; |
4747 | |
4748 | do_alloc: |
4749 | - if (!(flags & IOMAP_WRITE)) { |
4750 | - if (pos >= i_size_read(inode)) { |
4751 | + if (flags & IOMAP_WRITE) { |
4752 | + ret = gfs2_iomap_alloc(inode, iomap, flags, &mp); |
4753 | + } else if (flags & IOMAP_REPORT) { |
4754 | + loff_t size = i_size_read(inode); |
4755 | + if (pos >= size) |
4756 | ret = -ENOENT; |
4757 | - goto out_release; |
4758 | - } |
4759 | - ret = 0; |
4760 | - iomap->length = hole_size(inode, lblock, &mp); |
4761 | - goto out_release; |
4762 | + else if (height <= ip->i_height) |
4763 | + iomap->length = hole_size(inode, lblock, &mp); |
4764 | + else |
4765 | + iomap->length = size - pos; |
4766 | + } else { |
4767 | + if (height <= ip->i_height) |
4768 | + iomap->length = hole_size(inode, lblock, &mp); |
4769 | } |
4770 | - |
4771 | - ret = gfs2_iomap_alloc(inode, iomap, flags, &mp); |
4772 | goto out_release; |
4773 | } |
4774 | |
4775 | diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c |
4776 | index 8b08044b3120..c0681814c379 100644 |
4777 | --- a/fs/jbd2/transaction.c |
4778 | +++ b/fs/jbd2/transaction.c |
4779 | @@ -495,8 +495,10 @@ void jbd2_journal_free_reserved(handle_t *handle) |
4780 | EXPORT_SYMBOL(jbd2_journal_free_reserved); |
4781 | |
4782 | /** |
4783 | - * int jbd2_journal_start_reserved(handle_t *handle) - start reserved handle |
4784 | + * int jbd2_journal_start_reserved() - start reserved handle |
4785 | * @handle: handle to start |
4786 | + * @type: for handle statistics |
4787 | + * @line_no: for handle statistics |
4788 | * |
4789 | * Start handle that has been previously reserved with jbd2_journal_reserve(). |
4790 | * This attaches @handle to the running transaction (or creates one if there's |
4791 | @@ -626,6 +628,7 @@ int jbd2_journal_extend(handle_t *handle, int nblocks) |
4792 | * int jbd2_journal_restart() - restart a handle . |
4793 | * @handle: handle to restart |
4794 | * @nblocks: nr credits requested |
4795 | + * @gfp_mask: memory allocation flags (for start_this_handle) |
4796 | * |
4797 | * Restart a handle for a multi-transaction filesystem |
4798 | * operation. |
4799 | diff --git a/fs/mbcache.c b/fs/mbcache.c |
4800 | index b8b8b9ced9f8..46b23bb432fe 100644 |
4801 | --- a/fs/mbcache.c |
4802 | +++ b/fs/mbcache.c |
4803 | @@ -94,6 +94,7 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, |
4804 | entry->e_key = key; |
4805 | entry->e_value = value; |
4806 | entry->e_reusable = reusable; |
4807 | + entry->e_referenced = 0; |
4808 | head = mb_cache_entry_head(cache, key); |
4809 | hlist_bl_lock(head); |
4810 | hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) { |
4811 | diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c |
4812 | index 4689940a953c..5193218f5889 100644 |
4813 | --- a/fs/ocfs2/dlmglue.c |
4814 | +++ b/fs/ocfs2/dlmglue.c |
4815 | @@ -2486,6 +2486,15 @@ int ocfs2_inode_lock_with_page(struct inode *inode, |
4816 | ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK); |
4817 | if (ret == -EAGAIN) { |
4818 | unlock_page(page); |
4819 | + /* |
4820 | + * If we can't get inode lock immediately, we should not return |
4821 | + * directly here, since this will lead to a softlockup problem. |
4822 | + * The method is to get a blocking lock and immediately unlock |
4823 | + * before returning, this can avoid CPU resource waste due to |
4824 | + * lots of retries, and benefits fairness in getting lock. |
4825 | + */ |
4826 | + if (ocfs2_inode_lock(inode, ret_bh, ex) == 0) |
4827 | + ocfs2_inode_unlock(inode, ex); |
4828 | ret = AOP_TRUNCATED_PAGE; |
4829 | } |
4830 | |
4831 | diff --git a/fs/seq_file.c b/fs/seq_file.c |
4832 | index 4be761c1a03d..eea09f6d8830 100644 |
4833 | --- a/fs/seq_file.c |
4834 | +++ b/fs/seq_file.c |
4835 | @@ -181,8 +181,11 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) |
4836 | * if request is to read from zero offset, reset iterator to first |
4837 | * record as it might have been already advanced by previous requests |
4838 | */ |
4839 | - if (*ppos == 0) |
4840 | + if (*ppos == 0) { |
4841 | m->index = 0; |
4842 | + m->version = 0; |
4843 | + m->count = 0; |
4844 | + } |
4845 | |
4846 | /* Don't assume *ppos is where we left it */ |
4847 | if (unlikely(*ppos != m->read_pos)) { |
4848 | diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h |
4849 | index 972a25633525..c65e4489006d 100644 |
4850 | --- a/include/drm/i915_pciids.h |
4851 | +++ b/include/drm/i915_pciids.h |
4852 | @@ -392,6 +392,12 @@ |
4853 | INTEL_VGA_DEVICE(0x3EA8, info), /* ULT GT3 */ \ |
4854 | INTEL_VGA_DEVICE(0x3EA5, info) /* ULT GT3 */ |
4855 | |
4856 | +#define INTEL_CFL_IDS(info) \ |
4857 | + INTEL_CFL_S_GT1_IDS(info), \ |
4858 | + INTEL_CFL_S_GT2_IDS(info), \ |
4859 | + INTEL_CFL_H_GT2_IDS(info), \ |
4860 | + INTEL_CFL_U_GT3_IDS(info) |
4861 | + |
4862 | /* CNL U 2+2 */ |
4863 | #define INTEL_CNL_U_GT2_IDS(info) \ |
4864 | INTEL_VGA_DEVICE(0x5A52, info), \ |
4865 | diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h |
4866 | index 631354acfa72..73bc63e0a1c4 100644 |
4867 | --- a/include/linux/compiler-gcc.h |
4868 | +++ b/include/linux/compiler-gcc.h |
4869 | @@ -167,8 +167,6 @@ |
4870 | |
4871 | #if GCC_VERSION >= 40100 |
4872 | # define __compiletime_object_size(obj) __builtin_object_size(obj, 0) |
4873 | - |
4874 | -#define __nostackprotector __attribute__((__optimize__("no-stack-protector"))) |
4875 | #endif |
4876 | |
4877 | #if GCC_VERSION >= 40300 |
4878 | @@ -196,6 +194,11 @@ |
4879 | #endif /* __CHECKER__ */ |
4880 | #endif /* GCC_VERSION >= 40300 */ |
4881 | |
4882 | +#if GCC_VERSION >= 40400 |
4883 | +#define __optimize(level) __attribute__((__optimize__(level))) |
4884 | +#define __nostackprotector __optimize("no-stack-protector") |
4885 | +#endif /* GCC_VERSION >= 40400 */ |
4886 | + |
4887 | #if GCC_VERSION >= 40500 |
4888 | |
4889 | #ifndef __CHECKER__ |
4890 | diff --git a/include/linux/compiler.h b/include/linux/compiler.h |
4891 | index 52e611ab9a6c..5ff818e9a836 100644 |
4892 | --- a/include/linux/compiler.h |
4893 | +++ b/include/linux/compiler.h |
4894 | @@ -271,6 +271,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s |
4895 | |
4896 | #endif /* __ASSEMBLY__ */ |
4897 | |
4898 | +#ifndef __optimize |
4899 | +# define __optimize(level) |
4900 | +#endif |
4901 | + |
4902 | /* Compile time object size, -1 for unknown */ |
4903 | #ifndef __compiletime_object_size |
4904 | # define __compiletime_object_size(obj) -1 |
4905 | diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h |
4906 | index 8f7788d23b57..a6989e02d0a0 100644 |
4907 | --- a/include/linux/cpuidle.h |
4908 | +++ b/include/linux/cpuidle.h |
4909 | @@ -225,7 +225,7 @@ static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, |
4910 | } |
4911 | #endif |
4912 | |
4913 | -#ifdef CONFIG_ARCH_HAS_CPU_RELAX |
4914 | +#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX) |
4915 | void cpuidle_poll_state_init(struct cpuidle_driver *drv); |
4916 | #else |
4917 | static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {} |
4918 | diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h |
4919 | index 296d1e0ea87b..b708e5169d1d 100644 |
4920 | --- a/include/linux/jbd2.h |
4921 | +++ b/include/linux/jbd2.h |
4922 | @@ -418,26 +418,41 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) |
4923 | #define JI_WAIT_DATA (1 << __JI_WAIT_DATA) |
4924 | |
4925 | /** |
4926 | - * struct jbd_inode is the structure linking inodes in ordered mode |
4927 | - * present in a transaction so that we can sync them during commit. |
4928 | + * struct jbd_inode - The jbd_inode type is the structure linking inodes in |
4929 | + * ordered mode present in a transaction so that we can sync them during commit. |
4930 | */ |
4931 | struct jbd2_inode { |
4932 | - /* Which transaction does this inode belong to? Either the running |
4933 | - * transaction or the committing one. [j_list_lock] */ |
4934 | + /** |
4935 | + * @i_transaction: |
4936 | + * |
4937 | + * Which transaction does this inode belong to? Either the running |
4938 | + * transaction or the committing one. [j_list_lock] |
4939 | + */ |
4940 | transaction_t *i_transaction; |
4941 | |
4942 | - /* Pointer to the running transaction modifying inode's data in case |
4943 | - * there is already a committing transaction touching it. [j_list_lock] */ |
4944 | + /** |
4945 | + * @i_next_transaction: |
4946 | + * |
4947 | + * Pointer to the running transaction modifying inode's data in case |
4948 | + * there is already a committing transaction touching it. [j_list_lock] |
4949 | + */ |
4950 | transaction_t *i_next_transaction; |
4951 | |
4952 | - /* List of inodes in the i_transaction [j_list_lock] */ |
4953 | + /** |
4954 | + * @i_list: List of inodes in the i_transaction [j_list_lock] |
4955 | + */ |
4956 | struct list_head i_list; |
4957 | |
4958 | - /* VFS inode this inode belongs to [constant during the lifetime |
4959 | - * of the structure] */ |
4960 | + /** |
4961 | + * @i_vfs_inode: |
4962 | + * |
4963 | + * VFS inode this inode belongs to [constant for lifetime of structure] |
4964 | + */ |
4965 | struct inode *i_vfs_inode; |
4966 | |
4967 | - /* Flags of inode [j_list_lock] */ |
4968 | + /** |
4969 | + * @i_flags: Flags of inode [j_list_lock] |
4970 | + */ |
4971 | unsigned long i_flags; |
4972 | }; |
4973 | |
4974 | @@ -447,12 +462,20 @@ struct jbd2_revoke_table_s; |
4975 | * struct handle_s - The handle_s type is the concrete type associated with |
4976 | * handle_t. |
4977 | * @h_transaction: Which compound transaction is this update a part of? |
4978 | + * @h_journal: Which journal handle belongs to - used iff h_reserved set. |
4979 | + * @h_rsv_handle: Handle reserved for finishing the logical operation. |
4980 | * @h_buffer_credits: Number of remaining buffers we are allowed to dirty. |
4981 | - * @h_ref: Reference count on this handle |
4982 | - * @h_err: Field for caller's use to track errors through large fs operations |
4983 | - * @h_sync: flag for sync-on-close |
4984 | - * @h_jdata: flag to force data journaling |
4985 | - * @h_aborted: flag indicating fatal error on handle |
4986 | + * @h_ref: Reference count on this handle. |
4987 | + * @h_err: Field for caller's use to track errors through large fs operations. |
4988 | + * @h_sync: Flag for sync-on-close. |
4989 | + * @h_jdata: Flag to force data journaling. |
4990 | + * @h_reserved: Flag for handle for reserved credits. |
4991 | + * @h_aborted: Flag indicating fatal error on handle. |
4992 | + * @h_type: For handle statistics. |
4993 | + * @h_line_no: For handle statistics. |
4994 | + * @h_start_jiffies: Handle Start time. |
4995 | + * @h_requested_credits: Holds @h_buffer_credits after handle is started. |
4996 | + * @saved_alloc_context: Saved context while transaction is open. |
4997 | **/ |
4998 | |
4999 | /* Docbook can't yet cope with the bit fields, but will leave the documentation |
5000 | @@ -462,32 +485,23 @@ struct jbd2_revoke_table_s; |
5001 | struct jbd2_journal_handle |
5002 | { |
5003 | union { |
5004 | - /* Which compound transaction is this update a part of? */ |
5005 | transaction_t *h_transaction; |
5006 | /* Which journal handle belongs to - used iff h_reserved set */ |
5007 | journal_t *h_journal; |
5008 | }; |
5009 | |
5010 | - /* Handle reserved for finishing the logical operation */ |
5011 | handle_t *h_rsv_handle; |
5012 | - |
5013 | - /* Number of remaining buffers we are allowed to dirty: */ |
5014 | int h_buffer_credits; |
5015 | - |
5016 | - /* Reference count on this handle */ |
5017 | int h_ref; |
5018 | - |
5019 | - /* Field for caller's use to track errors through large fs */ |
5020 | - /* operations */ |
5021 | int h_err; |
5022 | |
5023 | /* Flags [no locking] */ |
5024 | - unsigned int h_sync: 1; /* sync-on-close */ |
5025 | - unsigned int h_jdata: 1; /* force data journaling */ |
5026 | - unsigned int h_reserved: 1; /* handle with reserved credits */ |
5027 | - unsigned int h_aborted: 1; /* fatal error on handle */ |
5028 | - unsigned int h_type: 8; /* for handle statistics */ |
5029 | - unsigned int h_line_no: 16; /* for handle statistics */ |
5030 | + unsigned int h_sync: 1; |
5031 | + unsigned int h_jdata: 1; |
5032 | + unsigned int h_reserved: 1; |
5033 | + unsigned int h_aborted: 1; |
5034 | + unsigned int h_type: 8; |
5035 | + unsigned int h_line_no: 16; |
5036 | |
5037 | unsigned long h_start_jiffies; |
5038 | unsigned int h_requested_credits; |
5039 | @@ -729,228 +743,253 @@ jbd2_time_diff(unsigned long start, unsigned long end) |
5040 | /** |
5041 | * struct journal_s - The journal_s type is the concrete type associated with |
5042 | * journal_t. |
5043 | - * @j_flags: General journaling state flags |
5044 | - * @j_errno: Is there an outstanding uncleared error on the journal (from a |
5045 | - * prior abort)? |
5046 | - * @j_sb_buffer: First part of superblock buffer |
5047 | - * @j_superblock: Second part of superblock buffer |
5048 | - * @j_format_version: Version of the superblock format |
5049 | - * @j_state_lock: Protect the various scalars in the journal |
5050 | - * @j_barrier_count: Number of processes waiting to create a barrier lock |
5051 | - * @j_barrier: The barrier lock itself |
5052 | - * @j_running_transaction: The current running transaction.. |
5053 | - * @j_committing_transaction: the transaction we are pushing to disk |
5054 | - * @j_checkpoint_transactions: a linked circular list of all transactions |
5055 | - * waiting for checkpointing |
5056 | - * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction |
5057 | - * to start committing, or for a barrier lock to be released |
5058 | - * @j_wait_done_commit: Wait queue for waiting for commit to complete |
5059 | - * @j_wait_commit: Wait queue to trigger commit |
5060 | - * @j_wait_updates: Wait queue to wait for updates to complete |
5061 | - * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop |
5062 | - * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints |
5063 | - * @j_head: Journal head - identifies the first unused block in the journal |
5064 | - * @j_tail: Journal tail - identifies the oldest still-used block in the |
5065 | - * journal. |
5066 | - * @j_free: Journal free - how many free blocks are there in the journal? |
5067 | - * @j_first: The block number of the first usable block |
5068 | - * @j_last: The block number one beyond the last usable block |
5069 | - * @j_dev: Device where we store the journal |
5070 | - * @j_blocksize: blocksize for the location where we store the journal. |
5071 | - * @j_blk_offset: starting block offset for into the device where we store the |
5072 | - * journal |
5073 | - * @j_fs_dev: Device which holds the client fs. For internal journal this will |
5074 | - * be equal to j_dev |
5075 | - * @j_reserved_credits: Number of buffers reserved from the running transaction |
5076 | - * @j_maxlen: Total maximum capacity of the journal region on disk. |
5077 | - * @j_list_lock: Protects the buffer lists and internal buffer state. |
5078 | - * @j_inode: Optional inode where we store the journal. If present, all journal |
5079 | - * block numbers are mapped into this inode via bmap(). |
5080 | - * @j_tail_sequence: Sequence number of the oldest transaction in the log |
5081 | - * @j_transaction_sequence: Sequence number of the next transaction to grant |
5082 | - * @j_commit_sequence: Sequence number of the most recently committed |
5083 | - * transaction |
5084 | - * @j_commit_request: Sequence number of the most recent transaction wanting |
5085 | - * commit |
5086 | - * @j_uuid: Uuid of client object. |
5087 | - * @j_task: Pointer to the current commit thread for this journal |
5088 | - * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a |
5089 | - * single compound commit transaction |
5090 | - * @j_commit_interval: What is the maximum transaction lifetime before we begin |
5091 | - * a commit? |
5092 | - * @j_commit_timer: The timer used to wakeup the commit thread |
5093 | - * @j_revoke_lock: Protect the revoke table |
5094 | - * @j_revoke: The revoke table - maintains the list of revoked blocks in the |
5095 | - * current transaction. |
5096 | - * @j_revoke_table: alternate revoke tables for j_revoke |
5097 | - * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction |
5098 | - * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the |
5099 | - * number that will fit in j_blocksize |
5100 | - * @j_last_sync_writer: most recent pid which did a synchronous write |
5101 | - * @j_history_lock: Protect the transactions statistics history |
5102 | - * @j_proc_entry: procfs entry for the jbd statistics directory |
5103 | - * @j_stats: Overall statistics |
5104 | - * @j_private: An opaque pointer to fs-private information. |
5105 | - * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies |
5106 | */ |
5107 | - |
5108 | struct journal_s |
5109 | { |
5110 | - /* General journaling state flags [j_state_lock] */ |
5111 | + /** |
5112 | + * @j_flags: General journaling state flags [j_state_lock] |
5113 | + */ |
5114 | unsigned long j_flags; |
5115 | |
5116 | - /* |
5117 | + /** |
5118 | + * @j_errno: |
5119 | + * |
5120 | * Is there an outstanding uncleared error on the journal (from a prior |
5121 | * abort)? [j_state_lock] |
5122 | */ |
5123 | int j_errno; |
5124 | |
5125 | - /* The superblock buffer */ |
5126 | + /** |
5127 | + * @j_sb_buffer: The first part of the superblock buffer. |
5128 | + */ |
5129 | struct buffer_head *j_sb_buffer; |
5130 | + |
5131 | + /** |
5132 | + * @j_superblock: The second part of the superblock buffer. |
5133 | + */ |
5134 | journal_superblock_t *j_superblock; |
5135 | |
5136 | - /* Version of the superblock format */ |
5137 | + /** |
5138 | + * @j_format_version: Version of the superblock format. |
5139 | + */ |
5140 | int j_format_version; |
5141 | |
5142 | - /* |
5143 | - * Protect the various scalars in the journal |
5144 | + /** |
5145 | + * @j_state_lock: Protect the various scalars in the journal. |
5146 | */ |
5147 | rwlock_t j_state_lock; |
5148 | |
5149 | - /* |
5150 | + /** |
5151 | + * @j_barrier_count: |
5152 | + * |
5153 | * Number of processes waiting to create a barrier lock [j_state_lock] |
5154 | */ |
5155 | int j_barrier_count; |
5156 | |
5157 | - /* The barrier lock itself */ |
5158 | + /** |
5159 | + * @j_barrier: The barrier lock itself. |
5160 | + */ |
5161 | struct mutex j_barrier; |
5162 | |
5163 | - /* |
5164 | + /** |
5165 | + * @j_running_transaction: |
5166 | + * |
5167 | * Transactions: The current running transaction... |
5168 | * [j_state_lock] [caller holding open handle] |
5169 | */ |
5170 | transaction_t *j_running_transaction; |
5171 | |
5172 | - /* |
5173 | + /** |
5174 | + * @j_committing_transaction: |
5175 | + * |
5176 | * the transaction we are pushing to disk |
5177 | * [j_state_lock] [caller holding open handle] |
5178 | */ |
5179 | transaction_t *j_committing_transaction; |
5180 | |
5181 | - /* |
5182 | + /** |
5183 | + * @j_checkpoint_transactions: |
5184 | + * |
5185 | * ... and a linked circular list of all transactions waiting for |
5186 | * checkpointing. [j_list_lock] |
5187 | */ |
5188 | transaction_t *j_checkpoint_transactions; |
5189 | |
5190 | - /* |
5191 | + /** |
5192 | + * @j_wait_transaction_locked: |
5193 | + * |
5194 | * Wait queue for waiting for a locked transaction to start committing, |
5195 | - * or for a barrier lock to be released |
5196 | + * or for a barrier lock to be released. |
5197 | */ |
5198 | wait_queue_head_t j_wait_transaction_locked; |
5199 | |
5200 | - /* Wait queue for waiting for commit to complete */ |
5201 | + /** |
5202 | + * @j_wait_done_commit: Wait queue for waiting for commit to complete. |
5203 | + */ |
5204 | wait_queue_head_t j_wait_done_commit; |
5205 | |
5206 | - /* Wait queue to trigger commit */ |
5207 | + /** |
5208 | + * @j_wait_commit: Wait queue to trigger commit. |
5209 | + */ |
5210 | wait_queue_head_t j_wait_commit; |
5211 | |
5212 | - /* Wait queue to wait for updates to complete */ |
5213 | + /** |
5214 | + * @j_wait_updates: Wait queue to wait for updates to complete. |
5215 | + */ |
5216 | wait_queue_head_t j_wait_updates; |
5217 | |
5218 | - /* Wait queue to wait for reserved buffer credits to drop */ |
5219 | + /** |
5220 | + * @j_wait_reserved: |
5221 | + * |
5222 | + * Wait queue to wait for reserved buffer credits to drop. |
5223 | + */ |
5224 | wait_queue_head_t j_wait_reserved; |
5225 | |
5226 | - /* Semaphore for locking against concurrent checkpoints */ |
5227 | + /** |
5228 | + * @j_checkpoint_mutex: |
5229 | + * |
5230 | + * Semaphore for locking against concurrent checkpoints. |
5231 | + */ |
5232 | struct mutex j_checkpoint_mutex; |
5233 | |
5234 | - /* |
5235 | + /** |
5236 | + * @j_chkpt_bhs: |
5237 | + * |
5238 | * List of buffer heads used by the checkpoint routine. This |
5239 | * was moved from jbd2_log_do_checkpoint() to reduce stack |
5240 | * usage. Access to this array is controlled by the |
5241 | - * j_checkpoint_mutex. [j_checkpoint_mutex] |
5242 | + * @j_checkpoint_mutex. [j_checkpoint_mutex] |
5243 | */ |
5244 | struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH]; |
5245 | - |
5246 | - /* |
5247 | + |
5248 | + /** |
5249 | + * @j_head: |
5250 | + * |
5251 | * Journal head: identifies the first unused block in the journal. |
5252 | * [j_state_lock] |
5253 | */ |
5254 | unsigned long j_head; |
5255 | |
5256 | - /* |
5257 | + /** |
5258 | + * @j_tail: |
5259 | + * |
5260 | * Journal tail: identifies the oldest still-used block in the journal. |
5261 | * [j_state_lock] |
5262 | */ |
5263 | unsigned long j_tail; |
5264 | |
5265 | - /* |
5266 | + /** |
5267 | + * @j_free: |
5268 | + * |
5269 | * Journal free: how many free blocks are there in the journal? |
5270 | * [j_state_lock] |
5271 | */ |
5272 | unsigned long j_free; |
5273 | |
5274 | - /* |
5275 | - * Journal start and end: the block numbers of the first usable block |
5276 | - * and one beyond the last usable block in the journal. [j_state_lock] |
5277 | + /** |
5278 | + * @j_first: |
5279 | + * |
5280 | + * The block number of the first usable block in the journal |
5281 | + * [j_state_lock]. |
5282 | */ |
5283 | unsigned long j_first; |
5284 | + |
5285 | + /** |
5286 | + * @j_last: |
5287 | + * |
5288 | + * The block number one beyond the last usable block in the journal |
5289 | + * [j_state_lock]. |
5290 | + */ |
5291 | unsigned long j_last; |
5292 | |
5293 | - /* |
5294 | - * Device, blocksize and starting block offset for the location where we |
5295 | - * store the journal. |
5296 | + /** |
5297 | + * @j_dev: Device where we store the journal. |
5298 | */ |
5299 | struct block_device *j_dev; |
5300 | + |
5301 | + /** |
5302 | + * @j_blocksize: Block size for the location where we store the journal. |
5303 | + */ |
5304 | int j_blocksize; |
5305 | + |
5306 | + /** |
5307 | + * @j_blk_offset: |
5308 | + * |
5309 | + * Starting block offset into the device where we store the journal. |
5310 | + */ |
5311 | unsigned long long j_blk_offset; |
5312 | + |
5313 | + /** |
5314 | + * @j_devname: Journal device name. |
5315 | + */ |
5316 | char j_devname[BDEVNAME_SIZE+24]; |
5317 | |
5318 | - /* |
5319 | + /** |
5320 | + * @j_fs_dev: |
5321 | + * |
5322 | * Device which holds the client fs. For internal journal this will be |
5323 | * equal to j_dev. |
5324 | */ |
5325 | struct block_device *j_fs_dev; |
5326 | |
5327 | - /* Total maximum capacity of the journal region on disk. */ |
5328 | + /** |
5329 | + * @j_maxlen: Total maximum capacity of the journal region on disk. |
5330 | + */ |
5331 | unsigned int j_maxlen; |
5332 | |
5333 | - /* Number of buffers reserved from the running transaction */ |
5334 | + /** |
5335 | + * @j_reserved_credits: |
5336 | + * |
5337 | + * Number of buffers reserved from the running transaction. |
5338 | + */ |
5339 | atomic_t j_reserved_credits; |
5340 | |
5341 | - /* |
5342 | - * Protects the buffer lists and internal buffer state. |
5343 | + /** |
5344 | + * @j_list_lock: Protects the buffer lists and internal buffer state. |
5345 | */ |
5346 | spinlock_t j_list_lock; |
5347 | |
5348 | - /* Optional inode where we store the journal. If present, all */ |
5349 | - /* journal block numbers are mapped into this inode via */ |
5350 | - /* bmap(). */ |
5351 | + /** |
5352 | + * @j_inode: |
5353 | + * |
5354 | + * Optional inode where we store the journal. If present, all |
5355 | + * journal block numbers are mapped into this inode via bmap(). |
5356 | + */ |
5357 | struct inode *j_inode; |
5358 | |
5359 | - /* |
5360 | + /** |
5361 | + * @j_tail_sequence: |
5362 | + * |
5363 | * Sequence number of the oldest transaction in the log [j_state_lock] |
5364 | */ |
5365 | tid_t j_tail_sequence; |
5366 | |
5367 | - /* |
5368 | + /** |
5369 | + * @j_transaction_sequence: |
5370 | + * |
5371 | * Sequence number of the next transaction to grant [j_state_lock] |
5372 | */ |
5373 | tid_t j_transaction_sequence; |
5374 | |
5375 | - /* |
5376 | + /** |
5377 | + * @j_commit_sequence: |
5378 | + * |
5379 | * Sequence number of the most recently committed transaction |
5380 | * [j_state_lock]. |
5381 | */ |
5382 | tid_t j_commit_sequence; |
5383 | |
5384 | - /* |
5385 | + /** |
5386 | + * @j_commit_request: |
5387 | + * |
5388 | * Sequence number of the most recent transaction wanting commit |
5389 | * [j_state_lock] |
5390 | */ |
5391 | tid_t j_commit_request; |
5392 | |
5393 | - /* |
5394 | + /** |
5395 | + * @j_uuid: |
5396 | + * |
5397 | * Journal uuid: identifies the object (filesystem, LVM volume etc) |
5398 | * backed by this journal. This will eventually be replaced by an array |
5399 | * of uuids, allowing us to index multiple devices within a single |
5400 | @@ -958,85 +997,151 @@ struct journal_s |
5401 | */ |
5402 | __u8 j_uuid[16]; |
5403 | |
5404 | - /* Pointer to the current commit thread for this journal */ |
5405 | + /** |
5406 | + * @j_task: Pointer to the current commit thread for this journal. |
5407 | + */ |
5408 | struct task_struct *j_task; |
5409 | |
5410 | - /* |
5411 | + /** |
5412 | + * @j_max_transaction_buffers: |
5413 | + * |
5414 | * Maximum number of metadata buffers to allow in a single compound |
5415 | - * commit transaction |
5416 | + * commit transaction. |
5417 | */ |
5418 | int j_max_transaction_buffers; |
5419 | |
5420 | - /* |
5421 | + /** |
5422 | + * @j_commit_interval: |
5423 | + * |
5424 | * What is the maximum transaction lifetime before we begin a commit? |
5425 | */ |
5426 | unsigned long j_commit_interval; |
5427 | |
5428 | - /* The timer used to wakeup the commit thread: */ |
5429 | + /** |
5430 | + * @j_commit_timer: The timer used to wakeup the commit thread. |
5431 | + */ |
5432 | struct timer_list j_commit_timer; |
5433 | |
5434 | - /* |
5435 | - * The revoke table: maintains the list of revoked blocks in the |
5436 | - * current transaction. [j_revoke_lock] |
5437 | + /** |
5438 | + * @j_revoke_lock: Protect the revoke table. |
5439 | */ |
5440 | spinlock_t j_revoke_lock; |
5441 | + |
5442 | + /** |
5443 | + * @j_revoke: |
5444 | + * |
5445 | + * The revoke table - maintains the list of revoked blocks in the |
5446 | + * current transaction. |
5447 | + */ |
5448 | struct jbd2_revoke_table_s *j_revoke; |
5449 | + |
5450 | + /** |
5451 | + * @j_revoke_table: Alternate revoke tables for j_revoke. |
5452 | + */ |
5453 | struct jbd2_revoke_table_s *j_revoke_table[2]; |
5454 | |
5455 | - /* |
5456 | - * array of bhs for jbd2_journal_commit_transaction |
5457 | + /** |
5458 | + * @j_wbuf: Array of bhs for jbd2_journal_commit_transaction. |
5459 | */ |
5460 | struct buffer_head **j_wbuf; |
5461 | + |
5462 | + /** |
5463 | + * @j_wbufsize: |
5464 | + * |
5465 | + * Size of @j_wbuf array. |
5466 | + */ |
5467 | int j_wbufsize; |
5468 | |
5469 | - /* |
5470 | - * this is the pid of hte last person to run a synchronous operation |
5471 | - * through the journal |
5472 | + /** |
5473 | + * @j_last_sync_writer: |
5474 | + * |
5475 | + * The pid of the last person to run a synchronous operation |
5476 | + * through the journal. |
5477 | */ |
5478 | pid_t j_last_sync_writer; |
5479 | |
5480 | - /* |
5481 | - * the average amount of time in nanoseconds it takes to commit a |
5482 | + /** |
5483 | + * @j_average_commit_time: |
5484 | + * |
5485 | + * The average amount of time in nanoseconds it takes to commit a |
5486 | * transaction to disk. [j_state_lock] |
5487 | */ |
5488 | u64 j_average_commit_time; |
5489 | |
5490 | - /* |
5491 | - * minimum and maximum times that we should wait for |
5492 | - * additional filesystem operations to get batched into a |
5493 | - * synchronous handle in microseconds |
5494 | + /** |
5495 | + * @j_min_batch_time: |
5496 | + * |
5497 | + * Minimum time that we should wait for additional filesystem operations |
5498 | + * to get batched into a synchronous handle in microseconds. |
5499 | */ |
5500 | u32 j_min_batch_time; |
5501 | + |
5502 | + /** |
5503 | + * @j_max_batch_time: |
5504 | + * |
5505 | + * Maximum time that we should wait for additional filesystem operations |
5506 | + * to get batched into a synchronous handle in microseconds. |
5507 | + */ |
5508 | u32 j_max_batch_time; |
5509 | |
5510 | - /* This function is called when a transaction is closed */ |
5511 | + /** |
5512 | + * @j_commit_callback: |
5513 | + * |
5514 | + * This function is called when a transaction is closed. |
5515 | + */ |
5516 | void (*j_commit_callback)(journal_t *, |
5517 | transaction_t *); |
5518 | |
5519 | /* |
5520 | * Journal statistics |
5521 | */ |
5522 | + |
5523 | + /** |
5524 | + * @j_history_lock: Protect the transactions statistics history. |
5525 | + */ |
5526 | spinlock_t j_history_lock; |
5527 | + |
5528 | + /** |
5529 | + * @j_proc_entry: procfs entry for the jbd statistics directory. |
5530 | + */ |
5531 | struct proc_dir_entry *j_proc_entry; |
5532 | + |
5533 | + /** |
5534 | + * @j_stats: Overall statistics. |
5535 | + */ |
5536 | struct transaction_stats_s j_stats; |
5537 | |
5538 | - /* Failed journal commit ID */ |
5539 | + /** |
5540 | + * @j_failed_commit: Failed journal commit ID. |
5541 | + */ |
5542 | unsigned int j_failed_commit; |
5543 | |
5544 | - /* |
5545 | + /** |
5546 | + * @j_private: |
5547 | + * |
5548 | * An opaque pointer to fs-private information. ext3 puts its |
5549 | - * superblock pointer here |
5550 | + * superblock pointer here. |
5551 | */ |
5552 | void *j_private; |
5553 | |
5554 | - /* Reference to checksum algorithm driver via cryptoapi */ |
5555 | + /** |
5556 | + * @j_chksum_driver: |
5557 | + * |
5558 | + * Reference to checksum algorithm driver via cryptoapi. |
5559 | + */ |
5560 | struct crypto_shash *j_chksum_driver; |
5561 | |
5562 | - /* Precomputed journal UUID checksum for seeding other checksums */ |
5563 | + /** |
5564 | + * @j_csum_seed: |
5565 | + * |
5566 | + * Precomputed journal UUID checksum for seeding other checksums. |
5567 | + */ |
5568 | __u32 j_csum_seed; |
5569 | |
5570 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
5571 | - /* |
5572 | + /** |
5573 | + * @j_trans_commit_map: |
5574 | + * |
5575 | * Lockdep entity to track transaction commit dependencies. Handles |
5576 | * hold this "lock" for read, when we wait for commit, we acquire the |
5577 | * "lock" for writing. This matches the properties of jbd2 journalling |
5578 | diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h |
5579 | index a0610427e168..b82c4ae92411 100644 |
5580 | --- a/include/linux/mlx5/driver.h |
5581 | +++ b/include/linux/mlx5/driver.h |
5582 | @@ -1238,7 +1238,7 @@ mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector) |
5583 | int eqn; |
5584 | int err; |
5585 | |
5586 | - err = mlx5_vector2eqn(dev, vector, &eqn, &irq); |
5587 | + err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq); |
5588 | if (err) |
5589 | return NULL; |
5590 | |
5591 | diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h |
5592 | index c30b32e3c862..10191c28fc04 100644 |
5593 | --- a/include/linux/mm_inline.h |
5594 | +++ b/include/linux/mm_inline.h |
5595 | @@ -127,10 +127,4 @@ static __always_inline enum lru_list page_lru(struct page *page) |
5596 | |
5597 | #define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) |
5598 | |
5599 | -#ifdef arch_unmap_kpfn |
5600 | -extern void arch_unmap_kpfn(unsigned long pfn); |
5601 | -#else |
5602 | -static __always_inline void arch_unmap_kpfn(unsigned long pfn) { } |
5603 | -#endif |
5604 | - |
5605 | #endif |
5606 | diff --git a/include/linux/nospec.h b/include/linux/nospec.h |
5607 | index b99bced39ac2..fbc98e2c8228 100644 |
5608 | --- a/include/linux/nospec.h |
5609 | +++ b/include/linux/nospec.h |
5610 | @@ -19,20 +19,6 @@ |
5611 | static inline unsigned long array_index_mask_nospec(unsigned long index, |
5612 | unsigned long size) |
5613 | { |
5614 | - /* |
5615 | - * Warn developers about inappropriate array_index_nospec() usage. |
5616 | - * |
5617 | - * Even if the CPU speculates past the WARN_ONCE branch, the |
5618 | - * sign bit of @index is taken into account when generating the |
5619 | - * mask. |
5620 | - * |
5621 | - * This warning is compiled out when the compiler can infer that |
5622 | - * @index and @size are less than LONG_MAX. |
5623 | - */ |
5624 | - if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, |
5625 | - "array_index_nospec() limited to range of [0, LONG_MAX]\n")) |
5626 | - return 0; |
5627 | - |
5628 | /* |
5629 | * Always calculate and emit the mask even if the compiler |
5630 | * thinks the mask is not needed. The compiler does not take |
5631 | @@ -43,6 +29,26 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, |
5632 | } |
5633 | #endif |
5634 | |
5635 | +/* |
5636 | + * Warn developers about inappropriate array_index_nospec() usage. |
5637 | + * |
5638 | + * Even if the CPU speculates past the WARN_ONCE branch, the |
5639 | + * sign bit of @index is taken into account when generating the |
5640 | + * mask. |
5641 | + * |
5642 | + * This warning is compiled out when the compiler can infer that |
5643 | + * @index and @size are less than LONG_MAX. |
5644 | + */ |
5645 | +#define array_index_mask_nospec_check(index, size) \ |
5646 | +({ \ |
5647 | + if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \ |
5648 | + "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \ |
5649 | + _mask = 0; \ |
5650 | + else \ |
5651 | + _mask = array_index_mask_nospec(index, size); \ |
5652 | + _mask; \ |
5653 | +}) |
5654 | + |
5655 | /* |
5656 | * array_index_nospec - sanitize an array index after a bounds check |
5657 | * |
5658 | @@ -61,7 +67,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, |
5659 | ({ \ |
5660 | typeof(index) _i = (index); \ |
5661 | typeof(size) _s = (size); \ |
5662 | - unsigned long _mask = array_index_mask_nospec(_i, _s); \ |
5663 | + unsigned long _mask = array_index_mask_nospec_check(_i, _s); \ |
5664 | \ |
5665 | BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ |
5666 | BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ |
5667 | diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h |
5668 | index fd84cda5ed7c..0d6a110dae7c 100644 |
5669 | --- a/include/rdma/ib_verbs.h |
5670 | +++ b/include/rdma/ib_verbs.h |
5671 | @@ -983,9 +983,9 @@ struct ib_wc { |
5672 | u32 invalidate_rkey; |
5673 | } ex; |
5674 | u32 src_qp; |
5675 | + u32 slid; |
5676 | int wc_flags; |
5677 | u16 pkey_index; |
5678 | - u32 slid; |
5679 | u8 sl; |
5680 | u8 dlid_path_bits; |
5681 | u8 port_num; /* valid only for DR SMPs on switches */ |
5682 | diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h |
5683 | index b8adf05c534e..7dd8f34c37df 100644 |
5684 | --- a/include/trace/events/xen.h |
5685 | +++ b/include/trace/events/xen.h |
5686 | @@ -368,7 +368,7 @@ TRACE_EVENT(xen_mmu_flush_tlb, |
5687 | TP_printk("%s", "") |
5688 | ); |
5689 | |
5690 | -TRACE_EVENT(xen_mmu_flush_tlb_single, |
5691 | +TRACE_EVENT(xen_mmu_flush_tlb_one_user, |
5692 | TP_PROTO(unsigned long addr), |
5693 | TP_ARGS(addr), |
5694 | TP_STRUCT__entry( |
5695 | diff --git a/kernel/memremap.c b/kernel/memremap.c |
5696 | index 403ab9cdb949..4712ce646e04 100644 |
5697 | --- a/kernel/memremap.c |
5698 | +++ b/kernel/memremap.c |
5699 | @@ -301,7 +301,8 @@ static void devm_memremap_pages_release(struct device *dev, void *data) |
5700 | |
5701 | /* pages are dead and unused, undo the arch mapping */ |
5702 | align_start = res->start & ~(SECTION_SIZE - 1); |
5703 | - align_size = ALIGN(resource_size(res), SECTION_SIZE); |
5704 | + align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) |
5705 | + - align_start; |
5706 | |
5707 | mem_hotplug_begin(); |
5708 | arch_remove_memory(align_start, align_size); |
5709 | diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c |
5710 | index 61e7f0678d33..a764aec3c9a1 100644 |
5711 | --- a/kernel/trace/trace_events_filter.c |
5712 | +++ b/kernel/trace/trace_events_filter.c |
5713 | @@ -400,7 +400,6 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not) |
5714 | for (i = 0; i < len; i++) { |
5715 | if (buff[i] == '*') { |
5716 | if (!i) { |
5717 | - *search = buff + 1; |
5718 | type = MATCH_END_ONLY; |
5719 | } else if (i == len - 1) { |
5720 | if (type == MATCH_END_ONLY) |
5721 | @@ -410,14 +409,14 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not) |
5722 | buff[i] = 0; |
5723 | break; |
5724 | } else { /* pattern continues, use full glob */ |
5725 | - type = MATCH_GLOB; |
5726 | - break; |
5727 | + return MATCH_GLOB; |
5728 | } |
5729 | } else if (strchr("[?\\", buff[i])) { |
5730 | - type = MATCH_GLOB; |
5731 | - break; |
5732 | + return MATCH_GLOB; |
5733 | } |
5734 | } |
5735 | + if (buff[0] == '*') |
5736 | + *search = buff + 1; |
5737 | |
5738 | return type; |
5739 | } |
5740 | diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c |
5741 | index 40592e7b3568..268029ae1be6 100644 |
5742 | --- a/kernel/trace/trace_uprobe.c |
5743 | +++ b/kernel/trace/trace_uprobe.c |
5744 | @@ -608,7 +608,7 @@ static int probes_seq_show(struct seq_file *m, void *v) |
5745 | |
5746 | /* Don't print "0x (null)" when offset is 0 */ |
5747 | if (tu->offset) { |
5748 | - seq_printf(m, "0x%p", (void *)tu->offset); |
5749 | + seq_printf(m, "0x%px", (void *)tu->offset); |
5750 | } else { |
5751 | switch (sizeof(void *)) { |
5752 | case 4: |
5753 | diff --git a/lib/swiotlb.c b/lib/swiotlb.c |
5754 | index cea19aaf303c..0d7f46fb993a 100644 |
5755 | --- a/lib/swiotlb.c |
5756 | +++ b/lib/swiotlb.c |
5757 | @@ -586,7 +586,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, |
5758 | |
5759 | not_found: |
5760 | spin_unlock_irqrestore(&io_tlb_lock, flags); |
5761 | - if (printk_ratelimit()) |
5762 | + if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) |
5763 | dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size); |
5764 | return SWIOTLB_MAP_ERROR; |
5765 | found: |
5766 | @@ -713,6 +713,7 @@ void * |
5767 | swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
5768 | dma_addr_t *dma_handle, gfp_t flags) |
5769 | { |
5770 | + bool warn = !(flags & __GFP_NOWARN); |
5771 | dma_addr_t dev_addr; |
5772 | void *ret; |
5773 | int order = get_order(size); |
5774 | @@ -738,8 +739,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
5775 | * GFP_DMA memory; fall back on map_single(), which |
5776 | * will grab memory from the lowest available address range. |
5777 | */ |
5778 | - phys_addr_t paddr = map_single(hwdev, 0, size, |
5779 | - DMA_FROM_DEVICE, 0); |
5780 | + phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE, |
5781 | + warn ? 0 : DMA_ATTR_NO_WARN); |
5782 | if (paddr == SWIOTLB_MAP_ERROR) |
5783 | goto err_warn; |
5784 | |
5785 | @@ -769,9 +770,11 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
5786 | return ret; |
5787 | |
5788 | err_warn: |
5789 | - pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n", |
5790 | - dev_name(hwdev), size); |
5791 | - dump_stack(); |
5792 | + if (warn && printk_ratelimit()) { |
5793 | + pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n", |
5794 | + dev_name(hwdev), size); |
5795 | + dump_stack(); |
5796 | + } |
5797 | |
5798 | return NULL; |
5799 | } |
5800 | diff --git a/mm/memory-failure.c b/mm/memory-failure.c |
5801 | index 4acdf393a801..c85fa0038848 100644 |
5802 | --- a/mm/memory-failure.c |
5803 | +++ b/mm/memory-failure.c |
5804 | @@ -1146,8 +1146,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags) |
5805 | return 0; |
5806 | } |
5807 | |
5808 | - arch_unmap_kpfn(pfn); |
5809 | - |
5810 | orig_head = hpage = compound_head(p); |
5811 | num_poisoned_pages_inc(); |
5812 | |
5813 | diff --git a/mm/memory.c b/mm/memory.c |
5814 | index 793004608332..93e51ad41ba3 100644 |
5815 | --- a/mm/memory.c |
5816 | +++ b/mm/memory.c |
5817 | @@ -81,7 +81,7 @@ |
5818 | |
5819 | #include "internal.h" |
5820 | |
5821 | -#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS |
5822 | +#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST) |
5823 | #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid. |
5824 | #endif |
5825 | |
5826 | diff --git a/mm/page_alloc.c b/mm/page_alloc.c |
5827 | index 76c9688b6a0a..d23818c5465a 100644 |
5828 | --- a/mm/page_alloc.c |
5829 | +++ b/mm/page_alloc.c |
5830 | @@ -1177,9 +1177,10 @@ static void free_one_page(struct zone *zone, |
5831 | } |
5832 | |
5833 | static void __meminit __init_single_page(struct page *page, unsigned long pfn, |
5834 | - unsigned long zone, int nid) |
5835 | + unsigned long zone, int nid, bool zero) |
5836 | { |
5837 | - mm_zero_struct_page(page); |
5838 | + if (zero) |
5839 | + mm_zero_struct_page(page); |
5840 | set_page_links(page, zone, nid, pfn); |
5841 | init_page_count(page); |
5842 | page_mapcount_reset(page); |
5843 | @@ -1194,9 +1195,9 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn, |
5844 | } |
5845 | |
5846 | static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone, |
5847 | - int nid) |
5848 | + int nid, bool zero) |
5849 | { |
5850 | - return __init_single_page(pfn_to_page(pfn), pfn, zone, nid); |
5851 | + return __init_single_page(pfn_to_page(pfn), pfn, zone, nid, zero); |
5852 | } |
5853 | |
5854 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
5855 | @@ -1217,7 +1218,7 @@ static void __meminit init_reserved_page(unsigned long pfn) |
5856 | if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) |
5857 | break; |
5858 | } |
5859 | - __init_single_pfn(pfn, zid, nid); |
5860 | + __init_single_pfn(pfn, zid, nid, true); |
5861 | } |
5862 | #else |
5863 | static inline void init_reserved_page(unsigned long pfn) |
5864 | @@ -1514,7 +1515,7 @@ static unsigned long __init deferred_init_range(int nid, int zid, |
5865 | page++; |
5866 | else |
5867 | page = pfn_to_page(pfn); |
5868 | - __init_single_page(page, pfn, zid, nid); |
5869 | + __init_single_page(page, pfn, zid, nid, true); |
5870 | cond_resched(); |
5871 | } |
5872 | } |
5873 | @@ -5393,15 +5394,20 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, |
5874 | * can be created for invalid pages (for alignment) |
5875 | * check here not to call set_pageblock_migratetype() against |
5876 | * pfn out of zone. |
5877 | + * |
5878 | + * Please note that MEMMAP_HOTPLUG path doesn't clear memmap |
5879 | + * because this is done early in sparse_add_one_section |
5880 | */ |
5881 | if (!(pfn & (pageblock_nr_pages - 1))) { |
5882 | struct page *page = pfn_to_page(pfn); |
5883 | |
5884 | - __init_single_page(page, pfn, zone, nid); |
5885 | + __init_single_page(page, pfn, zone, nid, |
5886 | + context != MEMMAP_HOTPLUG); |
5887 | set_pageblock_migratetype(page, MIGRATE_MOVABLE); |
5888 | cond_resched(); |
5889 | } else { |
5890 | - __init_single_pfn(pfn, zone, nid); |
5891 | + __init_single_pfn(pfn, zone, nid, |
5892 | + context != MEMMAP_HOTPLUG); |
5893 | } |
5894 | } |
5895 | } |
5896 | diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c |
5897 | index f3a4efcf1456..3aa5a93ad107 100644 |
5898 | --- a/net/9p/trans_virtio.c |
5899 | +++ b/net/9p/trans_virtio.c |
5900 | @@ -160,7 +160,8 @@ static void req_done(struct virtqueue *vq) |
5901 | spin_unlock_irqrestore(&chan->lock, flags); |
5902 | /* Wakeup if anyone waiting for VirtIO ring space. */ |
5903 | wake_up(chan->vc_wq); |
5904 | - p9_client_cb(chan->client, req, REQ_STATUS_RCVD); |
5905 | + if (len) |
5906 | + p9_client_cb(chan->client, req, REQ_STATUS_RCVD); |
5907 | } |
5908 | } |
5909 | |
5910 | diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c |
5911 | index 8ca9915befc8..aae3565c3a92 100644 |
5912 | --- a/net/mpls/af_mpls.c |
5913 | +++ b/net/mpls/af_mpls.c |
5914 | @@ -8,6 +8,7 @@ |
5915 | #include <linux/ipv6.h> |
5916 | #include <linux/mpls.h> |
5917 | #include <linux/netconf.h> |
5918 | +#include <linux/nospec.h> |
5919 | #include <linux/vmalloc.h> |
5920 | #include <linux/percpu.h> |
5921 | #include <net/ip.h> |
5922 | @@ -935,24 +936,27 @@ static int mpls_nh_build_multi(struct mpls_route_config *cfg, |
5923 | return err; |
5924 | } |
5925 | |
5926 | -static bool mpls_label_ok(struct net *net, unsigned int index, |
5927 | +static bool mpls_label_ok(struct net *net, unsigned int *index, |
5928 | struct netlink_ext_ack *extack) |
5929 | { |
5930 | + bool is_ok = true; |
5931 | + |
5932 | /* Reserved labels may not be set */ |
5933 | - if (index < MPLS_LABEL_FIRST_UNRESERVED) { |
5934 | + if (*index < MPLS_LABEL_FIRST_UNRESERVED) { |
5935 | NL_SET_ERR_MSG(extack, |
5936 | "Invalid label - must be MPLS_LABEL_FIRST_UNRESERVED or higher"); |
5937 | - return false; |
5938 | + is_ok = false; |
5939 | } |
5940 | |
5941 | /* The full 20 bit range may not be supported. */ |
5942 | - if (index >= net->mpls.platform_labels) { |
5943 | + if (is_ok && *index >= net->mpls.platform_labels) { |
5944 | NL_SET_ERR_MSG(extack, |
5945 | "Label >= configured maximum in platform_labels"); |
5946 | - return false; |
5947 | + is_ok = false; |
5948 | } |
5949 | |
5950 | - return true; |
5951 | + *index = array_index_nospec(*index, net->mpls.platform_labels); |
5952 | + return is_ok; |
5953 | } |
5954 | |
5955 | static int mpls_route_add(struct mpls_route_config *cfg, |
5956 | @@ -975,7 +979,7 @@ static int mpls_route_add(struct mpls_route_config *cfg, |
5957 | index = find_free_label(net); |
5958 | } |
5959 | |
5960 | - if (!mpls_label_ok(net, index, extack)) |
5961 | + if (!mpls_label_ok(net, &index, extack)) |
5962 | goto errout; |
5963 | |
5964 | /* Append makes no sense with mpls */ |
5965 | @@ -1052,7 +1056,7 @@ static int mpls_route_del(struct mpls_route_config *cfg, |
5966 | |
5967 | index = cfg->rc_label; |
5968 | |
5969 | - if (!mpls_label_ok(net, index, extack)) |
5970 | + if (!mpls_label_ok(net, &index, extack)) |
5971 | goto errout; |
5972 | |
5973 | mpls_route_update(net, index, NULL, &cfg->rc_nlinfo); |
5974 | @@ -1810,7 +1814,7 @@ static int rtm_to_route_config(struct sk_buff *skb, |
5975 | goto errout; |
5976 | |
5977 | if (!mpls_label_ok(cfg->rc_nlinfo.nl_net, |
5978 | - cfg->rc_label, extack)) |
5979 | + &cfg->rc_label, extack)) |
5980 | goto errout; |
5981 | break; |
5982 | } |
5983 | @@ -2137,7 +2141,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, |
5984 | goto errout; |
5985 | } |
5986 | |
5987 | - if (!mpls_label_ok(net, in_label, extack)) { |
5988 | + if (!mpls_label_ok(net, &in_label, extack)) { |
5989 | err = -EINVAL; |
5990 | goto errout; |
5991 | } |
5992 | diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c |
5993 | index a3f2ab283aeb..852b838d37b3 100644 |
5994 | --- a/net/sunrpc/xprtrdma/rpc_rdma.c |
5995 | +++ b/net/sunrpc/xprtrdma/rpc_rdma.c |
5996 | @@ -143,7 +143,7 @@ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt, |
5997 | if (xdr->page_len) { |
5998 | remaining = xdr->page_len; |
5999 | offset = offset_in_page(xdr->page_base); |
6000 | - count = 0; |
6001 | + count = RPCRDMA_MIN_SEND_SGES; |
6002 | while (remaining) { |
6003 | remaining -= min_t(unsigned int, |
6004 | PAGE_SIZE - offset, remaining); |
6005 | diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c |
6006 | index 8607c029c0dd..8cd7ee4fa0cd 100644 |
6007 | --- a/net/sunrpc/xprtrdma/verbs.c |
6008 | +++ b/net/sunrpc/xprtrdma/verbs.c |
6009 | @@ -509,7 +509,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, |
6010 | pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge); |
6011 | return -ENOMEM; |
6012 | } |
6013 | - ia->ri_max_send_sges = max_sge - RPCRDMA_MIN_SEND_SGES; |
6014 | + ia->ri_max_send_sges = max_sge; |
6015 | |
6016 | if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) { |
6017 | dprintk("RPC: %s: insufficient wqe's available\n", |
6018 | @@ -1476,6 +1476,9 @@ __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb) |
6019 | static void |
6020 | rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb) |
6021 | { |
6022 | + if (!rb) |
6023 | + return; |
6024 | + |
6025 | if (!rpcrdma_regbuf_is_mapped(rb)) |
6026 | return; |
6027 | |
6028 | @@ -1491,9 +1494,6 @@ rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb) |
6029 | void |
6030 | rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb) |
6031 | { |
6032 | - if (!rb) |
6033 | - return; |
6034 | - |
6035 | rpcrdma_dma_unmap_regbuf(rb); |
6036 | kfree(rb); |
6037 | } |
6038 | diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c |
6039 | index d01913404581..a42cbbf2c8d9 100644 |
6040 | --- a/sound/core/seq/seq_clientmgr.c |
6041 | +++ b/sound/core/seq/seq_clientmgr.c |
6042 | @@ -1003,7 +1003,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf, |
6043 | { |
6044 | struct snd_seq_client *client = file->private_data; |
6045 | int written = 0, len; |
6046 | - int err = -EINVAL; |
6047 | + int err; |
6048 | struct snd_seq_event event; |
6049 | |
6050 | if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT)) |
6051 | @@ -1018,11 +1018,15 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf, |
6052 | |
6053 | /* allocate the pool now if the pool is not allocated yet */ |
6054 | if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) { |
6055 | - if (snd_seq_pool_init(client->pool) < 0) |
6056 | + mutex_lock(&client->ioctl_mutex); |
6057 | + err = snd_seq_pool_init(client->pool); |
6058 | + mutex_unlock(&client->ioctl_mutex); |
6059 | + if (err < 0) |
6060 | return -ENOMEM; |
6061 | } |
6062 | |
6063 | /* only process whole events */ |
6064 | + err = -EINVAL; |
6065 | while (count >= sizeof(struct snd_seq_event)) { |
6066 | /* Read in the event header from the user */ |
6067 | len = sizeof(event); |
6068 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
6069 | index 1750e00c5bb4..4ff1f0ca52fc 100644 |
6070 | --- a/sound/pci/hda/patch_realtek.c |
6071 | +++ b/sound/pci/hda/patch_realtek.c |
6072 | @@ -3378,6 +3378,19 @@ static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec, |
6073 | spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; |
6074 | } |
6075 | |
6076 | +static void alc269_fixup_pincfg_U7x7_headset_mic(struct hda_codec *codec, |
6077 | + const struct hda_fixup *fix, |
6078 | + int action) |
6079 | +{ |
6080 | + unsigned int cfg_headphone = snd_hda_codec_get_pincfg(codec, 0x21); |
6081 | + unsigned int cfg_headset_mic = snd_hda_codec_get_pincfg(codec, 0x19); |
6082 | + |
6083 | + if (cfg_headphone && cfg_headset_mic == 0x411111f0) |
6084 | + snd_hda_codec_set_pincfg(codec, 0x19, |
6085 | + (cfg_headphone & ~AC_DEFCFG_DEVICE) | |
6086 | + (AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT)); |
6087 | +} |
6088 | + |
6089 | static void alc269_fixup_hweq(struct hda_codec *codec, |
6090 | const struct hda_fixup *fix, int action) |
6091 | { |
6092 | @@ -4850,6 +4863,28 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec, |
6093 | } |
6094 | } |
6095 | |
6096 | +static void alc_fixup_tpt470_dock(struct hda_codec *codec, |
6097 | + const struct hda_fixup *fix, int action) |
6098 | +{ |
6099 | + static const struct hda_pintbl pincfgs[] = { |
6100 | + { 0x17, 0x21211010 }, /* dock headphone */ |
6101 | + { 0x19, 0x21a11010 }, /* dock mic */ |
6102 | + { } |
6103 | + }; |
6104 | + struct alc_spec *spec = codec->spec; |
6105 | + |
6106 | + if (action == HDA_FIXUP_ACT_PRE_PROBE) { |
6107 | + spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; |
6108 | + /* Enable DOCK device */ |
6109 | + snd_hda_codec_write(codec, 0x17, 0, |
6110 | + AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0); |
6111 | + /* Enable DOCK device */ |
6112 | + snd_hda_codec_write(codec, 0x19, 0, |
6113 | + AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0); |
6114 | + snd_hda_apply_pincfgs(codec, pincfgs); |
6115 | + } |
6116 | +} |
6117 | + |
6118 | static void alc_shutup_dell_xps13(struct hda_codec *codec) |
6119 | { |
6120 | struct alc_spec *spec = codec->spec; |
6121 | @@ -5229,6 +5264,7 @@ enum { |
6122 | ALC269_FIXUP_LIFEBOOK_EXTMIC, |
6123 | ALC269_FIXUP_LIFEBOOK_HP_PIN, |
6124 | ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT, |
6125 | + ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC, |
6126 | ALC269_FIXUP_AMIC, |
6127 | ALC269_FIXUP_DMIC, |
6128 | ALC269VB_FIXUP_AMIC, |
6129 | @@ -5324,6 +5360,7 @@ enum { |
6130 | ALC700_FIXUP_INTEL_REFERENCE, |
6131 | ALC274_FIXUP_DELL_BIND_DACS, |
6132 | ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, |
6133 | + ALC298_FIXUP_TPT470_DOCK, |
6134 | }; |
6135 | |
6136 | static const struct hda_fixup alc269_fixups[] = { |
6137 | @@ -5434,6 +5471,10 @@ static const struct hda_fixup alc269_fixups[] = { |
6138 | .type = HDA_FIXUP_FUNC, |
6139 | .v.func = alc269_fixup_pincfg_no_hp_to_lineout, |
6140 | }, |
6141 | + [ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC] = { |
6142 | + .type = HDA_FIXUP_FUNC, |
6143 | + .v.func = alc269_fixup_pincfg_U7x7_headset_mic, |
6144 | + }, |
6145 | [ALC269_FIXUP_AMIC] = { |
6146 | .type = HDA_FIXUP_PINS, |
6147 | .v.pins = (const struct hda_pintbl[]) { |
6148 | @@ -6149,6 +6190,12 @@ static const struct hda_fixup alc269_fixups[] = { |
6149 | .chained = true, |
6150 | .chain_id = ALC274_FIXUP_DELL_BIND_DACS |
6151 | }, |
6152 | + [ALC298_FIXUP_TPT470_DOCK] = { |
6153 | + .type = HDA_FIXUP_FUNC, |
6154 | + .v.func = alc_fixup_tpt470_dock, |
6155 | + .chained = true, |
6156 | + .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE |
6157 | + }, |
6158 | }; |
6159 | |
6160 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
6161 | @@ -6199,6 +6246,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
6162 | SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), |
6163 | SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), |
6164 | SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), |
6165 | + SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), |
6166 | + SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), |
6167 | SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
6168 | SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
6169 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), |
6170 | @@ -6300,6 +6349,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
6171 | SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT), |
6172 | SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN), |
6173 | SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN), |
6174 | + SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC), |
6175 | SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), |
6176 | SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), |
6177 | SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), |
6178 | @@ -6328,8 +6378,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
6179 | SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK), |
6180 | SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), |
6181 | SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), |
6182 | + SND_PCI_QUIRK(0x17aa, 0x222d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), |
6183 | + SND_PCI_QUIRK(0x17aa, 0x222e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), |
6184 | SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460), |
6185 | SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460), |
6186 | + SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK), |
6187 | + SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), |
6188 | + SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), |
6189 | + SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), |
6190 | + SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), |
6191 | + SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), |
6192 | SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), |
6193 | SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), |
6194 | SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), |
6195 | @@ -6350,7 +6408,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
6196 | SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460), |
6197 | SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460), |
6198 | SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460), |
6199 | + SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), |
6200 | + SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), |
6201 | + SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), |
6202 | SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
6203 | + SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), |
6204 | + SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), |
6205 | SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), |
6206 | SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), |
6207 | SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */ |
6208 | @@ -6612,6 +6675,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { |
6209 | {0x12, 0xb7a60130}, |
6210 | {0x14, 0x90170110}, |
6211 | {0x21, 0x02211020}), |
6212 | + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, |
6213 | + {0x12, 0x90a60130}, |
6214 | + {0x14, 0x90170110}, |
6215 | + {0x14, 0x01011020}, |
6216 | + {0x21, 0x0221101f}), |
6217 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, |
6218 | ALC256_STANDARD_PINS), |
6219 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC, |
6220 | @@ -6681,6 +6749,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { |
6221 | {0x12, 0x90a60120}, |
6222 | {0x14, 0x90170110}, |
6223 | {0x21, 0x0321101f}), |
6224 | + SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, |
6225 | + {0x12, 0xb7a60130}, |
6226 | + {0x14, 0x90170110}, |
6227 | + {0x21, 0x04211020}), |
6228 | SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1, |
6229 | ALC290_STANDARD_PINS, |
6230 | {0x15, 0x04211040}, |
6231 | diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c |
6232 | index 2b4ceda36291..20b28a5a1456 100644 |
6233 | --- a/sound/usb/mixer.c |
6234 | +++ b/sound/usb/mixer.c |
6235 | @@ -347,17 +347,20 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, |
6236 | int validx, int *value_ret) |
6237 | { |
6238 | struct snd_usb_audio *chip = cval->head.mixer->chip; |
6239 | - unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */ |
6240 | + /* enough space for one range */ |
6241 | + unsigned char buf[sizeof(__u16) + 3 * sizeof(__u32)]; |
6242 | unsigned char *val; |
6243 | - int idx = 0, ret, size; |
6244 | + int idx = 0, ret, val_size, size; |
6245 | __u8 bRequest; |
6246 | |
6247 | + val_size = uac2_ctl_value_size(cval->val_type); |
6248 | + |
6249 | if (request == UAC_GET_CUR) { |
6250 | bRequest = UAC2_CS_CUR; |
6251 | - size = uac2_ctl_value_size(cval->val_type); |
6252 | + size = val_size; |
6253 | } else { |
6254 | bRequest = UAC2_CS_RANGE; |
6255 | - size = sizeof(buf); |
6256 | + size = sizeof(__u16) + 3 * val_size; |
6257 | } |
6258 | |
6259 | memset(buf, 0, sizeof(buf)); |
6260 | @@ -390,16 +393,17 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, |
6261 | val = buf + sizeof(__u16); |
6262 | break; |
6263 | case UAC_GET_MAX: |
6264 | - val = buf + sizeof(__u16) * 2; |
6265 | + val = buf + sizeof(__u16) + val_size; |
6266 | break; |
6267 | case UAC_GET_RES: |
6268 | - val = buf + sizeof(__u16) * 3; |
6269 | + val = buf + sizeof(__u16) + val_size * 2; |
6270 | break; |
6271 | default: |
6272 | return -EINVAL; |
6273 | } |
6274 | |
6275 | - *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, sizeof(__u16))); |
6276 | + *value_ret = convert_signed_value(cval, |
6277 | + snd_usb_combine_bytes(val, val_size)); |
6278 | |
6279 | return 0; |
6280 | } |
6281 | diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c |
6282 | index b9c9a19f9588..3cbfae6604f9 100644 |
6283 | --- a/sound/usb/pcm.c |
6284 | +++ b/sound/usb/pcm.c |
6285 | @@ -352,6 +352,15 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, |
6286 | ep = 0x86; |
6287 | iface = usb_ifnum_to_if(dev, 2); |
6288 | |
6289 | + if (!iface || iface->num_altsetting == 0) |
6290 | + return -EINVAL; |
6291 | + |
6292 | + alts = &iface->altsetting[1]; |
6293 | + goto add_sync_ep; |
6294 | + case USB_ID(0x1397, 0x0002): |
6295 | + ep = 0x81; |
6296 | + iface = usb_ifnum_to_if(dev, 1); |
6297 | + |
6298 | if (!iface || iface->num_altsetting == 0) |
6299 | return -EINVAL; |
6300 | |
6301 | diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c |
6302 | index a66ef5777887..ea8f3de92fa4 100644 |
6303 | --- a/sound/usb/quirks.c |
6304 | +++ b/sound/usb/quirks.c |
6305 | @@ -1363,8 +1363,11 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, |
6306 | return SNDRV_PCM_FMTBIT_DSD_U32_BE; |
6307 | break; |
6308 | |
6309 | - /* Amanero Combo384 USB interface with native DSD support */ |
6310 | - case USB_ID(0x16d0, 0x071a): |
6311 | + /* Amanero Combo384 USB based DACs with native DSD support */ |
6312 | + case USB_ID(0x16d0, 0x071a): /* Amanero - Combo384 */ |
6313 | + case USB_ID(0x2ab6, 0x0004): /* T+A DAC8DSD-V2.0, MP1000E-V2.0, MP2000R-V2.0, MP2500R-V2.0, MP3100HV-V2.0 */ |
6314 | + case USB_ID(0x2ab6, 0x0005): /* T+A USB HD Audio 1 */ |
6315 | + case USB_ID(0x2ab6, 0x0006): /* T+A USB HD Audio 2 */ |
6316 | if (fp->altsetting == 2) { |
6317 | switch (le16_to_cpu(chip->dev->descriptor.bcdDevice)) { |
6318 | case 0x199: |
6319 | diff --git a/tools/objtool/check.c b/tools/objtool/check.c |
6320 | index 2e458eb45586..c7fb5c2392ee 100644 |
6321 | --- a/tools/objtool/check.c |
6322 | +++ b/tools/objtool/check.c |
6323 | @@ -1935,13 +1935,19 @@ static bool ignore_unreachable_insn(struct instruction *insn) |
6324 | if (is_kasan_insn(insn) || is_ubsan_insn(insn)) |
6325 | return true; |
6326 | |
6327 | - if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest) { |
6328 | - insn = insn->jump_dest; |
6329 | - continue; |
6330 | + if (insn->type == INSN_JUMP_UNCONDITIONAL) { |
6331 | + if (insn->jump_dest && |
6332 | + insn->jump_dest->func == insn->func) { |
6333 | + insn = insn->jump_dest; |
6334 | + continue; |
6335 | + } |
6336 | + |
6337 | + break; |
6338 | } |
6339 | |
6340 | if (insn->offset + insn->len >= insn->func->offset + insn->func->len) |
6341 | break; |
6342 | + |
6343 | insn = list_next_entry(insn, list); |
6344 | } |
6345 | |
6346 | diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c |
6347 | index 24dbf634e2dd..0b457e8e0f0c 100644 |
6348 | --- a/tools/testing/selftests/seccomp/seccomp_bpf.c |
6349 | +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c |
6350 | @@ -1717,7 +1717,7 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee, |
6351 | |
6352 | if (nr == __NR_getpid) |
6353 | change_syscall(_metadata, tracee, __NR_getppid); |
6354 | - if (nr == __NR_open) |
6355 | + if (nr == __NR_openat) |
6356 | change_syscall(_metadata, tracee, -1); |
6357 | } |
6358 | |
6359 | @@ -1792,7 +1792,7 @@ TEST_F(TRACE_syscall, ptrace_syscall_dropped) |
6360 | true); |
6361 | |
6362 | /* Tracer should skip the open syscall, resulting in EPERM. */ |
6363 | - EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_open)); |
6364 | + EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat)); |
6365 | } |
6366 | |
6367 | TEST_F(TRACE_syscall, syscall_allowed) |
6368 | diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c |
6369 | index a65b016d4c13..1097f04e4d80 100644 |
6370 | --- a/tools/testing/selftests/vm/compaction_test.c |
6371 | +++ b/tools/testing/selftests/vm/compaction_test.c |
6372 | @@ -137,6 +137,8 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size) |
6373 | printf("No of huge pages allocated = %d\n", |
6374 | (atoi(nr_hugepages))); |
6375 | |
6376 | + lseek(fd, 0, SEEK_SET); |
6377 | + |
6378 | if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages)) |
6379 | != strlen(initial_nr_hugepages)) { |
6380 | perror("Failed to write value to /proc/sys/vm/nr_hugepages\n"); |
6381 | diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile |
6382 | index 5d4f10ac2af2..aa6e2d7f6a1f 100644 |
6383 | --- a/tools/testing/selftests/x86/Makefile |
6384 | +++ b/tools/testing/selftests/x86/Makefile |
6385 | @@ -5,16 +5,26 @@ include ../lib.mk |
6386 | |
6387 | .PHONY: all all_32 all_64 warn_32bit_failure clean |
6388 | |
6389 | -TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \ |
6390 | - check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \ |
6391 | +UNAME_M := $(shell uname -m) |
6392 | +CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32) |
6393 | +CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c) |
6394 | + |
6395 | +TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \ |
6396 | + check_initial_reg_state sigreturn iopl mpx-mini-test ioperm \ |
6397 | protection_keys test_vdso test_vsyscall |
6398 | TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \ |
6399 | test_FCMOV test_FCOMI test_FISTTP \ |
6400 | vdso_restorer |
6401 | -TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip 5lvl |
6402 | +TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip |
6403 | +# Some selftests require 32bit support enabled also on 64bit systems |
6404 | +TARGETS_C_32BIT_NEEDED := ldt_gdt ptrace_syscall |
6405 | |
6406 | -TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY) |
6407 | +TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY) $(TARGETS_C_32BIT_NEEDED) |
6408 | TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY) |
6409 | +ifeq ($(CAN_BUILD_I386)$(CAN_BUILD_X86_64),11) |
6410 | +TARGETS_C_64BIT_ALL += $(TARGETS_C_32BIT_NEEDED) |
6411 | +endif |
6412 | + |
6413 | BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32) |
6414 | BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64) |
6415 | |
6416 | @@ -23,18 +33,16 @@ BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64)) |
6417 | |
6418 | CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie |
6419 | |
6420 | -UNAME_M := $(shell uname -m) |
6421 | -CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32) |
6422 | -CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c) |
6423 | - |
6424 | ifeq ($(CAN_BUILD_I386),1) |
6425 | all: all_32 |
6426 | TEST_PROGS += $(BINARIES_32) |
6427 | +EXTRA_CFLAGS += -DCAN_BUILD_32 |
6428 | endif |
6429 | |
6430 | ifeq ($(CAN_BUILD_X86_64),1) |
6431 | all: all_64 |
6432 | TEST_PROGS += $(BINARIES_64) |
6433 | +EXTRA_CFLAGS += -DCAN_BUILD_64 |
6434 | endif |
6435 | |
6436 | all_32: $(BINARIES_32) |
6437 | diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c |
6438 | index ec0f6b45ce8b..9c0325e1ea68 100644 |
6439 | --- a/tools/testing/selftests/x86/mpx-mini-test.c |
6440 | +++ b/tools/testing/selftests/x86/mpx-mini-test.c |
6441 | @@ -315,11 +315,39 @@ static inline void *__si_bounds_upper(siginfo_t *si) |
6442 | return si->si_upper; |
6443 | } |
6444 | #else |
6445 | + |
6446 | +/* |
6447 | + * This deals with old version of _sigfault in some distros: |
6448 | + * |
6449 | + |
6450 | +old _sigfault: |
6451 | + struct { |
6452 | + void *si_addr; |
6453 | + } _sigfault; |
6454 | + |
6455 | +new _sigfault: |
6456 | + struct { |
6457 | + void __user *_addr; |
6458 | + int _trapno; |
6459 | + short _addr_lsb; |
6460 | + union { |
6461 | + struct { |
6462 | + void __user *_lower; |
6463 | + void __user *_upper; |
6464 | + } _addr_bnd; |
6465 | + __u32 _pkey; |
6466 | + }; |
6467 | + } _sigfault; |
6468 | + * |
6469 | + */ |
6470 | + |
6471 | static inline void **__si_bounds_hack(siginfo_t *si) |
6472 | { |
6473 | void *sigfault = &si->_sifields._sigfault; |
6474 | void *end_sigfault = sigfault + sizeof(si->_sifields._sigfault); |
6475 | - void **__si_lower = end_sigfault; |
6476 | + int *trapno = (int*)end_sigfault; |
6477 | + /* skip _trapno and _addr_lsb */ |
6478 | + void **__si_lower = (void**)(trapno + 2); |
6479 | |
6480 | return __si_lower; |
6481 | } |
6482 | @@ -331,7 +359,7 @@ static inline void *__si_bounds_lower(siginfo_t *si) |
6483 | |
6484 | static inline void *__si_bounds_upper(siginfo_t *si) |
6485 | { |
6486 | - return (*__si_bounds_hack(si)) + sizeof(void *); |
6487 | + return *(__si_bounds_hack(si) + 1); |
6488 | } |
6489 | #endif |
6490 | |
6491 | diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c |
6492 | index bc1b0735bb50..f15aa5a76fe3 100644 |
6493 | --- a/tools/testing/selftests/x86/protection_keys.c |
6494 | +++ b/tools/testing/selftests/x86/protection_keys.c |
6495 | @@ -393,34 +393,6 @@ pid_t fork_lazy_child(void) |
6496 | return forkret; |
6497 | } |
6498 | |
6499 | -void davecmp(void *_a, void *_b, int len) |
6500 | -{ |
6501 | - int i; |
6502 | - unsigned long *a = _a; |
6503 | - unsigned long *b = _b; |
6504 | - |
6505 | - for (i = 0; i < len / sizeof(*a); i++) { |
6506 | - if (a[i] == b[i]) |
6507 | - continue; |
6508 | - |
6509 | - dprintf3("[%3d]: a: %016lx b: %016lx\n", i, a[i], b[i]); |
6510 | - } |
6511 | -} |
6512 | - |
6513 | -void dumpit(char *f) |
6514 | -{ |
6515 | - int fd = open(f, O_RDONLY); |
6516 | - char buf[100]; |
6517 | - int nr_read; |
6518 | - |
6519 | - dprintf2("maps fd: %d\n", fd); |
6520 | - do { |
6521 | - nr_read = read(fd, &buf[0], sizeof(buf)); |
6522 | - write(1, buf, nr_read); |
6523 | - } while (nr_read > 0); |
6524 | - close(fd); |
6525 | -} |
6526 | - |
6527 | #define PKEY_DISABLE_ACCESS 0x1 |
6528 | #define PKEY_DISABLE_WRITE 0x2 |
6529 | |
6530 | diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c |
6531 | index a48da95c18fd..ddfdd635de16 100644 |
6532 | --- a/tools/testing/selftests/x86/single_step_syscall.c |
6533 | +++ b/tools/testing/selftests/x86/single_step_syscall.c |
6534 | @@ -119,7 +119,9 @@ static void check_result(void) |
6535 | |
6536 | int main() |
6537 | { |
6538 | +#ifdef CAN_BUILD_32 |
6539 | int tmp; |
6540 | +#endif |
6541 | |
6542 | sethandler(SIGTRAP, sigtrap, 0); |
6543 | |
6544 | @@ -139,12 +141,13 @@ int main() |
6545 | : : "c" (post_nop) : "r11"); |
6546 | check_result(); |
6547 | #endif |
6548 | - |
6549 | +#ifdef CAN_BUILD_32 |
6550 | printf("[RUN]\tSet TF and check int80\n"); |
6551 | set_eflags(get_eflags() | X86_EFLAGS_TF); |
6552 | asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid) |
6553 | : INT80_CLOBBERS); |
6554 | check_result(); |
6555 | +#endif |
6556 | |
6557 | /* |
6558 | * This test is particularly interesting if fast syscalls use |
6559 | diff --git a/tools/testing/selftests/x86/test_mremap_vdso.c b/tools/testing/selftests/x86/test_mremap_vdso.c |
6560 | index bf0d687c7db7..64f11c8d9b76 100644 |
6561 | --- a/tools/testing/selftests/x86/test_mremap_vdso.c |
6562 | +++ b/tools/testing/selftests/x86/test_mremap_vdso.c |
6563 | @@ -90,8 +90,12 @@ int main(int argc, char **argv, char **envp) |
6564 | vdso_size += PAGE_SIZE; |
6565 | } |
6566 | |
6567 | +#ifdef __i386__ |
6568 | /* Glibc is likely to explode now - exit with raw syscall */ |
6569 | asm volatile ("int $0x80" : : "a" (__NR_exit), "b" (!!ret)); |
6570 | +#else /* __x86_64__ */ |
6571 | + syscall(SYS_exit, ret); |
6572 | +#endif |
6573 | } else { |
6574 | int status; |
6575 | |
6576 | diff --git a/tools/testing/selftests/x86/test_vdso.c b/tools/testing/selftests/x86/test_vdso.c |
6577 | index 29973cde06d3..235259011704 100644 |
6578 | --- a/tools/testing/selftests/x86/test_vdso.c |
6579 | +++ b/tools/testing/selftests/x86/test_vdso.c |
6580 | @@ -26,20 +26,59 @@ |
6581 | # endif |
6582 | #endif |
6583 | |
6584 | +/* max length of lines in /proc/self/maps - anything longer is skipped here */ |
6585 | +#define MAPS_LINE_LEN 128 |
6586 | + |
6587 | int nerrs = 0; |
6588 | |
6589 | +typedef long (*getcpu_t)(unsigned *, unsigned *, void *); |
6590 | + |
6591 | +getcpu_t vgetcpu; |
6592 | +getcpu_t vdso_getcpu; |
6593 | + |
6594 | +static void *vsyscall_getcpu(void) |
6595 | +{ |
6596 | #ifdef __x86_64__ |
6597 | -# define VSYS(x) (x) |
6598 | + FILE *maps; |
6599 | + char line[MAPS_LINE_LEN]; |
6600 | + bool found = false; |
6601 | + |
6602 | + maps = fopen("/proc/self/maps", "r"); |
6603 | + if (!maps) /* might still be present, but ignore it here, as we test vDSO not vsyscall */ |
6604 | + return NULL; |
6605 | + |
6606 | + while (fgets(line, MAPS_LINE_LEN, maps)) { |
6607 | + char r, x; |
6608 | + void *start, *end; |
6609 | + char name[MAPS_LINE_LEN]; |
6610 | + |
6611 | + /* sscanf() is safe here as strlen(name) >= strlen(line) */ |
6612 | + if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s", |
6613 | + &start, &end, &r, &x, name) != 5) |
6614 | + continue; |
6615 | + |
6616 | + if (strcmp(name, "[vsyscall]")) |
6617 | + continue; |
6618 | + |
6619 | + /* assume entries are OK, as we test vDSO here not vsyscall */ |
6620 | + found = true; |
6621 | + break; |
6622 | + } |
6623 | + |
6624 | + fclose(maps); |
6625 | + |
6626 | + if (!found) { |
6627 | + printf("Warning: failed to find vsyscall getcpu\n"); |
6628 | + return NULL; |
6629 | + } |
6630 | + return (void *) (0xffffffffff600800); |
6631 | #else |
6632 | -# define VSYS(x) 0 |
6633 | + return NULL; |
6634 | #endif |
6635 | +} |
6636 | |
6637 | -typedef long (*getcpu_t)(unsigned *, unsigned *, void *); |
6638 | - |
6639 | -const getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800); |
6640 | -getcpu_t vdso_getcpu; |
6641 | |
6642 | -void fill_function_pointers() |
6643 | +static void fill_function_pointers() |
6644 | { |
6645 | void *vdso = dlopen("linux-vdso.so.1", |
6646 | RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD); |
6647 | @@ -54,6 +93,8 @@ void fill_function_pointers() |
6648 | vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu"); |
6649 | if (!vdso_getcpu) |
6650 | printf("Warning: failed to find getcpu in vDSO\n"); |
6651 | + |
6652 | + vgetcpu = (getcpu_t) vsyscall_getcpu(); |
6653 | } |
6654 | |
6655 | static long sys_getcpu(unsigned * cpu, unsigned * node, |
6656 | diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c |
6657 | index 7a744fa7b786..be81621446f0 100644 |
6658 | --- a/tools/testing/selftests/x86/test_vsyscall.c |
6659 | +++ b/tools/testing/selftests/x86/test_vsyscall.c |
6660 | @@ -33,6 +33,9 @@ |
6661 | # endif |
6662 | #endif |
6663 | |
6664 | +/* max length of lines in /proc/self/maps - anything longer is skipped here */ |
6665 | +#define MAPS_LINE_LEN 128 |
6666 | + |
6667 | static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), |
6668 | int flags) |
6669 | { |
6670 | @@ -98,7 +101,7 @@ static int init_vsys(void) |
6671 | #ifdef __x86_64__ |
6672 | int nerrs = 0; |
6673 | FILE *maps; |
6674 | - char line[128]; |
6675 | + char line[MAPS_LINE_LEN]; |
6676 | bool found = false; |
6677 | |
6678 | maps = fopen("/proc/self/maps", "r"); |
6679 | @@ -108,10 +111,12 @@ static int init_vsys(void) |
6680 | return 0; |
6681 | } |
6682 | |
6683 | - while (fgets(line, sizeof(line), maps)) { |
6684 | + while (fgets(line, MAPS_LINE_LEN, maps)) { |
6685 | char r, x; |
6686 | void *start, *end; |
6687 | - char name[128]; |
6688 | + char name[MAPS_LINE_LEN]; |
6689 | + |
6690 | + /* sscanf() is safe here as strlen(name) >= strlen(line) */ |
6691 | if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s", |
6692 | &start, &end, &r, &x, name) != 5) |
6693 | continue; |