Contents of /trunk/kernel-magellan/patches-5.0/0101-5.0.2-all-fixes.patch
Parent Directory | Revision Log
Revision 3328 -
(show annotations)
(download)
Fri Apr 26 12:20:07 2019 UTC (5 years, 5 months ago) by niro
File size: 37112 byte(s)
-linux-5.0.2
1 | diff --git a/Makefile b/Makefile |
2 | index 3cd7163fe164..bb2f7664594a 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 5 |
8 | PATCHLEVEL = 0 |
9 | -SUBLEVEL = 1 |
10 | +SUBLEVEL = 2 |
11 | EXTRAVERSION = |
12 | NAME = Shy Crocodile |
13 | |
14 | diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi |
15 | index 608d17454179..5892a9f7622f 100644 |
16 | --- a/arch/arm/boot/dts/exynos3250.dtsi |
17 | +++ b/arch/arm/boot/dts/exynos3250.dtsi |
18 | @@ -168,6 +168,9 @@ |
19 | interrupt-controller; |
20 | #interrupt-cells = <3>; |
21 | interrupt-parent = <&gic>; |
22 | + clock-names = "clkout8"; |
23 | + clocks = <&cmu CLK_FIN_PLL>; |
24 | + #clock-cells = <1>; |
25 | }; |
26 | |
27 | mipi_phy: video-phy { |
28 | diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi |
29 | index 3a9eb1e91c45..8a64c4e8c474 100644 |
30 | --- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi |
31 | +++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi |
32 | @@ -49,7 +49,7 @@ |
33 | }; |
34 | |
35 | emmc_pwrseq: pwrseq { |
36 | - pinctrl-0 = <&sd1_cd>; |
37 | + pinctrl-0 = <&emmc_rstn>; |
38 | pinctrl-names = "default"; |
39 | compatible = "mmc-pwrseq-emmc"; |
40 | reset-gpios = <&gpk1 2 GPIO_ACTIVE_LOW>; |
41 | @@ -165,12 +165,6 @@ |
42 | cpu0-supply = <&buck2_reg>; |
43 | }; |
44 | |
45 | -/* RSTN signal for eMMC */ |
46 | -&sd1_cd { |
47 | - samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; |
48 | - samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>; |
49 | -}; |
50 | - |
51 | &pinctrl_1 { |
52 | gpio_power_key: power_key { |
53 | samsung,pins = "gpx1-3"; |
54 | @@ -188,6 +182,11 @@ |
55 | samsung,pins = "gpx3-7"; |
56 | samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>; |
57 | }; |
58 | + |
59 | + emmc_rstn: emmc-rstn { |
60 | + samsung,pins = "gpk1-2"; |
61 | + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>; |
62 | + }; |
63 | }; |
64 | |
65 | &ehci { |
66 | diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi |
67 | index bf09eab90f8a..6bf3661293ee 100644 |
68 | --- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi |
69 | +++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi |
70 | @@ -468,7 +468,7 @@ |
71 | buck8_reg: BUCK8 { |
72 | regulator-name = "vdd_1.8v_ldo"; |
73 | regulator-min-microvolt = <800000>; |
74 | - regulator-max-microvolt = <1500000>; |
75 | + regulator-max-microvolt = <2000000>; |
76 | regulator-always-on; |
77 | regulator-boot-on; |
78 | }; |
79 | diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts |
80 | index 610235028cc7..c14205cd6bf5 100644 |
81 | --- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts |
82 | +++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts |
83 | @@ -118,6 +118,7 @@ |
84 | reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>; |
85 | clocks = <&pmic>; |
86 | clock-names = "ext_clock"; |
87 | + post-power-on-delay-ms = <10>; |
88 | power-off-delay-us = <10>; |
89 | }; |
90 | |
91 | @@ -300,7 +301,6 @@ |
92 | |
93 | dwmmc_0: dwmmc0@f723d000 { |
94 | cap-mmc-highspeed; |
95 | - mmc-hs200-1_8v; |
96 | non-removable; |
97 | bus-width = <0x8>; |
98 | vmmc-supply = <&ldo19>; |
99 | diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts |
100 | index 13a0a028df98..e5699d0d91e4 100644 |
101 | --- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts |
102 | +++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts |
103 | @@ -101,6 +101,7 @@ |
104 | sdio_pwrseq: sdio-pwrseq { |
105 | compatible = "mmc-pwrseq-simple"; |
106 | reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */ |
107 | + post-power-on-delay-ms = <10>; |
108 | }; |
109 | }; |
110 | |
111 | diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c |
112 | index b684f0294f35..e2b1447192a8 100644 |
113 | --- a/arch/x86/events/core.c |
114 | +++ b/arch/x86/events/core.c |
115 | @@ -1995,7 +1995,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu) |
116 | */ |
117 | static void free_fake_cpuc(struct cpu_hw_events *cpuc) |
118 | { |
119 | - kfree(cpuc->shared_regs); |
120 | + intel_cpuc_finish(cpuc); |
121 | kfree(cpuc); |
122 | } |
123 | |
124 | @@ -2007,14 +2007,11 @@ static struct cpu_hw_events *allocate_fake_cpuc(void) |
125 | cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL); |
126 | if (!cpuc) |
127 | return ERR_PTR(-ENOMEM); |
128 | - |
129 | - /* only needed, if we have extra_regs */ |
130 | - if (x86_pmu.extra_regs) { |
131 | - cpuc->shared_regs = allocate_shared_regs(cpu); |
132 | - if (!cpuc->shared_regs) |
133 | - goto error; |
134 | - } |
135 | cpuc->is_fake = 1; |
136 | + |
137 | + if (intel_cpuc_prepare(cpuc, cpu)) |
138 | + goto error; |
139 | + |
140 | return cpuc; |
141 | error: |
142 | free_fake_cpuc(cpuc); |
143 | diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c |
144 | index 730978dff63f..dadb8f7e5a0d 100644 |
145 | --- a/arch/x86/events/intel/core.c |
146 | +++ b/arch/x86/events/intel/core.c |
147 | @@ -1999,6 +1999,39 @@ static void intel_pmu_nhm_enable_all(int added) |
148 | intel_pmu_enable_all(added); |
149 | } |
150 | |
151 | +static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on) |
152 | +{ |
153 | + u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0; |
154 | + |
155 | + if (cpuc->tfa_shadow != val) { |
156 | + cpuc->tfa_shadow = val; |
157 | + wrmsrl(MSR_TSX_FORCE_ABORT, val); |
158 | + } |
159 | +} |
160 | + |
161 | +static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) |
162 | +{ |
163 | + /* |
164 | + * We're going to use PMC3, make sure TFA is set before we touch it. |
165 | + */ |
166 | + if (cntr == 3 && !cpuc->is_fake) |
167 | + intel_set_tfa(cpuc, true); |
168 | +} |
169 | + |
170 | +static void intel_tfa_pmu_enable_all(int added) |
171 | +{ |
172 | + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
173 | + |
174 | + /* |
175 | + * If we find PMC3 is no longer used when we enable the PMU, we can |
176 | + * clear TFA. |
177 | + */ |
178 | + if (!test_bit(3, cpuc->active_mask)) |
179 | + intel_set_tfa(cpuc, false); |
180 | + |
181 | + intel_pmu_enable_all(added); |
182 | +} |
183 | + |
184 | static void enable_counter_freeze(void) |
185 | { |
186 | update_debugctlmsr(get_debugctlmsr() | |
187 | @@ -2768,6 +2801,35 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc) |
188 | raw_spin_unlock(&excl_cntrs->lock); |
189 | } |
190 | |
191 | +static struct event_constraint * |
192 | +dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx) |
193 | +{ |
194 | + WARN_ON_ONCE(!cpuc->constraint_list); |
195 | + |
196 | + if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) { |
197 | + struct event_constraint *cx; |
198 | + |
199 | + /* |
200 | + * grab pre-allocated constraint entry |
201 | + */ |
202 | + cx = &cpuc->constraint_list[idx]; |
203 | + |
204 | + /* |
205 | + * initialize dynamic constraint |
206 | + * with static constraint |
207 | + */ |
208 | + *cx = *c; |
209 | + |
210 | + /* |
211 | + * mark constraint as dynamic |
212 | + */ |
213 | + cx->flags |= PERF_X86_EVENT_DYNAMIC; |
214 | + c = cx; |
215 | + } |
216 | + |
217 | + return c; |
218 | +} |
219 | + |
220 | static struct event_constraint * |
221 | intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, |
222 | int idx, struct event_constraint *c) |
223 | @@ -2798,27 +2860,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, |
224 | * only needed when constraint has not yet |
225 | * been cloned (marked dynamic) |
226 | */ |
227 | - if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) { |
228 | - struct event_constraint *cx; |
229 | - |
230 | - /* |
231 | - * grab pre-allocated constraint entry |
232 | - */ |
233 | - cx = &cpuc->constraint_list[idx]; |
234 | - |
235 | - /* |
236 | - * initialize dynamic constraint |
237 | - * with static constraint |
238 | - */ |
239 | - *cx = *c; |
240 | - |
241 | - /* |
242 | - * mark constraint as dynamic, so we |
243 | - * can free it later on |
244 | - */ |
245 | - cx->flags |= PERF_X86_EVENT_DYNAMIC; |
246 | - c = cx; |
247 | - } |
248 | + c = dyn_constraint(cpuc, c, idx); |
249 | |
250 | /* |
251 | * From here on, the constraint is dynamic. |
252 | @@ -3345,6 +3387,26 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx, |
253 | return c; |
254 | } |
255 | |
256 | +static bool allow_tsx_force_abort = true; |
257 | + |
258 | +static struct event_constraint * |
259 | +tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx, |
260 | + struct perf_event *event) |
261 | +{ |
262 | + struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event); |
263 | + |
264 | + /* |
265 | + * Without TFA we must not use PMC3. |
266 | + */ |
267 | + if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) { |
268 | + c = dyn_constraint(cpuc, c, idx); |
269 | + c->idxmsk64 &= ~(1ULL << 3); |
270 | + c->weight--; |
271 | + } |
272 | + |
273 | + return c; |
274 | +} |
275 | + |
276 | /* |
277 | * Broadwell: |
278 | * |
279 | @@ -3398,7 +3460,7 @@ ssize_t intel_event_sysfs_show(char *page, u64 config) |
280 | return x86_event_sysfs_show(page, config, event); |
281 | } |
282 | |
283 | -struct intel_shared_regs *allocate_shared_regs(int cpu) |
284 | +static struct intel_shared_regs *allocate_shared_regs(int cpu) |
285 | { |
286 | struct intel_shared_regs *regs; |
287 | int i; |
288 | @@ -3430,23 +3492,24 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu) |
289 | return c; |
290 | } |
291 | |
292 | -static int intel_pmu_cpu_prepare(int cpu) |
293 | -{ |
294 | - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); |
295 | |
296 | +int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) |
297 | +{ |
298 | if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) { |
299 | cpuc->shared_regs = allocate_shared_regs(cpu); |
300 | if (!cpuc->shared_regs) |
301 | goto err; |
302 | } |
303 | |
304 | - if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { |
305 | + if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) { |
306 | size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint); |
307 | |
308 | - cpuc->constraint_list = kzalloc(sz, GFP_KERNEL); |
309 | + cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); |
310 | if (!cpuc->constraint_list) |
311 | goto err_shared_regs; |
312 | + } |
313 | |
314 | + if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { |
315 | cpuc->excl_cntrs = allocate_excl_cntrs(cpu); |
316 | if (!cpuc->excl_cntrs) |
317 | goto err_constraint_list; |
318 | @@ -3468,6 +3531,11 @@ err: |
319 | return -ENOMEM; |
320 | } |
321 | |
322 | +static int intel_pmu_cpu_prepare(int cpu) |
323 | +{ |
324 | + return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu); |
325 | +} |
326 | + |
327 | static void flip_smm_bit(void *data) |
328 | { |
329 | unsigned long set = *(unsigned long *)data; |
330 | @@ -3542,9 +3610,8 @@ static void intel_pmu_cpu_starting(int cpu) |
331 | } |
332 | } |
333 | |
334 | -static void free_excl_cntrs(int cpu) |
335 | +static void free_excl_cntrs(struct cpu_hw_events *cpuc) |
336 | { |
337 | - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); |
338 | struct intel_excl_cntrs *c; |
339 | |
340 | c = cpuc->excl_cntrs; |
341 | @@ -3552,9 +3619,10 @@ static void free_excl_cntrs(int cpu) |
342 | if (c->core_id == -1 || --c->refcnt == 0) |
343 | kfree(c); |
344 | cpuc->excl_cntrs = NULL; |
345 | - kfree(cpuc->constraint_list); |
346 | - cpuc->constraint_list = NULL; |
347 | } |
348 | + |
349 | + kfree(cpuc->constraint_list); |
350 | + cpuc->constraint_list = NULL; |
351 | } |
352 | |
353 | static void intel_pmu_cpu_dying(int cpu) |
354 | @@ -3565,9 +3633,8 @@ static void intel_pmu_cpu_dying(int cpu) |
355 | disable_counter_freeze(); |
356 | } |
357 | |
358 | -static void intel_pmu_cpu_dead(int cpu) |
359 | +void intel_cpuc_finish(struct cpu_hw_events *cpuc) |
360 | { |
361 | - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); |
362 | struct intel_shared_regs *pc; |
363 | |
364 | pc = cpuc->shared_regs; |
365 | @@ -3577,7 +3644,12 @@ static void intel_pmu_cpu_dead(int cpu) |
366 | cpuc->shared_regs = NULL; |
367 | } |
368 | |
369 | - free_excl_cntrs(cpu); |
370 | + free_excl_cntrs(cpuc); |
371 | +} |
372 | + |
373 | +static void intel_pmu_cpu_dead(int cpu) |
374 | +{ |
375 | + intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu)); |
376 | } |
377 | |
378 | static void intel_pmu_sched_task(struct perf_event_context *ctx, |
379 | @@ -4070,8 +4142,11 @@ static struct attribute *intel_pmu_caps_attrs[] = { |
380 | NULL |
381 | }; |
382 | |
383 | +DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort); |
384 | + |
385 | static struct attribute *intel_pmu_attrs[] = { |
386 | &dev_attr_freeze_on_smi.attr, |
387 | + NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */ |
388 | NULL, |
389 | }; |
390 | |
391 | @@ -4564,6 +4639,15 @@ __init int intel_pmu_init(void) |
392 | tsx_attr = hsw_tsx_events_attrs; |
393 | intel_pmu_pebs_data_source_skl( |
394 | boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X); |
395 | + |
396 | + if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) { |
397 | + x86_pmu.flags |= PMU_FL_TFA; |
398 | + x86_pmu.get_event_constraints = tfa_get_event_constraints; |
399 | + x86_pmu.enable_all = intel_tfa_pmu_enable_all; |
400 | + x86_pmu.commit_scheduling = intel_tfa_commit_scheduling; |
401 | + intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr; |
402 | + } |
403 | + |
404 | pr_cont("Skylake events, "); |
405 | name = "skylake"; |
406 | break; |
407 | @@ -4715,7 +4799,7 @@ static __init int fixup_ht_bug(void) |
408 | hardlockup_detector_perf_restart(); |
409 | |
410 | for_each_online_cpu(c) |
411 | - free_excl_cntrs(c); |
412 | + free_excl_cntrs(&per_cpu(cpu_hw_events, c)); |
413 | |
414 | cpus_read_unlock(); |
415 | pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n"); |
416 | diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h |
417 | index d46fd6754d92..a345d079f876 100644 |
418 | --- a/arch/x86/events/perf_event.h |
419 | +++ b/arch/x86/events/perf_event.h |
420 | @@ -242,6 +242,11 @@ struct cpu_hw_events { |
421 | struct intel_excl_cntrs *excl_cntrs; |
422 | int excl_thread_id; /* 0 or 1 */ |
423 | |
424 | + /* |
425 | + * SKL TSX_FORCE_ABORT shadow |
426 | + */ |
427 | + u64 tfa_shadow; |
428 | + |
429 | /* |
430 | * AMD specific bits |
431 | */ |
432 | @@ -681,6 +686,7 @@ do { \ |
433 | #define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */ |
434 | #define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */ |
435 | #define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */ |
436 | +#define PMU_FL_TFA 0x20 /* deal with TSX force abort */ |
437 | |
438 | #define EVENT_VAR(_id) event_attr_##_id |
439 | #define EVENT_PTR(_id) &event_attr_##_id.attr.attr |
440 | @@ -889,7 +895,8 @@ struct event_constraint * |
441 | x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, |
442 | struct perf_event *event); |
443 | |
444 | -struct intel_shared_regs *allocate_shared_regs(int cpu); |
445 | +extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu); |
446 | +extern void intel_cpuc_finish(struct cpu_hw_events *cpuc); |
447 | |
448 | int intel_pmu_init(void); |
449 | |
450 | @@ -1025,9 +1032,13 @@ static inline int intel_pmu_init(void) |
451 | return 0; |
452 | } |
453 | |
454 | -static inline struct intel_shared_regs *allocate_shared_regs(int cpu) |
455 | +static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu) |
456 | +{ |
457 | + return 0; |
458 | +} |
459 | + |
460 | +static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc) |
461 | { |
462 | - return NULL; |
463 | } |
464 | |
465 | static inline int is_ht_workaround_enabled(void) |
466 | diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h |
467 | index 6d6122524711..981ff9479648 100644 |
468 | --- a/arch/x86/include/asm/cpufeatures.h |
469 | +++ b/arch/x86/include/asm/cpufeatures.h |
470 | @@ -344,6 +344,7 @@ |
471 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ |
472 | #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ |
473 | #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ |
474 | +#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ |
475 | #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ |
476 | #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ |
477 | #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ |
478 | diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h |
479 | index 8e40c2446fd1..ca5bc0eacb95 100644 |
480 | --- a/arch/x86/include/asm/msr-index.h |
481 | +++ b/arch/x86/include/asm/msr-index.h |
482 | @@ -666,6 +666,12 @@ |
483 | |
484 | #define MSR_IA32_TSC_DEADLINE 0x000006E0 |
485 | |
486 | + |
487 | +#define MSR_TSX_FORCE_ABORT 0x0000010F |
488 | + |
489 | +#define MSR_TFA_RTM_FORCE_ABORT_BIT 0 |
490 | +#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT) |
491 | + |
492 | /* P4/Xeon+ specific */ |
493 | #define MSR_IA32_MCG_EAX 0x00000180 |
494 | #define MSR_IA32_MCG_EBX 0x00000181 |
495 | diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c |
496 | index 30a5111ae5fd..527e69b12002 100644 |
497 | --- a/arch/x86/pci/fixup.c |
498 | +++ b/arch/x86/pci/fixup.c |
499 | @@ -635,6 +635,22 @@ static void quirk_no_aersid(struct pci_dev *pdev) |
500 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, |
501 | PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid); |
502 | |
503 | +static void quirk_intel_th_dnv(struct pci_dev *dev) |
504 | +{ |
505 | + struct resource *r = &dev->resource[4]; |
506 | + |
507 | + /* |
508 | + * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which |
509 | + * appears to be 4 MB in reality. |
510 | + */ |
511 | + if (r->end == r->start + 0x7ff) { |
512 | + r->start = 0; |
513 | + r->end = 0x3fffff; |
514 | + r->flags |= IORESOURCE_UNSET; |
515 | + } |
516 | +} |
517 | +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv); |
518 | + |
519 | #ifdef CONFIG_PHYS_ADDR_T_64BIT |
520 | |
521 | #define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8) |
522 | diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c |
523 | index 6bc8e6640d71..c51462f5aa1e 100644 |
524 | --- a/drivers/firmware/iscsi_ibft.c |
525 | +++ b/drivers/firmware/iscsi_ibft.c |
526 | @@ -542,6 +542,7 @@ static umode_t __init ibft_check_tgt_for(void *data, int type) |
527 | case ISCSI_BOOT_TGT_NIC_ASSOC: |
528 | case ISCSI_BOOT_TGT_CHAP_TYPE: |
529 | rc = S_IRUGO; |
530 | + break; |
531 | case ISCSI_BOOT_TGT_NAME: |
532 | if (tgt->tgt_name_len) |
533 | rc = S_IRUGO; |
534 | diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c |
535 | index 225ae6980182..628ef617bb2f 100644 |
536 | --- a/drivers/input/mouse/elan_i2c_core.c |
537 | +++ b/drivers/input/mouse/elan_i2c_core.c |
538 | @@ -1337,6 +1337,7 @@ static const struct acpi_device_id elan_acpi_id[] = { |
539 | { "ELAN0000", 0 }, |
540 | { "ELAN0100", 0 }, |
541 | { "ELAN0600", 0 }, |
542 | + { "ELAN0601", 0 }, |
543 | { "ELAN0602", 0 }, |
544 | { "ELAN0605", 0 }, |
545 | { "ELAN0608", 0 }, |
546 | diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c |
547 | index 38bfaca48eab..150f9eecaca7 100644 |
548 | --- a/drivers/input/tablet/wacom_serial4.c |
549 | +++ b/drivers/input/tablet/wacom_serial4.c |
550 | @@ -187,6 +187,7 @@ enum { |
551 | MODEL_DIGITIZER_II = 0x5544, /* UD */ |
552 | MODEL_GRAPHIRE = 0x4554, /* ET */ |
553 | MODEL_PENPARTNER = 0x4354, /* CT */ |
554 | + MODEL_ARTPAD_II = 0x4B54, /* KT */ |
555 | }; |
556 | |
557 | static void wacom_handle_model_response(struct wacom *wacom) |
558 | @@ -245,6 +246,7 @@ static void wacom_handle_model_response(struct wacom *wacom) |
559 | wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL; |
560 | break; |
561 | |
562 | + case MODEL_ARTPAD_II: |
563 | case MODEL_DIGITIZER_II: |
564 | wacom->dev->name = "Wacom Digitizer II"; |
565 | wacom->dev->id.version = MODEL_DIGITIZER_II; |
566 | diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c |
567 | index 66a174979b3c..81745644f720 100644 |
568 | --- a/drivers/media/rc/rc-main.c |
569 | +++ b/drivers/media/rc/rc-main.c |
570 | @@ -274,6 +274,7 @@ static unsigned int ir_update_mapping(struct rc_dev *dev, |
571 | unsigned int new_keycode) |
572 | { |
573 | int old_keycode = rc_map->scan[index].keycode; |
574 | + int i; |
575 | |
576 | /* Did the user wish to remove the mapping? */ |
577 | if (new_keycode == KEY_RESERVED || new_keycode == KEY_UNKNOWN) { |
578 | @@ -288,9 +289,20 @@ static unsigned int ir_update_mapping(struct rc_dev *dev, |
579 | old_keycode == KEY_RESERVED ? "New" : "Replacing", |
580 | rc_map->scan[index].scancode, new_keycode); |
581 | rc_map->scan[index].keycode = new_keycode; |
582 | + __set_bit(new_keycode, dev->input_dev->keybit); |
583 | } |
584 | |
585 | if (old_keycode != KEY_RESERVED) { |
586 | + /* A previous mapping was updated... */ |
587 | + __clear_bit(old_keycode, dev->input_dev->keybit); |
588 | + /* ... but another scancode might use the same keycode */ |
589 | + for (i = 0; i < rc_map->len; i++) { |
590 | + if (rc_map->scan[i].keycode == old_keycode) { |
591 | + __set_bit(old_keycode, dev->input_dev->keybit); |
592 | + break; |
593 | + } |
594 | + } |
595 | + |
596 | /* Possibly shrink the keytable, failure is not a problem */ |
597 | ir_resize_table(dev, rc_map, GFP_ATOMIC); |
598 | } |
599 | @@ -1750,7 +1762,6 @@ static int rc_prepare_rx_device(struct rc_dev *dev) |
600 | set_bit(EV_REP, dev->input_dev->evbit); |
601 | set_bit(EV_MSC, dev->input_dev->evbit); |
602 | set_bit(MSC_SCAN, dev->input_dev->mscbit); |
603 | - bitmap_fill(dev->input_dev->keybit, KEY_CNT); |
604 | |
605 | /* Pointer/mouse events */ |
606 | set_bit(EV_REL, dev->input_dev->evbit); |
607 | diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c |
608 | index b62cbd800111..33a22c016456 100644 |
609 | --- a/drivers/media/usb/uvc/uvc_driver.c |
610 | +++ b/drivers/media/usb/uvc/uvc_driver.c |
611 | @@ -1106,11 +1106,19 @@ static int uvc_parse_standard_control(struct uvc_device *dev, |
612 | return -EINVAL; |
613 | } |
614 | |
615 | - /* Make sure the terminal type MSB is not null, otherwise it |
616 | - * could be confused with a unit. |
617 | + /* |
618 | + * Reject invalid terminal types that would cause issues: |
619 | + * |
620 | + * - The high byte must be non-zero, otherwise it would be |
621 | + * confused with a unit. |
622 | + * |
623 | + * - Bit 15 must be 0, as we use it internally as a terminal |
624 | + * direction flag. |
625 | + * |
626 | + * Other unknown types are accepted. |
627 | */ |
628 | type = get_unaligned_le16(&buffer[4]); |
629 | - if ((type & 0xff00) == 0) { |
630 | + if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) { |
631 | uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol " |
632 | "interface %d INPUT_TERMINAL %d has invalid " |
633 | "type 0x%04x, skipping\n", udev->devnum, |
634 | diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c |
635 | index c070a9e51ebf..fae572b38416 100644 |
636 | --- a/drivers/net/wireless/ath/ath9k/init.c |
637 | +++ b/drivers/net/wireless/ath/ath9k/init.c |
638 | @@ -636,15 +636,15 @@ static int ath9k_of_init(struct ath_softc *sc) |
639 | ret = ath9k_eeprom_request(sc, eeprom_name); |
640 | if (ret) |
641 | return ret; |
642 | + |
643 | + ah->ah_flags &= ~AH_USE_EEPROM; |
644 | + ah->ah_flags |= AH_NO_EEP_SWAP; |
645 | } |
646 | |
647 | mac = of_get_mac_address(np); |
648 | if (mac) |
649 | ether_addr_copy(common->macaddr, mac); |
650 | |
651 | - ah->ah_flags &= ~AH_USE_EEPROM; |
652 | - ah->ah_flags |= AH_NO_EEP_SWAP; |
653 | - |
654 | return 0; |
655 | } |
656 | |
657 | diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c |
658 | index 0dbcf429089f..1a8b85051b1b 100644 |
659 | --- a/drivers/pci/pcie/pme.c |
660 | +++ b/drivers/pci/pcie/pme.c |
661 | @@ -432,31 +432,6 @@ static void pcie_pme_remove(struct pcie_device *srv) |
662 | kfree(get_service_data(srv)); |
663 | } |
664 | |
665 | -static int pcie_pme_runtime_suspend(struct pcie_device *srv) |
666 | -{ |
667 | - struct pcie_pme_service_data *data = get_service_data(srv); |
668 | - |
669 | - spin_lock_irq(&data->lock); |
670 | - pcie_pme_interrupt_enable(srv->port, false); |
671 | - pcie_clear_root_pme_status(srv->port); |
672 | - data->noirq = true; |
673 | - spin_unlock_irq(&data->lock); |
674 | - |
675 | - return 0; |
676 | -} |
677 | - |
678 | -static int pcie_pme_runtime_resume(struct pcie_device *srv) |
679 | -{ |
680 | - struct pcie_pme_service_data *data = get_service_data(srv); |
681 | - |
682 | - spin_lock_irq(&data->lock); |
683 | - pcie_pme_interrupt_enable(srv->port, true); |
684 | - data->noirq = false; |
685 | - spin_unlock_irq(&data->lock); |
686 | - |
687 | - return 0; |
688 | -} |
689 | - |
690 | static struct pcie_port_service_driver pcie_pme_driver = { |
691 | .name = "pcie_pme", |
692 | .port_type = PCI_EXP_TYPE_ROOT_PORT, |
693 | @@ -464,8 +439,6 @@ static struct pcie_port_service_driver pcie_pme_driver = { |
694 | |
695 | .probe = pcie_pme_probe, |
696 | .suspend = pcie_pme_suspend, |
697 | - .runtime_suspend = pcie_pme_runtime_suspend, |
698 | - .runtime_resume = pcie_pme_runtime_resume, |
699 | .resume = pcie_pme_resume, |
700 | .remove = pcie_pme_remove, |
701 | }; |
702 | diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c |
703 | index d5a6aa9676c8..a3adc954f40f 100644 |
704 | --- a/drivers/scsi/aacraid/commsup.c |
705 | +++ b/drivers/scsi/aacraid/commsup.c |
706 | @@ -1303,8 +1303,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) |
707 | ADD : DELETE; |
708 | break; |
709 | } |
710 | - case AifBuManagerEvent: |
711 | - aac_handle_aif_bu(dev, aifcmd); |
712 | + break; |
713 | + case AifBuManagerEvent: |
714 | + aac_handle_aif_bu(dev, aifcmd); |
715 | break; |
716 | } |
717 | |
718 | diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c |
719 | index 5596c52e246d..ecc51ef0753f 100644 |
720 | --- a/drivers/staging/erofs/namei.c |
721 | +++ b/drivers/staging/erofs/namei.c |
722 | @@ -15,74 +15,77 @@ |
723 | |
724 | #include <trace/events/erofs.h> |
725 | |
726 | -/* based on the value of qn->len is accurate */ |
727 | -static inline int dirnamecmp(struct qstr *qn, |
728 | - struct qstr *qd, unsigned int *matched) |
729 | +struct erofs_qstr { |
730 | + const unsigned char *name; |
731 | + const unsigned char *end; |
732 | +}; |
733 | + |
734 | +/* based on the end of qn is accurate and it must have the trailing '\0' */ |
735 | +static inline int dirnamecmp(const struct erofs_qstr *qn, |
736 | + const struct erofs_qstr *qd, |
737 | + unsigned int *matched) |
738 | { |
739 | - unsigned int i = *matched, len = min(qn->len, qd->len); |
740 | -loop: |
741 | - if (unlikely(i >= len)) { |
742 | - *matched = i; |
743 | - if (qn->len < qd->len) { |
744 | - /* |
745 | - * actually (qn->len == qd->len) |
746 | - * when qd->name[i] == '\0' |
747 | - */ |
748 | - return qd->name[i] == '\0' ? 0 : -1; |
749 | + unsigned int i = *matched; |
750 | + |
751 | + /* |
752 | + * on-disk error, let's only BUG_ON in the debugging mode. |
753 | + * otherwise, it will return 1 to just skip the invalid name |
754 | + * and go on (in consideration of the lookup performance). |
755 | + */ |
756 | + DBG_BUGON(qd->name > qd->end); |
757 | + |
758 | + /* qd could not have trailing '\0' */ |
759 | + /* However it is absolutely safe if < qd->end */ |
760 | + while (qd->name + i < qd->end && qd->name[i] != '\0') { |
761 | + if (qn->name[i] != qd->name[i]) { |
762 | + *matched = i; |
763 | + return qn->name[i] > qd->name[i] ? 1 : -1; |
764 | } |
765 | - return (qn->len > qd->len); |
766 | + ++i; |
767 | } |
768 | - |
769 | - if (qn->name[i] != qd->name[i]) { |
770 | - *matched = i; |
771 | - return qn->name[i] > qd->name[i] ? 1 : -1; |
772 | - } |
773 | - |
774 | - ++i; |
775 | - goto loop; |
776 | + *matched = i; |
777 | + /* See comments in __d_alloc on the terminating NUL character */ |
778 | + return qn->name[i] == '\0' ? 0 : 1; |
779 | } |
780 | |
781 | -static struct erofs_dirent *find_target_dirent( |
782 | - struct qstr *name, |
783 | - u8 *data, int maxsize) |
784 | +#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1)) |
785 | + |
786 | +static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name, |
787 | + u8 *data, |
788 | + unsigned int dirblksize, |
789 | + const int ndirents) |
790 | { |
791 | - unsigned int ndirents, head, back; |
792 | + int head, back; |
793 | unsigned int startprfx, endprfx; |
794 | struct erofs_dirent *const de = (struct erofs_dirent *)data; |
795 | |
796 | - /* make sure that maxsize is valid */ |
797 | - BUG_ON(maxsize < sizeof(struct erofs_dirent)); |
798 | - |
799 | - ndirents = le16_to_cpu(de->nameoff) / sizeof(*de); |
800 | - |
801 | - /* corrupted dir (may be unnecessary...) */ |
802 | - BUG_ON(!ndirents); |
803 | - |
804 | - head = 0; |
805 | + /* since the 1st dirent has been evaluated previously */ |
806 | + head = 1; |
807 | back = ndirents - 1; |
808 | startprfx = endprfx = 0; |
809 | |
810 | while (head <= back) { |
811 | - unsigned int mid = head + (back - head) / 2; |
812 | - unsigned int nameoff = le16_to_cpu(de[mid].nameoff); |
813 | + const int mid = head + (back - head) / 2; |
814 | + const int nameoff = nameoff_from_disk(de[mid].nameoff, |
815 | + dirblksize); |
816 | unsigned int matched = min(startprfx, endprfx); |
817 | - |
818 | - struct qstr dname = QSTR_INIT(data + nameoff, |
819 | - unlikely(mid >= ndirents - 1) ? |
820 | - maxsize - nameoff : |
821 | - le16_to_cpu(de[mid + 1].nameoff) - nameoff); |
822 | + struct erofs_qstr dname = { |
823 | + .name = data + nameoff, |
824 | + .end = unlikely(mid >= ndirents - 1) ? |
825 | + data + dirblksize : |
826 | + data + nameoff_from_disk(de[mid + 1].nameoff, |
827 | + dirblksize) |
828 | + }; |
829 | |
830 | /* string comparison without already matched prefix */ |
831 | int ret = dirnamecmp(name, &dname, &matched); |
832 | |
833 | - if (unlikely(!ret)) |
834 | + if (unlikely(!ret)) { |
835 | return de + mid; |
836 | - else if (ret > 0) { |
837 | + } else if (ret > 0) { |
838 | head = mid + 1; |
839 | startprfx = matched; |
840 | - } else if (unlikely(mid < 1)) /* fix "mid" overflow */ |
841 | - break; |
842 | - else { |
843 | + } else { |
844 | back = mid - 1; |
845 | endprfx = matched; |
846 | } |
847 | @@ -91,12 +94,12 @@ static struct erofs_dirent *find_target_dirent( |
848 | return ERR_PTR(-ENOENT); |
849 | } |
850 | |
851 | -static struct page *find_target_block_classic( |
852 | - struct inode *dir, |
853 | - struct qstr *name, int *_diff) |
854 | +static struct page *find_target_block_classic(struct inode *dir, |
855 | + struct erofs_qstr *name, |
856 | + int *_ndirents) |
857 | { |
858 | unsigned int startprfx, endprfx; |
859 | - unsigned int head, back; |
860 | + int head, back; |
861 | struct address_space *const mapping = dir->i_mapping; |
862 | struct page *candidate = ERR_PTR(-ENOENT); |
863 | |
864 | @@ -105,41 +108,43 @@ static struct page *find_target_block_classic( |
865 | back = inode_datablocks(dir) - 1; |
866 | |
867 | while (head <= back) { |
868 | - unsigned int mid = head + (back - head) / 2; |
869 | + const int mid = head + (back - head) / 2; |
870 | struct page *page = read_mapping_page(mapping, mid, NULL); |
871 | |
872 | - if (IS_ERR(page)) { |
873 | -exact_out: |
874 | - if (!IS_ERR(candidate)) /* valid candidate */ |
875 | - put_page(candidate); |
876 | - return page; |
877 | - } else { |
878 | - int diff; |
879 | - unsigned int ndirents, matched; |
880 | - struct qstr dname; |
881 | + if (!IS_ERR(page)) { |
882 | struct erofs_dirent *de = kmap_atomic(page); |
883 | - unsigned int nameoff = le16_to_cpu(de->nameoff); |
884 | - |
885 | - ndirents = nameoff / sizeof(*de); |
886 | + const int nameoff = nameoff_from_disk(de->nameoff, |
887 | + EROFS_BLKSIZ); |
888 | + const int ndirents = nameoff / sizeof(*de); |
889 | + int diff; |
890 | + unsigned int matched; |
891 | + struct erofs_qstr dname; |
892 | |
893 | - /* corrupted dir (should have one entry at least) */ |
894 | - BUG_ON(!ndirents || nameoff > PAGE_SIZE); |
895 | + if (unlikely(!ndirents)) { |
896 | + DBG_BUGON(1); |
897 | + kunmap_atomic(de); |
898 | + put_page(page); |
899 | + page = ERR_PTR(-EIO); |
900 | + goto out; |
901 | + } |
902 | |
903 | matched = min(startprfx, endprfx); |
904 | |
905 | dname.name = (u8 *)de + nameoff; |
906 | - dname.len = ndirents == 1 ? |
907 | - /* since the rest of the last page is 0 */ |
908 | - EROFS_BLKSIZ - nameoff |
909 | - : le16_to_cpu(de[1].nameoff) - nameoff; |
910 | + if (ndirents == 1) |
911 | + dname.end = (u8 *)de + EROFS_BLKSIZ; |
912 | + else |
913 | + dname.end = (u8 *)de + |
914 | + nameoff_from_disk(de[1].nameoff, |
915 | + EROFS_BLKSIZ); |
916 | |
917 | /* string comparison without already matched prefix */ |
918 | diff = dirnamecmp(name, &dname, &matched); |
919 | kunmap_atomic(de); |
920 | |
921 | if (unlikely(!diff)) { |
922 | - *_diff = 0; |
923 | - goto exact_out; |
924 | + *_ndirents = 0; |
925 | + goto out; |
926 | } else if (diff > 0) { |
927 | head = mid + 1; |
928 | startprfx = matched; |
929 | @@ -147,45 +152,51 @@ exact_out: |
930 | if (likely(!IS_ERR(candidate))) |
931 | put_page(candidate); |
932 | candidate = page; |
933 | + *_ndirents = ndirents; |
934 | } else { |
935 | put_page(page); |
936 | |
937 | - if (unlikely(mid < 1)) /* fix "mid" overflow */ |
938 | - break; |
939 | - |
940 | back = mid - 1; |
941 | endprfx = matched; |
942 | } |
943 | + continue; |
944 | } |
945 | +out: /* free if the candidate is valid */ |
946 | + if (!IS_ERR(candidate)) |
947 | + put_page(candidate); |
948 | + return page; |
949 | } |
950 | - *_diff = 1; |
951 | return candidate; |
952 | } |
953 | |
954 | int erofs_namei(struct inode *dir, |
955 | - struct qstr *name, |
956 | - erofs_nid_t *nid, unsigned int *d_type) |
957 | + struct qstr *name, |
958 | + erofs_nid_t *nid, unsigned int *d_type) |
959 | { |
960 | - int diff; |
961 | + int ndirents; |
962 | struct page *page; |
963 | - u8 *data; |
964 | + void *data; |
965 | struct erofs_dirent *de; |
966 | + struct erofs_qstr qn; |
967 | |
968 | if (unlikely(!dir->i_size)) |
969 | return -ENOENT; |
970 | |
971 | - diff = 1; |
972 | - page = find_target_block_classic(dir, name, &diff); |
973 | + qn.name = name->name; |
974 | + qn.end = name->name + name->len; |
975 | + |
976 | + ndirents = 0; |
977 | + page = find_target_block_classic(dir, &qn, &ndirents); |
978 | |
979 | if (unlikely(IS_ERR(page))) |
980 | return PTR_ERR(page); |
981 | |
982 | data = kmap_atomic(page); |
983 | /* the target page has been mapped */ |
984 | - de = likely(diff) ? |
985 | - /* since the rest of the last page is 0 */ |
986 | - find_target_dirent(name, data, EROFS_BLKSIZ) : |
987 | - (struct erofs_dirent *)data; |
988 | + if (ndirents) |
989 | + de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents); |
990 | + else |
991 | + de = (struct erofs_dirent *)data; |
992 | |
993 | if (likely(!IS_ERR(de))) { |
994 | *nid = le64_to_cpu(de->nid); |
995 | diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c |
996 | index ca2e8fd78959..ab30d14ded06 100644 |
997 | --- a/drivers/staging/erofs/unzip_vle.c |
998 | +++ b/drivers/staging/erofs/unzip_vle.c |
999 | @@ -1017,11 +1017,10 @@ repeat: |
1000 | if (llen > grp->llen) |
1001 | llen = grp->llen; |
1002 | |
1003 | - err = z_erofs_vle_unzip_fast_percpu(compressed_pages, |
1004 | - clusterpages, pages, llen, work->pageofs, |
1005 | - z_erofs_onlinepage_endio); |
1006 | + err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages, |
1007 | + pages, llen, work->pageofs); |
1008 | if (err != -ENOTSUPP) |
1009 | - goto out_percpu; |
1010 | + goto out; |
1011 | |
1012 | if (sparsemem_pages >= nr_pages) |
1013 | goto skip_allocpage; |
1014 | @@ -1042,8 +1041,25 @@ skip_allocpage: |
1015 | erofs_vunmap(vout, nr_pages); |
1016 | |
1017 | out: |
1018 | + /* must handle all compressed pages before endding pages */ |
1019 | + for (i = 0; i < clusterpages; ++i) { |
1020 | + page = compressed_pages[i]; |
1021 | + |
1022 | +#ifdef EROFS_FS_HAS_MANAGED_CACHE |
1023 | + if (page->mapping == MNGD_MAPPING(sbi)) |
1024 | + continue; |
1025 | +#endif |
1026 | + /* recycle all individual staging pages */ |
1027 | + (void)z_erofs_gather_if_stagingpage(page_pool, page); |
1028 | + |
1029 | + WRITE_ONCE(compressed_pages[i], NULL); |
1030 | + } |
1031 | + |
1032 | for (i = 0; i < nr_pages; ++i) { |
1033 | page = pages[i]; |
1034 | + if (!page) |
1035 | + continue; |
1036 | + |
1037 | DBG_BUGON(!page->mapping); |
1038 | |
1039 | /* recycle all individual staging pages */ |
1040 | @@ -1056,20 +1072,6 @@ out: |
1041 | z_erofs_onlinepage_endio(page); |
1042 | } |
1043 | |
1044 | -out_percpu: |
1045 | - for (i = 0; i < clusterpages; ++i) { |
1046 | - page = compressed_pages[i]; |
1047 | - |
1048 | -#ifdef EROFS_FS_HAS_MANAGED_CACHE |
1049 | - if (page->mapping == MNGD_MAPPING(sbi)) |
1050 | - continue; |
1051 | -#endif |
1052 | - /* recycle all individual staging pages */ |
1053 | - (void)z_erofs_gather_if_stagingpage(page_pool, page); |
1054 | - |
1055 | - WRITE_ONCE(compressed_pages[i], NULL); |
1056 | - } |
1057 | - |
1058 | if (pages == z_pagemap_global) |
1059 | mutex_unlock(&z_pagemap_global_lock); |
1060 | else if (unlikely(pages != pages_onstack)) |
1061 | diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h |
1062 | index 5a4e1b62c0d1..c0dfd6906aa8 100644 |
1063 | --- a/drivers/staging/erofs/unzip_vle.h |
1064 | +++ b/drivers/staging/erofs/unzip_vle.h |
1065 | @@ -218,8 +218,7 @@ extern int z_erofs_vle_plain_copy(struct page **compressed_pages, |
1066 | |
1067 | extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages, |
1068 | unsigned clusterpages, struct page **pages, |
1069 | - unsigned outlen, unsigned short pageofs, |
1070 | - void (*endio)(struct page *)); |
1071 | + unsigned int outlen, unsigned short pageofs); |
1072 | |
1073 | extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages, |
1074 | unsigned clusterpages, void *vaddr, unsigned llen, |
1075 | diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c |
1076 | index 52797bd89da1..f471b894c848 100644 |
1077 | --- a/drivers/staging/erofs/unzip_vle_lz4.c |
1078 | +++ b/drivers/staging/erofs/unzip_vle_lz4.c |
1079 | @@ -125,8 +125,7 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages, |
1080 | unsigned int clusterpages, |
1081 | struct page **pages, |
1082 | unsigned int outlen, |
1083 | - unsigned short pageofs, |
1084 | - void (*endio)(struct page *)) |
1085 | + unsigned short pageofs) |
1086 | { |
1087 | void *vin, *vout; |
1088 | unsigned int nr_pages, i, j; |
1089 | @@ -148,19 +147,16 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages, |
1090 | ret = z_erofs_unzip_lz4(vin, vout + pageofs, |
1091 | clusterpages * PAGE_SIZE, outlen); |
1092 | |
1093 | - if (ret >= 0) { |
1094 | - outlen = ret; |
1095 | - ret = 0; |
1096 | - } |
1097 | + if (ret < 0) |
1098 | + goto out; |
1099 | + ret = 0; |
1100 | |
1101 | for (i = 0; i < nr_pages; ++i) { |
1102 | j = min((unsigned int)PAGE_SIZE - pageofs, outlen); |
1103 | |
1104 | if (pages[i]) { |
1105 | - if (ret < 0) { |
1106 | - SetPageError(pages[i]); |
1107 | - } else if (clusterpages == 1 && |
1108 | - pages[i] == compressed_pages[0]) { |
1109 | + if (clusterpages == 1 && |
1110 | + pages[i] == compressed_pages[0]) { |
1111 | memcpy(vin + pageofs, vout + pageofs, j); |
1112 | } else { |
1113 | void *dst = kmap_atomic(pages[i]); |
1114 | @@ -168,12 +164,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages, |
1115 | memcpy(dst + pageofs, vout + pageofs, j); |
1116 | kunmap_atomic(dst); |
1117 | } |
1118 | - endio(pages[i]); |
1119 | } |
1120 | vout += PAGE_SIZE; |
1121 | outlen -= j; |
1122 | pageofs = 0; |
1123 | } |
1124 | + |
1125 | +out: |
1126 | preempt_enable(); |
1127 | |
1128 | if (clusterpages == 1) |
1129 | diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c |
1130 | index b92740edc416..4b038f25f256 100644 |
1131 | --- a/fs/gfs2/glock.c |
1132 | +++ b/fs/gfs2/glock.c |
1133 | @@ -107,7 +107,7 @@ static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode, |
1134 | |
1135 | static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name) |
1136 | { |
1137 | - u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0); |
1138 | + u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0); |
1139 | |
1140 | return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS); |
1141 | } |
1142 | diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h |
1143 | index bfe1639df02d..97fc498dc767 100644 |
1144 | --- a/include/drm/drm_cache.h |
1145 | +++ b/include/drm/drm_cache.h |
1146 | @@ -47,6 +47,24 @@ static inline bool drm_arch_can_wc_memory(void) |
1147 | return false; |
1148 | #elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3) |
1149 | return false; |
1150 | +#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64) |
1151 | + /* |
1152 | + * The DRM driver stack is designed to work with cache coherent devices |
1153 | + * only, but permits an optimization to be enabled in some cases, where |
1154 | + * for some buffers, both the CPU and the GPU use uncached mappings, |
1155 | + * removing the need for DMA snooping and allocation in the CPU caches. |
1156 | + * |
1157 | + * The use of uncached GPU mappings relies on the correct implementation |
1158 | + * of the PCIe NoSnoop TLP attribute by the platform, otherwise the GPU |
1159 | + * will use cached mappings nonetheless. On x86 platforms, this does not |
1160 | + * seem to matter, as uncached CPU mappings will snoop the caches in any |
1161 | + * case. However, on ARM and arm64, enabling this optimization on a |
1162 | + * platform where NoSnoop is ignored results in loss of coherency, which |
1163 | + * breaks correct operation of the device. Since we have no way of |
1164 | + * detecting whether NoSnoop works or not, just disable this |
1165 | + * optimization entirely for ARM and arm64. |
1166 | + */ |
1167 | + return false; |
1168 | #else |
1169 | return true; |
1170 | #endif |
1171 | diff --git a/net/core/skmsg.c b/net/core/skmsg.c |
1172 | index 8c826603bf36..8bc0ba1ebabe 100644 |
1173 | --- a/net/core/skmsg.c |
1174 | +++ b/net/core/skmsg.c |
1175 | @@ -545,6 +545,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc) |
1176 | struct sk_psock *psock = container_of(gc, struct sk_psock, gc); |
1177 | |
1178 | /* No sk_callback_lock since already detached. */ |
1179 | + strp_stop(&psock->parser.strp); |
1180 | strp_done(&psock->parser.strp); |
1181 | |
1182 | cancel_work_sync(&psock->work); |
1183 | diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in |
1184 | index 7aad82406422..d3319a80788a 100644 |
1185 | --- a/scripts/gdb/linux/constants.py.in |
1186 | +++ b/scripts/gdb/linux/constants.py.in |
1187 | @@ -37,12 +37,12 @@ |
1188 | import gdb |
1189 | |
1190 | /* linux/fs.h */ |
1191 | -LX_VALUE(MS_RDONLY) |
1192 | -LX_VALUE(MS_SYNCHRONOUS) |
1193 | -LX_VALUE(MS_MANDLOCK) |
1194 | -LX_VALUE(MS_DIRSYNC) |
1195 | -LX_VALUE(MS_NOATIME) |
1196 | -LX_VALUE(MS_NODIRATIME) |
1197 | +LX_VALUE(SB_RDONLY) |
1198 | +LX_VALUE(SB_SYNCHRONOUS) |
1199 | +LX_VALUE(SB_MANDLOCK) |
1200 | +LX_VALUE(SB_DIRSYNC) |
1201 | +LX_VALUE(SB_NOATIME) |
1202 | +LX_VALUE(SB_NODIRATIME) |
1203 | |
1204 | /* linux/mount.h */ |
1205 | LX_VALUE(MNT_NOSUID) |
1206 | diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py |
1207 | index 0aebd7565b03..2f01a958eb22 100644 |
1208 | --- a/scripts/gdb/linux/proc.py |
1209 | +++ b/scripts/gdb/linux/proc.py |
1210 | @@ -114,11 +114,11 @@ def info_opts(lst, opt): |
1211 | return opts |
1212 | |
1213 | |
1214 | -FS_INFO = {constants.LX_MS_SYNCHRONOUS: ",sync", |
1215 | - constants.LX_MS_MANDLOCK: ",mand", |
1216 | - constants.LX_MS_DIRSYNC: ",dirsync", |
1217 | - constants.LX_MS_NOATIME: ",noatime", |
1218 | - constants.LX_MS_NODIRATIME: ",nodiratime"} |
1219 | +FS_INFO = {constants.LX_SB_SYNCHRONOUS: ",sync", |
1220 | + constants.LX_SB_MANDLOCK: ",mand", |
1221 | + constants.LX_SB_DIRSYNC: ",dirsync", |
1222 | + constants.LX_SB_NOATIME: ",noatime", |
1223 | + constants.LX_SB_NODIRATIME: ",nodiratime"} |
1224 | |
1225 | MNT_INFO = {constants.LX_MNT_NOSUID: ",nosuid", |
1226 | constants.LX_MNT_NODEV: ",nodev", |
1227 | @@ -184,7 +184,7 @@ values of that process namespace""" |
1228 | fstype = superblock['s_type']['name'].string() |
1229 | s_flags = int(superblock['s_flags']) |
1230 | m_flags = int(vfs['mnt']['mnt_flags']) |
1231 | - rd = "ro" if (s_flags & constants.LX_MS_RDONLY) else "rw" |
1232 | + rd = "ro" if (s_flags & constants.LX_SB_RDONLY) else "rw" |
1233 | |
1234 | gdb.write( |
1235 | "{} {} {} {}{}{} 0 0\n" |