Magellan Linux

Contents of /trunk/kernel-magellan/patches-5.0/0108-5.0.9-all-fixes.patch

Parent Directory | Revision Log


Revision 3335
Fri Apr 26 12:20:41 2019 UTC by niro
File size: 117826 bytes
-linux-5.0.9
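
The file is a plain unified diff: the upstream 5.0.8 -> 5.0.9 stable increment plus fixes. A minimal sketch of applying it by hand, assuming a clean linux-5.0.8 source tree (the base version is inferred from the SUBLEVEL bump in the first Makefile hunk below):

    cd linux-5.0.8
    # verify the diff applies cleanly before touching the tree
    patch -p1 --dry-run < 0108-5.0.9-all-fixes.patch
    # apply for real; -p1 strips the leading a/ and b/ path components
    patch -p1 < 0108-5.0.9-all-fixes.patch
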
1 diff --git a/Makefile b/Makefile
2 index f7666051de66..ef192ca04330 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 0
9 -SUBLEVEL = 8
10 +SUBLEVEL = 9
11 EXTRAVERSION =
12 NAME = Shy Crocodile
13
14 diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
15 index 87b23b7fb781..aefcf7a4e17a 100644
16 --- a/arch/arc/configs/hsdk_defconfig
17 +++ b/arch/arc/configs/hsdk_defconfig
18 @@ -8,6 +8,7 @@ CONFIG_NAMESPACES=y
19 # CONFIG_UTS_NS is not set
20 # CONFIG_PID_NS is not set
21 CONFIG_BLK_DEV_INITRD=y
22 +CONFIG_BLK_DEV_RAM=y
23 CONFIG_EMBEDDED=y
24 CONFIG_PERF_EVENTS=y
25 # CONFIG_VM_EVENT_COUNTERS is not set
26 diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
27 index 30e090625916..a72bbda2f7aa 100644
28 --- a/arch/arc/kernel/head.S
29 +++ b/arch/arc/kernel/head.S
30 @@ -106,6 +106,7 @@ ENTRY(stext)
31 ; r2 = pointer to uboot provided cmdline or external DTB in mem
32 ; These are handled later in handle_uboot_args()
33 st r0, [@uboot_tag]
34 + st r1, [@uboot_magic]
35 st r2, [@uboot_arg]
36
37 ; setup "current" tsk and optionally cache it in dedicated r25
38 diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
39 index 7b2340996cf8..7b3a7b3b380c 100644
40 --- a/arch/arc/kernel/setup.c
41 +++ b/arch/arc/kernel/setup.c
42 @@ -36,6 +36,7 @@ unsigned int intr_to_DE_cnt;
43
44 /* Part of U-boot ABI: see head.S */
45 int __initdata uboot_tag;
46 +int __initdata uboot_magic;
47 char __initdata *uboot_arg;
48
49 const struct machine_desc *machine_desc;
50 @@ -497,6 +498,8 @@ static inline bool uboot_arg_invalid(unsigned long addr)
51 #define UBOOT_TAG_NONE 0
52 #define UBOOT_TAG_CMDLINE 1
53 #define UBOOT_TAG_DTB 2
54 +/* We always pass 0 as magic from U-boot */
55 +#define UBOOT_MAGIC_VALUE 0
56
57 void __init handle_uboot_args(void)
58 {
59 @@ -511,6 +514,11 @@ void __init handle_uboot_args(void)
60 goto ignore_uboot_args;
61 }
62
63 + if (uboot_magic != UBOOT_MAGIC_VALUE) {
64 + pr_warn(IGNORE_ARGS "non zero uboot magic\n");
65 + goto ignore_uboot_args;
66 + }
67 +
68 if (uboot_tag != UBOOT_TAG_NONE &&
69 uboot_arg_invalid((unsigned long)uboot_arg)) {
70 pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
71 diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
72 index a50dc00d79a2..d0a05a3bdb96 100644
73 --- a/arch/arm/kernel/patch.c
74 +++ b/arch/arm/kernel/patch.c
75 @@ -16,7 +16,7 @@ struct patch {
76 unsigned int insn;
77 };
78
79 -static DEFINE_SPINLOCK(patch_lock);
80 +static DEFINE_RAW_SPINLOCK(patch_lock);
81
82 static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
83 __acquires(&patch_lock)
84 @@ -33,7 +33,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
85 return addr;
86
87 if (flags)
88 - spin_lock_irqsave(&patch_lock, *flags);
89 + raw_spin_lock_irqsave(&patch_lock, *flags);
90 else
91 __acquire(&patch_lock);
92
93 @@ -48,7 +48,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
94 clear_fixmap(fixmap);
95
96 if (flags)
97 - spin_unlock_irqrestore(&patch_lock, *flags);
98 + raw_spin_unlock_irqrestore(&patch_lock, *flags);
99 else
100 __release(&patch_lock);
101 }
102 diff --git a/arch/mips/bcm47xx/workarounds.c b/arch/mips/bcm47xx/workarounds.c
103 index 46eddbec8d9f..0ab95dd431b3 100644
104 --- a/arch/mips/bcm47xx/workarounds.c
105 +++ b/arch/mips/bcm47xx/workarounds.c
106 @@ -24,6 +24,7 @@ void __init bcm47xx_workarounds(void)
107 case BCM47XX_BOARD_NETGEAR_WNR3500L:
108 bcm47xx_workarounds_enable_usb_power(12);
109 break;
110 + case BCM47XX_BOARD_NETGEAR_WNDR3400V2:
111 case BCM47XX_BOARD_NETGEAR_WNDR3400_V3:
112 bcm47xx_workarounds_enable_usb_power(21);
113 break;
114 diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
115 index d3f42b6bbdac..8a9cff1f129d 100644
116 --- a/arch/x86/hyperv/hv_init.c
117 +++ b/arch/x86/hyperv/hv_init.c
118 @@ -102,9 +102,13 @@ static int hv_cpu_init(unsigned int cpu)
119 u64 msr_vp_index;
120 struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
121 void **input_arg;
122 + struct page *pg;
123
124 input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
125 - *input_arg = page_address(alloc_page(GFP_KERNEL));
126 + pg = alloc_page(GFP_KERNEL);
127 + if (unlikely(!pg))
128 + return -ENOMEM;
129 + *input_arg = page_address(pg);
130
131 hv_get_vp_index(msr_vp_index);
132
133 diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
134 index 58176b56354e..294ed4392a0e 100644
135 --- a/arch/x86/kernel/aperture_64.c
136 +++ b/arch/x86/kernel/aperture_64.c
137 @@ -14,6 +14,7 @@
138 #define pr_fmt(fmt) "AGP: " fmt
139
140 #include <linux/kernel.h>
141 +#include <linux/kcore.h>
142 #include <linux/types.h>
143 #include <linux/init.h>
144 #include <linux/memblock.h>
145 @@ -57,7 +58,7 @@ int fallback_aper_force __initdata;
146
147 int fix_aperture __initdata = 1;
148
149 -#ifdef CONFIG_PROC_VMCORE
150 +#if defined(CONFIG_PROC_VMCORE) || defined(CONFIG_PROC_KCORE)
151 /*
152 * If the first kernel maps the aperture over e820 RAM, the kdump kernel will
153 * use the same range because it will remain configured in the northbridge.
154 @@ -66,20 +67,25 @@ int fix_aperture __initdata = 1;
155 */
156 static unsigned long aperture_pfn_start, aperture_page_count;
157
158 -static int gart_oldmem_pfn_is_ram(unsigned long pfn)
159 +static int gart_mem_pfn_is_ram(unsigned long pfn)
160 {
161 return likely((pfn < aperture_pfn_start) ||
162 (pfn >= aperture_pfn_start + aperture_page_count));
163 }
164
165 -static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
166 +static void __init exclude_from_core(u64 aper_base, u32 aper_order)
167 {
168 aperture_pfn_start = aper_base >> PAGE_SHIFT;
169 aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT;
170 - WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram));
171 +#ifdef CONFIG_PROC_VMCORE
172 + WARN_ON(register_oldmem_pfn_is_ram(&gart_mem_pfn_is_ram));
173 +#endif
174 +#ifdef CONFIG_PROC_KCORE
175 + WARN_ON(register_mem_pfn_is_ram(&gart_mem_pfn_is_ram));
176 +#endif
177 }
178 #else
179 -static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
180 +static void exclude_from_core(u64 aper_base, u32 aper_order)
181 {
182 }
183 #endif
184 @@ -474,7 +480,7 @@ out:
185 * may have allocated the range over its e820 RAM
186 * and fixed up the northbridge
187 */
188 - exclude_from_vmcore(last_aper_base, last_aper_order);
189 + exclude_from_core(last_aper_base, last_aper_order);
190
191 return 1;
192 }
193 @@ -520,7 +526,7 @@ out:
194 * overlap with the first kernel's memory. We can't access the
195 * range through vmcore even though it should be part of the dump.
196 */
197 - exclude_from_vmcore(aper_alloc, aper_order);
198 + exclude_from_core(aper_alloc, aper_order);
199
200 /* Fix up the north bridges */
201 for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
202 diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
203 index d12226f60168..1d9b8aaea06c 100644
204 --- a/arch/x86/kernel/cpu/cyrix.c
205 +++ b/arch/x86/kernel/cpu/cyrix.c
206 @@ -124,7 +124,7 @@ static void set_cx86_reorder(void)
207 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
208
209 /* Load/Store Serialize to mem access disable (=reorder it) */
210 - setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
211 + setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
212 /* set load/store serialize from 1GB to 4GB */
213 ccr3 |= 0xe0;
214 setCx86(CX86_CCR3, ccr3);
215 @@ -135,11 +135,11 @@ static void set_cx86_memwb(void)
216 pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
217
218 /* CCR2 bit 2: unlock NW bit */
219 - setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
220 + setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
221 /* set 'Not Write-through' */
222 write_cr0(read_cr0() | X86_CR0_NW);
223 /* CCR2 bit 2: lock NW bit and set WT1 */
224 - setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
225 + setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
226 }
227
228 /*
229 @@ -153,14 +153,14 @@ static void geode_configure(void)
230 local_irq_save(flags);
231
232 /* Suspend on halt power saving and enable #SUSP pin */
233 - setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);
234 + setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
235
236 ccr3 = getCx86(CX86_CCR3);
237 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
238
239
240 /* FPU fast, DTE cache, Mem bypass */
241 - setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
242 + setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
243 setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
244
245 set_cx86_memwb();
246 @@ -296,7 +296,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
247 /* GXm supports extended cpuid levels 'ala' AMD */
248 if (c->cpuid_level == 2) {
249 /* Enable cxMMX extensions (GX1 Datasheet 54) */
250 - setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);
251 + setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
252
253 /*
254 * GXm : 0x30 ... 0x5f GXm datasheet 51
255 @@ -319,7 +319,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
256 if (dir1 > 7) {
257 dir0_msn++; /* M II */
258 /* Enable MMX extensions (App note 108) */
259 - setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
260 + setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
261 } else {
262 /* A 6x86MX - it has the bug. */
263 set_cpu_bug(c, X86_BUG_COMA);
264 diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
265 index dfd3aca82c61..fb32925a2e62 100644
266 --- a/arch/x86/kernel/hpet.c
267 +++ b/arch/x86/kernel/hpet.c
268 @@ -905,6 +905,8 @@ int __init hpet_enable(void)
269 return 0;
270
271 hpet_set_mapping();
272 + if (!hpet_virt_address)
273 + return 0;
274
275 /*
276 * Read the period and check for a sane value:
277 diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
278 index 34a5c1715148..2882fe1d2a78 100644
279 --- a/arch/x86/kernel/hw_breakpoint.c
280 +++ b/arch/x86/kernel/hw_breakpoint.c
281 @@ -357,6 +357,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
282 #endif
283 default:
284 WARN_ON_ONCE(1);
285 + return -EINVAL;
286 }
287
288 /*
289 diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
290 index 3482460d984d..1bfe5c6e6cfe 100644
291 --- a/arch/x86/kernel/mpparse.c
292 +++ b/arch/x86/kernel/mpparse.c
293 @@ -598,8 +598,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
294 mpf_base = base;
295 mpf_found = true;
296
297 - pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
298 - base, base + sizeof(*mpf) - 1, mpf);
299 + pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
300 + base, base + sizeof(*mpf) - 1);
301
302 memblock_reserve(base, sizeof(*mpf));
303 if (mpf->physptr)
304 diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
305 index 2620baa1f699..507212d75ee2 100644
306 --- a/block/blk-iolatency.c
307 +++ b/block/blk-iolatency.c
308 @@ -75,6 +75,7 @@
309 #include <linux/blk-mq.h>
310 #include "blk-rq-qos.h"
311 #include "blk-stat.h"
312 +#include "blk.h"
313
314 #define DEFAULT_SCALE_COOKIE 1000000U
315
316 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
317 index 9d66a47d32fb..49e16f009095 100644
318 --- a/drivers/acpi/ec.c
319 +++ b/drivers/acpi/ec.c
320 @@ -194,6 +194,7 @@ static struct workqueue_struct *ec_query_wq;
321 static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
322 static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
323 static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
324 +static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
325
326 /* --------------------------------------------------------------------------
327 * Logging/Debugging
328 @@ -499,6 +500,26 @@ static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
329 ec_log_drv("event blocked");
330 }
331
332 +/*
333 + * Process _Q events that might have accumulated in the EC.
334 + * Run with locked ec mutex.
335 + */
336 +static void acpi_ec_clear(struct acpi_ec *ec)
337 +{
338 + int i, status;
339 + u8 value = 0;
340 +
341 + for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
342 + status = acpi_ec_query(ec, &value);
343 + if (status || !value)
344 + break;
345 + }
346 + if (unlikely(i == ACPI_EC_CLEAR_MAX))
347 + pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
348 + else
349 + pr_info("%d stale EC events cleared\n", i);
350 +}
351 +
352 static void acpi_ec_enable_event(struct acpi_ec *ec)
353 {
354 unsigned long flags;
355 @@ -507,6 +528,10 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
356 if (acpi_ec_started(ec))
357 __acpi_ec_enable_event(ec);
358 spin_unlock_irqrestore(&ec->lock, flags);
359 +
360 + /* Drain additional events if hardware requires that */
361 + if (EC_FLAGS_CLEAR_ON_RESUME)
362 + acpi_ec_clear(ec);
363 }
364
365 #ifdef CONFIG_PM_SLEEP
366 @@ -1820,6 +1845,31 @@ static int ec_flag_query_handshake(const struct dmi_system_id *id)
367 }
368 #endif
369
370 +/*
371 + * On some hardware it is necessary to clear events accumulated by the EC during
372 + * sleep. These ECs stop reporting GPEs until they are manually polled, if too
373 + * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
374 + *
375 + * https://bugzilla.kernel.org/show_bug.cgi?id=44161
376 + *
377 + * Ideally, the EC should also be instructed NOT to accumulate events during
378 + * sleep (which Windows seems to do somehow), but the interface to control this
379 + * behaviour is not known at this time.
380 + *
381 + * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
382 + * however it is very likely that other Samsung models are affected.
383 + *
384 + * On systems which don't accumulate _Q events during sleep, this extra check
385 + * should be harmless.
386 + */
387 +static int ec_clear_on_resume(const struct dmi_system_id *id)
388 +{
389 + pr_debug("Detected system needing EC poll on resume.\n");
390 + EC_FLAGS_CLEAR_ON_RESUME = 1;
391 + ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
392 + return 0;
393 +}
394 +
395 /*
396 * Some ECDTs contain wrong register addresses.
397 * MSI MS-171F
398 @@ -1869,6 +1919,9 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
399 ec_honor_ecdt_gpe, "ASUS X580VD", {
400 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
401 DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
402 + {
403 + ec_clear_on_resume, "Samsung hardware", {
404 + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
405 {},
406 };
407
408 diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
409 index 78db97687f26..c4b06cc075f9 100644
410 --- a/drivers/acpi/utils.c
411 +++ b/drivers/acpi/utils.c
412 @@ -800,6 +800,7 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
413 match.hrv = hrv;
414
415 dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
416 + put_device(dev);
417 return !!dev;
418 }
419 EXPORT_SYMBOL(acpi_dev_present);
420 diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
421 index 9ad93ea42fdc..3cde351fb5c9 100644
422 --- a/drivers/auxdisplay/hd44780.c
423 +++ b/drivers/auxdisplay/hd44780.c
424 @@ -280,6 +280,8 @@ static int hd44780_remove(struct platform_device *pdev)
425 struct charlcd *lcd = platform_get_drvdata(pdev);
426
427 charlcd_unregister(lcd);
428 +
429 + kfree(lcd);
430 return 0;
431 }
432
433 diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
434 index 500de1dee967..a00ca6b8117b 100644
435 --- a/drivers/base/power/domain.c
436 +++ b/drivers/base/power/domain.c
437 @@ -1467,12 +1467,12 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
438 if (IS_ERR(gpd_data))
439 return PTR_ERR(gpd_data);
440
441 - genpd_lock(genpd);
442 -
443 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
444 if (ret)
445 goto out;
446
447 + genpd_lock(genpd);
448 +
449 dev_pm_domain_set(dev, &genpd->domain);
450
451 genpd->device_count++;
452 @@ -1480,9 +1480,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
453
454 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
455
456 - out:
457 genpd_unlock(genpd);
458 -
459 + out:
460 if (ret)
461 genpd_free_dev_data(dev, gpd_data);
462 else
463 @@ -1531,15 +1530,15 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
464 genpd->device_count--;
465 genpd->max_off_time_changed = true;
466
467 - if (genpd->detach_dev)
468 - genpd->detach_dev(genpd, dev);
469 -
470 dev_pm_domain_set(dev, NULL);
471
472 list_del_init(&pdd->list_node);
473
474 genpd_unlock(genpd);
475
476 + if (genpd->detach_dev)
477 + genpd->detach_dev(genpd, dev);
478 +
479 genpd_free_dev_data(dev, gpd_data);
480
481 return 0;
482 diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
483 index 96670eefaeb2..6d415b20fb70 100644
484 --- a/drivers/block/paride/pcd.c
485 +++ b/drivers/block/paride/pcd.c
486 @@ -314,6 +314,7 @@ static void pcd_init_units(void)
487 disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
488 1, BLK_MQ_F_SHOULD_MERGE);
489 if (IS_ERR(disk->queue)) {
490 + put_disk(disk);
491 disk->queue = NULL;
492 continue;
493 }
494 @@ -749,8 +750,14 @@ static int pcd_detect(void)
495 return 0;
496
497 printk("%s: No CD-ROM drive found\n", name);
498 - for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
499 + for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
500 + if (!cd->disk)
501 + continue;
502 + blk_cleanup_queue(cd->disk->queue);
503 + cd->disk->queue = NULL;
504 + blk_mq_free_tag_set(&cd->tag_set);
505 put_disk(cd->disk);
506 + }
507 pi_unregister_driver(par_drv);
508 return -1;
509 }
510 @@ -1006,8 +1013,14 @@ static int __init pcd_init(void)
511 pcd_probe_capabilities();
512
513 if (register_blkdev(major, name)) {
514 - for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
515 + for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
516 + if (!cd->disk)
517 + continue;
518 +
519 + blk_cleanup_queue(cd->disk->queue);
520 + blk_mq_free_tag_set(&cd->tag_set);
521 put_disk(cd->disk);
522 + }
523 return -EBUSY;
524 }
525
526 @@ -1028,6 +1041,9 @@ static void __exit pcd_exit(void)
527 int unit;
528
529 for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
530 + if (!cd->disk)
531 + continue;
532 +
533 if (cd->present) {
534 del_gendisk(cd->disk);
535 pi_release(cd->pi);
536 diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
537 index e92e7a8eeeb2..35e6e271b219 100644
538 --- a/drivers/block/paride/pf.c
539 +++ b/drivers/block/paride/pf.c
540 @@ -761,8 +761,14 @@ static int pf_detect(void)
541 return 0;
542
543 printk("%s: No ATAPI disk detected\n", name);
544 - for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
545 + for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
546 + if (!pf->disk)
547 + continue;
548 + blk_cleanup_queue(pf->disk->queue);
549 + pf->disk->queue = NULL;
550 + blk_mq_free_tag_set(&pf->tag_set);
551 put_disk(pf->disk);
552 + }
553 pi_unregister_driver(par_drv);
554 return -1;
555 }
556 @@ -1025,8 +1031,13 @@ static int __init pf_init(void)
557 pf_busy = 0;
558
559 if (register_blkdev(major, name)) {
560 - for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
561 + for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
562 + if (!pf->disk)
563 + continue;
564 + blk_cleanup_queue(pf->disk->queue);
565 + blk_mq_free_tag_set(&pf->tag_set);
566 put_disk(pf->disk);
567 + }
568 return -EBUSY;
569 }
570
571 @@ -1047,13 +1058,18 @@ static void __exit pf_exit(void)
572 int unit;
573 unregister_blkdev(major, name);
574 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
575 - if (!pf->present)
576 + if (!pf->disk)
577 continue;
578 - del_gendisk(pf->disk);
579 +
580 + if (pf->present)
581 + del_gendisk(pf->disk);
582 +
583 blk_cleanup_queue(pf->disk->queue);
584 blk_mq_free_tag_set(&pf->tag_set);
585 put_disk(pf->disk);
586 - pi_release(pf->pi);
587 +
588 + if (pf->present)
589 + pi_release(pf->pi);
590 }
591 }
592
593 diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
594 index f3442c2bdbdc..3c70004240d6 100644
595 --- a/drivers/crypto/axis/artpec6_crypto.c
596 +++ b/drivers/crypto/axis/artpec6_crypto.c
597 @@ -284,6 +284,7 @@ enum artpec6_crypto_hash_flags {
598
599 struct artpec6_crypto_req_common {
600 struct list_head list;
601 + struct list_head complete_in_progress;
602 struct artpec6_crypto_dma_descriptors *dma;
603 struct crypto_async_request *req;
604 void (*complete)(struct crypto_async_request *req);
605 @@ -2045,7 +2046,8 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
606 return artpec6_crypto_dma_map_descs(common);
607 }
608
609 -static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
610 +static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
611 + struct list_head *completions)
612 {
613 struct artpec6_crypto_req_common *req;
614
615 @@ -2056,7 +2058,7 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
616 list_move_tail(&req->list, &ac->pending);
617 artpec6_crypto_start_dma(req);
618
619 - req->req->complete(req->req, -EINPROGRESS);
620 + list_add_tail(&req->complete_in_progress, completions);
621 }
622
623 /*
624 @@ -2086,6 +2088,11 @@ static void artpec6_crypto_task(unsigned long data)
625 struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
626 struct artpec6_crypto_req_common *req;
627 struct artpec6_crypto_req_common *n;
628 + struct list_head complete_done;
629 + struct list_head complete_in_progress;
630 +
631 + INIT_LIST_HEAD(&complete_done);
632 + INIT_LIST_HEAD(&complete_in_progress);
633
634 if (list_empty(&ac->pending)) {
635 pr_debug("Spurious IRQ\n");
636 @@ -2119,19 +2126,30 @@ static void artpec6_crypto_task(unsigned long data)
637
638 pr_debug("Completing request %p\n", req);
639
640 - list_del(&req->list);
641 + list_move_tail(&req->list, &complete_done);
642
643 artpec6_crypto_dma_unmap_all(req);
644 artpec6_crypto_copy_bounce_buffers(req);
645
646 ac->pending_count--;
647 artpec6_crypto_common_destroy(req);
648 - req->complete(req->req);
649 }
650
651 - artpec6_crypto_process_queue(ac);
652 + artpec6_crypto_process_queue(ac, &complete_in_progress);
653
654 spin_unlock_bh(&ac->queue_lock);
655 +
656 + /* Perform the completion callbacks without holding the queue lock
657 + * to allow new request submissions from the callbacks.
658 + */
659 + list_for_each_entry_safe(req, n, &complete_done, list) {
660 + req->complete(req->req);
661 + }
662 +
663 + list_for_each_entry_safe(req, n, &complete_in_progress,
664 + complete_in_progress) {
665 + req->req->complete(req->req, -EINPROGRESS);
666 + }
667 }
668
669 static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
670 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
671 index 3a9b48b227ac..a7208ca0bfe3 100644
672 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
673 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
674 @@ -546,7 +546,7 @@ static int psp_load_fw(struct amdgpu_device *adev)
675 struct psp_context *psp = &adev->psp;
676
677 if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
678 - psp_ring_destroy(psp, PSP_RING_TYPE__KM);
679 + psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
680 goto skip_memalloc;
681 }
682
683 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
684 index 47243165a082..ae90a99909ef 100644
685 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
686 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
687 @@ -323,57 +323,7 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
688 struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
689 struct queue_properties *q)
690 {
691 - uint64_t addr;
692 - struct cik_mqd *m;
693 - int retval;
694 -
695 - retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
696 - mqd_mem_obj);
697 -
698 - if (retval != 0)
699 - return -ENOMEM;
700 -
701 - m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
702 - addr = (*mqd_mem_obj)->gpu_addr;
703 -
704 - memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
705 -
706 - m->header = 0xC0310800;
707 - m->compute_pipelinestat_enable = 1;
708 - m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
709 - m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
710 - m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
711 - m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
712 -
713 - m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
714 - PRELOAD_REQ;
715 - m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
716 - QUANTUM_DURATION(10);
717 -
718 - m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
719 - m->cp_mqd_base_addr_lo = lower_32_bits(addr);
720 - m->cp_mqd_base_addr_hi = upper_32_bits(addr);
721 -
722 - m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
723 -
724 - /*
725 - * Pipe Priority
726 - * Identifies the pipe relative priority when this queue is connected
727 - * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
728 - * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
729 - * 0 = CS_LOW (typically below GFX)
730 - * 1 = CS_MEDIUM (typically between HP3D and GFX
731 - * 2 = CS_HIGH (typically above HP3D)
732 - */
733 - m->cp_hqd_pipe_priority = 1;
734 - m->cp_hqd_queue_priority = 15;
735 -
736 - *mqd = m;
737 - if (gart_addr)
738 - *gart_addr = addr;
739 - retval = mm->update_mqd(mm, m, q);
740 -
741 - return retval;
742 + return init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
743 }
744
745 static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
746 diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
747 index 0573eab0e190..f35e4ab55b27 100644
748 --- a/drivers/gpu/drm/exynos/exynos_mixer.c
749 +++ b/drivers/gpu/drm/exynos/exynos_mixer.c
750 @@ -20,6 +20,7 @@
751 #include "regs-vp.h"
752
753 #include <linux/kernel.h>
754 +#include <linux/ktime.h>
755 #include <linux/spinlock.h>
756 #include <linux/wait.h>
757 #include <linux/i2c.h>
758 @@ -352,15 +353,62 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx, unsigned int alpha)
759 mixer_reg_write(ctx, MXR_VIDEO_CFG, val);
760 }
761
762 -static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
763 +static bool mixer_is_synced(struct mixer_context *ctx)
764 {
765 - /* block update on vsync */
766 - mixer_reg_writemask(ctx, MXR_STATUS, enable ?
767 - MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
768 + u32 base, shadow;
769
770 + if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
771 + ctx->mxr_ver == MXR_VER_128_0_0_184)
772 + return !(mixer_reg_read(ctx, MXR_CFG) &
773 + MXR_CFG_LAYER_UPDATE_COUNT_MASK);
774 +
775 + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
776 + vp_reg_read(ctx, VP_SHADOW_UPDATE))
777 + return false;
778 +
779 + base = mixer_reg_read(ctx, MXR_CFG);
780 + shadow = mixer_reg_read(ctx, MXR_CFG_S);
781 + if (base != shadow)
782 + return false;
783 +
784 + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
785 + shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
786 + if (base != shadow)
787 + return false;
788 +
789 + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
790 + shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
791 + if (base != shadow)
792 + return false;
793 +
794 + return true;
795 +}
796 +
797 +static int mixer_wait_for_sync(struct mixer_context *ctx)
798 +{
799 + ktime_t timeout = ktime_add_us(ktime_get(), 100000);
800 +
801 + while (!mixer_is_synced(ctx)) {
802 + usleep_range(1000, 2000);
803 + if (ktime_compare(ktime_get(), timeout) > 0)
804 + return -ETIMEDOUT;
805 + }
806 + return 0;
807 +}
808 +
809 +static void mixer_disable_sync(struct mixer_context *ctx)
810 +{
811 + mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE);
812 +}
813 +
814 +static void mixer_enable_sync(struct mixer_context *ctx)
815 +{
816 + if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
817 + ctx->mxr_ver == MXR_VER_128_0_0_184)
818 + mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
819 + mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE);
820 if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
821 - vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ?
822 - VP_SHADOW_UPDATE_ENABLE : 0);
823 + vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE);
824 }
825
826 static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
827 @@ -498,7 +546,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
828
829 spin_lock_irqsave(&ctx->reg_slock, flags);
830
831 - vp_reg_write(ctx, VP_SHADOW_UPDATE, 1);
832 /* interlace or progressive scan mode */
833 val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
834 vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
835 @@ -553,11 +600,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
836 vp_regs_dump(ctx);
837 }
838
839 -static void mixer_layer_update(struct mixer_context *ctx)
840 -{
841 - mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
842 -}
843 -
844 static void mixer_graph_buffer(struct mixer_context *ctx,
845 struct exynos_drm_plane *plane)
846 {
847 @@ -640,11 +682,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
848 mixer_cfg_layer(ctx, win, priority, true);
849 mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha);
850
851 - /* layer update mandatory for mixer 16.0.33.0 */
852 - if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
853 - ctx->mxr_ver == MXR_VER_128_0_0_184)
854 - mixer_layer_update(ctx);
855 -
856 spin_unlock_irqrestore(&ctx->reg_slock, flags);
857
858 mixer_regs_dump(ctx);
859 @@ -709,7 +746,7 @@ static void mixer_win_reset(struct mixer_context *ctx)
860 static irqreturn_t mixer_irq_handler(int irq, void *arg)
861 {
862 struct mixer_context *ctx = arg;
863 - u32 val, base, shadow;
864 + u32 val;
865
866 spin_lock(&ctx->reg_slock);
867
868 @@ -723,26 +760,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
869 val &= ~MXR_INT_STATUS_VSYNC;
870
871 /* interlace scan need to check shadow register */
872 - if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
873 - if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
874 - vp_reg_read(ctx, VP_SHADOW_UPDATE))
875 - goto out;
876 -
877 - base = mixer_reg_read(ctx, MXR_CFG);
878 - shadow = mixer_reg_read(ctx, MXR_CFG_S);
879 - if (base != shadow)
880 - goto out;
881 -
882 - base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
883 - shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
884 - if (base != shadow)
885 - goto out;
886 -
887 - base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
888 - shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
889 - if (base != shadow)
890 - goto out;
891 - }
892 + if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)
893 + && !mixer_is_synced(ctx))
894 + goto out;
895
896 drm_crtc_handle_vblank(&ctx->crtc->base);
897 }
898 @@ -917,12 +937,14 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
899
900 static void mixer_atomic_begin(struct exynos_drm_crtc *crtc)
901 {
902 - struct mixer_context *mixer_ctx = crtc->ctx;
903 + struct mixer_context *ctx = crtc->ctx;
904
905 - if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
906 + if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
907 return;
908
909 - mixer_vsync_set_update(mixer_ctx, false);
910 + if (mixer_wait_for_sync(ctx))
911 + dev_err(ctx->dev, "timeout waiting for VSYNC\n");
912 + mixer_disable_sync(ctx);
913 }
914
915 static void mixer_update_plane(struct exynos_drm_crtc *crtc,
916 @@ -964,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
917 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
918 return;
919
920 - mixer_vsync_set_update(mixer_ctx, true);
921 + mixer_enable_sync(mixer_ctx);
922 exynos_crtc_handle_event(crtc);
923 }
924
925 @@ -979,7 +1001,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
926
927 exynos_drm_pipe_clk_enable(crtc, true);
928
929 - mixer_vsync_set_update(ctx, false);
930 + mixer_disable_sync(ctx);
931
932 mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
933
934 @@ -992,7 +1014,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
935
936 mixer_commit(ctx);
937
938 - mixer_vsync_set_update(ctx, true);
939 + mixer_enable_sync(ctx);
940
941 set_bit(MXR_BIT_POWERED, &ctx->flags);
942 }
943 diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
944 index 8a0f85f5fc1a..6a765682fbfa 100644
945 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
946 +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
947 @@ -38,6 +38,7 @@ int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp,
948
949 int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
950 int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
951 +int gf117_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
952 int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
953 int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
954 int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
955 diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
956 index 88a52f6b39fe..7dfbbbc1beea 100644
957 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
958 +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
959 @@ -181,7 +181,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
960 }
961
962 ret = pm_runtime_get_sync(drm->dev);
963 - if (IS_ERR_VALUE(ret) && ret != -EACCES)
964 + if (ret < 0 && ret != -EACCES)
965 return ret;
966 ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
967 pm_runtime_put_autosuspend(drm->dev);
968 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
969 index d9edb5785813..d75fa7678483 100644
970 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
971 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
972 @@ -1613,7 +1613,7 @@ nvd7_chipset = {
973 .pci = gf106_pci_new,
974 .therm = gf119_therm_new,
975 .timer = nv41_timer_new,
976 - .volt = gf100_volt_new,
977 + .volt = gf117_volt_new,
978 .ce[0] = gf100_ce_new,
979 .disp = gf119_disp_new,
980 .dma = gf119_dma_new,
981 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
982 index bcd179ba11d0..146adcdd316a 100644
983 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
984 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
985 @@ -2,6 +2,7 @@ nvkm-y += nvkm/subdev/volt/base.o
986 nvkm-y += nvkm/subdev/volt/gpio.o
987 nvkm-y += nvkm/subdev/volt/nv40.o
988 nvkm-y += nvkm/subdev/volt/gf100.o
989 +nvkm-y += nvkm/subdev/volt/gf117.o
990 nvkm-y += nvkm/subdev/volt/gk104.o
991 nvkm-y += nvkm/subdev/volt/gk20a.o
992 nvkm-y += nvkm/subdev/volt/gm20b.o
993 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
994 new file mode 100644
995 index 000000000000..547a58f0aeac
996 --- /dev/null
997 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
998 @@ -0,0 +1,60 @@
999 +/*
1000 + * Copyright 2019 Ilia Mirkin
1001 + *
1002 + * Permission is hereby granted, free of charge, to any person obtaining a
1003 + * copy of this software and associated documentation files (the "Software"),
1004 + * to deal in the Software without restriction, including without limitation
1005 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
1006 + * and/or sell copies of the Software, and to permit persons to whom the
1007 + * Software is furnished to do so, subject to the following conditions:
1008 + *
1009 + * The above copyright notice and this permission notice shall be included in
1010 + * all copies or substantial portions of the Software.
1011 + *
1012 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1013 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1014 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
1015 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
1016 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
1017 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
1018 + * OTHER DEALINGS IN THE SOFTWARE.
1019 + *
1020 + * Authors: Ilia Mirkin
1021 + */
1022 +#include "priv.h"
1023 +
1024 +#include <subdev/fuse.h>
1025 +
1026 +static int
1027 +gf117_volt_speedo_read(struct nvkm_volt *volt)
1028 +{
1029 + struct nvkm_device *device = volt->subdev.device;
1030 + struct nvkm_fuse *fuse = device->fuse;
1031 +
1032 + if (!fuse)
1033 + return -EINVAL;
1034 +
1035 + return nvkm_fuse_read(fuse, 0x3a8);
1036 +}
1037 +
1038 +static const struct nvkm_volt_func
1039 +gf117_volt = {
1040 + .oneinit = gf100_volt_oneinit,
1041 + .vid_get = nvkm_voltgpio_get,
1042 + .vid_set = nvkm_voltgpio_set,
1043 + .speedo_read = gf117_volt_speedo_read,
1044 +};
1045 +
1046 +int
1047 +gf117_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
1048 +{
1049 + struct nvkm_volt *volt;
1050 + int ret;
1051 +
1052 + ret = nvkm_volt_new_(&gf117_volt, device, index, &volt);
1053 + *pvolt = volt;
1054 + if (ret)
1055 + return ret;
1056 +
1057 + return nvkm_voltgpio_init(volt);
1058 +}
1059 diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
1060 index ca4ae45dd307..8e5724b63f1f 100644
1061 --- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
1062 +++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
1063 @@ -70,18 +70,12 @@ static inline struct innolux_panel *to_innolux_panel(struct drm_panel *panel)
1064 static int innolux_panel_disable(struct drm_panel *panel)
1065 {
1066 struct innolux_panel *innolux = to_innolux_panel(panel);
1067 - int err;
1068
1069 if (!innolux->enabled)
1070 return 0;
1071
1072 backlight_disable(innolux->backlight);
1073
1074 - err = mipi_dsi_dcs_set_display_off(innolux->link);
1075 - if (err < 0)
1076 - DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
1077 - err);
1078 -
1079 innolux->enabled = false;
1080
1081 return 0;
1082 @@ -95,6 +89,11 @@ static int innolux_panel_unprepare(struct drm_panel *panel)
1083 if (!innolux->prepared)
1084 return 0;
1085
1086 + err = mipi_dsi_dcs_set_display_off(innolux->link);
1087 + if (err < 0)
1088 + DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
1089 + err);
1090 +
1091 err = mipi_dsi_dcs_enter_sleep_mode(innolux->link);
1092 if (err < 0) {
1093 DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
1094 diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
1095 index d5a23295dd80..bb7b58407039 100644
1096 --- a/drivers/gpu/drm/udl/udl_gem.c
1097 +++ b/drivers/gpu/drm/udl/udl_gem.c
1098 @@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
1099 *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
1100
1101 out:
1102 - drm_gem_object_put(&gobj->base);
1103 + drm_gem_object_put_unlocked(&gobj->base);
1104 unlock:
1105 mutex_unlock(&udl->gem_lock);
1106 return ret;
1107 diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
1108 index 45b2460f3166..e8819d750938 100644
1109 --- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
1110 +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
1111 @@ -668,6 +668,10 @@ static const struct amba_id debug_ids[] = {
1112 .id = 0x000bbd08,
1113 .mask = 0x000fffff,
1114 },
1115 + { /* Debug for Cortex-A73 */
1116 + .id = 0x000bbd09,
1117 + .mask = 0x000fffff,
1118 + },
1119 { 0, 0 },
1120 };
1121
1122 diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
1123 index 5344e8993b28..5866f358ea04 100644
1124 --- a/drivers/infiniband/hw/hfi1/qp.c
1125 +++ b/drivers/infiniband/hw/hfi1/qp.c
1126 @@ -833,7 +833,7 @@ void notify_error_qp(struct rvt_qp *qp)
1127 write_seqlock(lock);
1128 if (!list_empty(&priv->s_iowait.list) &&
1129 !(qp->s_flags & RVT_S_BUSY)) {
1130 - qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
1131 + qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
1132 list_del_init(&priv->s_iowait.list);
1133 priv->s_iowait.lock = NULL;
1134 rvt_put_qp(qp);
1135 diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
1136 index 509e467843f6..f4cac63194d9 100644
1137 --- a/drivers/infiniband/hw/hns/hns_roce_device.h
1138 +++ b/drivers/infiniband/hw/hns/hns_roce_device.h
1139 @@ -216,6 +216,26 @@ enum {
1140 HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
1141 };
1142
1143 +enum hns_roce_reset_stage {
1144 + HNS_ROCE_STATE_NON_RST,
1145 + HNS_ROCE_STATE_RST_BEF_DOWN,
1146 + HNS_ROCE_STATE_RST_DOWN,
1147 + HNS_ROCE_STATE_RST_UNINIT,
1148 + HNS_ROCE_STATE_RST_INIT,
1149 + HNS_ROCE_STATE_RST_INITED,
1150 +};
1151 +
1152 +enum hns_roce_instance_state {
1153 + HNS_ROCE_STATE_NON_INIT,
1154 + HNS_ROCE_STATE_INIT,
1155 + HNS_ROCE_STATE_INITED,
1156 + HNS_ROCE_STATE_UNINIT,
1157 +};
1158 +
1159 +enum {
1160 + HNS_ROCE_RST_DIRECT_RETURN = 0,
1161 +};
1162 +
1163 #define HNS_ROCE_CMD_SUCCESS 1
1164
1165 #define HNS_ROCE_PORT_DOWN 0
1166 @@ -898,6 +918,7 @@ struct hns_roce_dev {
1167 spinlock_t bt_cmd_lock;
1168 bool active;
1169 bool is_reset;
1170 + unsigned long reset_cnt;
1171 struct hns_roce_ib_iboe iboe;
1172
1173 struct list_head pgdir_list;
1174 diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1175 index 543fa1504cd3..7ac06576d791 100644
1176 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1177 +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1178 @@ -5800,6 +5800,7 @@ MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
1179 static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
1180 struct hnae3_handle *handle)
1181 {
1182 + struct hns_roce_v2_priv *priv = hr_dev->priv;
1183 const struct pci_device_id *id;
1184 int i;
1185
1186 @@ -5830,10 +5831,13 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
1187 hr_dev->cmd_mod = 1;
1188 hr_dev->loop_idc = 0;
1189
1190 + hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
1191 + priv->handle = handle;
1192 +
1193 return 0;
1194 }
1195
1196 -static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
1197 +static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
1198 {
1199 struct hns_roce_dev *hr_dev;
1200 int ret;
1201 @@ -5850,7 +5854,6 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
1202
1203 hr_dev->pci_dev = handle->pdev;
1204 hr_dev->dev = &handle->pdev->dev;
1205 - handle->priv = hr_dev;
1206
1207 ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
1208 if (ret) {
1209 @@ -5864,6 +5867,8 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
1210 goto error_failed_get_cfg;
1211 }
1212
1213 + handle->priv = hr_dev;
1214 +
1215 return 0;
1216
1217 error_failed_get_cfg:
1218 @@ -5875,7 +5880,7 @@ error_failed_kzalloc:
1219 return ret;
1220 }
1221
1222 -static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
1223 +static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
1224 bool reset)
1225 {
1226 struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
1227 @@ -5883,24 +5888,78 @@ static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
1228 if (!hr_dev)
1229 return;
1230
1231 + handle->priv = NULL;
1232 hns_roce_exit(hr_dev);
1233 kfree(hr_dev->priv);
1234 ib_dealloc_device(&hr_dev->ib_dev);
1235 }
1236
1237 +static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
1238 +{
1239 + const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1240 + struct device *dev = &handle->pdev->dev;
1241 + int ret;
1242 +
1243 + handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
1244 +
1245 + if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
1246 + handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
1247 + goto reset_chk_err;
1248 + }
1249 +
1250 + ret = __hns_roce_hw_v2_init_instance(handle);
1251 + if (ret) {
1252 + handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
1253 + dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
1254 + if (ops->ae_dev_resetting(handle) ||
1255 + ops->get_hw_reset_stat(handle))
1256 + goto reset_chk_err;
1257 + else
1258 + return ret;
1259 + }
1260 +
1261 + handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
1262 +
1263 +
1264 + return 0;
1265 +
1266 +reset_chk_err:
1267 + dev_err(dev, "Device is busy in resetting state.\n"
1268 + "please retry later.\n");
1269 +
1270 + return -EBUSY;
1271 +}
1272 +
1273 +static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
1274 + bool reset)
1275 +{
1276 + if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
1277 + return;
1278 +
1279 + handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
1280 +
1281 + __hns_roce_hw_v2_uninit_instance(handle, reset);
1282 +
1283 + handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
1284 +}
1285 static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
1286 {
1287 - struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
1288 + struct hns_roce_dev *hr_dev;
1289 struct ib_event event;
1290
1291 - if (!hr_dev) {
1292 - dev_err(&handle->pdev->dev,
1293 - "Input parameter handle->priv is NULL!\n");
1294 - return -EINVAL;
1295 + if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
1296 + set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
1297 + return 0;
1298 }
1299
1300 + handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
1301 + clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
1302 +
1303 + hr_dev = (struct hns_roce_dev *)handle->priv;
1304 + if (!hr_dev)
1305 + return 0;
1306 +
1307 hr_dev->active = false;
1308 - hr_dev->is_reset = true;
1309
1310 event.event = IB_EVENT_DEVICE_FATAL;
1311 event.device = &hr_dev->ib_dev;
1312 @@ -5912,17 +5971,29 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
1313
1314 static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
1315 {
1316 + struct device *dev = &handle->pdev->dev;
1317 int ret;
1318
1319 - ret = hns_roce_hw_v2_init_instance(handle);
1320 + if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
1321 + &handle->rinfo.state)) {
1322 + handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
1323 + return 0;
1324 + }
1325 +
1326 + handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;
1327 +
1328 + dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
1329 + ret = __hns_roce_hw_v2_init_instance(handle);
1330 if (ret) {
1331 /* when reset notify type is HNAE3_INIT_CLIENT In reset notify
1332 * callback function, RoCE Engine reinitialize. If RoCE reinit
1333 * failed, we should inform NIC driver.
1334 */
1335 handle->priv = NULL;
1336 - dev_err(&handle->pdev->dev,
1337 - "In reset process RoCE reinit failed %d.\n", ret);
1338 + dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
1339 + } else {
1340 + handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
1341 + dev_info(dev, "Reset done, RoCE client reinit finished.\n");
1342 }
1343
1344 return ret;
1345 @@ -5930,8 +6001,14 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
1346
1347 static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
1348 {
1349 + if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
1350 + return 0;
1351 +
1352 + handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
1353 + dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
1354 msleep(100);
1355 - hns_roce_hw_v2_uninit_instance(handle, false);
1356 + __hns_roce_hw_v2_uninit_instance(handle, false);
1357 +
1358 return 0;
1359 }
1360
1361 diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
1362 index b72d0443c835..5398aa718cfc 100644
1363 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
1364 +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
1365 @@ -1546,6 +1546,7 @@ struct hns_roce_link_table_entry {
1366 #define HNS_ROCE_LINK_TABLE_NXT_PTR_M GENMASK(31, 20)
1367
1368 struct hns_roce_v2_priv {
1369 + struct hnae3_handle *handle;
1370 struct hns_roce_v2_cmq cmq;
1371 struct hns_roce_link_table tsq;
1372 struct hns_roce_link_table tpq;
1373 diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
1374 index 59e978141ad4..e99177533930 100644
1375 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
1376 +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
1377 @@ -173,7 +173,12 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
1378
1379 rcu_read_lock();
1380 in = __in_dev_get_rcu(upper_dev);
1381 - local_ipaddr = ntohl(in->ifa_list->ifa_address);
1382 +
1383 + if (!in->ifa_list)
1384 + local_ipaddr = 0;
1385 + else
1386 + local_ipaddr = ntohl(in->ifa_list->ifa_address);
1387 +
1388 rcu_read_unlock();
1389 } else {
1390 local_ipaddr = ntohl(ifa->ifa_address);
1391 @@ -185,6 +190,11 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
1392 case NETDEV_UP:
1393 /* Fall through */
1394 case NETDEV_CHANGEADDR:
1395 +
1396 + /* Just skip if no need to handle ARP cache */
1397 + if (!local_ipaddr)
1398 + break;
1399 +
1400 i40iw_manage_arp_cache(iwdev,
1401 netdev->dev_addr,
1402 &local_ipaddr,
1403 diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
1404 index 782499abcd98..2a0b59a4b6eb 100644
1405 --- a/drivers/infiniband/hw/mlx4/alias_GUID.c
1406 +++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
1407 @@ -804,8 +804,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
1408 unsigned long flags;
1409
1410 for (i = 0 ; i < dev->num_ports; i++) {
1411 - cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
1412 det = &sriov->alias_guid.ports_guid[i];
1413 + cancel_delayed_work_sync(&det->alias_guid_work);
1414 spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
1415 while (!list_empty(&det->cb_list)) {
1416 cb_ctx = list_entry(det->cb_list.next,
1417 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1418 index dbd6824dfffa..53b1fbadc496 100644
1419 --- a/drivers/iommu/intel-iommu.c
1420 +++ b/drivers/iommu/intel-iommu.c
1421 @@ -1534,6 +1534,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1422 u32 pmen;
1423 unsigned long flags;
1424
1425 + if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
1426 + return;
1427 +
1428 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1429 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1430 pmen &= ~DMA_PMEN_EPM;
1431 @@ -5328,7 +5331,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
1432
1433 ctx_lo = context[0].lo;
1434
1435 - sdev->did = domain->iommu_did[iommu->seq_id];
1436 + sdev->did = FLPT_DEFAULT_DID;
1437 sdev->sid = PCI_DEVID(info->bus, info->devfn);
1438
1439 if (!(ctx_lo & CONTEXT_PASIDE)) {
1440 diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
1441 index 567b29c47608..98b6e1d4b1a6 100644
1442 --- a/drivers/irqchip/irq-mbigen.c
1443 +++ b/drivers/irqchip/irq-mbigen.c
1444 @@ -161,6 +161,9 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
1445 void __iomem *base = d->chip_data;
1446 u32 val;
1447
1448 + if (!msg->address_lo && !msg->address_hi)
1449 + return;
1450 +
1451 base += get_mbigen_vec_reg(d->hwirq);
1452 val = readl_relaxed(base);
1453
1454 diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
1455 index a93296b9b45d..7bd1d4cb2e19 100644
1456 --- a/drivers/irqchip/irq-stm32-exti.c
1457 +++ b/drivers/irqchip/irq-stm32-exti.c
1458 @@ -716,7 +716,6 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
1459 const struct stm32_exti_bank *stm32_bank;
1460 struct stm32_exti_chip_data *chip_data;
1461 void __iomem *base = h_data->base;
1462 - u32 irqs_mask;
1463
1464 stm32_bank = h_data->drv_data->exti_banks[bank_idx];
1465 chip_data = &h_data->chips_data[bank_idx];
1466 @@ -725,21 +724,12 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
1467
1468 raw_spin_lock_init(&chip_data->rlock);
1469
1470 - /* Determine number of irqs supported */
1471 - writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst);
1472 - irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst);
1473 -
1474 /*
1475 * This IP has no reset, so after hot reboot we should
1476 * clear registers to avoid residue
1477 */
1478 writel_relaxed(0, base + stm32_bank->imr_ofst);
1479 writel_relaxed(0, base + stm32_bank->emr_ofst);
1480 - writel_relaxed(0, base + stm32_bank->rtsr_ofst);
1481 - writel_relaxed(0, base + stm32_bank->ftsr_ofst);
1482 - writel_relaxed(~0UL, base + stm32_bank->rpr_ofst);
1483 - if (stm32_bank->fpr_ofst != UNDEF_REG)
1484 - writel_relaxed(~0UL, base + stm32_bank->fpr_ofst);
1485
1486 pr_info("%pOF: bank%d\n", h_data->node, bank_idx);
1487
1488 diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
1489 index 2837dc77478e..f0f9eb30bd2b 100644
1490 --- a/drivers/misc/lkdtm/core.c
1491 +++ b/drivers/misc/lkdtm/core.c
1492 @@ -152,7 +152,9 @@ static const struct crashtype crashtypes[] = {
1493 CRASHTYPE(EXEC_VMALLOC),
1494 CRASHTYPE(EXEC_RODATA),
1495 CRASHTYPE(EXEC_USERSPACE),
1496 + CRASHTYPE(EXEC_NULL),
1497 CRASHTYPE(ACCESS_USERSPACE),
1498 + CRASHTYPE(ACCESS_NULL),
1499 CRASHTYPE(WRITE_RO),
1500 CRASHTYPE(WRITE_RO_AFTER_INIT),
1501 CRASHTYPE(WRITE_KERN),
1502 diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
1503 index 3c6fd327e166..b69ee004a3f7 100644
1504 --- a/drivers/misc/lkdtm/lkdtm.h
1505 +++ b/drivers/misc/lkdtm/lkdtm.h
1506 @@ -45,7 +45,9 @@ void lkdtm_EXEC_KMALLOC(void);
1507 void lkdtm_EXEC_VMALLOC(void);
1508 void lkdtm_EXEC_RODATA(void);
1509 void lkdtm_EXEC_USERSPACE(void);
1510 +void lkdtm_EXEC_NULL(void);
1511 void lkdtm_ACCESS_USERSPACE(void);
1512 +void lkdtm_ACCESS_NULL(void);
1513
1514 /* lkdtm_refcount.c */
1515 void lkdtm_REFCOUNT_INC_OVERFLOW(void);
1516 diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
1517 index 53b85c9d16b8..62f76d506f04 100644
1518 --- a/drivers/misc/lkdtm/perms.c
1519 +++ b/drivers/misc/lkdtm/perms.c
1520 @@ -47,7 +47,7 @@ static noinline void execute_location(void *dst, bool write)
1521 {
1522 void (*func)(void) = dst;
1523
1524 - pr_info("attempting ok execution at %p\n", do_nothing);
1525 + pr_info("attempting ok execution at %px\n", do_nothing);
1526 do_nothing();
1527
1528 if (write == CODE_WRITE) {
1529 @@ -55,7 +55,7 @@ static noinline void execute_location(void *dst, bool write)
1530 flush_icache_range((unsigned long)dst,
1531 (unsigned long)dst + EXEC_SIZE);
1532 }
1533 - pr_info("attempting bad execution at %p\n", func);
1534 + pr_info("attempting bad execution at %px\n", func);
1535 func();
1536 }
1537
1538 @@ -66,14 +66,14 @@ static void execute_user_location(void *dst)
1539 /* Intentionally crossing kernel/user memory boundary. */
1540 void (*func)(void) = dst;
1541
1542 - pr_info("attempting ok execution at %p\n", do_nothing);
1543 + pr_info("attempting ok execution at %px\n", do_nothing);
1544 do_nothing();
1545
1546 copied = access_process_vm(current, (unsigned long)dst, do_nothing,
1547 EXEC_SIZE, FOLL_WRITE);
1548 if (copied < EXEC_SIZE)
1549 return;
1550 - pr_info("attempting bad execution at %p\n", func);
1551 + pr_info("attempting bad execution at %px\n", func);
1552 func();
1553 }
1554
1555 @@ -82,7 +82,7 @@ void lkdtm_WRITE_RO(void)
1556 /* Explicitly cast away "const" for the test. */
1557 unsigned long *ptr = (unsigned long *)&rodata;
1558
1559 - pr_info("attempting bad rodata write at %p\n", ptr);
1560 + pr_info("attempting bad rodata write at %px\n", ptr);
1561 *ptr ^= 0xabcd1234;
1562 }
1563
1564 @@ -100,7 +100,7 @@ void lkdtm_WRITE_RO_AFTER_INIT(void)
1565 return;
1566 }
1567
1568 - pr_info("attempting bad ro_after_init write at %p\n", ptr);
1569 + pr_info("attempting bad ro_after_init write at %px\n", ptr);
1570 *ptr ^= 0xabcd1234;
1571 }
1572
1573 @@ -112,7 +112,7 @@ void lkdtm_WRITE_KERN(void)
1574 size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
1575 ptr = (unsigned char *)do_overwritten;
1576
1577 - pr_info("attempting bad %zu byte write at %p\n", size, ptr);
1578 + pr_info("attempting bad %zu byte write at %px\n", size, ptr);
1579 memcpy(ptr, (unsigned char *)do_nothing, size);
1580 flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
1581
1582 @@ -164,6 +164,11 @@ void lkdtm_EXEC_USERSPACE(void)
1583 vm_munmap(user_addr, PAGE_SIZE);
1584 }
1585
1586 +void lkdtm_EXEC_NULL(void)
1587 +{
1588 + execute_location(NULL, CODE_AS_IS);
1589 +}
1590 +
1591 void lkdtm_ACCESS_USERSPACE(void)
1592 {
1593 unsigned long user_addr, tmp = 0;
1594 @@ -185,16 +190,29 @@ void lkdtm_ACCESS_USERSPACE(void)
1595
1596 ptr = (unsigned long *)user_addr;
1597
1598 - pr_info("attempting bad read at %p\n", ptr);
1599 + pr_info("attempting bad read at %px\n", ptr);
1600 tmp = *ptr;
1601 tmp += 0xc0dec0de;
1602
1603 - pr_info("attempting bad write at %p\n", ptr);
1604 + pr_info("attempting bad write at %px\n", ptr);
1605 *ptr = tmp;
1606
1607 vm_munmap(user_addr, PAGE_SIZE);
1608 }
1609
1610 +void lkdtm_ACCESS_NULL(void)
1611 +{
1612 + unsigned long tmp;
1613 + unsigned long *ptr = (unsigned long *)NULL;
1614 +
1615 + pr_info("attempting bad read at %px\n", ptr);
1616 + tmp = *ptr;
1617 + tmp += 0xc0dec0de;
1618 +
1619 + pr_info("attempting bad write at %px\n", ptr);
1620 + *ptr = tmp;
1621 +}
1622 +
1623 void __init lkdtm_perms_init(void)
1624 {
1625 /* Make sure we can write to __ro_after_init values during __init */
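The %p-to-%px conversions above matter because modern kernels hash %p output to avoid leaking kernel addresses, while LKDTM deliberately wants the raw address in its crash logs; the new EXEC_NULL/ACCESS_NULL crashtypes then exercise NULL-pointer execute and read/write paths. A minimal sketch of the printk difference (hypothetical demo module, not part of the patch):

    #include <linux/kernel.h>
    #include <linux/module.h>

    static int __init px_demo_init(void)
    {
            static int target;

            pr_info("hashed: %p\n", &target);   /* per-boot hashed value  */
            pr_info("raw:    %px\n", &target);  /* actual kernel address  */
            return 0;
    }
    module_init(px_demo_init);
    MODULE_LICENSE("GPL");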
1626 diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
1627 index 9e68c3645e22..e6f14257a7d0 100644
1628 --- a/drivers/mmc/host/davinci_mmc.c
1629 +++ b/drivers/mmc/host/davinci_mmc.c
1630 @@ -1117,7 +1117,7 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
1631 {
1632 }
1633 #endif
1634 -static void __init init_mmcsd_host(struct mmc_davinci_host *host)
1635 +static void init_mmcsd_host(struct mmc_davinci_host *host)
1636 {
1637
1638 mmc_davinci_reset_ctrl(host, 1);
1639 diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
1640 index 09c774fe8853..854a55d4332a 100644
1641 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
1642 +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
1643 @@ -463,6 +463,8 @@ struct hnae3_ae_ops {
1644 int (*set_gro_en)(struct hnae3_handle *handle, int enable);
1645 u16 (*get_global_queue_id)(struct hnae3_handle *handle, u16 queue_id);
1646 void (*set_timer_task)(struct hnae3_handle *handle, bool enable);
1647 + int (*mac_connect_phy)(struct hnae3_handle *handle);
1648 + void (*mac_disconnect_phy)(struct hnae3_handle *handle);
1649 };
1650
1651 struct hnae3_dcb_ops {
1652 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1653 index d84c50068f66..40b69eaf2cb3 100644
1654 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1655 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1656 @@ -3519,6 +3519,25 @@ static int hns3_init_mac_addr(struct net_device *netdev, bool init)
1657 return ret;
1658 }
1659
1660 +static int hns3_init_phy(struct net_device *netdev)
1661 +{
1662 + struct hnae3_handle *h = hns3_get_handle(netdev);
1663 + int ret = 0;
1664 +
1665 + if (h->ae_algo->ops->mac_connect_phy)
1666 + ret = h->ae_algo->ops->mac_connect_phy(h);
1667 +
1668 + return ret;
1669 +}
1670 +
1671 +static void hns3_uninit_phy(struct net_device *netdev)
1672 +{
1673 + struct hnae3_handle *h = hns3_get_handle(netdev);
1674 +
1675 + if (h->ae_algo->ops->mac_disconnect_phy)
1676 + h->ae_algo->ops->mac_disconnect_phy(h);
1677 +}
1678 +
1679 static int hns3_restore_fd_rules(struct net_device *netdev)
1680 {
1681 struct hnae3_handle *h = hns3_get_handle(netdev);
1682 @@ -3627,6 +3646,10 @@ static int hns3_client_init(struct hnae3_handle *handle)
1683 goto out_init_ring_data;
1684 }
1685
1686 + ret = hns3_init_phy(netdev);
1687 + if (ret)
1688 + goto out_init_phy;
1689 +
1690 ret = register_netdev(netdev);
1691 if (ret) {
1692 dev_err(priv->dev, "probe register netdev fail!\n");
1693 @@ -3651,6 +3674,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
1694 return ret;
1695
1696 out_reg_netdev_fail:
1697 + hns3_uninit_phy(netdev);
1698 +out_init_phy:
1699 + hns3_uninit_all_ring(priv);
1700 out_init_ring_data:
1701 (void)hns3_nic_uninit_vector_data(priv);
1702 out_init_vector_data:
1703 @@ -3685,6 +3711,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
1704
1705 hns3_force_clear_all_rx_ring(handle);
1706
1707 + hns3_uninit_phy(netdev);
1708 +
1709 ret = hns3_nic_uninit_vector_data(priv);
1710 if (ret)
1711 netdev_err(netdev, "uninit vector error\n");
1712 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1713 index f7637c08bb3a..cb7571747af7 100644
1714 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1715 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1716 @@ -6959,16 +6959,6 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
1717 *tp_mdix = ETH_TP_MDI;
1718 }
1719
1720 -static int hclge_init_instance_hw(struct hclge_dev *hdev)
1721 -{
1722 - return hclge_mac_connect_phy(hdev);
1723 -}
1724 -
1725 -static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
1726 -{
1727 - hclge_mac_disconnect_phy(hdev);
1728 -}
1729 -
1730 static int hclge_init_client_instance(struct hnae3_client *client,
1731 struct hnae3_ae_dev *ae_dev)
1732 {
1733 @@ -6988,13 +6978,6 @@ static int hclge_init_client_instance(struct hnae3_client *client,
1734 if (ret)
1735 goto clear_nic;
1736
1737 - ret = hclge_init_instance_hw(hdev);
1738 - if (ret) {
1739 - client->ops->uninit_instance(&vport->nic,
1740 - 0);
1741 - goto clear_nic;
1742 - }
1743 -
1744 hnae3_set_client_init_flag(client, ae_dev, 1);
1745
1746 if (hdev->roce_client &&
1747 @@ -7079,7 +7062,6 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
1748 if (client->type == HNAE3_CLIENT_ROCE)
1749 return;
1750 if (hdev->nic_client && client->ops->uninit_instance) {
1751 - hclge_uninit_instance_hw(hdev);
1752 client->ops->uninit_instance(&vport->nic, 0);
1753 hdev->nic_client = NULL;
1754 vport->nic.client = NULL;
1755 @@ -8012,6 +7994,8 @@ static const struct hnae3_ae_ops hclge_ops = {
1756 .set_gro_en = hclge_gro_en,
1757 .get_global_queue_id = hclge_covert_handle_qid_global,
1758 .set_timer_task = hclge_set_timer_task,
1759 + .mac_connect_phy = hclge_mac_connect_phy,
1760 + .mac_disconnect_phy = hclge_mac_disconnect_phy,
1761 };
1762
1763 static struct hnae3_ae_algo ae_algo = {
1764 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
1765 index dabb8437f8dc..84f28785ba28 100644
1766 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
1767 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
1768 @@ -195,8 +195,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
1769 netdev_err(netdev, "failed to configure flow control.\n");
1770 }
1771
1772 -int hclge_mac_connect_phy(struct hclge_dev *hdev)
1773 +int hclge_mac_connect_phy(struct hnae3_handle *handle)
1774 {
1775 + struct hclge_vport *vport = hclge_get_vport(handle);
1776 + struct hclge_dev *hdev = vport->back;
1777 struct net_device *netdev = hdev->vport[0].nic.netdev;
1778 struct phy_device *phydev = hdev->hw.mac.phydev;
1779 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1780 @@ -229,8 +231,10 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
1781 return 0;
1782 }
1783
1784 -void hclge_mac_disconnect_phy(struct hclge_dev *hdev)
1785 +void hclge_mac_disconnect_phy(struct hnae3_handle *handle)
1786 {
1787 + struct hclge_vport *vport = hclge_get_vport(handle);
1788 + struct hclge_dev *hdev = vport->back;
1789 struct phy_device *phydev = hdev->hw.mac.phydev;
1790
1791 if (!phydev)
1792 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
1793 index 5fbf7dddb5d9..ef095d9c566f 100644
1794 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
1795 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
1796 @@ -5,8 +5,8 @@
1797 #define __HCLGE_MDIO_H
1798
1799 int hclge_mac_mdio_config(struct hclge_dev *hdev);
1800 -int hclge_mac_connect_phy(struct hclge_dev *hdev);
1801 -void hclge_mac_disconnect_phy(struct hclge_dev *hdev);
1802 +int hclge_mac_connect_phy(struct hnae3_handle *handle);
1803 +void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
1804 void hclge_mac_start_phy(struct hclge_dev *hdev);
1805 void hclge_mac_stop_phy(struct hclge_dev *hdev);
1806
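Taken together, the hns3 hunks above replace the PF-private hclge_init_instance_hw()/hclge_uninit_instance_hw() pair with optional mac_connect_phy/mac_disconnect_phy entries in hnae3_ae_ops, so the generic client code can drive PHY setup without knowing the backend. The guarded-callback idiom, roughly (sketch only; mirrors hns3_init_phy() above):

    static int client_connect_phy(struct hnae3_handle *h)
    {
            /* Optional hook: backends without a PHY simply leave it NULL. */
            if (h->ae_algo->ops->mac_connect_phy)
                    return h->ae_algo->ops->mac_connect_phy(h);
            return 0;
    }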
1807 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
1808 index c25acace7d91..e91005d0f20c 100644
1809 --- a/drivers/pci/pci.c
1810 +++ b/drivers/pci/pci.c
1811 @@ -1233,7 +1233,6 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
1812 pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1813 }
1814
1815 -
1816 static int pci_save_pcix_state(struct pci_dev *dev)
1817 {
1818 int pos;
1819 @@ -1270,6 +1269,45 @@ static void pci_restore_pcix_state(struct pci_dev *dev)
1820 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1821 }
1822
1823 +static void pci_save_ltr_state(struct pci_dev *dev)
1824 +{
1825 + int ltr;
1826 + struct pci_cap_saved_state *save_state;
1827 + u16 *cap;
1828 +
1829 + if (!pci_is_pcie(dev))
1830 + return;
1831 +
1832 + ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1833 + if (!ltr)
1834 + return;
1835 +
1836 + save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1837 + if (!save_state) {
1838 + pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
1839 + return;
1840 + }
1841 +
1842 + cap = (u16 *)&save_state->cap.data[0];
1843 + pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
1844 + pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
1845 +}
1846 +
1847 +static void pci_restore_ltr_state(struct pci_dev *dev)
1848 +{
1849 + struct pci_cap_saved_state *save_state;
1850 + int ltr;
1851 + u16 *cap;
1852 +
1853 + save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1854 + ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1855 + if (!save_state || !ltr)
1856 + return;
1857 +
1858 + cap = (u16 *)&save_state->cap.data[0];
1859 + pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
1860 + pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
1861 +}
1862
1863 /**
1864 * pci_save_state - save the PCI configuration space of a device before suspending
1865 @@ -1291,6 +1329,7 @@ int pci_save_state(struct pci_dev *dev)
1866 if (i != 0)
1867 return i;
1868
1869 + pci_save_ltr_state(dev);
1870 pci_save_dpc_state(dev);
1871 return pci_save_vc_state(dev);
1872 }
1873 @@ -1390,7 +1429,12 @@ void pci_restore_state(struct pci_dev *dev)
1874 if (!dev->state_saved)
1875 return;
1876
1877 - /* PCI Express register must be restored first */
1878 + /*
1879 + * Restore max latencies (in the LTR capability) before enabling
1880 + * LTR itself (in the PCIe capability).
1881 + */
1882 + pci_restore_ltr_state(dev);
1883 +
1884 pci_restore_pcie_state(dev);
1885 pci_restore_pasid_state(dev);
1886 pci_restore_pri_state(dev);
1887 @@ -2501,6 +2545,25 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev)
1888 pm_runtime_put_sync(parent);
1889 }
1890
1891 +static const struct dmi_system_id bridge_d3_blacklist[] = {
1892 +#ifdef CONFIG_X86
1893 + {
1894 + /*
1895 + * Gigabyte X299 root port is not marked as hotplug capable
1896 + * which allows Linux to power manage it. However, this
1897 + * confuses the BIOS SMI handler so don't power manage root
1898 + * ports on that system.
1899 + */
1900 + .ident = "X299 DESIGNARE EX-CF",
1901 + .matches = {
1902 + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
1903 + DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
1904 + },
1905 + },
1906 +#endif
1907 + { }
1908 +};
1909 +
1910 /**
1911 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
1912 * @bridge: Bridge to check
1913 @@ -2546,6 +2609,9 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
1914 if (bridge->is_hotplug_bridge)
1915 return false;
1916
1917 + if (dmi_check_system(bridge_d3_blacklist))
1918 + return false;
1919 +
1920 /*
1921 * It should be safe to put PCIe ports from 2015 or newer
1922 * to D3.
1923 @@ -2998,6 +3064,11 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1924 if (error)
1925 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
1926
1927 + error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
1928 + 2 * sizeof(u16));
1929 + if (error)
1930 + pci_err(dev, "unable to allocate suspend buffer for LTR\n");
1931 +
1932 pci_allocate_vc_save_buffers(dev);
1933 }
1934
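The ordering comment in pci_restore_state() is the core of this fix: the max snoop/no-snoop latencies live in the LTR extended capability, while the bit that re-enables LTR (PCI_EXP_DEVCTL2_LTR_EN) lives in the PCIe capability, so the latencies must be written back first. How the capability is located and read, mirroring pci_save_ltr_state() above (sketch; demo_read_ltr is a hypothetical helper):

    static void demo_read_ltr(struct pci_dev *dev, u16 *snoop, u16 *nosnoop)
    {
            int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);

            if (!pos)
                    return;         /* device has no LTR capability */
            pci_read_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, snoop);
            pci_read_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, nosnoop);
    }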
1935 diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
1936 index c37e74ee609d..a9cbe5be277b 100644
1937 --- a/drivers/platform/x86/intel_pmc_core.c
1938 +++ b/drivers/platform/x86/intel_pmc_core.c
1939 @@ -15,6 +15,7 @@
1940 #include <linux/bitfield.h>
1941 #include <linux/debugfs.h>
1942 #include <linux/delay.h>
1943 +#include <linux/dmi.h>
1944 #include <linux/io.h>
1945 #include <linux/module.h>
1946 #include <linux/pci.h>
1947 @@ -139,6 +140,7 @@ static const struct pmc_reg_map spt_reg_map = {
1948 .pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET,
1949 .pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT,
1950 .ltr_ignore_max = SPT_NUM_IP_IGN_ALLOWED,
1951 + .pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
1952 };
1953
1954 /* Cannonlake: PGD PFET Enable Ack Status Register(s) bitmap */
1955 @@ -751,6 +753,37 @@ static const struct pci_device_id pmc_pci_ids[] = {
1956 { 0, },
1957 };
1958
1959 +/*
1960 + * This quirk can be used on those platforms where
1961 + * the platform BIOS enforces the 24MHz crystal to shut down
1962 + * before PMC can assert SLP_S0#.
1963 + */
1964 +int quirk_xtal_ignore(const struct dmi_system_id *id)
1965 +{
1966 + struct pmc_dev *pmcdev = &pmc;
1967 + u32 value;
1968 +
1969 + value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
1970 + /* 24MHz Crystal Shutdown Qualification Disable */
1971 + value |= SPT_PMC_VRIC1_XTALSDQDIS;
1972 + /* Low Voltage Mode Enable */
1973 + value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
1974 + pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
1975 + return 0;
1976 +}
1977 +
1978 +static const struct dmi_system_id pmc_core_dmi_table[] = {
1979 + {
1980 + .callback = quirk_xtal_ignore,
1981 + .ident = "HP Elite x2 1013 G3",
1982 + .matches = {
1983 + DMI_MATCH(DMI_SYS_VENDOR, "HP"),
1984 + DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
1985 + },
1986 + },
1987 + {}
1988 +};
1989 +
1990 static int __init pmc_core_probe(void)
1991 {
1992 struct pmc_dev *pmcdev = &pmc;
1993 @@ -792,6 +825,7 @@ static int __init pmc_core_probe(void)
1994 return err;
1995 }
1996
1997 + dmi_check_system(pmc_core_dmi_table);
1998 pr_info(" initialized\n");
1999 return 0;
2000 }
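Both this PMC quirk and the bridge_d3_blacklist in pci.c above rely on the same DMI mechanism: a NULL-terminated table of vendor/product string matches that dmi_check_system() walks, invoking each matching entry's callback. A minimal pattern (sketch; the vendor/board strings are hypothetical):

    #include <linux/dmi.h>

    static int demo_quirk(const struct dmi_system_id *id)
    {
            pr_info("applying quirk for %s\n", id->ident);
            return 1;       /* non-zero stops the walk after this match */
    }

    static const struct dmi_system_id demo_quirks[] = {
            {
                    .callback = demo_quirk,
                    .ident = "Example Board",
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                            DMI_MATCH(DMI_PRODUCT_NAME, "Example Board"),
                    },
            },
            { }     /* terminator */
    };

    /* At probe time: dmi_check_system(demo_quirks); */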
2001 diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
2002 index 1a0104d2cbf0..9bc16d7d2917 100644
2003 --- a/drivers/platform/x86/intel_pmc_core.h
2004 +++ b/drivers/platform/x86/intel_pmc_core.h
2005 @@ -25,6 +25,7 @@
2006 #define SPT_PMC_MTPMC_OFFSET 0x20
2007 #define SPT_PMC_MFPMC_OFFSET 0x38
2008 #define SPT_PMC_LTR_IGNORE_OFFSET 0x30C
2009 +#define SPT_PMC_VRIC1_OFFSET 0x31c
2010 #define SPT_PMC_MPHY_CORE_STS_0 0x1143
2011 #define SPT_PMC_MPHY_CORE_STS_1 0x1142
2012 #define SPT_PMC_MPHY_COM_STS_0 0x1155
2013 @@ -135,6 +136,9 @@ enum ppfear_regs {
2014 #define SPT_PMC_BIT_MPHY_CMN_LANE2 BIT(2)
2015 #define SPT_PMC_BIT_MPHY_CMN_LANE3 BIT(3)
2016
2017 +#define SPT_PMC_VRIC1_SLPS0LVEN BIT(13)
2018 +#define SPT_PMC_VRIC1_XTALSDQDIS BIT(22)
2019 +
2020 /* Cannonlake Power Management Controller register offsets */
2021 #define CNP_PMC_SLPS0_DBG_OFFSET 0x10B4
2022 #define CNP_PMC_PM_CFG_OFFSET 0x1818
2023 @@ -217,6 +221,7 @@ struct pmc_reg_map {
2024 const int pm_read_disable_bit;
2025 const u32 slps0_dbg_offset;
2026 const u32 ltr_ignore_max;
2027 + const u32 pm_vric1_offset;
2028 };
2029
2030 /**
2031 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
2032 index 7e35ce2162d0..503fda4e7e8e 100644
2033 --- a/drivers/scsi/qla2xxx/qla_os.c
2034 +++ b/drivers/scsi/qla2xxx/qla_os.c
2035 @@ -1459,7 +1459,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
2036 goto eh_reset_failed;
2037 }
2038 err = 2;
2039 - if (do_reset(fcport, cmd->device->lun, blk_mq_rq_cpu(cmd->request) + 1)
2040 + if (do_reset(fcport, cmd->device->lun, 1)
2041 != QLA_SUCCESS) {
2042 ql_log(ql_log_warn, vha, 0x800c,
2043 "do_reset failed for cmd=%p.\n", cmd);
2044 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
2045 index 5a6e8e12701a..655ad26106e4 100644
2046 --- a/drivers/scsi/scsi_lib.c
2047 +++ b/drivers/scsi/scsi_lib.c
2048 @@ -598,9 +598,16 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
2049 if (!blk_rq_is_scsi(req)) {
2050 WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
2051 cmd->flags &= ~SCMD_INITIALIZED;
2052 - destroy_rcu_head(&cmd->rcu);
2053 }
2054
2055 + /*
2056 + * Calling rcu_barrier() is not necessary here because the
2057 + * SCSI error handler guarantees that the function called by
2058 + * call_rcu() has been called before scsi_end_request() is
2059 + * called.
2060 + */
2061 + destroy_rcu_head(&cmd->rcu);
2062 +
2063 /*
2064 * In the MQ case the command gets freed by __blk_mq_end_request,
2065 * so we have to do all cleanup that depends on it earlier.
2066 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
2067 index 0508831d6fb9..0a82e93566dc 100644
2068 --- a/drivers/scsi/scsi_transport_iscsi.c
2069 +++ b/drivers/scsi/scsi_transport_iscsi.c
2070 @@ -2200,6 +2200,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
2071 scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
2072 /* flush running scans then delete devices */
2073 flush_work(&session->scan_work);
2074 + /* flush running unbind operations */
2075 + flush_work(&session->unbind_work);
2076 __iscsi_unbind_session(&session->unbind_work);
2077
2078 /* hw iscsi may not have removed all connections from session */
2079 diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
2080 index 720760cd493f..ba39647a690c 100644
2081 --- a/drivers/thermal/broadcom/bcm2835_thermal.c
2082 +++ b/drivers/thermal/broadcom/bcm2835_thermal.c
2083 @@ -119,8 +119,7 @@ static const struct debugfs_reg32 bcm2835_thermal_regs[] = {
2084
2085 static void bcm2835_thermal_debugfs(struct platform_device *pdev)
2086 {
2087 - struct thermal_zone_device *tz = platform_get_drvdata(pdev);
2088 - struct bcm2835_thermal_data *data = tz->devdata;
2089 + struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
2090 struct debugfs_regset32 *regset;
2091
2092 data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL);
2093 @@ -266,7 +265,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
2094
2095 data->tz = tz;
2096
2097 - platform_set_drvdata(pdev, tz);
2098 + platform_set_drvdata(pdev, data);
2099
2100 /*
2101 * Thermal_zone doesn't enable hwmon as default,
2102 @@ -290,8 +289,8 @@ err_clk:
2103
2104 static int bcm2835_thermal_remove(struct platform_device *pdev)
2105 {
2106 - struct thermal_zone_device *tz = platform_get_drvdata(pdev);
2107 - struct bcm2835_thermal_data *data = tz->devdata;
2108 + struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
2109 + struct thermal_zone_device *tz = data->tz;
2110
2111 debugfs_remove_recursive(data->debugfsdir);
2112 thermal_zone_of_sensor_unregister(&pdev->dev, tz);
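The bcm2835_thermal change follows the usual driver-data rule: platform_set_drvdata() should store the driver's own state structure, and anything else (here the thermal_zone_device) should be reachable from it rather than stored directly. In outline (sketch; demo_data and the probe/remove pair are hypothetical):

    struct demo_data {
            struct thermal_zone_device *tz;
    };

    static int demo_probe(struct platform_device *pdev)
    {
            struct demo_data *data;

            data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
            if (!data)
                    return -ENOMEM;
            /* data->tz = thermal_zone_of_sensor_register(...); */
            platform_set_drvdata(pdev, data);   /* our state, not the zone */
            return 0;
    }

    static int demo_remove(struct platform_device *pdev)
    {
            struct demo_data *data = platform_get_drvdata(pdev);
            struct thermal_zone_device *tz = data->tz;  /* derived, not stored */

            thermal_zone_of_sensor_unregister(&pdev->dev, tz);
            return 0;
    }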
2113 diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
2114 index 61ca7ce3624e..5f3ed24e26ec 100644
2115 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
2116 +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
2117 @@ -22,6 +22,13 @@ enum int3400_thermal_uuid {
2118 INT3400_THERMAL_PASSIVE_1,
2119 INT3400_THERMAL_ACTIVE,
2120 INT3400_THERMAL_CRITICAL,
2121 + INT3400_THERMAL_ADAPTIVE_PERFORMANCE,
2122 + INT3400_THERMAL_EMERGENCY_CALL_MODE,
2123 + INT3400_THERMAL_PASSIVE_2,
2124 + INT3400_THERMAL_POWER_BOSS,
2125 + INT3400_THERMAL_VIRTUAL_SENSOR,
2126 + INT3400_THERMAL_COOLING_MODE,
2127 + INT3400_THERMAL_HARDWARE_DUTY_CYCLING,
2128 INT3400_THERMAL_MAXIMUM_UUID,
2129 };
2130
2131 @@ -29,6 +36,13 @@ static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
2132 "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
2133 "3A95C389-E4B8-4629-A526-C52C88626BAE",
2134 "97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
2135 + "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D",
2136 + "5349962F-71E6-431D-9AE8-0A635B710AEE",
2137 + "9E04115A-AE87-4D1C-9500-0F3E340BFE75",
2138 + "F5A35014-C209-46A4-993A-EB56DE7530A1",
2139 + "6ED722A7-9240-48A5-B479-31EEF723D7CF",
2140 + "16CAF1B7-DD38-40ED-B1C1-1B8A1913D531",
2141 + "BE84BABF-C4D4-403D-B495-3128FD44dAC1",
2142 };
2143
2144 struct int3400_thermal_priv {
2145 @@ -299,10 +313,9 @@ static int int3400_thermal_probe(struct platform_device *pdev)
2146
2147 platform_set_drvdata(pdev, priv);
2148
2149 - if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
2150 - int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
2151 - int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
2152 - }
2153 + int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
2154 + int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
2155 +
2156 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
2157 priv, &int3400_thermal_ops,
2158 &int3400_thermal_params, 0, 0);
2159 diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
2160 index 7571f7c2e7c9..ac7256b5f020 100644
2161 --- a/drivers/thermal/intel/intel_powerclamp.c
2162 +++ b/drivers/thermal/intel/intel_powerclamp.c
2163 @@ -101,7 +101,7 @@ struct powerclamp_worker_data {
2164 bool clamping;
2165 };
2166
2167 -static struct powerclamp_worker_data * __percpu worker_data;
2168 +static struct powerclamp_worker_data __percpu *worker_data;
2169 static struct thermal_cooling_device *cooling_dev;
2170 static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu
2171 * clamping kthread worker
2172 @@ -494,7 +494,7 @@ static void start_power_clamp_worker(unsigned long cpu)
2173 struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
2174 struct kthread_worker *worker;
2175
2176 - worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu);
2177 + worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu);
2178 if (IS_ERR(worker))
2179 return;
2180
2181 diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
2182 index 48eef552cba4..fc9399d9c082 100644
2183 --- a/drivers/thermal/samsung/exynos_tmu.c
2184 +++ b/drivers/thermal/samsung/exynos_tmu.c
2185 @@ -666,7 +666,7 @@ static int exynos_get_temp(void *p, int *temp)
2186 struct exynos_tmu_data *data = p;
2187 int value, ret = 0;
2188
2189 - if (!data || !data->tmu_read || !data->enabled)
2190 + if (!data || !data->tmu_read)
2191 return -EINVAL;
2192 else if (!data->enabled)
2193 /*
2194 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
2195 index 07cad54b84f1..e8e125acd712 100644
2196 --- a/fs/cifs/cifsfs.c
2197 +++ b/fs/cifs/cifsfs.c
2198 @@ -1010,7 +1010,7 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
2199 unsigned int xid;
2200 int rc;
2201
2202 - if (remap_flags & ~REMAP_FILE_ADVISORY)
2203 + if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
2204 return -EINVAL;
2205
2206 cifs_dbg(FYI, "clone range\n");
2207 diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
2208 index 924269cec135..e32c264e3adb 100644
2209 --- a/fs/cifs/smb2maperror.c
2210 +++ b/fs/cifs/smb2maperror.c
2211 @@ -1036,7 +1036,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
2212 {STATUS_UNFINISHED_CONTEXT_DELETED, -EIO,
2213 "STATUS_UNFINISHED_CONTEXT_DELETED"},
2214 {STATUS_NO_TGT_REPLY, -EIO, "STATUS_NO_TGT_REPLY"},
2215 - {STATUS_OBJECTID_NOT_FOUND, -EIO, "STATUS_OBJECTID_NOT_FOUND"},
2216 + /* Note that ENOATTR and ENODATA are the same errno */
2217 + {STATUS_OBJECTID_NOT_FOUND, -ENODATA, "STATUS_OBJECTID_NOT_FOUND"},
2218 {STATUS_NO_IP_ADDRESSES, -EIO, "STATUS_NO_IP_ADDRESSES"},
2219 {STATUS_WRONG_CREDENTIAL_HANDLE, -EIO,
2220 "STATUS_WRONG_CREDENTIAL_HANDLE"},
2221 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2222 index b29f711ab965..ea56b1cdbdde 100644
2223 --- a/fs/cifs/smb2ops.c
2224 +++ b/fs/cifs/smb2ops.c
2225 @@ -949,6 +949,16 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
2226 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
2227 memset(rsp_iov, 0, sizeof(rsp_iov));
2228
2229 + if (ses->server->ops->query_all_EAs) {
2230 + if (!ea_value) {
2231 + rc = ses->server->ops->query_all_EAs(xid, tcon, path,
2232 + ea_name, NULL, 0,
2233 + cifs_sb);
2234 + if (rc == -ENODATA)
2235 + goto sea_exit;
2236 + }
2237 + }
2238 +
2239 /* Open */
2240 memset(&open_iov, 0, sizeof(open_iov));
2241 rqst[0].rq_iov = open_iov;
2242 diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
2243 index 59be48206932..b49bc925fb4f 100644
2244 --- a/fs/cifs/trace.h
2245 +++ b/fs/cifs/trace.h
2246 @@ -378,19 +378,19 @@ DECLARE_EVENT_CLASS(smb3_tcon_class,
2247 __field(unsigned int, xid)
2248 __field(__u32, tid)
2249 __field(__u64, sesid)
2250 - __field(const char *, unc_name)
2251 + __string(name, unc_name)
2252 __field(int, rc)
2253 ),
2254 TP_fast_assign(
2255 __entry->xid = xid;
2256 __entry->tid = tid;
2257 __entry->sesid = sesid;
2258 - __entry->unc_name = unc_name;
2259 + __assign_str(name, unc_name);
2260 __entry->rc = rc;
2261 ),
2262 TP_printk("xid=%u sid=0x%llx tid=0x%x unc_name=%s rc=%d",
2263 __entry->xid, __entry->sesid, __entry->tid,
2264 - __entry->unc_name, __entry->rc)
2265 + __get_str(name), __entry->rc)
2266 )
2267
2268 #define DEFINE_SMB3_TCON_EVENT(name) \
2269 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
2270 index 2e76fb55d94a..5f24fdc140ad 100644
2271 --- a/fs/ext4/ioctl.c
2272 +++ b/fs/ext4/ioctl.c
2273 @@ -999,6 +999,13 @@ resizefs_out:
2274 if (!blk_queue_discard(q))
2275 return -EOPNOTSUPP;
2276
2277 + /*
2278 + * We haven't replayed the journal, so we cannot use our
2279 + * block-bitmap-guided storage zapping commands.
2280 + */
2281 + if (test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb))
2282 + return -EROFS;
2283 +
2284 if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2285 sizeof(range)))
2286 return -EFAULT;
2287 diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
2288 index 3d9b18505c0c..e7ae26e36c9c 100644
2289 --- a/fs/ext4/resize.c
2290 +++ b/fs/ext4/resize.c
2291 @@ -932,11 +932,18 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
2292 memcpy(n_group_desc, o_group_desc,
2293 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
2294 n_group_desc[gdb_num] = gdb_bh;
2295 +
2296 + BUFFER_TRACE(gdb_bh, "get_write_access");
2297 + err = ext4_journal_get_write_access(handle, gdb_bh);
2298 + if (err) {
2299 + kvfree(n_group_desc);
2300 + brelse(gdb_bh);
2301 + return err;
2302 + }
2303 +
2304 EXT4_SB(sb)->s_group_desc = n_group_desc;
2305 EXT4_SB(sb)->s_gdb_count++;
2306 kvfree(o_group_desc);
2307 - BUFFER_TRACE(gdb_bh, "get_write_access");
2308 - err = ext4_journal_get_write_access(handle, gdb_bh);
2309 return err;
2310 }
2311
2312 @@ -2073,6 +2080,10 @@ out:
2313 free_flex_gd(flex_gd);
2314 if (resize_inode != NULL)
2315 iput(resize_inode);
2316 - ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", n_blocks_count);
2317 + if (err)
2318 + ext4_warning(sb, "error (%d) occurred during "
2319 + "file system resize", err);
2320 + ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
2321 + ext4_blocks_count(es));
2322 return err;
2323 }
2324 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2325 index fb12d3c17c1b..b9bca7298f96 100644
2326 --- a/fs/ext4/super.c
2327 +++ b/fs/ext4/super.c
2328 @@ -430,6 +430,12 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
2329 spin_unlock(&sbi->s_md_lock);
2330 }
2331
2332 +static bool system_going_down(void)
2333 +{
2334 + return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
2335 + || system_state == SYSTEM_RESTART;
2336 +}
2337 +
2338 /* Deal with the reporting of failure conditions on a filesystem such as
2339 * inconsistencies detected or read IO failures.
2340 *
2341 @@ -460,7 +466,12 @@ static void ext4_handle_error(struct super_block *sb)
2342 if (journal)
2343 jbd2_journal_abort(journal, -EIO);
2344 }
2345 - if (test_opt(sb, ERRORS_RO)) {
2346 + /*
2347 + * We force ERRORS_RO behavior when system is rebooting. Otherwise we
2348 + * could panic during 'reboot -f' as the underlying device got already
2349 + * disabled.
2350 + */
2351 + if (test_opt(sb, ERRORS_RO) || system_going_down()) {
2352 ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
2353 /*
2354 * Make sure updated value of ->s_mount_flags will be visible
2355 @@ -468,8 +479,7 @@ static void ext4_handle_error(struct super_block *sb)
2356 */
2357 smp_wmb();
2358 sb->s_flags |= SB_RDONLY;
2359 - }
2360 - if (test_opt(sb, ERRORS_PANIC)) {
2361 + } else if (test_opt(sb, ERRORS_PANIC)) {
2362 if (EXT4_SB(sb)->s_journal &&
2363 !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
2364 return;
2365 diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
2366 index f955cd3e0677..7743fa83b895 100644
2367 --- a/fs/f2fs/checkpoint.c
2368 +++ b/fs/f2fs/checkpoint.c
2369 @@ -306,8 +306,9 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
2370 goto skip_write;
2371
2372 /* collect a number of dirty meta pages and write together */
2373 - if (wbc->for_kupdate ||
2374 - get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
2375 + if (wbc->sync_mode != WB_SYNC_ALL &&
2376 + get_pages(sbi, F2FS_DIRTY_META) <
2377 + nr_pages_to_skip(sbi, META))
2378 goto skip_write;
2379
2380 /* if locked failed, cp will flush dirty pages instead */
2381 @@ -405,7 +406,7 @@ static int f2fs_set_meta_page_dirty(struct page *page)
2382 if (!PageDirty(page)) {
2383 __set_page_dirty_nobuffers(page);
2384 inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
2385 - SetPagePrivate(page);
2386 + f2fs_set_page_private(page, 0);
2387 f2fs_trace_pid(page);
2388 return 1;
2389 }
2390 @@ -956,7 +957,7 @@ void f2fs_update_dirty_page(struct inode *inode, struct page *page)
2391 inode_inc_dirty_pages(inode);
2392 spin_unlock(&sbi->inode_lock[type]);
2393
2394 - SetPagePrivate(page);
2395 + f2fs_set_page_private(page, 0);
2396 f2fs_trace_pid(page);
2397 }
2398
2399 diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
2400 index f91d8630c9a2..c99aab23efea 100644
2401 --- a/fs/f2fs/data.c
2402 +++ b/fs/f2fs/data.c
2403 @@ -2711,8 +2711,7 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
2404 if (IS_ATOMIC_WRITTEN_PAGE(page))
2405 return f2fs_drop_inmem_page(inode, page);
2406
2407 - set_page_private(page, 0);
2408 - ClearPagePrivate(page);
2409 + f2fs_clear_page_private(page);
2410 }
2411
2412 int f2fs_release_page(struct page *page, gfp_t wait)
2413 @@ -2726,8 +2725,7 @@ int f2fs_release_page(struct page *page, gfp_t wait)
2414 return 0;
2415
2416 clear_cold_data(page);
2417 - set_page_private(page, 0);
2418 - ClearPagePrivate(page);
2419 + f2fs_clear_page_private(page);
2420 return 1;
2421 }
2422
2423 @@ -2795,12 +2793,8 @@ int f2fs_migrate_page(struct address_space *mapping,
2424 return -EAGAIN;
2425 }
2426
2427 - /*
2428 - * A reference is expected if PagePrivate set when move mapping,
2429 - * however F2FS breaks this for maintaining dirty page counts when
2430 - * truncating pages. So here adjusting the 'extra_count' make it work.
2431 - */
2432 - extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
2433 + /* one extra reference was held for atomic_write page */
2434 + extra_count = atomic_written ? 1 : 0;
2435 rc = migrate_page_move_mapping(mapping, newpage,
2436 page, mode, extra_count);
2437 if (rc != MIGRATEPAGE_SUCCESS) {
2438 @@ -2821,9 +2815,10 @@ int f2fs_migrate_page(struct address_space *mapping,
2439 get_page(newpage);
2440 }
2441
2442 - if (PagePrivate(page))
2443 - SetPagePrivate(newpage);
2444 - set_page_private(newpage, page_private(page));
2445 + if (PagePrivate(page)) {
2446 + f2fs_set_page_private(newpage, page_private(page));
2447 + f2fs_clear_page_private(page);
2448 + }
2449
2450 if (mode != MIGRATE_SYNC_NO_COPY)
2451 migrate_page_copy(newpage, page);
2452 diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
2453 index 50d0d36280fa..99a6063c2327 100644
2454 --- a/fs/f2fs/dir.c
2455 +++ b/fs/f2fs/dir.c
2456 @@ -728,7 +728,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
2457 !f2fs_truncate_hole(dir, page->index, page->index + 1)) {
2458 f2fs_clear_page_cache_dirty_tag(page);
2459 clear_page_dirty_for_io(page);
2460 - ClearPagePrivate(page);
2461 + f2fs_clear_page_private(page);
2462 ClearPageUptodate(page);
2463 clear_cold_data(page);
2464 inode_dec_dirty_pages(dir);
2465 diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
2466 index 279bc00489cc..6d9186a6528c 100644
2467 --- a/fs/f2fs/f2fs.h
2468 +++ b/fs/f2fs/f2fs.h
2469 @@ -2825,6 +2825,27 @@ static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi,
2470 return true;
2471 }
2472
2473 +static inline void f2fs_set_page_private(struct page *page,
2474 + unsigned long data)
2475 +{
2476 + if (PagePrivate(page))
2477 + return;
2478 +
2479 + get_page(page);
2480 + SetPagePrivate(page);
2481 + set_page_private(page, data);
2482 +}
2483 +
2484 +static inline void f2fs_clear_page_private(struct page *page)
2485 +{
2486 + if (!PagePrivate(page))
2487 + return;
2488 +
2489 + set_page_private(page, 0);
2490 + ClearPagePrivate(page);
2491 + f2fs_put_page(page, 0);
2492 +}
2493 +
2494 /*
2495 * file.c
2496 */
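The new helpers centralize an invariant the rest of the f2fs hunks depend on: PagePrivate now always travels with one extra page reference, so the VM's page_has_private() accounting in migration and truncation needs no f2fs-specific fudging (extra_count in f2fs_migrate_page() only covers atomic-write pages now). The pairing, in miniature (sketch):

    f2fs_set_page_private(page, 0);    /* get_page() + SetPagePrivate()            */
    /* ... page is tracked as dirty meta/node/data ... */
    f2fs_clear_page_private(page);     /* ClearPagePrivate() + f2fs_put_page(page, 0) */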
2497 diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
2498 index ae2b45e75847..30ed43bce110 100644
2499 --- a/fs/f2fs/file.c
2500 +++ b/fs/f2fs/file.c
2501 @@ -768,7 +768,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
2502 {
2503 struct inode *inode = d_inode(dentry);
2504 int err;
2505 - bool size_changed = false;
2506
2507 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2508 return -EIO;
2509 @@ -843,8 +842,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
2510 down_write(&F2FS_I(inode)->i_sem);
2511 F2FS_I(inode)->last_disk_size = i_size_read(inode);
2512 up_write(&F2FS_I(inode)->i_sem);
2513 -
2514 - size_changed = true;
2515 }
2516
2517 __setattr_copy(inode, attr);
2518 @@ -858,7 +855,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
2519 }
2520
2521 /* file size may changed here */
2522 - f2fs_mark_inode_dirty_sync(inode, size_changed);
2523 + f2fs_mark_inode_dirty_sync(inode, true);
2524
2525 /* inode change will produce dirty node pages flushed by checkpoint */
2526 f2fs_balance_fs(F2FS_I_SB(inode), true);
2527 diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
2528 index 4f450e573312..3f99ab288695 100644
2529 --- a/fs/f2fs/node.c
2530 +++ b/fs/f2fs/node.c
2531 @@ -1920,7 +1920,9 @@ static int f2fs_write_node_pages(struct address_space *mapping,
2532 f2fs_balance_fs_bg(sbi);
2533
2534 /* collect a number of dirty node pages and write together */
2535 - if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
2536 + if (wbc->sync_mode != WB_SYNC_ALL &&
2537 + get_pages(sbi, F2FS_DIRTY_NODES) <
2538 + nr_pages_to_skip(sbi, NODE))
2539 goto skip_write;
2540
2541 if (wbc->sync_mode == WB_SYNC_ALL)
2542 @@ -1959,7 +1961,7 @@ static int f2fs_set_node_page_dirty(struct page *page)
2543 if (!PageDirty(page)) {
2544 __set_page_dirty_nobuffers(page);
2545 inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
2546 - SetPagePrivate(page);
2547 + f2fs_set_page_private(page, 0);
2548 f2fs_trace_pid(page);
2549 return 1;
2550 }
2551 diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
2552 index e1b1d390b329..b6c8b0696ef6 100644
2553 --- a/fs/f2fs/segment.c
2554 +++ b/fs/f2fs/segment.c
2555 @@ -191,8 +191,7 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
2556
2557 f2fs_trace_pid(page);
2558
2559 - set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
2560 - SetPagePrivate(page);
2561 + f2fs_set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
2562
2563 new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
2564
2565 @@ -280,8 +279,7 @@ next:
2566 ClearPageUptodate(page);
2567 clear_cold_data(page);
2568 }
2569 - set_page_private(page, 0);
2570 - ClearPagePrivate(page);
2571 + f2fs_clear_page_private(page);
2572 f2fs_put_page(page, 1);
2573
2574 list_del(&cur->list);
2575 @@ -370,8 +368,7 @@ void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
2576 kmem_cache_free(inmem_entry_slab, cur);
2577
2578 ClearPageUptodate(page);
2579 - set_page_private(page, 0);
2580 - ClearPagePrivate(page);
2581 + f2fs_clear_page_private(page);
2582 f2fs_put_page(page, 0);
2583
2584 trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
2585 diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
2586 index 5892fa3c885f..144ffba3ec5a 100644
2587 --- a/fs/f2fs/super.c
2588 +++ b/fs/f2fs/super.c
2589 @@ -1460,9 +1460,16 @@ static int f2fs_enable_quotas(struct super_block *sb);
2590
2591 static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
2592 {
2593 + unsigned int s_flags = sbi->sb->s_flags;
2594 struct cp_control cpc;
2595 - int err;
2596 + int err = 0;
2597 + int ret;
2598
2599 + if (s_flags & SB_RDONLY) {
2600 + f2fs_msg(sbi->sb, KERN_ERR,
2601 + "checkpoint=disable on readonly fs");
2602 + return -EINVAL;
2603 + }
2604 sbi->sb->s_flags |= SB_ACTIVE;
2605
2606 f2fs_update_time(sbi, DISABLE_TIME);
2607 @@ -1470,18 +1477,24 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
2608 while (!f2fs_time_over(sbi, DISABLE_TIME)) {
2609 mutex_lock(&sbi->gc_mutex);
2610 err = f2fs_gc(sbi, true, false, NULL_SEGNO);
2611 - if (err == -ENODATA)
2612 + if (err == -ENODATA) {
2613 + err = 0;
2614 break;
2615 + }
2616 if (err && err != -EAGAIN)
2617 - return err;
2618 + break;
2619 }
2620
2621 - err = sync_filesystem(sbi->sb);
2622 - if (err)
2623 - return err;
2624 + ret = sync_filesystem(sbi->sb);
2625 + if (ret || err) {
2626 + err = ret ? ret : err;
2627 + goto restore_flag;
2628 + }
2629
2630 - if (f2fs_disable_cp_again(sbi))
2631 - return -EAGAIN;
2632 + if (f2fs_disable_cp_again(sbi)) {
2633 + err = -EAGAIN;
2634 + goto restore_flag;
2635 + }
2636
2637 mutex_lock(&sbi->gc_mutex);
2638 cpc.reason = CP_PAUSE;
2639 @@ -1490,7 +1503,9 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
2640
2641 sbi->unusable_block_count = 0;
2642 mutex_unlock(&sbi->gc_mutex);
2643 - return 0;
2644 +restore_flag:
2645 + sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
2646 + return err;
2647 }
2648
2649 static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
2650 @@ -3359,7 +3374,7 @@ skip_recovery:
2651 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2652 err = f2fs_disable_checkpoint(sbi);
2653 if (err)
2654 - goto free_meta;
2655 + goto sync_free_meta;
2656 } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
2657 f2fs_enable_checkpoint(sbi);
2658 }
2659 @@ -3372,7 +3387,7 @@ skip_recovery:
2660 /* After POR, we can run background GC thread.*/
2661 err = f2fs_start_gc_thread(sbi);
2662 if (err)
2663 - goto free_meta;
2664 + goto sync_free_meta;
2665 }
2666 kvfree(options);
2667
2668 @@ -3394,6 +3409,11 @@ skip_recovery:
2669 f2fs_update_time(sbi, REQ_TIME);
2670 return 0;
2671
2672 +sync_free_meta:
2673 + /* safe to flush all the data */
2674 + sync_filesystem(sbi->sb);
2675 + retry = false;
2676 +
2677 free_meta:
2678 #ifdef CONFIG_QUOTA
2679 f2fs_truncate_quota_inode_pages(sb);
2680 @@ -3407,6 +3427,8 @@ free_meta:
2681 * falls into an infinite loop in f2fs_sync_meta_pages().
2682 */
2683 truncate_inode_pages_final(META_MAPPING(sbi));
2684 + /* evict some inodes being cached by GC */
2685 + evict_inodes(sb);
2686 f2fs_unregister_sysfs(sbi);
2687 free_root_inode:
2688 dput(sb->s_root);
2689 diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
2690 index 73b92985198b..6b6fe6431a64 100644
2691 --- a/fs/f2fs/xattr.c
2692 +++ b/fs/f2fs/xattr.c
2693 @@ -347,7 +347,7 @@ check:
2694 *base_addr = txattr_addr;
2695 return 0;
2696 out:
2697 - kzfree(txattr_addr);
2698 + kvfree(txattr_addr);
2699 return err;
2700 }
2701
2702 @@ -390,7 +390,7 @@ static int read_all_xattrs(struct inode *inode, struct page *ipage,
2703 *base_addr = txattr_addr;
2704 return 0;
2705 fail:
2706 - kzfree(txattr_addr);
2707 + kvfree(txattr_addr);
2708 return err;
2709 }
2710
2711 @@ -517,7 +517,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
2712 }
2713 error = size;
2714 out:
2715 - kzfree(base_addr);
2716 + kvfree(base_addr);
2717 return error;
2718 }
2719
2720 @@ -563,7 +563,7 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
2721 }
2722 error = buffer_size - rest;
2723 cleanup:
2724 - kzfree(base_addr);
2725 + kvfree(base_addr);
2726 return error;
2727 }
2728
2729 @@ -694,7 +694,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
2730 if (!error && S_ISDIR(inode->i_mode))
2731 set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
2732 exit:
2733 - kzfree(base_addr);
2734 + kvfree(base_addr);
2735 return error;
2736 }
2737
2738 diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
2739 index 798f1253141a..3b7b8e95c98a 100644
2740 --- a/fs/notify/inotify/inotify_user.c
2741 +++ b/fs/notify/inotify/inotify_user.c
2742 @@ -519,8 +519,10 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
2743 fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
2744 if (!fsn_mark)
2745 return -ENOENT;
2746 - else if (create)
2747 - return -EEXIST;
2748 + else if (create) {
2749 + ret = -EEXIST;
2750 + goto out;
2751 + }
2752
2753 i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
2754
2755 @@ -548,6 +550,7 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
2756 /* return the wd */
2757 ret = i_mark->wd;
2758
2759 +out:
2760 /* match the get from fsnotify_find_mark() */
2761 fsnotify_put_mark(fsn_mark);
2762
2763 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
2764 index bbcc185062bb..d29d869abec1 100644
2765 --- a/fs/proc/kcore.c
2766 +++ b/fs/proc/kcore.c
2767 @@ -54,6 +54,28 @@ static LIST_HEAD(kclist_head);
2768 static DECLARE_RWSEM(kclist_lock);
2769 static int kcore_need_update = 1;
2770
2771 +/*
2772 + * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
2773 + * Same as oldmem_pfn_is_ram in vmcore
2774 + */
2775 +static int (*mem_pfn_is_ram)(unsigned long pfn);
2776 +
2777 +int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
2778 +{
2779 + if (mem_pfn_is_ram)
2780 + return -EBUSY;
2781 + mem_pfn_is_ram = fn;
2782 + return 0;
2783 +}
2784 +
2785 +static int pfn_is_ram(unsigned long pfn)
2786 +{
2787 + if (mem_pfn_is_ram)
2788 + return mem_pfn_is_ram(pfn);
2789 + else
2790 + return 1;
2791 +}
2792 +
2793 /* This doesn't grab kclist_lock, so it should only be used at init time. */
2794 void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
2795 int type)
2796 @@ -465,6 +487,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
2797 goto out;
2798 }
2799 m = NULL; /* skip the list anchor */
2800 + } else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) {
2801 + if (clear_user(buffer, tsz)) {
2802 + ret = -EFAULT;
2803 + goto out;
2804 + }
2805 } else if (m->type == KCORE_VMALLOC) {
2806 vread(buf, (char *)start, tsz);
2807 /* we have to zero-fill user buffer even if no read */
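register_mem_pfn_is_ram() is a single-slot callback registration: the first caller wins, later callers get -EBUSY, and read_kcore() conservatively treats every pfn as RAM when nothing has registered. A registrant would look roughly like this (hypothetical caller, e.g. a balloon-style driver that knows which pfns are backed):

    #include <linux/kcore.h>

    static int demo_pfn_is_ram(unsigned long pfn)
    {
            /* > 0: RAM, 0: not RAM, < 0: error -- same contract as vmcore */
            return 1;
    }

    static int __init demo_init(void)
    {
            return register_mem_pfn_is_ram(demo_pfn_is_ram);
    }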
2808 diff --git a/include/linux/atalk.h b/include/linux/atalk.h
2809 index 840cf92307ba..d5cfc0b15b76 100644
2810 --- a/include/linux/atalk.h
2811 +++ b/include/linux/atalk.h
2812 @@ -158,7 +158,7 @@ extern int sysctl_aarp_retransmit_limit;
2813 extern int sysctl_aarp_resolve_time;
2814
2815 #ifdef CONFIG_SYSCTL
2816 -extern void atalk_register_sysctl(void);
2817 +extern int atalk_register_sysctl(void);
2818 extern void atalk_unregister_sysctl(void);
2819 #else
2820 static inline int atalk_register_sysctl(void)
2821 diff --git a/include/linux/kcore.h b/include/linux/kcore.h
2822 index 8c3f8c14eeaa..c843f4a9c512 100644
2823 --- a/include/linux/kcore.h
2824 +++ b/include/linux/kcore.h
2825 @@ -44,6 +44,8 @@ void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
2826 m->vaddr = (unsigned long)vaddr;
2827 kclist_add(m, addr, sz, KCORE_REMAP);
2828 }
2829 +
2830 +extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
2831 #else
2832 static inline
2833 void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
2834 diff --git a/include/linux/swap.h b/include/linux/swap.h
2835 index 622025ac1461..f1146ed21062 100644
2836 --- a/include/linux/swap.h
2837 +++ b/include/linux/swap.h
2838 @@ -157,9 +157,9 @@ struct swap_extent {
2839 /*
2840 * Max bad pages in the new format..
2841 */
2842 -#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
2843 #define MAX_SWAP_BADPAGES \
2844 - ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
2845 + ((offsetof(union swap_header, magic.magic) - \
2846 + offsetof(union swap_header, info.badpages)) / sizeof(int))
2847
2848 enum {
2849 SWP_USED = (1 << 0), /* is slot in swap_info[] used? */
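The swap.h hunk swaps a hand-rolled null-pointer offset macro for offsetof(), which computes the same compile-time constant without dereferencing a null pointer in a constant expression (and therefore without tripping UBSAN). The equivalence, on a simplified layout (sketch; not the real swap_header fields):

    #include <stddef.h>

    union demo_header {
            struct { char reserved[1024]; char magic[10]; } magic;
            struct { char bootbits[1024]; int version; int badpages[1]; } info;
    };

    /* Old style: ((unsigned long)&((union demo_header *)0)->magic.magic)
     * New style: offsetof(union demo_header, magic.magic)
     * Both evaluate to 1024 here; only offsetof is well-defined C.
     */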
2850 diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
2851 index 5b50fe4906d2..7b60fd186cfe 100644
2852 --- a/include/trace/events/rxrpc.h
2853 +++ b/include/trace/events/rxrpc.h
2854 @@ -76,6 +76,7 @@ enum rxrpc_client_trace {
2855 rxrpc_client_chan_disconnect,
2856 rxrpc_client_chan_pass,
2857 rxrpc_client_chan_unstarted,
2858 + rxrpc_client_chan_wait_failed,
2859 rxrpc_client_cleanup,
2860 rxrpc_client_count,
2861 rxrpc_client_discard,
2862 @@ -276,6 +277,7 @@ enum rxrpc_tx_point {
2863 EM(rxrpc_client_chan_disconnect, "ChDisc") \
2864 EM(rxrpc_client_chan_pass, "ChPass") \
2865 EM(rxrpc_client_chan_unstarted, "ChUnst") \
2866 + EM(rxrpc_client_chan_wait_failed, "ChWtFl") \
2867 EM(rxrpc_client_cleanup, "Clean ") \
2868 EM(rxrpc_client_count, "Count ") \
2869 EM(rxrpc_client_discard, "Discar") \
2870 diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
2871 index 2ada5e21dfa6..4a8f390a2b82 100644
2872 --- a/kernel/bpf/inode.c
2873 +++ b/kernel/bpf/inode.c
2874 @@ -554,19 +554,6 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ
2875 }
2876 EXPORT_SYMBOL(bpf_prog_get_type_path);
2877
2878 -static void bpf_evict_inode(struct inode *inode)
2879 -{
2880 - enum bpf_type type;
2881 -
2882 - truncate_inode_pages_final(&inode->i_data);
2883 - clear_inode(inode);
2884 -
2885 - if (S_ISLNK(inode->i_mode))
2886 - kfree(inode->i_link);
2887 - if (!bpf_inode_type(inode, &type))
2888 - bpf_any_put(inode->i_private, type);
2889 -}
2890 -
2891 /*
2892 * Display the mount options in /proc/mounts.
2893 */
2894 @@ -579,11 +566,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
2895 return 0;
2896 }
2897
2898 +static void bpf_destroy_inode_deferred(struct rcu_head *head)
2899 +{
2900 + struct inode *inode = container_of(head, struct inode, i_rcu);
2901 + enum bpf_type type;
2902 +
2903 + if (S_ISLNK(inode->i_mode))
2904 + kfree(inode->i_link);
2905 + if (!bpf_inode_type(inode, &type))
2906 + bpf_any_put(inode->i_private, type);
2907 + free_inode_nonrcu(inode);
2908 +}
2909 +
2910 +static void bpf_destroy_inode(struct inode *inode)
2911 +{
2912 + call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred);
2913 +}
2914 +
2915 static const struct super_operations bpf_super_ops = {
2916 .statfs = simple_statfs,
2917 .drop_inode = generic_delete_inode,
2918 .show_options = bpf_show_options,
2919 - .evict_inode = bpf_evict_inode,
2920 + .destroy_inode = bpf_destroy_inode,
2921 };
2922
2923 enum {
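Moving bpffs from .evict_inode to .destroy_inode with call_rcu() defers the final free until an RCU grace period has elapsed, so an RCU-walk path lookup still dereferencing inode->i_link cannot race with the kfree(). The deferred-free shape is the generic one (sketch; struct demo is hypothetical):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct demo {
            struct rcu_head rcu;
            char *payload;
    };

    static void demo_free_rcu(struct rcu_head *head)
    {
            struct demo *d = container_of(head, struct demo, rcu);

            kfree(d->payload);
            kfree(d);
    }

    static void demo_release(struct demo *d)
    {
            call_rcu(&d->rcu, demo_free_rcu);   /* freed after a grace period */
    }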
2924 diff --git a/kernel/events/core.c b/kernel/events/core.c
2925 index 26d6edab051a..2e2305a81047 100644
2926 --- a/kernel/events/core.c
2927 +++ b/kernel/events/core.c
2928 @@ -7178,6 +7178,7 @@ static void perf_event_mmap_output(struct perf_event *event,
2929 struct perf_output_handle handle;
2930 struct perf_sample_data sample;
2931 int size = mmap_event->event_id.header.size;
2932 + u32 type = mmap_event->event_id.header.type;
2933 int ret;
2934
2935 if (!perf_event_mmap_match(event, data))
2936 @@ -7221,6 +7222,7 @@ static void perf_event_mmap_output(struct perf_event *event,
2937 perf_output_end(&handle);
2938 out:
2939 mmap_event->event_id.header.size = size;
2940 + mmap_event->event_id.header.type = type;
2941 }
2942
2943 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
2944 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2945 index 01a2489de94e..62cc29364fba 100644
2946 --- a/kernel/sched/core.c
2947 +++ b/kernel/sched/core.c
2948 @@ -6942,7 +6942,7 @@ static int __maybe_unused cpu_period_quota_parse(char *buf,
2949 {
2950 char tok[21]; /* U64_MAX */
2951
2952 - if (!sscanf(buf, "%s %llu", tok, periodp))
2953 + if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
2954 return -EINVAL;
2955
2956 *periodp *= NSEC_PER_USEC;
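The substantive fix in this one-liner is the field width: an unbounded %s could overflow the 21-byte tok buffer, while "%20s" reserves the final byte for the terminating NUL. The return check is also spelled `< 1` to make "at least the token parsed" explicit (sscanf() returns the number of fields converted). For instance (sketch):

    char tok[21];   /* large enough for a U64_MAX string, plus NUL */
    u64 period;

    /* A return value below 1 means not even the token was parsed. */
    if (sscanf(buf, "%20s %llu", tok, &period) < 1)
            return -EINVAL;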
2957 diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
2958 index 033ec7c45f13..1ccf77f6d346 100644
2959 --- a/kernel/sched/cpufreq_schedutil.c
2960 +++ b/kernel/sched/cpufreq_schedutil.c
2961 @@ -48,10 +48,10 @@ struct sugov_cpu {
2962
2963 bool iowait_boost_pending;
2964 unsigned int iowait_boost;
2965 - unsigned int iowait_boost_max;
2966 u64 last_update;
2967
2968 unsigned long bw_dl;
2969 + unsigned long min;
2970 unsigned long max;
2971
2972 /* The field below is for single-CPU policies only: */
2973 @@ -303,8 +303,7 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
2974 if (delta_ns <= TICK_NSEC)
2975 return false;
2976
2977 - sg_cpu->iowait_boost = set_iowait_boost
2978 - ? sg_cpu->sg_policy->policy->min : 0;
2979 + sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
2980 sg_cpu->iowait_boost_pending = set_iowait_boost;
2981
2982 return true;
2983 @@ -344,14 +343,13 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
2984
2985 /* Double the boost at each request */
2986 if (sg_cpu->iowait_boost) {
2987 - sg_cpu->iowait_boost <<= 1;
2988 - if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
2989 - sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
2990 + sg_cpu->iowait_boost =
2991 + min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
2992 return;
2993 }
2994
2995 /* First wakeup after IO: start with minimum boost */
2996 - sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
2997 + sg_cpu->iowait_boost = sg_cpu->min;
2998 }
2999
3000 /**
3001 @@ -373,47 +371,38 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
3002 * This mechanism is designed to boost tasks that wait on IO frequently, while
3003 * being more conservative on tasks that only do sporadic IO operations.
3004 */
3005 -static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
3006 - unsigned long *util, unsigned long *max)
3007 +static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
3008 + unsigned long util, unsigned long max)
3009 {
3010 - unsigned int boost_util, boost_max;
3011 + unsigned long boost;
3012
3013 /* No boost currently required */
3014 if (!sg_cpu->iowait_boost)
3015 - return;
3016 + return util;
3017
3018 /* Reset boost if the CPU appears to have been idle enough */
3019 if (sugov_iowait_reset(sg_cpu, time, false))
3020 - return;
3021 + return util;
3022
3023 - /*
3024 - * An IO waiting task has just woken up:
3025 - * allow to further double the boost value
3026 - */
3027 - if (sg_cpu->iowait_boost_pending) {
3028 - sg_cpu->iowait_boost_pending = false;
3029 - } else {
3030 + if (!sg_cpu->iowait_boost_pending) {
3031 /*
3032 - * Otherwise: reduce the boost value and disable it when we
3033 - * reach the minimum.
3034 + * No boost pending; reduce the boost value.
3035 */
3036 sg_cpu->iowait_boost >>= 1;
3037 - if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
3038 + if (sg_cpu->iowait_boost < sg_cpu->min) {
3039 sg_cpu->iowait_boost = 0;
3040 - return;
3041 + return util;
3042 }
3043 }
3044
3045 + sg_cpu->iowait_boost_pending = false;
3046 +
3047 /*
3048 - * Apply the current boost value: a CPU is boosted only if its current
3049 - * utilization is smaller then the current IO boost level.
3050 + * @util is already in capacity scale; convert iowait_boost
3051 + * into the same scale so we can compare.
3052 */
3053 - boost_util = sg_cpu->iowait_boost;
3054 - boost_max = sg_cpu->iowait_boost_max;
3055 - if (*util * boost_max < *max * boost_util) {
3056 - *util = boost_util;
3057 - *max = boost_max;
3058 - }
3059 + boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
3060 + return max(boost, util);
3061 }
3062
3063 #ifdef CONFIG_NO_HZ_COMMON
3064 @@ -460,7 +449,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
3065
3066 util = sugov_get_util(sg_cpu);
3067 max = sg_cpu->max;
3068 - sugov_iowait_apply(sg_cpu, time, &util, &max);
3069 + util = sugov_iowait_apply(sg_cpu, time, util, max);
3070 next_f = get_next_freq(sg_policy, util, max);
3071 /*
3072 * Do not reduce the frequency if the CPU has not been idle
3073 @@ -500,7 +489,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
3074
3075 j_util = sugov_get_util(j_sg_cpu);
3076 j_max = j_sg_cpu->max;
3077 - sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
3078 + j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);
3079
3080 if (j_util * max > j_max * util) {
3081 util = j_util;
3082 @@ -837,7 +826,9 @@ static int sugov_start(struct cpufreq_policy *policy)
3083 memset(sg_cpu, 0, sizeof(*sg_cpu));
3084 sg_cpu->cpu = cpu;
3085 sg_cpu->sg_policy = sg_policy;
3086 - sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
3087 + sg_cpu->min =
3088 + (SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
3089 + policy->cpuinfo.max_freq;
3090 }
3091
3092 for_each_cpu(cpu, policy->cpus) {
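After this rework the boost value lives on the capacity scale, clamped to [sg_cpu->min, SCHED_CAPACITY_SCALE], instead of the frequency scale, so applying it reduces to one rescale and a max(). The new arithmetic, worked through (sketch; the numbers are illustrative):

    /* Say max (CPU capacity) = 1024, util = 300, iowait_boost = 512:
     *   boost  = (512 * 1024) >> SCHED_CAPACITY_SHIFT = 512
     *   result = max(512, 300) = 512          -> boost wins this round
     * On a little CPU with max = 446:
     *   boost  = (512 * 446) >> 10 = 223      -> boost scales with capacity
     */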
3093 diff --git a/lib/div64.c b/lib/div64.c
3094 index 01c8602bb6ff..ee146bb4c558 100644
3095 --- a/lib/div64.c
3096 +++ b/lib/div64.c
3097 @@ -109,7 +109,7 @@ u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
3098 quot = div_u64_rem(dividend, divisor, &rem32);
3099 *remainder = rem32;
3100 } else {
3101 - int n = 1 + fls(high);
3102 + int n = fls(high);
3103 quot = div_u64(dividend >> n, divisor >> n);
3104
3105 if (quot != 0)
3106 @@ -147,7 +147,7 @@ u64 div64_u64(u64 dividend, u64 divisor)
3107 if (high == 0) {
3108 quot = div_u64(dividend, divisor);
3109 } else {
3110 - int n = 1 + fls(high);
3111 + int n = fls(high);
3112 quot = div_u64(dividend >> n, divisor >> n);
3113
3114 if (quot != 0)
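The div64 change is a genuine off-by-one: fls() already returns a 1-based bit position, so shifting by fls(high) is the smallest shift that squeezes the divisor into 32 bits (its top bit lands at bit 31), which is all div_u64() needs. Shifting one bit further than necessary roughly doubles the truncation error of the estimated quotient, which can exceed what the single quot--/quot++ correction is able to repair, yielding an off-by-one quotient for some inputs (sketch of the ranges):

    /* With n = fls(high):     divisor >> n is in [2^31, 2^32) -> estimate
     *                         stays within the one-step correction window.
     * With n = 1 + fls(high): divisor >> n is in [2^30, 2^31) -> estimate
     *                         can drift past that window.
     */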
3115 diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
3116 index 8006295f8bd7..dda73991bb54 100644
3117 --- a/net/appletalk/atalk_proc.c
3118 +++ b/net/appletalk/atalk_proc.c
3119 @@ -255,7 +255,7 @@ out_interface:
3120 goto out;
3121 }
3122
3123 -void __exit atalk_proc_exit(void)
3124 +void atalk_proc_exit(void)
3125 {
3126 remove_proc_entry("interface", atalk_proc_dir);
3127 remove_proc_entry("route", atalk_proc_dir);
3128 diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
3129 index 9b6bc5abe946..795fbc6c06aa 100644
3130 --- a/net/appletalk/ddp.c
3131 +++ b/net/appletalk/ddp.c
3132 @@ -1910,12 +1910,16 @@ static const char atalk_err_snap[] __initconst =
3133 /* Called by proto.c on kernel start up */
3134 static int __init atalk_init(void)
3135 {
3136 - int rc = proto_register(&ddp_proto, 0);
3137 + int rc;
3138
3139 - if (rc != 0)
3140 + rc = proto_register(&ddp_proto, 0);
3141 + if (rc)
3142 goto out;
3143
3144 - (void)sock_register(&atalk_family_ops);
3145 + rc = sock_register(&atalk_family_ops);
3146 + if (rc)
3147 + goto out_proto;
3148 +
3149 ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
3150 if (!ddp_dl)
3151 printk(atalk_err_snap);
3152 @@ -1923,12 +1927,33 @@ static int __init atalk_init(void)
3153 dev_add_pack(&ltalk_packet_type);
3154 dev_add_pack(&ppptalk_packet_type);
3155
3156 - register_netdevice_notifier(&ddp_notifier);
3157 + rc = register_netdevice_notifier(&ddp_notifier);
3158 + if (rc)
3159 + goto out_sock;
3160 +
3161 aarp_proto_init();
3162 - atalk_proc_init();
3163 - atalk_register_sysctl();
3164 + rc = atalk_proc_init();
3165 + if (rc)
3166 + goto out_aarp;
3167 +
3168 + rc = atalk_register_sysctl();
3169 + if (rc)
3170 + goto out_proc;
3171 out:
3172 return rc;
3173 +out_proc:
3174 + atalk_proc_exit();
3175 +out_aarp:
3176 + aarp_cleanup_module();
3177 + unregister_netdevice_notifier(&ddp_notifier);
3178 +out_sock:
3179 + dev_remove_pack(&ppptalk_packet_type);
3180 + dev_remove_pack(&ltalk_packet_type);
3181 + unregister_snap_client(ddp_dl);
3182 + sock_unregister(PF_APPLETALK);
3183 +out_proto:
3184 + proto_unregister(&ddp_proto);
3185 + goto out;
3186 }
3187 module_init(atalk_init);
3188
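atalk_init() now uses the kernel's standard inverted-goto unwind: each successfully registered facility gets a label, and a later failure jumps to the label that tears everything down in reverse registration order. The skeleton (sketch; register_a/b/c are hypothetical stand-ins):

    static int __init demo_init(void)
    {
            int rc;

            rc = register_a();
            if (rc)
                    goto out;
            rc = register_b();
            if (rc)
                    goto out_a;
            rc = register_c();
            if (rc)
                    goto out_b;
            return 0;

    out_b:
            unregister_b();
    out_a:
            unregister_a();
    out:
            return rc;
    }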
3189 diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c
3190 index c744a853fa5f..d945b7c0176d 100644
3191 --- a/net/appletalk/sysctl_net_atalk.c
3192 +++ b/net/appletalk/sysctl_net_atalk.c
3193 @@ -45,9 +45,12 @@ static struct ctl_table atalk_table[] = {
3194
3195 static struct ctl_table_header *atalk_table_header;
3196
3197 -void atalk_register_sysctl(void)
3198 +int __init atalk_register_sysctl(void)
3199 {
3200 atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table);
3201 + if (!atalk_table_header)
3202 + return -ENOMEM;
3203 + return 0;
3204 }
3205
3206 void atalk_unregister_sysctl(void)
3207 diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
3208 index 5cf6d9f4761d..83797b3949e2 100644
3209 --- a/net/rxrpc/conn_client.c
3210 +++ b/net/rxrpc/conn_client.c
3211 @@ -704,6 +704,7 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
3212
3213 ret = rxrpc_wait_for_channel(call, gfp);
3214 if (ret < 0) {
3215 + trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
3216 rxrpc_disconnect_client_call(call);
3217 goto out;
3218 }
3219 @@ -774,16 +775,22 @@ static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
3220 */
3221 void rxrpc_disconnect_client_call(struct rxrpc_call *call)
3222 {
3223 - unsigned int channel = call->cid & RXRPC_CHANNELMASK;
3224 struct rxrpc_connection *conn = call->conn;
3225 - struct rxrpc_channel *chan = &conn->channels[channel];
3226 + struct rxrpc_channel *chan = NULL;
3227 struct rxrpc_net *rxnet = conn->params.local->rxnet;
3228 + unsigned int channel = -1;
3229 + u32 cid;
3230
3231 + spin_lock(&conn->channel_lock);
3232 +
3233 + cid = call->cid;
3234 + if (cid) {
3235 + channel = cid & RXRPC_CHANNELMASK;
3236 + chan = &conn->channels[channel];
3237 + }
3238 trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
3239 call->conn = NULL;
3240
3241 - spin_lock(&conn->channel_lock);
3242 -
3243 /* Calls that have never actually been assigned a channel can simply be
3244 * discarded. If the conn didn't get used either, it will follow
3245 * immediately unless someone else grabs it in the meantime.
3246 @@ -807,7 +814,10 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
3247 goto out;
3248 }
3249
3250 - ASSERTCMP(rcu_access_pointer(chan->call), ==, call);
3251 + if (rcu_access_pointer(chan->call) != call) {
3252 + spin_unlock(&conn->channel_lock);
3253 + BUG();
3254 + }
3255
3256 /* If a client call was exposed to the world, we save the result for
3257 * retransmission.
3258 diff --git a/sound/drivers/opl3/opl3_voice.h b/sound/drivers/opl3/opl3_voice.h
3259 index 5b02bd49fde4..4e4ecc21760b 100644
3260 --- a/sound/drivers/opl3/opl3_voice.h
3261 +++ b/sound/drivers/opl3/opl3_voice.h
3262 @@ -41,7 +41,7 @@ void snd_opl3_timer_func(struct timer_list *t);
3263
3264 /* Prototypes for opl3_drums.c */
3265 void snd_opl3_load_drums(struct snd_opl3 *opl3);
3266 -void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int on_off, int vel, struct snd_midi_channel *chan);
3267 +void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int vel, int on_off, struct snd_midi_channel *chan);
3268
3269 /* Prototypes for opl3_oss.c */
3270 #if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
3271 diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
3272 index d77dcba276b5..1eb8b61a185b 100644
3273 --- a/sound/isa/sb/sb8.c
3274 +++ b/sound/isa/sb/sb8.c
3275 @@ -111,6 +111,10 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
3276
3277 /* block the 0x388 port to avoid PnP conflicts */
3278 acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
3279 + if (!acard->fm_res) {
3280 + err = -EBUSY;
3281 + goto _err;
3282 + }
3283
3284 if (port[dev] != SNDRV_AUTO_PORT) {
3285 if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
3286 diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
3287 index 907cf1a46712..3ef2b27ebbe8 100644
3288 --- a/sound/pci/echoaudio/echoaudio.c
3289 +++ b/sound/pci/echoaudio/echoaudio.c
3290 @@ -1954,6 +1954,11 @@ static int snd_echo_create(struct snd_card *card,
3291 }
3292 chip->dsp_registers = (volatile u32 __iomem *)
3293 ioremap_nocache(chip->dsp_registers_phys, sz);
3294 + if (!chip->dsp_registers) {
3295 + dev_err(chip->card->dev, "ioremap failed\n");
3296 + snd_echo_free(chip);
3297 + return -ENOMEM;
3298 + }
3299
3300 if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED,
3301 KBUILD_MODNAME, chip)) {
3302 diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
3303 index 169e347c76f6..9ba1a2e1ed7a 100644
3304 --- a/tools/lib/bpf/libbpf.c
3305 +++ b/tools/lib/bpf/libbpf.c
3306 @@ -627,7 +627,7 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
3307 bool strict = !(flags & MAPS_RELAX_COMPAT);
3308 int i, map_idx, map_def_sz, nr_maps = 0;
3309 Elf_Scn *scn;
3310 - Elf_Data *data;
3311 + Elf_Data *data = NULL;
3312 Elf_Data *symbols = obj->efile.symbols;
3313
3314 if (obj->efile.maps_shndx < 0)
3315 diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
3316 index 4ac7775fbc11..4851285ba00c 100644
3317 --- a/tools/perf/Documentation/perf-config.txt
3318 +++ b/tools/perf/Documentation/perf-config.txt
3319 @@ -114,7 +114,7 @@ Given a $HOME/.perfconfig like this:
3320
3321 [report]
3322 # Defaults
3323 - sort-order = comm,dso,symbol
3324 + sort_order = comm,dso,symbol
3325 percent-limit = 0
3326 queue-size = 0
3327 children = true
3328 diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
3329 index 4bc2085e5197..39c05f89104e 100644
3330 --- a/tools/perf/Documentation/perf-stat.txt
3331 +++ b/tools/perf/Documentation/perf-stat.txt
3332 @@ -72,9 +72,8 @@ report::
3333 --all-cpus::
3334 system-wide collection from all CPUs (default if no target is specified)
3335
3336 --c::
3337 ---scale::
3338 - scale/normalize counter values
3339 +--no-scale::
3340 + Don't scale/normalize counter values
3341
3342 -d::
3343 --detailed::
3344 diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c
3345 index 0c0a6e824934..2af067859966 100644
3346 --- a/tools/perf/bench/epoll-ctl.c
3347 +++ b/tools/perf/bench/epoll-ctl.c
3348 @@ -224,7 +224,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
3349 pthread_attr_t thread_attr, *attrp = NULL;
3350 cpu_set_t cpuset;
3351 unsigned int i, j;
3352 - int ret;
3353 + int ret = 0;
3354
3355 if (!noaffinity)
3356 pthread_attr_init(&thread_attr);
3357 diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
3358 index 5a11534e96a0..fe85448abd45 100644
3359 --- a/tools/perf/bench/epoll-wait.c
3360 +++ b/tools/perf/bench/epoll-wait.c
3361 @@ -293,7 +293,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
3362 pthread_attr_t thread_attr, *attrp = NULL;
3363 cpu_set_t cpuset;
3364 unsigned int i, j;
3365 - int ret, events = EPOLLIN;
3366 + int ret = 0, events = EPOLLIN;
3367
3368 if (oneshot)
3369 events |= EPOLLONESHOT;
3370 diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
3371 index 63a3afc7f32b..a52295dbad2b 100644
3372 --- a/tools/perf/builtin-stat.c
3373 +++ b/tools/perf/builtin-stat.c
3374 @@ -728,7 +728,8 @@ static struct option stat_options[] = {
3375 "system-wide collection from all CPUs"),
3376 OPT_BOOLEAN('g', "group", &group,
3377 "put the counters into a counter group"),
3378 - OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
3379 + OPT_BOOLEAN(0, "scale", &stat_config.scale,
3380 + "Use --no-scale to disable counter scaling for multiplexing"),
3381 OPT_INCR('v', "verbose", &verbose,
3382 "be more verbose (show counter open errors, etc)"),
3383 OPT_INTEGER('r', "repeat", &stat_config.run_count,
3384 diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
3385 index f64e312db787..616408251e25 100644
3386 --- a/tools/perf/builtin-top.c
3387 +++ b/tools/perf/builtin-top.c
3388 @@ -1633,8 +1633,9 @@ int cmd_top(int argc, const char **argv)
3389 annotation_config__init();
3390
3391 symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
3392 - if (symbol__init(NULL) < 0)
3393 - return -1;
3394 + status = symbol__init(NULL);
3395 + if (status < 0)
3396 + goto out_delete_evlist;
3397
3398 sort__setup_elide(stdout);
3399
3400 diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
3401 index 6d598cc071ae..1a9c3becf5ff 100644
3402 --- a/tools/perf/tests/backward-ring-buffer.c
3403 +++ b/tools/perf/tests/backward-ring-buffer.c
3404 @@ -18,7 +18,7 @@ static void testcase(void)
3405 int i;
3406
3407 for (i = 0; i < NR_ITERS; i++) {
3408 - char proc_name[10];
3409 + char proc_name[15];
3410
3411 snprintf(proc_name, sizeof(proc_name), "p:%d\n", i);
3412 prctl(PR_SET_NAME, proc_name);
3413 diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
3414 index ea7acf403727..71f60c0f9faa 100644
3415 --- a/tools/perf/tests/evsel-tp-sched.c
3416 +++ b/tools/perf/tests/evsel-tp-sched.c
3417 @@ -85,5 +85,6 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
3418 if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
3419 ret = -1;
3420
3421 + perf_evsel__delete(evsel);
3422 return ret;
3423 }
3424 diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
3425 index 01f0706995a9..9acc1e80b936 100644
3426 --- a/tools/perf/tests/expr.c
3427 +++ b/tools/perf/tests/expr.c
3428 @@ -19,7 +19,7 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
3429 const char *p;
3430 const char **other;
3431 double val;
3432 - int ret;
3433 + int i, ret;
3434 struct parse_ctx ctx;
3435 int num_other;
3436
3437 @@ -56,6 +56,9 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
3438 TEST_ASSERT_VAL("find other", !strcmp(other[1], "BAZ"));
3439 TEST_ASSERT_VAL("find other", !strcmp(other[2], "BOZO"));
3440 TEST_ASSERT_VAL("find other", other[3] == NULL);
3441 +
3442 + for (i = 0; i < num_other; i++)
3443 + free((void *)other[i]);
3444 free((void *)other);
3445
3446 return 0;
3447 diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
3448 index c531e6deb104..493ecb611540 100644
3449 --- a/tools/perf/tests/openat-syscall-all-cpus.c
3450 +++ b/tools/perf/tests/openat-syscall-all-cpus.c
3451 @@ -45,7 +45,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
3452 if (IS_ERR(evsel)) {
3453 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
3454 pr_debug("%s\n", errbuf);
3455 - goto out_thread_map_delete;
3456 + goto out_cpu_map_delete;
3457 }
3458
3459 if (perf_evsel__open(evsel, cpus, threads) < 0) {
3460 @@ -119,6 +119,8 @@ out_close_fd:
3461 perf_evsel__close_fd(evsel);
3462 out_evsel_delete:
3463 perf_evsel__delete(evsel);
3464 +out_cpu_map_delete:
3465 + cpu_map__put(cpus);
3466 out_thread_map_delete:
3467 thread_map__put(threads);
3468 return err;
3469 diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
3470 index 04b1d53e4bf9..1d352621bd48 100644
3471 --- a/tools/perf/util/build-id.c
3472 +++ b/tools/perf/util/build-id.c
3473 @@ -183,6 +183,7 @@ char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size)
3474 return bf;
3475 }
3476
3477 +/* The caller is responsible to free the returned buffer. */
3478 char *build_id_cache__origname(const char *sbuild_id)
3479 {
3480 char *linkname;
3481 diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
3482 index 1ea8f898f1a1..9ecdbd5986b3 100644
3483 --- a/tools/perf/util/config.c
3484 +++ b/tools/perf/util/config.c
3485 @@ -632,11 +632,10 @@ static int collect_config(const char *var, const char *value,
3486 }
3487
3488 ret = set_value(item, value);
3489 - return ret;
3490
3491 out_free:
3492 free(key);
3493 - return -1;
3494 + return ret;
3495 }
3496
3497 int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
3498 diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
3499 index dbc0466db368..50c933044f88 100644
3500 --- a/tools/perf/util/evsel.c
3501 +++ b/tools/perf/util/evsel.c
3502 @@ -1289,6 +1289,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
3503 {
3504 assert(list_empty(&evsel->node));
3505 assert(evsel->evlist == NULL);
3506 + perf_evsel__free_counts(evsel);
3507 perf_evsel__free_fd(evsel);
3508 perf_evsel__free_id(evsel);
3509 perf_evsel__free_config_terms(evsel);
3510 @@ -1341,8 +1342,7 @@ void perf_counts_values__scale(struct perf_counts_values *count,
3511 scaled = 1;
3512 count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
3513 }
3514 - } else
3515 - count->ena = count->run = 0;
3516 + }
3517
3518 if (pscaled)
3519 *pscaled = scaled;
3520 diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
3521 index 8aad8330e392..e416e76f5600 100644
3522 --- a/tools/perf/util/hist.c
3523 +++ b/tools/perf/util/hist.c
3524 @@ -1048,8 +1048,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
3525
3526 err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
3527 iter->evsel, al, max_stack_depth);
3528 - if (err)
3529 + if (err) {
3530 + map__put(alm);
3531 return err;
3532 + }
3533
3534 err = iter->ops->prepare_entry(iter, al);
3535 if (err)
3536 diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
3537 index 6751301a755c..2b37f56f0549 100644
3538 --- a/tools/perf/util/map.c
3539 +++ b/tools/perf/util/map.c
3540 @@ -571,10 +571,25 @@ static void __maps__purge(struct maps *maps)
3541 }
3542 }
3543
3544 +static void __maps__purge_names(struct maps *maps)
3545 +{
3546 + struct rb_root *root = &maps->names;
3547 + struct rb_node *next = rb_first(root);
3548 +
3549 + while (next) {
3550 + struct map *pos = rb_entry(next, struct map, rb_node_name);
3551 +
3552 + next = rb_next(&pos->rb_node_name);
3553 + rb_erase_init(&pos->rb_node_name, root);
3554 + map__put(pos);
3555 + }
3556 +}
3557 +
3558 static void maps__exit(struct maps *maps)
3559 {
3560 down_write(&maps->lock);
3561 __maps__purge(maps);
3562 + __maps__purge_names(maps);
3563 up_write(&maps->lock);
3564 }
3565
3566 @@ -911,6 +926,9 @@ static void __maps__remove(struct maps *maps, struct map *map)
3567 {
3568 rb_erase_init(&map->rb_node, &maps->entries);
3569 map__put(map);
3570 +
3571 + rb_erase_init(&map->rb_node_name, &maps->names);
3572 + map__put(map);
3573 }
3574
3575 void maps__remove(struct maps *maps, struct map *map)
3576 diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
3577 index ea523d3b248f..989fed6f43b5 100644
3578 --- a/tools/perf/util/ordered-events.c
3579 +++ b/tools/perf/util/ordered-events.c
3580 @@ -270,6 +270,8 @@ static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
3581 "FINAL",
3582 "ROUND",
3583 "HALF ",
3584 + "TOP ",
3585 + "TIME ",
3586 };
3587 int err;
3588 bool show_progress = false;
3589 diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
3590 index 920e1e6551dd..03860313313c 100644
3591 --- a/tools/perf/util/parse-events.c
3592 +++ b/tools/perf/util/parse-events.c
3593 @@ -2271,6 +2271,7 @@ static bool is_event_supported(u8 type, unsigned config)
3594 perf_evsel__delete(evsel);
3595 }
3596
3597 + thread_map__put(tmap);
3598 return ret;
3599 }
3600
3601 @@ -2341,6 +2342,7 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
3602 printf(" %-50s [%s]\n", buf, "SDT event");
3603 free(buf);
3604 }
3605 + free(path);
3606 } else
3607 printf(" %-50s [%s]\n", nd->s, "SDT event");
3608 if (nd2) {
3609 diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
3610 index 4d40515307b8..2856cc9d5a31 100644
3611 --- a/tools/perf/util/stat.c
3612 +++ b/tools/perf/util/stat.c
3613 @@ -291,10 +291,8 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
3614 break;
3615 case AGGR_GLOBAL:
3616 aggr->val += count->val;
3617 - if (config->scale) {
3618 - aggr->ena += count->ena;
3619 - aggr->run += count->run;
3620 - }
3621 + aggr->ena += count->ena;
3622 + aggr->run += count->run;
3623 case AGGR_UNSET:
3624 default:
3625 break;
3626 @@ -442,10 +440,8 @@ int create_perf_stat_counter(struct perf_evsel *evsel,
3627 struct perf_event_attr *attr = &evsel->attr;
3628 struct perf_evsel *leader = evsel->leader;
3629
3630 - if (config->scale) {
3631 - attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
3632 - PERF_FORMAT_TOTAL_TIME_RUNNING;
3633 - }
3634 + attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
3635 + PERF_FORMAT_TOTAL_TIME_RUNNING;
3636
3637 /*
3638 * The event is part of non trivial group, let's enable
3639 diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
3640 index 9327c0ddc3a5..c3fad065c89c 100644
3641 --- a/tools/power/x86/turbostat/turbostat.c
3642 +++ b/tools/power/x86/turbostat/turbostat.c
3643 @@ -5077,6 +5077,9 @@ int fork_it(char **argv)
3644 signal(SIGQUIT, SIG_IGN);
3645 if (waitpid(child_pid, &status, 0) == -1)
3646 err(status, "waitpid");
3647 +
3648 + if (WIFEXITED(status))
3649 + status = WEXITSTATUS(status);
3650 }
3651 /*
3652 * n.b. fork_it() does not check for errors from for_all_cpus()