Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0177-4.19.78-all-fixes.patch



Revision 3474
Tue Oct 29 10:31:33 2019 UTC by niro
File size: 132141 bytes
-linux-4.19.78
1 diff --git a/Makefile b/Makefile
2 index aeabc6459acc..440c5b5c4f4b 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 19
9 -SUBLEVEL = 77
10 +SUBLEVEL = 78
11 EXTRAVERSION =
12 NAME = "People's Front"
13
14 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
15 index 51794c7fa6d5..185e552f1461 100644
16 --- a/arch/arm/Kconfig
17 +++ b/arch/arm/Kconfig
18 @@ -1586,8 +1586,9 @@ config ARM_PATCH_IDIV
19 code to do integer division.
20
21 config AEABI
22 - bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && !CPU_V7M && !CPU_V6 && !CPU_V6K
23 - default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K
24 + bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && \
25 + !CPU_V7M && !CPU_V6 && !CPU_V6K && !CC_IS_CLANG
26 + default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K || CC_IS_CLANG
27 help
28 This option allows for the kernel to be compiled using the latest
29 ARM ABI (aka EABI). This is only useful if you are using a user
30 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
31 index 3232afb6fdc0..a9ee0d9dc740 100644
32 --- a/arch/arm/mm/fault.c
33 +++ b/arch/arm/mm/fault.c
34 @@ -216,7 +216,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
35 {
36 unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
37
38 - if (fsr & FSR_WRITE)
39 + if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
40 mask = VM_WRITE;
41 if (fsr & FSR_LNX_PF)
42 mask = VM_EXEC;
43 @@ -287,7 +287,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
44
45 if (user_mode(regs))
46 flags |= FAULT_FLAG_USER;
47 - if (fsr & FSR_WRITE)
48 + if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
49 flags |= FAULT_FLAG_WRITE;
50
51 /*
52 diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
53 index c063708fa503..9ecc2097a87a 100644
54 --- a/arch/arm/mm/fault.h
55 +++ b/arch/arm/mm/fault.h
56 @@ -6,6 +6,7 @@
57 * Fault status register encodings. We steal bit 31 for our own purposes.
58 */
59 #define FSR_LNX_PF (1 << 31)
60 +#define FSR_CM (1 << 13)
61 #define FSR_WRITE (1 << 11)
62 #define FSR_FS4 (1 << 10)
63 #define FSR_FS3_0 (15)
64 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
65 index f866870db749..0b94b674aa91 100644
66 --- a/arch/arm/mm/mmap.c
67 +++ b/arch/arm/mm/mmap.c
68 @@ -18,8 +18,9 @@
69 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
70
71 /* gap between mmap and stack */
72 -#define MIN_GAP (128*1024*1024UL)
73 -#define MAX_GAP ((TASK_SIZE)/6*5)
74 +#define MIN_GAP (128*1024*1024UL)
75 +#define MAX_GAP ((STACK_TOP)/6*5)
76 +#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))
77
78 static int mmap_is_legacy(struct rlimit *rlim_stack)
79 {
80 @@ -35,13 +36,22 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
81 static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
82 {
83 unsigned long gap = rlim_stack->rlim_cur;
84 + unsigned long pad = stack_guard_gap;
85 +
86 + /* Account for stack randomization if necessary */
87 + if (current->flags & PF_RANDOMIZE)
88 + pad += (STACK_RND_MASK << PAGE_SHIFT);
89 +
90 + /* Values close to RLIM_INFINITY can overflow. */
91 + if (gap + pad > gap)
92 + gap += pad;
93
94 if (gap < MIN_GAP)
95 gap = MIN_GAP;
96 else if (gap > MAX_GAP)
97 gap = MAX_GAP;
98
99 - return PAGE_ALIGN(TASK_SIZE - gap - rnd);
100 + return PAGE_ALIGN(STACK_TOP - gap - rnd);
101 }
102
103 /*
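
The hunk above (together with the matching arch/arm64/mm/mmap.c and arch/mips/mm/mmap.c hunks below) pads the mmap/stack gap by the stack guard gap and the stack randomization range, but only when the addition cannot wrap: with RLIMIT_STACK near RLIM_INFINITY, gap + pad would overflow an unsigned long. A minimal standalone sketch of that guard (the pad_gap() helper is hypothetical, not kernel code):

    #include <limits.h>
    #include <stdio.h>

    /* Overflow-safe padding of an unsigned quantity: when gap + pad
     * wraps around, the sum compares <= gap and the addition is
     * skipped, leaving the unpadded gap. */
    static unsigned long pad_gap(unsigned long gap, unsigned long pad)
    {
        if (gap + pad > gap)    /* false on unsigned wraparound */
            gap += pad;
        return gap;
    }

    int main(void)
    {
        printf("%lu\n", pad_gap(100UL, 28UL));          /* 128 */
        printf("%lu\n", pad_gap(ULONG_MAX - 1, 16UL));  /* unchanged */
        return 0;
    }
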
104 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
105 index e46a6a446cdd..70e560cf8ca0 100644
106 --- a/arch/arm/mm/mmu.c
107 +++ b/arch/arm/mm/mmu.c
108 @@ -1175,6 +1175,22 @@ void __init adjust_lowmem_bounds(void)
109 */
110 vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
111
112 + /*
113 + * The first usable region must be PMD aligned. Mark its start
114 + * as MEMBLOCK_NOMAP if it isn't
115 + */
116 + for_each_memblock(memory, reg) {
117 + if (!memblock_is_nomap(reg)) {
118 + if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
119 + phys_addr_t len;
120 +
121 + len = round_up(reg->base, PMD_SIZE) - reg->base;
122 + memblock_mark_nomap(reg->base, len);
123 + }
124 + break;
125 + }
126 + }
127 +
128 for_each_memblock(memory, reg) {
129 phys_addr_t block_start = reg->base;
130 phys_addr_t block_end = reg->base + reg->size;
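
The adjust_lowmem_bounds() hunk above marks the stretch between a region's start and the next PMD boundary as MEMBLOCK_NOMAP, so the first usable region is PMD aligned. The round_up() arithmetic it relies on, shown standalone (the 2 MiB PMD_SIZE is an assumption for illustration; the macro is modeled on the kernel's round_up() for power-of-two boundaries):

    #include <stdio.h>

    #define PMD_SIZE (2UL << 20)    /* assumed 2 MiB section granularity */

    /* Round x up to the next multiple of the power-of-two y. */
    #define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

    int main(void)
    {
        unsigned long base = 0x40200000UL + 0x1000;   /* 4 KiB past a boundary */
        unsigned long len  = round_up(base, PMD_SIZE) - base;

        /* len is the unusable prefix the patch marks MEMBLOCK_NOMAP */
        printf("nomap prefix: %#lx bytes\n", len);    /* 0x1ff000 */
        return 0;
    }
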
131 diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
132 index 3b0938281541..d8b01c7c9cd3 100644
133 --- a/arch/arm64/include/asm/cmpxchg.h
134 +++ b/arch/arm64/include/asm/cmpxchg.h
135 @@ -74,7 +74,7 @@ __XCHG_CASE( , , mb_8, dmb ish, nop, , a, l, "memory")
136 #undef __XCHG_CASE
137
138 #define __XCHG_GEN(sfx) \
139 -static inline unsigned long __xchg##sfx(unsigned long x, \
140 +static __always_inline unsigned long __xchg##sfx(unsigned long x, \
141 volatile void *ptr, \
142 int size) \
143 { \
144 @@ -116,7 +116,7 @@ __XCHG_GEN(_mb)
145 #define xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
146
147 #define __CMPXCHG_GEN(sfx) \
148 -static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
149 +static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
150 unsigned long old, \
151 unsigned long new, \
152 int size) \
153 @@ -223,7 +223,7 @@ __CMPWAIT_CASE( , , 8);
154 #undef __CMPWAIT_CASE
155
156 #define __CMPWAIT_GEN(sfx) \
157 -static inline void __cmpwait##sfx(volatile void *ptr, \
158 +static __always_inline void __cmpwait##sfx(volatile void *ptr, \
159 unsigned long val, \
160 int size) \
161 { \
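
The three cmpxchg.h hunks above upgrade generated helpers from inline to __always_inline: each helper switches on a size argument that is constant at every call site, and that switch only folds to a single case if the helper is actually inlined. A userspace illustration of why the attribute matters (assumes GCC/Clang attribute syntax; this is not the kernel's code):

    #include <stdint.h>
    #include <stdio.h>

    #define __always_inline inline __attribute__((always_inline))

    /* Size dispatch meant to fold at compile time: once inlined with a
     * constant size, dead-code elimination keeps exactly one case. */
    static __always_inline uint64_t load_sized(const void *p, int size)
    {
        switch (size) {
        case 1: return *(const uint8_t *)p;
        case 2: return *(const uint16_t *)p;
        case 4: return *(const uint32_t *)p;
        case 8: return *(const uint64_t *)p;
        }
        return 0;    /* the kernel variants hit a build error here instead */
    }

    int main(void)
    {
        uint32_t v = 0xdeadbeef;
        printf("%#llx\n", (unsigned long long)load_sized(&v, sizeof(v)));
        return 0;
    }
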
162 diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
163 index 842c8a5fcd53..157f2caa1351 100644
164 --- a/arch/arm64/mm/mmap.c
165 +++ b/arch/arm64/mm/mmap.c
166 @@ -65,7 +65,11 @@ unsigned long arch_mmap_rnd(void)
167 static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
168 {
169 unsigned long gap = rlim_stack->rlim_cur;
170 - unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
171 + unsigned long pad = stack_guard_gap;
172 +
173 + /* Account for stack randomization if necessary */
174 + if (current->flags & PF_RANDOMIZE)
175 + pad += (STACK_RND_MASK << PAGE_SHIFT);
176
177 /* Values close to RLIM_INFINITY can overflow. */
178 if (gap + pad > gap)
179 diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
180 index 01df9ad62fb8..1bb9448777c5 100644
181 --- a/arch/mips/include/asm/mipsregs.h
182 +++ b/arch/mips/include/asm/mipsregs.h
183 @@ -688,6 +688,9 @@
184 #define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
185 #define MIPS_CONF7_AR (_ULCAST_(1) << 16)
186
187 +/* Ingenic Config7 bits */
188 +#define MIPS_CONF7_BTB_LOOP_EN (_ULCAST_(1) << 4)
189 +
190 /* Config7 Bits specific to MIPS Technologies. */
191
192 /* Performance counters implemented Per TC */
193 @@ -2774,6 +2777,7 @@ __BUILD_SET_C0(status)
194 __BUILD_SET_C0(cause)
195 __BUILD_SET_C0(config)
196 __BUILD_SET_C0(config5)
197 +__BUILD_SET_C0(config7)
198 __BUILD_SET_C0(intcontrol)
199 __BUILD_SET_C0(intctl)
200 __BUILD_SET_C0(srsmap)
201 diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
202 index d535fc706a8b..25cd8737e7fe 100644
203 --- a/arch/mips/kernel/cpu-probe.c
204 +++ b/arch/mips/kernel/cpu-probe.c
205 @@ -1879,6 +1879,13 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
206 c->cputype = CPU_JZRISC;
207 c->writecombine = _CACHE_UNCACHED_ACCELERATED;
208 __cpu_name[cpu] = "Ingenic JZRISC";
209 + /*
210 + * The XBurst core by default attempts to avoid branch target
211 + * buffer lookups by detecting & special casing loops. This
212 + * feature will cause BogoMIPS and lpj calculate in error.
213 + * Set cp0 config7 bit 4 to disable this feature.
214 + */
215 + set_c0_config7(MIPS_CONF7_BTB_LOOP_EN);
216 break;
217 default:
218 panic("Unknown Ingenic Processor ID!");
219 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
220 index 1b705fb2f10c..233033f99d8f 100644
221 --- a/arch/mips/mm/mmap.c
222 +++ b/arch/mips/mm/mmap.c
223 @@ -21,8 +21,9 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
224 EXPORT_SYMBOL(shm_align_mask);
225
226 /* gap between mmap and stack */
227 -#define MIN_GAP (128*1024*1024UL)
228 -#define MAX_GAP ((TASK_SIZE)/6*5)
229 +#define MIN_GAP (128*1024*1024UL)
230 +#define MAX_GAP ((TASK_SIZE)/6*5)
231 +#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))
232
233 static int mmap_is_legacy(struct rlimit *rlim_stack)
234 {
235 @@ -38,6 +39,15 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
236 static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
237 {
238 unsigned long gap = rlim_stack->rlim_cur;
239 + unsigned long pad = stack_guard_gap;
240 +
241 + /* Account for stack randomization if necessary */
242 + if (current->flags & PF_RANDOMIZE)
243 + pad += (STACK_RND_MASK << PAGE_SHIFT);
244 +
245 + /* Values close to RLIM_INFINITY can overflow. */
246 + if (gap + pad > gap)
247 + gap += pad;
248
249 if (gap < MIN_GAP)
250 gap = MIN_GAP;
251 diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
252 index 8c4fda52b91d..355f8eadb1cd 100644
253 --- a/arch/mips/mm/tlbex.c
254 +++ b/arch/mips/mm/tlbex.c
255 @@ -630,7 +630,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
256 return;
257 }
258
259 - if (cpu_has_rixi && _PAGE_NO_EXEC) {
260 + if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
261 if (fill_includes_sw_bits) {
262 UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
263 } else {
264 diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
265 index 94542776a62d..2a7b01f97a56 100644
266 --- a/arch/powerpc/include/asm/futex.h
267 +++ b/arch/powerpc/include/asm/futex.h
268 @@ -59,8 +59,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
269
270 pagefault_enable();
271
272 - if (!ret)
273 - *oval = oldval;
274 + *oval = oldval;
275
276 return ret;
277 }
278 diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
279 index 67619b4b3f96..110eba400de7 100644
280 --- a/arch/powerpc/kernel/eeh_driver.c
281 +++ b/arch/powerpc/kernel/eeh_driver.c
282 @@ -811,6 +811,10 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
283 pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
284 pe->freeze_count, eeh_max_freezes);
285
286 + eeh_for_each_pe(pe, tmp_pe)
287 + eeh_pe_for_each_dev(tmp_pe, edev, tmp)
288 + edev->mode &= ~EEH_DEV_NO_HANDLER;
289 +
290 /* Walk the various device drivers attached to this slot through
291 * a reset sequence, giving each an opportunity to do what it needs
292 * to accomplish the reset. Each child gets a report of the
293 @@ -1004,7 +1008,8 @@ final:
294 */
295 void eeh_handle_special_event(void)
296 {
297 - struct eeh_pe *pe, *phb_pe;
298 + struct eeh_pe *pe, *phb_pe, *tmp_pe;
299 + struct eeh_dev *edev, *tmp_edev;
300 struct pci_bus *bus;
301 struct pci_controller *hose;
302 unsigned long flags;
303 @@ -1075,6 +1080,10 @@ void eeh_handle_special_event(void)
304 (phb_pe->state & EEH_PE_RECOVERING))
305 continue;
306
307 + eeh_for_each_pe(pe, tmp_pe)
308 + eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
309 + edev->mode &= ~EEH_DEV_NO_HANDLER;
310 +
311 /* Notify all devices to be down */
312 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
313 eeh_set_channel_state(pe, pci_channel_io_perm_failure);
314 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
315 index 06cc77813dbb..90af86f143a9 100644
316 --- a/arch/powerpc/kernel/exceptions-64s.S
317 +++ b/arch/powerpc/kernel/exceptions-64s.S
318 @@ -520,6 +520,10 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
319 RFI_TO_USER_OR_KERNEL
320 9:
321 /* Deliver the machine check to host kernel in V mode. */
322 +BEGIN_FTR_SECTION
323 + ld r10,ORIG_GPR3(r1)
324 + mtspr SPRN_CFAR,r10
325 +END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
326 MACHINE_CHECK_HANDLER_WINDUP
327 b machine_check_pSeries
328
329 diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
330 index 8afd146bc9c7..9e41a9de4323 100644
331 --- a/arch/powerpc/kernel/rtas.c
332 +++ b/arch/powerpc/kernel/rtas.c
333 @@ -875,15 +875,17 @@ static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
334 return 0;
335
336 for_each_cpu(cpu, cpus) {
337 + struct device *dev = get_cpu_device(cpu);
338 +
339 switch (state) {
340 case DOWN:
341 - cpuret = cpu_down(cpu);
342 + cpuret = device_offline(dev);
343 break;
344 case UP:
345 - cpuret = cpu_up(cpu);
346 + cpuret = device_online(dev);
347 break;
348 }
349 - if (cpuret) {
350 + if (cpuret < 0) {
351 pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
352 __func__,
353 ((state == UP) ? "up" : "down"),
354 @@ -972,6 +974,8 @@ int rtas_ibm_suspend_me(u64 handle)
355 data.token = rtas_token("ibm,suspend-me");
356 data.complete = &done;
357
358 + lock_device_hotplug();
359 +
360 /* All present CPUs must be online */
361 cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
362 cpuret = rtas_online_cpus_mask(offline_mask);
363 @@ -1003,6 +1007,7 @@ int rtas_ibm_suspend_me(u64 handle)
364 __func__);
365
366 out:
367 + unlock_device_hotplug();
368 free_cpumask_var(offline_mask);
369 return atomic_read(&data.error);
370 }
371 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
372 index 02fe6d020174..d5f351f02c15 100644
373 --- a/arch/powerpc/kernel/traps.c
374 +++ b/arch/powerpc/kernel/traps.c
375 @@ -399,6 +399,7 @@ void system_reset_exception(struct pt_regs *regs)
376 if (debugger(regs))
377 goto out;
378
379 + kmsg_dump(KMSG_DUMP_OOPS);
380 /*
381 * A system reset is a request to dump, so we always send
382 * it through the crashdump code (if fadump or kdump are
383 diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
384 index f5adb6b756f7..29e66d6e5763 100644
385 --- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
386 +++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
387 @@ -36,7 +36,8 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
388 struct page *tce_mem = NULL;
389 __be64 *addr;
390
391 - tce_mem = alloc_pages_node(nid, GFP_KERNEL, shift - PAGE_SHIFT);
392 + tce_mem = alloc_pages_node(nid, GFP_ATOMIC | __GFP_NOWARN,
393 + shift - PAGE_SHIFT);
394 if (!tce_mem) {
395 pr_err("Failed to allocate a TCE memory, level shift=%d\n",
396 shift);
397 @@ -161,6 +162,9 @@ void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
398
399 if (ptce)
400 *ptce = cpu_to_be64(0);
401 + else
402 + /* Skip the rest of the level */
403 + i |= tbl->it_level_size - 1;
404 }
405 }
406
407 @@ -260,7 +264,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
408 unsigned int table_shift = max_t(unsigned int, entries_shift + 3,
409 PAGE_SHIFT);
410 const unsigned long tce_table_size = 1UL << table_shift;
411 - unsigned int tmplevels = levels;
412
413 if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
414 return -EINVAL;
415 @@ -268,9 +271,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
416 if (!is_power_of_2(window_size))
417 return -EINVAL;
418
419 - if (alloc_userspace_copy && (window_size > (1ULL << 32)))
420 - tmplevels = 1;
421 -
422 /* Adjust direct table size from window_size and levels */
423 entries_shift = (entries_shift + levels - 1) / levels;
424 level_shift = entries_shift + 3;
425 @@ -281,7 +281,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
426
427 /* Allocate TCE table */
428 addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
429 - tmplevels, tce_table_size, &offset, &total_allocated);
430 + 1, tce_table_size, &offset, &total_allocated);
431
432 /* addr==NULL means that the first level allocation failed */
433 if (!addr)
434 @@ -292,18 +292,18 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
435 * we did not allocate as much as we wanted,
436 * release partially allocated table.
437 */
438 - if (tmplevels == levels && offset < tce_table_size)
439 + if (levels == 1 && offset < tce_table_size)
440 goto free_tces_exit;
441
442 /* Allocate userspace view of the TCE table */
443 if (alloc_userspace_copy) {
444 offset = 0;
445 uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
446 - tmplevels, tce_table_size, &offset,
447 + 1, tce_table_size, &offset,
448 &total_allocated_uas);
449 if (!uas)
450 goto free_tces_exit;
451 - if (tmplevels == levels && (offset < tce_table_size ||
452 + if (levels == 1 && (offset < tce_table_size ||
453 total_allocated_uas != total_allocated))
454 goto free_uas_exit;
455 }
456 @@ -318,7 +318,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
457
458 pr_debug("Created TCE table: ws=%08llx ts=%lx @%08llx base=%lx uas=%p levels=%d/%d\n",
459 window_size, tce_table_size, bus_offset, tbl->it_base,
460 - tbl->it_userspace, tmplevels, levels);
461 + tbl->it_userspace, 1, levels);
462
463 return 0;
464
465 diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
466 index 8b37b28e3831..e302aa092d4f 100644
467 --- a/arch/powerpc/platforms/powernv/pci.h
468 +++ b/arch/powerpc/platforms/powernv/pci.h
469 @@ -243,7 +243,7 @@ extern void pnv_npu_release_ownership(struct pnv_ioda_pe *npe);
470 extern int pnv_npu2_init(struct pnv_phb *phb);
471
472 /* pci-ioda-tce.c */
473 -#define POWERNV_IOMMU_DEFAULT_LEVELS 1
474 +#define POWERNV_IOMMU_DEFAULT_LEVELS 2
475 #define POWERNV_IOMMU_MAX_LEVELS 5
476
477 extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
478 diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
479 index 7b60fcf04dc4..e4ea71383383 100644
480 --- a/arch/powerpc/platforms/pseries/mobility.c
481 +++ b/arch/powerpc/platforms/pseries/mobility.c
482 @@ -12,6 +12,7 @@
483 #include <linux/cpu.h>
484 #include <linux/kernel.h>
485 #include <linux/kobject.h>
486 +#include <linux/sched.h>
487 #include <linux/smp.h>
488 #include <linux/stat.h>
489 #include <linux/completion.h>
490 @@ -209,7 +210,11 @@ static int update_dt_node(__be32 phandle, s32 scope)
491
492 prop_data += vd;
493 }
494 +
495 + cond_resched();
496 }
497 +
498 + cond_resched();
499 } while (rtas_rc == 1);
500
501 of_node_put(dn);
502 @@ -318,8 +323,12 @@ int pseries_devicetree_update(s32 scope)
503 add_dt_node(phandle, drc_index);
504 break;
505 }
506 +
507 + cond_resched();
508 }
509 }
510 +
511 + cond_resched();
512 } while (rc == 1);
513
514 kfree(rtas_buf);
515 diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
516 index ba1791fd3234..67f49159ea70 100644
517 --- a/arch/powerpc/platforms/pseries/setup.c
518 +++ b/arch/powerpc/platforms/pseries/setup.c
519 @@ -325,6 +325,9 @@ static void pseries_lpar_idle(void)
520 * low power mode by ceding processor to hypervisor
521 */
522
523 + if (!prep_irq_for_idle())
524 + return;
525 +
526 /* Indicate to hypervisor that we are idle. */
527 get_lppaca()->idle = 1;
528
529 diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
530 index 74cfc1be04d6..bb5db7bfd853 100644
531 --- a/arch/powerpc/xmon/xmon.c
532 +++ b/arch/powerpc/xmon/xmon.c
533 @@ -2497,13 +2497,16 @@ static void dump_pacas(void)
534 static void dump_one_xive(int cpu)
535 {
536 unsigned int hwid = get_hard_smp_processor_id(cpu);
537 + bool hv = cpu_has_feature(CPU_FTR_HVMODE);
538
539 - opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
540 - opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
541 - opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
542 - opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
543 - opal_xive_dump(XIVE_DUMP_VP, hwid);
544 - opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
545 + if (hv) {
546 + opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
547 + opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
548 + opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
549 + opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
550 + opal_xive_dump(XIVE_DUMP_VP, hwid);
551 + opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
552 + }
553
554 if (setjmp(bus_error_jmp) != 0) {
555 catch_memory_errors = 0;
556 diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
557 index c681329fdeec..e4d17d9ea93d 100644
558 --- a/arch/s390/hypfs/inode.c
559 +++ b/arch/s390/hypfs/inode.c
560 @@ -269,7 +269,7 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root)
561 static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
562 {
563 struct inode *root_inode;
564 - struct dentry *root_dentry;
565 + struct dentry *root_dentry, *update_file;
566 int rc = 0;
567 struct hypfs_sb_info *sbi;
568
569 @@ -300,9 +300,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
570 rc = hypfs_diag_create_files(root_dentry);
571 if (rc)
572 return rc;
573 - sbi->update_file = hypfs_create_update_file(root_dentry);
574 - if (IS_ERR(sbi->update_file))
575 - return PTR_ERR(sbi->update_file);
576 + update_file = hypfs_create_update_file(root_dentry);
577 + if (IS_ERR(update_file))
578 + return PTR_ERR(update_file);
579 + sbi->update_file = update_file;
580 hypfs_update_update(sb);
581 pr_info("Hypervisor filesystem mounted\n");
582 return 0;
583 diff --git a/block/mq-deadline.c b/block/mq-deadline.c
584 index d5e21ce44d2c..69094d641062 100644
585 --- a/block/mq-deadline.c
586 +++ b/block/mq-deadline.c
587 @@ -376,13 +376,6 @@ done:
588 * hardware queue, but we may return a request that is for a
589 * different hardware queue. This is because mq-deadline has shared
590 * state for all hardware queues, in terms of sorting, FIFOs, etc.
591 - *
592 - * For a zoned block device, __dd_dispatch_request() may return NULL
593 - * if all the queued write requests are directed at zones that are already
594 - * locked due to on-going write requests. In this case, make sure to mark
595 - * the queue as needing a restart to ensure that the queue is run again
596 - * and the pending writes dispatched once the target zones for the ongoing
597 - * write requests are unlocked in dd_finish_request().
598 */
599 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
600 {
601 @@ -391,9 +384,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
602
603 spin_lock(&dd->lock);
604 rq = __dd_dispatch_request(dd);
605 - if (!rq && blk_queue_is_zoned(hctx->queue) &&
606 - !list_empty(&dd->fifo_list[WRITE]))
607 - blk_mq_sched_mark_restart_hctx(hctx);
608 spin_unlock(&dd->lock);
609
610 return rq;
611 @@ -559,6 +549,13 @@ static void dd_prepare_request(struct request *rq, struct bio *bio)
612 * spinlock so that the zone is never unlocked while deadline_fifo_request()
613 * or deadline_next_request() are executing. This function is called for
614 * all requests, whether or not these requests complete successfully.
615 + *
616 + * For a zoned block device, __dd_dispatch_request() may have stopped
617 + * dispatching requests if all the queued requests are write requests directed
618 + * at zones that are already locked due to on-going write requests. To ensure
619 + * write request dispatch progress in this case, mark the queue as needing a
620 + * restart to ensure that the queue is run again after completion of the
621 + * request and zones being unlocked.
622 */
623 static void dd_finish_request(struct request *rq)
624 {
625 @@ -570,6 +567,12 @@ static void dd_finish_request(struct request *rq)
626
627 spin_lock_irqsave(&dd->zone_lock, flags);
628 blk_req_zone_write_unlock(rq);
629 + if (!list_empty(&dd->fifo_list[WRITE])) {
630 + struct blk_mq_hw_ctx *hctx;
631 +
632 + hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
633 + blk_mq_sched_mark_restart_hctx(hctx);
634 + }
635 spin_unlock_irqrestore(&dd->zone_lock, flags);
636 }
637 }
638 diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
639 index 6ad5ef48b61e..8cd2ac650b50 100644
640 --- a/drivers/base/regmap/Kconfig
641 +++ b/drivers/base/regmap/Kconfig
642 @@ -44,7 +44,7 @@ config REGMAP_IRQ
643
644 config REGMAP_SOUNDWIRE
645 tristate
646 - depends on SOUNDWIRE_BUS
647 + depends on SOUNDWIRE
648
649 config REGMAP_SCCB
650 tristate
651 diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
652 index 6f1d25c1eb64..0bc344d22f01 100644
653 --- a/drivers/block/pktcdvd.c
654 +++ b/drivers/block/pktcdvd.c
655 @@ -2596,7 +2596,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
656 if (ret)
657 return ret;
658 if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
659 - WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
660 blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
661 return -EINVAL;
662 }
663 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
664 index 75e5006f395a..006d76525678 100644
665 --- a/drivers/char/ipmi/ipmi_si_intf.c
666 +++ b/drivers/char/ipmi/ipmi_si_intf.c
667 @@ -221,6 +221,9 @@ struct smi_info {
668 */
669 bool irq_enable_broken;
670
671 + /* Is the driver in maintenance mode? */
672 + bool in_maintenance_mode;
673 +
674 /*
675 * Did we get an attention that we did not handle?
676 */
677 @@ -1013,11 +1016,20 @@ static int ipmi_thread(void *data)
678 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
679 busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
680 &busy_until);
681 - if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
682 + if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
683 ; /* do nothing */
684 - else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
685 - schedule();
686 - else if (smi_result == SI_SM_IDLE) {
687 + } else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
688 + /*
689 + * In maintenance mode we run as fast as
690 + * possible to allow firmware updates to
691 + * complete as fast as possible, but normally
692 + * don't bang on the scheduler.
693 + */
694 + if (smi_info->in_maintenance_mode)
695 + schedule();
696 + else
697 + usleep_range(100, 200);
698 + } else if (smi_result == SI_SM_IDLE) {
699 if (atomic_read(&smi_info->need_watch)) {
700 schedule_timeout_interruptible(100);
701 } else {
702 @@ -1025,8 +1037,9 @@ static int ipmi_thread(void *data)
703 __set_current_state(TASK_INTERRUPTIBLE);
704 schedule();
705 }
706 - } else
707 + } else {
708 schedule_timeout_interruptible(1);
709 + }
710 }
711 return 0;
712 }
713 @@ -1201,6 +1214,7 @@ static void set_maintenance_mode(void *send_info, bool enable)
714
715 if (!enable)
716 atomic_set(&smi_info->req_events, 0);
717 + smi_info->in_maintenance_mode = enable;
718 }
719
720 static void shutdown_smi(void *send_info);
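
The ipmi_thread() hunk above stops hammering the scheduler during ordinary polling: outside maintenance mode the loop now sleeps roughly 100-200 us per iteration via usleep_range(), and only during firmware maintenance does it keep the old yield-as-fast-as-possible behaviour. A rough userspace analog of the two pacing modes (nanosleep() and sched_yield() standing in for usleep_range() and schedule(); the helper name is hypothetical):

    #include <stdbool.h>
    #include <sched.h>
    #include <time.h>

    /* Pace one polling iteration: yield only in maintenance mode so
     * firmware updates finish quickly; otherwise sleep ~150 us. */
    static void pace_poll(bool in_maintenance_mode)
    {
        if (in_maintenance_mode) {
            sched_yield();              /* analog of schedule() */
        } else {
            struct timespec ts = { 0, 150 * 1000 };
            nanosleep(&ts, NULL);       /* analog of usleep_range(100, 200) */
        }
    }
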
721 diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
722 index 46caadca916a..0b01eb7b14e5 100644
723 --- a/drivers/char/tpm/tpm-chip.c
724 +++ b/drivers/char/tpm/tpm-chip.c
725 @@ -187,12 +187,13 @@ static int tpm_class_shutdown(struct device *dev)
726 {
727 struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
728
729 + down_write(&chip->ops_sem);
730 if (chip->flags & TPM_CHIP_FLAG_TPM2) {
731 - down_write(&chip->ops_sem);
732 tpm2_shutdown(chip, TPM2_SU_CLEAR);
733 chip->ops = NULL;
734 - up_write(&chip->ops_sem);
735 }
736 + chip->ops = NULL;
737 + up_write(&chip->ops_sem);
738
739 return 0;
740 }
741 diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
742 index 83a77a445538..177a60e5c6ec 100644
743 --- a/drivers/char/tpm/tpm-sysfs.c
744 +++ b/drivers/char/tpm/tpm-sysfs.c
745 @@ -39,7 +39,6 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
746 {
747 struct tpm_buf tpm_buf;
748 struct tpm_readpubek_out *out;
749 - ssize_t rc;
750 int i;
751 char *str = buf;
752 struct tpm_chip *chip = to_tpm_chip(dev);
753 @@ -47,19 +46,18 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
754
755 memset(&anti_replay, 0, sizeof(anti_replay));
756
757 - rc = tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK);
758 - if (rc)
759 - return rc;
760 + if (tpm_try_get_ops(chip))
761 + return 0;
762 +
763 + if (tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK))
764 + goto out_ops;
765
766 tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay));
767
768 - rc = tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE,
769 + if (tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE,
770 READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
771 - "attempting to read the PUBEK");
772 - if (rc) {
773 - tpm_buf_destroy(&tpm_buf);
774 - return 0;
775 - }
776 + "attempting to read the PUBEK"))
777 + goto out_buf;
778
779 out = (struct tpm_readpubek_out *)&tpm_buf.data[10];
780 str +=
781 @@ -90,9 +88,11 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
782 str += sprintf(str, "\n");
783 }
784
785 - rc = str - buf;
786 +out_buf:
787 tpm_buf_destroy(&tpm_buf);
788 - return rc;
789 +out_ops:
790 + tpm_put_ops(chip);
791 + return str - buf;
792 }
793 static DEVICE_ATTR_RO(pubek);
794
795 @@ -106,12 +106,16 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
796 char *str = buf;
797 struct tpm_chip *chip = to_tpm_chip(dev);
798
799 - rc = tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap,
800 - "attempting to determine the number of PCRS",
801 - sizeof(cap.num_pcrs));
802 - if (rc)
803 + if (tpm_try_get_ops(chip))
804 return 0;
805
806 + if (tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap,
807 + "attempting to determine the number of PCRS",
808 + sizeof(cap.num_pcrs))) {
809 + tpm_put_ops(chip);
810 + return 0;
811 + }
812 +
813 num_pcrs = be32_to_cpu(cap.num_pcrs);
814 for (i = 0; i < num_pcrs; i++) {
815 rc = tpm_pcr_read_dev(chip, i, digest);
816 @@ -122,6 +126,7 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
817 str += sprintf(str, "%02X ", digest[j]);
818 str += sprintf(str, "\n");
819 }
820 + tpm_put_ops(chip);
821 return str - buf;
822 }
823 static DEVICE_ATTR_RO(pcrs);
824 @@ -129,16 +134,21 @@ static DEVICE_ATTR_RO(pcrs);
825 static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
826 char *buf)
827 {
828 + struct tpm_chip *chip = to_tpm_chip(dev);
829 + ssize_t rc = 0;
830 cap_t cap;
831 - ssize_t rc;
832
833 - rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
834 - "attempting to determine the permanent enabled state",
835 - sizeof(cap.perm_flags));
836 - if (rc)
837 + if (tpm_try_get_ops(chip))
838 return 0;
839
840 + if (tpm_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
841 + "attempting to determine the permanent enabled state",
842 + sizeof(cap.perm_flags)))
843 + goto out_ops;
844 +
845 rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
846 +out_ops:
847 + tpm_put_ops(chip);
848 return rc;
849 }
850 static DEVICE_ATTR_RO(enabled);
851 @@ -146,16 +156,21 @@ static DEVICE_ATTR_RO(enabled);
852 static ssize_t active_show(struct device *dev, struct device_attribute *attr,
853 char *buf)
854 {
855 + struct tpm_chip *chip = to_tpm_chip(dev);
856 + ssize_t rc = 0;
857 cap_t cap;
858 - ssize_t rc;
859
860 - rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
861 - "attempting to determine the permanent active state",
862 - sizeof(cap.perm_flags));
863 - if (rc)
864 + if (tpm_try_get_ops(chip))
865 return 0;
866
867 + if (tpm_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
868 + "attempting to determine the permanent active state",
869 + sizeof(cap.perm_flags)))
870 + goto out_ops;
871 +
872 rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
873 +out_ops:
874 + tpm_put_ops(chip);
875 return rc;
876 }
877 static DEVICE_ATTR_RO(active);
878 @@ -163,16 +178,21 @@ static DEVICE_ATTR_RO(active);
879 static ssize_t owned_show(struct device *dev, struct device_attribute *attr,
880 char *buf)
881 {
882 + struct tpm_chip *chip = to_tpm_chip(dev);
883 + ssize_t rc = 0;
884 cap_t cap;
885 - ssize_t rc;
886
887 - rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap,
888 - "attempting to determine the owner state",
889 - sizeof(cap.owned));
890 - if (rc)
891 + if (tpm_try_get_ops(chip))
892 return 0;
893
894 + if (tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap,
895 + "attempting to determine the owner state",
896 + sizeof(cap.owned)))
897 + goto out_ops;
898 +
899 rc = sprintf(buf, "%d\n", cap.owned);
900 +out_ops:
901 + tpm_put_ops(chip);
902 return rc;
903 }
904 static DEVICE_ATTR_RO(owned);
905 @@ -180,16 +200,21 @@ static DEVICE_ATTR_RO(owned);
906 static ssize_t temp_deactivated_show(struct device *dev,
907 struct device_attribute *attr, char *buf)
908 {
909 + struct tpm_chip *chip = to_tpm_chip(dev);
910 + ssize_t rc = 0;
911 cap_t cap;
912 - ssize_t rc;
913
914 - rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap,
915 - "attempting to determine the temporary state",
916 - sizeof(cap.stclear_flags));
917 - if (rc)
918 + if (tpm_try_get_ops(chip))
919 return 0;
920
921 + if (tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap,
922 + "attempting to determine the temporary state",
923 + sizeof(cap.stclear_flags)))
924 + goto out_ops;
925 +
926 rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
927 +out_ops:
928 + tpm_put_ops(chip);
929 return rc;
930 }
931 static DEVICE_ATTR_RO(temp_deactivated);
932 @@ -198,15 +223,18 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
933 char *buf)
934 {
935 struct tpm_chip *chip = to_tpm_chip(dev);
936 - cap_t cap;
937 - ssize_t rc;
938 + ssize_t rc = 0;
939 char *str = buf;
940 + cap_t cap;
941
942 - rc = tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
943 - "attempting to determine the manufacturer",
944 - sizeof(cap.manufacturer_id));
945 - if (rc)
946 + if (tpm_try_get_ops(chip))
947 return 0;
948 +
949 + if (tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
950 + "attempting to determine the manufacturer",
951 + sizeof(cap.manufacturer_id)))
952 + goto out_ops;
953 +
954 str += sprintf(str, "Manufacturer: 0x%x\n",
955 be32_to_cpu(cap.manufacturer_id));
956
957 @@ -223,20 +251,22 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
958 cap.tpm_version_1_2.revMinor);
959 } else {
960 /* Otherwise just use TPM_STRUCT_VER */
961 - rc = tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
962 - "attempting to determine the 1.1 version",
963 - sizeof(cap.tpm_version));
964 - if (rc)
965 - return 0;
966 + if (tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
967 + "attempting to determine the 1.1 version",
968 + sizeof(cap.tpm_version)))
969 + goto out_ops;
970 +
971 str += sprintf(str,
972 "TCG version: %d.%d\nFirmware version: %d.%d\n",
973 cap.tpm_version.Major,
974 cap.tpm_version.Minor,
975 cap.tpm_version.revMajor,
976 cap.tpm_version.revMinor);
977 - }
978 -
979 - return str - buf;
980 +}
981 + rc = str - buf;
982 +out_ops:
983 + tpm_put_ops(chip);
984 + return rc;
985 }
986 static DEVICE_ATTR_RO(caps);
987
988 @@ -244,10 +274,12 @@ static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
989 const char *buf, size_t count)
990 {
991 struct tpm_chip *chip = to_tpm_chip(dev);
992 - if (chip == NULL)
993 +
994 + if (tpm_try_get_ops(chip))
995 return 0;
996
997 chip->ops->cancel(chip);
998 + tpm_put_ops(chip);
999 return count;
1000 }
1001 static DEVICE_ATTR_WO(cancel);
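
All of the tpm-sysfs.c handlers above now bracket their TPM traffic with tpm_try_get_ops()/tpm_put_ops(), pairing with the tpm-chip.c hunk earlier in this patch where tpm_class_shutdown() takes ops_sem and clears chip->ops: once shutdown has run, every handler bails out instead of dereferencing a NULL ops pointer. A reduced userspace analog of that protocol using a pthread rwlock (the real helpers do more, e.g. issue the TPM2 shutdown command; all names here are stand-ins):

    #include <pthread.h>
    #include <stddef.h>

    struct ops { int (*cancel)(void); };

    struct chip {
        pthread_rwlock_t ops_sem;
        const struct ops *ops;    /* cleared by shutdown */
    };

    /* Analog of tpm_try_get_ops(): hold the lock across use and fail
     * if the ops have already been torn down. */
    static int try_get_ops(struct chip *c)
    {
        pthread_rwlock_rdlock(&c->ops_sem);
        if (!c->ops) {
            pthread_rwlock_unlock(&c->ops_sem);
            return -1;
        }
        return 0;
    }

    static void put_ops(struct chip *c)    /* analog of tpm_put_ops() */
    {
        pthread_rwlock_unlock(&c->ops_sem);
    }

    /* Analog of tpm_class_shutdown(): after this, try_get_ops() fails. */
    static void shutdown(struct chip *c)
    {
        pthread_rwlock_wrlock(&c->ops_sem);
        c->ops = NULL;
        pthread_rwlock_unlock(&c->ops_sem);
    }
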
1002 diff --git a/drivers/clk/actions/owl-common.c b/drivers/clk/actions/owl-common.c
1003 index 61c1071b5180..e9be34b17f3f 100644
1004 --- a/drivers/clk/actions/owl-common.c
1005 +++ b/drivers/clk/actions/owl-common.c
1006 @@ -67,16 +67,17 @@ int owl_clk_probe(struct device *dev, struct clk_hw_onecell_data *hw_clks)
1007 struct clk_hw *hw;
1008
1009 for (i = 0; i < hw_clks->num; i++) {
1010 + const char *name;
1011
1012 hw = hw_clks->hws[i];
1013 -
1014 if (IS_ERR_OR_NULL(hw))
1015 continue;
1016
1017 + name = hw->init->name;
1018 ret = devm_clk_hw_register(dev, hw);
1019 if (ret) {
1020 dev_err(dev, "Couldn't register clock %d - %s\n",
1021 - i, hw->init->name);
1022 + i, name);
1023 return ret;
1024 }
1025 }
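
This owl-common.c hunk, and the similar sirf/clk-common.c, sprd/common.c and zte/clk-zx296718.c hunks further down, all fix one pattern: clock registration may free or detach hw->init, so a clock name needed for a later error message has to be saved before registering. The bare shape of the bug and the fix (the hypothetical register_hw() stands in for devm_clk_hw_register()/clk_hw_register()):

    #include <stdio.h>
    #include <stdlib.h>

    struct init_data { const char *name; };
    struct hw { struct init_data *init; };

    /* Hypothetical registration that consumes hw->init the way the
     * common clock framework does: hw->init is dead after this call. */
    static int register_hw(struct hw *hw)
    {
        free(hw->init);
        hw->init = NULL;
        return -1;    /* simulate a registration failure */
    }

    int main(void)
    {
        struct hw hw = { .init = malloc(sizeof(*hw.init)) };
        const char *name;

        hw.init->name = "sample-clk";

        name = hw.init->name;    /* save before registering */
        if (register_hw(&hw))
            fprintf(stderr, "Couldn't register clock - %s\n", name);
        return 0;
    }
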
1026 diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
1027 index c813c27f2e58..2f97a843d6d6 100644
1028 --- a/drivers/clk/at91/clk-main.c
1029 +++ b/drivers/clk/at91/clk-main.c
1030 @@ -27,6 +27,10 @@
1031
1032 #define MOR_KEY_MASK (0xff << 16)
1033
1034 +#define clk_main_parent_select(s) (((s) & \
1035 + (AT91_PMC_MOSCEN | \
1036 + AT91_PMC_OSCBYPASS)) ? 1 : 0)
1037 +
1038 struct clk_main_osc {
1039 struct clk_hw hw;
1040 struct regmap *regmap;
1041 @@ -119,7 +123,7 @@ static int clk_main_osc_is_prepared(struct clk_hw *hw)
1042
1043 regmap_read(regmap, AT91_PMC_SR, &status);
1044
1045 - return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN);
1046 + return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp);
1047 }
1048
1049 static const struct clk_ops main_osc_ops = {
1050 @@ -530,7 +534,7 @@ static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
1051
1052 regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
1053
1054 - return status & AT91_PMC_MOSCEN ? 1 : 0;
1055 + return clk_main_parent_select(status);
1056 }
1057
1058 static const struct clk_ops sam9x5_main_ops = {
1059 @@ -572,7 +576,7 @@ at91_clk_register_sam9x5_main(struct regmap *regmap,
1060 clkmain->hw.init = &init;
1061 clkmain->regmap = regmap;
1062 regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
1063 - clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0;
1064 + clkmain->parent = clk_main_parent_select(status);
1065
1066 hw = &clkmain->hw;
1067 ret = clk_hw_register(NULL, &clkmain->hw);
1068 diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
1069 index 3a1812f65e5d..8abc5c8cb8b8 100644
1070 --- a/drivers/clk/clk-qoriq.c
1071 +++ b/drivers/clk/clk-qoriq.c
1072 @@ -610,7 +610,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
1073 .guts_compat = "fsl,qoriq-device-config-1.0",
1074 .init_periph = p5020_init_periph,
1075 .cmux_groups = {
1076 - &p2041_cmux_grp1, &p2041_cmux_grp2
1077 + &p5020_cmux_grp1, &p5020_cmux_grp2
1078 },
1079 .cmux_to_group = {
1080 0, 1, -1
1081 diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
1082 index 3bf11a620094..ada3e4aeb38f 100644
1083 --- a/drivers/clk/qcom/gcc-sdm845.c
1084 +++ b/drivers/clk/qcom/gcc-sdm845.c
1085 @@ -647,7 +647,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
1086 .name = "gcc_sdcc2_apps_clk_src",
1087 .parent_names = gcc_parent_names_10,
1088 .num_parents = 5,
1089 - .ops = &clk_rcg2_ops,
1090 + .ops = &clk_rcg2_floor_ops,
1091 },
1092 };
1093
1094 @@ -671,7 +671,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
1095 .name = "gcc_sdcc4_apps_clk_src",
1096 .parent_names = gcc_parent_names_0,
1097 .num_parents = 4,
1098 - .ops = &clk_rcg2_ops,
1099 + .ops = &clk_rcg2_floor_ops,
1100 },
1101 };
1102
1103 diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
1104 index e82adcb16a52..45d94fb9703d 100644
1105 --- a/drivers/clk/renesas/clk-mstp.c
1106 +++ b/drivers/clk/renesas/clk-mstp.c
1107 @@ -341,7 +341,8 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
1108 return;
1109
1110 pd->name = np->name;
1111 - pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
1112 + pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
1113 + GENPD_FLAG_ACTIVE_WAKEUP;
1114 pd->attach_dev = cpg_mstp_attach_dev;
1115 pd->detach_dev = cpg_mstp_detach_dev;
1116 pm_genpd_init(pd, &pm_domain_always_on_gov, false);
1117 diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
1118 index 24485bee9b49..d7a2ad617369 100644
1119 --- a/drivers/clk/renesas/renesas-cpg-mssr.c
1120 +++ b/drivers/clk/renesas/renesas-cpg-mssr.c
1121 @@ -514,7 +514,8 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,
1122
1123 genpd = &pd->genpd;
1124 genpd->name = np->name;
1125 - genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
1126 + genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
1127 + GENPD_FLAG_ACTIVE_WAKEUP;
1128 genpd->attach_dev = cpg_mssr_attach_dev;
1129 genpd->detach_dev = cpg_mssr_detach_dev;
1130 pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
1131 diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
1132 index d8f9efa5129a..25351d6a55ba 100644
1133 --- a/drivers/clk/sirf/clk-common.c
1134 +++ b/drivers/clk/sirf/clk-common.c
1135 @@ -298,9 +298,10 @@ static u8 dmn_clk_get_parent(struct clk_hw *hw)
1136 {
1137 struct clk_dmn *clk = to_dmnclk(hw);
1138 u32 cfg = clkc_readl(clk->regofs);
1139 + const char *name = clk_hw_get_name(hw);
1140
1141 /* parent of io domain can only be pll3 */
1142 - if (strcmp(hw->init->name, "io") == 0)
1143 + if (strcmp(name, "io") == 0)
1144 return 4;
1145
1146 WARN_ON((cfg & (BIT(3) - 1)) > 4);
1147 @@ -312,9 +313,10 @@ static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent)
1148 {
1149 struct clk_dmn *clk = to_dmnclk(hw);
1150 u32 cfg = clkc_readl(clk->regofs);
1151 + const char *name = clk_hw_get_name(hw);
1152
1153 /* parent of io domain can only be pll3 */
1154 - if (strcmp(hw->init->name, "io") == 0)
1155 + if (strcmp(name, "io") == 0)
1156 return -EINVAL;
1157
1158 cfg &= ~(BIT(3) - 1);
1159 @@ -354,7 +356,8 @@ static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate,
1160 {
1161 unsigned long fin;
1162 unsigned ratio, wait, hold;
1163 - unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
1164 + const char *name = clk_hw_get_name(hw);
1165 + unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
1166
1167 fin = *parent_rate;
1168 ratio = fin / rate;
1169 @@ -376,7 +379,8 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate,
1170 struct clk_dmn *clk = to_dmnclk(hw);
1171 unsigned long fin;
1172 unsigned ratio, wait, hold, reg;
1173 - unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
1174 + const char *name = clk_hw_get_name(hw);
1175 + unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
1176
1177 fin = parent_rate;
1178 ratio = fin / rate;
1179 diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c
1180 index e038b0447206..8bdab1c3013b 100644
1181 --- a/drivers/clk/sprd/common.c
1182 +++ b/drivers/clk/sprd/common.c
1183 @@ -71,16 +71,17 @@ int sprd_clk_probe(struct device *dev, struct clk_hw_onecell_data *clkhw)
1184 struct clk_hw *hw;
1185
1186 for (i = 0; i < clkhw->num; i++) {
1187 + const char *name;
1188
1189 hw = clkhw->hws[i];
1190 -
1191 if (!hw)
1192 continue;
1193
1194 + name = hw->init->name;
1195 ret = devm_clk_hw_register(dev, hw);
1196 if (ret) {
1197 dev_err(dev, "Couldn't register clock %d - %s\n",
1198 - i, hw->init->name);
1199 + i, name);
1200 return ret;
1201 }
1202 }
1203 diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c
1204 index 36b4402bf09e..640270f51aa5 100644
1205 --- a/drivers/clk/sprd/pll.c
1206 +++ b/drivers/clk/sprd/pll.c
1207 @@ -136,6 +136,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll,
1208 k2 + refin * nint * CLK_PLL_1M;
1209 }
1210
1211 + kfree(cfg);
1212 return rate;
1213 }
1214
1215 @@ -222,6 +223,7 @@ static int _sprd_pll_set_rate(const struct sprd_pll *pll,
1216 if (!ret)
1217 udelay(pll->udelay);
1218
1219 + kfree(cfg);
1220 return ret;
1221 }
1222
1223 diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
1224 index ac12f261f8ca..9e3f4088724b 100644
1225 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
1226 +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
1227 @@ -499,6 +499,9 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
1228 [CLK_MMC1] = &mmc1_clk.common.hw,
1229 [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw,
1230 [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw,
1231 + [CLK_MMC2] = &mmc2_clk.common.hw,
1232 + [CLK_MMC2_SAMPLE] = &mmc2_sample_clk.common.hw,
1233 + [CLK_MMC2_OUTPUT] = &mmc2_output_clk.common.hw,
1234 [CLK_CE] = &ce_clk.common.hw,
1235 [CLK_SPI0] = &spi0_clk.common.hw,
1236 [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
1237 diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c
1238 index 354dd508c516..8dfb8523b79d 100644
1239 --- a/drivers/clk/zte/clk-zx296718.c
1240 +++ b/drivers/clk/zte/clk-zx296718.c
1241 @@ -567,6 +567,7 @@ static int __init top_clocks_init(struct device_node *np)
1242 {
1243 void __iomem *reg_base;
1244 int i, ret;
1245 + const char *name;
1246
1247 reg_base = of_iomap(np, 0);
1248 if (!reg_base) {
1249 @@ -576,11 +577,10 @@ static int __init top_clocks_init(struct device_node *np)
1250
1251 for (i = 0; i < ARRAY_SIZE(zx296718_pll_clk); i++) {
1252 zx296718_pll_clk[i].reg_base += (uintptr_t)reg_base;
1253 + name = zx296718_pll_clk[i].hw.init->name;
1254 ret = clk_hw_register(NULL, &zx296718_pll_clk[i].hw);
1255 - if (ret) {
1256 - pr_warn("top clk %s init error!\n",
1257 - zx296718_pll_clk[i].hw.init->name);
1258 - }
1259 + if (ret)
1260 + pr_warn("top clk %s init error!\n", name);
1261 }
1262
1263 for (i = 0; i < ARRAY_SIZE(top_ffactor_clk); i++) {
1264 @@ -588,11 +588,10 @@ static int __init top_clocks_init(struct device_node *np)
1265 top_hw_onecell_data.hws[top_ffactor_clk[i].id] =
1266 &top_ffactor_clk[i].factor.hw;
1267
1268 + name = top_ffactor_clk[i].factor.hw.init->name;
1269 ret = clk_hw_register(NULL, &top_ffactor_clk[i].factor.hw);
1270 - if (ret) {
1271 - pr_warn("top clk %s init error!\n",
1272 - top_ffactor_clk[i].factor.hw.init->name);
1273 - }
1274 + if (ret)
1275 + pr_warn("top clk %s init error!\n", name);
1276 }
1277
1278 for (i = 0; i < ARRAY_SIZE(top_mux_clk); i++) {
1279 @@ -601,11 +600,10 @@ static int __init top_clocks_init(struct device_node *np)
1280 &top_mux_clk[i].mux.hw;
1281
1282 top_mux_clk[i].mux.reg += (uintptr_t)reg_base;
1283 + name = top_mux_clk[i].mux.hw.init->name;
1284 ret = clk_hw_register(NULL, &top_mux_clk[i].mux.hw);
1285 - if (ret) {
1286 - pr_warn("top clk %s init error!\n",
1287 - top_mux_clk[i].mux.hw.init->name);
1288 - }
1289 + if (ret)
1290 + pr_warn("top clk %s init error!\n", name);
1291 }
1292
1293 for (i = 0; i < ARRAY_SIZE(top_gate_clk); i++) {
1294 @@ -614,11 +612,10 @@ static int __init top_clocks_init(struct device_node *np)
1295 &top_gate_clk[i].gate.hw;
1296
1297 top_gate_clk[i].gate.reg += (uintptr_t)reg_base;
1298 + name = top_gate_clk[i].gate.hw.init->name;
1299 ret = clk_hw_register(NULL, &top_gate_clk[i].gate.hw);
1300 - if (ret) {
1301 - pr_warn("top clk %s init error!\n",
1302 - top_gate_clk[i].gate.hw.init->name);
1303 - }
1304 + if (ret)
1305 + pr_warn("top clk %s init error!\n", name);
1306 }
1307
1308 for (i = 0; i < ARRAY_SIZE(top_div_clk); i++) {
1309 @@ -627,11 +624,10 @@ static int __init top_clocks_init(struct device_node *np)
1310 &top_div_clk[i].div.hw;
1311
1312 top_div_clk[i].div.reg += (uintptr_t)reg_base;
1313 + name = top_div_clk[i].div.hw.init->name;
1314 ret = clk_hw_register(NULL, &top_div_clk[i].div.hw);
1315 - if (ret) {
1316 - pr_warn("top clk %s init error!\n",
1317 - top_div_clk[i].div.hw.init->name);
1318 - }
1319 + if (ret)
1320 + pr_warn("top clk %s init error!\n", name);
1321 }
1322
1323 ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
1324 @@ -757,6 +753,7 @@ static int __init lsp0_clocks_init(struct device_node *np)
1325 {
1326 void __iomem *reg_base;
1327 int i, ret;
1328 + const char *name;
1329
1330 reg_base = of_iomap(np, 0);
1331 if (!reg_base) {
1332 @@ -770,11 +767,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
1333 &lsp0_mux_clk[i].mux.hw;
1334
1335 lsp0_mux_clk[i].mux.reg += (uintptr_t)reg_base;
1336 + name = lsp0_mux_clk[i].mux.hw.init->name;
1337 ret = clk_hw_register(NULL, &lsp0_mux_clk[i].mux.hw);
1338 - if (ret) {
1339 - pr_warn("lsp0 clk %s init error!\n",
1340 - lsp0_mux_clk[i].mux.hw.init->name);
1341 - }
1342 + if (ret)
1343 + pr_warn("lsp0 clk %s init error!\n", name);
1344 }
1345
1346 for (i = 0; i < ARRAY_SIZE(lsp0_gate_clk); i++) {
1347 @@ -783,11 +779,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
1348 &lsp0_gate_clk[i].gate.hw;
1349
1350 lsp0_gate_clk[i].gate.reg += (uintptr_t)reg_base;
1351 + name = lsp0_gate_clk[i].gate.hw.init->name;
1352 ret = clk_hw_register(NULL, &lsp0_gate_clk[i].gate.hw);
1353 - if (ret) {
1354 - pr_warn("lsp0 clk %s init error!\n",
1355 - lsp0_gate_clk[i].gate.hw.init->name);
1356 - }
1357 + if (ret)
1358 + pr_warn("lsp0 clk %s init error!\n", name);
1359 }
1360
1361 for (i = 0; i < ARRAY_SIZE(lsp0_div_clk); i++) {
1362 @@ -796,11 +791,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
1363 &lsp0_div_clk[i].div.hw;
1364
1365 lsp0_div_clk[i].div.reg += (uintptr_t)reg_base;
1366 + name = lsp0_div_clk[i].div.hw.init->name;
1367 ret = clk_hw_register(NULL, &lsp0_div_clk[i].div.hw);
1368 - if (ret) {
1369 - pr_warn("lsp0 clk %s init error!\n",
1370 - lsp0_div_clk[i].div.hw.init->name);
1371 - }
1372 + if (ret)
1373 + pr_warn("lsp0 clk %s init error!\n", name);
1374 }
1375
1376 ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
1377 @@ -865,6 +859,7 @@ static int __init lsp1_clocks_init(struct device_node *np)
1378 {
1379 void __iomem *reg_base;
1380 int i, ret;
1381 + const char *name;
1382
1383 reg_base = of_iomap(np, 0);
1384 if (!reg_base) {
1385 @@ -878,11 +873,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
1386 &lsp0_mux_clk[i].mux.hw;
1387
1388 lsp1_mux_clk[i].mux.reg += (uintptr_t)reg_base;
1389 + name = lsp1_mux_clk[i].mux.hw.init->name;
1390 ret = clk_hw_register(NULL, &lsp1_mux_clk[i].mux.hw);
1391 - if (ret) {
1392 - pr_warn("lsp1 clk %s init error!\n",
1393 - lsp1_mux_clk[i].mux.hw.init->name);
1394 - }
1395 + if (ret)
1396 + pr_warn("lsp1 clk %s init error!\n", name);
1397 }
1398
1399 for (i = 0; i < ARRAY_SIZE(lsp1_gate_clk); i++) {
1400 @@ -891,11 +885,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
1401 &lsp1_gate_clk[i].gate.hw;
1402
1403 lsp1_gate_clk[i].gate.reg += (uintptr_t)reg_base;
1404 + name = lsp1_gate_clk[i].gate.hw.init->name;
1405 ret = clk_hw_register(NULL, &lsp1_gate_clk[i].gate.hw);
1406 - if (ret) {
1407 - pr_warn("lsp1 clk %s init error!\n",
1408 - lsp1_gate_clk[i].gate.hw.init->name);
1409 - }
1410 + if (ret)
1411 + pr_warn("lsp1 clk %s init error!\n", name);
1412 }
1413
1414 for (i = 0; i < ARRAY_SIZE(lsp1_div_clk); i++) {
1415 @@ -904,11 +897,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
1416 &lsp1_div_clk[i].div.hw;
1417
1418 lsp1_div_clk[i].div.reg += (uintptr_t)reg_base;
1419 + name = lsp1_div_clk[i].div.hw.init->name;
1420 ret = clk_hw_register(NULL, &lsp1_div_clk[i].div.hw);
1421 - if (ret) {
1422 - pr_warn("lsp1 clk %s init error!\n",
1423 - lsp1_div_clk[i].div.hw.init->name);
1424 - }
1425 + if (ret)
1426 + pr_warn("lsp1 clk %s init error!\n", name);
1427 }
1428
1429 ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
1430 @@ -982,6 +974,7 @@ static int __init audio_clocks_init(struct device_node *np)
1431 {
1432 void __iomem *reg_base;
1433 int i, ret;
1434 + const char *name;
1435
1436 reg_base = of_iomap(np, 0);
1437 if (!reg_base) {
1438 @@ -995,11 +988,10 @@ static int __init audio_clocks_init(struct device_node *np)
1439 &audio_mux_clk[i].mux.hw;
1440
1441 audio_mux_clk[i].mux.reg += (uintptr_t)reg_base;
1442 + name = audio_mux_clk[i].mux.hw.init->name;
1443 ret = clk_hw_register(NULL, &audio_mux_clk[i].mux.hw);
1444 - if (ret) {
1445 - pr_warn("audio clk %s init error!\n",
1446 - audio_mux_clk[i].mux.hw.init->name);
1447 - }
1448 + if (ret)
1449 + pr_warn("audio clk %s init error!\n", name);
1450 }
1451
1452 for (i = 0; i < ARRAY_SIZE(audio_adiv_clk); i++) {
1453 @@ -1008,11 +1000,10 @@ static int __init audio_clocks_init(struct device_node *np)
1454 &audio_adiv_clk[i].hw;
1455
1456 audio_adiv_clk[i].reg_base += (uintptr_t)reg_base;
1457 + name = audio_adiv_clk[i].hw.init->name;
1458 ret = clk_hw_register(NULL, &audio_adiv_clk[i].hw);
1459 - if (ret) {
1460 - pr_warn("audio clk %s init error!\n",
1461 - audio_adiv_clk[i].hw.init->name);
1462 - }
1463 + if (ret)
1464 + pr_warn("audio clk %s init error!\n", name);
1465 }
1466
1467 for (i = 0; i < ARRAY_SIZE(audio_div_clk); i++) {
1468 @@ -1021,11 +1012,10 @@ static int __init audio_clocks_init(struct device_node *np)
1469 &audio_div_clk[i].div.hw;
1470
1471 audio_div_clk[i].div.reg += (uintptr_t)reg_base;
1472 + name = audio_div_clk[i].div.hw.init->name;
1473 ret = clk_hw_register(NULL, &audio_div_clk[i].div.hw);
1474 - if (ret) {
1475 - pr_warn("audio clk %s init error!\n",
1476 - audio_div_clk[i].div.hw.init->name);
1477 - }
1478 + if (ret)
1479 + pr_warn("audio clk %s init error!\n", name);
1480 }
1481
1482 for (i = 0; i < ARRAY_SIZE(audio_gate_clk); i++) {
1483 @@ -1034,11 +1024,10 @@ static int __init audio_clocks_init(struct device_node *np)
1484 &audio_gate_clk[i].gate.hw;
1485
1486 audio_gate_clk[i].gate.reg += (uintptr_t)reg_base;
1487 + name = audio_gate_clk[i].gate.hw.init->name;
1488 ret = clk_hw_register(NULL, &audio_gate_clk[i].gate.hw);
1489 - if (ret) {
1490 - pr_warn("audio clk %s init error!\n",
1491 - audio_gate_clk[i].gate.hw.init->name);
1492 - }
1493 + if (ret)
1494 + pr_warn("audio clk %s init error!\n", name);
1495 }
1496
1497 ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
1498 diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
1499 index cdc4f9a171d9..db2983c51f1e 100644
1500 --- a/drivers/crypto/hisilicon/sec/sec_algs.c
1501 +++ b/drivers/crypto/hisilicon/sec/sec_algs.c
1502 @@ -215,17 +215,18 @@ static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
1503 dma_addr_t psec_sgl, struct sec_dev_info *info)
1504 {
1505 struct sec_hw_sgl *sgl_current, *sgl_next;
1506 + dma_addr_t sgl_next_dma;
1507
1508 - if (!hw_sgl)
1509 - return;
1510 sgl_current = hw_sgl;
1511 - while (sgl_current->next) {
1512 + while (sgl_current) {
1513 sgl_next = sgl_current->next;
1514 - dma_pool_free(info->hw_sgl_pool, sgl_current,
1515 - sgl_current->next_sgl);
1516 + sgl_next_dma = sgl_current->next_sgl;
1517 +
1518 + dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);
1519 +
1520 sgl_current = sgl_next;
1521 + psec_sgl = sgl_next_dma;
1522 }
1523 - dma_pool_free(info->hw_sgl_pool, hw_sgl, psec_sgl);
1524 }
1525
1526 static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
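
The rewritten sec_free_hw_sgl() above walks the whole SGL chain and frees each element at the DMA address it was allocated from: element N's next_sgl field holds element N+1's handle, so both the pointer and the handle must be captured before element N goes back to the pool. The same save-before-free discipline on a plain linked list (free() standing in for dma_pool_free()):

    #include <stdlib.h>

    struct node {
        struct node *next;
    };

    /* Free a singly linked chain: grab the successor before freeing
     * the current element, as the patch does with next and next_sgl. */
    static void free_list(struct node *cur)
    {
        while (cur) {
            struct node *next = cur->next;    /* save before free */
            free(cur);
            cur = next;
        }
    }
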
1527 diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
1528 index 53c1d6d36a64..81ba4eb34890 100644
1529 --- a/drivers/dma-buf/sw_sync.c
1530 +++ b/drivers/dma-buf/sw_sync.c
1531 @@ -141,17 +141,14 @@ static void timeline_fence_release(struct dma_fence *fence)
1532 {
1533 struct sync_pt *pt = dma_fence_to_sync_pt(fence);
1534 struct sync_timeline *parent = dma_fence_parent(fence);
1535 + unsigned long flags;
1536
1537 + spin_lock_irqsave(fence->lock, flags);
1538 if (!list_empty(&pt->link)) {
1539 - unsigned long flags;
1540 -
1541 - spin_lock_irqsave(fence->lock, flags);
1542 - if (!list_empty(&pt->link)) {
1543 - list_del(&pt->link);
1544 - rb_erase(&pt->node, &parent->pt_tree);
1545 - }
1546 - spin_unlock_irqrestore(fence->lock, flags);
1547 + list_del(&pt->link);
1548 + rb_erase(&pt->node, &parent->pt_tree);
1549 }
1550 + spin_unlock_irqrestore(fence->lock, flags);
1551
1552 sync_timeline_put(parent);
1553 dma_fence_free(fence);
1554 @@ -274,7 +271,8 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
1555 p = &parent->rb_left;
1556 } else {
1557 if (dma_fence_get_rcu(&other->base)) {
1558 - dma_fence_put(&pt->base);
1559 + sync_timeline_put(obj);
1560 + kfree(pt);
1561 pt = other;
1562 goto unlock;
1563 }
1564 diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
1565 index c364ef94cc36..77c9f4d8668a 100644
1566 --- a/drivers/gpu/drm/amd/amdgpu/si.c
1567 +++ b/drivers/gpu/drm/amd/amdgpu/si.c
1568 @@ -1813,7 +1813,7 @@ static void si_program_aspm(struct amdgpu_device *adev)
1569 if (orig != data)
1570 si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
1571
1572 - if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) {
1573 + if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
1574 orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
1575 data &= ~PLL_RAMP_UP_TIME_0_MASK;
1576 if (orig != data)
1577 @@ -1862,14 +1862,14 @@ static void si_program_aspm(struct amdgpu_device *adev)
1578
1579 orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
1580 data &= ~LS2_EXIT_TIME_MASK;
1581 - if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
1582 + if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
1583 data |= LS2_EXIT_TIME(5);
1584 if (orig != data)
1585 si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
1586
1587 orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
1588 data &= ~LS2_EXIT_TIME_MASK;
1589 - if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
1590 + if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
1591 data |= LS2_EXIT_TIME(5);
1592 if (orig != data)
1593 si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
1594 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
1595 index f4b89d1ea6f6..2b2efe443c36 100644
1596 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c
1597 +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
1598 @@ -1585,6 +1585,14 @@ void dc_set_power_state(
1599 dc_resource_state_construct(dc, dc->current_state);
1600
1601 dc->hwss.init_hw(dc);
1602 +
1603 +#ifdef CONFIG_DRM_AMD_DC_DCN2_0
1604 + if (dc->hwss.init_sys_ctx != NULL &&
1605 + dc->vm_pa_config.valid) {
1606 + dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
1607 + }
1608 +#endif
1609 +
1610 break;
1611 default:
1612
1613 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
1614 index f0d68aa7c8fc..d440b28ee43f 100644
1615 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
1616 +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
1617 @@ -229,12 +229,10 @@ bool resource_construct(
1618 DC_ERR("DC: failed to create audio!\n");
1619 return false;
1620 }
1621 -
1622 if (!aud->funcs->endpoint_valid(aud)) {
1623 aud->funcs->destroy(&aud);
1624 break;
1625 }
1626 -
1627 pool->audios[i] = aud;
1628 pool->audio_count++;
1629 }
1630 @@ -1703,24 +1701,25 @@ static struct audio *find_first_free_audio(
1631 const struct resource_pool *pool,
1632 enum engine_id id)
1633 {
1634 - int i;
1635 - for (i = 0; i < pool->audio_count; i++) {
1636 + int i, available_audio_count;
1637 +
1638 + available_audio_count = pool->audio_count;
1639 +
1640 + for (i = 0; i < available_audio_count; i++) {
1641 if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
1642 /*we have enough audio endpoint, find the matching inst*/
1643 if (id != i)
1644 continue;
1645 -
1646 return pool->audios[i];
1647 }
1648 }
1649
1650 - /* use engine id to find free audio */
1651 - if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
1652 + /* use engine id to find free audio */
1653 + if ((id < available_audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
1654 return pool->audios[id];
1655 }
1656 -
1657 /*not found the matching one, first come first serve*/
1658 - for (i = 0; i < pool->audio_count; i++) {
1659 + for (i = 0; i < available_audio_count; i++) {
1660 if (res_ctx->is_audio_acquired[i] == false) {
1661 return pool->audios[i];
1662 }
1663 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
1664 index 7f6d724686f1..abb559ce6408 100644
1665 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
1666 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
1667 @@ -611,6 +611,8 @@ void dce_aud_az_configure(
1668
1669 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
1670 value);
1671 + DC_LOG_HW_AUDIO("\n\tAUDIO:az_configure: index: %u data, 0x%x, displayName %s: \n",
1672 + audio->inst, value, audio_info->display_name);
1673
1674 /*
1675 *write the port ID:
1676 @@ -922,7 +924,6 @@ static const struct audio_funcs funcs = {
1677 .az_configure = dce_aud_az_configure,
1678 .destroy = dce_aud_destroy,
1679 };
1680 -
1681 void dce_aud_destroy(struct audio **audio)
1682 {
1683 struct dce_audio *aud = DCE_AUD(*audio);
1684 @@ -953,7 +954,6 @@ struct audio *dce_audio_create(
1685 audio->regs = reg;
1686 audio->shifts = shifts;
1687 audio->masks = masks;
1688 -
1689 return &audio->base;
1690 }
1691
1692 diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
1693 index 5d95a997fd9f..f8904f73f57b 100644
1694 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
1695 +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
1696 @@ -292,9 +292,10 @@ bool cm_helper_translate_curve_to_hw_format(
1697 seg_distr[7] = 4;
1698 seg_distr[8] = 4;
1699 seg_distr[9] = 4;
1700 + seg_distr[10] = 1;
1701
1702 region_start = -10;
1703 - region_end = 0;
1704 + region_end = 1;
1705 }
1706
1707 for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
1708 diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
1709 index d68986cea132..84abf5d6f760 100644
1710 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
1711 +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
1712 @@ -1040,16 +1040,17 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
1713 if (ret)
1714 return ret;
1715
1716 + /* Check whether panel supports fast training */
1717 + ret = analogix_dp_fast_link_train_detection(dp);
1718 + if (ret)
1719 + dp->psr_enable = false;
1720 +
1721 if (dp->psr_enable) {
1722 ret = analogix_dp_enable_sink_psr(dp);
1723 if (ret)
1724 return ret;
1725 }
1726
1727 - /* Check whether panel supports fast training */
1728 - ret = analogix_dp_fast_link_train_detection(dp);
1729 - if (ret)
1730 - dp->psr_enable = false;
1731
1732 return ret;
1733 }
1734 diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
1735 index aaca5248da07..d728b6cf6109 100644
1736 --- a/drivers/gpu/drm/bridge/tc358767.c
1737 +++ b/drivers/gpu/drm/bridge/tc358767.c
1738 @@ -302,7 +302,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
1739 struct drm_dp_aux_msg *msg)
1740 {
1741 struct tc_data *tc = aux_to_tc(aux);
1742 - size_t size = min_t(size_t, 8, msg->size);
1743 + size_t size = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES - 1, msg->size);
1744 u8 request = msg->request & ~DP_AUX_I2C_MOT;
1745 u8 *buf = msg->buffer;
1746 u32 tmp = 0;
1747 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
1748 index 7143ea4611aa..33a9fb5ac558 100644
1749 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
1750 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
1751 @@ -96,6 +96,8 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
1752 info->min = min(info->base,
1753 info->base + info->step * info->vidmask);
1754 info->max = nvbios_rd32(bios, volt + 0x0e);
1755 + if (!info->max)
1756 + info->max = max(info->base, info->base + info->step * info->vidmask);
1757 break;
1758 case 0x50:
1759 info->min = nvbios_rd32(bios, volt + 0x0a);
1760 diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
1761 index 2c9c9722734f..9a2cb8aeab3a 100644
1762 --- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
1763 +++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
1764 @@ -400,7 +400,13 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
1765
1766 /* Look up the DSI host. It needs to probe before we do. */
1767 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
1768 + if (!endpoint)
1769 + return -ENODEV;
1770 +
1771 dsi_host_node = of_graph_get_remote_port_parent(endpoint);
1772 + if (!dsi_host_node)
1773 + goto error;
1774 +
1775 host = of_find_mipi_dsi_host_by_node(dsi_host_node);
1776 of_node_put(dsi_host_node);
1777 if (!host) {
1778 @@ -409,6 +415,9 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
1779 }
1780
1781 info.node = of_graph_get_remote_port(endpoint);
1782 + if (!info.node)
1783 + goto error;
1784 +
1785 of_node_put(endpoint);
1786
1787 ts->dsi = mipi_dsi_device_register_full(host, &info);
1788 @@ -429,6 +438,10 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
1789 return ret;
1790
1791 return 0;
1792 +
1793 +error:
1794 + of_node_put(endpoint);
1795 + return -ENODEV;
1796 }
1797
1798 static int rpi_touchscreen_remove(struct i2c_client *i2c)
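The probe fix above checks every of_graph lookup and funnels failures through a single exit that drops the endpoint reference, since of_graph_get_next_endpoint() returns a node with an elevated refcount. A generic sketch of the idiom, using a hypothetical driver rather than the panel's actual probe:

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_graph.h>

static int example_parse_graph(struct device *dev)
{
        struct device_node *endpoint, *remote;

        endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
        if (!endpoint)
                return -ENODEV;         /* nothing acquired yet */

        remote = of_graph_get_remote_port_parent(endpoint);
        if (!remote)
                goto err_put_endpoint;  /* endpoint ref must be dropped */

        /* ... look up the remote device, etc. ... */

        of_node_put(remote);
        of_node_put(endpoint);
        return 0;

err_put_endpoint:
        of_node_put(endpoint);
        return -ENODEV;
}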
1799 diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
1800 index 5fd94e206029..654fea2b4312 100644
1801 --- a/drivers/gpu/drm/panel/panel-simple.c
1802 +++ b/drivers/gpu/drm/panel/panel-simple.c
1803 @@ -689,9 +689,9 @@ static const struct panel_desc auo_g133han01 = {
1804 static const struct display_timing auo_g185han01_timings = {
1805 .pixelclock = { 120000000, 144000000, 175000000 },
1806 .hactive = { 1920, 1920, 1920 },
1807 - .hfront_porch = { 18, 60, 74 },
1808 - .hback_porch = { 12, 44, 54 },
1809 - .hsync_len = { 10, 24, 32 },
1810 + .hfront_porch = { 36, 120, 148 },
1811 + .hback_porch = { 24, 88, 108 },
1812 + .hsync_len = { 20, 48, 64 },
1813 .vactive = { 1080, 1080, 1080 },
1814 .vfront_porch = { 6, 10, 40 },
1815 .vback_porch = { 2, 5, 20 },
1816 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
1817 index 414642e5b7a3..de656f555383 100644
1818 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
1819 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
1820 @@ -751,7 +751,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
1821
1822 radeon_encoder->output_csc = val;
1823
1824 - if (connector->encoder->crtc) {
1825 + if (connector->encoder && connector->encoder->crtc) {
1826 struct drm_crtc *crtc = connector->encoder->crtc;
1827 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1828
1829 diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
1830 index 2a7977a23b31..25b5407c74b5 100644
1831 --- a/drivers/gpu/drm/radeon/radeon_drv.c
1832 +++ b/drivers/gpu/drm/radeon/radeon_drv.c
1833 @@ -364,11 +364,19 @@ radeon_pci_remove(struct pci_dev *pdev)
1834 static void
1835 radeon_pci_shutdown(struct pci_dev *pdev)
1836 {
1837 + struct drm_device *ddev = pci_get_drvdata(pdev);
1838 +
1839 /* if we are running in a VM, make sure the device
1840 * is torn down properly on reboot/shutdown
1841 */
1842 if (radeon_device_is_virtual())
1843 radeon_pci_remove(pdev);
1844 +
1845 + /* Some adapters need to be suspended before a
1846 + * shutdown occurs in order to prevent an error
1847 + * during kexec.
1848 + */
1849 + radeon_suspend_kms(ddev, true, true, false);
1850 }
1851
1852 static int radeon_pmops_suspend(struct device *dev)
1853 diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
1854 index 808d9fb627e9..477d0a27b9a5 100644
1855 --- a/drivers/gpu/drm/stm/ltdc.c
1856 +++ b/drivers/gpu/drm/stm/ltdc.c
1857 @@ -19,6 +19,7 @@
1858 #include <drm/drm_crtc_helper.h>
1859 #include <drm/drm_fb_cma_helper.h>
1860 #include <drm/drm_gem_cma_helper.h>
1861 +#include <drm/drm_gem_framebuffer_helper.h>
1862 #include <drm/drm_of.h>
1863 #include <drm/drm_bridge.h>
1864 #include <drm/drm_plane_helper.h>
1865 @@ -825,6 +826,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = {
1866 };
1867
1868 static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
1869 + .prepare_fb = drm_gem_fb_prepare_fb,
1870 .atomic_check = ltdc_plane_atomic_check,
1871 .atomic_update = ltdc_plane_atomic_update,
1872 .atomic_disable = ltdc_plane_atomic_disable,
1873 diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
1874 index 1cb41992aaa1..d0a81a03ddbd 100644
1875 --- a/drivers/hid/hid-apple.c
1876 +++ b/drivers/hid/hid-apple.c
1877 @@ -57,7 +57,6 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
1878 struct apple_sc {
1879 unsigned long quirks;
1880 unsigned int fn_on;
1881 - DECLARE_BITMAP(pressed_fn, KEY_CNT);
1882 DECLARE_BITMAP(pressed_numlock, KEY_CNT);
1883 };
1884
1885 @@ -184,6 +183,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
1886 {
1887 struct apple_sc *asc = hid_get_drvdata(hid);
1888 const struct apple_key_translation *trans, *table;
1889 + bool do_translate;
1890 + u16 code = 0;
1891
1892 if (usage->code == KEY_FN) {
1893 asc->fn_on = !!value;
1894 @@ -192,8 +193,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
1895 }
1896
1897 if (fnmode) {
1898 - int do_translate;
1899 -
1900 if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
1901 hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
1902 table = macbookair_fn_keys;
1903 @@ -205,25 +204,33 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
1904 trans = apple_find_translation (table, usage->code);
1905
1906 if (trans) {
1907 - if (test_bit(usage->code, asc->pressed_fn))
1908 - do_translate = 1;
1909 - else if (trans->flags & APPLE_FLAG_FKEY)
1910 - do_translate = (fnmode == 2 && asc->fn_on) ||
1911 - (fnmode == 1 && !asc->fn_on);
1912 - else
1913 - do_translate = asc->fn_on;
1914 -
1915 - if (do_translate) {
1916 - if (value)
1917 - set_bit(usage->code, asc->pressed_fn);
1918 - else
1919 - clear_bit(usage->code, asc->pressed_fn);
1920 -
1921 - input_event(input, usage->type, trans->to,
1922 - value);
1923 -
1924 - return 1;
1925 + if (test_bit(trans->from, input->key))
1926 + code = trans->from;
1927 + else if (test_bit(trans->to, input->key))
1928 + code = trans->to;
1929 +
1930 + if (!code) {
1931 + if (trans->flags & APPLE_FLAG_FKEY) {
1932 + switch (fnmode) {
1933 + case 1:
1934 + do_translate = !asc->fn_on;
1935 + break;
1936 + case 2:
1937 + do_translate = asc->fn_on;
1938 + break;
1939 + default:
1940 + /* should never happen */
1941 + do_translate = false;
1942 + }
1943 + } else {
1944 + do_translate = asc->fn_on;
1945 + }
1946 +
1947 + code = do_translate ? trans->to : trans->from;
1948 }
1949 +
1950 + input_event(input, usage->type, code, value);
1951 + return 1;
1952 }
1953
1954 if (asc->quirks & APPLE_NUMLOCK_EMULATION &&
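The rewritten hid-apple handler above decides the emitted code from what is currently held down in input->key, so a release is always reported with the same code as its press even if the Fn state changed in between; this is why the per-device pressed_fn bitmap could be dropped. A condensed sketch of the decision, assuming the driver's fnmode semantics (1: translated function by default, Fn yields the raw F-key; 2: the reverse) and a hypothetical helper name:

#include <linux/input.h>

static u16 pick_code(struct input_dev *input, bool fn_on, int fnmode,
                     bool is_fkey, u16 from, u16 to)
{
        /* If either code is already down, keep using it so the
         * release event matches the press event. */
        if (test_bit(from, input->key))
                return from;
        if (test_bit(to, input->key))
                return to;

        if (is_fkey) {
                if (fnmode == 1)        /* translated unless Fn held */
                        return fn_on ? from : to;
                if (fnmode == 2)        /* translated only while Fn held */
                        return fn_on ? to : from;
                return from;            /* should never happen */
        }
        return fn_on ? to : from;       /* non-F-keys: Fn selects 'to' */
}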
1955 diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
1956 index 5a2d5140c1f4..3038c975e417 100644
1957 --- a/drivers/hid/wacom_sys.c
1958 +++ b/drivers/hid/wacom_sys.c
1959 @@ -91,7 +91,7 @@ static void wacom_wac_queue_flush(struct hid_device *hdev,
1960 }
1961
1962 static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
1963 - struct hid_report *report, u8 *raw_data, int size)
1964 + struct hid_report *report, u8 *raw_data, int report_size)
1965 {
1966 struct wacom *wacom = hid_get_drvdata(hdev);
1967 struct wacom_wac *wacom_wac = &wacom->wacom_wac;
1968 @@ -152,7 +152,8 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
1969 if (flush)
1970 wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
1971 else if (insert)
1972 - wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, raw_data, size);
1973 + wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
1974 + raw_data, report_size);
1975
1976 return insert && !flush;
1977 }
1978 @@ -2147,7 +2148,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
1979 {
1980 struct wacom_wac *wacom_wac = &wacom->wacom_wac;
1981 struct wacom_features *features = &wacom_wac->features;
1982 - char name[WACOM_NAME_MAX];
1983 + char name[WACOM_NAME_MAX - 20]; /* Leave some room for suffixes */
1984
1985 /* Generic devices name unspecified */
1986 if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
1987 diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
1988 index 6f5c838f9d47..1df037e7f0b4 100644
1989 --- a/drivers/hid/wacom_wac.c
1990 +++ b/drivers/hid/wacom_wac.c
1991 @@ -255,7 +255,7 @@ static int wacom_dtu_irq(struct wacom_wac *wacom)
1992
1993 static int wacom_dtus_irq(struct wacom_wac *wacom)
1994 {
1995 - char *data = wacom->data;
1996 + unsigned char *data = wacom->data;
1997 struct input_dev *input = wacom->pen_input;
1998 unsigned short prox, pressure = 0;
1999
2000 @@ -576,7 +576,7 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
2001 strip2 = ((data[3] & 0x1f) << 8) | data[4];
2002 }
2003
2004 - prox = (buttons & ~(~0 << nbuttons)) | (keys & ~(~0 << nkeys)) |
2005 + prox = (buttons & ~(~0U << nbuttons)) | (keys & ~(~0U << nkeys)) |
2006 (ring1 & 0x80) | (ring2 & 0x80) | strip1 | strip2;
2007
2008 wacom_report_numbered_buttons(input, nbuttons, buttons);
2009 diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
2010 index c4d176f5ed79..f890af67f501 100644
2011 --- a/drivers/i2c/busses/i2c-cht-wc.c
2012 +++ b/drivers/i2c/busses/i2c-cht-wc.c
2013 @@ -187,6 +187,51 @@ static const struct i2c_algorithm cht_wc_i2c_adap_algo = {
2014 .smbus_xfer = cht_wc_i2c_adap_smbus_xfer,
2015 };
2016
2017 +/*
2018 + * We are an i2c-adapter which itself is part of an i2c-client. This means that
2019 + * transfers done through us take adapter->bus_lock twice, once for our parent
2020 + * i2c-adapter and once to take our own bus_lock. Lockdep does not like this
2021 + * nested locking, to make lockdep happy in the case of busses with muxes, the
2022 + * i2c-core's i2c_adapter_lock_bus function calls:
2023 + * rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
2024 + *
2025 + * But i2c_adapter_depth only works when the direct parent of the adapter is
2026 + * another adapter, as it is only meant for muxes. In our case there is an
2027 + * i2c-client and MFD instantiated platform_device in the parent->child chain
2028 + * between the 2 devices.
2029 + *
2030 + * So we override the default i2c_lock_operations and pass a hardcoded
2031 + * depth of 1 to rt_mutex_lock_nested, to make lockdep happy.
2032 + *
2033 + * Note that if there were to be a mux attached to our adapter, this would
2034 + * break things again since the i2c-mux code expects the root-adapter to have
2035 + * a locking depth of 0. But we always have only 1 client directly attached
2036 + * in the form of the Charger IC paired with the CHT Whiskey Cove PMIC.
2037 + */
2038 +static void cht_wc_i2c_adap_lock_bus(struct i2c_adapter *adapter,
2039 + unsigned int flags)
2040 +{
2041 + rt_mutex_lock_nested(&adapter->bus_lock, 1);
2042 +}
2043 +
2044 +static int cht_wc_i2c_adap_trylock_bus(struct i2c_adapter *adapter,
2045 + unsigned int flags)
2046 +{
2047 + return rt_mutex_trylock(&adapter->bus_lock);
2048 +}
2049 +
2050 +static void cht_wc_i2c_adap_unlock_bus(struct i2c_adapter *adapter,
2051 + unsigned int flags)
2052 +{
2053 + rt_mutex_unlock(&adapter->bus_lock);
2054 +}
2055 +
2056 +static const struct i2c_lock_operations cht_wc_i2c_adap_lock_ops = {
2057 + .lock_bus = cht_wc_i2c_adap_lock_bus,
2058 + .trylock_bus = cht_wc_i2c_adap_trylock_bus,
2059 + .unlock_bus = cht_wc_i2c_adap_unlock_bus,
2060 +};
2061 +
2062 /**** irqchip for the client connected to the extchgr i2c adapter ****/
2063 static void cht_wc_i2c_irq_lock(struct irq_data *data)
2064 {
2065 @@ -295,6 +340,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
2066 adap->adapter.owner = THIS_MODULE;
2067 adap->adapter.class = I2C_CLASS_HWMON;
2068 adap->adapter.algo = &cht_wc_i2c_adap_algo;
2069 + adap->adapter.lock_ops = &cht_wc_i2c_adap_lock_ops;
2070 strlcpy(adap->adapter.name, "PMIC I2C Adapter",
2071 sizeof(adap->adapter.name));
2072 adap->adapter.dev.parent = &pdev->dev;
2073 diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
2074 index 333ed4a9d4b8..5255dcb551a7 100644
2075 --- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
2076 +++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
2077 @@ -55,7 +55,6 @@ static const struct mbox_chan_ops qcom_apcs_ipc_ops = {
2078
2079 static int qcom_apcs_ipc_probe(struct platform_device *pdev)
2080 {
2081 - struct device_node *np = pdev->dev.of_node;
2082 struct qcom_apcs_ipc *apcs;
2083 struct regmap *regmap;
2084 struct resource *res;
2085 @@ -63,6 +62,11 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
2086 void __iomem *base;
2087 unsigned long i;
2088 int ret;
2089 + const struct of_device_id apcs_clk_match_table[] = {
2090 + { .compatible = "qcom,msm8916-apcs-kpss-global", },
2091 + { .compatible = "qcom,qcs404-apcs-apps-global", },
2092 + {}
2093 + };
2094
2095 apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
2096 if (!apcs)
2097 @@ -97,7 +101,7 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
2098 return ret;
2099 }
2100
2101 - if (of_device_is_compatible(np, "qcom,msm8916-apcs-kpss-global")) {
2102 + if (of_match_device(apcs_clk_match_table, &pdev->dev)) {
2103 apcs->clk = platform_device_register_data(&pdev->dev,
2104 "qcom-apcs-msm8916-clk",
2105 -1, NULL, 0);
2106 diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
2107 index 0e5282fc1467..c37c8bb86068 100644
2108 --- a/drivers/mfd/intel-lpss-pci.c
2109 +++ b/drivers/mfd/intel-lpss-pci.c
2110 @@ -39,6 +39,8 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
2111 info->mem = &pdev->resource[0];
2112 info->irq = pdev->irq;
2113
2114 + pdev->d3cold_delay = 0;
2115 +
2116 /* Probably it is enough to set this for iDMA capable devices only */
2117 pci_set_master(pdev);
2118 pci_try_set_mwi(pdev);
2119 diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
2120 index 35b767baf21f..c281c488a306 100644
2121 --- a/drivers/net/dsa/rtl8366.c
2122 +++ b/drivers/net/dsa/rtl8366.c
2123 @@ -339,10 +339,12 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
2124 const struct switchdev_obj_port_vlan *vlan)
2125 {
2126 struct realtek_smi *smi = ds->priv;
2127 + u16 vid;
2128 int ret;
2129
2130 - if (!smi->ops->is_vlan_valid(smi, port))
2131 - return -EINVAL;
2132 + for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
2133 + if (!smi->ops->is_vlan_valid(smi, vid))
2134 + return -EINVAL;
2135
2136 dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
2137 vlan->vid_begin, vlan->vid_end);
2138 @@ -370,8 +372,9 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
2139 u16 vid;
2140 int ret;
2141
2142 - if (!smi->ops->is_vlan_valid(smi, port))
2143 - return;
2144 + for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
2145 + if (!smi->ops->is_vlan_valid(smi, vid))
2146 + return;
2147
2148 dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
2149 port,
2150 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
2151 index 4bc211093c98..dba8a0c1eda3 100644
2152 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
2153 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
2154 @@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
2155 static int alloc_uld_rxqs(struct adapter *adap,
2156 struct sge_uld_rxq_info *rxq_info, bool lro)
2157 {
2158 - struct sge *s = &adap->sge;
2159 unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
2160 + int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
2161 struct sge_ofld_rxq *q = rxq_info->uldrxq;
2162 unsigned short *ids = rxq_info->rspq_id;
2163 - unsigned int bmap_idx = 0;
2164 + struct sge *s = &adap->sge;
2165 unsigned int per_chan;
2166 - int i, err, msi_idx, que_idx = 0;
2167
2168 per_chan = rxq_info->nrxq / adap->params.nports;
2169
2170 @@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter *adap,
2171
2172 if (msi_idx >= 0) {
2173 bmap_idx = get_msix_idx_from_bmap(adap);
2174 + if (bmap_idx < 0) {
2175 + err = -ENOSPC;
2176 + goto freeout;
2177 + }
2178 msi_idx = adap->msix_info_ulds[bmap_idx].idx;
2179 }
2180 err = t4_sge_alloc_rxq(adap, &q->rspq, false,
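One subtlety in the hunk above: the new error check only works because bmap_idx was also changed from unsigned int to int in the declaration block. Had it stayed unsigned, `bmap_idx < 0` would be compile-time false and a failing lookup would be used as a huge array index. A standalone illustration of the pitfall (hypothetical helper, not the cxgb4 function):

/* Hypothetical allocator: returns a free index, or -ENOSPC. */
int get_free_idx(void);

void broken_caller(void)
{
        unsigned int idx = get_free_idx();

        if (idx < 0)    /* always false: idx is unsigned, so -ENOSPC
                         * has already wrapped to 4294967268 here */
                return;
}

void fixed_caller(void)
{
        int idx = get_free_idx();

        if (idx < 0)    /* genuine error check */
                return;
}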
2181 diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
2182 index 10b075bc5959..783ee6a32b5d 100644
2183 --- a/drivers/net/ethernet/qlogic/qla3xxx.c
2184 +++ b/drivers/net/ethernet/qlogic/qla3xxx.c
2185 @@ -2788,6 +2788,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2186 netdev_err(qdev->ndev,
2187 "PCI mapping failed with error: %d\n",
2188 err);
2189 + dev_kfree_skb_irq(skb);
2190 ql_free_large_buffers(qdev);
2191 return -ENOMEM;
2192 }
2193 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
2194 index d6916f787fce..5251c5f6f96e 100644
2195 --- a/drivers/net/usb/hso.c
2196 +++ b/drivers/net/usb/hso.c
2197 @@ -2634,14 +2634,18 @@ static struct hso_device *hso_create_bulk_serial_device(
2198 */
2199 if (serial->tiocmget) {
2200 tiocmget = serial->tiocmget;
2201 + tiocmget->endp = hso_get_ep(interface,
2202 + USB_ENDPOINT_XFER_INT,
2203 + USB_DIR_IN);
2204 + if (!tiocmget->endp) {
2205 + dev_err(&interface->dev, "Failed to find INT IN ep\n");
2206 + goto exit;
2207 + }
2208 +
2209 tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
2210 if (tiocmget->urb) {
2211 mutex_init(&tiocmget->mutex);
2212 init_waitqueue_head(&tiocmget->waitq);
2213 - tiocmget->endp = hso_get_ep(
2214 - interface,
2215 - USB_ENDPOINT_XFER_INT,
2216 - USB_DIR_IN);
2217 } else
2218 hso_free_tiomget(serial);
2219 }
2220 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2221 index 51017c6bb3bc..6f517e673020 100644
2222 --- a/drivers/net/usb/qmi_wwan.c
2223 +++ b/drivers/net/usb/qmi_wwan.c
2224 @@ -1286,6 +1286,7 @@ static const struct usb_device_id products[] = {
2225 {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
2226 {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
2227 {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
2228 + {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
2229 {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
2230 {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
2231 {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
2232 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
2233 index a2a4c19bc95e..6b4675a9494b 100644
2234 --- a/drivers/net/xen-netfront.c
2235 +++ b/drivers/net/xen-netfront.c
2236 @@ -890,9 +890,9 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
2237 return 0;
2238 }
2239
2240 -static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
2241 - struct sk_buff *skb,
2242 - struct sk_buff_head *list)
2243 +static int xennet_fill_frags(struct netfront_queue *queue,
2244 + struct sk_buff *skb,
2245 + struct sk_buff_head *list)
2246 {
2247 RING_IDX cons = queue->rx.rsp_cons;
2248 struct sk_buff *nskb;
2249 @@ -911,7 +911,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
2250 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
2251 queue->rx.rsp_cons = ++cons + skb_queue_len(list);
2252 kfree_skb(nskb);
2253 - return ~0U;
2254 + return -ENOENT;
2255 }
2256
2257 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
2258 @@ -922,7 +922,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
2259 kfree_skb(nskb);
2260 }
2261
2262 - return cons;
2263 + queue->rx.rsp_cons = cons;
2264 +
2265 + return 0;
2266 }
2267
2268 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
2269 @@ -1048,8 +1050,7 @@ err:
2270 skb->data_len = rx->status;
2271 skb->len += rx->status;
2272
2273 - i = xennet_fill_frags(queue, skb, &tmpq);
2274 - if (unlikely(i == ~0U))
2275 + if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
2276 goto err;
2277
2278 if (rx->flags & XEN_NETRXF_csum_blank)
2279 @@ -1059,7 +1060,7 @@ err:
2280
2281 __skb_queue_tail(&rxq, skb);
2282
2283 - queue->rx.rsp_cons = ++i;
2284 + i = ++queue->rx.rsp_cons;
2285 work_done++;
2286 }
2287
2288 diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
2289 index cee5f2f590e2..14a6ba4067fb 100644
2290 --- a/drivers/pci/controller/dwc/pci-exynos.c
2291 +++ b/drivers/pci/controller/dwc/pci-exynos.c
2292 @@ -465,7 +465,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
2293
2294 ep->phy = devm_of_phy_get(dev, np, NULL);
2295 if (IS_ERR(ep->phy)) {
2296 - if (PTR_ERR(ep->phy) == -EPROBE_DEFER)
2297 + if (PTR_ERR(ep->phy) != -ENODEV)
2298 return PTR_ERR(ep->phy);
2299
2300 ep->phy = NULL;
2301 diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
2302 index 3826b444298c..3b2ceb566728 100644
2303 --- a/drivers/pci/controller/dwc/pci-imx6.c
2304 +++ b/drivers/pci/controller/dwc/pci-imx6.c
2305 @@ -807,8 +807,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)
2306
2307 imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
2308 if (IS_ERR(imx6_pcie->vpcie)) {
2309 - if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
2310 - return -EPROBE_DEFER;
2311 + if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
2312 + return PTR_ERR(imx6_pcie->vpcie);
2313 imx6_pcie->vpcie = NULL;
2314 }
2315
2316 diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
2317 index 7b32e619b959..a3489839a8fc 100644
2318 --- a/drivers/pci/controller/dwc/pcie-histb.c
2319 +++ b/drivers/pci/controller/dwc/pcie-histb.c
2320 @@ -340,8 +340,8 @@ static int histb_pcie_probe(struct platform_device *pdev)
2321
2322 hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie");
2323 if (IS_ERR(hipcie->vpcie)) {
2324 - if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER)
2325 - return -EPROBE_DEFER;
2326 + if (PTR_ERR(hipcie->vpcie) != -ENODEV)
2327 + return PTR_ERR(hipcie->vpcie);
2328 hipcie->vpcie = NULL;
2329 }
2330
2331 diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
2332 index f4f53d092e00..976eaa9a9f26 100644
2333 --- a/drivers/pci/controller/pci-tegra.c
2334 +++ b/drivers/pci/controller/pci-tegra.c
2335 @@ -1975,14 +1975,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2336 err = of_pci_get_devfn(port);
2337 if (err < 0) {
2338 dev_err(dev, "failed to parse address: %d\n", err);
2339 - return err;
2340 + goto err_node_put;
2341 }
2342
2343 index = PCI_SLOT(err);
2344
2345 if (index < 1 || index > soc->num_ports) {
2346 dev_err(dev, "invalid port number: %d\n", index);
2347 - return -EINVAL;
2348 + err = -EINVAL;
2349 + goto err_node_put;
2350 }
2351
2352 index--;
2353 @@ -1991,12 +1992,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2354 if (err < 0) {
2355 dev_err(dev, "failed to parse # of lanes: %d\n",
2356 err);
2357 - return err;
2358 + goto err_node_put;
2359 }
2360
2361 if (value > 16) {
2362 dev_err(dev, "invalid # of lanes: %u\n", value);
2363 - return -EINVAL;
2364 + err = -EINVAL;
2365 + goto err_node_put;
2366 }
2367
2368 lanes |= value << (index << 3);
2369 @@ -2010,13 +2012,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2370 lane += value;
2371
2372 rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2373 - if (!rp)
2374 - return -ENOMEM;
2375 + if (!rp) {
2376 + err = -ENOMEM;
2377 + goto err_node_put;
2378 + }
2379
2380 err = of_address_to_resource(port, 0, &rp->regs);
2381 if (err < 0) {
2382 dev_err(dev, "failed to parse address: %d\n", err);
2383 - return err;
2384 + goto err_node_put;
2385 }
2386
2387 INIT_LIST_HEAD(&rp->list);
2388 @@ -2043,6 +2047,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2389 return err;
2390
2391 return 0;
2392 +
2393 +err_node_put:
2394 + of_node_put(port);
2395 + return err;
2396 }
2397
2398 /*
2399 diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
2400 index 1372d270764f..5ce8e6375687 100644
2401 --- a/drivers/pci/controller/pcie-rockchip-host.c
2402 +++ b/drivers/pci/controller/pcie-rockchip-host.c
2403 @@ -608,29 +608,29 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
2404
2405 rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
2406 if (IS_ERR(rockchip->vpcie12v)) {
2407 - if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
2408 - return -EPROBE_DEFER;
2409 + if (PTR_ERR(rockchip->vpcie12v) != -ENODEV)
2410 + return PTR_ERR(rockchip->vpcie12v);
2411 dev_info(dev, "no vpcie12v regulator found\n");
2412 }
2413
2414 rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
2415 if (IS_ERR(rockchip->vpcie3v3)) {
2416 - if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
2417 - return -EPROBE_DEFER;
2418 + if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
2419 + return PTR_ERR(rockchip->vpcie3v3);
2420 dev_info(dev, "no vpcie3v3 regulator found\n");
2421 }
2422
2423 rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
2424 if (IS_ERR(rockchip->vpcie1v8)) {
2425 - if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
2426 - return -EPROBE_DEFER;
2427 + if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV)
2428 + return PTR_ERR(rockchip->vpcie1v8);
2429 dev_info(dev, "no vpcie1v8 regulator found\n");
2430 }
2431
2432 rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
2433 if (IS_ERR(rockchip->vpcie0v9)) {
2434 - if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
2435 - return -EPROBE_DEFER;
2436 + if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV)
2437 + return PTR_ERR(rockchip->vpcie0v9);
2438 dev_info(dev, "no vpcie0v9 regulator found\n");
2439 }
2440
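The four regulator hunks above (and the matching ones in pci-imx6.c and pcie-histb.c, plus the similar PHY lookup in pci-exynos.c earlier in this patch) all apply the same correction: devm_regulator_get_optional() returns -ENODEV exactly when the supply is not described, so that is the only error a driver may swallow; everything else, -EPROBE_DEFER included, must be returned. A minimal sketch of the idiom, assuming a single optional "vpcie" supply:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int get_optional_supply(struct device *dev, struct regulator **out)
{
        struct regulator *reg = devm_regulator_get_optional(dev, "vpcie");

        if (IS_ERR(reg)) {
                /* -ENODEV means the supply is simply absent; any other
                 * error (including -EPROBE_DEFER) is propagated. */
                if (PTR_ERR(reg) != -ENODEV)
                        return PTR_ERR(reg);
                reg = NULL;     /* run without the optional supply */
        }
        *out = reg;
        return 0;
}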
2441 diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
2442 index 857c358b727b..cc860c5f7d26 100644
2443 --- a/drivers/pci/hotplug/rpaphp_core.c
2444 +++ b/drivers/pci/hotplug/rpaphp_core.c
2445 @@ -230,7 +230,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
2446 struct of_drc_info drc;
2447 const __be32 *value;
2448 char cell_drc_name[MAX_DRC_NAME_LEN];
2449 - int j, fndit;
2450 + int j;
2451
2452 info = of_find_property(dn->parent, "ibm,drc-info", NULL);
2453 if (info == NULL)
2454 @@ -245,17 +245,13 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
2455
2456 /* Should now know end of current entry */
2457
2458 - if (my_index > drc.last_drc_index)
2459 - continue;
2460 -
2461 - fndit = 1;
2462 - break;
2463 + /* Found it */
2464 + if (my_index <= drc.last_drc_index) {
2465 + sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
2466 + my_index);
2467 + break;
2468 + }
2469 }
2470 - /* Found it */
2471 -
2472 - if (fndit)
2473 - sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
2474 - my_index);
2475
2476 if (((drc_name == NULL) ||
2477 (drc_name && !strcmp(drc_name, cell_drc_name))) &&
2478 diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
2479 index 4edeb4cae72a..c4c70dc57dbe 100644
2480 --- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
2481 +++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
2482 @@ -198,8 +198,8 @@ static const unsigned int uart_rts_b_pins[] = { GPIODV_27 };
2483
2484 static const unsigned int uart_tx_c_pins[] = { GPIOY_13 };
2485 static const unsigned int uart_rx_c_pins[] = { GPIOY_14 };
2486 -static const unsigned int uart_cts_c_pins[] = { GPIOX_11 };
2487 -static const unsigned int uart_rts_c_pins[] = { GPIOX_12 };
2488 +static const unsigned int uart_cts_c_pins[] = { GPIOY_11 };
2489 +static const unsigned int uart_rts_c_pins[] = { GPIOY_12 };
2490
2491 static const unsigned int i2c_sck_a_pins[] = { GPIODV_25 };
2492 static const unsigned int i2c_sda_a_pins[] = { GPIODV_24 };
2493 @@ -445,10 +445,10 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
2494 GROUP(pwm_f_x, 3, 18),
2495
2496 /* Bank Y */
2497 - GROUP(uart_cts_c, 1, 19),
2498 - GROUP(uart_rts_c, 1, 18),
2499 - GROUP(uart_tx_c, 1, 17),
2500 - GROUP(uart_rx_c, 1, 16),
2501 + GROUP(uart_cts_c, 1, 17),
2502 + GROUP(uart_rts_c, 1, 16),
2503 + GROUP(uart_tx_c, 1, 19),
2504 + GROUP(uart_rx_c, 1, 18),
2505 GROUP(pwm_a_y, 1, 21),
2506 GROUP(pwm_f_y, 1, 20),
2507 GROUP(i2s_out_ch23_y, 1, 5),
2508 diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
2509 index 1425c2874d40..cd7a5d95b499 100644
2510 --- a/drivers/pinctrl/pinctrl-amd.c
2511 +++ b/drivers/pinctrl/pinctrl-amd.c
2512 @@ -569,15 +569,25 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
2513 !(regval & BIT(INTERRUPT_MASK_OFF)))
2514 continue;
2515 irq = irq_find_mapping(gc->irq.domain, irqnr + i);
2516 - generic_handle_irq(irq);
2517 + if (irq != 0)
2518 + generic_handle_irq(irq);
2519
2520 /* Clear interrupt.
2521 * We must read the pin register again, in case the
2522 * value was changed while executing
2523 * generic_handle_irq() above.
2524 + * If we didn't find a mapping for the interrupt,
2525 + * disable it in order to avoid a system hang caused
2526 + * by an interrupt storm.
2527 */
2528 raw_spin_lock_irqsave(&gpio_dev->lock, flags);
2529 regval = readl(regs + i);
2530 + if (irq == 0) {
2531 + regval &= ~BIT(INTERRUPT_ENABLE_OFF);
2532 + dev_dbg(&gpio_dev->pdev->dev,
2533 + "Disabling spurious GPIO IRQ %d\n",
2534 + irqnr + i);
2535 + }
2536 writel(regval, regs + i);
2537 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
2538 ret = IRQ_HANDLED;
2539 diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
2540 index 1aba75897d14..26a3f1eb9c6b 100644
2541 --- a/drivers/pinctrl/tegra/pinctrl-tegra.c
2542 +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
2543 @@ -40,7 +40,9 @@ static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg)
2544
2545 static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg)
2546 {
2547 - writel(val, pmx->regs[bank] + reg);
2548 + writel_relaxed(val, pmx->regs[bank] + reg);
2549 + /* make sure pinmux register write completed */
2550 + pmx_readl(pmx, bank, reg);
2551 }
2552
2553 static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
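The pmx_writel() change above pairs a relaxed write with an immediate read-back of the same register: MMIO writes can be posted, and a read from the same device cannot complete until those writes have landed, which is what the hunk's "make sure pinmux register write completed" comment refers to. A sketch of the idiom against a hypothetical register block:

#include <linux/io.h>

static void reg_write_flushed(void __iomem *base, u32 off, u32 val)
{
        /* The read-back below already orders the write, so the
         * relaxed variant avoids a redundant barrier. */
        writel_relaxed(val, base + off);

        /* Reads are non-posted: this cannot complete until the
         * preceding write has reached the device. */
        readl(base + off);
}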
2554 diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
2555 index c04a1edcd571..c3702684b342 100644
2556 --- a/drivers/rtc/rtc-pcf85363.c
2557 +++ b/drivers/rtc/rtc-pcf85363.c
2558 @@ -169,7 +169,12 @@ static int pcf85363_rtc_set_time(struct device *dev, struct rtc_time *tm)
2559 buf[DT_YEARS] = bin2bcd(tm->tm_year % 100);
2560
2561 ret = regmap_bulk_write(pcf85363->regmap, CTRL_STOP_EN,
2562 - tmp, sizeof(tmp));
2563 + tmp, 2);
2564 + if (ret)
2565 + return ret;
2566 +
2567 + ret = regmap_bulk_write(pcf85363->regmap, DT_100THS,
2568 + buf, sizeof(tmp) - 2);
2569 if (ret)
2570 return ret;
2571
2572 diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
2573 index b2483a749ac4..3cf011e12053 100644
2574 --- a/drivers/rtc/rtc-snvs.c
2575 +++ b/drivers/rtc/rtc-snvs.c
2576 @@ -273,6 +273,10 @@ static int snvs_rtc_probe(struct platform_device *pdev)
2577 if (!data)
2578 return -ENOMEM;
2579
2580 + data->rtc = devm_rtc_allocate_device(&pdev->dev);
2581 + if (IS_ERR(data->rtc))
2582 + return PTR_ERR(data->rtc);
2583 +
2584 data->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "regmap");
2585
2586 if (IS_ERR(data->regmap)) {
2587 @@ -335,10 +339,9 @@ static int snvs_rtc_probe(struct platform_device *pdev)
2588 goto error_rtc_device_register;
2589 }
2590
2591 - data->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
2592 - &snvs_rtc_ops, THIS_MODULE);
2593 - if (IS_ERR(data->rtc)) {
2594 - ret = PTR_ERR(data->rtc);
2595 + data->rtc->ops = &snvs_rtc_ops;
2596 + ret = rtc_register_device(data->rtc);
2597 + if (ret) {
2598 dev_err(&pdev->dev, "failed to register rtc: %d\n", ret);
2599 goto error_rtc_device_register;
2600 }
2601 diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
2602 index bd70339c1242..03d9855a6afd 100644
2603 --- a/drivers/scsi/scsi_logging.c
2604 +++ b/drivers/scsi/scsi_logging.c
2605 @@ -16,57 +16,15 @@
2606 #include <scsi/scsi_eh.h>
2607 #include <scsi/scsi_dbg.h>
2608
2609 -#define SCSI_LOG_SPOOLSIZE 4096
2610 -
2611 -#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG
2612 -#warning SCSI logging bitmask too large
2613 -#endif
2614 -
2615 -struct scsi_log_buf {
2616 - char buffer[SCSI_LOG_SPOOLSIZE];
2617 - unsigned long map;
2618 -};
2619 -
2620 -static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log);
2621 -
2622 static char *scsi_log_reserve_buffer(size_t *len)
2623 {
2624 - struct scsi_log_buf *buf;
2625 - unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE;
2626 - unsigned long idx = 0;
2627 -
2628 - preempt_disable();
2629 - buf = this_cpu_ptr(&scsi_format_log);
2630 - idx = find_first_zero_bit(&buf->map, map_bits);
2631 - if (likely(idx < map_bits)) {
2632 - while (test_and_set_bit(idx, &buf->map)) {
2633 - idx = find_next_zero_bit(&buf->map, map_bits, idx);
2634 - if (idx >= map_bits)
2635 - break;
2636 - }
2637 - }
2638 - if (WARN_ON(idx >= map_bits)) {
2639 - preempt_enable();
2640 - return NULL;
2641 - }
2642 - *len = SCSI_LOG_BUFSIZE;
2643 - return buf->buffer + idx * SCSI_LOG_BUFSIZE;
2644 + *len = 128;
2645 + return kmalloc(*len, GFP_ATOMIC);
2646 }
2647
2648 static void scsi_log_release_buffer(char *bufptr)
2649 {
2650 - struct scsi_log_buf *buf;
2651 - unsigned long idx;
2652 - int ret;
2653 -
2654 - buf = this_cpu_ptr(&scsi_format_log);
2655 - if (bufptr >= buf->buffer &&
2656 - bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) {
2657 - idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE;
2658 - ret = test_and_clear_bit(idx, &buf->map);
2659 - WARN_ON(!ret);
2660 - }
2661 - preempt_enable();
2662 + kfree(bufptr);
2663 }
2664
2665 static inline const char *scmd_name(const struct scsi_cmnd *scmd)
2666 diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
2667 index 19c8efb9a5ee..1ba1556f1987 100644
2668 --- a/drivers/soundwire/Kconfig
2669 +++ b/drivers/soundwire/Kconfig
2670 @@ -3,8 +3,8 @@
2671 #
2672
2673 menuconfig SOUNDWIRE
2674 - bool "SoundWire support"
2675 - ---help---
2676 + tristate "SoundWire support"
2677 + help
2678 SoundWire is a 2-Pin interface with data and clock line ratified
2679 by the MIPI Alliance. SoundWire is used for transporting data
2680 typically related to audio functions. SoundWire interface is
2681 @@ -16,17 +16,12 @@ if SOUNDWIRE
2682
2683 comment "SoundWire Devices"
2684
2685 -config SOUNDWIRE_BUS
2686 - tristate
2687 - select REGMAP_SOUNDWIRE
2688 -
2689 config SOUNDWIRE_CADENCE
2690 tristate
2691
2692 config SOUNDWIRE_INTEL
2693 tristate "Intel SoundWire Master driver"
2694 select SOUNDWIRE_CADENCE
2695 - select SOUNDWIRE_BUS
2696 depends on X86 && ACPI && SND_SOC
2697 ---help---
2698 SoundWire Intel Master driver.
2699 diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
2700 index 5817beaca0e1..1e2c00163142 100644
2701 --- a/drivers/soundwire/Makefile
2702 +++ b/drivers/soundwire/Makefile
2703 @@ -4,7 +4,7 @@
2704
2705 #Bus Objs
2706 soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o
2707 -obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o
2708 +obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o
2709
2710 #Cadence Objs
2711 soundwire-cadence-objs := cadence_master.o
2712 diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
2713 index a6e2581ada70..29bc99c4a7b6 100644
2714 --- a/drivers/soundwire/intel.c
2715 +++ b/drivers/soundwire/intel.c
2716 @@ -282,6 +282,16 @@ intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
2717
2718 if (pcm) {
2719 count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
2720 +
2721 + /*
2722 + * WORKAROUND: on all existing Intel controllers, PDI
2723 + * number 2 reports its channel count as 1 even though it
2724 + * supports 8 channels. Hardcode the channel count for
2725 + * PDI number 2.
2726 + */
2727 + if (pdi_num == 2)
2728 + count = 7;
2729 +
2730 } else {
2731 count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
2732 count = ((count & SDW_SHIM_PDMSCAP_CPSS) >>
2733 diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
2734 index 6cf00d9f512b..a92c2868d902 100644
2735 --- a/drivers/vfio/pci/vfio_pci.c
2736 +++ b/drivers/vfio/pci/vfio_pci.c
2737 @@ -373,11 +373,20 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
2738 pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
2739
2740 /*
2741 - * Try to reset the device. The success of this is dependent on
2742 - * being able to lock the device, which is not always possible.
2743 + * Try to get the locks ourselves to prevent a deadlock. The
2744 + * success of this is dependent on being able to lock the device,
2745 + * which is not always possible.
2746 + * We cannot use the "try" reset interface here, as it would
2747 + * overwrite the previously restored configuration information.
2748 */
2749 - if (vdev->reset_works && !pci_try_reset_function(pdev))
2750 - vdev->needs_reset = false;
2751 + if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
2752 + if (device_trylock(&pdev->dev)) {
2753 + if (!__pci_reset_function_locked(pdev))
2754 + vdev->needs_reset = false;
2755 + device_unlock(&pdev->dev);
2756 + }
2757 + pci_cfg_access_unlock(pdev);
2758 + }
2759
2760 pci_restore_state(pdev);
2761 out:
2762 diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
2763 index 6439231f2db2..da565f39c9b0 100644
2764 --- a/drivers/video/fbdev/ssd1307fb.c
2765 +++ b/drivers/video/fbdev/ssd1307fb.c
2766 @@ -433,7 +433,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
2767 if (ret < 0)
2768 return ret;
2769
2770 - ret = ssd1307fb_write_cmd(par->client, 0x0);
2771 + ret = ssd1307fb_write_cmd(par->client, par->page_offset);
2772 if (ret < 0)
2773 return ret;
2774
2775 diff --git a/fs/9p/cache.c b/fs/9p/cache.c
2776 index 9eb34701a566..a43a8d2436db 100644
2777 --- a/fs/9p/cache.c
2778 +++ b/fs/9p/cache.c
2779 @@ -66,6 +66,8 @@ void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
2780 if (!v9ses->cachetag) {
2781 if (v9fs_random_cachetag(v9ses) < 0) {
2782 v9ses->fscache = NULL;
2783 + kfree(v9ses->cachetag);
2784 + v9ses->cachetag = NULL;
2785 return;
2786 }
2787 }
2788 diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
2789 index e8e27cdc2f67..7edc8172c53a 100644
2790 --- a/fs/ext4/block_validity.c
2791 +++ b/fs/ext4/block_validity.c
2792 @@ -38,6 +38,7 @@ int __init ext4_init_system_zone(void)
2793
2794 void ext4_exit_system_zone(void)
2795 {
2796 + rcu_barrier();
2797 kmem_cache_destroy(ext4_system_zone_cachep);
2798 }
2799
2800 @@ -49,17 +50,26 @@ static inline int can_merge(struct ext4_system_zone *entry1,
2801 return 0;
2802 }
2803
2804 +static void release_system_zone(struct ext4_system_blocks *system_blks)
2805 +{
2806 + struct ext4_system_zone *entry, *n;
2807 +
2808 + rbtree_postorder_for_each_entry_safe(entry, n,
2809 + &system_blks->root, node)
2810 + kmem_cache_free(ext4_system_zone_cachep, entry);
2811 +}
2812 +
2813 /*
2814 * Mark a range of blocks as belonging to the "system zone" --- that
2815 * is, filesystem metadata blocks which should never be used by
2816 * inodes.
2817 */
2818 -static int add_system_zone(struct ext4_sb_info *sbi,
2819 +static int add_system_zone(struct ext4_system_blocks *system_blks,
2820 ext4_fsblk_t start_blk,
2821 unsigned int count)
2822 {
2823 struct ext4_system_zone *new_entry = NULL, *entry;
2824 - struct rb_node **n = &sbi->system_blks.rb_node, *node;
2825 + struct rb_node **n = &system_blks->root.rb_node, *node;
2826 struct rb_node *parent = NULL, *new_node = NULL;
2827
2828 while (*n) {
2829 @@ -91,7 +101,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
2830 new_node = &new_entry->node;
2831
2832 rb_link_node(new_node, parent, n);
2833 - rb_insert_color(new_node, &sbi->system_blks);
2834 + rb_insert_color(new_node, &system_blks->root);
2835 }
2836
2837 /* Can we merge to the left? */
2838 @@ -101,7 +111,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
2839 if (can_merge(entry, new_entry)) {
2840 new_entry->start_blk = entry->start_blk;
2841 new_entry->count += entry->count;
2842 - rb_erase(node, &sbi->system_blks);
2843 + rb_erase(node, &system_blks->root);
2844 kmem_cache_free(ext4_system_zone_cachep, entry);
2845 }
2846 }
2847 @@ -112,7 +122,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
2848 entry = rb_entry(node, struct ext4_system_zone, node);
2849 if (can_merge(new_entry, entry)) {
2850 new_entry->count += entry->count;
2851 - rb_erase(node, &sbi->system_blks);
2852 + rb_erase(node, &system_blks->root);
2853 kmem_cache_free(ext4_system_zone_cachep, entry);
2854 }
2855 }
2856 @@ -126,7 +136,7 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
2857 int first = 1;
2858
2859 printk(KERN_INFO "System zones: ");
2860 - node = rb_first(&sbi->system_blks);
2861 + node = rb_first(&sbi->system_blks->root);
2862 while (node) {
2863 entry = rb_entry(node, struct ext4_system_zone, node);
2864 printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ",
2865 @@ -137,7 +147,47 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
2866 printk(KERN_CONT "\n");
2867 }
2868
2869 -static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
2870 +/*
2871 + * Returns 1 if the passed-in block region (start_blk,
2872 + * start_blk+count) is valid; 0 if some part of the block region
2873 + * overlaps with filesystem metadata blocks.
2874 + */
2875 +static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
2876 + struct ext4_system_blocks *system_blks,
2877 + ext4_fsblk_t start_blk,
2878 + unsigned int count)
2879 +{
2880 + struct ext4_system_zone *entry;
2881 + struct rb_node *n;
2882 +
2883 + if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
2884 + (start_blk + count < start_blk) ||
2885 + (start_blk + count > ext4_blocks_count(sbi->s_es))) {
2886 + sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
2887 + return 0;
2888 + }
2889 +
2890 + if (system_blks == NULL)
2891 + return 1;
2892 +
2893 + n = system_blks->root.rb_node;
2894 + while (n) {
2895 + entry = rb_entry(n, struct ext4_system_zone, node);
2896 + if (start_blk + count - 1 < entry->start_blk)
2897 + n = n->rb_left;
2898 + else if (start_blk >= (entry->start_blk + entry->count))
2899 + n = n->rb_right;
2900 + else {
2901 + sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
2902 + return 0;
2903 + }
2904 + }
2905 + return 1;
2906 +}
2907 +
2908 +static int ext4_protect_reserved_inode(struct super_block *sb,
2909 + struct ext4_system_blocks *system_blks,
2910 + u32 ino)
2911 {
2912 struct inode *inode;
2913 struct ext4_sb_info *sbi = EXT4_SB(sb);
2914 @@ -163,14 +213,15 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
2915 if (n == 0) {
2916 i++;
2917 } else {
2918 - if (!ext4_data_block_valid(sbi, map.m_pblk, n)) {
2919 + if (!ext4_data_block_valid_rcu(sbi, system_blks,
2920 + map.m_pblk, n)) {
2921 ext4_error(sb, "blocks %llu-%llu from inode %u "
2922 "overlap system zone", map.m_pblk,
2923 map.m_pblk + map.m_len - 1, ino);
2924 err = -EFSCORRUPTED;
2925 break;
2926 }
2927 - err = add_system_zone(sbi, map.m_pblk, n);
2928 + err = add_system_zone(system_blks, map.m_pblk, n);
2929 if (err < 0)
2930 break;
2931 i += n;
2932 @@ -180,93 +231,129 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
2933 return err;
2934 }
2935
2936 +static void ext4_destroy_system_zone(struct rcu_head *rcu)
2937 +{
2938 + struct ext4_system_blocks *system_blks;
2939 +
2940 + system_blks = container_of(rcu, struct ext4_system_blocks, rcu);
2941 + release_system_zone(system_blks);
2942 + kfree(system_blks);
2943 +}
2944 +
2945 +/*
2946 + * Build system zone rbtree which is used for block validity checking.
2947 + *
2948 + * The update of system_blks pointer in this function is protected by
2949 + * sb->s_umount semaphore. However we have to be careful as we can be
2950 + * racing with ext4_data_block_valid() calls reading system_blks rbtree
2951 + * protected only by RCU. That's why we first build the rbtree and then
2952 + * swap it in place.
2953 + */
2954 int ext4_setup_system_zone(struct super_block *sb)
2955 {
2956 ext4_group_t ngroups = ext4_get_groups_count(sb);
2957 struct ext4_sb_info *sbi = EXT4_SB(sb);
2958 + struct ext4_system_blocks *system_blks;
2959 struct ext4_group_desc *gdp;
2960 ext4_group_t i;
2961 int flex_size = ext4_flex_bg_size(sbi);
2962 int ret;
2963
2964 if (!test_opt(sb, BLOCK_VALIDITY)) {
2965 - if (sbi->system_blks.rb_node)
2966 + if (sbi->system_blks)
2967 ext4_release_system_zone(sb);
2968 return 0;
2969 }
2970 - if (sbi->system_blks.rb_node)
2971 + if (sbi->system_blks)
2972 return 0;
2973
2974 + system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
2975 + if (!system_blks)
2976 + return -ENOMEM;
2977 +
2978 for (i=0; i < ngroups; i++) {
2979 if (ext4_bg_has_super(sb, i) &&
2980 ((i < 5) || ((i % flex_size) == 0)))
2981 - add_system_zone(sbi, ext4_group_first_block_no(sb, i),
2982 + add_system_zone(system_blks,
2983 + ext4_group_first_block_no(sb, i),
2984 ext4_bg_num_gdb(sb, i) + 1);
2985 gdp = ext4_get_group_desc(sb, i, NULL);
2986 - ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1);
2987 + ret = add_system_zone(system_blks,
2988 + ext4_block_bitmap(sb, gdp), 1);
2989 if (ret)
2990 - return ret;
2991 - ret = add_system_zone(sbi, ext4_inode_bitmap(sb, gdp), 1);
2992 + goto err;
2993 + ret = add_system_zone(system_blks,
2994 + ext4_inode_bitmap(sb, gdp), 1);
2995 if (ret)
2996 - return ret;
2997 - ret = add_system_zone(sbi, ext4_inode_table(sb, gdp),
2998 + goto err;
2999 + ret = add_system_zone(system_blks,
3000 + ext4_inode_table(sb, gdp),
3001 sbi->s_itb_per_group);
3002 if (ret)
3003 - return ret;
3004 + goto err;
3005 }
3006 if (ext4_has_feature_journal(sb) && sbi->s_es->s_journal_inum) {
3007 - ret = ext4_protect_reserved_inode(sb,
3008 + ret = ext4_protect_reserved_inode(sb, system_blks,
3009 le32_to_cpu(sbi->s_es->s_journal_inum));
3010 if (ret)
3011 - return ret;
3012 + goto err;
3013 }
3014
3015 + /*
3016 + * The system blocks rbtree is now complete; publish it once so
3017 + * that racing ext4_data_block_valid() calls never observe a
3018 + * partially built tree.
3019 + */
3020 + rcu_assign_pointer(sbi->system_blks, system_blks);
3021 +
3022 if (test_opt(sb, DEBUG))
3023 debug_print_tree(sbi);
3024 return 0;
3025 +err:
3026 + release_system_zone(system_blks);
3027 + kfree(system_blks);
3028 + return ret;
3029 }
3030
3031 -/* Called when the filesystem is unmounted */
3032 +/*
3033 + * Called when the filesystem is unmounted or when remounting it with
3034 + * noblock_validity specified.
3035 + *
3036 + * The update of system_blks pointer in this function is protected by
3037 + * sb->s_umount semaphore. However we have to be careful as we can be
3038 + * racing with ext4_data_block_valid() calls reading system_blks rbtree
3039 + * protected only by RCU. So we first clear the system_blks pointer and
3040 + * then free the rbtree only after RCU grace period expires.
3041 + */
3042 void ext4_release_system_zone(struct super_block *sb)
3043 {
3044 - struct ext4_system_zone *entry, *n;
3045 + struct ext4_system_blocks *system_blks;
3046
3047 - rbtree_postorder_for_each_entry_safe(entry, n,
3048 - &EXT4_SB(sb)->system_blks, node)
3049 - kmem_cache_free(ext4_system_zone_cachep, entry);
3050 + system_blks = rcu_dereference_protected(EXT4_SB(sb)->system_blks,
3051 + lockdep_is_held(&sb->s_umount));
3052 + rcu_assign_pointer(EXT4_SB(sb)->system_blks, NULL);
3053
3054 - EXT4_SB(sb)->system_blks = RB_ROOT;
3055 + if (system_blks)
3056 + call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
3057 }
3058
3059 -/*
3060 - * Returns 1 if the passed-in block region (start_blk,
3061 - * start_blk+count) is valid; 0 if some part of the block region
3062 - * overlaps with filesystem metadata blocks.
3063 - */
3064 int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
3065 unsigned int count)
3066 {
3067 - struct ext4_system_zone *entry;
3068 - struct rb_node *n = sbi->system_blks.rb_node;
3069 + struct ext4_system_blocks *system_blks;
3070 + int ret;
3071
3072 - if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
3073 - (start_blk + count < start_blk) ||
3074 - (start_blk + count > ext4_blocks_count(sbi->s_es))) {
3075 - sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
3076 - return 0;
3077 - }
3078 - while (n) {
3079 - entry = rb_entry(n, struct ext4_system_zone, node);
3080 - if (start_blk + count - 1 < entry->start_blk)
3081 - n = n->rb_left;
3082 - else if (start_blk >= (entry->start_blk + entry->count))
3083 - n = n->rb_right;
3084 - else {
3085 - sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
3086 - return 0;
3087 - }
3088 - }
3089 - return 1;
3090 + /*
3091 + * Lock the system zone to prevent it from being released
3092 + * concurrently when a remount inverts the current
3093 + * "[no]block_validity" mount option.
3094 + */
3095 + rcu_read_lock();
3096 + system_blks = rcu_dereference(sbi->system_blks);
3097 + ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk,
3098 + count);
3099 + rcu_read_unlock();
3100 + return ret;
3101 }
3102
3103 int ext4_check_blockref(const char *function, unsigned int line,
3104 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3105 index 1ee51d3a978a..f8456a423c4e 100644
3106 --- a/fs/ext4/ext4.h
3107 +++ b/fs/ext4/ext4.h
3108 @@ -194,6 +194,14 @@ struct ext4_map_blocks {
3109 unsigned int m_flags;
3110 };
3111
3112 +/*
3113 + * Block validity checking, system zone rbtree.
3114 + */
3115 +struct ext4_system_blocks {
3116 + struct rb_root root;
3117 + struct rcu_head rcu;
3118 +};
3119 +
3120 /*
3121 * Flags for ext4_io_end->flags
3122 */
3123 @@ -1409,7 +1417,7 @@ struct ext4_sb_info {
3124 int s_jquota_fmt; /* Format of quota to use */
3125 #endif
3126 unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
3127 - struct rb_root system_blks;
3128 + struct ext4_system_blocks __rcu *system_blks;
3129
3130 #ifdef EXTENTS_STATS
3131 /* ext4 extents stats */
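The comments in the block_validity.c hunks above describe the canonical RCU publish/retire pattern for a read-mostly structure: the writer builds a complete replacement offline and publishes it with rcu_assign_pointer() (here under sb->s_umount), readers only touch it between rcu_read_lock()/rcu_read_unlock(), and the retired copy is freed via call_rcu() after a grace period. A distilled sketch of that lifecycle with generic names (not the ext4 functions; like ext4_setup_system_zone(), the writer assumes nothing is currently published):

#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct blob {
        struct rb_root root;    /* payload, fully built before publication */
        struct rcu_head rcu;
};

static struct blob __rcu *current_blob;

/* Writer, under the update-side lock: build offline, publish once. */
static int publish_blob(void)
{
        struct blob *nb = kzalloc(sizeof(*nb), GFP_KERNEL);

        if (!nb)
                return -ENOMEM;
        /* ... populate nb->root completely ... */
        rcu_assign_pointer(current_blob, nb);   /* readers may now see it */
        return 0;
}

static void blob_free_rcu(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct blob, rcu));
}

/* Writer: unpublish, then free only after every reader is done. */
static void retire_blob(void)
{
        struct blob *old = rcu_dereference_protected(current_blob, 1);

        rcu_assign_pointer(current_blob, NULL);
        if (old)
                call_rcu(&old->rcu, blob_free_rcu);
}

/* Reader: NULL simply means "no tree right now", as in
 * ext4_data_block_valid_rcu() above. */
static bool blob_lookup(void)
{
        bool ret;

        rcu_read_lock();
        ret = rcu_dereference(current_blob) != NULL;
        rcu_read_unlock();
        return ret;
}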
3132 diff --git a/fs/fat/dir.c b/fs/fat/dir.c
3133 index 7f5f3699fc6c..de60c05c0ca1 100644
3134 --- a/fs/fat/dir.c
3135 +++ b/fs/fat/dir.c
3136 @@ -1097,8 +1097,11 @@ static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
3137 err = -ENOMEM;
3138 goto error;
3139 }
3140 + /* Avoid race with userspace read via bdev */
3141 + lock_buffer(bhs[n]);
3142 memset(bhs[n]->b_data, 0, sb->s_blocksize);
3143 set_buffer_uptodate(bhs[n]);
3144 + unlock_buffer(bhs[n]);
3145 mark_buffer_dirty_inode(bhs[n], dir);
3146
3147 n++;
3148 @@ -1155,6 +1158,8 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
3149 fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
3150
3151 de = (struct msdos_dir_entry *)bhs[0]->b_data;
3152 + /* Avoid race with userspace read via bdev */
3153 + lock_buffer(bhs[0]);
3154 /* filling the new directory slots ("." and ".." entries) */
3155 memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME);
3156 memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME);
3157 @@ -1177,6 +1182,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
3158 de[0].size = de[1].size = 0;
3159 memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de));
3160 set_buffer_uptodate(bhs[0]);
3161 + unlock_buffer(bhs[0]);
3162 mark_buffer_dirty_inode(bhs[0], dir);
3163
3164 err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE);
3165 @@ -1234,11 +1240,14 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
3166
3167 /* fill the directory entry */
3168 copy = min(size, sb->s_blocksize);
3169 + /* Avoid race with userspace read via bdev */
3170 + lock_buffer(bhs[n]);
3171 memcpy(bhs[n]->b_data, slots, copy);
3172 - slots += copy;
3173 - size -= copy;
3174 set_buffer_uptodate(bhs[n]);
3175 + unlock_buffer(bhs[n]);
3176 mark_buffer_dirty_inode(bhs[n], dir);
3177 + slots += copy;
3178 + size -= copy;
3179 if (!size)
3180 break;
3181 n++;
3182 diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
3183 index f58c0cacc531..4c6c635bc8aa 100644
3184 --- a/fs/fat/fatent.c
3185 +++ b/fs/fat/fatent.c
3186 @@ -390,8 +390,11 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
3187 err = -ENOMEM;
3188 goto error;
3189 }
3190 + /* Avoid race with userspace read via bdev */
3191 + lock_buffer(c_bh);
3192 memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
3193 set_buffer_uptodate(c_bh);
3194 + unlock_buffer(c_bh);
3195 mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
3196 if (sb->s_flags & SB_SYNCHRONOUS)
3197 err = sync_dirty_buffer(c_bh);
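
Context for the three fat hunks above: each applies the same discipline, holding the buffer lock across the data fill so that a concurrent read of the block device (for example, userspace reading the raw device node) cannot observe a half-written buffer that is already marked uptodate. A hedged sketch of the pattern, with an illustrative function name, not from the patch:

/* Sketch only: fill a buffer_head that userspace may be reading
 * concurrently through the block device. The lock pairs the data
 * update with the uptodate flag, so no torn state is visible. */
static void fill_block(struct buffer_head *bh, struct inode *inode,
                       const void *src, size_t len)
{
        lock_buffer(bh);
        memcpy(bh->b_data, src, len);           /* or memset() for zeroing */
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        mark_buffer_dirty_inode(bh, inode);     /* dirty it after unlock */
}
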
3198 diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
3199 index 63d701cd1e2e..c8e9b7031d9a 100644
3200 --- a/fs/ocfs2/dlm/dlmunlock.c
3201 +++ b/fs/ocfs2/dlm/dlmunlock.c
3202 @@ -105,7 +105,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
3203 enum dlm_status status;
3204 int actions = 0;
3205 int in_use;
3206 - u8 owner;
3207 + u8 owner;
3208 + int recovery_wait = 0;
3209
3210 mlog(0, "master_node = %d, valblk = %d\n", master_node,
3211 flags & LKM_VALBLK);
3212 @@ -208,9 +209,12 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
3213 }
3214 if (flags & LKM_CANCEL)
3215 lock->cancel_pending = 0;
3216 - else
3217 - lock->unlock_pending = 0;
3218 -
3219 + else {
3220 + if (!lock->unlock_pending)
3221 + recovery_wait = 1;
3222 + else
3223 + lock->unlock_pending = 0;
3224 + }
3225 }
3226
3227 /* get an extra ref on lock. if we are just switching
3228 @@ -244,6 +248,17 @@ leave:
3229 spin_unlock(&res->spinlock);
3230 wake_up(&res->wq);
3231
3232 + if (recovery_wait) {
3233 + spin_lock(&res->spinlock);
3234 + /* An unlock request succeeds immediately once the owner dies,
3235 + * and the lock is already off the grant list. We must wait for
3236 + * RECOVERING to finish or we miss the chance to purge the lock,
3237 + * since its removal is much faster than the RECOVERING process.
3238 + */
3239 + __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
3240 + spin_unlock(&res->spinlock);
3241 + }
3242 +
3243 /* let the caller's final dlm_lock_put handle the actual kfree */
3244 if (actions & DLM_UNLOCK_FREE_LOCK) {
3245 /* this should always be coupled with list removal */
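
The recovery_wait path above relies on ocfs2's __dlm_wait_on_lockres_flags(), which sleeps until the named state bits clear. For readers unfamiliar with it, here is a simplified sketch of that classic waitqueue idiom; types are illustrative and this is not the ocfs2 implementation:

/* Sleep until (res->state & flags) clears, dropping the spinlock
 * while asleep. The caller holds res->spinlock on entry and on
 * return, matching the recovery_wait block above. */
static void wait_on_flags(struct demo_res *res, int flags)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&res->wq, &wait);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!(res->state & flags))
                        break;
                spin_unlock(&res->spinlock);
                schedule();             /* woken by wake_up(&res->wq) */
                spin_lock(&res->spinlock);
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&res->wq, &wait);
}
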
3246 diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
3247 index 316c16463b20..015d74ee31a0 100644
3248 --- a/fs/pstore/ram.c
3249 +++ b/fs/pstore/ram.c
3250 @@ -162,6 +162,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
3251 if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n",
3252 (time64_t *)&time->tv_sec, &time->tv_nsec, &data_type,
3253 &header_length) == 3) {
3254 + time->tv_nsec *= 1000;
3255 if (data_type == 'C')
3256 *compressed = true;
3257 else
3258 @@ -169,6 +170,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
3259 } else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu\n%n",
3260 (time64_t *)&time->tv_sec, &time->tv_nsec,
3261 &header_length) == 2) {
3262 + time->tv_nsec *= 1000;
3263 *compressed = false;
3264 } else {
3265 time->tv_sec = 0;
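
Context for the pstore hunk above: the ramoops header writer stores the timestamp as seconds plus a microsecond fraction (the write side divides tv_nsec by 1000), so the reader must scale the parsed fraction back up to nanoseconds. A small runnable userspace illustration of the parse-and-rescale, assuming the "====" header prefix this kernel's ramoops uses and a made-up sample timestamp:

#include <stdio.h>

int main(void)
{
        const char *hdr = "====1570000000.123456-C\n"; /* sample input */
        long long sec;
        unsigned long frac;
        char type;

        if (sscanf(hdr, "====%lld.%lu-%c", &sec, &frac, &type) == 3) {
                unsigned long nsec = frac * 1000; /* usec -> nsec, as above */
                printf("sec=%lld nsec=%lu compressed=%d\n",
                       sec, nsec, type == 'C');
        }
        return 0;
}
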
3266 diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
3267 index e03bd9d41fa8..7b196d234626 100644
3268 --- a/include/scsi/scsi_dbg.h
3269 +++ b/include/scsi/scsi_dbg.h
3270 @@ -6,8 +6,6 @@ struct scsi_cmnd;
3271 struct scsi_device;
3272 struct scsi_sense_hdr;
3273
3274 -#define SCSI_LOG_BUFSIZE 128
3275 -
3276 extern void scsi_print_command(struct scsi_cmnd *);
3277 extern size_t __scsi_format_command(char *, size_t,
3278 const unsigned char *, size_t);
3279 diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
3280 index 815dcfa64743..0fe169c6afd8 100644
3281 --- a/include/trace/events/rxrpc.h
3282 +++ b/include/trace/events/rxrpc.h
3283 @@ -1073,7 +1073,7 @@ TRACE_EVENT(rxrpc_recvmsg,
3284 ),
3285
3286 TP_fast_assign(
3287 - __entry->call = call->debug_id;
3288 + __entry->call = call ? call->debug_id : 0;
3289 __entry->why = why;
3290 __entry->seq = seq;
3291 __entry->offset = offset;
3292 diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
3293 index 118e3a8fc764..6e544e364821 100644
3294 --- a/kernel/bpf/syscall.c
3295 +++ b/kernel/bpf/syscall.c
3296 @@ -1454,19 +1454,25 @@ static int bpf_prog_load(union bpf_attr *attr)
3297 if (err)
3298 goto free_used_maps;
3299
3300 + /* Upon success of bpf_prog_alloc_id(), the BPF prog is
3301 + * effectively publicly exposed. However, retrieving via
3302 + * bpf_prog_get_fd_by_id() will take another reference,
3303 + * therefore it cannot go away underneath us.
3304 + *
3305 + * Only for the time /after/ successful bpf_prog_new_fd()
3306 + * and before returning to userspace, we might just hold
3307 + * one reference and any parallel close on that fd could
3308 + * rip everything out. Hence, below notifications must
3309 + * happen before bpf_prog_new_fd().
3310 + *
3311 + * Also, any failure handling from this point onwards must
3312 + * be using bpf_prog_put() given the program is exposed.
3313 + */
3314 + bpf_prog_kallsyms_add(prog);
3315 +
3316 err = bpf_prog_new_fd(prog);
3317 - if (err < 0) {
3318 - /* failed to allocate fd.
3319 - * bpf_prog_put() is needed because the above
3320 - * bpf_prog_alloc_id() has published the prog
3321 - * to the userspace and the userspace may
3322 - * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
3323 - */
3324 + if (err < 0)
3325 bpf_prog_put(prog);
3326 - return err;
3327 - }
3328 -
3329 - bpf_prog_kallsyms_add(prog);
3330 return err;
3331
3332 free_used_maps:
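
The reordering above encodes a general rule for refcounted objects: once an object is reachable by other tasks (here via the prog ID, and finally via the new fd), every remaining side effect must happen before the fd exists, and error handling must drop a reference instead of freeing. A hedged, generic sketch of that shape; the obj_* helpers are hypothetical, only anon_inode_getfd() is a real kernel API:

static int obj_load(struct obj *obj)
{
        int err, fd;

        err = obj_alloc_id(obj);        /* now findable by ID lookups   */
        if (err)
                goto free_obj;          /* not yet exposed: plain free  */

        obj_notify_loaded(obj);         /* all side effects before fd   */

        fd = anon_inode_getfd("obj", &obj_fops, obj, O_CLOEXEC);
        if (fd < 0)
                obj_put(obj);           /* exposed: drop ref, never free */
        return fd;

free_obj:
        obj_free(obj);
        return err;
}
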
3333 diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
3334 index 23a83a4da38a..f50b90d0d1c2 100644
3335 --- a/kernel/kexec_core.c
3336 +++ b/kernel/kexec_core.c
3337 @@ -301,6 +301,8 @@ static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
3338 {
3339 struct page *pages;
3340
3341 + if (fatal_signal_pending(current))
3342 + return NULL;
3343 pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
3344 if (pages) {
3345 unsigned int count, i;
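
The kexec hunk above is a standard responsiveness fix: a loop that may allocate a large number of pages should give up once the caller has been fatally signalled (for instance, OOM-killed) instead of continuing to drain memory. The pattern, sketched with a hypothetical loop:

/* Sketch: long allocation loop that yields to a fatal signal. */
static int fill_pages(struct page **pages, unsigned int npages)
{
        unsigned int i;

        for (i = 0; i < npages; i++) {
                if (fatal_signal_pending(current))
                        return -EINTR;          /* caller frees pages[0..i) */
                pages[i] = alloc_pages(GFP_KERNEL, 0);
                if (!pages[i])
                        return -ENOMEM;
        }
        return 0;
}
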
3346 diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
3347 index 722c27c40e5b..a1250ad591c1 100644
3348 --- a/kernel/livepatch/core.c
3349 +++ b/kernel/livepatch/core.c
3350 @@ -1027,6 +1027,7 @@ err:
3351 pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
3352 patch->mod->name, obj->mod->name, obj->mod->name);
3353 mod->klp_alive = false;
3354 + obj->mod = NULL;
3355 klp_cleanup_module_patches_limited(mod, patch);
3356 mutex_unlock(&klp_mutex);
3357
3358 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
3359 index 3dea52f7be9c..46a910acce3f 100644
3360 --- a/lib/Kconfig.debug
3361 +++ b/lib/Kconfig.debug
3362 @@ -570,7 +570,7 @@ config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
3363 int "Maximum kmemleak early log entries"
3364 depends on DEBUG_KMEMLEAK
3365 range 200 40000
3366 - default 400
3367 + default 16000
3368 help
3369 Kmemleak must track all the memory allocations to avoid
3370 reporting false positives. Since memory may be allocated or
3371 diff --git a/net/core/sock.c b/net/core/sock.c
3372 index 9c32e8eb64da..f881eea1c4a4 100644
3373 --- a/net/core/sock.c
3374 +++ b/net/core/sock.c
3375 @@ -1563,8 +1563,6 @@ static void __sk_destruct(struct rcu_head *head)
3376 sk_filter_uncharge(sk, filter);
3377 RCU_INIT_POINTER(sk->sk_filter, NULL);
3378 }
3379 - if (rcu_access_pointer(sk->sk_reuseport_cb))
3380 - reuseport_detach_sock(sk);
3381
3382 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
3383
3384 @@ -1587,7 +1585,14 @@ static void __sk_destruct(struct rcu_head *head)
3385
3386 void sk_destruct(struct sock *sk)
3387 {
3388 - if (sock_flag(sk, SOCK_RCU_FREE))
3389 + bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
3390 +
3391 + if (rcu_access_pointer(sk->sk_reuseport_cb)) {
3392 + reuseport_detach_sock(sk);
3393 + use_call_rcu = true;
3394 + }
3395 +
3396 + if (use_call_rcu)
3397 call_rcu(&sk->sk_rcu, __sk_destruct);
3398 else
3399 __sk_destruct(&sk->sk_rcu);
3400 diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
3401 index 3c734832bb7c..0b87558f265e 100644
3402 --- a/net/ipv4/ip_gre.c
3403 +++ b/net/ipv4/ip_gre.c
3404 @@ -1531,6 +1531,7 @@ static void erspan_setup(struct net_device *dev)
3405 struct ip_tunnel *t = netdev_priv(dev);
3406
3407 ether_setup(dev);
3408 + dev->max_mtu = 0;
3409 dev->netdev_ops = &erspan_netdev_ops;
3410 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
3411 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
3412 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3413 index 232581c140a0..7065d68086ab 100644
3414 --- a/net/ipv4/route.c
3415 +++ b/net/ipv4/route.c
3416 @@ -908,16 +908,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
3417 if (peer->rate_tokens == 0 ||
3418 time_after(jiffies,
3419 (peer->rate_last +
3420 - (ip_rt_redirect_load << peer->rate_tokens)))) {
3421 + (ip_rt_redirect_load << peer->n_redirects)))) {
3422 __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
3423
3424 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
3425 peer->rate_last = jiffies;
3426 - ++peer->rate_tokens;
3427 ++peer->n_redirects;
3428 #ifdef CONFIG_IP_ROUTE_VERBOSE
3429 if (log_martians &&
3430 - peer->rate_tokens == ip_rt_redirect_number)
3431 + peer->n_redirects == ip_rt_redirect_number)
3432 net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
3433 &ip_hdr(skb)->saddr, inet_iif(skb),
3434 &ip_hdr(skb)->daddr, &gw);
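
Context for the route.c hunk above: the exponential backoff between ICMP redirects was broken because rate_tokens is also reset elsewhere in this function when the rate window expires, so shifting by it produced erratic delays. n_redirects counts only redirects sent and grows monotonically, restoring the intended load, 2*load, 4*load, ... spacing. A tiny runnable illustration of the schedule, assuming the default ip_rt_redirect_load of HZ/50 with HZ=1000:

#include <stdio.h>

int main(void)
{
        const unsigned long load = 20;          /* HZ/50, in jiffies */

        for (unsigned int n = 0; n < 9; n++)    /* default limit is 9 */
                printf("redirect %u: wait %lu jiffies\n", n, load << n);
        return 0;
}
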
3435 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
3436 index 2085fc0046de..aa59acc8ee0e 100644
3437 --- a/net/ipv4/udp.c
3438 +++ b/net/ipv4/udp.c
3439 @@ -775,6 +775,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
3440 int is_udplite = IS_UDPLITE(sk);
3441 int offset = skb_transport_offset(skb);
3442 int len = skb->len - offset;
3443 + int datalen = len - sizeof(*uh);
3444 __wsum csum = 0;
3445
3446 /*
3447 @@ -808,10 +809,12 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
3448 return -EIO;
3449 }
3450
3451 - skb_shinfo(skb)->gso_size = cork->gso_size;
3452 - skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
3453 - skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
3454 - cork->gso_size);
3455 + if (datalen > cork->gso_size) {
3456 + skb_shinfo(skb)->gso_size = cork->gso_size;
3457 + skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
3458 + skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
3459 + cork->gso_size);
3460 + }
3461 goto csum_partial;
3462 }
3463
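
Context for the udp.c hunk above (and its IPv6 twin further down): a send is only marked as GSO when the payload actually exceeds one segment; the selftest updates at the end of this patch match that. Note also that the removed code computed gso_segs from len - sizeof(uh), the size of a pointer, while the new datalen subtracts sizeof(*uh), the UDP header. The arithmetic, as a runnable sketch assuming one MSS of 1472 bytes over a 1500-byte-MTU IPv4 path:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int gso_size = 1472;           /* one MSS of payload */
        unsigned int datalen[] = { 1472, 1473, 4000 };

        for (unsigned int i = 0; i < 3; i++) {
                if (datalen[i] > gso_size)      /* the new guard */
                        printf("%u bytes -> GSO, %u segs\n", datalen[i],
                               DIV_ROUND_UP(datalen[i], gso_size));
                else
                        printf("%u bytes -> plain send, no GSO\n",
                               datalen[i]);
        }
        return 0;
}
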
3464 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
3465 index 49e2f6dac646..d2968a79abea 100644
3466 --- a/net/ipv6/addrconf.c
3467 +++ b/net/ipv6/addrconf.c
3468 @@ -5678,13 +5678,20 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
3469 switch (event) {
3470 case RTM_NEWADDR:
3471 /*
3472 - * If the address was optimistic
3473 - * we inserted the route at the start of
3474 - * our DAD process, so we don't need
3475 - * to do it again
3476 + * If the address was optimistic we inserted the route at the
3477 + * start of our DAD process, so we don't need to do it again.
3478 + * If the device was taken down in the middle of the DAD
3479 + * cycle, there is a race where we could get here without a
3480 + * host route, so there is nothing to insert. That will be
3481 + * fixed when the device is brought up.
3482 */
3483 - if (!rcu_access_pointer(ifp->rt->fib6_node))
3484 + if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
3485 ip6_ins_rt(net, ifp->rt);
3486 + } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
3487 + pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
3488 + &ifp->addr, ifp->idev->dev->name);
3489 + }
3490 +
3491 if (ifp->idev->cnf.forwarding)
3492 addrconf_join_anycast(ifp);
3493 if (!ipv6_addr_any(&ifp->peer_addr))
3494 diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
3495 index 6b74523fc1c4..2b6d43022383 100644
3496 --- a/net/ipv6/ip6_input.c
3497 +++ b/net/ipv6/ip6_input.c
3498 @@ -220,6 +220,16 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
3499 if (ipv6_addr_is_multicast(&hdr->saddr))
3500 goto err;
3501
3502 + /* While RFC4291 is not explicit about v4mapped addresses
3503 + * in IPv6 headers, it seems clear the Linux dual-stack
3504 + * model cannot deal properly with these.
3505 + * Security models could be fooled by ::ffff:127.0.0.1 for example.
3506 + *
3507 + * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
3508 + */
3509 + if (ipv6_addr_v4mapped(&hdr->saddr))
3510 + goto err;
3511 +
3512 skb->transport_header = skb->network_header + sizeof(*hdr);
3513 IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
3514
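
Context for the ip6_input hunk above: it drops IPv6 packets whose source is an IPv4-mapped address (::ffff:a.b.c.d), since such sources can smuggle spoofed IPv4 identities like ::ffff:127.0.0.1 past security checks. Userspace can test for the same address class with the POSIX macro, as in this runnable sketch:

#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>

int main(void)
{
        const char *samples[] = { "::ffff:127.0.0.1", "2001:db8::1" };
        struct in6_addr a;

        for (int i = 0; i < 2; i++) {
                inet_pton(AF_INET6, samples[i], &a);
                printf("%-18s v4mapped=%d\n", samples[i],
                       IN6_IS_ADDR_V4MAPPED(&a));
        }
        return 0;
}
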
3515 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
3516 index 3a27c04ff62f..d1c59cb6dceb 100644
3517 --- a/net/ipv6/udp.c
3518 +++ b/net/ipv6/udp.c
3519 @@ -1047,6 +1047,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
3520 __wsum csum = 0;
3521 int offset = skb_transport_offset(skb);
3522 int len = skb->len - offset;
3523 + int datalen = len - sizeof(*uh);
3524
3525 /*
3526 * Create a UDP header
3527 @@ -1079,8 +1080,12 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
3528 return -EIO;
3529 }
3530
3531 - skb_shinfo(skb)->gso_size = cork->gso_size;
3532 - skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
3533 + if (datalen > cork->gso_size) {
3534 + skb_shinfo(skb)->gso_size = cork->gso_size;
3535 + skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
3536 + skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
3537 + cork->gso_size);
3538 + }
3539 goto csum_partial;
3540 }
3541
3542 diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
3543 index ff254e8c0c44..e0a2cb8a029f 100644
3544 --- a/net/nfc/llcp_sock.c
3545 +++ b/net/nfc/llcp_sock.c
3546 @@ -119,9 +119,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
3547 llcp_sock->service_name = kmemdup(llcp_addr.service_name,
3548 llcp_sock->service_name_len,
3549 GFP_KERNEL);
3550 -
3551 + if (!llcp_sock->service_name) {
3552 + ret = -ENOMEM;
3553 + goto put_dev;
3554 + }
3555 llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
3556 if (llcp_sock->ssap == LLCP_SAP_MAX) {
3557 + kfree(llcp_sock->service_name);
3558 + llcp_sock->service_name = NULL;
3559 ret = -EADDRINUSE;
3560 goto put_dev;
3561 }
3562 diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
3563 index 9f2875efb4ac..b3662264aa24 100644
3564 --- a/net/nfc/netlink.c
3565 +++ b/net/nfc/netlink.c
3566 @@ -981,7 +981,8 @@ static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info)
3567 int rc;
3568 u32 idx;
3569
3570 - if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
3571 + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
3572 + !info->attrs[NFC_ATTR_TARGET_INDEX])
3573 return -EINVAL;
3574
3575 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
3576 @@ -1029,7 +1030,8 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
3577 struct sk_buff *msg = NULL;
3578 u32 idx;
3579
3580 - if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
3581 + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
3582 + !info->attrs[NFC_ATTR_FIRMWARE_NAME])
3583 return -EINVAL;
3584
3585 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
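
The two nfc/netlink.c hunks here and the sch_dsmark hunk further down share one bug class: a handler called nla_get_*() on an attribute without first checking that userspace actually supplied it, turning a malformed request into a NULL dereference. The defensive shape, sketched with hypothetical DEMO_* names:

/* Sketch: never nla_get() an attribute you have not checked. */
static int demo_doit(struct sk_buff *skb, struct genl_info *info)
{
        u32 dev_idx, target_idx;

        if (!info->attrs[DEMO_ATTR_DEVICE_INDEX] ||
            !info->attrs[DEMO_ATTR_TARGET_INDEX])
                return -EINVAL;

        dev_idx = nla_get_u32(info->attrs[DEMO_ATTR_DEVICE_INDEX]);
        target_idx = nla_get_u32(info->attrs[DEMO_ATTR_TARGET_INDEX]);
        /* ... use the indices ... */
        return 0;
}
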
3586 diff --git a/net/rds/ib.c b/net/rds/ib.c
3587 index eba75c1ba359..ba3379085c52 100644
3588 --- a/net/rds/ib.c
3589 +++ b/net/rds/ib.c
3590 @@ -143,6 +143,9 @@ static void rds_ib_add_one(struct ib_device *device)
3591 refcount_set(&rds_ibdev->refcount, 1);
3592 INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
3593
3594 + INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
3595 + INIT_LIST_HEAD(&rds_ibdev->conn_list);
3596 +
3597 rds_ibdev->max_wrs = device->attrs.max_qp_wr;
3598 rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);
3599
3600 @@ -203,9 +206,6 @@ static void rds_ib_add_one(struct ib_device *device)
3601 device->name,
3602 rds_ibdev->use_fastreg ? "FRMR" : "FMR");
3603
3604 - INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
3605 - INIT_LIST_HEAD(&rds_ibdev->conn_list);
3606 -
3607 down_write(&rds_ib_devices_lock);
3608 list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
3609 up_write(&rds_ib_devices_lock);
3610 diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
3611 index f42025d53cfe..ebc3c8c7e666 100644
3612 --- a/net/sched/sch_cbq.c
3613 +++ b/net/sched/sch_cbq.c
3614 @@ -1132,6 +1132,32 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
3615 [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
3616 };
3617
3618 +static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1],
3619 + struct nlattr *opt,
3620 + struct netlink_ext_ack *extack)
3621 +{
3622 + int err;
3623 +
3624 + if (!opt) {
3625 + NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
3626 + return -EINVAL;
3627 + }
3628 +
3629 + err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
3630 + if (err < 0)
3631 + return err;
3632 +
3633 + if (tb[TCA_CBQ_WRROPT]) {
3634 + const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
3635 +
3636 + if (wrr->priority > TC_CBQ_MAXPRIO) {
3637 + NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO");
3638 + err = -EINVAL;
3639 + }
3640 + }
3641 + return err;
3642 +}
3643 +
3644 static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
3645 struct netlink_ext_ack *extack)
3646 {
3647 @@ -1144,12 +1170,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
3648 hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
3649 q->delay_timer.function = cbq_undelay;
3650
3651 - if (!opt) {
3652 - NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
3653 - return -EINVAL;
3654 - }
3655 -
3656 - err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
3657 + err = cbq_opt_parse(tb, opt, extack);
3658 if (err < 0)
3659 return err;
3660
3661 @@ -1466,12 +1487,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
3662 struct cbq_class *parent;
3663 struct qdisc_rate_table *rtab = NULL;
3664
3665 - if (!opt) {
3666 - NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
3667 - return -EINVAL;
3668 - }
3669 -
3670 - err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
3671 + err = cbq_opt_parse(tb, opt, extack);
3672 if (err < 0)
3673 return err;
3674
3675 diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
3676 index 049714c57075..84c948c91914 100644
3677 --- a/net/sched/sch_dsmark.c
3678 +++ b/net/sched/sch_dsmark.c
3679 @@ -357,6 +357,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
3680 goto errout;
3681
3682 err = -EINVAL;
3683 + if (!tb[TCA_DSMARK_INDICES])
3684 + goto errout;
3685 indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
3686
3687 if (hweight32(indices) != 1)
3688 diff --git a/net/tipc/link.c b/net/tipc/link.c
3689 index 836727e363c4..6344aca4487b 100644
3690 --- a/net/tipc/link.c
3691 +++ b/net/tipc/link.c
3692 @@ -161,6 +161,7 @@ struct tipc_link {
3693 struct {
3694 u16 len;
3695 u16 limit;
3696 + struct sk_buff *target_bskb;
3697 } backlog[5];
3698 u16 snd_nxt;
3699 u16 last_retransm;
3700 @@ -846,6 +847,7 @@ static void link_prepare_wakeup(struct tipc_link *l)
3701 void tipc_link_reset(struct tipc_link *l)
3702 {
3703 struct sk_buff_head list;
3704 + u32 imp;
3705
3706 __skb_queue_head_init(&list);
3707
3708 @@ -864,11 +866,10 @@ void tipc_link_reset(struct tipc_link *l)
3709 __skb_queue_purge(&l->transmq);
3710 __skb_queue_purge(&l->deferdq);
3711 __skb_queue_purge(&l->backlogq);
3712 - l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
3713 - l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
3714 - l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
3715 - l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
3716 - l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
3717 + for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
3718 + l->backlog[imp].len = 0;
3719 + l->backlog[imp].target_bskb = NULL;
3720 + }
3721 kfree_skb(l->reasm_buf);
3722 kfree_skb(l->failover_reasm_skb);
3723 l->reasm_buf = NULL;
3724 @@ -909,7 +910,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
3725 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
3726 struct sk_buff_head *transmq = &l->transmq;
3727 struct sk_buff_head *backlogq = &l->backlogq;
3728 - struct sk_buff *skb, *_skb, *bskb;
3729 + struct sk_buff *skb, *_skb, **tskb;
3730 int pkt_cnt = skb_queue_len(list);
3731 int rc = 0;
3732
3733 @@ -955,19 +956,21 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
3734 seqno++;
3735 continue;
3736 }
3737 - if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
3738 + tskb = &l->backlog[imp].target_bskb;
3739 + if (tipc_msg_bundle(*tskb, hdr, mtu)) {
3740 kfree_skb(__skb_dequeue(list));
3741 l->stats.sent_bundled++;
3742 continue;
3743 }
3744 - if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
3745 + if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) {
3746 kfree_skb(__skb_dequeue(list));
3747 - __skb_queue_tail(backlogq, bskb);
3748 - l->backlog[msg_importance(buf_msg(bskb))].len++;
3749 + __skb_queue_tail(backlogq, *tskb);
3750 + l->backlog[imp].len++;
3751 l->stats.sent_bundled++;
3752 l->stats.sent_bundles++;
3753 continue;
3754 }
3755 + l->backlog[imp].target_bskb = NULL;
3756 l->backlog[imp].len += skb_queue_len(list);
3757 skb_queue_splice_tail_init(list, backlogq);
3758 }
3759 @@ -983,6 +986,7 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
3760 u16 seqno = l->snd_nxt;
3761 u16 ack = l->rcv_nxt - 1;
3762 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
3763 + u32 imp;
3764
3765 while (skb_queue_len(&l->transmq) < l->window) {
3766 skb = skb_peek(&l->backlogq);
3767 @@ -993,7 +997,10 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
3768 break;
3769 __skb_dequeue(&l->backlogq);
3770 hdr = buf_msg(skb);
3771 - l->backlog[msg_importance(hdr)].len--;
3772 + imp = msg_importance(hdr);
3773 + l->backlog[imp].len--;
3774 + if (unlikely(skb == l->backlog[imp].target_bskb))
3775 + l->backlog[imp].target_bskb = NULL;
3776 __skb_queue_tail(&l->transmq, skb);
3777 __skb_queue_tail(xmitq, _skb);
3778 TIPC_SKB_CB(skb)->ackers = l->ackers;
3779 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
3780 index b61891054709..cbccf1791d3c 100644
3781 --- a/net/tipc/msg.c
3782 +++ b/net/tipc/msg.c
3783 @@ -484,10 +484,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
3784 bmsg = buf_msg(_skb);
3785 tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
3786 INT_H_SIZE, dnode);
3787 - if (msg_isdata(msg))
3788 - msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
3789 - else
3790 - msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
3791 + msg_set_importance(bmsg, msg_importance(msg));
3792 msg_set_seqno(bmsg, msg_seqno(msg));
3793 msg_set_ack(bmsg, msg_ack(msg));
3794 msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
3795 diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
3796 index 2e30bf197583..2a4613b239e0 100644
3797 --- a/net/vmw_vsock/af_vsock.c
3798 +++ b/net/vmw_vsock/af_vsock.c
3799 @@ -641,7 +641,7 @@ struct sock *__vsock_create(struct net *net,
3800 }
3801 EXPORT_SYMBOL_GPL(__vsock_create);
3802
3803 -static void __vsock_release(struct sock *sk)
3804 +static void __vsock_release(struct sock *sk, int level)
3805 {
3806 if (sk) {
3807 struct sk_buff *skb;
3808 @@ -651,9 +651,17 @@ static void __vsock_release(struct sock *sk)
3809 vsk = vsock_sk(sk);
3810 pending = NULL; /* Compiler warning. */
3811
3812 + /* The release call is supposed to use lock_sock_nested()
3813 + * rather than lock_sock(), if a sock lock should be acquired.
3814 + */
3815 transport->release(vsk);
3816
3817 - lock_sock(sk);
3818 + /* When "level" is SINGLE_DEPTH_NESTING, use the nested
3819 + * version to avoid the warning "possible recursive locking
3820 + * detected". When "level" is 0, lock_sock_nested(sk, level)
3821 + * is the same as lock_sock(sk).
3822 + */
3823 + lock_sock_nested(sk, level);
3824 sock_orphan(sk);
3825 sk->sk_shutdown = SHUTDOWN_MASK;
3826
3827 @@ -662,7 +670,7 @@ static void __vsock_release(struct sock *sk)
3828
3829 /* Clean up any sockets that never were accepted. */
3830 while ((pending = vsock_dequeue_accept(sk)) != NULL) {
3831 - __vsock_release(pending);
3832 + __vsock_release(pending, SINGLE_DEPTH_NESTING);
3833 sock_put(pending);
3834 }
3835
3836 @@ -711,7 +719,7 @@ EXPORT_SYMBOL_GPL(vsock_stream_has_space);
3837
3838 static int vsock_release(struct socket *sock)
3839 {
3840 - __vsock_release(sock->sk);
3841 + __vsock_release(sock->sk, 0);
3842 sock->sk = NULL;
3843 sock->state = SS_FREE;
3844
3845 diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
3846 index 98f193fd5315..70350dc67366 100644
3847 --- a/net/vmw_vsock/hyperv_transport.c
3848 +++ b/net/vmw_vsock/hyperv_transport.c
3849 @@ -538,7 +538,7 @@ static void hvs_release(struct vsock_sock *vsk)
3850 struct sock *sk = sk_vsock(vsk);
3851 bool remove_sock;
3852
3853 - lock_sock(sk);
3854 + lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
3855 remove_sock = hvs_close_lock_held(vsk);
3856 release_sock(sk);
3857 if (remove_sock)
3858 diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
3859 index e30f53728725..3c199f752fd3 100644
3860 --- a/net/vmw_vsock/virtio_transport_common.c
3861 +++ b/net/vmw_vsock/virtio_transport_common.c
3862 @@ -791,7 +791,7 @@ void virtio_transport_release(struct vsock_sock *vsk)
3863 struct sock *sk = &vsk->sk;
3864 bool remove_sock = true;
3865
3866 - lock_sock(sk);
3867 + lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
3868 if (sk->sk_type == SOCK_STREAM)
3869 remove_sock = virtio_transport_close(vsk);
3870
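
Context for the vsock hunks above: they are a lockdep annotation, not a locking change. When releasing a listener, each never-accepted child socket is released while the parent's lock is held; taking the child's lock with plain lock_sock() trips lockdep's "possible recursive locking detected" because both locks share a class. lock_sock_nested() assigns the inner acquisition a subclass. A hedged sketch of the parent/child shape; dequeue_pending() is a hypothetical stand-in for vsock_dequeue_accept():

/* Sketch: parent and child socket locks share a lock class, so the
 * inner lock must be taken with an explicit nesting level. */
static void release_children(struct sock *parent)
{
        struct sock *child;

        lock_sock(parent);                      /* level 0 */
        while ((child = dequeue_pending(parent)) != NULL) {
                lock_sock_nested(child, SINGLE_DEPTH_NESTING);
                /* ... tear down the never-accepted child ... */
                release_sock(child);
                sock_put(child);
        }
        release_sock(parent);
}
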
3871 diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
3872 index 9a4c0ad46518..c071c356a963 100644
3873 --- a/security/smack/smack_access.c
3874 +++ b/security/smack/smack_access.c
3875 @@ -469,7 +469,7 @@ char *smk_parse_smack(const char *string, int len)
3876 if (i == 0 || i >= SMK_LONGLABEL)
3877 return ERR_PTR(-EINVAL);
3878
3879 - smack = kzalloc(i + 1, GFP_KERNEL);
3880 + smack = kzalloc(i + 1, GFP_NOFS);
3881 if (smack == NULL)
3882 return ERR_PTR(-ENOMEM);
3883
3884 @@ -504,7 +504,7 @@ int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap,
3885 if ((m & *cp) == 0)
3886 continue;
3887 rc = netlbl_catmap_setbit(&sap->attr.mls.cat,
3888 - cat, GFP_KERNEL);
3889 + cat, GFP_NOFS);
3890 if (rc < 0) {
3891 netlbl_catmap_free(sap->attr.mls.cat);
3892 return rc;
3893 @@ -540,7 +540,7 @@ struct smack_known *smk_import_entry(const char *string, int len)
3894 if (skp != NULL)
3895 goto freeout;
3896
3897 - skp = kzalloc(sizeof(*skp), GFP_KERNEL);
3898 + skp = kzalloc(sizeof(*skp), GFP_NOFS);
3899 if (skp == NULL) {
3900 skp = ERR_PTR(-ENOMEM);
3901 goto freeout;
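
Context for the Smack conversions above, on the assumption the upstream rationale applies here: these label allocations can run while a lock is held that is also needed on filesystem paths, so with GFP_KERNEL, direct reclaim could re-enter the filesystem and deadlock. GFP_NOFS forbids the allocator from recursing into filesystem code to reclaim memory. The shape is simply:

/* Sketch: allocation under a lock that fs reclaim paths may also
 * need; GFP_NOFS keeps reclaim out of filesystem code. */
char *label = kzalloc(len + 1, GFP_NOFS);
if (!label)
        return ERR_PTR(-ENOMEM);
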
3902 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
3903 index 017c47eb795e..221de4c755c3 100644
3904 --- a/security/smack/smack_lsm.c
3905 +++ b/security/smack/smack_lsm.c
3906 @@ -270,7 +270,7 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip,
3907 if (!(ip->i_opflags & IOP_XATTR))
3908 return ERR_PTR(-EOPNOTSUPP);
3909
3910 - buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL);
3911 + buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS);
3912 if (buffer == NULL)
3913 return ERR_PTR(-ENOMEM);
3914
3915 @@ -947,7 +947,8 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
3916
3917 if (rc != 0)
3918 return rc;
3919 - } else if (bprm->unsafe)
3920 + }
3921 + if (bprm->unsafe & ~LSM_UNSAFE_PTRACE)
3922 return -EPERM;
3923
3924 bsp->smk_task = isp->smk_task;
3925 @@ -4005,6 +4006,8 @@ access_check:
3926 skp = smack_ipv6host_label(&sadd);
3927 if (skp == NULL)
3928 skp = smack_net_ambient;
3929 + if (skb == NULL)
3930 + break;
3931 #ifdef CONFIG_AUDIT
3932 smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
3933 ad.a.u.net->family = family;
3934 diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
3935 index e279051bc631..270c17ab071e 100644
3936 --- a/tools/testing/selftests/net/udpgso.c
3937 +++ b/tools/testing/selftests/net/udpgso.c
3938 @@ -90,12 +90,9 @@ struct testcase testcases_v4[] = {
3939 .tfail = true,
3940 },
3941 {
3942 - /* send a single MSS: will fail with GSO, because the segment
3943 - * logic in udp4_ufo_fragment demands a gso skb to be > MTU
3944 - */
3945 + /* send a single MSS: will fall back to no GSO */
3946 .tlen = CONST_MSS_V4,
3947 .gso_len = CONST_MSS_V4,
3948 - .tfail = true,
3949 .r_num_mss = 1,
3950 },
3951 {
3952 @@ -140,10 +137,9 @@ struct testcase testcases_v4[] = {
3953 .tfail = true,
3954 },
3955 {
3956 - /* send a single 1B MSS: will fail, see single MSS above */
3957 + /* send a single 1B MSS: will fall back to no GSO */
3958 .tlen = 1,
3959 .gso_len = 1,
3960 - .tfail = true,
3961 .r_num_mss = 1,
3962 },
3963 {
3964 @@ -197,12 +193,9 @@ struct testcase testcases_v6[] = {
3965 .tfail = true,
3966 },
3967 {
3968 - /* send a single MSS: will fail with GSO, because the segment
3969 - * logic in udp4_ufo_fragment demands a gso skb to be > MTU
3970 - */
3971 + /* send a single MSS: will fall back to no GSO */
3972 .tlen = CONST_MSS_V6,
3973 .gso_len = CONST_MSS_V6,
3974 - .tfail = true,
3975 .r_num_mss = 1,
3976 },
3977 {
3978 @@ -247,10 +240,9 @@ struct testcase testcases_v6[] = {
3979 .tfail = true,
3980 },
3981 {
3982 - /* send a single 1B MSS: will fail, see single MSS above */
3983 + /* send a single 1B MSS: will fall back to no GSO */
3984 .tlen = 1,
3985 .gso_len = 1,
3986 - .tfail = true,
3987 .r_num_mss = 1,
3988 },
3989 {
3990 diff --git a/usr/Makefile b/usr/Makefile
3991 index 748f6a60bb1e..138c18cefb52 100644
3992 --- a/usr/Makefile
3993 +++ b/usr/Makefile
3994 @@ -11,6 +11,9 @@ datafile_y = initramfs_data.cpio$(suffix_y)
3995 datafile_d_y = .$(datafile_y).d
3996 AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/$(datafile_y)"
3997
3998 +# clean rules do not have CONFIG_INITRAMFS_COMPRESSION. So clean up after all
3999 +# possible compression formats.
4000 +clean-files += initramfs_data.cpio*
4001
4002 # Generate builtin.o based on initramfs_data.o
4003 obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o