Magellan Linux

Contents of /trunk/kernel-alx/patches-4.14/0147-4.14.48-all-fixes.patch

Parent Directory | Revision Log


Revision 3238 - (show annotations) (download)
Fri Nov 9 12:14:58 2018 UTC (5 years, 6 months ago) by niro
File size: 82228 byte(s)
-added up to patches-4.14.79
1 diff --git a/Makefile b/Makefile
2 index d6db01a02252..7a246f1ce44e 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 14
9 -SUBLEVEL = 47
10 +SUBLEVEL = 48
11 EXTRAVERSION =
12 NAME = Petit Gorille
13
14 @@ -369,11 +369,6 @@ HOSTCXXFLAGS := -O2 $(HOST_LFS_CFLAGS)
15 HOSTLDFLAGS := $(HOST_LFS_LDFLAGS)
16 HOST_LOADLIBES := $(HOST_LFS_LIBS)
17
18 -ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
19 -HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
20 - -Wno-missing-field-initializers -fno-delete-null-pointer-checks
21 -endif
22 -
23 # Make variables (CC, etc...)
24 AS = $(CROSS_COMPILE)as
25 LD = $(CROSS_COMPILE)ld
26 @@ -711,7 +706,6 @@ KBUILD_CFLAGS += $(stackp-flag)
27
28 ifeq ($(cc-name),clang)
29 KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
30 -KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
31 KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
32 KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
33 KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
34 @@ -729,9 +723,9 @@ else
35 # These warnings generated too much noise in a regular build.
36 # Use make W=1 to enable them (see scripts/Makefile.extrawarn)
37 KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
38 -KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
39 endif
40
41 +KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
42 ifdef CONFIG_FRAME_POINTER
43 KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
44 else
45 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
46 index 2f2d176396aa..e1ddb94a6522 100644
47 --- a/arch/mips/kernel/process.c
48 +++ b/arch/mips/kernel/process.c
49 @@ -721,6 +721,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
50 if (value & ~known_bits)
51 return -EOPNOTSUPP;
52
53 + /* Setting FRE without FR is not supported. */
54 + if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
55 + return -EOPNOTSUPP;
56 +
57 /* Avoid inadvertently triggering emulation */
58 if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
59 !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
60 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
61 index 006105fb12fe..e058cd300713 100644
62 --- a/arch/mips/kernel/ptrace.c
63 +++ b/arch/mips/kernel/ptrace.c
64 @@ -809,7 +809,7 @@ long arch_ptrace(struct task_struct *child, long request,
65 break;
66 }
67 #endif
68 - tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
69 + tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
70 break;
71 case PC:
72 tmp = regs->cp0_epc;
73 diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
74 index 4a157d3249ac..89026d33a07b 100644
75 --- a/arch/mips/kernel/ptrace32.c
76 +++ b/arch/mips/kernel/ptrace32.c
77 @@ -108,7 +108,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
78 addr & 1);
79 break;
80 }
81 - tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
82 + tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
83 break;
84 case PC:
85 tmp = regs->cp0_epc;
86 diff --git a/arch/powerpc/include/asm/book3s/64/slice.h b/arch/powerpc/include/asm/book3s/64/slice.h
87 new file mode 100644
88 index 000000000000..db0dedab65ee
89 --- /dev/null
90 +++ b/arch/powerpc/include/asm/book3s/64/slice.h
91 @@ -0,0 +1,27 @@
92 +/* SPDX-License-Identifier: GPL-2.0 */
93 +#ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H
94 +#define _ASM_POWERPC_BOOK3S_64_SLICE_H
95 +
96 +#ifdef CONFIG_PPC_MM_SLICES
97 +
98 +#define SLICE_LOW_SHIFT 28
99 +#define SLICE_LOW_TOP (0x100000000ul)
100 +#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
101 +#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
102 +
103 +#define SLICE_HIGH_SHIFT 40
104 +#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
105 +#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
106 +
107 +#else /* CONFIG_PPC_MM_SLICES */
108 +
109 +#define get_slice_psize(mm, addr) ((mm)->context.user_psize)
110 +#define slice_set_user_psize(mm, psize) \
111 +do { \
112 + (mm)->context.user_psize = (psize); \
113 + (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
114 +} while (0)
115 +
116 +#endif /* CONFIG_PPC_MM_SLICES */
117 +
118 +#endif /* _ASM_POWERPC_BOOK3S_64_SLICE_H */
119 diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
120 index 5bb3dbede41a..1325e5b5f680 100644
121 --- a/arch/powerpc/include/asm/mmu-8xx.h
122 +++ b/arch/powerpc/include/asm/mmu-8xx.h
123 @@ -169,6 +169,12 @@ typedef struct {
124 unsigned int id;
125 unsigned int active;
126 unsigned long vdso_base;
127 +#ifdef CONFIG_PPC_MM_SLICES
128 + u16 user_psize; /* page size index */
129 + u64 low_slices_psize; /* page size encodings */
130 + unsigned char high_slices_psize[0];
131 + unsigned long addr_limit;
132 +#endif
133 } mm_context_t;
134
135 #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
136 diff --git a/arch/powerpc/include/asm/nohash/32/slice.h b/arch/powerpc/include/asm/nohash/32/slice.h
137 new file mode 100644
138 index 000000000000..95d532e18092
139 --- /dev/null
140 +++ b/arch/powerpc/include/asm/nohash/32/slice.h
141 @@ -0,0 +1,18 @@
142 +/* SPDX-License-Identifier: GPL-2.0 */
143 +#ifndef _ASM_POWERPC_NOHASH_32_SLICE_H
144 +#define _ASM_POWERPC_NOHASH_32_SLICE_H
145 +
146 +#ifdef CONFIG_PPC_MM_SLICES
147 +
148 +#define SLICE_LOW_SHIFT 28
149 +#define SLICE_LOW_TOP (0x100000000ull)
150 +#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
151 +#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
152 +
153 +#define SLICE_HIGH_SHIFT 0
154 +#define SLICE_NUM_HIGH 0ul
155 +#define GET_HIGH_SLICE_INDEX(addr) (addr & 0)
156 +
157 +#endif /* CONFIG_PPC_MM_SLICES */
158 +
159 +#endif /* _ASM_POWERPC_NOHASH_32_SLICE_H */
160 diff --git a/arch/powerpc/include/asm/nohash/64/slice.h b/arch/powerpc/include/asm/nohash/64/slice.h
161 new file mode 100644
162 index 000000000000..ad0d6e3cc1c5
163 --- /dev/null
164 +++ b/arch/powerpc/include/asm/nohash/64/slice.h
165 @@ -0,0 +1,12 @@
166 +/* SPDX-License-Identifier: GPL-2.0 */
167 +#ifndef _ASM_POWERPC_NOHASH_64_SLICE_H
168 +#define _ASM_POWERPC_NOHASH_64_SLICE_H
169 +
170 +#ifdef CONFIG_PPC_64K_PAGES
171 +#define get_slice_psize(mm, addr) MMU_PAGE_64K
172 +#else /* CONFIG_PPC_64K_PAGES */
173 +#define get_slice_psize(mm, addr) MMU_PAGE_4K
174 +#endif /* !CONFIG_PPC_64K_PAGES */
175 +#define slice_set_user_psize(mm, psize) do { BUG(); } while (0)
176 +
177 +#endif /* _ASM_POWERPC_NOHASH_64_SLICE_H */
178 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
179 index 8da5d4c1cab2..d5f1c41b7dba 100644
180 --- a/arch/powerpc/include/asm/page.h
181 +++ b/arch/powerpc/include/asm/page.h
182 @@ -344,5 +344,6 @@ typedef struct page *pgtable_t;
183
184 #include <asm-generic/memory_model.h>
185 #endif /* __ASSEMBLY__ */
186 +#include <asm/slice.h>
187
188 #endif /* _ASM_POWERPC_PAGE_H */
189 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
190 index c4d9654bd637..af04acdb873f 100644
191 --- a/arch/powerpc/include/asm/page_64.h
192 +++ b/arch/powerpc/include/asm/page_64.h
193 @@ -86,65 +86,6 @@ extern u64 ppc64_pft_size;
194
195 #endif /* __ASSEMBLY__ */
196
197 -#ifdef CONFIG_PPC_MM_SLICES
198 -
199 -#define SLICE_LOW_SHIFT 28
200 -#define SLICE_HIGH_SHIFT 40
201 -
202 -#define SLICE_LOW_TOP (0x100000000ul)
203 -#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
204 -#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
205 -
206 -#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
207 -#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
208 -
209 -#ifndef __ASSEMBLY__
210 -struct mm_struct;
211 -
212 -extern unsigned long slice_get_unmapped_area(unsigned long addr,
213 - unsigned long len,
214 - unsigned long flags,
215 - unsigned int psize,
216 - int topdown);
217 -
218 -extern unsigned int get_slice_psize(struct mm_struct *mm,
219 - unsigned long addr);
220 -
221 -extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
222 -extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
223 - unsigned long len, unsigned int psize);
224 -
225 -#endif /* __ASSEMBLY__ */
226 -#else
227 -#define slice_init()
228 -#ifdef CONFIG_PPC_STD_MMU_64
229 -#define get_slice_psize(mm, addr) ((mm)->context.user_psize)
230 -#define slice_set_user_psize(mm, psize) \
231 -do { \
232 - (mm)->context.user_psize = (psize); \
233 - (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
234 -} while (0)
235 -#else /* CONFIG_PPC_STD_MMU_64 */
236 -#ifdef CONFIG_PPC_64K_PAGES
237 -#define get_slice_psize(mm, addr) MMU_PAGE_64K
238 -#else /* CONFIG_PPC_64K_PAGES */
239 -#define get_slice_psize(mm, addr) MMU_PAGE_4K
240 -#endif /* !CONFIG_PPC_64K_PAGES */
241 -#define slice_set_user_psize(mm, psize) do { BUG(); } while(0)
242 -#endif /* !CONFIG_PPC_STD_MMU_64 */
243 -
244 -#define slice_set_range_psize(mm, start, len, psize) \
245 - slice_set_user_psize((mm), (psize))
246 -#endif /* CONFIG_PPC_MM_SLICES */
247 -
248 -#ifdef CONFIG_HUGETLB_PAGE
249 -
250 -#ifdef CONFIG_PPC_MM_SLICES
251 -#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
252 -#endif
253 -
254 -#endif /* !CONFIG_HUGETLB_PAGE */
255 -
256 #define VM_DATA_DEFAULT_FLAGS \
257 (is_32bit_task() ? \
258 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
259 diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h
260 new file mode 100644
261 index 000000000000..172711fadb1c
262 --- /dev/null
263 +++ b/arch/powerpc/include/asm/slice.h
264 @@ -0,0 +1,42 @@
265 +/* SPDX-License-Identifier: GPL-2.0 */
266 +#ifndef _ASM_POWERPC_SLICE_H
267 +#define _ASM_POWERPC_SLICE_H
268 +
269 +#ifdef CONFIG_PPC_BOOK3S_64
270 +#include <asm/book3s/64/slice.h>
271 +#elif defined(CONFIG_PPC64)
272 +#include <asm/nohash/64/slice.h>
273 +#elif defined(CONFIG_PPC_MMU_NOHASH)
274 +#include <asm/nohash/32/slice.h>
275 +#endif
276 +
277 +#ifdef CONFIG_PPC_MM_SLICES
278 +
279 +#ifdef CONFIG_HUGETLB_PAGE
280 +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
281 +#endif
282 +#define HAVE_ARCH_UNMAPPED_AREA
283 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
284 +
285 +#ifndef __ASSEMBLY__
286 +
287 +struct mm_struct;
288 +
289 +unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
290 + unsigned long flags, unsigned int psize,
291 + int topdown);
292 +
293 +unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr);
294 +
295 +void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
296 +void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
297 + unsigned long len, unsigned int psize);
298 +#endif /* __ASSEMBLY__ */
299 +
300 +#else /* CONFIG_PPC_MM_SLICES */
301 +
302 +#define slice_set_range_psize(mm, start, len, psize) \
303 + slice_set_user_psize((mm), (psize))
304 +#endif /* CONFIG_PPC_MM_SLICES */
305 +
306 +#endif /* _ASM_POWERPC_SLICE_H */
307 diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
308 index b4fcb54b9686..008447664643 100644
309 --- a/arch/powerpc/kernel/setup-common.c
310 +++ b/arch/powerpc/kernel/setup-common.c
311 @@ -915,6 +915,8 @@ void __init setup_arch(char **cmdline_p)
312 #ifdef CONFIG_PPC_MM_SLICES
313 #ifdef CONFIG_PPC64
314 init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
315 +#elif defined(CONFIG_PPC_8xx)
316 + init_mm.context.addr_limit = DEFAULT_MAP_WINDOW;
317 #else
318 #error "context.addr_limit not initialized."
319 #endif
320 diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
321 index f29212e40f40..0be77709446c 100644
322 --- a/arch/powerpc/mm/8xx_mmu.c
323 +++ b/arch/powerpc/mm/8xx_mmu.c
324 @@ -192,7 +192,7 @@ void set_context(unsigned long id, pgd_t *pgd)
325 mtspr(SPRN_M_TW, __pa(pgd) - offset);
326
327 /* Update context */
328 - mtspr(SPRN_M_CASID, id);
329 + mtspr(SPRN_M_CASID, id - 1);
330 /* sync */
331 mb();
332 }
333 diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
334 index 1571a498a33f..4c9e5f9c7a44 100644
335 --- a/arch/powerpc/mm/hugetlbpage.c
336 +++ b/arch/powerpc/mm/hugetlbpage.c
337 @@ -552,9 +552,11 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
338 struct hstate *hstate = hstate_file(file);
339 int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
340
341 +#ifdef CONFIG_PPC_RADIX_MMU
342 if (radix_enabled())
343 return radix__hugetlb_get_unmapped_area(file, addr, len,
344 pgoff, flags);
345 +#endif
346 return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
347 }
348 #endif
349 diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
350 index 4554d6527682..e2b28b3a512e 100644
351 --- a/arch/powerpc/mm/mmu_context_nohash.c
352 +++ b/arch/powerpc/mm/mmu_context_nohash.c
353 @@ -331,6 +331,20 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
354 {
355 pr_hard("initing context for mm @%p\n", mm);
356
357 +#ifdef CONFIG_PPC_MM_SLICES
358 + if (!mm->context.addr_limit)
359 + mm->context.addr_limit = DEFAULT_MAP_WINDOW;
360 +
361 + /*
362 + * We have MMU_NO_CONTEXT set to be ~0. Hence check
363 + * explicitly against context.id == 0. This ensures that we properly
364 + * initialize context slice details for newly allocated mm's (which will
365 + * have id == 0) and don't alter context slice inherited via fork (which
366 + * will have id != 0).
367 + */
368 + if (mm->context.id == 0)
369 + slice_set_user_psize(mm, mmu_virtual_psize);
370 +#endif
371 mm->context.id = MMU_NO_CONTEXT;
372 mm->context.active = 0;
373 return 0;
374 @@ -428,8 +442,8 @@ void __init mmu_context_init(void)
375 * -- BenH
376 */
377 if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
378 - first_context = 0;
379 - last_context = 15;
380 + first_context = 1;
381 + last_context = 16;
382 no_selective_tlbil = true;
383 } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
384 first_context = 1;
385 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
386 index a4f93699194b..8baaa6c6f21c 100644
387 --- a/arch/powerpc/mm/slice.c
388 +++ b/arch/powerpc/mm/slice.c
389 @@ -73,10 +73,12 @@ static void slice_range_to_mask(unsigned long start, unsigned long len,
390 unsigned long end = start + len - 1;
391
392 ret->low_slices = 0;
393 - bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
394 + if (SLICE_NUM_HIGH)
395 + bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
396
397 if (start < SLICE_LOW_TOP) {
398 - unsigned long mend = min(end, (SLICE_LOW_TOP - 1));
399 + unsigned long mend = min(end,
400 + (unsigned long)(SLICE_LOW_TOP - 1));
401
402 ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
403 - (1u << GET_LOW_SLICE_INDEX(start));
404 @@ -113,11 +115,13 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
405 unsigned long start = slice << SLICE_HIGH_SHIFT;
406 unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);
407
408 +#ifdef CONFIG_PPC64
409 /* Hack, so that each addresses is controlled by exactly one
410 * of the high or low area bitmaps, the first high area starts
411 * at 4GB, not 0 */
412 if (start == 0)
413 start = SLICE_LOW_TOP;
414 +#endif
415
416 return !slice_area_is_free(mm, start, end - start);
417 }
418 @@ -127,7 +131,8 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
419 unsigned long i;
420
421 ret->low_slices = 0;
422 - bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
423 + if (SLICE_NUM_HIGH)
424 + bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
425
426 for (i = 0; i < SLICE_NUM_LOW; i++)
427 if (!slice_low_has_vma(mm, i))
428 @@ -149,7 +154,8 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
429 u64 lpsizes;
430
431 ret->low_slices = 0;
432 - bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
433 + if (SLICE_NUM_HIGH)
434 + bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
435
436 lpsizes = mm->context.low_slices_psize;
437 for (i = 0; i < SLICE_NUM_LOW; i++)
438 @@ -171,6 +177,10 @@ static int slice_check_fit(struct mm_struct *mm,
439 DECLARE_BITMAP(result, SLICE_NUM_HIGH);
440 unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.addr_limit);
441
442 + if (!SLICE_NUM_HIGH)
443 + return (mask.low_slices & available.low_slices) ==
444 + mask.low_slices;
445 +
446 bitmap_and(result, mask.high_slices,
447 available.high_slices, slice_count);
448
449 @@ -180,6 +190,7 @@ static int slice_check_fit(struct mm_struct *mm,
450
451 static void slice_flush_segments(void *parm)
452 {
453 +#ifdef CONFIG_PPC64
454 struct mm_struct *mm = parm;
455 unsigned long flags;
456
457 @@ -191,6 +202,7 @@ static void slice_flush_segments(void *parm)
458 local_irq_save(flags);
459 slb_flush_and_rebolt();
460 local_irq_restore(flags);
461 +#endif
462 }
463
464 static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
465 @@ -379,21 +391,21 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
466
467 static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
468 {
469 - DECLARE_BITMAP(result, SLICE_NUM_HIGH);
470 -
471 dst->low_slices |= src->low_slices;
472 - bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
473 - bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
474 + if (!SLICE_NUM_HIGH)
475 + return;
476 + bitmap_or(dst->high_slices, dst->high_slices, src->high_slices,
477 + SLICE_NUM_HIGH);
478 }
479
480 static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src)
481 {
482 - DECLARE_BITMAP(result, SLICE_NUM_HIGH);
483 -
484 dst->low_slices &= ~src->low_slices;
485
486 - bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
487 - bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
488 + if (!SLICE_NUM_HIGH)
489 + return;
490 + bitmap_andnot(dst->high_slices, dst->high_slices, src->high_slices,
491 + SLICE_NUM_HIGH);
492 }
493
494 #ifdef CONFIG_PPC_64K_PAGES
495 @@ -441,14 +453,17 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
496 * init different masks
497 */
498 mask.low_slices = 0;
499 - bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);
500
501 /* silence stupid warning */;
502 potential_mask.low_slices = 0;
503 - bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);
504
505 compat_mask.low_slices = 0;
506 - bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);
507 +
508 + if (SLICE_NUM_HIGH) {
509 + bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);
510 + bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);
511 + bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);
512 + }
513
514 /* Sanity checks */
515 BUG_ON(mm->task_size == 0);
516 @@ -586,7 +601,9 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
517 convert:
518 slice_andnot_mask(&mask, &good_mask);
519 slice_andnot_mask(&mask, &compat_mask);
520 - if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) {
521 + if (mask.low_slices ||
522 + (SLICE_NUM_HIGH &&
523 + !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH))) {
524 slice_convert(mm, mask, psize);
525 if (psize > MMU_PAGE_BASE)
526 on_each_cpu(slice_flush_segments, mm, 1);
527 diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
528 index a78f255111f2..3ce376b42330 100644
529 --- a/arch/powerpc/platforms/Kconfig.cputype
530 +++ b/arch/powerpc/platforms/Kconfig.cputype
531 @@ -325,6 +325,7 @@ config PPC_BOOK3E_MMU
532 config PPC_MM_SLICES
533 bool
534 default y if PPC_STD_MMU_64
535 + default y if PPC_8xx && HUGETLB_PAGE
536 default n
537
538 config PPC_HAVE_PMU_SUPPORT
539 diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
540 index 259c75d7a2a0..dbcb01006749 100644
541 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
542 +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
543 @@ -94,6 +94,11 @@ static struct smca_bank_name smca_names[] = {
544 [SMCA_SMU] = { "smu", "System Management Unit" },
545 };
546
547 +static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
548 +{
549 + [0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
550 +};
551 +
552 const char *smca_get_name(enum smca_bank_types t)
553 {
554 if (t >= N_SMCA_BANK_TYPES)
555 @@ -429,52 +434,51 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
556 wrmsr(MSR_CU_DEF_ERR, low, high);
557 }
558
559 -static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
560 - unsigned int bank, unsigned int block)
561 +static u32 smca_get_block_address(unsigned int cpu, unsigned int bank,
562 + unsigned int block)
563 {
564 - u32 addr = 0, offset = 0;
565 + u32 low, high;
566 + u32 addr = 0;
567
568 - if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
569 + if (smca_get_bank_type(bank) == SMCA_RESERVED)
570 return addr;
571
572 - /* Get address from already initialized block. */
573 - if (per_cpu(threshold_banks, cpu)) {
574 - struct threshold_bank *bankp = per_cpu(threshold_banks, cpu)[bank];
575 + if (!block)
576 + return MSR_AMD64_SMCA_MCx_MISC(bank);
577
578 - if (bankp && bankp->blocks) {
579 - struct threshold_block *blockp = &bankp->blocks[block];
580 + /* Check our cache first: */
581 + if (smca_bank_addrs[bank][block] != -1)
582 + return smca_bank_addrs[bank][block];
583
584 - if (blockp)
585 - return blockp->address;
586 - }
587 - }
588 + /*
589 + * For SMCA enabled processors, BLKPTR field of the first MISC register
590 + * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
591 + */
592 + if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
593 + goto out;
594
595 - if (mce_flags.smca) {
596 - if (smca_get_bank_type(bank) == SMCA_RESERVED)
597 - return addr;
598 + if (!(low & MCI_CONFIG_MCAX))
599 + goto out;
600
601 - if (!block) {
602 - addr = MSR_AMD64_SMCA_MCx_MISC(bank);
603 - } else {
604 - /*
605 - * For SMCA enabled processors, BLKPTR field of the
606 - * first MISC register (MCx_MISC0) indicates presence of
607 - * additional MISC register set (MISC1-4).
608 - */
609 - u32 low, high;
610 + if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
611 + (low & MASK_BLKPTR_LO))
612 + addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
613
614 - if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
615 - return addr;
616 +out:
617 + smca_bank_addrs[bank][block] = addr;
618 + return addr;
619 +}
620
621 - if (!(low & MCI_CONFIG_MCAX))
622 - return addr;
623 +static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
624 + unsigned int bank, unsigned int block)
625 +{
626 + u32 addr = 0, offset = 0;
627
628 - if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
629 - (low & MASK_BLKPTR_LO))
630 - addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
631 - }
632 + if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
633 return addr;
634 - }
635 +
636 + if (mce_flags.smca)
637 + return smca_get_block_address(cpu, bank, block);
638
639 /* Fall back to method we used for older processors: */
640 switch (block) {
641 diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
642 index 4a038dcf5361..bc1cb284111c 100644
643 --- a/drivers/dma-buf/dma-buf.c
644 +++ b/drivers/dma-buf/dma-buf.c
645 @@ -625,7 +625,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
646 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
647 enum dma_data_direction direction)
648 {
649 - struct sg_table *sg_table = ERR_PTR(-EINVAL);
650 + struct sg_table *sg_table;
651
652 might_sleep();
653
654 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
655 index b33935fcf428..e6c6994e74ba 100644
656 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
657 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
658 @@ -176,10 +176,10 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
659 cz_dpm_powerup_uvd(hwmgr);
660 cgs_set_clockgating_state(hwmgr->device,
661 AMD_IP_BLOCK_TYPE_UVD,
662 - AMD_PG_STATE_UNGATE);
663 + AMD_CG_STATE_UNGATE);
664 cgs_set_powergating_state(hwmgr->device,
665 AMD_IP_BLOCK_TYPE_UVD,
666 - AMD_CG_STATE_UNGATE);
667 + AMD_PG_STATE_UNGATE);
668 cz_dpm_update_uvd_dpm(hwmgr, false);
669 }
670
671 @@ -208,11 +208,11 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
672 cgs_set_clockgating_state(
673 hwmgr->device,
674 AMD_IP_BLOCK_TYPE_VCE,
675 - AMD_PG_STATE_UNGATE);
676 + AMD_CG_STATE_UNGATE);
677 cgs_set_powergating_state(
678 hwmgr->device,
679 AMD_IP_BLOCK_TYPE_VCE,
680 - AMD_CG_STATE_UNGATE);
681 + AMD_PG_STATE_UNGATE);
682 cz_dpm_update_vce_dpm(hwmgr);
683 cz_enable_disable_vce_dpm(hwmgr, true);
684 return 0;
685 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
686 index 261b828ad590..2f3509be226f 100644
687 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
688 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
689 @@ -162,7 +162,7 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
690 AMD_CG_STATE_UNGATE);
691 cgs_set_powergating_state(hwmgr->device,
692 AMD_IP_BLOCK_TYPE_UVD,
693 - AMD_CG_STATE_UNGATE);
694 + AMD_PG_STATE_UNGATE);
695 smu7_update_uvd_dpm(hwmgr, false);
696 }
697
698 diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
699 index 08af8d6b844b..493d8f56d14e 100644
700 --- a/drivers/gpu/drm/drm_dp_helper.c
701 +++ b/drivers/gpu/drm/drm_dp_helper.c
702 @@ -1139,6 +1139,7 @@ int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE])
703 static const u16 psr_setup_time_us[] = {
704 PSR_SETUP_TIME(330),
705 PSR_SETUP_TIME(275),
706 + PSR_SETUP_TIME(220),
707 PSR_SETUP_TIME(165),
708 PSR_SETUP_TIME(110),
709 PSR_SETUP_TIME(55),
710 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
711 index 3b2c0538e48d..90359c7954c8 100644
712 --- a/drivers/gpu/drm/i915/i915_gem.c
713 +++ b/drivers/gpu/drm/i915/i915_gem.c
714 @@ -3378,24 +3378,12 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
715 return 0;
716 }
717
718 -static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms)
719 -{
720 - return wait_for(intel_engine_is_idle(engine), timeout_ms);
721 -}
722 -
723 static int wait_for_engines(struct drm_i915_private *i915)
724 {
725 - struct intel_engine_cs *engine;
726 - enum intel_engine_id id;
727 -
728 - for_each_engine(engine, i915, id) {
729 - if (GEM_WARN_ON(wait_for_engine(engine, 50))) {
730 - i915_gem_set_wedged(i915);
731 - return -EIO;
732 - }
733 -
734 - GEM_BUG_ON(intel_engine_get_seqno(engine) !=
735 - intel_engine_last_submit(engine));
736 + if (wait_for(intel_engines_are_idle(i915), 50)) {
737 + DRM_ERROR("Failed to idle engines, declaring wedged!\n");
738 + i915_gem_set_wedged(i915);
739 + return -EIO;
740 }
741
742 return 0;
743 @@ -4575,7 +4563,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
744 ret = i915_gem_wait_for_idle(dev_priv,
745 I915_WAIT_INTERRUPTIBLE |
746 I915_WAIT_LOCKED);
747 - if (ret)
748 + if (ret && ret != -EIO)
749 goto err_unlock;
750
751 assert_kernel_context_is_current(dev_priv);
752 @@ -4619,11 +4607,12 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
753 * machine in an unusable condition.
754 */
755 i915_gem_sanitize(dev_priv);
756 - goto out_rpm_put;
757 +
758 + intel_runtime_pm_put(dev_priv);
759 + return 0;
760
761 err_unlock:
762 mutex_unlock(&dev->struct_mutex);
763 -out_rpm_put:
764 intel_runtime_pm_put(dev_priv);
765 return ret;
766 }
767 diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
768 index 240308f1b6dd..dae4e22a2c3f 100644
769 --- a/drivers/gpu/drm/i915/intel_lvds.c
770 +++ b/drivers/gpu/drm/i915/intel_lvds.c
771 @@ -565,6 +565,36 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
772 return NOTIFY_OK;
773 }
774
775 +static int
776 +intel_lvds_connector_register(struct drm_connector *connector)
777 +{
778 + struct intel_lvds_connector *lvds = to_lvds_connector(connector);
779 + int ret;
780 +
781 + ret = intel_connector_register(connector);
782 + if (ret)
783 + return ret;
784 +
785 + lvds->lid_notifier.notifier_call = intel_lid_notify;
786 + if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
787 + DRM_DEBUG_KMS("lid notifier registration failed\n");
788 + lvds->lid_notifier.notifier_call = NULL;
789 + }
790 +
791 + return 0;
792 +}
793 +
794 +static void
795 +intel_lvds_connector_unregister(struct drm_connector *connector)
796 +{
797 + struct intel_lvds_connector *lvds = to_lvds_connector(connector);
798 +
799 + if (lvds->lid_notifier.notifier_call)
800 + acpi_lid_notifier_unregister(&lvds->lid_notifier);
801 +
802 + intel_connector_unregister(connector);
803 +}
804 +
805 /**
806 * intel_lvds_destroy - unregister and free LVDS structures
807 * @connector: connector to free
808 @@ -577,9 +607,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
809 struct intel_lvds_connector *lvds_connector =
810 to_lvds_connector(connector);
811
812 - if (lvds_connector->lid_notifier.notifier_call)
813 - acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
814 -
815 if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
816 kfree(lvds_connector->base.edid);
817
818 @@ -600,8 +627,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
819 .fill_modes = drm_helper_probe_single_connector_modes,
820 .atomic_get_property = intel_digital_connector_atomic_get_property,
821 .atomic_set_property = intel_digital_connector_atomic_set_property,
822 - .late_register = intel_connector_register,
823 - .early_unregister = intel_connector_unregister,
824 + .late_register = intel_lvds_connector_register,
825 + .early_unregister = intel_lvds_connector_unregister,
826 .destroy = intel_lvds_destroy,
827 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
828 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
829 @@ -818,6 +845,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
830 DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
831 },
832 },
833 + {
834 + .callback = intel_no_lvds_dmi_callback,
835 + .ident = "Radiant P845",
836 + .matches = {
837 + DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"),
838 + DMI_MATCH(DMI_PRODUCT_NAME, "P845"),
839 + },
840 + },
841
842 { } /* terminating entry */
843 };
844 @@ -1149,12 +1184,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
845
846 lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
847
848 - lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
849 - if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
850 - DRM_DEBUG_KMS("lid notifier registration failed\n");
851 - lvds_connector->lid_notifier.notifier_call = NULL;
852 - }
853 -
854 return;
855
856 failed:
857 diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
858 index dfb57eaa9f22..58ac786634dc 100644
859 --- a/drivers/hwtracing/intel_th/msu.c
860 +++ b/drivers/hwtracing/intel_th/msu.c
861 @@ -741,8 +741,8 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
862 /* Reset the page to write-back before releasing */
863 set_memory_wb((unsigned long)win->block[i].bdesc, 1);
864 #endif
865 - dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc,
866 - win->block[i].addr);
867 + dma_free_coherent(msc_dev(msc)->parent->parent, size,
868 + win->block[i].bdesc, win->block[i].addr);
869 }
870 kfree(win);
871
872 @@ -777,7 +777,7 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
873 /* Reset the page to write-back before releasing */
874 set_memory_wb((unsigned long)win->block[i].bdesc, 1);
875 #endif
876 - dma_free_coherent(msc_dev(win->msc), PAGE_SIZE,
877 + dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
878 win->block[i].bdesc, win->block[i].addr);
879 }
880
881 diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
882 index f129869e05a9..736862967e32 100644
883 --- a/drivers/hwtracing/stm/core.c
884 +++ b/drivers/hwtracing/stm/core.c
885 @@ -27,6 +27,7 @@
886 #include <linux/stm.h>
887 #include <linux/fs.h>
888 #include <linux/mm.h>
889 +#include <linux/vmalloc.h>
890 #include "stm.h"
891
892 #include <uapi/linux/stm.h>
893 @@ -682,7 +683,7 @@ static void stm_device_release(struct device *dev)
894 {
895 struct stm_device *stm = to_stm_device(dev);
896
897 - kfree(stm);
898 + vfree(stm);
899 }
900
901 int stm_register_device(struct device *parent, struct stm_data *stm_data,
902 @@ -699,7 +700,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
903 return -EINVAL;
904
905 nmasters = stm_data->sw_end - stm_data->sw_start + 1;
906 - stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL);
907 + stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
908 if (!stm)
909 return -ENOMEM;
910
911 @@ -752,7 +753,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
912 /* matches device_initialize() above */
913 put_device(&stm->dev);
914 err_free:
915 - kfree(stm);
916 + vfree(stm);
917
918 return err;
919 }
920 diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
921 index 1d13bf03c758..369a2c632e46 100644
922 --- a/drivers/iio/adc/Kconfig
923 +++ b/drivers/iio/adc/Kconfig
924 @@ -158,6 +158,7 @@ config AT91_SAMA5D2_ADC
925 tristate "Atmel AT91 SAMA5D2 ADC"
926 depends on ARCH_AT91 || COMPILE_TEST
927 depends on HAS_IOMEM
928 + select IIO_BUFFER
929 select IIO_TRIGGERED_BUFFER
930 help
931 Say yes here to build support for Atmel SAMA5D2 ADC which is
932 diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
933 index 47c3d7f32900..07246a6037e3 100644
934 --- a/drivers/iio/adc/ad7793.c
935 +++ b/drivers/iio/adc/ad7793.c
936 @@ -348,55 +348,6 @@ static const u16 ad7793_sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39,
937 static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0,
938 33, 0, 17, 16, 12, 10, 8, 6, 4};
939
940 -static ssize_t ad7793_read_frequency(struct device *dev,
941 - struct device_attribute *attr,
942 - char *buf)
943 -{
944 - struct iio_dev *indio_dev = dev_to_iio_dev(dev);
945 - struct ad7793_state *st = iio_priv(indio_dev);
946 -
947 - return sprintf(buf, "%d\n",
948 - st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]);
949 -}
950 -
951 -static ssize_t ad7793_write_frequency(struct device *dev,
952 - struct device_attribute *attr,
953 - const char *buf,
954 - size_t len)
955 -{
956 - struct iio_dev *indio_dev = dev_to_iio_dev(dev);
957 - struct ad7793_state *st = iio_priv(indio_dev);
958 - long lval;
959 - int i, ret;
960 -
961 - ret = kstrtol(buf, 10, &lval);
962 - if (ret)
963 - return ret;
964 -
965 - if (lval == 0)
966 - return -EINVAL;
967 -
968 - for (i = 0; i < 16; i++)
969 - if (lval == st->chip_info->sample_freq_avail[i])
970 - break;
971 - if (i == 16)
972 - return -EINVAL;
973 -
974 - ret = iio_device_claim_direct_mode(indio_dev);
975 - if (ret)
976 - return ret;
977 - st->mode &= ~AD7793_MODE_RATE(-1);
978 - st->mode |= AD7793_MODE_RATE(i);
979 - ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode);
980 - iio_device_release_direct_mode(indio_dev);
981 -
982 - return len;
983 -}
984 -
985 -static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
986 - ad7793_read_frequency,
987 - ad7793_write_frequency);
988 -
989 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
990 "470 242 123 62 50 39 33 19 17 16 12 10 8 6 4");
991
992 @@ -424,7 +375,6 @@ static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available,
993 ad7793_show_scale_available, NULL, 0);
994
995 static struct attribute *ad7793_attributes[] = {
996 - &iio_dev_attr_sampling_frequency.dev_attr.attr,
997 &iio_const_attr_sampling_frequency_available.dev_attr.attr,
998 &iio_dev_attr_in_m_in_scale_available.dev_attr.attr,
999 NULL
1000 @@ -435,7 +385,6 @@ static const struct attribute_group ad7793_attribute_group = {
1001 };
1002
1003 static struct attribute *ad7797_attributes[] = {
1004 - &iio_dev_attr_sampling_frequency.dev_attr.attr,
1005 &iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr,
1006 NULL
1007 };
1008 @@ -505,6 +454,10 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
1009 *val -= offset;
1010 }
1011 return IIO_VAL_INT;
1012 + case IIO_CHAN_INFO_SAMP_FREQ:
1013 + *val = st->chip_info
1014 + ->sample_freq_avail[AD7793_MODE_RATE(st->mode)];
1015 + return IIO_VAL_INT;
1016 }
1017 return -EINVAL;
1018 }
1019 @@ -542,6 +495,26 @@ static int ad7793_write_raw(struct iio_dev *indio_dev,
1020 break;
1021 }
1022 break;
1023 + case IIO_CHAN_INFO_SAMP_FREQ:
1024 + if (!val) {
1025 + ret = -EINVAL;
1026 + break;
1027 + }
1028 +
1029 + for (i = 0; i < 16; i++)
1030 + if (val == st->chip_info->sample_freq_avail[i])
1031 + break;
1032 +
1033 + if (i == 16) {
1034 + ret = -EINVAL;
1035 + break;
1036 + }
1037 +
1038 + st->mode &= ~AD7793_MODE_RATE(-1);
1039 + st->mode |= AD7793_MODE_RATE(i);
1040 + ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode),
1041 + st->mode);
1042 + break;
1043 default:
1044 ret = -EINVAL;
1045 }
1046 diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
1047 index ff03324dee13..0a7289571b68 100644
1048 --- a/drivers/iio/buffer/industrialio-buffer-dma.c
1049 +++ b/drivers/iio/buffer/industrialio-buffer-dma.c
1050 @@ -587,7 +587,7 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);
1051 * Should be used as the set_length callback for iio_buffer_access_ops
1052 * struct for DMA buffers.
1053 */
1054 -int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length)
1055 +int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
1056 {
1057 /* Avoid an invalid state */
1058 if (length < 2)
1059 diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
1060 index 047fe757ab97..70c302a93d7f 100644
1061 --- a/drivers/iio/buffer/kfifo_buf.c
1062 +++ b/drivers/iio/buffer/kfifo_buf.c
1063 @@ -22,11 +22,18 @@ struct iio_kfifo {
1064 #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
1065
1066 static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
1067 - int bytes_per_datum, int length)
1068 + size_t bytes_per_datum, unsigned int length)
1069 {
1070 if ((length == 0) || (bytes_per_datum == 0))
1071 return -EINVAL;
1072
1073 + /*
1074 + * Make sure we don't overflow an unsigned int after kfifo rounds up to
1075 + * the next power of 2.
1076 + */
1077 + if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum)
1078 + return -EINVAL;
1079 +
1080 return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
1081 bytes_per_datum, GFP_KERNEL);
1082 }
1083 @@ -67,7 +74,7 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
1084 return 0;
1085 }
1086
1087 -static int iio_set_length_kfifo(struct iio_buffer *r, int length)
1088 +static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length)
1089 {
1090 /* Avoid an invalid state */
1091 if (length < 2)
1092 diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
1093 index 77515638c55c..896cfd9303b0 100644
1094 --- a/drivers/infiniband/core/cache.c
1095 +++ b/drivers/infiniband/core/cache.c
1096 @@ -434,7 +434,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
1097 return -EINVAL;
1098
1099 if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
1100 - return -EAGAIN;
1101 + return -EINVAL;
1102
1103 memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
1104 if (attr) {
1105 diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
1106 index 29f99529b187..cfcb32559925 100644
1107 --- a/drivers/input/mouse/elan_i2c_smbus.c
1108 +++ b/drivers/input/mouse/elan_i2c_smbus.c
1109 @@ -130,7 +130,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client,
1110 bool max_baseline, u8 *value)
1111 {
1112 int error;
1113 - u8 val[3];
1114 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1115
1116 error = i2c_smbus_read_block_data(client,
1117 max_baseline ?
1118 @@ -149,7 +149,7 @@ static int elan_smbus_get_version(struct i2c_client *client,
1119 bool iap, u8 *version)
1120 {
1121 int error;
1122 - u8 val[3];
1123 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1124
1125 error = i2c_smbus_read_block_data(client,
1126 iap ? ETP_SMBUS_IAP_VERSION_CMD :
1127 @@ -170,7 +170,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
1128 u8 *clickpad)
1129 {
1130 int error;
1131 - u8 val[3];
1132 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1133
1134 error = i2c_smbus_read_block_data(client,
1135 ETP_SMBUS_SM_VERSION_CMD, val);
1136 @@ -188,7 +188,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
1137 static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
1138 {
1139 int error;
1140 - u8 val[3];
1141 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1142
1143 error = i2c_smbus_read_block_data(client,
1144 ETP_SMBUS_UNIQUEID_CMD, val);
1145 @@ -205,7 +205,7 @@ static int elan_smbus_get_checksum(struct i2c_client *client,
1146 bool iap, u16 *csum)
1147 {
1148 int error;
1149 - u8 val[3];
1150 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1151
1152 error = i2c_smbus_read_block_data(client,
1153 iap ? ETP_SMBUS_FW_CHECKSUM_CMD :
1154 @@ -226,7 +226,7 @@ static int elan_smbus_get_max(struct i2c_client *client,
1155 {
1156 int ret;
1157 int error;
1158 - u8 val[3];
1159 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1160
1161 ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
1162 if (ret != 3) {
1163 @@ -246,7 +246,7 @@ static int elan_smbus_get_resolution(struct i2c_client *client,
1164 {
1165 int ret;
1166 int error;
1167 - u8 val[3];
1168 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1169
1170 ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val);
1171 if (ret != 3) {
1172 @@ -267,7 +267,7 @@ static int elan_smbus_get_num_traces(struct i2c_client *client,
1173 {
1174 int ret;
1175 int error;
1176 - u8 val[3];
1177 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1178
1179 ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val);
1180 if (ret != 3) {
1181 @@ -294,7 +294,7 @@ static int elan_smbus_iap_get_mode(struct i2c_client *client,
1182 {
1183 int error;
1184 u16 constant;
1185 - u8 val[3];
1186 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1187
1188 error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val);
1189 if (error < 0) {
1190 @@ -345,7 +345,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client)
1191 int len;
1192 int error;
1193 enum tp_mode mode;
1194 - u8 val[3];
1195 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1196 u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06};
1197 u16 password;
1198
1199 @@ -419,7 +419,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
1200 struct device *dev = &client->dev;
1201 int error;
1202 u16 result;
1203 - u8 val[3];
1204 + u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
1205
1206 /*
1207 * Due to the limitation of smbus protocol limiting
1208 diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
1209 index a246fc686bb7..6c4bbd38700e 100644
1210 --- a/drivers/input/mouse/synaptics.c
1211 +++ b/drivers/input/mouse/synaptics.c
1212 @@ -172,6 +172,12 @@ static const char * const smbus_pnp_ids[] = {
1213 "LEN0048", /* X1 Carbon 3 */
1214 "LEN0046", /* X250 */
1215 "LEN004a", /* W541 */
1216 + "LEN0071", /* T480 */
1217 + "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
1218 + "LEN0073", /* X1 Carbon G5 (Elantech) */
1219 + "LEN0092", /* X1 Carbon 6 */
1220 + "LEN0096", /* X280 */
1221 + "LEN0097", /* X280 -> ALPS trackpoint */
1222 "LEN200f", /* T450s */
1223 NULL
1224 };
1225 diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
1226 index a2c1ca5c76d1..e1660b92b20c 100644
1227 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
1228 +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
1229 @@ -372,16 +372,15 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
1230
1231 /*
1232 * Determine IFS values
1233 - * - Use TXOP_BACKOFF for probe and management frames except beacons
1234 + * - Use TXOP_BACKOFF for management frames except beacons
1235 * - Use TXOP_SIFS for fragment bursts
1236 * - Use TXOP_HTTXOP for everything else
1237 *
1238 * Note: rt2800 devices won't use CTS protection (if used)
1239 * for frames not transmitted with TXOP_HTTXOP
1240 */
1241 - if ((ieee80211_is_mgmt(hdr->frame_control) &&
1242 - !ieee80211_is_beacon(hdr->frame_control)) ||
1243 - (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
1244 + if (ieee80211_is_mgmt(hdr->frame_control) &&
1245 + !ieee80211_is_beacon(hdr->frame_control))
1246 txdesc->u.ht.txop = TXOP_BACKOFF;
1247 else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
1248 txdesc->u.ht.txop = TXOP_SIFS;
1249 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
1250 index 9cff6bc4049c..cf551785eb08 100644
1251 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
1252 +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
1253 @@ -299,9 +299,6 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
1254 writeVal = 0x00000000;
1255 if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
1256 writeVal = writeVal - 0x06060606;
1257 - else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
1258 - TXHIGHPWRLEVEL_BT2)
1259 - writeVal = writeVal;
1260 *(p_outwriteval + rf) = writeVal;
1261 }
1262 }
1263 diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
1264 index 73b724143be0..c91662927de0 100644
1265 --- a/drivers/pci/host/pci-hyperv.c
1266 +++ b/drivers/pci/host/pci-hyperv.c
1267 @@ -531,6 +531,8 @@ struct hv_pci_compl {
1268 s32 completion_status;
1269 };
1270
1271 +static void hv_pci_onchannelcallback(void *context);
1272 +
1273 /**
1274 * hv_pci_generic_compl() - Invoked for a completion packet
1275 * @context: Set up by the sender of the packet.
1276 @@ -675,6 +677,31 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
1277 }
1278 }
1279
1280 +static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
1281 +{
1282 + u16 ret;
1283 + unsigned long flags;
1284 + void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET +
1285 + PCI_VENDOR_ID;
1286 +
1287 + spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
1288 +
1289 + /* Choose the function to be read. (See comment above) */
1290 + writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
1291 + /* Make sure the function was chosen before we start reading. */
1292 + mb();
1293 + /* Read from that function's config space. */
1294 + ret = readw(addr);
1295 + /*
1296 + * mb() is not required here, because the spin_unlock_irqrestore()
1297 + * is a barrier.
1298 + */
1299 +
1300 + spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
1301 +
1302 + return ret;
1303 +}
1304 +
1305 /**
1306 * _hv_pcifront_write_config() - Internal PCI config write
1307 * @hpdev: The PCI driver's representation of the device
1308 @@ -1121,8 +1148,37 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1309 * Since this function is called with IRQ locks held, can't
1310 * do normal wait for completion; instead poll.
1311 */
1312 - while (!try_wait_for_completion(&comp.comp_pkt.host_event))
1313 + while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
1314 + /* 0xFFFF means an invalid PCI VENDOR ID. */
1315 + if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
1316 + dev_err_once(&hbus->hdev->device,
1317 + "the device has gone\n");
1318 + goto free_int_desc;
1319 + }
1320 +
1321 + /*
1322 + * When the higher level interrupt code calls us with
1323 + * interrupt disabled, we must poll the channel by calling
1324 + * the channel callback directly when channel->target_cpu is
1325 + * the current CPU. When the higher level interrupt code
1326 + * calls us with interrupt enabled, let's add the
1327 + * local_bh_disable()/enable() to avoid race.
1328 + */
1329 + local_bh_disable();
1330 +
1331 + if (hbus->hdev->channel->target_cpu == smp_processor_id())
1332 + hv_pci_onchannelcallback(hbus);
1333 +
1334 + local_bh_enable();
1335 +
1336 + if (hpdev->state == hv_pcichild_ejecting) {
1337 + dev_err_once(&hbus->hdev->device,
1338 + "the device is being ejected\n");
1339 + goto free_int_desc;
1340 + }
1341 +
1342 udelay(100);
1343 + }
1344
1345 if (comp.comp_pkt.completion_status < 0) {
1346 dev_err(&hbus->hdev->device,
1347 diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
1348 index 19cd357bb464..ff491da64dab 100644
1349 --- a/drivers/pinctrl/qcom/pinctrl-msm.c
1350 +++ b/drivers/pinctrl/qcom/pinctrl-msm.c
1351 @@ -818,7 +818,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
1352 return -EINVAL;
1353
1354 chip = &pctrl->chip;
1355 - chip->base = -1;
1356 + chip->base = 0;
1357 chip->ngpio = ngpio;
1358 chip->label = dev_name(pctrl->dev);
1359 chip->parent = pctrl->dev;
1360 diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
1361 index 1baf720faf69..87e9747d229a 100644
1362 --- a/drivers/platform/chrome/cros_ec_lpc.c
1363 +++ b/drivers/platform/chrome/cros_ec_lpc.c
1364 @@ -54,7 +54,6 @@ static int ec_response_timed_out(void)
1365 static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
1366 struct cros_ec_command *msg)
1367 {
1368 - struct ec_host_request *request;
1369 struct ec_host_response response;
1370 u8 sum;
1371 int ret = 0;
1372 @@ -65,8 +64,6 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
1373 /* Write buffer */
1374 cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_PACKET, ret, ec->dout);
1375
1376 - request = (struct ec_host_request *)ec->dout;
1377 -
1378 /* Here we go */
1379 sum = EC_COMMAND_PROTOCOL_3;
1380 cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_CMD, 1, &sum);
1381 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
1382 index 36f6190931bc..456ce9f19569 100644
1383 --- a/drivers/scsi/scsi_transport_srp.c
1384 +++ b/drivers/scsi/scsi_transport_srp.c
1385 @@ -51,6 +51,8 @@ struct srp_internal {
1386 struct transport_container rport_attr_cont;
1387 };
1388
1389 +static int scsi_is_srp_rport(const struct device *dev);
1390 +
1391 #define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)
1392
1393 #define dev_to_rport(d) container_of(d, struct srp_rport, dev)
1394 @@ -60,9 +62,24 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
1395 return dev_to_shost(r->dev.parent);
1396 }
1397
1398 +static int find_child_rport(struct device *dev, void *data)
1399 +{
1400 + struct device **child = data;
1401 +
1402 + if (scsi_is_srp_rport(dev)) {
1403 + WARN_ON_ONCE(*child);
1404 + *child = dev;
1405 + }
1406 + return 0;
1407 +}
1408 +
1409 static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
1410 {
1411 - return transport_class_to_srp_rport(&shost->shost_gendev);
1412 + struct device *child = NULL;
1413 +
1414 + WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child,
1415 + find_child_rport) < 0);
1416 + return child ? dev_to_rport(child) : NULL;
1417 }
1418
1419 /**
1420 @@ -600,7 +617,8 @@ enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
1421 struct srp_rport *rport = shost_to_rport(shost);
1422
1423 pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
1424 - return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 &&
1425 + return rport && rport->fast_io_fail_tmo < 0 &&
1426 + rport->dev_loss_tmo < 0 &&
1427 i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
1428 BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
1429 }
1430 diff --git a/drivers/soc/lantiq/gphy.c b/drivers/soc/lantiq/gphy.c
1431 index 8d8659463b3e..feeb17cebc25 100644
1432 --- a/drivers/soc/lantiq/gphy.c
1433 +++ b/drivers/soc/lantiq/gphy.c
1434 @@ -30,7 +30,6 @@ struct xway_gphy_priv {
1435 struct clk *gphy_clk_gate;
1436 struct reset_control *gphy_reset;
1437 struct reset_control *gphy_reset2;
1438 - struct notifier_block gphy_reboot_nb;
1439 void __iomem *membase;
1440 char *fw_name;
1441 };
1442 @@ -64,24 +63,6 @@ static const struct of_device_id xway_gphy_match[] = {
1443 };
1444 MODULE_DEVICE_TABLE(of, xway_gphy_match);
1445
1446 -static struct xway_gphy_priv *to_xway_gphy_priv(struct notifier_block *nb)
1447 -{
1448 - return container_of(nb, struct xway_gphy_priv, gphy_reboot_nb);
1449 -}
1450 -
1451 -static int xway_gphy_reboot_notify(struct notifier_block *reboot_nb,
1452 - unsigned long code, void *unused)
1453 -{
1454 - struct xway_gphy_priv *priv = to_xway_gphy_priv(reboot_nb);
1455 -
1456 - if (priv) {
1457 - reset_control_assert(priv->gphy_reset);
1458 - reset_control_assert(priv->gphy_reset2);
1459 - }
1460 -
1461 - return NOTIFY_DONE;
1462 -}
1463 -
1464 static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv,
1465 dma_addr_t *dev_addr)
1466 {
1467 @@ -205,14 +186,6 @@ static int xway_gphy_probe(struct platform_device *pdev)
1468 reset_control_deassert(priv->gphy_reset);
1469 reset_control_deassert(priv->gphy_reset2);
1470
1471 - /* assert the gphy reset because it can hang after a reboot: */
1472 - priv->gphy_reboot_nb.notifier_call = xway_gphy_reboot_notify;
1473 - priv->gphy_reboot_nb.priority = -1;
1474 -
1475 - ret = register_reboot_notifier(&priv->gphy_reboot_nb);
1476 - if (ret)
1477 - dev_warn(dev, "Failed to register reboot notifier\n");
1478 -
1479 platform_set_drvdata(pdev, priv);
1480
1481 return ret;
1482 @@ -220,21 +193,12 @@ static int xway_gphy_probe(struct platform_device *pdev)
1483
1484 static int xway_gphy_remove(struct platform_device *pdev)
1485 {
1486 - struct device *dev = &pdev->dev;
1487 struct xway_gphy_priv *priv = platform_get_drvdata(pdev);
1488 - int ret;
1489 -
1490 - reset_control_assert(priv->gphy_reset);
1491 - reset_control_assert(priv->gphy_reset2);
1492
1493 iowrite32be(0, priv->membase);
1494
1495 clk_disable_unprepare(priv->gphy_clk_gate);
1496
1497 - ret = unregister_reboot_notifier(&priv->gphy_reboot_nb);
1498 - if (ret)
1499 - dev_warn(dev, "Failed to unregister reboot notifier\n");
1500 -
1501 return 0;
1502 }
1503
1504 diff --git a/fs/aio.c b/fs/aio.c
1505 index 4e23958c2509..3a749c3a92e3 100644
1506 --- a/fs/aio.c
1507 +++ b/fs/aio.c
1508 @@ -643,9 +643,8 @@ static void free_ioctx_users(struct percpu_ref *ref)
1509 while (!list_empty(&ctx->active_reqs)) {
1510 req = list_first_entry(&ctx->active_reqs,
1511 struct aio_kiocb, ki_list);
1512 -
1513 - list_del_init(&req->ki_list);
1514 kiocb_cancel(req);
1515 + list_del_init(&req->ki_list);
1516 }
1517
1518 spin_unlock_irq(&ctx->ctx_lock);
1519 diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
1520 index f965ce832bc0..516e0c57cf9c 100644
1521 --- a/fs/xfs/libxfs/xfs_alloc.c
1522 +++ b/fs/xfs/libxfs/xfs_alloc.c
1523 @@ -52,6 +52,23 @@ STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
1524 STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
1525 xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
1526
1527 +/*
1528 + * Size of the AGFL. For CRC-enabled filesystems we steal a couple of slots in
1529 + * the beginning of the block for a proper header with the location information
1530 + * and CRC.
1531 + */
1532 +unsigned int
1533 +xfs_agfl_size(
1534 + struct xfs_mount *mp)
1535 +{
1536 + unsigned int size = mp->m_sb.sb_sectsize;
1537 +
1538 + if (xfs_sb_version_hascrc(&mp->m_sb))
1539 + size -= sizeof(struct xfs_agfl);
1540 +
1541 + return size / sizeof(xfs_agblock_t);
1542 +}
1543 +
1544 unsigned int
1545 xfs_refc_block(
1546 struct xfs_mount *mp)
1547 @@ -540,7 +557,7 @@ xfs_agfl_verify(
1548 if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
1549 return false;
1550
1551 - for (i = 0; i < XFS_AGFL_SIZE(mp); i++) {
1552 + for (i = 0; i < xfs_agfl_size(mp); i++) {
1553 if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
1554 be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
1555 return false;
1556 @@ -2039,6 +2056,93 @@ xfs_alloc_space_available(
1557 return true;
1558 }
1559
1560 +/*
1561 + * Check the agfl fields of the agf for inconsistency or corruption. The purpose
1562 + * is to detect an agfl header padding mismatch between current and early v5
1563 + * kernels. This problem manifests as a 1-slot size difference between the
1564 + * on-disk flcount and the active [first, last] range of a wrapped agfl. This
1565 + * may also catch variants of agfl count corruption unrelated to padding. Either
1566 + * way, we'll reset the agfl and warn the user.
1567 + *
1568 + * Return true if a reset is required before the agfl can be used, false
1569 + * otherwise.
1570 + */
1571 +static bool
1572 +xfs_agfl_needs_reset(
1573 + struct xfs_mount *mp,
1574 + struct xfs_agf *agf)
1575 +{
1576 + uint32_t f = be32_to_cpu(agf->agf_flfirst);
1577 + uint32_t l = be32_to_cpu(agf->agf_fllast);
1578 + uint32_t c = be32_to_cpu(agf->agf_flcount);
1579 + int agfl_size = xfs_agfl_size(mp);
1580 + int active;
1581 +
1582 + /* no agfl header on v4 supers */
1583 + if (!xfs_sb_version_hascrc(&mp->m_sb))
1584 + return false;
1585 +
1586 + /*
1587 + * The agf read verifier catches severe corruption of these fields.
1588 + * Repeat some sanity checks to cover a packed -> unpacked mismatch if
1589 + * the verifier allows it.
1590 + */
1591 + if (f >= agfl_size || l >= agfl_size)
1592 + return true;
1593 + if (c > agfl_size)
1594 + return true;
1595 +
1596 + /*
1597 + * Check consistency between the on-disk count and the active range. An
1598 + * agfl padding mismatch manifests as an inconsistent flcount.
1599 + */
1600 + if (c && l >= f)
1601 + active = l - f + 1;
1602 + else if (c)
1603 + active = agfl_size - f + l + 1;
1604 + else
1605 + active = 0;
1606 +
1607 + return active != c;
1608 +}
1609 +
1610 +/*
1611 + * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
1612 + * agfl content cannot be trusted. Warn the user that a repair is required to
1613 + * recover leaked blocks.
1614 + *
1615 + * The purpose of this mechanism is to handle filesystems affected by the agfl
1616 + * header padding mismatch problem. A reset keeps the filesystem online with a
1617 + * relatively minor free space accounting inconsistency rather than suffer the
1618 + * inevitable crash from use of an invalid agfl block.
1619 + */
1620 +static void
1621 +xfs_agfl_reset(
1622 + struct xfs_trans *tp,
1623 + struct xfs_buf *agbp,
1624 + struct xfs_perag *pag)
1625 +{
1626 + struct xfs_mount *mp = tp->t_mountp;
1627 + struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
1628 +
1629 + ASSERT(pag->pagf_agflreset);
1630 + trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
1631 +
1632 + xfs_warn(mp,
1633 + "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
1634 + "Please unmount and run xfs_repair.",
1635 + pag->pag_agno, pag->pagf_flcount);
1636 +
1637 + agf->agf_flfirst = 0;
1638 + agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
1639 + agf->agf_flcount = 0;
1640 + xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
1641 + XFS_AGF_FLCOUNT);
1642 +
1643 + pag->pagf_flcount = 0;
1644 + pag->pagf_agflreset = false;
1645 +}
1646 +
1647 /*
1648 * Decide whether to use this allocation group for this allocation.
1649 * If so, fix up the btree freelist's size.
1650 @@ -2100,6 +2204,10 @@ xfs_alloc_fix_freelist(
1651 }
1652 }
1653
1654 + /* reset a padding mismatched agfl before final free space check */
1655 + if (pag->pagf_agflreset)
1656 + xfs_agfl_reset(tp, agbp, pag);
1657 +
1658 /* If there isn't enough total space or single-extent, reject it. */
1659 need = xfs_alloc_min_freelist(mp, pag);
1660 if (!xfs_alloc_space_available(args, need, flags))
1661 @@ -2252,10 +2360,11 @@ xfs_alloc_get_freelist(
1662 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
1663 be32_add_cpu(&agf->agf_flfirst, 1);
1664 xfs_trans_brelse(tp, agflbp);
1665 - if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
1666 + if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
1667 agf->agf_flfirst = 0;
1668
1669 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
1670 + ASSERT(!pag->pagf_agflreset);
1671 be32_add_cpu(&agf->agf_flcount, -1);
1672 xfs_trans_agflist_delta(tp, -1);
1673 pag->pagf_flcount--;
1674 @@ -2363,10 +2472,11 @@ xfs_alloc_put_freelist(
1675 be32_to_cpu(agf->agf_seqno), &agflbp)))
1676 return error;
1677 be32_add_cpu(&agf->agf_fllast, 1);
1678 - if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
1679 + if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
1680 agf->agf_fllast = 0;
1681
1682 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
1683 + ASSERT(!pag->pagf_agflreset);
1684 be32_add_cpu(&agf->agf_flcount, 1);
1685 xfs_trans_agflist_delta(tp, 1);
1686 pag->pagf_flcount++;
1687 @@ -2381,7 +2491,7 @@ xfs_alloc_put_freelist(
1688
1689 xfs_alloc_log_agf(tp, agbp, logflags);
1690
1691 - ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
1692 + ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
1693
1694 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
1695 blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
1696 @@ -2414,9 +2524,9 @@ xfs_agf_verify(
1697 if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
1698 XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
1699 be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
1700 - be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
1701 - be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
1702 - be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
1703 + be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
1704 + be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
1705 + be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
1706 return false;
1707
1708 if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
1709 @@ -2572,6 +2682,7 @@ xfs_alloc_read_agf(
1710 pag->pagb_count = 0;
1711 pag->pagb_tree = RB_ROOT;
1712 pag->pagf_init = 1;
1713 + pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
1714 }
1715 #ifdef DEBUG
1716 else if (!XFS_FORCED_SHUTDOWN(mp)) {
1717 diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
1718 index ef26edc2e938..346ba8ab68b5 100644
1719 --- a/fs/xfs/libxfs/xfs_alloc.h
1720 +++ b/fs/xfs/libxfs/xfs_alloc.h
1721 @@ -26,6 +26,8 @@ struct xfs_trans;
1722
1723 extern struct workqueue_struct *xfs_alloc_wq;
1724
1725 +unsigned int xfs_agfl_size(struct xfs_mount *mp);
1726 +
1727 /*
1728 * Freespace allocation types. Argument to xfs_alloc_[v]extent.
1729 */
1730 diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
1731 index 23229f0c5b15..ed4481b2f113 100644
1732 --- a/fs/xfs/libxfs/xfs_format.h
1733 +++ b/fs/xfs/libxfs/xfs_format.h
1734 @@ -798,24 +798,13 @@ typedef struct xfs_agi {
1735 &(XFS_BUF_TO_AGFL(bp)->agfl_bno[0]) : \
1736 (__be32 *)(bp)->b_addr)
1737
1738 -/*
1739 - * Size of the AGFL. For CRC-enabled filesystes we steal a couple of
1740 - * slots in the beginning of the block for a proper header with the
1741 - * location information and CRC.
1742 - */
1743 -#define XFS_AGFL_SIZE(mp) \
1744 - (((mp)->m_sb.sb_sectsize - \
1745 - (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
1746 - sizeof(struct xfs_agfl) : 0)) / \
1747 - sizeof(xfs_agblock_t))
1748 -
1749 typedef struct xfs_agfl {
1750 __be32 agfl_magicnum;
1751 __be32 agfl_seqno;
1752 uuid_t agfl_uuid;
1753 __be64 agfl_lsn;
1754 __be32 agfl_crc;
1755 - __be32 agfl_bno[]; /* actually XFS_AGFL_SIZE(mp) */
1756 + __be32 agfl_bno[]; /* actually xfs_agfl_size(mp) */
1757 } __attribute__((packed)) xfs_agfl_t;
1758
1759 #define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc)
1760 diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
1761 index 8f22fc579dbb..40783a313df9 100644
1762 --- a/fs/xfs/xfs_fsops.c
1763 +++ b/fs/xfs/xfs_fsops.c
1764 @@ -294,7 +294,7 @@ xfs_growfs_data_private(
1765 }
1766
1767 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
1768 - for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
1769 + for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
1770 agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
1771
1772 error = xfs_bwrite(bp);
1773 diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
1774 index e0792d036be2..d359a88ea249 100644
1775 --- a/fs/xfs/xfs_mount.h
1776 +++ b/fs/xfs/xfs_mount.h
1777 @@ -353,6 +353,7 @@ typedef struct xfs_perag {
1778 char pagi_inodeok; /* The agi is ok for inodes */
1779 uint8_t pagf_levels[XFS_BTNUM_AGF];
1780 /* # of levels in bno & cnt btree */
1781 + bool pagf_agflreset; /* agfl requires reset before use */
1782 uint32_t pagf_flcount; /* count of blocks in freelist */
1783 xfs_extlen_t pagf_freeblks; /* total free blocks */
1784 xfs_extlen_t pagf_longest; /* longest free space */
1785 diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
1786 index bb5514688d47..06bc87369632 100644
1787 --- a/fs/xfs/xfs_trace.h
1788 +++ b/fs/xfs/xfs_trace.h
1789 @@ -1513,7 +1513,7 @@ TRACE_EVENT(xfs_extent_busy_trim,
1790 __entry->tlen)
1791 );
1792
1793 -TRACE_EVENT(xfs_agf,
1794 +DECLARE_EVENT_CLASS(xfs_agf_class,
1795 TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
1796 unsigned long caller_ip),
1797 TP_ARGS(mp, agf, flags, caller_ip),
1798 @@ -1569,6 +1569,13 @@ TRACE_EVENT(xfs_agf,
1799 __entry->longest,
1800 (void *)__entry->caller_ip)
1801 );
1802 +#define DEFINE_AGF_EVENT(name) \
1803 +DEFINE_EVENT(xfs_agf_class, name, \
1804 + TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, \
1805 + unsigned long caller_ip), \
1806 + TP_ARGS(mp, agf, flags, caller_ip))
1807 +DEFINE_AGF_EVENT(xfs_agf);
1808 +DEFINE_AGF_EVENT(xfs_agfl_reset);
1809
1810 TRACE_EVENT(xfs_free_extent,
1811 TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
1812 diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
1813 index b9e22b7e2f28..d1171db23742 100644
1814 --- a/include/linux/iio/buffer_impl.h
1815 +++ b/include/linux/iio/buffer_impl.h
1816 @@ -53,7 +53,7 @@ struct iio_buffer_access_funcs {
1817 int (*request_update)(struct iio_buffer *buffer);
1818
1819 int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
1820 - int (*set_length)(struct iio_buffer *buffer, int length);
1821 + int (*set_length)(struct iio_buffer *buffer, unsigned int length);
1822
1823 int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
1824 int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
1825 @@ -72,10 +72,10 @@ struct iio_buffer_access_funcs {
1826 */
1827 struct iio_buffer {
1828 /** @length: Number of datums in buffer. */
1829 - int length;
1830 + unsigned int length;
1831
1832 /** @bytes_per_datum: Size of individual datum including timestamp. */
1833 - int bytes_per_datum;
1834 + size_t bytes_per_datum;
1835
1836 /**
1837 * @access: Buffer access functions associated with the
1838 diff --git a/include/linux/tcp.h b/include/linux/tcp.h
1839 index e8418fc77a43..fe322fa611e6 100644
1840 --- a/include/linux/tcp.h
1841 +++ b/include/linux/tcp.h
1842 @@ -334,7 +334,7 @@ struct tcp_sock {
1843
1844 /* Receiver queue space */
1845 struct {
1846 - int space;
1847 + u32 space;
1848 u32 seq;
1849 u64 time;
1850 } rcvq_space;
1851 diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
1852 index 3fab6c81917f..f41ea5af22ee 100644
1853 --- a/include/uapi/linux/nl80211.h
1854 +++ b/include/uapi/linux/nl80211.h
1855 @@ -2604,7 +2604,7 @@ enum nl80211_attrs {
1856 #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
1857 #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
1858
1859 -#define NL80211_WIPHY_NAME_MAXLEN 128
1860 +#define NL80211_WIPHY_NAME_MAXLEN 64
1861
1862 #define NL80211_MAX_SUPP_RATES 32
1863 #define NL80211_MAX_SUPP_HT_RATES 77
1864 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1865 index 76bcc80b893e..520ecaf61dc4 100644
1866 --- a/kernel/trace/trace.c
1867 +++ b/kernel/trace/trace.c
1868 @@ -894,7 +894,7 @@ int __trace_bputs(unsigned long ip, const char *str)
1869 EXPORT_SYMBOL_GPL(__trace_bputs);
1870
1871 #ifdef CONFIG_TRACER_SNAPSHOT
1872 -static void tracing_snapshot_instance(struct trace_array *tr)
1873 +void tracing_snapshot_instance(struct trace_array *tr)
1874 {
1875 struct tracer *tracer = tr->current_trace;
1876 unsigned long flags;
1877 @@ -950,7 +950,7 @@ static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
1878 struct trace_buffer *size_buf, int cpu_id);
1879 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
1880
1881 -static int alloc_snapshot(struct trace_array *tr)
1882 +int tracing_alloc_snapshot_instance(struct trace_array *tr)
1883 {
1884 int ret;
1885
1886 @@ -996,7 +996,7 @@ int tracing_alloc_snapshot(void)
1887 struct trace_array *tr = &global_trace;
1888 int ret;
1889
1890 - ret = alloc_snapshot(tr);
1891 + ret = tracing_alloc_snapshot_instance(tr);
1892 WARN_ON(ret < 0);
1893
1894 return ret;
1895 @@ -5400,7 +5400,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
1896
1897 #ifdef CONFIG_TRACER_MAX_TRACE
1898 if (t->use_max_tr && !had_max_tr) {
1899 - ret = alloc_snapshot(tr);
1900 + ret = tracing_alloc_snapshot_instance(tr);
1901 if (ret < 0)
1902 goto out;
1903 }
1904 @@ -6378,7 +6378,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
1905 }
1906 #endif
1907 if (!tr->allocated_snapshot) {
1908 - ret = alloc_snapshot(tr);
1909 + ret = tracing_alloc_snapshot_instance(tr);
1910 if (ret < 0)
1911 break;
1912 }
1913 @@ -7099,7 +7099,7 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
1914 return ret;
1915
1916 out_reg:
1917 - ret = alloc_snapshot(tr);
1918 + ret = tracing_alloc_snapshot_instance(tr);
1919 if (ret < 0)
1920 goto out;
1921
1922 diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
1923 index 401b0639116f..851cd1605085 100644
1924 --- a/kernel/trace/trace.h
1925 +++ b/kernel/trace/trace.h
1926 @@ -1807,6 +1807,17 @@ static inline void __init trace_event_init(void) { }
1927 static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
1928 #endif
1929
1930 +#ifdef CONFIG_TRACER_SNAPSHOT
1931 +void tracing_snapshot_instance(struct trace_array *tr);
1932 +int tracing_alloc_snapshot_instance(struct trace_array *tr);
1933 +#else
1934 +static inline void tracing_snapshot_instance(struct trace_array *tr) { }
1935 +static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
1936 +{
1937 + return 0;
1938 +}
1939 +#endif
1940 +
1941 extern struct trace_iterator *tracepoint_print_iter;
1942
1943 #endif /* _LINUX_KERNEL_TRACE_H */
1944 diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
1945 index f2ac9d44f6c4..b413fab7d75b 100644
1946 --- a/kernel/trace/trace_events_trigger.c
1947 +++ b/kernel/trace/trace_events_trigger.c
1948 @@ -482,9 +482,10 @@ clear_event_triggers(struct trace_array *tr)
1949 struct trace_event_file *file;
1950
1951 list_for_each_entry(file, &tr->events, list) {
1952 - struct event_trigger_data *data;
1953 - list_for_each_entry_rcu(data, &file->triggers, list) {
1954 + struct event_trigger_data *data, *n;
1955 + list_for_each_entry_safe(data, n, &file->triggers, list) {
1956 trace_event_trigger_enable_disable(file, 0);
1957 + list_del_rcu(&data->list);
1958 if (data->ops->free)
1959 data->ops->free(data->ops, data);
1960 }
1961 @@ -641,6 +642,7 @@ event_trigger_callback(struct event_command *cmd_ops,
1962 trigger_data->count = -1;
1963 trigger_data->ops = trigger_ops;
1964 trigger_data->cmd_ops = cmd_ops;
1965 + trigger_data->private_data = file;
1966 INIT_LIST_HEAD(&trigger_data->list);
1967 INIT_LIST_HEAD(&trigger_data->named_list);
1968
1969 @@ -1041,7 +1043,12 @@ static struct event_command trigger_traceoff_cmd = {
1970 static void
1971 snapshot_trigger(struct event_trigger_data *data, void *rec)
1972 {
1973 - tracing_snapshot();
1974 + struct trace_event_file *file = data->private_data;
1975 +
1976 + if (file)
1977 + tracing_snapshot_instance(file->tr);
1978 + else
1979 + tracing_snapshot();
1980 }
1981
1982 static void
1983 @@ -1063,7 +1070,7 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1984 {
1985 int ret = register_trigger(glob, ops, data, file);
1986
1987 - if (ret > 0 && tracing_alloc_snapshot() != 0) {
1988 + if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
1989 unregister_trigger(glob, ops, data, file);
1990 ret = 0;
1991 }
1992 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
1993 index e774898c91d5..8af604f3b370 100644
1994 --- a/mm/huge_memory.c
1995 +++ b/mm/huge_memory.c
1996 @@ -2388,7 +2388,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
1997 __split_huge_page_tail(head, i, lruvec, list);
1998 /* Some pages can be beyond i_size: drop them from page cache */
1999 if (head[i].index >= end) {
2000 - __ClearPageDirty(head + i);
2001 + ClearPageDirty(head + i);
2002 __delete_from_page_cache(head + i, NULL);
2003 if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
2004 shmem_uncharge(head->mapping->host, 1);
2005 diff --git a/mm/vmscan.c b/mm/vmscan.c
2006 index 1a581468a9cf..be56e2e1931e 100644
2007 --- a/mm/vmscan.c
2008 +++ b/mm/vmscan.c
2009 @@ -1451,7 +1451,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
2010 return ret;
2011
2012 mapping = page_mapping(page);
2013 - migrate_dirty = mapping && mapping->a_ops->migratepage;
2014 + migrate_dirty = !mapping || mapping->a_ops->migratepage;
2015 unlock_page(page);
2016 if (!migrate_dirty)
2017 return ret;
2018 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2019 index ebbb54bcbcac..125b49c166a4 100644
2020 --- a/net/ipv4/tcp_input.c
2021 +++ b/net/ipv4/tcp_input.c
2022 @@ -591,8 +591,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
2023 void tcp_rcv_space_adjust(struct sock *sk)
2024 {
2025 struct tcp_sock *tp = tcp_sk(sk);
2026 + u32 copied;
2027 int time;
2028 - int copied;
2029
2030 tcp_mstamp_refresh(tp);
2031 time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
2032 @@ -615,12 +615,13 @@ void tcp_rcv_space_adjust(struct sock *sk)
2033
2034 if (sysctl_tcp_moderate_rcvbuf &&
2035 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
2036 - int rcvwin, rcvmem, rcvbuf;
2037 + int rcvmem, rcvbuf;
2038 + u64 rcvwin;
2039
2040 /* minimal window to cope with packet losses, assuming
2041 * steady state. Add some cushion because of small variations.
2042 */
2043 - rcvwin = (copied << 1) + 16 * tp->advmss;
2044 + rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
2045
2046 /* If rate increased by 25%,
2047 * assume slow start, rcvwin = 3 * copied
2048 @@ -640,7 +641,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
2049 while (tcp_win_from_space(rcvmem) < tp->advmss)
2050 rcvmem += 128;
2051
2052 - rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]);
2053 + do_div(rcvwin, tp->advmss);
2054 + rcvbuf = min_t(u64, rcvwin * rcvmem, sysctl_tcp_rmem[2]);
2055 if (rcvbuf > sk->sk_rcvbuf) {
2056 sk->sk_rcvbuf = rcvbuf;
2057
2058 diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
2059 index c9c031e3d1ae..b275743e23cc 100644
2060 --- a/security/selinux/ss/services.c
2061 +++ b/security/selinux/ss/services.c
2062 @@ -1448,7 +1448,7 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
2063 scontext_len, &context, def_sid);
2064 if (rc == -EINVAL && force) {
2065 context.str = str;
2066 - context.len = scontext_len;
2067 + context.len = strlen(str) + 1;
2068 str = NULL;
2069 } else if (rc)
2070 goto out_unlock;
2071 diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
2072 index a086c35f91bb..79a9fdf94d38 100644
2073 --- a/sound/soc/intel/common/sst-firmware.c
2074 +++ b/sound/soc/intel/common/sst-firmware.c
2075 @@ -274,7 +274,6 @@ int sst_dma_new(struct sst_dsp *sst)
2076 struct sst_pdata *sst_pdata = sst->pdata;
2077 struct sst_dma *dma;
2078 struct resource mem;
2079 - const char *dma_dev_name;
2080 int ret = 0;
2081
2082 if (sst->pdata->resindex_dma_base == -1)
2083 @@ -285,7 +284,6 @@ int sst_dma_new(struct sst_dsp *sst)
2084 * is attached to the ADSP IP. */
2085 switch (sst->pdata->dma_engine) {
2086 case SST_DMA_TYPE_DW:
2087 - dma_dev_name = "dw_dmac";
2088 break;
2089 default:
2090 dev_err(sst->dev, "error: invalid DMA engine %d\n",
2091 diff --git a/tools/objtool/check.c b/tools/objtool/check.c
2092 index c8b8b7101c6f..e128d1c71c30 100644
2093 --- a/tools/objtool/check.c
2094 +++ b/tools/objtool/check.c
2095 @@ -59,6 +59,31 @@ static struct instruction *next_insn_same_sec(struct objtool_file *file,
2096 return next;
2097 }
2098
2099 +static struct instruction *next_insn_same_func(struct objtool_file *file,
2100 + struct instruction *insn)
2101 +{
2102 + struct instruction *next = list_next_entry(insn, list);
2103 + struct symbol *func = insn->func;
2104 +
2105 + if (!func)
2106 + return NULL;
2107 +
2108 + if (&next->list != &file->insn_list && next->func == func)
2109 + return next;
2110 +
2111 + /* Check if we're already in the subfunction: */
2112 + if (func == func->cfunc)
2113 + return NULL;
2114 +
2115 + /* Move to the subfunction: */
2116 + return find_insn(file, func->cfunc->sec, func->cfunc->offset);
2117 +}
2118 +
2119 +#define func_for_each_insn_all(file, func, insn) \
2120 + for (insn = find_insn(file, func->sec, func->offset); \
2121 + insn; \
2122 + insn = next_insn_same_func(file, insn))
2123 +
2124 #define func_for_each_insn(file, func, insn) \
2125 for (insn = find_insn(file, func->sec, func->offset); \
2126 insn && &insn->list != &file->insn_list && \
2127 @@ -148,10 +173,14 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
2128 if (!strcmp(func->name, global_noreturns[i]))
2129 return 1;
2130
2131 - if (!func->sec)
2132 + if (!func->len)
2133 return 0;
2134
2135 - func_for_each_insn(file, func, insn) {
2136 + insn = find_insn(file, func->sec, func->offset);
2137 + if (!insn->func)
2138 + return 0;
2139 +
2140 + func_for_each_insn_all(file, func, insn) {
2141 empty = false;
2142
2143 if (insn->type == INSN_RETURN)
2144 @@ -166,35 +195,28 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
2145 * case, the function's dead-end status depends on whether the target
2146 * of the sibling call returns.
2147 */
2148 - func_for_each_insn(file, func, insn) {
2149 - if (insn->sec != func->sec ||
2150 - insn->offset >= func->offset + func->len)
2151 - break;
2152 -
2153 + func_for_each_insn_all(file, func, insn) {
2154 if (insn->type == INSN_JUMP_UNCONDITIONAL) {
2155 struct instruction *dest = insn->jump_dest;
2156 - struct symbol *dest_func;
2157
2158 if (!dest)
2159 /* sibling call to another file */
2160 return 0;
2161
2162 - if (dest->sec != func->sec ||
2163 - dest->offset < func->offset ||
2164 - dest->offset >= func->offset + func->len) {
2165 - /* local sibling call */
2166 - dest_func = find_symbol_by_offset(dest->sec,
2167 - dest->offset);
2168 - if (!dest_func)
2169 - continue;
2170 + if (dest->func && dest->func->pfunc != insn->func->pfunc) {
2171
2172 + /* local sibling call */
2173 if (recursion == 5) {
2174 - WARN_FUNC("infinite recursion (objtool bug!)",
2175 - dest->sec, dest->offset);
2176 - return -1;
2177 + /*
2178 + * Infinite recursion: two functions
2179 + * have sibling calls to each other.
2180 + * This is a very rare case. It means
2181 + * they aren't dead ends.
2182 + */
2183 + return 0;
2184 }
2185
2186 - return __dead_end_function(file, dest_func,
2187 + return __dead_end_function(file, dest->func,
2188 recursion + 1);
2189 }
2190 }
2191 @@ -421,7 +443,7 @@ static void add_ignores(struct objtool_file *file)
2192 if (!ignore_func(file, func))
2193 continue;
2194
2195 - func_for_each_insn(file, func, insn)
2196 + func_for_each_insn_all(file, func, insn)
2197 insn->ignore = true;
2198 }
2199 }
2200 @@ -781,30 +803,35 @@ static int add_special_section_alts(struct objtool_file *file)
2201 return ret;
2202 }
2203
2204 -static int add_switch_table(struct objtool_file *file, struct symbol *func,
2205 - struct instruction *insn, struct rela *table,
2206 - struct rela *next_table)
2207 +static int add_switch_table(struct objtool_file *file, struct instruction *insn,
2208 + struct rela *table, struct rela *next_table)
2209 {
2210 struct rela *rela = table;
2211 struct instruction *alt_insn;
2212 struct alternative *alt;
2213 + struct symbol *pfunc = insn->func->pfunc;
2214 + unsigned int prev_offset = 0;
2215
2216 list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
2217 if (rela == next_table)
2218 break;
2219
2220 - if (rela->sym->sec != insn->sec ||
2221 - rela->addend <= func->offset ||
2222 - rela->addend >= func->offset + func->len)
2223 + /* Make sure the switch table entries are consecutive: */
2224 + if (prev_offset && rela->offset != prev_offset + 8)
2225 break;
2226
2227 - alt_insn = find_insn(file, insn->sec, rela->addend);
2228 - if (!alt_insn) {
2229 - WARN("%s: can't find instruction at %s+0x%x",
2230 - file->rodata->rela->name, insn->sec->name,
2231 - rela->addend);
2232 - return -1;
2233 - }
2234 + /* Detect function pointers from contiguous objects: */
2235 + if (rela->sym->sec == pfunc->sec &&
2236 + rela->addend == pfunc->offset)
2237 + break;
2238 +
2239 + alt_insn = find_insn(file, rela->sym->sec, rela->addend);
2240 + if (!alt_insn)
2241 + break;
2242 +
2243 + /* Make sure the jmp dest is in the function or subfunction: */
2244 + if (alt_insn->func->pfunc != pfunc)
2245 + break;
2246
2247 alt = malloc(sizeof(*alt));
2248 if (!alt) {
2249 @@ -814,6 +841,13 @@ static int add_switch_table(struct objtool_file *file, struct symbol *func,
2250
2251 alt->insn = alt_insn;
2252 list_add_tail(&alt->list, &insn->alts);
2253 + prev_offset = rela->offset;
2254 + }
2255 +
2256 + if (!prev_offset) {
2257 + WARN_FUNC("can't find switch jump table",
2258 + insn->sec, insn->offset);
2259 + return -1;
2260 }
2261
2262 return 0;
2263 @@ -868,40 +902,21 @@ static struct rela *find_switch_table(struct objtool_file *file,
2264 {
2265 struct rela *text_rela, *rodata_rela;
2266 struct instruction *orig_insn = insn;
2267 + unsigned long table_offset;
2268
2269 - text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
2270 - if (text_rela && text_rela->sym == file->rodata->sym) {
2271 - /* case 1 */
2272 - rodata_rela = find_rela_by_dest(file->rodata,
2273 - text_rela->addend);
2274 - if (rodata_rela)
2275 - return rodata_rela;
2276 -
2277 - /* case 2 */
2278 - rodata_rela = find_rela_by_dest(file->rodata,
2279 - text_rela->addend + 4);
2280 - if (!rodata_rela)
2281 - return NULL;
2282 -
2283 - file->ignore_unreachables = true;
2284 - return rodata_rela;
2285 - }
2286 -
2287 - /* case 3 */
2288 /*
2289 * Backward search using the @first_jump_src links, these help avoid
2290 * much of the 'in between' code. Which avoids us getting confused by
2291 * it.
2292 */
2293 - for (insn = list_prev_entry(insn, list);
2294 -
2295 + for (;
2296 &insn->list != &file->insn_list &&
2297 insn->sec == func->sec &&
2298 insn->offset >= func->offset;
2299
2300 insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
2301
2302 - if (insn->type == INSN_JUMP_DYNAMIC)
2303 + if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
2304 break;
2305
2306 /* allow small jumps within the range */
2307 @@ -917,18 +932,29 @@ static struct rela *find_switch_table(struct objtool_file *file,
2308 if (!text_rela || text_rela->sym != file->rodata->sym)
2309 continue;
2310
2311 + table_offset = text_rela->addend;
2312 + if (text_rela->type == R_X86_64_PC32)
2313 + table_offset += 4;
2314 +
2315 /*
2316 * Make sure the .rodata address isn't associated with a
2317 * symbol. gcc jump tables are anonymous data.
2318 */
2319 - if (find_symbol_containing(file->rodata, text_rela->addend))
2320 + if (find_symbol_containing(file->rodata, table_offset))
2321 continue;
2322
2323 - rodata_rela = find_rela_by_dest(file->rodata, text_rela->addend);
2324 - if (!rodata_rela)
2325 - continue;
2326 + rodata_rela = find_rela_by_dest(file->rodata, table_offset);
2327 + if (rodata_rela) {
2328 + /*
2329 + * Use of RIP-relative switch jumps is quite rare, and
2330 + * indicates a rare GCC quirk/bug which can leave dead
2331 + * code behind.
2332 + */
2333 + if (text_rela->type == R_X86_64_PC32)
2334 + file->ignore_unreachables = true;
2335
2336 - return rodata_rela;
2337 + return rodata_rela;
2338 + }
2339 }
2340
2341 return NULL;
2342 @@ -942,7 +968,7 @@ static int add_func_switch_tables(struct objtool_file *file,
2343 struct rela *rela, *prev_rela = NULL;
2344 int ret;
2345
2346 - func_for_each_insn(file, func, insn) {
2347 + func_for_each_insn_all(file, func, insn) {
2348 if (!last)
2349 last = insn;
2350
2351 @@ -973,8 +999,7 @@ static int add_func_switch_tables(struct objtool_file *file,
2352 * the beginning of another switch table in the same function.
2353 */
2354 if (prev_jump) {
2355 - ret = add_switch_table(file, func, prev_jump, prev_rela,
2356 - rela);
2357 + ret = add_switch_table(file, prev_jump, prev_rela, rela);
2358 if (ret)
2359 return ret;
2360 }
2361 @@ -984,7 +1009,7 @@ static int add_func_switch_tables(struct objtool_file *file,
2362 }
2363
2364 if (prev_jump) {
2365 - ret = add_switch_table(file, func, prev_jump, prev_rela, NULL);
2366 + ret = add_switch_table(file, prev_jump, prev_rela, NULL);
2367 if (ret)
2368 return ret;
2369 }
2370 @@ -1748,15 +1773,13 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
2371 while (1) {
2372 next_insn = next_insn_same_sec(file, insn);
2373
2374 -
2375 - if (file->c_file && func && insn->func && func != insn->func) {
2376 + if (file->c_file && func && insn->func && func != insn->func->pfunc) {
2377 WARN("%s() falls through to next function %s()",
2378 func->name, insn->func->name);
2379 return 1;
2380 }
2381
2382 - if (insn->func)
2383 - func = insn->func;
2384 + func = insn->func ? insn->func->pfunc : NULL;
2385
2386 if (func && insn->ignore) {
2387 WARN_FUNC("BUG: why am I validating an ignored function?",
2388 @@ -1777,7 +1800,7 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
2389
2390 i = insn;
2391 save_insn = NULL;
2392 - func_for_each_insn_continue_reverse(file, func, i) {
2393 + func_for_each_insn_continue_reverse(file, insn->func, i) {
2394 if (i->save) {
2395 save_insn = i;
2396 break;
2397 @@ -1864,7 +1887,7 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
2398 case INSN_JUMP_UNCONDITIONAL:
2399 if (insn->jump_dest &&
2400 (!func || !insn->jump_dest->func ||
2401 - func == insn->jump_dest->func)) {
2402 + insn->jump_dest->func->pfunc == func)) {
2403 ret = validate_branch(file, insn->jump_dest,
2404 state);
2405 if (ret)
2406 @@ -2059,7 +2082,7 @@ static int validate_functions(struct objtool_file *file)
2407
2408 for_each_sec(file, sec) {
2409 list_for_each_entry(func, &sec->symbol_list, list) {
2410 - if (func->type != STT_FUNC)
2411 + if (func->type != STT_FUNC || func->pfunc != func)
2412 continue;
2413
2414 insn = find_insn(file, sec, func->offset);
2415 diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
2416 index c1c338661699..4e60e105583e 100644
2417 --- a/tools/objtool/elf.c
2418 +++ b/tools/objtool/elf.c
2419 @@ -79,6 +79,19 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset)
2420 return NULL;
2421 }
2422
2423 +struct symbol *find_symbol_by_name(struct elf *elf, const char *name)
2424 +{
2425 + struct section *sec;
2426 + struct symbol *sym;
2427 +
2428 + list_for_each_entry(sec, &elf->sections, list)
2429 + list_for_each_entry(sym, &sec->symbol_list, list)
2430 + if (!strcmp(sym->name, name))
2431 + return sym;
2432 +
2433 + return NULL;
2434 +}
2435 +
2436 struct symbol *find_symbol_containing(struct section *sec, unsigned long offset)
2437 {
2438 struct symbol *sym;
2439 @@ -203,10 +216,11 @@ static int read_sections(struct elf *elf)
2440
2441 static int read_symbols(struct elf *elf)
2442 {
2443 - struct section *symtab;
2444 - struct symbol *sym;
2445 + struct section *symtab, *sec;
2446 + struct symbol *sym, *pfunc;
2447 struct list_head *entry, *tmp;
2448 int symbols_nr, i;
2449 + char *coldstr;
2450
2451 symtab = find_section_by_name(elf, ".symtab");
2452 if (!symtab) {
2453 @@ -281,6 +295,30 @@ static int read_symbols(struct elf *elf)
2454 hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx);
2455 }
2456
2457 + /* Create parent/child links for any cold subfunctions */
2458 + list_for_each_entry(sec, &elf->sections, list) {
2459 + list_for_each_entry(sym, &sec->symbol_list, list) {
2460 + if (sym->type != STT_FUNC)
2461 + continue;
2462 + sym->pfunc = sym->cfunc = sym;
2463 + coldstr = strstr(sym->name, ".cold.");
2464 + if (coldstr) {
2465 + coldstr[0] = '\0';
2466 + pfunc = find_symbol_by_name(elf, sym->name);
2467 + coldstr[0] = '.';
2468 +
2469 + if (!pfunc) {
2470 + WARN("%s(): can't find parent function",
2471 + sym->name);
2472 + goto err;
2473 + }
2474 +
2475 + sym->pfunc = pfunc;
2476 + pfunc->cfunc = sym;
2477 + }
2478 + }
2479 + }
2480 +
2481 return 0;
2482
2483 err:
2484 diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
2485 index d86e2ff14466..de5cd2ddded9 100644
2486 --- a/tools/objtool/elf.h
2487 +++ b/tools/objtool/elf.h
2488 @@ -61,6 +61,7 @@ struct symbol {
2489 unsigned char bind, type;
2490 unsigned long offset;
2491 unsigned int len;
2492 + struct symbol *pfunc, *cfunc;
2493 };
2494
2495 struct rela {
2496 @@ -86,6 +87,7 @@ struct elf {
2497 struct elf *elf_open(const char *name, int flags);
2498 struct section *find_section_by_name(struct elf *elf, const char *name);
2499 struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
2500 +struct symbol *find_symbol_by_name(struct elf *elf, const char *name);
2501 struct symbol *find_symbol_containing(struct section *sec, unsigned long offset);
2502 struct rela *find_rela_by_dest(struct section *sec, unsigned long offset);
2503 struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,