Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0359-4.9.260-all-fixes.patch

Revision 3661
Mon Oct 24 14:07:39 2022 UTC by niro
File size: 81843 bytes
-linux-4.9.260
1 niro 3661 diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt
2     index 24da7b32c489f..1218a5e2975ca 100644
3     --- a/Documentation/filesystems/sysfs.txt
4     +++ b/Documentation/filesystems/sysfs.txt
5     @@ -211,12 +211,10 @@ Other notes:
6     is 4096.
7    
8     - show() methods should return the number of bytes printed into the
9     - buffer. This is the return value of scnprintf().
10     + buffer.
11    
12     -- show() must not use snprintf() when formatting the value to be
13     - returned to user space. If you can guarantee that an overflow
14     - will never happen you can use sprintf() otherwise you must use
15     - scnprintf().
16     +- show() should only use sysfs_emit() or sysfs_emit_at() when formatting
17     + the value to be returned to user space.
18    
19     - store() should return the number of bytes used from the buffer. If the
20     entire buffer has been used, just return the count argument.
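
[Editor's note: for reference, the style the updated sysfs documentation above asks for looks roughly like the following minimal sketch of a device attribute show() method. It is not part of this patch; the struct and field names (my_device, threshold) are hypothetical and only illustrate the sysfs_emit() call, which formats into the page-sized sysfs buffer and returns the number of bytes written without overflowing PAGE_SIZE.]

    /* Hypothetical illustration, not from the patch below. */
    #include <linux/device.h>
    #include <linux/sysfs.h>

    struct my_device {
            int threshold;          /* hypothetical driver state */
    };

    static ssize_t threshold_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
    {
            struct my_device *md = dev_get_drvdata(dev);

            /* sysfs_emit() bounds the write to PAGE_SIZE and returns bytes written. */
            return sysfs_emit(buf, "%d\n", md->threshold);
    }
    static DEVICE_ATTR_RO(threshold);
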
21     diff --git a/Makefile b/Makefile
22     index cdc71bda92c4b..7a29676e2b2f9 100644
23     --- a/Makefile
24     +++ b/Makefile
25     @@ -1,6 +1,6 @@
26     VERSION = 4
27     PATCHLEVEL = 9
28     -SUBLEVEL = 259
29     +SUBLEVEL = 260
30     EXTRAVERSION =
31     NAME = Roaring Lionus
32    
33     diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
34     index 3eb018fa1a1f5..c3362ddd6c4cb 100644
35     --- a/arch/arm/probes/kprobes/core.c
36     +++ b/arch/arm/probes/kprobes/core.c
37     @@ -270,6 +270,7 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
38     switch (kcb->kprobe_status) {
39     case KPROBE_HIT_ACTIVE:
40     case KPROBE_HIT_SSDONE:
41     + case KPROBE_HIT_SS:
42     /* A pre- or post-handler probe got us here. */
43     kprobes_inc_nmissed_count(p);
44     save_previous_kprobe(kcb);
45     @@ -278,6 +279,11 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
46     singlestep(p, regs, kcb);
47     restore_previous_kprobe(kcb);
48     break;
49     + case KPROBE_REENTER:
50     + /* A nested probe was hit in FIQ, it is a BUG */
51     + pr_warn("Unrecoverable kprobe detected at %p.\n",
52     + p->addr);
53     + /* fall through */
54     default:
55     /* impossible cases */
56     BUG();
57     diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
58     index 02579e6569f0c..b4ec8d1b0befd 100644
59     --- a/arch/arm/xen/p2m.c
60     +++ b/arch/arm/xen/p2m.c
61     @@ -91,12 +91,39 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
62     int i;
63    
64     for (i = 0; i < count; i++) {
65     + struct gnttab_unmap_grant_ref unmap;
66     + int rc;
67     +
68     if (map_ops[i].status)
69     continue;
70     - if (unlikely(!set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
71     - map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) {
72     - return -ENOMEM;
73     - }
74     + if (likely(set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
75     + map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT)))
76     + continue;
77     +
78     + /*
79     + * Signal an error for this slot. This in turn requires
80     + * immediate unmapping.
81     + */
82     + map_ops[i].status = GNTST_general_error;
83     + unmap.host_addr = map_ops[i].host_addr,
84     + unmap.handle = map_ops[i].handle;
85     + map_ops[i].handle = ~0;
86     + if (map_ops[i].flags & GNTMAP_device_map)
87     + unmap.dev_bus_addr = map_ops[i].dev_bus_addr;
88     + else
89     + unmap.dev_bus_addr = 0;
90     +
91     + /*
92     + * Pre-populate the status field, to be recognizable in
93     + * the log message below.
94     + */
95     + unmap.status = 1;
96     +
97     + rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
98     + &unmap, 1);
99     + if (rc || unmap.status != GNTST_okay)
100     + pr_err_once("gnttab unmap failed: rc=%d st=%d\n",
101     + rc, unmap.status);
102     }
103    
104     return 0;
105     diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
106     index f819fdcff1acc..1cc42441bc672 100644
107     --- a/arch/arm64/include/asm/atomic_ll_sc.h
108     +++ b/arch/arm64/include/asm/atomic_ll_sc.h
109     @@ -37,7 +37,7 @@
110     * (the optimize attribute silently ignores these options).
111     */
112    
113     -#define ATOMIC_OP(op, asm_op) \
114     +#define ATOMIC_OP(op, asm_op, constraint) \
115     __LL_SC_INLINE void \
116     __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
117     { \
118     @@ -51,11 +51,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
119     " stxr %w1, %w0, %2\n" \
120     " cbnz %w1, 1b" \
121     : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
122     - : "Ir" (i)); \
123     + : #constraint "r" (i)); \
124     } \
125     __LL_SC_EXPORT(atomic_##op);
126    
127     -#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
128     +#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
129     __LL_SC_INLINE int \
130     __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
131     { \
132     @@ -70,14 +70,14 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
133     " cbnz %w1, 1b\n" \
134     " " #mb \
135     : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
136     - : "Ir" (i) \
137     + : #constraint "r" (i) \
138     : cl); \
139     \
140     return result; \
141     } \
142     __LL_SC_EXPORT(atomic_##op##_return##name);
143    
144     -#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
145     +#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
146     __LL_SC_INLINE int \
147     __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
148     { \
149     @@ -92,7 +92,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
150     " cbnz %w2, 1b\n" \
151     " " #mb \
152     : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
153     - : "Ir" (i) \
154     + : #constraint "r" (i) \
155     : cl); \
156     \
157     return result; \
158     @@ -110,8 +110,8 @@ __LL_SC_EXPORT(atomic_fetch_##op##name);
159     ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
160     ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
161    
162     -ATOMIC_OPS(add, add)
163     -ATOMIC_OPS(sub, sub)
164     +ATOMIC_OPS(add, add, I)
165     +ATOMIC_OPS(sub, sub, J)
166    
167     #undef ATOMIC_OPS
168     #define ATOMIC_OPS(...) \
169     @@ -121,17 +121,17 @@ ATOMIC_OPS(sub, sub)
170     ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
171     ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
172    
173     -ATOMIC_OPS(and, and)
174     -ATOMIC_OPS(andnot, bic)
175     -ATOMIC_OPS(or, orr)
176     -ATOMIC_OPS(xor, eor)
177     +ATOMIC_OPS(and, and, )
178     +ATOMIC_OPS(andnot, bic, )
179     +ATOMIC_OPS(or, orr, )
180     +ATOMIC_OPS(xor, eor, )
181    
182     #undef ATOMIC_OPS
183     #undef ATOMIC_FETCH_OP
184     #undef ATOMIC_OP_RETURN
185     #undef ATOMIC_OP
186    
187     -#define ATOMIC64_OP(op, asm_op) \
188     +#define ATOMIC64_OP(op, asm_op, constraint) \
189     __LL_SC_INLINE void \
190     __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
191     { \
192     @@ -145,11 +145,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
193     " stxr %w1, %0, %2\n" \
194     " cbnz %w1, 1b" \
195     : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
196     - : "Ir" (i)); \
197     + : #constraint "r" (i)); \
198     } \
199     __LL_SC_EXPORT(atomic64_##op);
200    
201     -#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
202     +#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
203     __LL_SC_INLINE long \
204     __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
205     { \
206     @@ -164,14 +164,14 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
207     " cbnz %w1, 1b\n" \
208     " " #mb \
209     : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
210     - : "Ir" (i) \
211     + : #constraint "r" (i) \
212     : cl); \
213     \
214     return result; \
215     } \
216     __LL_SC_EXPORT(atomic64_##op##_return##name);
217    
218     -#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
219     +#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
220     __LL_SC_INLINE long \
221     __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
222     { \
223     @@ -186,7 +186,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
224     " cbnz %w2, 1b\n" \
225     " " #mb \
226     : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
227     - : "Ir" (i) \
228     + : #constraint "r" (i) \
229     : cl); \
230     \
231     return result; \
232     @@ -204,8 +204,8 @@ __LL_SC_EXPORT(atomic64_fetch_##op##name);
233     ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
234     ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
235    
236     -ATOMIC64_OPS(add, add)
237     -ATOMIC64_OPS(sub, sub)
238     +ATOMIC64_OPS(add, add, I)
239     +ATOMIC64_OPS(sub, sub, J)
240    
241     #undef ATOMIC64_OPS
242     #define ATOMIC64_OPS(...) \
243     @@ -215,10 +215,10 @@ ATOMIC64_OPS(sub, sub)
244     ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
245     ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
246    
247     -ATOMIC64_OPS(and, and)
248     -ATOMIC64_OPS(andnot, bic)
249     -ATOMIC64_OPS(or, orr)
250     -ATOMIC64_OPS(xor, eor)
251     +ATOMIC64_OPS(and, and, L)
252     +ATOMIC64_OPS(andnot, bic, )
253     +ATOMIC64_OPS(or, orr, L)
254     +ATOMIC64_OPS(xor, eor, L)
255    
256     #undef ATOMIC64_OPS
257     #undef ATOMIC64_FETCH_OP
258     @@ -248,49 +248,54 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
259     }
260     __LL_SC_EXPORT(atomic64_dec_if_positive);
261    
262     -#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl) \
263     -__LL_SC_INLINE unsigned long \
264     -__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
265     - unsigned long old, \
266     - unsigned long new)) \
267     +#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \
268     +__LL_SC_INLINE u##sz \
269     +__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \
270     + unsigned long old, \
271     + u##sz new)) \
272     { \
273     - unsigned long tmp, oldval; \
274     + unsigned long tmp; \
275     + u##sz oldval; \
276     \
277     asm volatile( \
278     " prfm pstl1strm, %[v]\n" \
279     - "1: ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n" \
280     + "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
281     " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
282     " cbnz %" #w "[tmp], 2f\n" \
283     - " st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
284     + " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \
285     " cbnz %w[tmp], 1b\n" \
286     " " #mb "\n" \
287     - " mov %" #w "[oldval], %" #w "[old]\n" \
288     "2:" \
289     : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
290     - [v] "+Q" (*(unsigned long *)ptr) \
291     - : [old] "Lr" (old), [new] "r" (new) \
292     + [v] "+Q" (*(u##sz *)ptr) \
293     + : [old] #constraint "r" (old), [new] "r" (new) \
294     : cl); \
295     \
296     return oldval; \
297     } \
298     -__LL_SC_EXPORT(__cmpxchg_case_##name);
299     +__LL_SC_EXPORT(__cmpxchg_case_##name##sz);
300    
301     -__CMPXCHG_CASE(w, b, 1, , , , )
302     -__CMPXCHG_CASE(w, h, 2, , , , )
303     -__CMPXCHG_CASE(w, , 4, , , , )
304     -__CMPXCHG_CASE( , , 8, , , , )
305     -__CMPXCHG_CASE(w, b, acq_1, , a, , "memory")
306     -__CMPXCHG_CASE(w, h, acq_2, , a, , "memory")
307     -__CMPXCHG_CASE(w, , acq_4, , a, , "memory")
308     -__CMPXCHG_CASE( , , acq_8, , a, , "memory")
309     -__CMPXCHG_CASE(w, b, rel_1, , , l, "memory")
310     -__CMPXCHG_CASE(w, h, rel_2, , , l, "memory")
311     -__CMPXCHG_CASE(w, , rel_4, , , l, "memory")
312     -__CMPXCHG_CASE( , , rel_8, , , l, "memory")
313     -__CMPXCHG_CASE(w, b, mb_1, dmb ish, , l, "memory")
314     -__CMPXCHG_CASE(w, h, mb_2, dmb ish, , l, "memory")
315     -__CMPXCHG_CASE(w, , mb_4, dmb ish, , l, "memory")
316     -__CMPXCHG_CASE( , , mb_8, dmb ish, , l, "memory")
317     +/*
318     + * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly
319     + * handle the 'K' constraint for the value 4294967295 - thus we use no
320     + * constraint for 32 bit operations.
321     + */
322     +__CMPXCHG_CASE(w, b, , 8, , , , , )
323     +__CMPXCHG_CASE(w, h, , 16, , , , , )
324     +__CMPXCHG_CASE(w, , , 32, , , , , )
325     +__CMPXCHG_CASE( , , , 64, , , , , L)
326     +__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", )
327     +__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", )
328     +__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", )
329     +__CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L)
330     +__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", )
331     +__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", )
332     +__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", )
333     +__CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L)
334     +__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", )
335     +__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", )
336     +__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", )
337     +__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L)
338    
339     #undef __CMPXCHG_CASE
340    
341     diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
342     index d32a0160c89f7..982fe05e50585 100644
343     --- a/arch/arm64/include/asm/atomic_lse.h
344     +++ b/arch/arm64/include/asm/atomic_lse.h
345     @@ -446,22 +446,22 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
346    
347     #define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op)
348    
349     -#define __CMPXCHG_CASE(w, sz, name, mb, cl...) \
350     -static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
351     - unsigned long old, \
352     - unsigned long new) \
353     +#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \
354     +static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \
355     + unsigned long old, \
356     + u##sz new) \
357     { \
358     register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
359     register unsigned long x1 asm ("x1") = old; \
360     - register unsigned long x2 asm ("x2") = new; \
361     + register u##sz x2 asm ("x2") = new; \
362     \
363     asm volatile(ARM64_LSE_ATOMIC_INSN( \
364     /* LL/SC */ \
365     - __LL_SC_CMPXCHG(name) \
366     + __LL_SC_CMPXCHG(name##sz) \
367     __nops(2), \
368     /* LSE atomics */ \
369     " mov " #w "30, %" #w "[old]\n" \
370     - " cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \
371     + " cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n" \
372     " mov %" #w "[ret], " #w "30") \
373     : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
374     : [old] "r" (x1), [new] "r" (x2) \
375     @@ -470,22 +470,22 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
376     return x0; \
377     }
378    
379     -__CMPXCHG_CASE(w, b, 1, )
380     -__CMPXCHG_CASE(w, h, 2, )
381     -__CMPXCHG_CASE(w, , 4, )
382     -__CMPXCHG_CASE(x, , 8, )
383     -__CMPXCHG_CASE(w, b, acq_1, a, "memory")
384     -__CMPXCHG_CASE(w, h, acq_2, a, "memory")
385     -__CMPXCHG_CASE(w, , acq_4, a, "memory")
386     -__CMPXCHG_CASE(x, , acq_8, a, "memory")
387     -__CMPXCHG_CASE(w, b, rel_1, l, "memory")
388     -__CMPXCHG_CASE(w, h, rel_2, l, "memory")
389     -__CMPXCHG_CASE(w, , rel_4, l, "memory")
390     -__CMPXCHG_CASE(x, , rel_8, l, "memory")
391     -__CMPXCHG_CASE(w, b, mb_1, al, "memory")
392     -__CMPXCHG_CASE(w, h, mb_2, al, "memory")
393     -__CMPXCHG_CASE(w, , mb_4, al, "memory")
394     -__CMPXCHG_CASE(x, , mb_8, al, "memory")
395     +__CMPXCHG_CASE(w, b, , 8, )
396     +__CMPXCHG_CASE(w, h, , 16, )
397     +__CMPXCHG_CASE(w, , , 32, )
398     +__CMPXCHG_CASE(x, , , 64, )
399     +__CMPXCHG_CASE(w, b, acq_, 8, a, "memory")
400     +__CMPXCHG_CASE(w, h, acq_, 16, a, "memory")
401     +__CMPXCHG_CASE(w, , acq_, 32, a, "memory")
402     +__CMPXCHG_CASE(x, , acq_, 64, a, "memory")
403     +__CMPXCHG_CASE(w, b, rel_, 8, l, "memory")
404     +__CMPXCHG_CASE(w, h, rel_, 16, l, "memory")
405     +__CMPXCHG_CASE(w, , rel_, 32, l, "memory")
406     +__CMPXCHG_CASE(x, , rel_, 64, l, "memory")
407     +__CMPXCHG_CASE(w, b, mb_, 8, al, "memory")
408     +__CMPXCHG_CASE(w, h, mb_, 16, al, "memory")
409     +__CMPXCHG_CASE(w, , mb_, 32, al, "memory")
410     +__CMPXCHG_CASE(x, , mb_, 64, al, "memory")
411    
412     #undef __LL_SC_CMPXCHG
413     #undef __CMPXCHG_CASE
414     diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
415     index 9b2e2e2e728ae..ed6a1aae6fbb9 100644
416     --- a/arch/arm64/include/asm/cmpxchg.h
417     +++ b/arch/arm64/include/asm/cmpxchg.h
418     @@ -29,46 +29,46 @@
419     * barrier case is generated as release+dmb for the former and
420     * acquire+release for the latter.
421     */
422     -#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl) \
423     -static inline unsigned long __xchg_case_##name(unsigned long x, \
424     - volatile void *ptr) \
425     -{ \
426     - unsigned long ret, tmp; \
427     - \
428     - asm volatile(ARM64_LSE_ATOMIC_INSN( \
429     - /* LL/SC */ \
430     - " prfm pstl1strm, %2\n" \
431     - "1: ld" #acq "xr" #sz "\t%" #w "0, %2\n" \
432     - " st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n" \
433     - " cbnz %w1, 1b\n" \
434     - " " #mb, \
435     - /* LSE atomics */ \
436     - " swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \
437     - __nops(3) \
438     - " " #nop_lse) \
439     - : "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \
440     - : "r" (x) \
441     - : cl); \
442     - \
443     - return ret; \
444     +#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl) \
445     +static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr) \
446     +{ \
447     + u##sz ret; \
448     + unsigned long tmp; \
449     + \
450     + asm volatile(ARM64_LSE_ATOMIC_INSN( \
451     + /* LL/SC */ \
452     + " prfm pstl1strm, %2\n" \
453     + "1: ld" #acq "xr" #sfx "\t%" #w "0, %2\n" \
454     + " st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n" \
455     + " cbnz %w1, 1b\n" \
456     + " " #mb, \
457     + /* LSE atomics */ \
458     + " swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n" \
459     + __nops(3) \
460     + " " #nop_lse) \
461     + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr) \
462     + : "r" (x) \
463     + : cl); \
464     + \
465     + return ret; \
466     }
467    
468     -__XCHG_CASE(w, b, 1, , , , , , )
469     -__XCHG_CASE(w, h, 2, , , , , , )
470     -__XCHG_CASE(w, , 4, , , , , , )
471     -__XCHG_CASE( , , 8, , , , , , )
472     -__XCHG_CASE(w, b, acq_1, , , a, a, , "memory")
473     -__XCHG_CASE(w, h, acq_2, , , a, a, , "memory")
474     -__XCHG_CASE(w, , acq_4, , , a, a, , "memory")
475     -__XCHG_CASE( , , acq_8, , , a, a, , "memory")
476     -__XCHG_CASE(w, b, rel_1, , , , , l, "memory")
477     -__XCHG_CASE(w, h, rel_2, , , , , l, "memory")
478     -__XCHG_CASE(w, , rel_4, , , , , l, "memory")
479     -__XCHG_CASE( , , rel_8, , , , , l, "memory")
480     -__XCHG_CASE(w, b, mb_1, dmb ish, nop, , a, l, "memory")
481     -__XCHG_CASE(w, h, mb_2, dmb ish, nop, , a, l, "memory")
482     -__XCHG_CASE(w, , mb_4, dmb ish, nop, , a, l, "memory")
483     -__XCHG_CASE( , , mb_8, dmb ish, nop, , a, l, "memory")
484     +__XCHG_CASE(w, b, , 8, , , , , , )
485     +__XCHG_CASE(w, h, , 16, , , , , , )
486     +__XCHG_CASE(w, , , 32, , , , , , )
487     +__XCHG_CASE( , , , 64, , , , , , )
488     +__XCHG_CASE(w, b, acq_, 8, , , a, a, , "memory")
489     +__XCHG_CASE(w, h, acq_, 16, , , a, a, , "memory")
490     +__XCHG_CASE(w, , acq_, 32, , , a, a, , "memory")
491     +__XCHG_CASE( , , acq_, 64, , , a, a, , "memory")
492     +__XCHG_CASE(w, b, rel_, 8, , , , , l, "memory")
493     +__XCHG_CASE(w, h, rel_, 16, , , , , l, "memory")
494     +__XCHG_CASE(w, , rel_, 32, , , , , l, "memory")
495     +__XCHG_CASE( , , rel_, 64, , , , , l, "memory")
496     +__XCHG_CASE(w, b, mb_, 8, dmb ish, nop, , a, l, "memory")
497     +__XCHG_CASE(w, h, mb_, 16, dmb ish, nop, , a, l, "memory")
498     +__XCHG_CASE(w, , mb_, 32, dmb ish, nop, , a, l, "memory")
499     +__XCHG_CASE( , , mb_, 64, dmb ish, nop, , a, l, "memory")
500    
501     #undef __XCHG_CASE
502    
503     @@ -79,13 +79,13 @@ static __always_inline unsigned long __xchg##sfx(unsigned long x, \
504     { \
505     switch (size) { \
506     case 1: \
507     - return __xchg_case##sfx##_1(x, ptr); \
508     + return __xchg_case##sfx##_8(x, ptr); \
509     case 2: \
510     - return __xchg_case##sfx##_2(x, ptr); \
511     + return __xchg_case##sfx##_16(x, ptr); \
512     case 4: \
513     - return __xchg_case##sfx##_4(x, ptr); \
514     + return __xchg_case##sfx##_32(x, ptr); \
515     case 8: \
516     - return __xchg_case##sfx##_8(x, ptr); \
517     + return __xchg_case##sfx##_64(x, ptr); \
518     default: \
519     BUILD_BUG(); \
520     } \
521     @@ -122,13 +122,13 @@ static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
522     { \
523     switch (size) { \
524     case 1: \
525     - return __cmpxchg_case##sfx##_1(ptr, (u8)old, new); \
526     + return __cmpxchg_case##sfx##_8(ptr, (u8)old, new); \
527     case 2: \
528     - return __cmpxchg_case##sfx##_2(ptr, (u16)old, new); \
529     + return __cmpxchg_case##sfx##_16(ptr, (u16)old, new); \
530     case 4: \
531     - return __cmpxchg_case##sfx##_4(ptr, old, new); \
532     + return __cmpxchg_case##sfx##_32(ptr, old, new); \
533     case 8: \
534     - return __cmpxchg_case##sfx##_8(ptr, old, new); \
535     + return __cmpxchg_case##sfx##_64(ptr, old, new); \
536     default: \
537     BUILD_BUG(); \
538     } \
539     @@ -222,16 +222,16 @@ __CMPXCHG_GEN(_mb)
540     __ret; \
541     })
542    
543     -#define __CMPWAIT_CASE(w, sz, name) \
544     -static inline void __cmpwait_case_##name(volatile void *ptr, \
545     - unsigned long val) \
546     +#define __CMPWAIT_CASE(w, sfx, sz) \
547     +static inline void __cmpwait_case_##sz(volatile void *ptr, \
548     + unsigned long val) \
549     { \
550     unsigned long tmp; \
551     \
552     asm volatile( \
553     " sevl\n" \
554     " wfe\n" \
555     - " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \
556     + " ldxr" #sfx "\t%" #w "[tmp], %[v]\n" \
557     " eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \
558     " cbnz %" #w "[tmp], 1f\n" \
559     " wfe\n" \
560     @@ -240,10 +240,10 @@ static inline void __cmpwait_case_##name(volatile void *ptr, \
561     : [val] "r" (val)); \
562     }
563    
564     -__CMPWAIT_CASE(w, b, 1);
565     -__CMPWAIT_CASE(w, h, 2);
566     -__CMPWAIT_CASE(w, , 4);
567     -__CMPWAIT_CASE( , , 8);
568     +__CMPWAIT_CASE(w, b, 8);
569     +__CMPWAIT_CASE(w, h, 16);
570     +__CMPWAIT_CASE(w, , 32);
571     +__CMPWAIT_CASE( , , 64);
572    
573     #undef __CMPWAIT_CASE
574    
575     @@ -254,13 +254,13 @@ static __always_inline void __cmpwait##sfx(volatile void *ptr, \
576     { \
577     switch (size) { \
578     case 1: \
579     - return __cmpwait_case##sfx##_1(ptr, (u8)val); \
580     + return __cmpwait_case##sfx##_8(ptr, (u8)val); \
581     case 2: \
582     - return __cmpwait_case##sfx##_2(ptr, (u16)val); \
583     + return __cmpwait_case##sfx##_16(ptr, (u16)val); \
584     case 4: \
585     - return __cmpwait_case##sfx##_4(ptr, val); \
586     + return __cmpwait_case##sfx##_32(ptr, val); \
587     case 8: \
588     - return __cmpwait_case##sfx##_8(ptr, val); \
589     + return __cmpwait_case##sfx##_64(ptr, val); \
590     default: \
591     BUILD_BUG(); \
592     } \
593     diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
594     index 19977d2f97fb7..3c09ca384199d 100644
595     --- a/arch/x86/kernel/module.c
596     +++ b/arch/x86/kernel/module.c
597     @@ -125,6 +125,7 @@ int apply_relocate(Elf32_Shdr *sechdrs,
598     *location += sym->st_value;
599     break;
600     case R_386_PC32:
601     + case R_386_PLT32:
602     /* Add the value, subtract its position */
603     *location += sym->st_value - (uint32_t)location;
604     break;
605     diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
606     index 597ce32fa33f2..75a1fd8b0e903 100644
607     --- a/arch/x86/kernel/reboot.c
608     +++ b/arch/x86/kernel/reboot.c
609     @@ -478,6 +478,15 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
610     },
611     },
612    
613     + { /* PCIe Wifi card isn't detected after reboot otherwise */
614     + .callback = set_pci_reboot,
615     + .ident = "Zotac ZBOX CI327 nano",
616     + .matches = {
617     + DMI_MATCH(DMI_SYS_VENDOR, "NA"),
618     + DMI_MATCH(DMI_PRODUCT_NAME, "ZBOX-CI327NANO-GS-01"),
619     + },
620     + },
621     +
622     /* Sony */
623     { /* Handle problems with rebooting on Sony VGN-Z540N */
624     .callback = set_bios_reboot,
625     diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
626     index 5b6c8486a0bec..d1c3f82c78826 100644
627     --- a/arch/x86/tools/relocs.c
628     +++ b/arch/x86/tools/relocs.c
629     @@ -839,9 +839,11 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
630     case R_386_PC32:
631     case R_386_PC16:
632     case R_386_PC8:
633     + case R_386_PLT32:
634     /*
635     - * NONE can be ignored and PC relative relocations don't
636     - * need to be adjusted.
637     + * NONE can be ignored and PC relative relocations don't need
638     + * to be adjusted. Because sym must be defined, R_386_PLT32 can
639     + * be treated the same way as R_386_PC32.
640     */
641     break;
642    
643     @@ -882,9 +884,11 @@ static int do_reloc_real(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
644     case R_386_PC32:
645     case R_386_PC16:
646     case R_386_PC8:
647     + case R_386_PLT32:
648     /*
649     - * NONE can be ignored and PC relative relocations don't
650     - * need to be adjusted.
651     + * NONE can be ignored and PC relative relocations don't need
652     + * to be adjusted. Because sym must be defined, R_386_PLT32 can
653     + * be treated the same way as R_386_PC32.
654     */
655     break;
656    
657     diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
658     index fbf8508e558ac..d6ed664c1e39d 100644
659     --- a/arch/x86/xen/p2m.c
660     +++ b/arch/x86/xen/p2m.c
661     @@ -723,6 +723,8 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
662    
663     for (i = 0; i < count; i++) {
664     unsigned long mfn, pfn;
665     + struct gnttab_unmap_grant_ref unmap[2];
666     + int rc;
667    
668     /* Do not add to override if the map failed. */
669     if (map_ops[i].status != GNTST_okay ||
670     @@ -740,10 +742,46 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
671    
672     WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");
673    
674     - if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
675     - ret = -ENOMEM;
676     - goto out;
677     + if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
678     + continue;
679     +
680     + /*
681     + * Signal an error for this slot. This in turn requires
682     + * immediate unmapping.
683     + */
684     + map_ops[i].status = GNTST_general_error;
685     + unmap[0].host_addr = map_ops[i].host_addr,
686     + unmap[0].handle = map_ops[i].handle;
687     + map_ops[i].handle = ~0;
688     + if (map_ops[i].flags & GNTMAP_device_map)
689     + unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr;
690     + else
691     + unmap[0].dev_bus_addr = 0;
692     +
693     + if (kmap_ops) {
694     + kmap_ops[i].status = GNTST_general_error;
695     + unmap[1].host_addr = kmap_ops[i].host_addr,
696     + unmap[1].handle = kmap_ops[i].handle;
697     + kmap_ops[i].handle = ~0;
698     + if (kmap_ops[i].flags & GNTMAP_device_map)
699     + unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr;
700     + else
701     + unmap[1].dev_bus_addr = 0;
702     }
703     +
704     + /*
705     + * Pre-populate both status fields, to be recognizable in
706     + * the log message below.
707     + */
708     + unmap[0].status = 1;
709     + unmap[1].status = 1;
710     +
711     + rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
712     + unmap, 1 + !!kmap_ops);
713     + if (rc || unmap[0].status != GNTST_okay ||
714     + unmap[1].status != GNTST_okay)
715     + pr_err_once("gnttab unmap failed: rc=%d st0=%d st1=%d\n",
716     + rc, unmap[0].status, unmap[1].status);
717     }
718    
719     out:
720     diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
721     index d64a53d3270a1..7ab4152150629 100644
722     --- a/drivers/block/zram/zram_drv.c
723     +++ b/drivers/block/zram/zram_drv.c
724     @@ -440,7 +440,7 @@ static ssize_t mm_stat_show(struct device *dev,
725     zram->limit_pages << PAGE_SHIFT,
726     max_used << PAGE_SHIFT,
727     (u64)atomic64_read(&zram->stats.zero_pages),
728     - pool_stats.pages_compacted);
729     + atomic_long_read(&pool_stats.pages_compacted));
730     up_read(&zram->init_lock);
731    
732     return ret;
733     diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
734     index 9803135f2e593..96e9c25926e17 100644
735     --- a/drivers/media/usb/uvc/uvc_driver.c
736     +++ b/drivers/media/usb/uvc/uvc_driver.c
737     @@ -869,7 +869,10 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
738     unsigned int i;
739    
740     extra_size = roundup(extra_size, sizeof(*entity->pads));
741     - num_inputs = (type & UVC_TERM_OUTPUT) ? num_pads : num_pads - 1;
742     + if (num_pads)
743     + num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1;
744     + else
745     + num_inputs = 0;
746     size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads
747     + num_inputs;
748     entity = kzalloc(size, GFP_KERNEL);
749     @@ -885,7 +888,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
750    
751     for (i = 0; i < num_inputs; ++i)
752     entity->pads[i].flags = MEDIA_PAD_FL_SINK;
753     - if (!UVC_ENTITY_IS_OTERM(entity))
754     + if (!UVC_ENTITY_IS_OTERM(entity) && num_pads)
755     entity->pads[num_pads-1].flags = MEDIA_PAD_FL_SOURCE;
756    
757     entity->bNrInPins = num_inputs;
758     diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
759     index 699e5f8e0a710..2cdd6d84e5196 100644
760     --- a/drivers/media/v4l2-core/v4l2-ioctl.c
761     +++ b/drivers/media/v4l2-core/v4l2-ioctl.c
762     @@ -2804,7 +2804,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
763     v4l2_kioctl func)
764     {
765     char sbuf[128];
766     - void *mbuf = NULL;
767     + void *mbuf = NULL, *array_buf = NULL;
768     void *parg = (void *)arg;
769     long err = -EINVAL;
770     bool has_array_args;
771     @@ -2859,20 +2859,14 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
772     has_array_args = err;
773    
774     if (has_array_args) {
775     - /*
776     - * When adding new types of array args, make sure that the
777     - * parent argument to ioctl (which contains the pointer to the
778     - * array) fits into sbuf (so that mbuf will still remain
779     - * unused up to here).
780     - */
781     - mbuf = kmalloc(array_size, GFP_KERNEL);
782     + array_buf = kmalloc(array_size, GFP_KERNEL);
783     err = -ENOMEM;
784     - if (NULL == mbuf)
785     + if (array_buf == NULL)
786     goto out_array_args;
787     err = -EFAULT;
788     - if (copy_from_user(mbuf, user_ptr, array_size))
789     + if (copy_from_user(array_buf, user_ptr, array_size))
790     goto out_array_args;
791     - *kernel_ptr = mbuf;
792     + *kernel_ptr = array_buf;
793     }
794    
795     /* Handles IOCTL */
796     @@ -2891,7 +2885,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
797    
798     if (has_array_args) {
799     *kernel_ptr = (void __force *)user_ptr;
800     - if (copy_to_user(user_ptr, mbuf, array_size))
801     + if (copy_to_user(user_ptr, array_buf, array_size))
802     err = -EFAULT;
803     goto out_array_args;
804     }
805     @@ -2911,6 +2905,7 @@ out_array_args:
806     }
807    
808     out:
809     + kfree(array_buf);
810     kfree(mbuf);
811     return err;
812     }
813     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
814     index f9e57405b167b..a8c960152a357 100644
815     --- a/drivers/net/usb/qmi_wwan.c
816     +++ b/drivers/net/usb/qmi_wwan.c
817     @@ -881,6 +881,7 @@ static const struct usb_device_id products[] = {
818     {QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
819     {QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
820     {QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */
821     + {QMI_FIXED_INTF(0x19d2, 0x1275, 3)}, /* ZTE P685M */
822     {QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
823     {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
824     {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
825     diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
826     index 8b3fe88d1c4e7..564181bb0906a 100644
827     --- a/drivers/net/wireless/ath/ath10k/mac.c
828     +++ b/drivers/net/wireless/ath/ath10k/mac.c
829     @@ -3452,23 +3452,16 @@ bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
830     static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
831     {
832     struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
833     - int ret = 0;
834     -
835     - spin_lock_bh(&ar->data_lock);
836    
837     - if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
838     + if (skb_queue_len_lockless(q) >= ATH10K_MAX_NUM_MGMT_PENDING) {
839     ath10k_warn(ar, "wmi mgmt tx queue is full\n");
840     - ret = -ENOSPC;
841     - goto unlock;
842     + return -ENOSPC;
843     }
844    
845     - __skb_queue_tail(q, skb);
846     + skb_queue_tail(q, skb);
847     ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
848    
849     -unlock:
850     - spin_unlock_bh(&ar->data_lock);
851     -
852     - return ret;
853     + return 0;
854     }
855    
856     static enum ath10k_mac_tx_path
857     diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
858     index 22009e14a8fc1..9bd635ec7827b 100644
859     --- a/drivers/net/wireless/ti/wl12xx/main.c
860     +++ b/drivers/net/wireless/ti/wl12xx/main.c
861     @@ -648,7 +648,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
862     wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
863     WLCORE_QUIRK_DUAL_PROBE_TMPL |
864     WLCORE_QUIRK_TKIP_HEADER_SPACE |
865     - WLCORE_QUIRK_START_STA_FAILS |
866     WLCORE_QUIRK_AP_ZERO_SESSION_ID;
867     wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
868     wl->mr_fw_name = WL127X_FW_NAME_MULTI;
869     @@ -672,7 +671,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
870     wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
871     WLCORE_QUIRK_DUAL_PROBE_TMPL |
872     WLCORE_QUIRK_TKIP_HEADER_SPACE |
873     - WLCORE_QUIRK_START_STA_FAILS |
874     WLCORE_QUIRK_AP_ZERO_SESSION_ID;
875     wl->plt_fw_name = WL127X_PLT_FW_NAME;
876     wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
877     @@ -701,7 +699,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
878     wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
879     WLCORE_QUIRK_DUAL_PROBE_TMPL |
880     WLCORE_QUIRK_TKIP_HEADER_SPACE |
881     - WLCORE_QUIRK_START_STA_FAILS |
882     WLCORE_QUIRK_AP_ZERO_SESSION_ID;
883    
884     wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER,
885     diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
886     index 17d32ce5d16b6..a973dac456be4 100644
887     --- a/drivers/net/wireless/ti/wlcore/main.c
888     +++ b/drivers/net/wireless/ti/wlcore/main.c
889     @@ -2833,21 +2833,8 @@ static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
890    
891     if (is_ibss)
892     ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
893     - else {
894     - if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
895     - /*
896     - * TODO: this is an ugly workaround for wl12xx fw
897     - * bug - we are not able to tx/rx after the first
898     - * start_sta, so make dummy start+stop calls,
899     - * and then call start_sta again.
900     - * this should be fixed in the fw.
901     - */
902     - wl12xx_cmd_role_start_sta(wl, wlvif);
903     - wl12xx_cmd_role_stop_sta(wl, wlvif);
904     - }
905     -
906     + else
907     ret = wl12xx_cmd_role_start_sta(wl, wlvif);
908     - }
909    
910     return ret;
911     }
912     diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
913     index 1827546ba8075..34f0ba17fac92 100644
914     --- a/drivers/net/wireless/ti/wlcore/wlcore.h
915     +++ b/drivers/net/wireless/ti/wlcore/wlcore.h
916     @@ -557,9 +557,6 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
917     /* Each RX/TX transaction requires an end-of-transaction transfer */
918     #define WLCORE_QUIRK_END_OF_TRANSACTION BIT(0)
919    
920     -/* the first start_role(sta) sometimes doesn't work on wl12xx */
921     -#define WLCORE_QUIRK_START_STA_FAILS BIT(1)
922     -
923     /* wl127x and SPI don't support SDIO block size alignment */
924     #define WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN BIT(2)
925    
926     diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
927     index 0024200c30ce4..f7fd8b5a6a8cf 100644
928     --- a/drivers/net/xen-netback/netback.c
929     +++ b/drivers/net/xen-netback/netback.c
930     @@ -1328,11 +1328,21 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
931     return 0;
932    
933     gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
934     - if (nr_mops != 0)
935     + if (nr_mops != 0) {
936     ret = gnttab_map_refs(queue->tx_map_ops,
937     NULL,
938     queue->pages_to_map,
939     nr_mops);
940     + if (ret) {
941     + unsigned int i;
942     +
943     + netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
944     + nr_mops, ret);
945     + for (i = 0; i < nr_mops; ++i)
946     + WARN_ON_ONCE(queue->tx_map_ops[i].status ==
947     + GNTST_okay);
948     + }
949     + }
950    
951     work_done = xenvif_tx_submit(queue);
952    
953     diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
954     index a84b473d4a08b..b9c924bb6e3dd 100644
955     --- a/drivers/scsi/libiscsi.c
956     +++ b/drivers/scsi/libiscsi.c
957     @@ -3368,125 +3368,125 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
958    
959     switch(param) {
960     case ISCSI_PARAM_FAST_ABORT:
961     - len = sprintf(buf, "%d\n", session->fast_abort);
962     + len = sysfs_emit(buf, "%d\n", session->fast_abort);
963     break;
964     case ISCSI_PARAM_ABORT_TMO:
965     - len = sprintf(buf, "%d\n", session->abort_timeout);
966     + len = sysfs_emit(buf, "%d\n", session->abort_timeout);
967     break;
968     case ISCSI_PARAM_LU_RESET_TMO:
969     - len = sprintf(buf, "%d\n", session->lu_reset_timeout);
970     + len = sysfs_emit(buf, "%d\n", session->lu_reset_timeout);
971     break;
972     case ISCSI_PARAM_TGT_RESET_TMO:
973     - len = sprintf(buf, "%d\n", session->tgt_reset_timeout);
974     + len = sysfs_emit(buf, "%d\n", session->tgt_reset_timeout);
975     break;
976     case ISCSI_PARAM_INITIAL_R2T_EN:
977     - len = sprintf(buf, "%d\n", session->initial_r2t_en);
978     + len = sysfs_emit(buf, "%d\n", session->initial_r2t_en);
979     break;
980     case ISCSI_PARAM_MAX_R2T:
981     - len = sprintf(buf, "%hu\n", session->max_r2t);
982     + len = sysfs_emit(buf, "%hu\n", session->max_r2t);
983     break;
984     case ISCSI_PARAM_IMM_DATA_EN:
985     - len = sprintf(buf, "%d\n", session->imm_data_en);
986     + len = sysfs_emit(buf, "%d\n", session->imm_data_en);
987     break;
988     case ISCSI_PARAM_FIRST_BURST:
989     - len = sprintf(buf, "%u\n", session->first_burst);
990     + len = sysfs_emit(buf, "%u\n", session->first_burst);
991     break;
992     case ISCSI_PARAM_MAX_BURST:
993     - len = sprintf(buf, "%u\n", session->max_burst);
994     + len = sysfs_emit(buf, "%u\n", session->max_burst);
995     break;
996     case ISCSI_PARAM_PDU_INORDER_EN:
997     - len = sprintf(buf, "%d\n", session->pdu_inorder_en);
998     + len = sysfs_emit(buf, "%d\n", session->pdu_inorder_en);
999     break;
1000     case ISCSI_PARAM_DATASEQ_INORDER_EN:
1001     - len = sprintf(buf, "%d\n", session->dataseq_inorder_en);
1002     + len = sysfs_emit(buf, "%d\n", session->dataseq_inorder_en);
1003     break;
1004     case ISCSI_PARAM_DEF_TASKMGMT_TMO:
1005     - len = sprintf(buf, "%d\n", session->def_taskmgmt_tmo);
1006     + len = sysfs_emit(buf, "%d\n", session->def_taskmgmt_tmo);
1007     break;
1008     case ISCSI_PARAM_ERL:
1009     - len = sprintf(buf, "%d\n", session->erl);
1010     + len = sysfs_emit(buf, "%d\n", session->erl);
1011     break;
1012     case ISCSI_PARAM_TARGET_NAME:
1013     - len = sprintf(buf, "%s\n", session->targetname);
1014     + len = sysfs_emit(buf, "%s\n", session->targetname);
1015     break;
1016     case ISCSI_PARAM_TARGET_ALIAS:
1017     - len = sprintf(buf, "%s\n", session->targetalias);
1018     + len = sysfs_emit(buf, "%s\n", session->targetalias);
1019     break;
1020     case ISCSI_PARAM_TPGT:
1021     - len = sprintf(buf, "%d\n", session->tpgt);
1022     + len = sysfs_emit(buf, "%d\n", session->tpgt);
1023     break;
1024     case ISCSI_PARAM_USERNAME:
1025     - len = sprintf(buf, "%s\n", session->username);
1026     + len = sysfs_emit(buf, "%s\n", session->username);
1027     break;
1028     case ISCSI_PARAM_USERNAME_IN:
1029     - len = sprintf(buf, "%s\n", session->username_in);
1030     + len = sysfs_emit(buf, "%s\n", session->username_in);
1031     break;
1032     case ISCSI_PARAM_PASSWORD:
1033     - len = sprintf(buf, "%s\n", session->password);
1034     + len = sysfs_emit(buf, "%s\n", session->password);
1035     break;
1036     case ISCSI_PARAM_PASSWORD_IN:
1037     - len = sprintf(buf, "%s\n", session->password_in);
1038     + len = sysfs_emit(buf, "%s\n", session->password_in);
1039     break;
1040     case ISCSI_PARAM_IFACE_NAME:
1041     - len = sprintf(buf, "%s\n", session->ifacename);
1042     + len = sysfs_emit(buf, "%s\n", session->ifacename);
1043     break;
1044     case ISCSI_PARAM_INITIATOR_NAME:
1045     - len = sprintf(buf, "%s\n", session->initiatorname);
1046     + len = sysfs_emit(buf, "%s\n", session->initiatorname);
1047     break;
1048     case ISCSI_PARAM_BOOT_ROOT:
1049     - len = sprintf(buf, "%s\n", session->boot_root);
1050     + len = sysfs_emit(buf, "%s\n", session->boot_root);
1051     break;
1052     case ISCSI_PARAM_BOOT_NIC:
1053     - len = sprintf(buf, "%s\n", session->boot_nic);
1054     + len = sysfs_emit(buf, "%s\n", session->boot_nic);
1055     break;
1056     case ISCSI_PARAM_BOOT_TARGET:
1057     - len = sprintf(buf, "%s\n", session->boot_target);
1058     + len = sysfs_emit(buf, "%s\n", session->boot_target);
1059     break;
1060     case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
1061     - len = sprintf(buf, "%u\n", session->auto_snd_tgt_disable);
1062     + len = sysfs_emit(buf, "%u\n", session->auto_snd_tgt_disable);
1063     break;
1064     case ISCSI_PARAM_DISCOVERY_SESS:
1065     - len = sprintf(buf, "%u\n", session->discovery_sess);
1066     + len = sysfs_emit(buf, "%u\n", session->discovery_sess);
1067     break;
1068     case ISCSI_PARAM_PORTAL_TYPE:
1069     - len = sprintf(buf, "%s\n", session->portal_type);
1070     + len = sysfs_emit(buf, "%s\n", session->portal_type);
1071     break;
1072     case ISCSI_PARAM_CHAP_AUTH_EN:
1073     - len = sprintf(buf, "%u\n", session->chap_auth_en);
1074     + len = sysfs_emit(buf, "%u\n", session->chap_auth_en);
1075     break;
1076     case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
1077     - len = sprintf(buf, "%u\n", session->discovery_logout_en);
1078     + len = sysfs_emit(buf, "%u\n", session->discovery_logout_en);
1079     break;
1080     case ISCSI_PARAM_BIDI_CHAP_EN:
1081     - len = sprintf(buf, "%u\n", session->bidi_chap_en);
1082     + len = sysfs_emit(buf, "%u\n", session->bidi_chap_en);
1083     break;
1084     case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
1085     - len = sprintf(buf, "%u\n", session->discovery_auth_optional);
1086     + len = sysfs_emit(buf, "%u\n", session->discovery_auth_optional);
1087     break;
1088     case ISCSI_PARAM_DEF_TIME2WAIT:
1089     - len = sprintf(buf, "%d\n", session->time2wait);
1090     + len = sysfs_emit(buf, "%d\n", session->time2wait);
1091     break;
1092     case ISCSI_PARAM_DEF_TIME2RETAIN:
1093     - len = sprintf(buf, "%d\n", session->time2retain);
1094     + len = sysfs_emit(buf, "%d\n", session->time2retain);
1095     break;
1096     case ISCSI_PARAM_TSID:
1097     - len = sprintf(buf, "%u\n", session->tsid);
1098     + len = sysfs_emit(buf, "%u\n", session->tsid);
1099     break;
1100     case ISCSI_PARAM_ISID:
1101     - len = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
1102     + len = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n",
1103     session->isid[0], session->isid[1],
1104     session->isid[2], session->isid[3],
1105     session->isid[4], session->isid[5]);
1106     break;
1107     case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
1108     - len = sprintf(buf, "%u\n", session->discovery_parent_idx);
1109     + len = sysfs_emit(buf, "%u\n", session->discovery_parent_idx);
1110     break;
1111     case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
1112     if (session->discovery_parent_type)
1113     - len = sprintf(buf, "%s\n",
1114     + len = sysfs_emit(buf, "%s\n",
1115     session->discovery_parent_type);
1116     else
1117     - len = sprintf(buf, "\n");
1118     + len = sysfs_emit(buf, "\n");
1119     break;
1120     default:
1121     return -ENOSYS;
1122     @@ -3518,16 +3518,16 @@ int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
1123     case ISCSI_PARAM_CONN_ADDRESS:
1124     case ISCSI_HOST_PARAM_IPADDRESS:
1125     if (sin)
1126     - len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr);
1127     + len = sysfs_emit(buf, "%pI4\n", &sin->sin_addr.s_addr);
1128     else
1129     - len = sprintf(buf, "%pI6\n", &sin6->sin6_addr);
1130     + len = sysfs_emit(buf, "%pI6\n", &sin6->sin6_addr);
1131     break;
1132     case ISCSI_PARAM_CONN_PORT:
1133     case ISCSI_PARAM_LOCAL_PORT:
1134     if (sin)
1135     - len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port));
1136     + len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin->sin_port));
1137     else
1138     - len = sprintf(buf, "%hu\n",
1139     + len = sysfs_emit(buf, "%hu\n",
1140     be16_to_cpu(sin6->sin6_port));
1141     break;
1142     default:
1143     @@ -3546,88 +3546,88 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
1144    
1145     switch(param) {
1146     case ISCSI_PARAM_PING_TMO:
1147     - len = sprintf(buf, "%u\n", conn->ping_timeout);
1148     + len = sysfs_emit(buf, "%u\n", conn->ping_timeout);
1149     break;
1150     case ISCSI_PARAM_RECV_TMO:
1151     - len = sprintf(buf, "%u\n", conn->recv_timeout);
1152     + len = sysfs_emit(buf, "%u\n", conn->recv_timeout);
1153     break;
1154     case ISCSI_PARAM_MAX_RECV_DLENGTH:
1155     - len = sprintf(buf, "%u\n", conn->max_recv_dlength);
1156     + len = sysfs_emit(buf, "%u\n", conn->max_recv_dlength);
1157     break;
1158     case ISCSI_PARAM_MAX_XMIT_DLENGTH:
1159     - len = sprintf(buf, "%u\n", conn->max_xmit_dlength);
1160     + len = sysfs_emit(buf, "%u\n", conn->max_xmit_dlength);
1161     break;
1162     case ISCSI_PARAM_HDRDGST_EN:
1163     - len = sprintf(buf, "%d\n", conn->hdrdgst_en);
1164     + len = sysfs_emit(buf, "%d\n", conn->hdrdgst_en);
1165     break;
1166     case ISCSI_PARAM_DATADGST_EN:
1167     - len = sprintf(buf, "%d\n", conn->datadgst_en);
1168     + len = sysfs_emit(buf, "%d\n", conn->datadgst_en);
1169     break;
1170     case ISCSI_PARAM_IFMARKER_EN:
1171     - len = sprintf(buf, "%d\n", conn->ifmarker_en);
1172     + len = sysfs_emit(buf, "%d\n", conn->ifmarker_en);
1173     break;
1174     case ISCSI_PARAM_OFMARKER_EN:
1175     - len = sprintf(buf, "%d\n", conn->ofmarker_en);
1176     + len = sysfs_emit(buf, "%d\n", conn->ofmarker_en);
1177     break;
1178     case ISCSI_PARAM_EXP_STATSN:
1179     - len = sprintf(buf, "%u\n", conn->exp_statsn);
1180     + len = sysfs_emit(buf, "%u\n", conn->exp_statsn);
1181     break;
1182     case ISCSI_PARAM_PERSISTENT_PORT:
1183     - len = sprintf(buf, "%d\n", conn->persistent_port);
1184     + len = sysfs_emit(buf, "%d\n", conn->persistent_port);
1185     break;
1186     case ISCSI_PARAM_PERSISTENT_ADDRESS:
1187     - len = sprintf(buf, "%s\n", conn->persistent_address);
1188     + len = sysfs_emit(buf, "%s\n", conn->persistent_address);
1189     break;
1190     case ISCSI_PARAM_STATSN:
1191     - len = sprintf(buf, "%u\n", conn->statsn);
1192     + len = sysfs_emit(buf, "%u\n", conn->statsn);
1193     break;
1194     case ISCSI_PARAM_MAX_SEGMENT_SIZE:
1195     - len = sprintf(buf, "%u\n", conn->max_segment_size);
1196     + len = sysfs_emit(buf, "%u\n", conn->max_segment_size);
1197     break;
1198     case ISCSI_PARAM_KEEPALIVE_TMO:
1199     - len = sprintf(buf, "%u\n", conn->keepalive_tmo);
1200     + len = sysfs_emit(buf, "%u\n", conn->keepalive_tmo);
1201     break;
1202     case ISCSI_PARAM_LOCAL_PORT:
1203     - len = sprintf(buf, "%u\n", conn->local_port);
1204     + len = sysfs_emit(buf, "%u\n", conn->local_port);
1205     break;
1206     case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
1207     - len = sprintf(buf, "%u\n", conn->tcp_timestamp_stat);
1208     + len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_stat);
1209     break;
1210     case ISCSI_PARAM_TCP_NAGLE_DISABLE:
1211     - len = sprintf(buf, "%u\n", conn->tcp_nagle_disable);
1212     + len = sysfs_emit(buf, "%u\n", conn->tcp_nagle_disable);
1213     break;
1214     case ISCSI_PARAM_TCP_WSF_DISABLE:
1215     - len = sprintf(buf, "%u\n", conn->tcp_wsf_disable);
1216     + len = sysfs_emit(buf, "%u\n", conn->tcp_wsf_disable);
1217     break;
1218     case ISCSI_PARAM_TCP_TIMER_SCALE:
1219     - len = sprintf(buf, "%u\n", conn->tcp_timer_scale);
1220     + len = sysfs_emit(buf, "%u\n", conn->tcp_timer_scale);
1221     break;
1222     case ISCSI_PARAM_TCP_TIMESTAMP_EN:
1223     - len = sprintf(buf, "%u\n", conn->tcp_timestamp_en);
1224     + len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_en);
1225     break;
1226     case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
1227     - len = sprintf(buf, "%u\n", conn->fragment_disable);
1228     + len = sysfs_emit(buf, "%u\n", conn->fragment_disable);
1229     break;
1230     case ISCSI_PARAM_IPV4_TOS:
1231     - len = sprintf(buf, "%u\n", conn->ipv4_tos);
1232     + len = sysfs_emit(buf, "%u\n", conn->ipv4_tos);
1233     break;
1234     case ISCSI_PARAM_IPV6_TC:
1235     - len = sprintf(buf, "%u\n", conn->ipv6_traffic_class);
1236     + len = sysfs_emit(buf, "%u\n", conn->ipv6_traffic_class);
1237     break;
1238     case ISCSI_PARAM_IPV6_FLOW_LABEL:
1239     - len = sprintf(buf, "%u\n", conn->ipv6_flow_label);
1240     + len = sysfs_emit(buf, "%u\n", conn->ipv6_flow_label);
1241     break;
1242     case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
1243     - len = sprintf(buf, "%u\n", conn->is_fw_assigned_ipv6);
1244     + len = sysfs_emit(buf, "%u\n", conn->is_fw_assigned_ipv6);
1245     break;
1246     case ISCSI_PARAM_TCP_XMIT_WSF:
1247     - len = sprintf(buf, "%u\n", conn->tcp_xmit_wsf);
1248     + len = sysfs_emit(buf, "%u\n", conn->tcp_xmit_wsf);
1249     break;
1250     case ISCSI_PARAM_TCP_RECV_WSF:
1251     - len = sprintf(buf, "%u\n", conn->tcp_recv_wsf);
1252     + len = sysfs_emit(buf, "%u\n", conn->tcp_recv_wsf);
1253     break;
1254     case ISCSI_PARAM_LOCAL_IPADDR:
1255     - len = sprintf(buf, "%s\n", conn->local_ipaddr);
1256     + len = sysfs_emit(buf, "%s\n", conn->local_ipaddr);
1257     break;
1258     default:
1259     return -ENOSYS;
1260     @@ -3645,13 +3645,13 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
1261    
1262     switch (param) {
1263     case ISCSI_HOST_PARAM_NETDEV_NAME:
1264     - len = sprintf(buf, "%s\n", ihost->netdev);
1265     + len = sysfs_emit(buf, "%s\n", ihost->netdev);
1266     break;
1267     case ISCSI_HOST_PARAM_HWADDRESS:
1268     - len = sprintf(buf, "%s\n", ihost->hwaddress);
1269     + len = sysfs_emit(buf, "%s\n", ihost->hwaddress);
1270     break;
1271     case ISCSI_HOST_PARAM_INITIATOR_NAME:
1272     - len = sprintf(buf, "%s\n", ihost->initiatorname);
1273     + len = sysfs_emit(buf, "%s\n", ihost->initiatorname);
1274     break;
1275     default:
1276     return -ENOSYS;
1277     diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
1278     index c2bce3f6eaace..4f4d2d65a4a70 100644
1279     --- a/drivers/scsi/scsi_transport_iscsi.c
1280     +++ b/drivers/scsi/scsi_transport_iscsi.c
1281     @@ -119,7 +119,11 @@ show_transport_handle(struct device *dev, struct device_attribute *attr,
1282     char *buf)
1283     {
1284     struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
1285     - return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport));
1286     +
1287     + if (!capable(CAP_SYS_ADMIN))
1288     + return -EACCES;
1289     + return sysfs_emit(buf, "%llu\n",
1290     + (unsigned long long)iscsi_handle(priv->iscsi_transport));
1291     }
1292     static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
1293    
1294     @@ -129,7 +133,7 @@ show_transport_##name(struct device *dev, \
1295     struct device_attribute *attr,char *buf) \
1296     { \
1297     struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \
1298     - return sprintf(buf, format"\n", priv->iscsi_transport->name); \
1299     + return sysfs_emit(buf, format"\n", priv->iscsi_transport->name);\
1300     } \
1301     static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
1302    
1303     @@ -170,7 +174,7 @@ static ssize_t
1304     show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
1305     {
1306     struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
1307     - return sprintf(buf, "%llu\n", (unsigned long long) ep->id);
1308     + return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id);
1309     }
1310     static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
1311    
1312     @@ -2782,6 +2786,9 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
1313     struct iscsi_cls_session *session;
1314     int err = 0, value = 0;
1315    
1316     + if (ev->u.set_param.len > PAGE_SIZE)
1317     + return -EINVAL;
1318     +
1319     session = iscsi_session_lookup(ev->u.set_param.sid);
1320     conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid);
1321     if (!conn || !session)
1322     @@ -2929,6 +2936,9 @@ iscsi_set_host_param(struct iscsi_transport *transport,
1323     if (!transport->set_host_param)
1324     return -ENOSYS;
1325    
1326     + if (ev->u.set_host_param.len > PAGE_SIZE)
1327     + return -EINVAL;
1328     +
1329     shost = scsi_host_lookup(ev->u.set_host_param.host_no);
1330     if (!shost) {
1331     printk(KERN_ERR "set_host_param could not find host no %u\n",
1332     @@ -3515,6 +3525,7 @@ static int
1333     iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
1334     {
1335     int err = 0;
1336     + u32 pdu_len;
1337     struct iscsi_uevent *ev = nlmsg_data(nlh);
1338     struct iscsi_transport *transport = NULL;
1339     struct iscsi_internal *priv;
1340     @@ -3522,6 +3533,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
1341     struct iscsi_cls_conn *conn;
1342     struct iscsi_endpoint *ep = NULL;
1343    
1344     + if (!netlink_capable(skb, CAP_SYS_ADMIN))
1345     + return -EPERM;
1346     +
1347     if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE)
1348     *group = ISCSI_NL_GRP_UIP;
1349     else
1350     @@ -3627,6 +3641,14 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
1351     err = -EINVAL;
1352     break;
1353     case ISCSI_UEVENT_SEND_PDU:
1354     + pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
1355     +
1356     + if ((ev->u.send_pdu.hdr_size > pdu_len) ||
1357     + (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
1358     + err = -EINVAL;
1359     + break;
1360     + }
1361     +
1362     conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
1363     if (conn)
1364     ev->r.retcode = transport->send_pdu(conn,
1365     @@ -4031,7 +4053,7 @@ show_priv_session_state(struct device *dev, struct device_attribute *attr,
1366     char *buf)
1367     {
1368     struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
1369     - return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
1370     + return sysfs_emit(buf, "%s\n", iscsi_session_state_name(session->state));
1371     }
1372     static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
1373     NULL);
1374     @@ -4040,7 +4062,7 @@ show_priv_session_creator(struct device *dev, struct device_attribute *attr,
1375     char *buf)
1376     {
1377     struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
1378     - return sprintf(buf, "%d\n", session->creator);
1379     + return sysfs_emit(buf, "%d\n", session->creator);
1380     }
1381     static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
1382     NULL);
1383     @@ -4049,7 +4071,7 @@ show_priv_session_target_id(struct device *dev, struct device_attribute *attr,
1384     char *buf)
1385     {
1386     struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
1387     - return sprintf(buf, "%d\n", session->target_id);
1388     + return sysfs_emit(buf, "%d\n", session->target_id);
1389     }
1390     static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO,
1391     show_priv_session_target_id, NULL);
1392     @@ -4062,8 +4084,8 @@ show_priv_session_##field(struct device *dev, \
1393     struct iscsi_cls_session *session = \
1394     iscsi_dev_to_session(dev->parent); \
1395     if (session->field == -1) \
1396     - return sprintf(buf, "off\n"); \
1397     - return sprintf(buf, format"\n", session->field); \
1398     + return sysfs_emit(buf, "off\n"); \
1399     + return sysfs_emit(buf, format"\n", session->field); \
1400     }
1401    
1402     #define iscsi_priv_session_attr_store(field) \
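
The scsi_transport_iscsi.c hunks above converge on one show() pattern: privileged values are gated with capable(CAP_SYS_ADMIN) and all formatting goes through the PAGE_SIZE-aware sysfs_emit() instead of sprintf(). A minimal sketch of that pattern, not part of the patch; the attribute name and value are hypothetical:

    #include <linux/capability.h>
    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/sysfs.h>

    /* Hypothetical read-only attribute; mirrors the converted handlers above. */
    static ssize_t example_handle_show(struct device *dev,
                                       struct device_attribute *attr, char *buf)
    {
            if (!capable(CAP_SYS_ADMIN))
                    return -EACCES;

            /* sysfs_emit() cannot write past the PAGE_SIZE sysfs buffer */
            return sysfs_emit(buf, "%llu\n", 42ULL);
    }
    static DEVICE_ATTR_RO(example_handle);
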
1403     diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
1404     index 49c718b91e55a..16f6f35954fb5 100644
1405     --- a/drivers/staging/fwserial/fwserial.c
1406     +++ b/drivers/staging/fwserial/fwserial.c
1407     @@ -2255,6 +2255,7 @@ static int fwserial_create(struct fw_unit *unit)
1408     err = fw_core_add_address_handler(&port->rx_handler,
1409     &fw_high_memory_region);
1410     if (err) {
1411     + tty_port_destroy(&port->port);
1412     kfree(port);
1413     goto free_ports;
1414     }
1415     @@ -2337,6 +2338,7 @@ unregister_ttys:
1416    
1417     free_ports:
1418     for (--i; i >= 0; --i) {
1419     + fw_core_remove_address_handler(&serial->ports[i]->rx_handler);
1420     tty_port_destroy(&serial->ports[i]->port);
1421     kfree(serial->ports[i]);
1422     }
1423     diff --git a/drivers/staging/most/aim-sound/sound.c b/drivers/staging/most/aim-sound/sound.c
1424     index e4198e5e064b5..288c7bf129457 100644
1425     --- a/drivers/staging/most/aim-sound/sound.c
1426     +++ b/drivers/staging/most/aim-sound/sound.c
1427     @@ -92,6 +92,8 @@ static void swap_copy24(u8 *dest, const u8 *source, unsigned int bytes)
1428     {
1429     unsigned int i = 0;
1430    
1431     + if (bytes < 2)
1432     + return;
1433     while (i < bytes - 2) {
1434     dest[i] = source[i + 2];
1435     dest[i + 1] = source[i + 1];
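
The two added lines guard against an unsigned wrap: bytes is an unsigned int, so for bytes < 2 the loop bound bytes - 2 would wrap to a huge value and the copy would run far past both buffers. A standalone user-space demonstration of that wrap (not kernel code):

    #include <stdio.h>

    int main(void)
    {
            unsigned int bytes = 1;

            /* Without the guard, this is the loop bound swap_copy24() would use. */
            printf("bytes - 2 = %u\n", bytes - 2);  /* prints 4294967295 */

            if (bytes < 2)                          /* the added guard */
                    printf("guard taken: nothing to copy\n");

            return 0;
    }
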
1436     diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
1437     index 9d7ab7b66a8a1..3e668d7c4b57e 100644
1438     --- a/drivers/tty/vt/consolemap.c
1439     +++ b/drivers/tty/vt/consolemap.c
1440     @@ -494,7 +494,7 @@ con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos)
1441    
1442     p2[unicode & 0x3f] = fontpos;
1443    
1444     - p->sum += (fontpos << 20) + unicode;
1445     + p->sum += (fontpos << 20U) + unicode;
1446    
1447     return 0;
1448     }
1449     diff --git a/fs/jfs/jfs_filsys.h b/fs/jfs/jfs_filsys.h
1450     index b67d64671bb40..415bfa90607a2 100644
1451     --- a/fs/jfs/jfs_filsys.h
1452     +++ b/fs/jfs/jfs_filsys.h
1453     @@ -281,5 +281,6 @@
1454     * fsck() must be run to repair
1455     */
1456     #define FM_EXTENDFS 0x00000008 /* file system extendfs() in progress */
1457     +#define FM_STATE_MAX 0x0000000f /* max value of s_state */
1458    
1459     #endif /* _H_JFS_FILSYS */
1460     diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
1461     index 9895595fd2f24..103788ecc28c1 100644
1462     --- a/fs/jfs/jfs_mount.c
1463     +++ b/fs/jfs/jfs_mount.c
1464     @@ -49,6 +49,7 @@
1465    
1466     #include <linux/fs.h>
1467     #include <linux/buffer_head.h>
1468     +#include <linux/log2.h>
1469    
1470     #include "jfs_incore.h"
1471     #include "jfs_filsys.h"
1472     @@ -378,6 +379,15 @@ static int chkSuper(struct super_block *sb)
1473     sbi->bsize = bsize;
1474     sbi->l2bsize = le16_to_cpu(j_sb->s_l2bsize);
1475    
1476     + /* check some fields for possible corruption */
1477     + if (sbi->l2bsize != ilog2((u32)bsize) ||
1478     + j_sb->pad != 0 ||
1479     + le32_to_cpu(j_sb->s_state) > FM_STATE_MAX) {
1480     + rc = -EINVAL;
1481     + jfs_err("jfs_mount: Mount Failure: superblock is corrupt!");
1482     + goto out;
1483     + }
1484     +
1485     /*
1486     * For now, ignore s_pbsize, l2bfactor. All I/O going through buffer
1487     * cache.
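
chkSuper() now rejects a superblock whose stored log2 block size disagrees with the block size itself, alongside the pad and s_state checks. A standalone sketch of that consistency test, with ilog2() approximated by a small helper (not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int ilog2_u32(uint32_t v)
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    static int bsize_fields_consistent(uint32_t bsize, uint16_t l2bsize)
    {
            /* the stored log2 must match the recorded block size */
            return l2bsize == ilog2_u32(bsize);
    }

    int main(void)
    {
            printf("%d\n", bsize_fields_consistent(4096, 12)); /* 1: sane    */
            printf("%d\n", bsize_fields_consistent(4096, 9));  /* 0: corrupt */
            return 0;
    }
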
1488     diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
1489     index 666986b95c5d1..300cdbdc8494e 100644
1490     --- a/fs/sysfs/file.c
1491     +++ b/fs/sysfs/file.c
1492     @@ -17,6 +17,7 @@
1493     #include <linux/list.h>
1494     #include <linux/mutex.h>
1495     #include <linux/seq_file.h>
1496     +#include <linux/mm.h>
1497    
1498     #include "sysfs.h"
1499     #include "../kernfs/kernfs-internal.h"
1500     @@ -549,3 +550,57 @@ void sysfs_remove_bin_file(struct kobject *kobj,
1501     kernfs_remove_by_name(kobj->sd, attr->attr.name);
1502     }
1503     EXPORT_SYMBOL_GPL(sysfs_remove_bin_file);
1504     +
1505     +/**
1506     + * sysfs_emit - scnprintf equivalent, aware of PAGE_SIZE buffer.
1507     + * @buf: start of PAGE_SIZE buffer.
1508     + * @fmt: format
1509     + * @...: optional arguments to @fmt
1510     + *
1511     + *
1512     + * Returns number of characters written to @buf.
1513     + */
1514     +int sysfs_emit(char *buf, const char *fmt, ...)
1515     +{
1516     + va_list args;
1517     + int len;
1518     +
1519     + if (WARN(!buf || offset_in_page(buf),
1520     + "invalid sysfs_emit: buf:%p\n", buf))
1521     + return 0;
1522     +
1523     + va_start(args, fmt);
1524     + len = vscnprintf(buf, PAGE_SIZE, fmt, args);
1525     + va_end(args);
1526     +
1527     + return len;
1528     +}
1529     +EXPORT_SYMBOL_GPL(sysfs_emit);
1530     +
1531     +/**
1532     + * sysfs_emit_at - scnprintf equivalent, aware of PAGE_SIZE buffer.
1533     + * @buf: start of PAGE_SIZE buffer.
1534     + * @at: offset in @buf to start write in bytes
1535     + * @at must be >= 0 && < PAGE_SIZE
1536     + * @fmt: format
1537     + * @...: optional arguments to @fmt
1538     + *
1539     + *
1540     + * Returns number of characters written starting at &@buf[@at].
1541     + */
1542     +int sysfs_emit_at(char *buf, int at, const char *fmt, ...)
1543     +{
1544     + va_list args;
1545     + int len;
1546     +
1547     + if (WARN(!buf || offset_in_page(buf) || at < 0 || at >= PAGE_SIZE,
1548     + "invalid sysfs_emit_at: buf:%p at:%d\n", buf, at))
1549     + return 0;
1550     +
1551     + va_start(args, fmt);
1552     + len = vscnprintf(buf + at, PAGE_SIZE - at, fmt, args);
1553     + va_end(args);
1554     +
1555     + return len;
1556     +}
1557     +EXPORT_SYMBOL_GPL(sysfs_emit_at);
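
For attributes that emit several values, the new sysfs_emit_at() takes the running length as the offset so successive calls append into the same PAGE_SIZE buffer. An illustrative sketch of that usage, not part of the patch; the attribute name and values array are hypothetical:

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/sysfs.h>

    static const int example_values[] = { 1, 2, 3 };

    static ssize_t example_list_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
    {
            int len = 0;
            int i;

            /* each call appends at offset len and returns the bytes written */
            for (i = 0; i < ARRAY_SIZE(example_values); i++)
                    len += sysfs_emit_at(buf, len, "%d\n", example_values[i]);

            return len;
    }
    static DEVICE_ATTR_RO(example_list);
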
1558     diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
1559     index 0d587657056d8..d5948fb386fa0 100644
1560     --- a/fs/xfs/xfs_iops.c
1561     +++ b/fs/xfs/xfs_iops.c
1562     @@ -820,7 +820,7 @@ xfs_setattr_size(
1563     ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1564     ASSERT(S_ISREG(inode->i_mode));
1565     ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
1566     - ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
1567     + ATTR_MTIME_SET|ATTR_TIMES_SET)) == 0);
1568    
1569     oldsize = inode->i_size;
1570     newsize = iattr->ia_size;
1571     diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
1572     index d3c19f8c45649..a0cbc4836f366 100644
1573     --- a/include/linux/sysfs.h
1574     +++ b/include/linux/sysfs.h
1575     @@ -300,6 +300,11 @@ static inline void sysfs_enable_ns(struct kernfs_node *kn)
1576     return kernfs_enable_ns(kn);
1577     }
1578    
1579     +__printf(2, 3)
1580     +int sysfs_emit(char *buf, const char *fmt, ...);
1581     +__printf(3, 4)
1582     +int sysfs_emit_at(char *buf, int at, const char *fmt, ...);
1583     +
1584     #else /* CONFIG_SYSFS */
1585    
1586     static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
1587     @@ -506,6 +511,17 @@ static inline void sysfs_enable_ns(struct kernfs_node *kn)
1588     {
1589     }
1590    
1591     +__printf(2, 3)
1592     +static inline int sysfs_emit(char *buf, const char *fmt, ...)
1593     +{
1594     + return 0;
1595     +}
1596     +
1597     +__printf(3, 4)
1598     +static inline int sysfs_emit_at(char *buf, int at, const char *fmt, ...)
1599     +{
1600     + return 0;
1601     +}
1602     #endif /* CONFIG_SYSFS */
1603    
1604     static inline int __must_check sysfs_create_file(struct kobject *kobj,
1605     diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
1606     index 57a8e98f2708c..6c871102c2735 100644
1607     --- a/include/linux/zsmalloc.h
1608     +++ b/include/linux/zsmalloc.h
1609     @@ -36,7 +36,7 @@ enum zs_mapmode {
1610    
1611     struct zs_pool_stats {
1612     /* How many pages were migrated (freed) */
1613     - unsigned long pages_compacted;
1614     + atomic_long_t pages_compacted;
1615     };
1616    
1617     struct zs_pool;
1618     diff --git a/kernel/futex.c b/kernel/futex.c
1619     index 0b49a8e1e1bec..0015c14ac2c04 100644
1620     --- a/kernel/futex.c
1621     +++ b/kernel/futex.c
1622     @@ -827,7 +827,7 @@ static int refill_pi_state_cache(void)
1623     return 0;
1624     }
1625    
1626     -static struct futex_pi_state * alloc_pi_state(void)
1627     +static struct futex_pi_state *alloc_pi_state(void)
1628     {
1629     struct futex_pi_state *pi_state = current->pi_state_cache;
1630    
1631     @@ -860,11 +860,14 @@ static void pi_state_update_owner(struct futex_pi_state *pi_state,
1632     }
1633     }
1634    
1635     +static void get_pi_state(struct futex_pi_state *pi_state)
1636     +{
1637     + WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount));
1638     +}
1639     +
1640     /*
1641     * Drops a reference to the pi_state object and frees or caches it
1642     * when the last reference is gone.
1643     - *
1644     - * Must be called with the hb lock held.
1645     */
1646     static void put_pi_state(struct futex_pi_state *pi_state)
1647     {
1648     @@ -879,13 +882,17 @@ static void put_pi_state(struct futex_pi_state *pi_state)
1649     * and has cleaned up the pi_state already
1650     */
1651     if (pi_state->owner) {
1652     + unsigned long flags;
1653     +
1654     + raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
1655     pi_state_update_owner(pi_state, NULL);
1656     rt_mutex_proxy_unlock(&pi_state->pi_mutex);
1657     + raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
1658     }
1659    
1660     - if (current->pi_state_cache)
1661     + if (current->pi_state_cache) {
1662     kfree(pi_state);
1663     - else {
1664     + } else {
1665     /*
1666     * pi_state->list is already empty.
1667     * clear pi_state->owner.
1668     @@ -901,7 +908,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
1669     * Look up the task based on what TID userspace gave us.
1670     * We dont trust it.
1671     */
1672     -static struct task_struct * futex_find_get_task(pid_t pid)
1673     +static struct task_struct *futex_find_get_task(pid_t pid)
1674     {
1675     struct task_struct *p;
1676    
1677     @@ -936,22 +943,41 @@ static void exit_pi_state_list(struct task_struct *curr)
1678     */
1679     raw_spin_lock_irq(&curr->pi_lock);
1680     while (!list_empty(head)) {
1681     -
1682     next = head->next;
1683     pi_state = list_entry(next, struct futex_pi_state, list);
1684     key = pi_state->key;
1685     hb = hash_futex(&key);
1686     +
1687     + /*
1688     + * We can race against put_pi_state() removing itself from the
1689     + * list (a waiter going away). put_pi_state() will first
1690     + * decrement the reference count and then modify the list, so
1691     + * it's possible to see the list entry but fail this reference
1692     + * acquire.
1693     + *
1694     + * In that case; drop the locks to let put_pi_state() make
1695     + * progress and retry the loop.
1696     + */
1697     + if (!atomic_inc_not_zero(&pi_state->refcount)) {
1698     + raw_spin_unlock_irq(&curr->pi_lock);
1699     + cpu_relax();
1700     + raw_spin_lock_irq(&curr->pi_lock);
1701     + continue;
1702     + }
1703     raw_spin_unlock_irq(&curr->pi_lock);
1704    
1705     spin_lock(&hb->lock);
1706     -
1707     - raw_spin_lock_irq(&curr->pi_lock);
1708     + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1709     + raw_spin_lock(&curr->pi_lock);
1710     /*
1711     * We dropped the pi-lock, so re-check whether this
1712     * task still owns the PI-state:
1713     */
1714     if (head->next != next) {
1715     + /* retain curr->pi_lock for the loop invariant */
1716     + raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
1717     spin_unlock(&hb->lock);
1718     + put_pi_state(pi_state);
1719     continue;
1720     }
1721    
1722     @@ -959,12 +985,14 @@ static void exit_pi_state_list(struct task_struct *curr)
1723     WARN_ON(list_empty(&pi_state->list));
1724     list_del_init(&pi_state->list);
1725     pi_state->owner = NULL;
1726     - raw_spin_unlock_irq(&curr->pi_lock);
1727     -
1728     - rt_mutex_futex_unlock(&pi_state->pi_mutex);
1729    
1730     + raw_spin_unlock(&curr->pi_lock);
1731     + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1732     spin_unlock(&hb->lock);
1733    
1734     + rt_mutex_futex_unlock(&pi_state->pi_mutex);
1735     + put_pi_state(pi_state);
1736     +
1737     raw_spin_lock_irq(&curr->pi_lock);
1738     }
1739     raw_spin_unlock_irq(&curr->pi_lock);
1740     @@ -1078,6 +1106,11 @@ static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
1741     * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
1742     * which in turn means that futex_lock_pi() still has a reference on
1743     * our pi_state.
1744     + *
1745     + * The waiter holding a reference on @pi_state also protects against
1746     + * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
1747     + * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
1748     + * free pi_state before we can take a reference ourselves.
1749     */
1750     WARN_ON(!atomic_read(&pi_state->refcount));
1751    
1752     @@ -1149,7 +1182,7 @@ static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
1753     goto out_einval;
1754    
1755     out_attach:
1756     - atomic_inc(&pi_state->refcount);
1757     + get_pi_state(pi_state);
1758     raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1759     *ps = pi_state;
1760     return 0;
1761     @@ -1337,6 +1370,10 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
1762    
1763     WARN_ON(!list_empty(&pi_state->list));
1764     list_add(&pi_state->list, &p->pi_state_list);
1765     + /*
1766     + * Assignment without holding pi_state->pi_mutex.wait_lock is safe
1767     + * because there is no concurrency as the object is not published yet.
1768     + */
1769     pi_state->owner = p;
1770     raw_spin_unlock_irq(&p->pi_lock);
1771    
1772     @@ -1352,14 +1389,14 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
1773     union futex_key *key, struct futex_pi_state **ps,
1774     struct task_struct **exiting)
1775     {
1776     - struct futex_q *match = futex_top_waiter(hb, key);
1777     + struct futex_q *top_waiter = futex_top_waiter(hb, key);
1778    
1779     /*
1780     * If there is a waiter on that futex, validate it and
1781     * attach to the pi_state when the validation succeeds.
1782     */
1783     - if (match)
1784     - return attach_to_pi_state(uaddr, uval, match->pi_state, ps);
1785     + if (top_waiter)
1786     + return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
1787    
1788     /*
1789     * We are the first waiter - try to look up the owner based on
1790     @@ -1414,7 +1451,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
1791     int set_waiters)
1792     {
1793     u32 uval, newval, vpid = task_pid_vnr(task);
1794     - struct futex_q *match;
1795     + struct futex_q *top_waiter;
1796     int ret;
1797    
1798     /*
1799     @@ -1440,9 +1477,9 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
1800     * Lookup existing state first. If it exists, try to attach to
1801     * its pi_state.
1802     */
1803     - match = futex_top_waiter(hb, key);
1804     - if (match)
1805     - return attach_to_pi_state(uaddr, uval, match->pi_state, ps);
1806     + top_waiter = futex_top_waiter(hb, key);
1807     + if (top_waiter)
1808     + return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
1809    
1810     /*
1811     * No waiter and user TID is 0. We are here because the
1812     @@ -1532,48 +1569,35 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
1813     q->lock_ptr = NULL;
1814     }
1815    
1816     -static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
1817     - struct futex_hash_bucket *hb)
1818     +/*
1819     + * Caller must hold a reference on @pi_state.
1820     + */
1821     +static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
1822     {
1823     - struct task_struct *new_owner;
1824     - struct futex_pi_state *pi_state = this->pi_state;
1825     u32 uninitialized_var(curval), newval;
1826     + struct task_struct *new_owner;
1827     + bool deboost = false;
1828     WAKE_Q(wake_q);
1829     - bool deboost;
1830     int ret = 0;
1831    
1832     - if (!pi_state)
1833     - return -EINVAL;
1834     -
1835     - /*
1836     - * If current does not own the pi_state then the futex is
1837     - * inconsistent and user space fiddled with the futex value.
1838     - */
1839     - if (pi_state->owner != current)
1840     - return -EINVAL;
1841     -
1842     - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1843     new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
1844     -
1845     - /*
1846     - * When we interleave with futex_lock_pi() where it does
1847     - * rt_mutex_timed_futex_lock(), we might observe @this futex_q waiter,
1848     - * but the rt_mutex's wait_list can be empty (either still, or again,
1849     - * depending on which side we land).
1850     - *
1851     - * When this happens, give up our locks and try again, giving the
1852     - * futex_lock_pi() instance time to complete, either by waiting on the
1853     - * rtmutex or removing itself from the futex queue.
1854     - */
1855     - if (!new_owner) {
1856     - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1857     - return -EAGAIN;
1858     + if (WARN_ON_ONCE(!new_owner)) {
1859     + /*
1860     + * As per the comment in futex_unlock_pi() this should not happen.
1861     + *
1862     + * When this happens, give up our locks and try again, giving
1863     + * the futex_lock_pi() instance time to complete, either by
1864     + * waiting on the rtmutex or removing itself from the futex
1865     + * queue.
1866     + */
1867     + ret = -EAGAIN;
1868     + goto out_unlock;
1869     }
1870    
1871     /*
1872     - * We pass it to the next owner. The WAITERS bit is always
1873     - * kept enabled while there is PI state around. We cleanup the
1874     - * owner died bit, because we are the owner.
1875     + * We pass it to the next owner. The WAITERS bit is always kept
1876     + * enabled while there is PI state around. We cleanup the owner
1877     + * died bit, because we are the owner.
1878     */
1879     newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
1880    
1881     @@ -1606,15 +1630,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
1882     deboost = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
1883     }
1884    
1885     +out_unlock:
1886     raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1887     - spin_unlock(&hb->lock);
1888    
1889     if (deboost) {
1890     wake_up_q(&wake_q);
1891     rt_mutex_adjust_prio(current);
1892     }
1893    
1894     - return 0;
1895     + return ret;
1896     }
1897    
1898     /*
1899     @@ -2210,7 +2234,7 @@ retry_private:
1900     * refcount on the pi_state and store the pointer in
1901     * the futex_q object of the waiter.
1902     */
1903     - atomic_inc(&pi_state->refcount);
1904     + get_pi_state(pi_state);
1905     this->pi_state = pi_state;
1906     ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1907     this->rt_waiter,
1908     @@ -2488,7 +2512,7 @@ retry:
1909     if (get_futex_value_locked(&uval, uaddr))
1910     goto handle_fault;
1911    
1912     - while (1) {
1913     + for (;;) {
1914     newval = (uval & FUTEX_OWNER_DIED) | newtid;
1915    
1916     if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
1917     @@ -2975,7 +2999,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
1918     u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
1919     union futex_key key = FUTEX_KEY_INIT;
1920     struct futex_hash_bucket *hb;
1921     - struct futex_q *match;
1922     + struct futex_q *top_waiter;
1923     int ret;
1924    
1925     retry:
1926     @@ -2999,12 +3023,42 @@ retry:
1927     * all and we at least want to know if user space fiddled
1928     * with the futex value instead of blindly unlocking.
1929     */
1930     - match = futex_top_waiter(hb, &key);
1931     - if (match) {
1932     - ret = wake_futex_pi(uaddr, uval, match, hb);
1933     + top_waiter = futex_top_waiter(hb, &key);
1934     + if (top_waiter) {
1935     + struct futex_pi_state *pi_state = top_waiter->pi_state;
1936     +
1937     + ret = -EINVAL;
1938     + if (!pi_state)
1939     + goto out_unlock;
1940     +
1941     + /*
1942     + * If current does not own the pi_state then the futex is
1943     + * inconsistent and user space fiddled with the futex value.
1944     + */
1945     + if (pi_state->owner != current)
1946     + goto out_unlock;
1947     +
1948     + get_pi_state(pi_state);
1949     + /*
1950     + * Since modifying the wait_list is done while holding both
1951     + * hb->lock and wait_lock, holding either is sufficient to
1952     + * observe it.
1953     + *
1954     + * By taking wait_lock while still holding hb->lock, we ensure
1955     + * there is no point where we hold neither; and therefore
1956     + * wake_futex_pi() must observe a state consistent with what we
1957     + * observed.
1958     + */
1959     + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1960     + spin_unlock(&hb->lock);
1961     +
1962     + /* drops pi_state->pi_mutex.wait_lock */
1963     + ret = wake_futex_pi(uaddr, uval, pi_state);
1964     +
1965     + put_pi_state(pi_state);
1966     +
1967     /*
1968     - * In case of success wake_futex_pi dropped the hash
1969     - * bucket lock.
1970     + * Success, we're done! No tricky corner cases.
1971     */
1972     if (!ret)
1973     goto out_putkey;
1974     @@ -3019,7 +3073,6 @@ retry:
1975     * setting the FUTEX_WAITERS bit. Try again.
1976     */
1977     if (ret == -EAGAIN) {
1978     - spin_unlock(&hb->lock);
1979     put_futex_key(&key);
1980     goto retry;
1981     }
1982     @@ -3027,7 +3080,7 @@ retry:
1983     * wake_futex_pi has detected invalid state. Tell user
1984     * space.
1985     */
1986     - goto out_unlock;
1987     + goto out_putkey;
1988     }
1989    
1990     /*
1991     @@ -3037,8 +3090,10 @@ retry:
1992     * preserve the WAITERS bit not the OWNER_DIED one. We are the
1993     * owner.
1994     */
1995     - if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
1996     + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) {
1997     + spin_unlock(&hb->lock);
1998     goto pi_faulted;
1999     + }
2000    
2001     /*
2002     * If uval has changed, let user space handle it.
2003     @@ -3052,7 +3107,6 @@ out_putkey:
2004     return ret;
2005    
2006     pi_faulted:
2007     - spin_unlock(&hb->lock);
2008     put_futex_key(&key);
2009    
2010     ret = fault_in_user_writeable(uaddr);
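
Several of the futex hunks rely on one rule: a reference to a pi_state may only be taken with an increment-unless-zero operation (get_pi_state() / atomic_inc_not_zero()), so an object whose last reference is already being dropped can never be resurrected, and a lookup that loses this race backs off and retries. A standalone user-space sketch of that primitive using C11 atomics (not kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct obj {
            atomic_int refcount;
    };

    static bool get_ref_not_zero(struct obj *o)
    {
            int old = atomic_load(&o->refcount);

            while (old != 0) {
                    /* on failure, old is reloaded with the current value */
                    if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
                            return true;    /* took a reference */
            }
            return false;                   /* object already on its way out */
    }

    int main(void)
    {
            struct obj live, dying;

            atomic_init(&live.refcount, 1);
            atomic_init(&dying.refcount, 0);

            printf("live:  %s\n", get_ref_not_zero(&live) ? "acquired" : "skipped");
            printf("dying: %s\n", get_ref_not_zero(&dying) ? "acquired" : "skipped");
            return 0;
    }
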
2011     diff --git a/kernel/printk/nmi.c b/kernel/printk/nmi.c
2012     index 2c3e7f024c15c..7a50b405ad288 100644
2013     --- a/kernel/printk/nmi.c
2014     +++ b/kernel/printk/nmi.c
2015     @@ -52,6 +52,8 @@ struct nmi_seq_buf {
2016     };
2017     static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
2018    
2019     +static DEFINE_RAW_SPINLOCK(nmi_read_lock);
2020     +
2021     /*
2022     * Safe printk() for NMI context. It uses a per-CPU buffer to
2023     * store the message. NMIs are not nested, so there is always only
2024     @@ -134,8 +136,6 @@ static void printk_nmi_flush_seq_line(struct nmi_seq_buf *s,
2025     */
2026     static void __printk_nmi_flush(struct irq_work *work)
2027     {
2028     - static raw_spinlock_t read_lock =
2029     - __RAW_SPIN_LOCK_INITIALIZER(read_lock);
2030     struct nmi_seq_buf *s = container_of(work, struct nmi_seq_buf, work);
2031     unsigned long flags;
2032     size_t len, size;
2033     @@ -148,7 +148,7 @@ static void __printk_nmi_flush(struct irq_work *work)
2034     * different CPUs. This is especially important when printing
2035     * a backtrace.
2036     */
2037     - raw_spin_lock_irqsave(&read_lock, flags);
2038     + raw_spin_lock_irqsave(&nmi_read_lock, flags);
2039    
2040     i = 0;
2041     more:
2042     @@ -197,7 +197,7 @@ more:
2043     goto more;
2044    
2045     out:
2046     - raw_spin_unlock_irqrestore(&read_lock, flags);
2047     + raw_spin_unlock_irqrestore(&nmi_read_lock, flags);
2048     }
2049    
2050     /**
2051     @@ -239,6 +239,14 @@ void printk_nmi_flush_on_panic(void)
2052     raw_spin_lock_init(&logbuf_lock);
2053     }
2054    
2055     + if (in_nmi() && raw_spin_is_locked(&nmi_read_lock)) {
2056     + if (num_online_cpus() > 1)
2057     + return;
2058     +
2059     + debug_locks_off();
2060     + raw_spin_lock_init(&nmi_read_lock);
2061     + }
2062     +
2063     printk_nmi_flush();
2064     }
2065    
2066     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2067     index e814cc1785354..e2b5e38e7a4b7 100644
2068     --- a/mm/hugetlb.c
2069     +++ b/mm/hugetlb.c
2070     @@ -1185,14 +1185,16 @@ static inline int alloc_fresh_gigantic_page(struct hstate *h,
2071     static void update_and_free_page(struct hstate *h, struct page *page)
2072     {
2073     int i;
2074     + struct page *subpage = page;
2075    
2076     if (hstate_is_gigantic(h) && !gigantic_page_supported())
2077     return;
2078    
2079     h->nr_huge_pages--;
2080     h->nr_huge_pages_node[page_to_nid(page)]--;
2081     - for (i = 0; i < pages_per_huge_page(h); i++) {
2082     - page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
2083     + for (i = 0; i < pages_per_huge_page(h);
2084     + i++, subpage = mem_map_next(subpage, page, i)) {
2085     + subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
2086     1 << PG_referenced | 1 << PG_dirty |
2087     1 << PG_active | 1 << PG_private |
2088     1 << PG_writeback);
2089     @@ -4434,21 +4436,23 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
2090     void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
2091     unsigned long *start, unsigned long *end)
2092     {
2093     - unsigned long a_start, a_end;
2094     + unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
2095     + v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
2096    
2097     - if (!(vma->vm_flags & VM_MAYSHARE))
2098     + /*
2099     + * The vma needs to span at least one aligned PUD size and the start,end
2100     + * range must be at least partially within it.
2101     + */
2102     + if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
2103     + (*end <= v_start) || (*start >= v_end))
2104     return;
2105    
2106     /* Extend the range to be PUD aligned for a worst case scenario */
2107     - a_start = ALIGN_DOWN(*start, PUD_SIZE);
2108     - a_end = ALIGN(*end, PUD_SIZE);
2109     + if (*start > v_start)
2110     + *start = ALIGN_DOWN(*start, PUD_SIZE);
2111    
2112     - /*
2113     - * Intersect the range with the vma range, since pmd sharing won't be
2114     - * across vma after all
2115     - */
2116     - *start = max(vma->vm_start, a_start);
2117     - *end = min(vma->vm_end, a_end);
2118     + if (*end < v_end)
2119     + *end = ALIGN(*end, PUD_SIZE);
2120     }
2121    
2122     /*
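
The rewritten adjust_range_if_pmd_sharing_possible() only widens the range when the vma itself contains a fully aligned PUD-sized region and the range intersects it. A standalone walk-through of that arithmetic (not kernel code; the VM_MAYSHARE test is omitted, PUD_SIZE is assumed to be 1 GiB as on x86_64 with 4K pages, and 64-bit unsigned long is assumed):

    #include <stdio.h>

    #define PUD_SIZE        (1UL << 30)
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
    #define ALIGN(x, a)      ALIGN_DOWN((x) + (a) - 1, (a))

    static void adjust_range(unsigned long vm_start, unsigned long vm_end,
                             unsigned long *start, unsigned long *end)
    {
            unsigned long v_start = ALIGN(vm_start, PUD_SIZE);
            unsigned long v_end = ALIGN_DOWN(vm_end, PUD_SIZE);

            /* vma must span an aligned PUD and the range must intersect it */
            if (!(v_end > v_start) || *end <= v_start || *start >= v_end)
                    return;

            if (*start > v_start)
                    *start = ALIGN_DOWN(*start, PUD_SIZE);
            if (*end < v_end)
                    *end = ALIGN(*end, PUD_SIZE);
    }

    int main(void)
    {
            unsigned long start = (3UL << 30) + 0x1000;
            unsigned long end = (3UL << 30) + (2UL << 20);

            adjust_range(2UL << 30, 5UL << 30, &start, &end);
            /* widened to 0xc0000000..0x100000000 */
            printf("start=%#lx end=%#lx\n", start, end);
            return 0;
    }
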
2123     diff --git a/mm/page_io.c b/mm/page_io.c
2124     index a2651f58c86a2..ad0e0ce31090e 100644
2125     --- a/mm/page_io.c
2126     +++ b/mm/page_io.c
2127     @@ -32,7 +32,6 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
2128     bio = bio_alloc(gfp_flags, 1);
2129     if (bio) {
2130     bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
2131     - bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
2132     bio->bi_end_io = end_io;
2133    
2134     bio_add_page(bio, page, PAGE_SIZE, 0);
2135     @@ -252,11 +251,6 @@ out:
2136     return ret;
2137     }
2138    
2139     -static sector_t swap_page_sector(struct page *page)
2140     -{
2141     - return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
2142     -}
2143     -
2144     int __swap_writepage(struct page *page, struct writeback_control *wbc,
2145     bio_end_io_t end_write_func)
2146     {
2147     @@ -306,7 +300,8 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
2148     return ret;
2149     }
2150    
2151     - ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
2152     + ret = bdev_write_page(sis->bdev, map_swap_page(page, &sis->bdev),
2153     + page, wbc);
2154     if (!ret) {
2155     count_vm_event(PSWPOUT);
2156     return 0;
2157     @@ -357,7 +352,7 @@ int swap_readpage(struct page *page)
2158     return ret;
2159     }
2160    
2161     - ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
2162     + ret = bdev_read_page(sis->bdev, map_swap_page(page, &sis->bdev), page);
2163     if (!ret) {
2164     if (trylock_page(page)) {
2165     swap_slot_free_notify(page);
2166     diff --git a/mm/swapfile.c b/mm/swapfile.c
2167     index 855f62ab8c1b3..8a0d969a6ebd9 100644
2168     --- a/mm/swapfile.c
2169     +++ b/mm/swapfile.c
2170     @@ -1666,7 +1666,7 @@ sector_t map_swap_page(struct page *page, struct block_device **bdev)
2171     {
2172     swp_entry_t entry;
2173     entry.val = page_private(page);
2174     - return map_swap_entry(entry, bdev);
2175     + return map_swap_entry(entry, bdev) << (PAGE_SHIFT - 9);
2176     }
2177    
2178     /*
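
With this change map_swap_page() returns an offset already expressed in 512-byte sectors (shifted by PAGE_SHIFT - 9), so the bdev_read_page()/bdev_write_page() callers in mm/page_io.c no longer convert a page index themselves. A standalone illustration of the arithmetic, assuming 4 KiB pages (not kernel code):

    #include <stdio.h>

    #define PAGE_SHIFT      12      /* 4096-byte pages */
    #define SECTOR_SHIFT    9       /* 512-byte sectors */

    int main(void)
    {
            unsigned long long page_index = 3;      /* third page of the swap area */
            unsigned long long sector = page_index << (PAGE_SHIFT - SECTOR_SHIFT);

            /* 3 pages * 4096 bytes = 12288 bytes = 24 sectors of 512 bytes */
            printf("page %llu -> sector %llu\n", page_index, sector);
            return 0;
    }
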
2179     diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
2180     index e4cca3f5331ec..8db3c2b27a175 100644
2181     --- a/mm/zsmalloc.c
2182     +++ b/mm/zsmalloc.c
2183     @@ -2332,11 +2332,13 @@ static unsigned long zs_can_compact(struct size_class *class)
2184     return obj_wasted * class->pages_per_zspage;
2185     }
2186    
2187     -static void __zs_compact(struct zs_pool *pool, struct size_class *class)
2188     +static unsigned long __zs_compact(struct zs_pool *pool,
2189     + struct size_class *class)
2190     {
2191     struct zs_compact_control cc;
2192     struct zspage *src_zspage;
2193     struct zspage *dst_zspage = NULL;
2194     + unsigned long pages_freed = 0;
2195    
2196     spin_lock(&class->lock);
2197     while ((src_zspage = isolate_zspage(class, true))) {
2198     @@ -2366,7 +2368,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
2199     putback_zspage(class, dst_zspage);
2200     if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
2201     free_zspage(pool, class, src_zspage);
2202     - pool->stats.pages_compacted += class->pages_per_zspage;
2203     + pages_freed += class->pages_per_zspage;
2204     }
2205     spin_unlock(&class->lock);
2206     cond_resched();
2207     @@ -2377,12 +2379,15 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
2208     putback_zspage(class, src_zspage);
2209    
2210     spin_unlock(&class->lock);
2211     +
2212     + return pages_freed;
2213     }
2214    
2215     unsigned long zs_compact(struct zs_pool *pool)
2216     {
2217     int i;
2218     struct size_class *class;
2219     + unsigned long pages_freed = 0;
2220    
2221     for (i = zs_size_classes - 1; i >= 0; i--) {
2222     class = pool->size_class[i];
2223     @@ -2390,10 +2395,11 @@ unsigned long zs_compact(struct zs_pool *pool)
2224     continue;
2225     if (class->index != i)
2226     continue;
2227     - __zs_compact(pool, class);
2228     + pages_freed += __zs_compact(pool, class);
2229     }
2230     + atomic_long_add(pages_freed, &pool->stats.pages_compacted);
2231    
2232     - return pool->stats.pages_compacted;
2233     + return pages_freed;
2234     }
2235     EXPORT_SYMBOL_GPL(zs_compact);
2236    
2237     @@ -2410,13 +2416,12 @@ static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
2238     struct zs_pool *pool = container_of(shrinker, struct zs_pool,
2239     shrinker);
2240    
2241     - pages_freed = pool->stats.pages_compacted;
2242     /*
2243     * Compact classes and calculate compaction delta.
2244     * Can run concurrently with a manually triggered
2245     * (by user) compaction.
2246     */
2247     - pages_freed = zs_compact(pool) - pages_freed;
2248     + pages_freed = zs_compact(pool);
2249    
2250     return pages_freed ? pages_freed : SHRINK_STOP;
2251     }
2252     diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
2253     index e32f341890079..b01b43ab6f834 100644
2254     --- a/net/bluetooth/amp.c
2255     +++ b/net/bluetooth/amp.c
2256     @@ -305,6 +305,9 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
2257     struct hci_request req;
2258     int err = 0;
2259    
2260     + if (!mgr)
2261     + return;
2262     +
2263     cp.phy_handle = hcon->handle;
2264     cp.len_so_far = cpu_to_le16(0);
2265     cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
2266     diff --git a/net/core/pktgen.c b/net/core/pktgen.c
2267     index 433b26feb320c..8a72b984267a6 100644
2268     --- a/net/core/pktgen.c
2269     +++ b/net/core/pktgen.c
2270     @@ -3555,7 +3555,7 @@ static int pktgen_thread_worker(void *arg)
2271     struct pktgen_dev *pkt_dev = NULL;
2272     int cpu = t->cpu;
2273    
2274     - BUG_ON(smp_processor_id() != cpu);
2275     + WARN_ON(smp_processor_id() != cpu);
2276    
2277     init_waitqueue_head(&t->queue);
2278     complete(&t->start_done);
2279     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2280     index 79034fb861b52..076444dac96d1 100644
2281     --- a/net/core/skbuff.c
2282     +++ b/net/core/skbuff.c
2283     @@ -2673,7 +2673,19 @@ EXPORT_SYMBOL(skb_split);
2284     */
2285     static int skb_prepare_for_shift(struct sk_buff *skb)
2286     {
2287     - return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2288     + int ret = 0;
2289     +
2290     + if (skb_cloned(skb)) {
2291     + /* Save and restore truesize: pskb_expand_head() may reallocate
2292     + * memory where ksize(kmalloc(S)) != ksize(kmalloc(S)), but we
2293     + * cannot change truesize at this point.
2294     + */
2295     + unsigned int save_truesize = skb->truesize;
2296     +
2297     + ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2298     + skb->truesize = save_truesize;
2299     + }
2300     + return ret;
2301     }
2302    
2303     /**
2304     diff --git a/scripts/Makefile b/scripts/Makefile
2305     index 1d80897a96442..9116feaacee2a 100644
2306     --- a/scripts/Makefile
2307     +++ b/scripts/Makefile
2308     @@ -11,6 +11,9 @@
2309    
2310     HOST_EXTRACFLAGS += -I$(srctree)/tools/include
2311    
2312     +CRYPTO_LIBS = $(shell pkg-config --libs libcrypto 2> /dev/null || echo -lcrypto)
2313     +CRYPTO_CFLAGS = $(shell pkg-config --cflags libcrypto 2> /dev/null)
2314     +
2315     hostprogs-$(CONFIG_KALLSYMS) += kallsyms
2316     hostprogs-$(CONFIG_LOGO) += pnmtologo
2317     hostprogs-$(CONFIG_VT) += conmakehash
2318     @@ -23,8 +26,10 @@ hostprogs-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE) += insert-sys-cert
2319    
2320     HOSTCFLAGS_sortextable.o = -I$(srctree)/tools/include
2321     HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include
2322     -HOSTLOADLIBES_sign-file = -lcrypto
2323     -HOSTLOADLIBES_extract-cert = -lcrypto
2324     +HOSTCFLAGS_sign-file.o = $(CRYPTO_CFLAGS)
2325     +HOSTLOADLIBES_sign-file = $(CRYPTO_LIBS)
2326     +HOSTCFLAGS_extract-cert.o = $(CRYPTO_CFLAGS)
2327     +HOSTLOADLIBES_extract-cert = $(CRYPTO_LIBS)
2328    
2329     always := $(hostprogs-y) $(hostprogs-m)
2330    
2331     diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
2332     index 4aecdc8f74b2a..04a53cdb409fa 100644
2333     --- a/security/smack/smackfs.c
2334     +++ b/security/smack/smackfs.c
2335     @@ -1186,7 +1186,7 @@ static ssize_t smk_write_net4addr(struct file *file, const char __user *buf,
2336     return -EPERM;
2337     if (*ppos != 0)
2338     return -EINVAL;
2339     - if (count < SMK_NETLBLADDRMIN)
2340     + if (count < SMK_NETLBLADDRMIN || count > PAGE_SIZE - 1)
2341     return -EINVAL;
2342    
2343     data = memdup_user_nul(buf, count);
2344     @@ -1446,7 +1446,7 @@ static ssize_t smk_write_net6addr(struct file *file, const char __user *buf,
2345     return -EPERM;
2346     if (*ppos != 0)
2347     return -EINVAL;
2348     - if (count < SMK_NETLBLADDRMIN)
2349     + if (count < SMK_NETLBLADDRMIN || count > PAGE_SIZE - 1)
2350     return -EINVAL;
2351    
2352     data = memdup_user_nul(buf, count);
2353     @@ -1853,6 +1853,10 @@ static ssize_t smk_write_ambient(struct file *file, const char __user *buf,
2354     if (!smack_privileged(CAP_MAC_ADMIN))
2355     return -EPERM;
2356    
2357     + /* Enough data must be present */
2358     + if (count == 0 || count > PAGE_SIZE)
2359     + return -EINVAL;
2360     +
2361     data = memdup_user_nul(buf, count);
2362     if (IS_ERR(data))
2363     return PTR_ERR(data);
2364     @@ -2024,6 +2028,9 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
2365     if (!smack_privileged(CAP_MAC_ADMIN))
2366     return -EPERM;
2367    
2368     + if (count > PAGE_SIZE)
2369     + return -EINVAL;
2370     +
2371     data = memdup_user_nul(buf, count);
2372     if (IS_ERR(data))
2373     return PTR_ERR(data);
2374     @@ -2111,6 +2118,9 @@ static ssize_t smk_write_unconfined(struct file *file, const char __user *buf,
2375     if (!smack_privileged(CAP_MAC_ADMIN))
2376     return -EPERM;
2377    
2378     + if (count > PAGE_SIZE)
2379     + return -EINVAL;
2380     +
2381     data = memdup_user_nul(buf, count);
2382     if (IS_ERR(data))
2383     return PTR_ERR(data);
2384     @@ -2664,6 +2674,10 @@ static ssize_t smk_write_syslog(struct file *file, const char __user *buf,
2385     if (!smack_privileged(CAP_MAC_ADMIN))
2386     return -EPERM;
2387    
2388     + /* Enough data must be present */
2389     + if (count == 0 || count > PAGE_SIZE)
2390     + return -EINVAL;
2391     +
2392     data = memdup_user_nul(buf, count);
2393     if (IS_ERR(data))
2394     return PTR_ERR(data);
2395     @@ -2756,10 +2770,13 @@ static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf,
2396     return -EPERM;
2397    
2398     /*
2399     + * No partial write.
2400     * Enough data must be present.
2401     */
2402     if (*ppos != 0)
2403     return -EINVAL;
2404     + if (count == 0 || count > PAGE_SIZE)
2405     + return -EINVAL;
2406    
2407     data = memdup_user_nul(buf, count);
2408     if (IS_ERR(data))