Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0393-4.9.294-all-fixes.patch

Revision 3695
Mon Oct 24 14:08:09 2022 UTC by niro
File size: 53022 bytes
-linux-4.9.294
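Most of the ARM changes below convert the copypage helpers from __naked functions with hand-written register usage (r0/r1 implicitly holding the arguments) into ordinary C functions whose inline assembly receives the pointers and the loop counter as operands, with the scratch registers listed as clobbers, so the compiler emits the prologue/epilogue itself and the stmfd/ldmfd pairs can go away. The stand-alone sketch below is not part of the patch; the function name and word count are illustrative, and it assumes an ARM GCC target. It only shows the operand-constraint pattern the patched helpers use.

/*
 * Minimal sketch: a word-copy loop written the way the patched copypage
 * helpers are -- pointers and counter passed as read-write early-clobber
 * operands ("+&r"), a temporary tied to an input via a matching
 * constraint ("2"), and the scratch register, flags and memory listed as
 * clobbers (the clobber list here is deliberately conservative).
 */
static void copy_words(void *dst, const void *src, int nwords)
{
	int tmp;

	asm volatile ("\
1:	ldr	r3, [%1], #4		@ load one word, advance src\n\
	str	r3, [%0], #4		@ store it, advance dst\n\
	subs	%2, %2, #1		@ decrement the word counter\n\
	bne	1b			@ loop until it reaches zero"
	: "+&r" (dst), "+&r" (src), "=&r" (tmp)
	: "2" (nwords)
	: "r3", "cc", "memory");
}

Because the compiler chooses which registers back %0-%2 and handles saving anything it needs, nothing in the body has to push or pop registers by hand, which is exactly the simplification applied to each helper in the hunks that follow.
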
1 diff --git a/Makefile b/Makefile
2 index a07a010095bc9..6f3b4e1e9a144 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 293
9 +SUBLEVEL = 294
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c
14 index d130a5ece5d55..bf24690ec83af 100644
15 --- a/arch/arm/mm/copypage-fa.c
16 +++ b/arch/arm/mm/copypage-fa.c
17 @@ -17,26 +17,25 @@
18 /*
19 * Faraday optimised copy_user_page
20 */
21 -static void __naked
22 -fa_copy_user_page(void *kto, const void *kfrom)
23 +static void fa_copy_user_page(void *kto, const void *kfrom)
24 {
25 - asm("\
26 - stmfd sp!, {r4, lr} @ 2\n\
27 - mov r2, %0 @ 1\n\
28 -1: ldmia r1!, {r3, r4, ip, lr} @ 4\n\
29 - stmia r0, {r3, r4, ip, lr} @ 4\n\
30 - mcr p15, 0, r0, c7, c14, 1 @ 1 clean and invalidate D line\n\
31 - add r0, r0, #16 @ 1\n\
32 - ldmia r1!, {r3, r4, ip, lr} @ 4\n\
33 - stmia r0, {r3, r4, ip, lr} @ 4\n\
34 - mcr p15, 0, r0, c7, c14, 1 @ 1 clean and invalidate D line\n\
35 - add r0, r0, #16 @ 1\n\
36 - subs r2, r2, #1 @ 1\n\
37 + int tmp;
38 +
39 + asm volatile ("\
40 +1: ldmia %1!, {r3, r4, ip, lr} @ 4\n\
41 + stmia %0, {r3, r4, ip, lr} @ 4\n\
42 + mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\
43 + add %0, %0, #16 @ 1\n\
44 + ldmia %1!, {r3, r4, ip, lr} @ 4\n\
45 + stmia %0, {r3, r4, ip, lr} @ 4\n\
46 + mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\
47 + add %0, %0, #16 @ 1\n\
48 + subs %2, %2, #1 @ 1\n\
49 bne 1b @ 1\n\
50 - mcr p15, 0, r2, c7, c10, 4 @ 1 drain WB\n\
51 - ldmfd sp!, {r4, pc} @ 3"
52 - :
53 - : "I" (PAGE_SIZE / 32));
54 + mcr p15, 0, %2, c7, c10, 4 @ 1 drain WB"
55 + : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
56 + : "2" (PAGE_SIZE / 32)
57 + : "r3", "r4", "ip", "lr");
58 }
59
60 void fa_copy_user_highpage(struct page *to, struct page *from,
61 diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
62 index 49ee0c1a72097..cc819732d9b82 100644
63 --- a/arch/arm/mm/copypage-feroceon.c
64 +++ b/arch/arm/mm/copypage-feroceon.c
65 @@ -13,58 +13,56 @@
66 #include <linux/init.h>
67 #include <linux/highmem.h>
68
69 -static void __naked
70 -feroceon_copy_user_page(void *kto, const void *kfrom)
71 +static void feroceon_copy_user_page(void *kto, const void *kfrom)
72 {
73 - asm("\
74 - stmfd sp!, {r4-r9, lr} \n\
75 - mov ip, %2 \n\
76 -1: mov lr, r1 \n\
77 - ldmia r1!, {r2 - r9} \n\
78 - pld [lr, #32] \n\
79 - pld [lr, #64] \n\
80 - pld [lr, #96] \n\
81 - pld [lr, #128] \n\
82 - pld [lr, #160] \n\
83 - pld [lr, #192] \n\
84 - pld [lr, #224] \n\
85 - stmia r0, {r2 - r9} \n\
86 - ldmia r1!, {r2 - r9} \n\
87 - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
88 - add r0, r0, #32 \n\
89 - stmia r0, {r2 - r9} \n\
90 - ldmia r1!, {r2 - r9} \n\
91 - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
92 - add r0, r0, #32 \n\
93 - stmia r0, {r2 - r9} \n\
94 - ldmia r1!, {r2 - r9} \n\
95 - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
96 - add r0, r0, #32 \n\
97 - stmia r0, {r2 - r9} \n\
98 - ldmia r1!, {r2 - r9} \n\
99 - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
100 - add r0, r0, #32 \n\
101 - stmia r0, {r2 - r9} \n\
102 - ldmia r1!, {r2 - r9} \n\
103 - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
104 - add r0, r0, #32 \n\
105 - stmia r0, {r2 - r9} \n\
106 - ldmia r1!, {r2 - r9} \n\
107 - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
108 - add r0, r0, #32 \n\
109 - stmia r0, {r2 - r9} \n\
110 - ldmia r1!, {r2 - r9} \n\
111 - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
112 - add r0, r0, #32 \n\
113 - stmia r0, {r2 - r9} \n\
114 - subs ip, ip, #(32 * 8) \n\
115 - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
116 - add r0, r0, #32 \n\
117 + int tmp;
118 +
119 + asm volatile ("\
120 +1: ldmia %1!, {r2 - r7, ip, lr} \n\
121 + pld [%1, #0] \n\
122 + pld [%1, #32] \n\
123 + pld [%1, #64] \n\
124 + pld [%1, #96] \n\
125 + pld [%1, #128] \n\
126 + pld [%1, #160] \n\
127 + pld [%1, #192] \n\
128 + stmia %0, {r2 - r7, ip, lr} \n\
129 + ldmia %1!, {r2 - r7, ip, lr} \n\
130 + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
131 + add %0, %0, #32 \n\
132 + stmia %0, {r2 - r7, ip, lr} \n\
133 + ldmia %1!, {r2 - r7, ip, lr} \n\
134 + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
135 + add %0, %0, #32 \n\
136 + stmia %0, {r2 - r7, ip, lr} \n\
137 + ldmia %1!, {r2 - r7, ip, lr} \n\
138 + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
139 + add %0, %0, #32 \n\
140 + stmia %0, {r2 - r7, ip, lr} \n\
141 + ldmia %1!, {r2 - r7, ip, lr} \n\
142 + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
143 + add %0, %0, #32 \n\
144 + stmia %0, {r2 - r7, ip, lr} \n\
145 + ldmia %1!, {r2 - r7, ip, lr} \n\
146 + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
147 + add %0, %0, #32 \n\
148 + stmia %0, {r2 - r7, ip, lr} \n\
149 + ldmia %1!, {r2 - r7, ip, lr} \n\
150 + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
151 + add %0, %0, #32 \n\
152 + stmia %0, {r2 - r7, ip, lr} \n\
153 + ldmia %1!, {r2 - r7, ip, lr} \n\
154 + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
155 + add %0, %0, #32 \n\
156 + stmia %0, {r2 - r7, ip, lr} \n\
157 + subs %2, %2, #(32 * 8) \n\
158 + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
159 + add %0, %0, #32 \n\
160 bne 1b \n\
161 - mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\
162 - ldmfd sp!, {r4-r9, pc}"
163 - :
164 - : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE));
165 + mcr p15, 0, %2, c7, c10, 4 @ drain WB"
166 + : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
167 + : "2" (PAGE_SIZE)
168 + : "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
169 }
170
171 void feroceon_copy_user_highpage(struct page *to, struct page *from,
172 diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
173 index 1267e64133b92..db624170854a0 100644
174 --- a/arch/arm/mm/copypage-v4mc.c
175 +++ b/arch/arm/mm/copypage-v4mc.c
176 @@ -40,12 +40,11 @@ static DEFINE_RAW_SPINLOCK(minicache_lock);
177 * instruction. If your processor does not supply this, you have to write your
178 * own copy_user_highpage that does the right thing.
179 */
180 -static void __naked
181 -mc_copy_user_page(void *from, void *to)
182 +static void mc_copy_user_page(void *from, void *to)
183 {
184 - asm volatile(
185 - "stmfd sp!, {r4, lr} @ 2\n\
186 - mov r4, %2 @ 1\n\
187 + int tmp;
188 +
189 + asm volatile ("\
190 ldmia %0!, {r2, r3, ip, lr} @ 4\n\
191 1: mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\
192 stmia %1!, {r2, r3, ip, lr} @ 4\n\
193 @@ -55,13 +54,13 @@ mc_copy_user_page(void *from, void *to)
194 mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\
195 stmia %1!, {r2, r3, ip, lr} @ 4\n\
196 ldmia %0!, {r2, r3, ip, lr} @ 4\n\
197 - subs r4, r4, #1 @ 1\n\
198 + subs %2, %2, #1 @ 1\n\
199 stmia %1!, {r2, r3, ip, lr} @ 4\n\
200 ldmneia %0!, {r2, r3, ip, lr} @ 4\n\
201 - bne 1b @ 1\n\
202 - ldmfd sp!, {r4, pc} @ 3"
203 - :
204 - : "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
205 + bne 1b @ "
206 + : "+&r" (from), "+&r" (to), "=&r" (tmp)
207 + : "2" (PAGE_SIZE / 64)
208 + : "r2", "r3", "ip", "lr");
209 }
210
211 void v4_mc_copy_user_highpage(struct page *to, struct page *from,
212 diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
213 index 067d0fdd630c1..cd3e165afeede 100644
214 --- a/arch/arm/mm/copypage-v4wb.c
215 +++ b/arch/arm/mm/copypage-v4wb.c
216 @@ -22,29 +22,28 @@
217 * instruction. If your processor does not supply this, you have to write your
218 * own copy_user_highpage that does the right thing.
219 */
220 -static void __naked
221 -v4wb_copy_user_page(void *kto, const void *kfrom)
222 +static void v4wb_copy_user_page(void *kto, const void *kfrom)
223 {
224 - asm("\
225 - stmfd sp!, {r4, lr} @ 2\n\
226 - mov r2, %2 @ 1\n\
227 - ldmia r1!, {r3, r4, ip, lr} @ 4\n\
228 -1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\
229 - stmia r0!, {r3, r4, ip, lr} @ 4\n\
230 - ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\
231 - stmia r0!, {r3, r4, ip, lr} @ 4\n\
232 - ldmia r1!, {r3, r4, ip, lr} @ 4\n\
233 - mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\
234 - stmia r0!, {r3, r4, ip, lr} @ 4\n\
235 - ldmia r1!, {r3, r4, ip, lr} @ 4\n\
236 - subs r2, r2, #1 @ 1\n\
237 - stmia r0!, {r3, r4, ip, lr} @ 4\n\
238 - ldmneia r1!, {r3, r4, ip, lr} @ 4\n\
239 + int tmp;
240 +
241 + asm volatile ("\
242 + ldmia %1!, {r3, r4, ip, lr} @ 4\n\
243 +1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
244 + stmia %0!, {r3, r4, ip, lr} @ 4\n\
245 + ldmia %1!, {r3, r4, ip, lr} @ 4+1\n\
246 + stmia %0!, {r3, r4, ip, lr} @ 4\n\
247 + ldmia %1!, {r3, r4, ip, lr} @ 4\n\
248 + mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
249 + stmia %0!, {r3, r4, ip, lr} @ 4\n\
250 + ldmia %1!, {r3, r4, ip, lr} @ 4\n\
251 + subs %2, %2, #1 @ 1\n\
252 + stmia %0!, {r3, r4, ip, lr} @ 4\n\
253 + ldmneia %1!, {r3, r4, ip, lr} @ 4\n\
254 bne 1b @ 1\n\
255 - mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\
256 - ldmfd sp!, {r4, pc} @ 3"
257 - :
258 - : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
259 + mcr p15, 0, %1, c7, c10, 4 @ 1 drain WB"
260 + : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
261 + : "2" (PAGE_SIZE / 64)
262 + : "r3", "r4", "ip", "lr");
263 }
264
265 void v4wb_copy_user_highpage(struct page *to, struct page *from,
266 diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
267 index b85c5da2e510e..8614572e1296b 100644
268 --- a/arch/arm/mm/copypage-v4wt.c
269 +++ b/arch/arm/mm/copypage-v4wt.c
270 @@ -20,27 +20,26 @@
271 * dirty data in the cache. However, we do have to ensure that
272 * subsequent reads are up to date.
273 */
274 -static void __naked
275 -v4wt_copy_user_page(void *kto, const void *kfrom)
276 +static void v4wt_copy_user_page(void *kto, const void *kfrom)
277 {
278 - asm("\
279 - stmfd sp!, {r4, lr} @ 2\n\
280 - mov r2, %2 @ 1\n\
281 - ldmia r1!, {r3, r4, ip, lr} @ 4\n\
282 -1: stmia r0!, {r3, r4, ip, lr} @ 4\n\
283 - ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\
284 - stmia r0!, {r3, r4, ip, lr} @ 4\n\
285 - ldmia r1!, {r3, r4, ip, lr} @ 4\n\
286 - stmia r0!, {r3, r4, ip, lr} @ 4\n\
287 - ldmia r1!, {r3, r4, ip, lr} @ 4\n\
288 - subs r2, r2, #1 @ 1\n\
289 - stmia r0!, {r3, r4, ip, lr} @ 4\n\
290 - ldmneia r1!, {r3, r4, ip, lr} @ 4\n\
291 + int tmp;
292 +
293 + asm volatile ("\
294 + ldmia %1!, {r3, r4, ip, lr} @ 4\n\
295 +1: stmia %0!, {r3, r4, ip, lr} @ 4\n\
296 + ldmia %1!, {r3, r4, ip, lr} @ 4+1\n\
297 + stmia %0!, {r3, r4, ip, lr} @ 4\n\
298 + ldmia %1!, {r3, r4, ip, lr} @ 4\n\
299 + stmia %0!, {r3, r4, ip, lr} @ 4\n\
300 + ldmia %1!, {r3, r4, ip, lr} @ 4\n\
301 + subs %2, %2, #1 @ 1\n\
302 + stmia %0!, {r3, r4, ip, lr} @ 4\n\
303 + ldmneia %1!, {r3, r4, ip, lr} @ 4\n\
304 bne 1b @ 1\n\
305 - mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\
306 - ldmfd sp!, {r4, pc} @ 3"
307 - :
308 - : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
309 + mcr p15, 0, %2, c7, c7, 0 @ flush ID cache"
310 + : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
311 + : "2" (PAGE_SIZE / 64)
312 + : "r3", "r4", "ip", "lr");
313 }
314
315 void v4wt_copy_user_highpage(struct page *to, struct page *from,
316 diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
317 index 03a2042aced5f..55cbc3a89d858 100644
318 --- a/arch/arm/mm/copypage-xsc3.c
319 +++ b/arch/arm/mm/copypage-xsc3.c
320 @@ -21,53 +21,46 @@
321
322 /*
323 * XSC3 optimised copy_user_highpage
324 - * r0 = destination
325 - * r1 = source
326 *
327 * The source page may have some clean entries in the cache already, but we
328 * can safely ignore them - break_cow() will flush them out of the cache
329 * if we eventually end up using our copied page.
330 *
331 */
332 -static void __naked
333 -xsc3_mc_copy_user_page(void *kto, const void *kfrom)
334 +static void xsc3_mc_copy_user_page(void *kto, const void *kfrom)
335 {
336 - asm("\
337 - stmfd sp!, {r4, r5, lr} \n\
338 - mov lr, %2 \n\
339 - \n\
340 - pld [r1, #0] \n\
341 - pld [r1, #32] \n\
342 -1: pld [r1, #64] \n\
343 - pld [r1, #96] \n\
344 + int tmp;
345 +
346 + asm volatile ("\
347 + pld [%1, #0] \n\
348 + pld [%1, #32] \n\
349 +1: pld [%1, #64] \n\
350 + pld [%1, #96] \n\
351 \n\
352 -2: ldrd r2, [r1], #8 \n\
353 - mov ip, r0 \n\
354 - ldrd r4, [r1], #8 \n\
355 - mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\
356 - strd r2, [r0], #8 \n\
357 - ldrd r2, [r1], #8 \n\
358 - strd r4, [r0], #8 \n\
359 - ldrd r4, [r1], #8 \n\
360 - strd r2, [r0], #8 \n\
361 - strd r4, [r0], #8 \n\
362 - ldrd r2, [r1], #8 \n\
363 - mov ip, r0 \n\
364 - ldrd r4, [r1], #8 \n\
365 - mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\
366 - strd r2, [r0], #8 \n\
367 - ldrd r2, [r1], #8 \n\
368 - subs lr, lr, #1 \n\
369 - strd r4, [r0], #8 \n\
370 - ldrd r4, [r1], #8 \n\
371 - strd r2, [r0], #8 \n\
372 - strd r4, [r0], #8 \n\
373 +2: ldrd r2, [%1], #8 \n\
374 + ldrd r4, [%1], #8 \n\
375 + mcr p15, 0, %0, c7, c6, 1 @ invalidate\n\
376 + strd r2, [%0], #8 \n\
377 + ldrd r2, [%1], #8 \n\
378 + strd r4, [%0], #8 \n\
379 + ldrd r4, [%1], #8 \n\
380 + strd r2, [%0], #8 \n\
381 + strd r4, [%0], #8 \n\
382 + ldrd r2, [%1], #8 \n\
383 + ldrd r4, [%1], #8 \n\
384 + mcr p15, 0, %0, c7, c6, 1 @ invalidate\n\
385 + strd r2, [%0], #8 \n\
386 + ldrd r2, [%1], #8 \n\
387 + subs %2, %2, #1 \n\
388 + strd r4, [%0], #8 \n\
389 + ldrd r4, [%1], #8 \n\
390 + strd r2, [%0], #8 \n\
391 + strd r4, [%0], #8 \n\
392 bgt 1b \n\
393 - beq 2b \n\
394 - \n\
395 - ldmfd sp!, {r4, r5, pc}"
396 - :
397 - : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1));
398 + beq 2b "
399 + : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
400 + : "2" (PAGE_SIZE / 64 - 1)
401 + : "r2", "r3", "r4", "r5");
402 }
403
404 void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
405 @@ -85,8 +78,6 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
406
407 /*
408 * XScale optimised clear_user_page
409 - * r0 = destination
410 - * r1 = virtual user address of ultimate destination page
411 */
412 void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
413 {
414 diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
415 index 0fb85025344d9..c775d4b7adb08 100644
416 --- a/arch/arm/mm/copypage-xscale.c
417 +++ b/arch/arm/mm/copypage-xscale.c
418 @@ -36,52 +36,51 @@ static DEFINE_RAW_SPINLOCK(minicache_lock);
419 * Dcache aliasing issue. The writes will be forwarded to the write buffer,
420 * and merged as appropriate.
421 */
422 -static void __naked
423 -mc_copy_user_page(void *from, void *to)
424 +static void mc_copy_user_page(void *from, void *to)
425 {
426 + int tmp;
427 +
428 /*
429 * Strangely enough, best performance is achieved
430 * when prefetching destination as well. (NP)
431 */
432 - asm volatile(
433 - "stmfd sp!, {r4, r5, lr} \n\
434 - mov lr, %2 \n\
435 - pld [r0, #0] \n\
436 - pld [r0, #32] \n\
437 - pld [r1, #0] \n\
438 - pld [r1, #32] \n\
439 -1: pld [r0, #64] \n\
440 - pld [r0, #96] \n\
441 - pld [r1, #64] \n\
442 - pld [r1, #96] \n\
443 -2: ldrd r2, [r0], #8 \n\
444 - ldrd r4, [r0], #8 \n\
445 - mov ip, r1 \n\
446 - strd r2, [r1], #8 \n\
447 - ldrd r2, [r0], #8 \n\
448 - strd r4, [r1], #8 \n\
449 - ldrd r4, [r0], #8 \n\
450 - strd r2, [r1], #8 \n\
451 - strd r4, [r1], #8 \n\
452 + asm volatile ("\
453 + pld [%0, #0] \n\
454 + pld [%0, #32] \n\
455 + pld [%1, #0] \n\
456 + pld [%1, #32] \n\
457 +1: pld [%0, #64] \n\
458 + pld [%0, #96] \n\
459 + pld [%1, #64] \n\
460 + pld [%1, #96] \n\
461 +2: ldrd r2, [%0], #8 \n\
462 + ldrd r4, [%0], #8 \n\
463 + mov ip, %1 \n\
464 + strd r2, [%1], #8 \n\
465 + ldrd r2, [%0], #8 \n\
466 + strd r4, [%1], #8 \n\
467 + ldrd r4, [%0], #8 \n\
468 + strd r2, [%1], #8 \n\
469 + strd r4, [%1], #8 \n\
470 mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\
471 - ldrd r2, [r0], #8 \n\
472 + ldrd r2, [%0], #8 \n\
473 mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\
474 - ldrd r4, [r0], #8 \n\
475 - mov ip, r1 \n\
476 - strd r2, [r1], #8 \n\
477 - ldrd r2, [r0], #8 \n\
478 - strd r4, [r1], #8 \n\
479 - ldrd r4, [r0], #8 \n\
480 - strd r2, [r1], #8 \n\
481 - strd r4, [r1], #8 \n\
482 + ldrd r4, [%0], #8 \n\
483 + mov ip, %1 \n\
484 + strd r2, [%1], #8 \n\
485 + ldrd r2, [%0], #8 \n\
486 + strd r4, [%1], #8 \n\
487 + ldrd r4, [%0], #8 \n\
488 + strd r2, [%1], #8 \n\
489 + strd r4, [%1], #8 \n\
490 mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\
491 - subs lr, lr, #1 \n\
492 + subs %2, %2, #1 \n\
493 mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\
494 bgt 1b \n\
495 - beq 2b \n\
496 - ldmfd sp!, {r4, r5, pc} "
497 - :
498 - : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
499 + beq 2b "
500 + : "+&r" (from), "+&r" (to), "=&r" (tmp)
501 + : "2" (PAGE_SIZE / 64 - 1)
502 + : "r2", "r3", "r4", "r5", "ip");
503 }
504
505 void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
506 diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
507 index df58e7d793f52..d420597b0d2b4 100644
508 --- a/drivers/block/xen-blkfront.c
509 +++ b/drivers/block/xen-blkfront.c
510 @@ -1555,9 +1555,12 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
511 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
512 struct blkfront_info *info = rinfo->dev_info;
513 int error;
514 + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
515
516 - if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
517 + if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
518 + xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
519 return IRQ_HANDLED;
520 + }
521
522 spin_lock_irqsave(&rinfo->ring_lock, flags);
523 again:
524 @@ -1573,6 +1576,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
525 unsigned long id;
526 unsigned int op;
527
528 + eoiflag = 0;
529 +
530 RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
531 id = bret.id;
532
533 @@ -1684,6 +1689,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
534
535 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
536
537 + xen_irq_lateeoi(irq, eoiflag);
538 +
539 return IRQ_HANDLED;
540
541 err:
542 @@ -1691,6 +1698,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
543
544 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
545
546 + /* No EOI in order to avoid further interrupts. */
547 +
548 pr_alert("%s disabled for further use\n", info->gd->disk_name);
549 return IRQ_HANDLED;
550 }
551 @@ -1730,8 +1739,8 @@ static int setup_blkring(struct xenbus_device *dev,
552 if (err)
553 goto fail;
554
555 - err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
556 - "blkif", rinfo);
557 + err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
558 + 0, "blkif", rinfo);
559 if (err <= 0) {
560 xenbus_dev_fatal(dev, err,
561 "bind_evtchn_to_irqhandler failed");
562 diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
563 index 15f2e7025b78e..1d5510cb6db4e 100644
564 --- a/drivers/char/agp/parisc-agp.c
565 +++ b/drivers/char/agp/parisc-agp.c
566 @@ -285,7 +285,7 @@ agp_ioc_init(void __iomem *ioc_regs)
567 return 0;
568 }
569
570 -static int
571 +static int __init
572 lba_find_capability(int cap)
573 {
574 struct _parisc_agp_info *info = &parisc_agp_info;
575 @@ -370,7 +370,7 @@ fail:
576 return error;
577 }
578
579 -static int
580 +static int __init
581 find_quicksilver(struct device *dev, void *data)
582 {
583 struct parisc_device **lba = data;
584 @@ -382,7 +382,7 @@ find_quicksilver(struct device *dev, void *data)
585 return 0;
586 }
587
588 -static int
589 +static int __init
590 parisc_agp_init(void)
591 {
592 extern struct sba_device *sba_list;
593 diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c
594 index f395dec271131..a6e62a793fbe6 100644
595 --- a/drivers/firmware/scpi_pm_domain.c
596 +++ b/drivers/firmware/scpi_pm_domain.c
597 @@ -27,7 +27,6 @@ struct scpi_pm_domain {
598 struct generic_pm_domain genpd;
599 struct scpi_ops *ops;
600 u32 domain;
601 - char name[30];
602 };
603
604 /*
605 @@ -121,8 +120,13 @@ static int scpi_pm_domain_probe(struct platform_device *pdev)
606
607 scpi_pd->domain = i;
608 scpi_pd->ops = scpi_ops;
609 - sprintf(scpi_pd->name, "%s.%d", np->name, i);
610 - scpi_pd->genpd.name = scpi_pd->name;
611 + scpi_pd->genpd.name = devm_kasprintf(dev, GFP_KERNEL,
612 + "%s.%d", np->name, i);
613 + if (!scpi_pd->genpd.name) {
614 + dev_err(dev, "Failed to allocate genpd name:%s.%d\n",
615 + np->name, i);
616 + continue;
617 + }
618 scpi_pd->genpd.power_off = scpi_pd_power_off;
619 scpi_pd->genpd.power_on = scpi_pd_power_on;
620
621 diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
622 index 34704b0451b49..d19ad92eede95 100644
623 --- a/drivers/hwmon/dell-smm-hwmon.c
624 +++ b/drivers/hwmon/dell-smm-hwmon.c
625 @@ -578,15 +578,18 @@ static const struct file_operations i8k_fops = {
626 .unlocked_ioctl = i8k_ioctl,
627 };
628
629 +static struct proc_dir_entry *entry;
630 +
631 static void __init i8k_init_procfs(void)
632 {
633 /* Register the proc entry */
634 - proc_create("i8k", 0, NULL, &i8k_fops);
635 + entry = proc_create("i8k", 0, NULL, &i8k_fops);
636 }
637
638 static void __exit i8k_exit_procfs(void)
639 {
640 - remove_proc_entry("i8k", NULL);
641 + if (entry)
642 + remove_proc_entry("i8k", NULL);
643 }
644
645 #else
646 diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
647 index df220666d6274..b4f8cd7dc8b74 100644
648 --- a/drivers/i2c/busses/i2c-rk3x.c
649 +++ b/drivers/i2c/busses/i2c-rk3x.c
650 @@ -424,8 +424,8 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
651 if (!(ipd & REG_INT_MBRF))
652 return;
653
654 - /* ack interrupt */
655 - i2c_writel(i2c, REG_INT_MBRF, REG_IPD);
656 + /* ack interrupt (read also produces a spurious START flag, clear it too) */
657 + i2c_writel(i2c, REG_INT_MBRF | REG_INT_START, REG_IPD);
658
659 /* Can only handle a maximum of 32 bytes at a time */
660 if (len > 32)
661 diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c
662 index 8d7f9c8f2771c..db499ef6ccff4 100644
663 --- a/drivers/input/touchscreen/of_touchscreen.c
664 +++ b/drivers/input/touchscreen/of_touchscreen.c
665 @@ -79,8 +79,8 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
666 data_present = touchscreen_get_prop_u32(dev, "touchscreen-size-x",
667 input_abs_get_max(input,
668 axis) + 1,
669 - &maximum) |
670 - touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
671 + &maximum);
672 + data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
673 input_abs_get_fuzz(input, axis),
674 &fuzz);
675 if (data_present)
676 @@ -90,8 +90,8 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
677 data_present = touchscreen_get_prop_u32(dev, "touchscreen-size-y",
678 input_abs_get_max(input,
679 axis) + 1,
680 - &maximum) |
681 - touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
682 + &maximum);
683 + data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
684 input_abs_get_fuzz(input, axis),
685 &fuzz);
686 if (data_present)
687 @@ -101,11 +101,11 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
688 data_present = touchscreen_get_prop_u32(dev,
689 "touchscreen-max-pressure",
690 input_abs_get_max(input, axis),
691 - &maximum) |
692 - touchscreen_get_prop_u32(dev,
693 - "touchscreen-fuzz-pressure",
694 - input_abs_get_fuzz(input, axis),
695 - &fuzz);
696 + &maximum);
697 + data_present |= touchscreen_get_prop_u32(dev,
698 + "touchscreen-fuzz-pressure",
699 + input_abs_get_fuzz(input, axis),
700 + &fuzz);
701 if (data_present)
702 touchscreen_set_params(input, axis, maximum, fuzz);
703
704 diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
705 index 9e4d1212f4c16..63f2baed3c8a6 100644
706 --- a/drivers/md/persistent-data/dm-btree-remove.c
707 +++ b/drivers/md/persistent-data/dm-btree-remove.c
708 @@ -423,9 +423,9 @@ static int rebalance_children(struct shadow_spine *s,
709
710 memcpy(n, dm_block_data(child),
711 dm_bm_block_size(dm_tm_get_bm(info->tm)));
712 - dm_tm_unlock(info->tm, child);
713
714 dm_tm_dec(info->tm, dm_block_location(child));
715 + dm_tm_unlock(info->tm, child);
716 return 0;
717 }
718
719 diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
720 index 5d67dbdd943dc..98392a069f2b2 100644
721 --- a/drivers/net/ethernet/broadcom/bcmsysport.c
722 +++ b/drivers/net/ethernet/broadcom/bcmsysport.c
723 @@ -90,9 +90,13 @@ static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
724 struct dma_desc *desc,
725 unsigned int port)
726 {
727 + unsigned long desc_flags;
728 +
729 /* Ports are latched, so write upper address first */
730 + spin_lock_irqsave(&priv->desc_lock, desc_flags);
731 tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
732 tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
733 + spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
734 }
735
736 /* Ethtool operations */
737 @@ -1587,6 +1591,7 @@ static int bcm_sysport_open(struct net_device *dev)
738 }
739
740 /* Initialize both hardware and software ring */
741 + spin_lock_init(&priv->desc_lock);
742 for (i = 0; i < dev->num_tx_queues; i++) {
743 ret = bcm_sysport_init_tx_ring(priv, i);
744 if (ret) {
745 diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
746 index 0d3444f1d78a0..1cf5af2b11e1f 100644
747 --- a/drivers/net/ethernet/broadcom/bcmsysport.h
748 +++ b/drivers/net/ethernet/broadcom/bcmsysport.h
749 @@ -660,6 +660,7 @@ struct bcm_sysport_priv {
750 int wol_irq;
751
752 /* Transmit rings */
753 + spinlock_t desc_lock;
754 struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
755
756 /* Receive queue */
757 diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
758 index 5428e39fa4e5c..7587a8f98619a 100644
759 --- a/drivers/net/ethernet/intel/igbvf/netdev.c
760 +++ b/drivers/net/ethernet/intel/igbvf/netdev.c
761 @@ -2846,6 +2846,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
762 return 0;
763
764 err_hw_init:
765 + netif_napi_del(&adapter->rx_ring->napi);
766 kfree(adapter->tx_ring);
767 kfree(adapter->rx_ring);
768 err_sw_init:
769 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
770 index 8466f3874a285..5029db8835d7d 100644
771 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
772 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
773 @@ -2597,6 +2597,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
774 /* flush pending Tx transactions */
775 ixgbe_clear_tx_pending(hw);
776
777 + /* set MDIO speed before talking to the PHY in case it's the 1st time */
778 + ixgbe_set_mdio_speed(hw);
779 +
780 /* PHY ops must be identified and initialized prior to reset */
781
782 /* Identify PHY and related function pointers */
783 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
784 index 410a36c982419..1569300844f0c 100644
785 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
786 +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
787 @@ -620,7 +620,7 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
788 MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000,
789 ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
790 MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000,
791 - ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
792 + ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
793 MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000,
794 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
795 MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000,
796 @@ -632,9 +632,9 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
797 MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
798 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
799 MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000,
800 - ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
801 + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT);
802 MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000,
803 - ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
804 + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT);
805 MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000,
806 ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
807 ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
808 diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
809 index bce898ad6e96f..7752cc09a1da5 100644
810 --- a/drivers/net/usb/lan78xx.c
811 +++ b/drivers/net/usb/lan78xx.c
812 @@ -865,11 +865,9 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
813 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
814
815 if (ret == 0) {
816 - if (sig == OTP_INDICATOR_1)
817 - offset = offset;
818 - else if (sig == OTP_INDICATOR_2)
819 + if (sig == OTP_INDICATOR_2)
820 offset += 0x100;
821 - else
822 + else if (sig != OTP_INDICATOR_1)
823 ret = -EINVAL;
824 if (!ret)
825 ret = lan78xx_read_raw_otp(dev, offset, length, data);
826 diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
827 index 53477280f39c2..7c848852aa094 100644
828 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
829 +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
830 @@ -321,9 +321,9 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
831
832 adapter->seq_num++;
833 sleep_cfm_buf->seq_num =
834 - cpu_to_le16((HostCmd_SET_SEQ_NO_BSS_INFO
835 + cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO
836 (adapter->seq_num, priv->bss_num,
837 - priv->bss_type)));
838 + priv->bss_type));
839
840 mwifiex_dbg(adapter, CMD,
841 "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
842 diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
843 index 341f6ed5b3556..fccce1b4afcd1 100644
844 --- a/drivers/net/wireless/marvell/mwifiex/fw.h
845 +++ b/drivers/net/wireless/marvell/mwifiex/fw.h
846 @@ -482,10 +482,10 @@ enum mwifiex_channel_flags {
847
848 #define RF_ANTENNA_AUTO 0xFFFF
849
850 -#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) { \
851 - (((seq) & 0x00ff) | \
852 - (((num) & 0x000f) << 8)) | \
853 - (((type) & 0x000f) << 12); }
854 +#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) \
855 + ((((seq) & 0x00ff) | \
856 + (((num) & 0x000f) << 8)) | \
857 + (((type) & 0x000f) << 12))
858
859 #define HostCmd_GET_SEQ_NO(seq) \
860 ((seq) & HostCmd_SEQ_NUM_MASK)
861 diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
862 index 347c796afd4ed..bfa3c6aaebe6b 100644
863 --- a/drivers/net/xen-netback/common.h
864 +++ b/drivers/net/xen-netback/common.h
865 @@ -203,6 +203,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
866 unsigned int rx_queue_max;
867 unsigned int rx_queue_len;
868 unsigned long last_rx_time;
869 + unsigned int rx_slots_needed;
870 bool stalled;
871
872 struct xenvif_copy_state rx_copy;
873 diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
874 index ddfb1cfa2dd94..29c7645f57805 100644
875 --- a/drivers/net/xen-netback/rx.c
876 +++ b/drivers/net/xen-netback/rx.c
877 @@ -33,28 +33,36 @@
878 #include <xen/xen.h>
879 #include <xen/events.h>
880
881 -static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
882 +/*
883 + * Update the needed ring page slots for the first SKB queued.
884 + * Note that any call sequence outside the RX thread calling this function
885 + * needs to wake up the RX thread via a call of xenvif_kick_thread()
886 + * afterwards in order to avoid a race with putting the thread to sleep.
887 + */
888 +static void xenvif_update_needed_slots(struct xenvif_queue *queue,
889 + const struct sk_buff *skb)
890 {
891 - RING_IDX prod, cons;
892 - struct sk_buff *skb;
893 - int needed;
894 - unsigned long flags;
895 -
896 - spin_lock_irqsave(&queue->rx_queue.lock, flags);
897 + unsigned int needed = 0;
898
899 - skb = skb_peek(&queue->rx_queue);
900 - if (!skb) {
901 - spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
902 - return false;
903 + if (skb) {
904 + needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
905 + if (skb_is_gso(skb))
906 + needed++;
907 + if (skb->sw_hash)
908 + needed++;
909 }
910
911 - needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
912 - if (skb_is_gso(skb))
913 - needed++;
914 - if (skb->sw_hash)
915 - needed++;
916 + WRITE_ONCE(queue->rx_slots_needed, needed);
917 +}
918
919 - spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
920 +static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
921 +{
922 + RING_IDX prod, cons;
923 + unsigned int needed;
924 +
925 + needed = READ_ONCE(queue->rx_slots_needed);
926 + if (!needed)
927 + return false;
928
929 do {
930 prod = queue->rx.sring->req_prod;
931 @@ -80,13 +88,19 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
932
933 spin_lock_irqsave(&queue->rx_queue.lock, flags);
934
935 - __skb_queue_tail(&queue->rx_queue, skb);
936 -
937 - queue->rx_queue_len += skb->len;
938 - if (queue->rx_queue_len > queue->rx_queue_max) {
939 + if (queue->rx_queue_len >= queue->rx_queue_max) {
940 struct net_device *dev = queue->vif->dev;
941
942 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
943 + kfree_skb(skb);
944 + queue->vif->dev->stats.rx_dropped++;
945 + } else {
946 + if (skb_queue_empty(&queue->rx_queue))
947 + xenvif_update_needed_slots(queue, skb);
948 +
949 + __skb_queue_tail(&queue->rx_queue, skb);
950 +
951 + queue->rx_queue_len += skb->len;
952 }
953
954 spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
955 @@ -100,6 +114,8 @@ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
956
957 skb = __skb_dequeue(&queue->rx_queue);
958 if (skb) {
959 + xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));
960 +
961 queue->rx_queue_len -= skb->len;
962 if (queue->rx_queue_len < queue->rx_queue_max) {
963 struct netdev_queue *txq;
964 @@ -134,6 +150,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
965 break;
966 xenvif_rx_dequeue(queue);
967 kfree_skb(skb);
968 + queue->vif->dev->stats.rx_dropped++;
969 }
970 }
971
972 @@ -474,27 +491,31 @@ void xenvif_rx_action(struct xenvif_queue *queue)
973 xenvif_rx_copy_flush(queue);
974 }
975
976 -static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
977 +static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
978 {
979 RING_IDX prod, cons;
980
981 prod = queue->rx.sring->req_prod;
982 cons = queue->rx.req_cons;
983
984 + return prod - cons;
985 +}
986 +
987 +static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
988 +{
989 + unsigned int needed = READ_ONCE(queue->rx_slots_needed);
990 +
991 return !queue->stalled &&
992 - prod - cons < 1 &&
993 + xenvif_rx_queue_slots(queue) < needed &&
994 time_after(jiffies,
995 queue->last_rx_time + queue->vif->stall_timeout);
996 }
997
998 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
999 {
1000 - RING_IDX prod, cons;
1001 -
1002 - prod = queue->rx.sring->req_prod;
1003 - cons = queue->rx.req_cons;
1004 + unsigned int needed = READ_ONCE(queue->rx_slots_needed);
1005
1006 - return queue->stalled && prod - cons >= 1;
1007 + return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
1008 }
1009
1010 bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
1011 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
1012 index 0d2df76902384..65a50bc5661d2 100644
1013 --- a/drivers/net/xen-netfront.c
1014 +++ b/drivers/net/xen-netfront.c
1015 @@ -141,6 +141,9 @@ struct netfront_queue {
1016 struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
1017 grant_ref_t gref_rx_head;
1018 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
1019 +
1020 + unsigned int rx_rsp_unconsumed;
1021 + spinlock_t rx_cons_lock;
1022 };
1023
1024 struct netfront_info {
1025 @@ -365,12 +368,13 @@ static int xennet_open(struct net_device *dev)
1026 return 0;
1027 }
1028
1029 -static void xennet_tx_buf_gc(struct netfront_queue *queue)
1030 +static bool xennet_tx_buf_gc(struct netfront_queue *queue)
1031 {
1032 RING_IDX cons, prod;
1033 unsigned short id;
1034 struct sk_buff *skb;
1035 bool more_to_do;
1036 + bool work_done = false;
1037 const struct device *dev = &queue->info->netdev->dev;
1038
1039 BUG_ON(!netif_carrier_ok(queue->info->netdev));
1040 @@ -387,6 +391,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
1041 for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
1042 struct xen_netif_tx_response txrsp;
1043
1044 + work_done = true;
1045 +
1046 RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
1047 if (txrsp.status == XEN_NETIF_RSP_NULL)
1048 continue;
1049 @@ -430,11 +436,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
1050
1051 xennet_maybe_wake_tx(queue);
1052
1053 - return;
1054 + return work_done;
1055
1056 err:
1057 queue->info->broken = true;
1058 dev_alert(dev, "Disabled for further use\n");
1059 +
1060 + return work_done;
1061 }
1062
1063 struct xennet_gnttab_make_txreq {
1064 @@ -754,6 +762,16 @@ static int xennet_close(struct net_device *dev)
1065 return 0;
1066 }
1067
1068 +static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
1069 +{
1070 + unsigned long flags;
1071 +
1072 + spin_lock_irqsave(&queue->rx_cons_lock, flags);
1073 + queue->rx.rsp_cons = val;
1074 + queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
1075 + spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1076 +}
1077 +
1078 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
1079 grant_ref_t ref)
1080 {
1081 @@ -805,7 +823,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
1082 xennet_move_rx_slot(queue, skb, ref);
1083 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1084
1085 - queue->rx.rsp_cons = cons;
1086 + xennet_set_rx_rsp_cons(queue, cons);
1087 return err;
1088 }
1089
1090 @@ -885,7 +903,7 @@ next:
1091 }
1092
1093 if (unlikely(err))
1094 - queue->rx.rsp_cons = cons + slots;
1095 + xennet_set_rx_rsp_cons(queue, cons + slots);
1096
1097 return err;
1098 }
1099 @@ -939,7 +957,8 @@ static int xennet_fill_frags(struct netfront_queue *queue,
1100 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1101 }
1102 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1103 - queue->rx.rsp_cons = ++cons + skb_queue_len(list);
1104 + xennet_set_rx_rsp_cons(queue,
1105 + ++cons + skb_queue_len(list));
1106 kfree_skb(nskb);
1107 return -ENOENT;
1108 }
1109 @@ -952,7 +971,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
1110 kfree_skb(nskb);
1111 }
1112
1113 - queue->rx.rsp_cons = cons;
1114 + xennet_set_rx_rsp_cons(queue, cons);
1115
1116 return 0;
1117 }
1118 @@ -1073,7 +1092,9 @@ err:
1119
1120 if (unlikely(xennet_set_skb_gso(skb, gso))) {
1121 __skb_queue_head(&tmpq, skb);
1122 - queue->rx.rsp_cons += skb_queue_len(&tmpq);
1123 + xennet_set_rx_rsp_cons(queue,
1124 + queue->rx.rsp_cons +
1125 + skb_queue_len(&tmpq));
1126 goto err;
1127 }
1128 }
1129 @@ -1097,7 +1118,8 @@ err:
1130
1131 __skb_queue_tail(&rxq, skb);
1132
1133 - i = ++queue->rx.rsp_cons;
1134 + i = queue->rx.rsp_cons + 1;
1135 + xennet_set_rx_rsp_cons(queue, i);
1136 work_done++;
1137 }
1138
1139 @@ -1281,40 +1303,79 @@ static int xennet_set_features(struct net_device *dev,
1140 return 0;
1141 }
1142
1143 -static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1144 +static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
1145 {
1146 - struct netfront_queue *queue = dev_id;
1147 unsigned long flags;
1148
1149 - if (queue->info->broken)
1150 - return IRQ_HANDLED;
1151 + if (unlikely(queue->info->broken))
1152 + return false;
1153
1154 spin_lock_irqsave(&queue->tx_lock, flags);
1155 - xennet_tx_buf_gc(queue);
1156 + if (xennet_tx_buf_gc(queue))
1157 + *eoi = 0;
1158 spin_unlock_irqrestore(&queue->tx_lock, flags);
1159
1160 + return true;
1161 +}
1162 +
1163 +static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1164 +{
1165 + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1166 +
1167 + if (likely(xennet_handle_tx(dev_id, &eoiflag)))
1168 + xen_irq_lateeoi(irq, eoiflag);
1169 +
1170 return IRQ_HANDLED;
1171 }
1172
1173 -static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1174 +static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
1175 {
1176 - struct netfront_queue *queue = dev_id;
1177 - struct net_device *dev = queue->info->netdev;
1178 + unsigned int work_queued;
1179 + unsigned long flags;
1180
1181 - if (queue->info->broken)
1182 - return IRQ_HANDLED;
1183 + if (unlikely(queue->info->broken))
1184 + return false;
1185 +
1186 + spin_lock_irqsave(&queue->rx_cons_lock, flags);
1187 + work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
1188 + if (work_queued > queue->rx_rsp_unconsumed) {
1189 + queue->rx_rsp_unconsumed = work_queued;
1190 + *eoi = 0;
1191 + } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
1192 + const struct device *dev = &queue->info->netdev->dev;
1193 +
1194 + spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1195 + dev_alert(dev, "RX producer index going backwards\n");
1196 + dev_alert(dev, "Disabled for further use\n");
1197 + queue->info->broken = true;
1198 + return false;
1199 + }
1200 + spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1201
1202 - if (likely(netif_carrier_ok(dev) &&
1203 - RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1204 + if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
1205 napi_schedule(&queue->napi);
1206
1207 + return true;
1208 +}
1209 +
1210 +static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1211 +{
1212 + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1213 +
1214 + if (likely(xennet_handle_rx(dev_id, &eoiflag)))
1215 + xen_irq_lateeoi(irq, eoiflag);
1216 +
1217 return IRQ_HANDLED;
1218 }
1219
1220 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1221 {
1222 - xennet_tx_interrupt(irq, dev_id);
1223 - xennet_rx_interrupt(irq, dev_id);
1224 + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1225 +
1226 + if (xennet_handle_tx(dev_id, &eoiflag) &&
1227 + xennet_handle_rx(dev_id, &eoiflag))
1228 + xen_irq_lateeoi(irq, eoiflag);
1229 +
1230 return IRQ_HANDLED;
1231 }
1232
1233 @@ -1546,9 +1607,10 @@ static int setup_netfront_single(struct netfront_queue *queue)
1234 if (err < 0)
1235 goto fail;
1236
1237 - err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1238 - xennet_interrupt,
1239 - 0, queue->info->netdev->name, queue);
1240 + err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1241 + xennet_interrupt, 0,
1242 + queue->info->netdev->name,
1243 + queue);
1244 if (err < 0)
1245 goto bind_fail;
1246 queue->rx_evtchn = queue->tx_evtchn;
1247 @@ -1576,18 +1638,18 @@ static int setup_netfront_split(struct netfront_queue *queue)
1248
1249 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1250 "%s-tx", queue->name);
1251 - err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1252 - xennet_tx_interrupt,
1253 - 0, queue->tx_irq_name, queue);
1254 + err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1255 + xennet_tx_interrupt, 0,
1256 + queue->tx_irq_name, queue);
1257 if (err < 0)
1258 goto bind_tx_fail;
1259 queue->tx_irq = err;
1260
1261 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1262 "%s-rx", queue->name);
1263 - err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
1264 - xennet_rx_interrupt,
1265 - 0, queue->rx_irq_name, queue);
1266 + err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
1267 + xennet_rx_interrupt, 0,
1268 + queue->rx_irq_name, queue);
1269 if (err < 0)
1270 goto bind_rx_fail;
1271 queue->rx_irq = err;
1272 @@ -1689,6 +1751,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
1273
1274 spin_lock_init(&queue->tx_lock);
1275 spin_lock_init(&queue->rx_lock);
1276 + spin_lock_init(&queue->rx_cons_lock);
1277
1278 setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
1279 (unsigned long)queue);
1280 diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
1281 index be2f1402c84c2..161de28dc1626 100644
1282 --- a/drivers/pci/msi.c
1283 +++ b/drivers/pci/msi.c
1284 @@ -871,7 +871,7 @@ out_free:
1285 free_msi_irqs(dev);
1286
1287 out_disable:
1288 - pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1289 + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0);
1290
1291 return ret;
1292 }
1293 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
1294 index 99bfb003be3fc..4358eb158c48c 100644
1295 --- a/drivers/scsi/scsi_debug.c
1296 +++ b/drivers/scsi/scsi_debug.c
1297 @@ -2175,11 +2175,11 @@ static int resp_mode_select(struct scsi_cmnd *scp,
1298 __func__, param_len, res);
1299 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
1300 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
1301 - if (md_len > 2) {
1302 + off = bd_len + (mselect6 ? 4 : 8);
1303 + if (md_len > 2 || off >= res) {
1304 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
1305 return check_condition_result;
1306 }
1307 - off = bd_len + (mselect6 ? 4 : 8);
1308 mpage = arr[off] & 0x3f;
1309 ps = !!(arr[off] & 0x80);
1310 if (ps) {
1311 diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
1312 index c4f5e5bbb8dce..9397e8ba26469 100644
1313 --- a/drivers/soc/tegra/fuse/fuse-tegra.c
1314 +++ b/drivers/soc/tegra/fuse/fuse-tegra.c
1315 @@ -176,7 +176,7 @@ static struct platform_driver tegra_fuse_driver = {
1316 };
1317 module_platform_driver(tegra_fuse_driver);
1318
1319 -bool __init tegra_fuse_read_spare(unsigned int spare)
1320 +u32 __init tegra_fuse_read_spare(unsigned int spare)
1321 {
1322 unsigned int offset = fuse->soc->info->spare + spare * 4;
1323
1324 diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
1325 index 10c2076d5089a..f368bd5373088 100644
1326 --- a/drivers/soc/tegra/fuse/fuse.h
1327 +++ b/drivers/soc/tegra/fuse/fuse.h
1328 @@ -62,7 +62,7 @@ struct tegra_fuse {
1329 void tegra_init_revision(void);
1330 void tegra_init_apbmisc(void);
1331
1332 -bool __init tegra_fuse_read_spare(unsigned int spare);
1333 +u32 __init tegra_fuse_read_spare(unsigned int spare);
1334 u32 __init tegra_fuse_read_early(unsigned int offset);
1335
1336 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
1337 diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
1338 index 858c7b4b197cb..2af089b2a343d 100644
1339 --- a/drivers/tty/hvc/hvc_xen.c
1340 +++ b/drivers/tty/hvc/hvc_xen.c
1341 @@ -50,6 +50,8 @@ struct xencons_info {
1342 struct xenbus_device *xbdev;
1343 struct xencons_interface *intf;
1344 unsigned int evtchn;
1345 + XENCONS_RING_IDX out_cons;
1346 + unsigned int out_cons_same;
1347 struct hvc_struct *hvc;
1348 int irq;
1349 int vtermno;
1350 @@ -151,6 +153,8 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
1351 XENCONS_RING_IDX cons, prod;
1352 int recv = 0;
1353 struct xencons_info *xencons = vtermno_to_xencons(vtermno);
1354 + unsigned int eoiflag = 0;
1355 +
1356 if (xencons == NULL)
1357 return -EINVAL;
1358 intf = xencons->intf;
1359 @@ -170,7 +174,27 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
1360 mb(); /* read ring before consuming */
1361 intf->in_cons = cons;
1362
1363 - notify_daemon(xencons);
1364 + /*
1365 + * When to mark interrupt having been spurious:
1366 + * - there was no new data to be read, and
1367 + * - the backend did not consume some output bytes, and
1368 + * - the previous round with no read data didn't see consumed bytes
1369 + * (we might have a race with an interrupt being in flight while
1370 + * updating xencons->out_cons, so account for that by allowing one
1371 + * round without any visible reason)
1372 + */
1373 + if (intf->out_cons != xencons->out_cons) {
1374 + xencons->out_cons = intf->out_cons;
1375 + xencons->out_cons_same = 0;
1376 + }
1377 + if (recv) {
1378 + notify_daemon(xencons);
1379 + } else if (xencons->out_cons_same++ > 1) {
1380 + eoiflag = XEN_EOI_FLAG_SPURIOUS;
1381 + }
1382 +
1383 + xen_irq_lateeoi(xencons->irq, eoiflag);
1384 +
1385 return recv;
1386 }
1387
1388 @@ -399,7 +423,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
1389 if (ret)
1390 return ret;
1391 info->evtchn = evtchn;
1392 - irq = bind_evtchn_to_irq(evtchn);
1393 + irq = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn);
1394 if (irq < 0)
1395 return irq;
1396 info->irq = irq;
1397 @@ -563,7 +587,7 @@ static int __init xen_hvc_init(void)
1398 return r;
1399
1400 info = vtermno_to_xencons(HVC_COOKIE);
1401 - info->irq = bind_evtchn_to_irq(info->evtchn);
1402 + info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
1403 }
1404 if (info->irq < 0)
1405 info->irq = 0; /* NO_IRQ */
1406 diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
1407 index 3d14a316830a6..a7c44a3cb2d25 100644
1408 --- a/drivers/usb/gadget/composite.c
1409 +++ b/drivers/usb/gadget/composite.c
1410 @@ -1632,14 +1632,14 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1411 u8 endp;
1412
1413 if (w_length > USB_COMP_EP0_BUFSIZ) {
1414 - if (ctrl->bRequestType == USB_DIR_OUT) {
1415 - goto done;
1416 - } else {
1417 + if (ctrl->bRequestType & USB_DIR_IN) {
1418 /* Cast away the const, we are going to overwrite on purpose. */
1419 __le16 *temp = (__le16 *)&ctrl->wLength;
1420
1421 *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ);
1422 w_length = USB_COMP_EP0_BUFSIZ;
1423 + } else {
1424 + goto done;
1425 }
1426 }
1427
1428 diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c
1429 index f1c5a22704b28..e8818ad973e4b 100644
1430 --- a/drivers/usb/gadget/legacy/dbgp.c
1431 +++ b/drivers/usb/gadget/legacy/dbgp.c
1432 @@ -345,14 +345,14 @@ static int dbgp_setup(struct usb_gadget *gadget,
1433 u16 len = 0;
1434
1435 if (length > DBGP_REQ_LEN) {
1436 - if (ctrl->bRequestType == USB_DIR_OUT) {
1437 - return err;
1438 - } else {
1439 + if (ctrl->bRequestType & USB_DIR_IN) {
1440 /* Cast away the const, we are going to overwrite on purpose. */
1441 __le16 *temp = (__le16 *)&ctrl->wLength;
1442
1443 *temp = cpu_to_le16(DBGP_REQ_LEN);
1444 length = DBGP_REQ_LEN;
1445 + } else {
1446 + return err;
1447 }
1448 }
1449
1450 diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
1451 index d39bd1a1ab8fc..19eb954a7afa3 100644
1452 --- a/drivers/usb/gadget/legacy/inode.c
1453 +++ b/drivers/usb/gadget/legacy/inode.c
1454 @@ -1339,14 +1339,14 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1455 u16 w_length = le16_to_cpu(ctrl->wLength);
1456
1457 if (w_length > RBUF_SIZE) {
1458 - if (ctrl->bRequestType == USB_DIR_OUT) {
1459 - return value;
1460 - } else {
1461 + if (ctrl->bRequestType & USB_DIR_IN) {
1462 /* Cast away the const, we are going to overwrite on purpose. */
1463 __le16 *temp = (__le16 *)&ctrl->wLength;
1464
1465 *temp = cpu_to_le16(RBUF_SIZE);
1466 w_length = RBUF_SIZE;
1467 + } else {
1468 + return value;
1469 }
1470 }
1471
1472 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1473 index 502931f658a8e..9479abb9eaaaf 100644
1474 --- a/drivers/usb/serial/option.c
1475 +++ b/drivers/usb/serial/option.c
1476 @@ -1195,6 +1195,14 @@ static const struct usb_device_id option_ids[] = {
1477 .driver_info = NCTRL(2) | RSVD(3) },
1478 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */
1479 .driver_info = NCTRL(0) | RSVD(1) },
1480 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990 (rmnet) */
1481 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
1482 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990 (MBIM) */
1483 + .driver_info = NCTRL(0) | RSVD(1) },
1484 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990 (RNDIS) */
1485 + .driver_info = NCTRL(2) | RSVD(3) },
1486 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */
1487 + .driver_info = NCTRL(0) | RSVD(1) },
1488 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
1489 .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
1490 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
1491 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
1492 index 9af23f4365586..b41cc537eb311 100644
1493 --- a/fs/fuse/dir.c
1494 +++ b/fs/fuse/dir.c
1495 @@ -973,7 +973,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
1496 if (!parent)
1497 return -ENOENT;
1498
1499 - inode_lock(parent);
1500 + inode_lock_nested(parent, I_MUTEX_PARENT);
1501 if (!S_ISDIR(parent->i_mode))
1502 goto unlock;
1503
1504 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
1505 index 5c9231d5e14a0..524d98e3bcf5b 100644
1506 --- a/fs/nfsd/nfs4state.c
1507 +++ b/fs/nfsd/nfs4state.c
1508 @@ -955,6 +955,11 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
1509 return 0;
1510 }
1511
1512 +static bool delegation_hashed(struct nfs4_delegation *dp)
1513 +{
1514 + return !(list_empty(&dp->dl_perfile));
1515 +}
1516 +
1517 static bool
1518 unhash_delegation_locked(struct nfs4_delegation *dp)
1519 {
1520 @@ -962,7 +967,7 @@ unhash_delegation_locked(struct nfs4_delegation *dp)
1521
1522 lockdep_assert_held(&state_lock);
1523
1524 - if (list_empty(&dp->dl_perfile))
1525 + if (!delegation_hashed(dp))
1526 return false;
1527
1528 dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
1529 @@ -3882,7 +3887,7 @@ static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
1530 * queued for a lease break. Don't queue it again.
1531 */
1532 spin_lock(&state_lock);
1533 - if (dp->dl_time == 0) {
1534 + if (delegation_hashed(dp) && dp->dl_time == 0) {
1535 dp->dl_time = get_seconds();
1536 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
1537 }
1538 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
1539 index e21b4d8b72405..bcba817f7af20 100644
1540 --- a/kernel/time/timekeeping.c
1541 +++ b/kernel/time/timekeeping.c
1542 @@ -1198,8 +1198,7 @@ int do_settimeofday64(const struct timespec64 *ts)
1543 timekeeping_forward_now(tk);
1544
1545 xt = tk_xtime(tk);
1546 - ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
1547 - ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
1548 + ts_delta = timespec64_sub(*ts, xt);
1549
1550 if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
1551 ret = -EINVAL;
1552 diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
1553 index 379db35838b64..572c0854d631c 100644
1554 --- a/kernel/trace/tracing_map.c
1555 +++ b/kernel/trace/tracing_map.c
1556 @@ -24,6 +24,7 @@
1557 #include <linux/jhash.h>
1558 #include <linux/slab.h>
1559 #include <linux/sort.h>
1560 +#include <linux/kmemleak.h>
1561
1562 #include "tracing_map.h"
1563 #include "trace.h"
1564 @@ -227,6 +228,7 @@ void tracing_map_array_free(struct tracing_map_array *a)
1565 for (i = 0; i < a->n_pages; i++) {
1566 if (!a->pages[i])
1567 break;
1568 + kmemleak_free(a->pages[i]);
1569 free_page((unsigned long)a->pages[i]);
1570 }
1571
1572 @@ -262,6 +264,7 @@ struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
1573 a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1574 if (!a->pages[i])
1575 goto free;
1576 + kmemleak_alloc(a->pages[i], PAGE_SIZE, 1, GFP_KERNEL);
1577 }
1578 out:
1579 return a;
1580 diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
1581 index 80c45567ee3ad..030d85790584d 100644
1582 --- a/net/mac80211/agg-tx.c
1583 +++ b/net/mac80211/agg-tx.c
1584 @@ -109,7 +109,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
1585 mgmt->u.action.u.addba_req.start_seq_num =
1586 cpu_to_le16(start_seq_num << 4);
1587
1588 - ieee80211_tx_skb(sdata, skb);
1589 + ieee80211_tx_skb_tid(sdata, skb, tid);
1590 }
1591
1592 void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
1593 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
1594 index 1b70de5898c42..13d69cbd14c20 100644
1595 --- a/net/netlink/af_netlink.c
1596 +++ b/net/netlink/af_netlink.c
1597 @@ -1804,6 +1804,11 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1598 if (msg->msg_flags&MSG_OOB)
1599 return -EOPNOTSUPP;
1600
1601 + if (len == 0) {
1602 + pr_warn_once("Zero length message leads to an empty skb\n");
1603 + return -ENODATA;
1604 + }
1605 +
1606 err = scm_send(sock, msg, &scm, true);
1607 if (err < 0)
1608 return err;
1609 diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
1610 index 27ad73861a33f..6e771022d43e1 100644
1611 --- a/net/nfc/netlink.c
1612 +++ b/net/nfc/netlink.c
1613 @@ -669,8 +669,10 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
1614 {
1615 struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
1616
1617 - nfc_device_iter_exit(iter);
1618 - kfree(iter);
1619 + if (iter) {
1620 + nfc_device_iter_exit(iter);
1621 + kfree(iter);
1622 + }
1623
1624 return 0;
1625 }
1626 diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
1627 index 113c7a0718108..21da2831e05b4 100755
1628 --- a/scripts/recordmcount.pl
1629 +++ b/scripts/recordmcount.pl
1630 @@ -250,7 +250,7 @@ if ($arch eq "x86_64") {
1631
1632 } elsif ($arch eq "s390" && $bits == 64) {
1633 if ($cc =~ /-DCC_USING_HOTPATCH/) {
1634 - $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*brcl\\s*0,[0-9a-f]+ <([^\+]*)>\$";
1635 + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*(bcrl\\s*0,|jgnop\\s*)[0-9a-f]+ <([^\+]*)>\$";
1636 $mcount_adjust = 0;
1637 } else {
1638 $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$";