Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.15/0103-3.15.4-all-fixes.patch

Parent Directory | Revision Log


Revision 2484 - (show annotations) (download)
Mon Jul 21 12:24:36 2014 UTC (9 years, 9 months ago) by niro
File size: 92004 byte(s)
-linux-3.15.4
1 diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
2 index 2a8e89e13e45..7e9abb8a276b 100644
3 --- a/Documentation/SubmittingPatches
4 +++ b/Documentation/SubmittingPatches
5 @@ -132,6 +132,20 @@ Example:
6 platform_set_drvdata(), but left the variable "dev" unused,
7 delete it.
8
9 +If your patch fixes a bug in a specific commit, e.g. you found an issue using
10 +git-bisect, please use the 'Fixes:' tag with the first 12 characters of the
11 +SHA-1 ID, and the one line summary.
12 +Example:
13 +
14 + Fixes: e21d2170f366 ("video: remove unnecessary platform_set_drvdata()")
15 +
16 +The following git-config settings can be used to add a pretty format for
17 +outputting the above style in the git log or git show commands
18 +
19 + [core]
20 + abbrev = 12
21 + [pretty]
22 + fixes = Fixes: %h (\"%s\")
23
24 3) Separate your changes.
25
26 @@ -443,7 +457,7 @@ person it names. This tag documents that potentially interested parties
27 have been included in the discussion
28
29
30 -14) Using Reported-by:, Tested-by:, Reviewed-by: and Suggested-by:
31 +14) Using Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: and Fixes:
32
33 If this patch fixes a problem reported by somebody else, consider adding a
34 Reported-by: tag to credit the reporter for their contribution. Please
35 @@ -498,6 +512,12 @@ idea was not posted in a public forum. That said, if we diligently credit our
36 idea reporters, they will, hopefully, be inspired to help us again in the
37 future.
38
39 +A Fixes: tag indicates that the patch fixes an issue in a previous commit. It
40 +is used to make it easy to determine where a bug originated, which can help
41 +review a bug fix. This tag also assists the stable kernel team in determining
42 +which stable kernel versions should receive your fix. This is the preferred
43 +method for indicating a bug fixed by the patch. See #2 above for more details.
44 +
45
46 15) The canonical patch format
47
48 diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
49 index 85c362d8ea34..d1ab5e17eb13 100644
50 --- a/Documentation/sound/alsa/HD-Audio-Models.txt
51 +++ b/Documentation/sound/alsa/HD-Audio-Models.txt
52 @@ -286,6 +286,11 @@ STAC92HD83*
53 hp-inv-led HP with broken BIOS for inverted mute LED
54 auto BIOS setup (default)
55
56 +STAC92HD95
57 +==========
58 + hp-led LED support for HP laptops
59 + hp-bass Bass HPF setup for HP Spectre 13
60 +
61 STAC9872
62 ========
63 vaio VAIO laptop without SPDIF
64 diff --git a/Makefile b/Makefile
65 index 2e37d8b0bb96..25ecc1dd5bb5 100644
66 --- a/Makefile
67 +++ b/Makefile
68 @@ -1,6 +1,6 @@
69 VERSION = 3
70 PATCHLEVEL = 15
71 -SUBLEVEL = 3
72 +SUBLEVEL = 4
73 EXTRAVERSION =
74 NAME = Shuffling Zombie Juror
75
76 diff --git a/arch/mips/include/asm/sigcontext.h b/arch/mips/include/asm/sigcontext.h
77 index f54bdbe85c0d..eeeb0f48c767 100644
78 --- a/arch/mips/include/asm/sigcontext.h
79 +++ b/arch/mips/include/asm/sigcontext.h
80 @@ -32,8 +32,6 @@ struct sigcontext32 {
81 __u32 sc_lo2;
82 __u32 sc_hi3;
83 __u32 sc_lo3;
84 - __u64 sc_msaregs[32]; /* Most significant 64 bits */
85 - __u32 sc_msa_csr;
86 };
87 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
88 #endif /* _ASM_SIGCONTEXT_H */
89 diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
90 index 681c17603a48..6c9906f59c6e 100644
91 --- a/arch/mips/include/uapi/asm/sigcontext.h
92 +++ b/arch/mips/include/uapi/asm/sigcontext.h
93 @@ -12,10 +12,6 @@
94 #include <linux/types.h>
95 #include <asm/sgidefs.h>
96
97 -/* Bits which may be set in sc_used_math */
98 -#define USEDMATH_FP (1 << 0)
99 -#define USEDMATH_MSA (1 << 1)
100 -
101 #if _MIPS_SIM == _MIPS_SIM_ABI32
102
103 /*
104 @@ -41,8 +37,6 @@ struct sigcontext {
105 unsigned long sc_lo2;
106 unsigned long sc_hi3;
107 unsigned long sc_lo3;
108 - unsigned long long sc_msaregs[32]; /* Most significant 64 bits */
109 - unsigned long sc_msa_csr;
110 };
111
112 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
113 @@ -76,8 +70,6 @@ struct sigcontext {
114 __u32 sc_used_math;
115 __u32 sc_dsp;
116 __u32 sc_reserved;
117 - __u64 sc_msaregs[32];
118 - __u32 sc_msa_csr;
119 };
120
121
122 diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
123 index 0ea75c244b48..7ff80622c8d9 100644
124 --- a/arch/mips/kernel/asm-offsets.c
125 +++ b/arch/mips/kernel/asm-offsets.c
126 @@ -295,7 +295,6 @@ void output_sc_defines(void)
127 OFFSET(SC_LO2, sigcontext, sc_lo2);
128 OFFSET(SC_HI3, sigcontext, sc_hi3);
129 OFFSET(SC_LO3, sigcontext, sc_lo3);
130 - OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
131 BLANK();
132 }
133 #endif
134 @@ -310,7 +309,6 @@ void output_sc_defines(void)
135 OFFSET(SC_MDLO, sigcontext, sc_mdlo);
136 OFFSET(SC_PC, sigcontext, sc_pc);
137 OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
138 - OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
139 BLANK();
140 }
141 #endif
142 @@ -322,7 +320,6 @@ void output_sc32_defines(void)
143 OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
144 OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
145 OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
146 - OFFSET(SC32_MSAREGS, sigcontext32, sc_msaregs);
147 BLANK();
148 }
149 #endif
150 diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
151 index fab40f7d2e03..ac9facc08694 100644
152 --- a/arch/mips/kernel/irq-msc01.c
153 +++ b/arch/mips/kernel/irq-msc01.c
154 @@ -131,7 +131,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
155
156 board_bind_eic_interrupt = &msc_bind_eic_interrupt;
157
158 - for (; nirq >= 0; nirq--, imp++) {
159 + for (; nirq > 0; nirq--, imp++) {
160 int n = imp->im_irq;
161
162 switch (imp->im_type) {
163 diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
164 index 71814272d148..8352523568e6 100644
165 --- a/arch/mips/kernel/r4k_fpu.S
166 +++ b/arch/mips/kernel/r4k_fpu.S
167 @@ -13,7 +13,6 @@
168 * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
169 */
170 #include <asm/asm.h>
171 -#include <asm/asmmacro.h>
172 #include <asm/errno.h>
173 #include <asm/fpregdef.h>
174 #include <asm/mipsregs.h>
175 @@ -246,218 +245,6 @@ LEAF(_restore_fp_context32)
176 END(_restore_fp_context32)
177 #endif
178
179 -#ifdef CONFIG_CPU_HAS_MSA
180 -
181 - .macro save_sc_msareg wr, off, sc, tmp
182 -#ifdef CONFIG_64BIT
183 - copy_u_d \tmp, \wr, 1
184 - EX sd \tmp, (\off+(\wr*8))(\sc)
185 -#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
186 - copy_u_w \tmp, \wr, 2
187 - EX sw \tmp, (\off+(\wr*8)+0)(\sc)
188 - copy_u_w \tmp, \wr, 3
189 - EX sw \tmp, (\off+(\wr*8)+4)(\sc)
190 -#else /* CONFIG_CPU_BIG_ENDIAN */
191 - copy_u_w \tmp, \wr, 2
192 - EX sw \tmp, (\off+(\wr*8)+4)(\sc)
193 - copy_u_w \tmp, \wr, 3
194 - EX sw \tmp, (\off+(\wr*8)+0)(\sc)
195 -#endif
196 - .endm
197 -
198 -/*
199 - * int _save_msa_context(struct sigcontext *sc)
200 - *
201 - * Save the upper 64 bits of each vector register along with the MSA_CSR
202 - * register into sc. Returns zero on success, else non-zero.
203 - */
204 -LEAF(_save_msa_context)
205 - save_sc_msareg 0, SC_MSAREGS, a0, t0
206 - save_sc_msareg 1, SC_MSAREGS, a0, t0
207 - save_sc_msareg 2, SC_MSAREGS, a0, t0
208 - save_sc_msareg 3, SC_MSAREGS, a0, t0
209 - save_sc_msareg 4, SC_MSAREGS, a0, t0
210 - save_sc_msareg 5, SC_MSAREGS, a0, t0
211 - save_sc_msareg 6, SC_MSAREGS, a0, t0
212 - save_sc_msareg 7, SC_MSAREGS, a0, t0
213 - save_sc_msareg 8, SC_MSAREGS, a0, t0
214 - save_sc_msareg 9, SC_MSAREGS, a0, t0
215 - save_sc_msareg 10, SC_MSAREGS, a0, t0
216 - save_sc_msareg 11, SC_MSAREGS, a0, t0
217 - save_sc_msareg 12, SC_MSAREGS, a0, t0
218 - save_sc_msareg 13, SC_MSAREGS, a0, t0
219 - save_sc_msareg 14, SC_MSAREGS, a0, t0
220 - save_sc_msareg 15, SC_MSAREGS, a0, t0
221 - save_sc_msareg 16, SC_MSAREGS, a0, t0
222 - save_sc_msareg 17, SC_MSAREGS, a0, t0
223 - save_sc_msareg 18, SC_MSAREGS, a0, t0
224 - save_sc_msareg 19, SC_MSAREGS, a0, t0
225 - save_sc_msareg 20, SC_MSAREGS, a0, t0
226 - save_sc_msareg 21, SC_MSAREGS, a0, t0
227 - save_sc_msareg 22, SC_MSAREGS, a0, t0
228 - save_sc_msareg 23, SC_MSAREGS, a0, t0
229 - save_sc_msareg 24, SC_MSAREGS, a0, t0
230 - save_sc_msareg 25, SC_MSAREGS, a0, t0
231 - save_sc_msareg 26, SC_MSAREGS, a0, t0
232 - save_sc_msareg 27, SC_MSAREGS, a0, t0
233 - save_sc_msareg 28, SC_MSAREGS, a0, t0
234 - save_sc_msareg 29, SC_MSAREGS, a0, t0
235 - save_sc_msareg 30, SC_MSAREGS, a0, t0
236 - save_sc_msareg 31, SC_MSAREGS, a0, t0
237 - jr ra
238 - li v0, 0
239 - END(_save_msa_context)
240 -
241 -#ifdef CONFIG_MIPS32_COMPAT
242 -
243 -/*
244 - * int _save_msa_context32(struct sigcontext32 *sc)
245 - *
246 - * Save the upper 64 bits of each vector register along with the MSA_CSR
247 - * register into sc. Returns zero on success, else non-zero.
248 - */
249 -LEAF(_save_msa_context32)
250 - save_sc_msareg 0, SC32_MSAREGS, a0, t0
251 - save_sc_msareg 1, SC32_MSAREGS, a0, t0
252 - save_sc_msareg 2, SC32_MSAREGS, a0, t0
253 - save_sc_msareg 3, SC32_MSAREGS, a0, t0
254 - save_sc_msareg 4, SC32_MSAREGS, a0, t0
255 - save_sc_msareg 5, SC32_MSAREGS, a0, t0
256 - save_sc_msareg 6, SC32_MSAREGS, a0, t0
257 - save_sc_msareg 7, SC32_MSAREGS, a0, t0
258 - save_sc_msareg 8, SC32_MSAREGS, a0, t0
259 - save_sc_msareg 9, SC32_MSAREGS, a0, t0
260 - save_sc_msareg 10, SC32_MSAREGS, a0, t0
261 - save_sc_msareg 11, SC32_MSAREGS, a0, t0
262 - save_sc_msareg 12, SC32_MSAREGS, a0, t0
263 - save_sc_msareg 13, SC32_MSAREGS, a0, t0
264 - save_sc_msareg 14, SC32_MSAREGS, a0, t0
265 - save_sc_msareg 15, SC32_MSAREGS, a0, t0
266 - save_sc_msareg 16, SC32_MSAREGS, a0, t0
267 - save_sc_msareg 17, SC32_MSAREGS, a0, t0
268 - save_sc_msareg 18, SC32_MSAREGS, a0, t0
269 - save_sc_msareg 19, SC32_MSAREGS, a0, t0
270 - save_sc_msareg 20, SC32_MSAREGS, a0, t0
271 - save_sc_msareg 21, SC32_MSAREGS, a0, t0
272 - save_sc_msareg 22, SC32_MSAREGS, a0, t0
273 - save_sc_msareg 23, SC32_MSAREGS, a0, t0
274 - save_sc_msareg 24, SC32_MSAREGS, a0, t0
275 - save_sc_msareg 25, SC32_MSAREGS, a0, t0
276 - save_sc_msareg 26, SC32_MSAREGS, a0, t0
277 - save_sc_msareg 27, SC32_MSAREGS, a0, t0
278 - save_sc_msareg 28, SC32_MSAREGS, a0, t0
279 - save_sc_msareg 29, SC32_MSAREGS, a0, t0
280 - save_sc_msareg 30, SC32_MSAREGS, a0, t0
281 - save_sc_msareg 31, SC32_MSAREGS, a0, t0
282 - jr ra
283 - li v0, 0
284 - END(_save_msa_context32)
285 -
286 -#endif /* CONFIG_MIPS32_COMPAT */
287 -
288 - .macro restore_sc_msareg wr, off, sc, tmp
289 -#ifdef CONFIG_64BIT
290 - EX ld \tmp, (\off+(\wr*8))(\sc)
291 - insert_d \wr, 1, \tmp
292 -#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
293 - EX lw \tmp, (\off+(\wr*8)+0)(\sc)
294 - insert_w \wr, 2, \tmp
295 - EX lw \tmp, (\off+(\wr*8)+4)(\sc)
296 - insert_w \wr, 3, \tmp
297 -#else /* CONFIG_CPU_BIG_ENDIAN */
298 - EX lw \tmp, (\off+(\wr*8)+4)(\sc)
299 - insert_w \wr, 2, \tmp
300 - EX lw \tmp, (\off+(\wr*8)+0)(\sc)
301 - insert_w \wr, 3, \tmp
302 -#endif
303 - .endm
304 -
305 -/*
306 - * int _restore_msa_context(struct sigcontext *sc)
307 - */
308 -LEAF(_restore_msa_context)
309 - restore_sc_msareg 0, SC_MSAREGS, a0, t0
310 - restore_sc_msareg 1, SC_MSAREGS, a0, t0
311 - restore_sc_msareg 2, SC_MSAREGS, a0, t0
312 - restore_sc_msareg 3, SC_MSAREGS, a0, t0
313 - restore_sc_msareg 4, SC_MSAREGS, a0, t0
314 - restore_sc_msareg 5, SC_MSAREGS, a0, t0
315 - restore_sc_msareg 6, SC_MSAREGS, a0, t0
316 - restore_sc_msareg 7, SC_MSAREGS, a0, t0
317 - restore_sc_msareg 8, SC_MSAREGS, a0, t0
318 - restore_sc_msareg 9, SC_MSAREGS, a0, t0
319 - restore_sc_msareg 10, SC_MSAREGS, a0, t0
320 - restore_sc_msareg 11, SC_MSAREGS, a0, t0
321 - restore_sc_msareg 12, SC_MSAREGS, a0, t0
322 - restore_sc_msareg 13, SC_MSAREGS, a0, t0
323 - restore_sc_msareg 14, SC_MSAREGS, a0, t0
324 - restore_sc_msareg 15, SC_MSAREGS, a0, t0
325 - restore_sc_msareg 16, SC_MSAREGS, a0, t0
326 - restore_sc_msareg 17, SC_MSAREGS, a0, t0
327 - restore_sc_msareg 18, SC_MSAREGS, a0, t0
328 - restore_sc_msareg 19, SC_MSAREGS, a0, t0
329 - restore_sc_msareg 20, SC_MSAREGS, a0, t0
330 - restore_sc_msareg 21, SC_MSAREGS, a0, t0
331 - restore_sc_msareg 22, SC_MSAREGS, a0, t0
332 - restore_sc_msareg 23, SC_MSAREGS, a0, t0
333 - restore_sc_msareg 24, SC_MSAREGS, a0, t0
334 - restore_sc_msareg 25, SC_MSAREGS, a0, t0
335 - restore_sc_msareg 26, SC_MSAREGS, a0, t0
336 - restore_sc_msareg 27, SC_MSAREGS, a0, t0
337 - restore_sc_msareg 28, SC_MSAREGS, a0, t0
338 - restore_sc_msareg 29, SC_MSAREGS, a0, t0
339 - restore_sc_msareg 30, SC_MSAREGS, a0, t0
340 - restore_sc_msareg 31, SC_MSAREGS, a0, t0
341 - jr ra
342 - li v0, 0
343 - END(_restore_msa_context)
344 -
345 -#ifdef CONFIG_MIPS32_COMPAT
346 -
347 -/*
348 - * int _restore_msa_context32(struct sigcontext32 *sc)
349 - */
350 -LEAF(_restore_msa_context32)
351 - restore_sc_msareg 0, SC32_MSAREGS, a0, t0
352 - restore_sc_msareg 1, SC32_MSAREGS, a0, t0
353 - restore_sc_msareg 2, SC32_MSAREGS, a0, t0
354 - restore_sc_msareg 3, SC32_MSAREGS, a0, t0
355 - restore_sc_msareg 4, SC32_MSAREGS, a0, t0
356 - restore_sc_msareg 5, SC32_MSAREGS, a0, t0
357 - restore_sc_msareg 6, SC32_MSAREGS, a0, t0
358 - restore_sc_msareg 7, SC32_MSAREGS, a0, t0
359 - restore_sc_msareg 8, SC32_MSAREGS, a0, t0
360 - restore_sc_msareg 9, SC32_MSAREGS, a0, t0
361 - restore_sc_msareg 10, SC32_MSAREGS, a0, t0
362 - restore_sc_msareg 11, SC32_MSAREGS, a0, t0
363 - restore_sc_msareg 12, SC32_MSAREGS, a0, t0
364 - restore_sc_msareg 13, SC32_MSAREGS, a0, t0
365 - restore_sc_msareg 14, SC32_MSAREGS, a0, t0
366 - restore_sc_msareg 15, SC32_MSAREGS, a0, t0
367 - restore_sc_msareg 16, SC32_MSAREGS, a0, t0
368 - restore_sc_msareg 17, SC32_MSAREGS, a0, t0
369 - restore_sc_msareg 18, SC32_MSAREGS, a0, t0
370 - restore_sc_msareg 19, SC32_MSAREGS, a0, t0
371 - restore_sc_msareg 20, SC32_MSAREGS, a0, t0
372 - restore_sc_msareg 21, SC32_MSAREGS, a0, t0
373 - restore_sc_msareg 22, SC32_MSAREGS, a0, t0
374 - restore_sc_msareg 23, SC32_MSAREGS, a0, t0
375 - restore_sc_msareg 24, SC32_MSAREGS, a0, t0
376 - restore_sc_msareg 25, SC32_MSAREGS, a0, t0
377 - restore_sc_msareg 26, SC32_MSAREGS, a0, t0
378 - restore_sc_msareg 27, SC32_MSAREGS, a0, t0
379 - restore_sc_msareg 28, SC32_MSAREGS, a0, t0
380 - restore_sc_msareg 29, SC32_MSAREGS, a0, t0
381 - restore_sc_msareg 30, SC32_MSAREGS, a0, t0
382 - restore_sc_msareg 31, SC32_MSAREGS, a0, t0
383 - jr ra
384 - li v0, 0
385 - END(_restore_msa_context32)
386 -
387 -#endif /* CONFIG_MIPS32_COMPAT */
388 -
389 -#endif /* CONFIG_CPU_HAS_MSA */
390 -
391 .set reorder
392
393 .type fault@function
394 diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
395 index 33133d3df3e5..9e60d117e41e 100644
396 --- a/arch/mips/kernel/signal.c
397 +++ b/arch/mips/kernel/signal.c
398 @@ -31,7 +31,6 @@
399 #include <linux/bitops.h>
400 #include <asm/cacheflush.h>
401 #include <asm/fpu.h>
402 -#include <asm/msa.h>
403 #include <asm/sim.h>
404 #include <asm/ucontext.h>
405 #include <asm/cpu-features.h>
406 @@ -48,9 +47,6 @@ static int (*restore_fp_context)(struct sigcontext __user *sc);
407 extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
408 extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
409
410 -extern asmlinkage int _save_msa_context(struct sigcontext __user *sc);
411 -extern asmlinkage int _restore_msa_context(struct sigcontext __user *sc);
412 -
413 struct sigframe {
414 u32 sf_ass[4]; /* argument save space for o32 */
415 u32 sf_pad[2]; /* Was: signal trampoline */
416 @@ -100,60 +96,20 @@ static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
417 }
418
419 /*
420 - * These functions will save only the upper 64 bits of the vector registers,
421 - * since the lower 64 bits have already been saved as the scalar FP context.
422 - */
423 -static int copy_msa_to_sigcontext(struct sigcontext __user *sc)
424 -{
425 - int i;
426 - int err = 0;
427 -
428 - for (i = 0; i < NUM_FPU_REGS; i++) {
429 - err |=
430 - __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
431 - &sc->sc_msaregs[i]);
432 - }
433 - err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
434 -
435 - return err;
436 -}
437 -
438 -static int copy_msa_from_sigcontext(struct sigcontext __user *sc)
439 -{
440 - int i;
441 - int err = 0;
442 - u64 val;
443 -
444 - for (i = 0; i < NUM_FPU_REGS; i++) {
445 - err |= __get_user(val, &sc->sc_msaregs[i]);
446 - set_fpr64(&current->thread.fpu.fpr[i], 1, val);
447 - }
448 - err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
449 -
450 - return err;
451 -}
452 -
453 -/*
454 * Helper routines
455 */
456 -static int protected_save_fp_context(struct sigcontext __user *sc,
457 - unsigned used_math)
458 +static int protected_save_fp_context(struct sigcontext __user *sc)
459 {
460 int err;
461 - bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
462 #ifndef CONFIG_EVA
463 while (1) {
464 lock_fpu_owner();
465 if (is_fpu_owner()) {
466 err = save_fp_context(sc);
467 - if (save_msa && !err)
468 - err = _save_msa_context(sc);
469 unlock_fpu_owner();
470 } else {
471 unlock_fpu_owner();
472 err = copy_fp_to_sigcontext(sc);
473 - if (save_msa && !err)
474 - err = copy_msa_to_sigcontext(sc);
475 }
476 if (likely(!err))
477 break;
478 @@ -169,38 +125,24 @@ static int protected_save_fp_context(struct sigcontext __user *sc,
479 * EVA does not have FPU EVA instructions so saving fpu context directly
480 * does not work.
481 */
482 - disable_msa();
483 lose_fpu(1);
484 err = save_fp_context(sc); /* this might fail */
485 - if (save_msa && !err)
486 - err = copy_msa_to_sigcontext(sc);
487 #endif
488 return err;
489 }
490
491 -static int protected_restore_fp_context(struct sigcontext __user *sc,
492 - unsigned used_math)
493 +static int protected_restore_fp_context(struct sigcontext __user *sc)
494 {
495 int err, tmp __maybe_unused;
496 - bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
497 #ifndef CONFIG_EVA
498 while (1) {
499 lock_fpu_owner();
500 if (is_fpu_owner()) {
501 err = restore_fp_context(sc);
502 - if (restore_msa && !err) {
503 - enable_msa();
504 - err = _restore_msa_context(sc);
505 - } else {
506 - /* signal handler may have used MSA */
507 - disable_msa();
508 - }
509 unlock_fpu_owner();
510 } else {
511 unlock_fpu_owner();
512 err = copy_fp_from_sigcontext(sc);
513 - if (!err && (used_math & USEDMATH_MSA))
514 - err = copy_msa_from_sigcontext(sc);
515 }
516 if (likely(!err))
517 break;
518 @@ -216,11 +158,8 @@ static int protected_restore_fp_context(struct sigcontext __user *sc,
519 * EVA does not have FPU EVA instructions so restoring fpu context
520 * directly does not work.
521 */
522 - enable_msa();
523 lose_fpu(0);
524 err = restore_fp_context(sc); /* this might fail */
525 - if (restore_msa && !err)
526 - err = copy_msa_from_sigcontext(sc);
527 #endif
528 return err;
529 }
530 @@ -252,8 +191,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
531 err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
532 }
533
534 - used_math = used_math() ? USEDMATH_FP : 0;
535 - used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
536 + used_math = !!used_math();
537 err |= __put_user(used_math, &sc->sc_used_math);
538
539 if (used_math) {
540 @@ -261,7 +199,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
541 * Save FPU state to signal context. Signal handler
542 * will "inherit" current FPU state.
543 */
544 - err |= protected_save_fp_context(sc, used_math);
545 + err |= protected_save_fp_context(sc);
546 }
547 return err;
548 }
549 @@ -286,14 +224,14 @@ int fpcsr_pending(unsigned int __user *fpcsr)
550 }
551
552 static int
553 -check_and_restore_fp_context(struct sigcontext __user *sc, unsigned used_math)
554 +check_and_restore_fp_context(struct sigcontext __user *sc)
555 {
556 int err, sig;
557
558 err = sig = fpcsr_pending(&sc->sc_fpc_csr);
559 if (err > 0)
560 err = 0;
561 - err |= protected_restore_fp_context(sc, used_math);
562 + err |= protected_restore_fp_context(sc);
563 return err ?: sig;
564 }
565
566 @@ -333,10 +271,9 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
567 if (used_math) {
568 /* restore fpu context if we have used it before */
569 if (!err)
570 - err = check_and_restore_fp_context(sc, used_math);
571 + err = check_and_restore_fp_context(sc);
572 } else {
573 - /* signal handler may have used FPU or MSA. Disable them. */
574 - disable_msa();
575 + /* signal handler may have used FPU. Give it up. */
576 lose_fpu(0);
577 }
578
579 diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
580 index 299f956e4db3..bae2e6ee2109 100644
581 --- a/arch/mips/kernel/signal32.c
582 +++ b/arch/mips/kernel/signal32.c
583 @@ -30,7 +30,6 @@
584 #include <asm/sim.h>
585 #include <asm/ucontext.h>
586 #include <asm/fpu.h>
587 -#include <asm/msa.h>
588 #include <asm/war.h>
589 #include <asm/vdso.h>
590 #include <asm/dsp.h>
591 @@ -43,9 +42,6 @@ static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
592 extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
593 extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
594
595 -extern asmlinkage int _save_msa_context32(struct sigcontext32 __user *sc);
596 -extern asmlinkage int _restore_msa_context32(struct sigcontext32 __user *sc);
597 -
598 /*
599 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
600 */
601 @@ -115,59 +111,19 @@ static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
602 }
603
604 /*
605 - * These functions will save only the upper 64 bits of the vector registers,
606 - * since the lower 64 bits have already been saved as the scalar FP context.
607 - */
608 -static int copy_msa_to_sigcontext32(struct sigcontext32 __user *sc)
609 -{
610 - int i;
611 - int err = 0;
612 -
613 - for (i = 0; i < NUM_FPU_REGS; i++) {
614 - err |=
615 - __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
616 - &sc->sc_msaregs[i]);
617 - }
618 - err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
619 -
620 - return err;
621 -}
622 -
623 -static int copy_msa_from_sigcontext32(struct sigcontext32 __user *sc)
624 -{
625 - int i;
626 - int err = 0;
627 - u64 val;
628 -
629 - for (i = 0; i < NUM_FPU_REGS; i++) {
630 - err |= __get_user(val, &sc->sc_msaregs[i]);
631 - set_fpr64(&current->thread.fpu.fpr[i], 1, val);
632 - }
633 - err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
634 -
635 - return err;
636 -}
637 -
638 -/*
639 * sigcontext handlers
640 */
641 -static int protected_save_fp_context32(struct sigcontext32 __user *sc,
642 - unsigned used_math)
643 +static int protected_save_fp_context32(struct sigcontext32 __user *sc)
644 {
645 int err;
646 - bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
647 while (1) {
648 lock_fpu_owner();
649 if (is_fpu_owner()) {
650 err = save_fp_context32(sc);
651 - if (save_msa && !err)
652 - err = _save_msa_context32(sc);
653 unlock_fpu_owner();
654 } else {
655 unlock_fpu_owner();
656 err = copy_fp_to_sigcontext32(sc);
657 - if (save_msa && !err)
658 - err = copy_msa_to_sigcontext32(sc);
659 }
660 if (likely(!err))
661 break;
662 @@ -181,28 +137,17 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc,
663 return err;
664 }
665
666 -static int protected_restore_fp_context32(struct sigcontext32 __user *sc,
667 - unsigned used_math)
668 +static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
669 {
670 int err, tmp __maybe_unused;
671 - bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
672 while (1) {
673 lock_fpu_owner();
674 if (is_fpu_owner()) {
675 err = restore_fp_context32(sc);
676 - if (restore_msa && !err) {
677 - enable_msa();
678 - err = _restore_msa_context32(sc);
679 - } else {
680 - /* signal handler may have used MSA */
681 - disable_msa();
682 - }
683 unlock_fpu_owner();
684 } else {
685 unlock_fpu_owner();
686 err = copy_fp_from_sigcontext32(sc);
687 - if (restore_msa && !err)
688 - err = copy_msa_from_sigcontext32(sc);
689 }
690 if (likely(!err))
691 break;
692 @@ -241,8 +186,7 @@ static int setup_sigcontext32(struct pt_regs *regs,
693 err |= __put_user(mflo3(), &sc->sc_lo3);
694 }
695
696 - used_math = used_math() ? USEDMATH_FP : 0;
697 - used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
698 + used_math = !!used_math();
699 err |= __put_user(used_math, &sc->sc_used_math);
700
701 if (used_math) {
702 @@ -250,21 +194,20 @@ static int setup_sigcontext32(struct pt_regs *regs,
703 * Save FPU state to signal context. Signal handler
704 * will "inherit" current FPU state.
705 */
706 - err |= protected_save_fp_context32(sc, used_math);
707 + err |= protected_save_fp_context32(sc);
708 }
709 return err;
710 }
711
712 static int
713 -check_and_restore_fp_context32(struct sigcontext32 __user *sc,
714 - unsigned used_math)
715 +check_and_restore_fp_context32(struct sigcontext32 __user *sc)
716 {
717 int err, sig;
718
719 err = sig = fpcsr_pending(&sc->sc_fpc_csr);
720 if (err > 0)
721 err = 0;
722 - err |= protected_restore_fp_context32(sc, used_math);
723 + err |= protected_restore_fp_context32(sc);
724 return err ?: sig;
725 }
726
727 @@ -301,10 +244,9 @@ static int restore_sigcontext32(struct pt_regs *regs,
728 if (used_math) {
729 /* restore fpu context if we have used it before */
730 if (!err)
731 - err = check_and_restore_fp_context32(sc, used_math);
732 + err = check_and_restore_fp_context32(sc);
733 } else {
734 - /* signal handler may have used FPU or MSA. Disable them. */
735 - disable_msa();
736 + /* signal handler may have used FPU. Give it up. */
737 lose_fpu(0);
738 }
739
740 diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
741 index 5efce56f0df0..3e0ff8d0fbf9 100644
742 --- a/arch/mips/kvm/kvm_mips.c
743 +++ b/arch/mips/kvm/kvm_mips.c
744 @@ -149,9 +149,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm)
745 if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
746 kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
747 }
748 -
749 - if (kvm->arch.guest_pmap)
750 - kfree(kvm->arch.guest_pmap);
751 + kfree(kvm->arch.guest_pmap);
752
753 kvm_for_each_vcpu(i, vcpu, kvm) {
754 kvm_arch_vcpu_free(vcpu);
755 @@ -389,12 +387,9 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
756
757 kvm_mips_dump_stats(vcpu);
758
759 - if (vcpu->arch.guest_ebase)
760 - kfree(vcpu->arch.guest_ebase);
761 -
762 - if (vcpu->arch.kseg0_commpage)
763 - kfree(vcpu->arch.kseg0_commpage);
764 -
765 + kfree(vcpu->arch.guest_ebase);
766 + kfree(vcpu->arch.kseg0_commpage);
767 + kfree(vcpu);
768 }
769
770 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
771 diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
772 index 0e83e7d8c73f..b5fad8afe837 100644
773 --- a/arch/powerpc/include/asm/switch_to.h
774 +++ b/arch/powerpc/include/asm/switch_to.h
775 @@ -84,6 +84,8 @@ static inline void clear_task_ebb(struct task_struct *t)
776 {
777 #ifdef CONFIG_PPC_BOOK3S_64
778 /* EBB perf events are not inherited, so clear all EBB state. */
779 + t->thread.ebbrr = 0;
780 + t->thread.ebbhr = 0;
781 t->thread.bescr = 0;
782 t->thread.mmcr2 = 0;
783 t->thread.mmcr0 = 0;
784 diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
785 index ea4dc3a89c1f..14b2862533b5 100644
786 --- a/arch/powerpc/include/asm/systbl.h
787 +++ b/arch/powerpc/include/asm/systbl.h
788 @@ -190,7 +190,7 @@ SYSCALL_SPU(getcwd)
789 SYSCALL_SPU(capget)
790 SYSCALL_SPU(capset)
791 COMPAT_SYS(sigaltstack)
792 -COMPAT_SYS_SPU(sendfile)
793 +SYSX_SPU(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
794 SYSCALL(ni_syscall)
795 SYSCALL(ni_syscall)
796 PPC_SYS(vfork)
797 diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
798 index 5b7657959faa..de2c0e4ee1aa 100644
799 --- a/arch/powerpc/include/uapi/asm/cputable.h
800 +++ b/arch/powerpc/include/uapi/asm/cputable.h
801 @@ -41,5 +41,6 @@
802 #define PPC_FEATURE2_EBB 0x10000000
803 #define PPC_FEATURE2_ISEL 0x08000000
804 #define PPC_FEATURE2_TAR 0x04000000
805 +#define PPC_FEATURE2_VEC_CRYPTO 0x02000000
806
807 #endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */
808 diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
809 index c1faade6506d..11da04a4625a 100644
810 --- a/arch/powerpc/kernel/cputable.c
811 +++ b/arch/powerpc/kernel/cputable.c
812 @@ -109,7 +109,8 @@ extern void __restore_cpu_e6500(void);
813 PPC_FEATURE_PSERIES_PERFMON_COMPAT)
814 #define COMMON_USER2_POWER8 (PPC_FEATURE2_ARCH_2_07 | \
815 PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_DSCR | \
816 - PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR)
817 + PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \
818 + PPC_FEATURE2_VEC_CRYPTO)
819 #define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\
820 PPC_FEATURE_TRUE_LE | \
821 PPC_FEATURE_HAS_ALTIVEC_COMP)
822 diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
823 index 40bd7bd4e19a..8a8b722870a1 100644
824 --- a/arch/powerpc/kernel/legacy_serial.c
825 +++ b/arch/powerpc/kernel/legacy_serial.c
826 @@ -48,6 +48,9 @@ static struct of_device_id legacy_serial_parents[] __initdata = {
827 static unsigned int legacy_serial_count;
828 static int legacy_serial_console = -1;
829
830 +static const upf_t legacy_port_flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
831 + UPF_SHARE_IRQ | UPF_FIXED_PORT;
832 +
833 static unsigned int tsi_serial_in(struct uart_port *p, int offset)
834 {
835 unsigned int tmp;
836 @@ -153,8 +156,6 @@ static int __init add_legacy_soc_port(struct device_node *np,
837 {
838 u64 addr;
839 const __be32 *addrp;
840 - upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ
841 - | UPF_FIXED_PORT;
842 struct device_node *tsi = of_get_parent(np);
843
844 /* We only support ports that have a clock frequency properly
845 @@ -185,9 +186,11 @@ static int __init add_legacy_soc_port(struct device_node *np,
846 * IO port value. It will be fixed up later along with the irq
847 */
848 if (tsi && !strcmp(tsi->type, "tsi-bridge"))
849 - return add_legacy_port(np, -1, UPIO_TSI, addr, addr, NO_IRQ, flags, 0);
850 + return add_legacy_port(np, -1, UPIO_TSI, addr, addr,
851 + NO_IRQ, legacy_port_flags, 0);
852 else
853 - return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0);
854 + return add_legacy_port(np, -1, UPIO_MEM, addr, addr,
855 + NO_IRQ, legacy_port_flags, 0);
856 }
857
858 static int __init add_legacy_isa_port(struct device_node *np,
859 @@ -233,7 +236,7 @@ static int __init add_legacy_isa_port(struct device_node *np,
860
861 /* Add port, irq will be dealt with later */
862 return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]),
863 - taddr, NO_IRQ, UPF_BOOT_AUTOCONF, 0);
864 + taddr, NO_IRQ, legacy_port_flags, 0);
865
866 }
867
868 @@ -306,7 +309,7 @@ static int __init add_legacy_pci_port(struct device_node *np,
869 * IO port value. It will be fixed up later along with the irq
870 */
871 return add_legacy_port(np, index, iotype, base, addr, NO_IRQ,
872 - UPF_BOOT_AUTOCONF, np != pci_dev);
873 + legacy_port_flags, np != pci_dev);
874 }
875 #endif
876
877 diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
878 index 79b7612ac6fa..aa3e7c0f60e2 100644
879 --- a/arch/powerpc/kernel/setup-common.c
880 +++ b/arch/powerpc/kernel/setup-common.c
881 @@ -459,9 +459,17 @@ void __init smp_setup_cpu_maps(void)
882 }
883
884 for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
885 + bool avail;
886 +
887 DBG(" thread %d -> cpu %d (hard id %d)\n",
888 j, cpu, be32_to_cpu(intserv[j]));
889 - set_cpu_present(cpu, true);
890 +
891 + avail = of_device_is_available(dn);
892 + if (!avail)
893 + avail = !of_property_match_string(dn,
894 + "enable-method", "spin-table");
895 +
896 + set_cpu_present(cpu, avail);
897 set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j]));
898 set_cpu_possible(cpu, true);
899 cpu++;
900 diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
901 index 7e711bdcc6da..9fff9cdcc519 100644
902 --- a/arch/powerpc/kernel/time.c
903 +++ b/arch/powerpc/kernel/time.c
904 @@ -551,7 +551,7 @@ void timer_interrupt(struct pt_regs * regs)
905 may_hard_irq_enable();
906
907
908 -#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
909 +#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
910 if (atomic_read(&ppc_n_lost_interrupts) != 0)
911 do_IRQ(regs);
912 #endif
913 diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
914 index c0511c27a733..412dd46dd0b7 100644
915 --- a/arch/powerpc/lib/sstep.c
916 +++ b/arch/powerpc/lib/sstep.c
917 @@ -1470,7 +1470,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
918 regs->gpr[rd] = byterev_4(val);
919 goto ldst_done;
920
921 -#ifdef CONFIG_PPC_CPU
922 +#ifdef CONFIG_PPC_FPU
923 case 535: /* lfsx */
924 case 567: /* lfsux */
925 if (!(regs->msr & MSR_FP))
926 diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
927 index 06ba83b036d3..5cbfde131839 100644
928 --- a/arch/powerpc/mm/hash_utils_64.c
929 +++ b/arch/powerpc/mm/hash_utils_64.c
930 @@ -964,6 +964,22 @@ void hash_failure_debug(unsigned long ea, unsigned long access,
931 trap, vsid, ssize, psize, lpsize, pte);
932 }
933
934 +static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
935 + int psize, bool user_region)
936 +{
937 + if (user_region) {
938 + if (psize != get_paca_psize(ea)) {
939 + get_paca()->context = mm->context;
940 + slb_flush_and_rebolt();
941 + }
942 + } else if (get_paca()->vmalloc_sllp !=
943 + mmu_psize_defs[mmu_vmalloc_psize].sllp) {
944 + get_paca()->vmalloc_sllp =
945 + mmu_psize_defs[mmu_vmalloc_psize].sllp;
946 + slb_vmalloc_update();
947 + }
948 +}
949 +
950 /* Result code is:
951 * 0 - handled
952 * 1 - normal page fault
953 @@ -1085,6 +1101,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
954 WARN_ON(1);
955 }
956 #endif
957 + check_paca_psize(ea, mm, psize, user_region);
958 +
959 goto bail;
960 }
961
962 @@ -1125,17 +1143,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
963 #endif
964 }
965 }
966 - if (user_region) {
967 - if (psize != get_paca_psize(ea)) {
968 - get_paca()->context = mm->context;
969 - slb_flush_and_rebolt();
970 - }
971 - } else if (get_paca()->vmalloc_sllp !=
972 - mmu_psize_defs[mmu_vmalloc_psize].sllp) {
973 - get_paca()->vmalloc_sllp =
974 - mmu_psize_defs[mmu_vmalloc_psize].sllp;
975 - slb_vmalloc_update();
976 - }
977 +
978 + check_paca_psize(ea, mm, psize, user_region);
979 #endif /* CONFIG_PPC_64K_PAGES */
980
981 #ifdef CONFIG_PPC_HAS_HASH_64K
982 diff --git a/arch/powerpc/platforms/powernv/opal-sysparam.c b/arch/powerpc/platforms/powernv/opal-sysparam.c
983 index d202f9bc3683..9d1acf22a099 100644
984 --- a/arch/powerpc/platforms/powernv/opal-sysparam.c
985 +++ b/arch/powerpc/platforms/powernv/opal-sysparam.c
986 @@ -260,10 +260,10 @@ void __init opal_sys_param_init(void)
987 attr[i].kobj_attr.attr.mode = S_IRUGO;
988 break;
989 case OPAL_SYSPARAM_WRITE:
990 - attr[i].kobj_attr.attr.mode = S_IWUGO;
991 + attr[i].kobj_attr.attr.mode = S_IWUSR;
992 break;
993 case OPAL_SYSPARAM_RW:
994 - attr[i].kobj_attr.attr.mode = S_IRUGO | S_IWUGO;
995 + attr[i].kobj_attr.attr.mode = S_IRUGO | S_IWUSR;
996 break;
997 default:
998 break;
999 diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
1000 index 8a8f0472d98f..83da53fde6b5 100644
1001 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c
1002 +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
1003 @@ -464,6 +464,7 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
1004 } else {
1005 result = EEH_STATE_NOT_SUPPORT;
1006 }
1007 + break;
1008 default:
1009 result = EEH_STATE_NOT_SUPPORT;
1010 }
1011 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
1012 index 14fd6fd75a19..6205f0c434db 100644
1013 --- a/arch/x86/include/asm/ptrace.h
1014 +++ b/arch/x86/include/asm/ptrace.h
1015 @@ -231,6 +231,22 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
1016
1017 #define ARCH_HAS_USER_SINGLE_STEP_INFO
1018
1019 +/*
1020 + * When hitting ptrace_stop(), we cannot return using SYSRET because
1021 + * that does not restore the full CPU state, only a minimal set. The
1022 + * ptracer can change arbitrary register values, which is usually okay
1023 + * because the usual ptrace stops run off the signal delivery path which
1024 + * forces IRET; however, ptrace_event() stops happen in arbitrary places
1025 + * in the kernel and don't force IRET path.
1026 + *
1027 + * So force IRET path after a ptrace stop.
1028 + */
1029 +#define arch_ptrace_stop_needed(code, info) \
1030 +({ \
1031 + set_thread_flag(TIF_NOTIFY_RESUME); \
1032 + false; \
1033 +})
1034 +
1035 struct user_desc;
1036 extern int do_get_thread_area(struct task_struct *p, int idx,
1037 struct user_desc __user *info);
1038 diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
1039 index 59c5abe32f06..fb624469d0ee 100644
1040 --- a/drivers/block/mtip32xx/mtip32xx.c
1041 +++ b/drivers/block/mtip32xx/mtip32xx.c
1042 @@ -1529,6 +1529,37 @@ static inline void ata_swap_string(u16 *buf, unsigned int len)
1043 be16_to_cpus(&buf[i]);
1044 }
1045
1046 +static void mtip_set_timeout(struct driver_data *dd,
1047 + struct host_to_dev_fis *fis,
1048 + unsigned int *timeout, u8 erasemode)
1049 +{
1050 + switch (fis->command) {
1051 + case ATA_CMD_DOWNLOAD_MICRO:
1052 + *timeout = 120000; /* 2 minutes */
1053 + break;
1054 + case ATA_CMD_SEC_ERASE_UNIT:
1055 + case 0xFC:
1056 + if (erasemode)
1057 + *timeout = ((*(dd->port->identify + 90) * 2) * 60000);
1058 + else
1059 + *timeout = ((*(dd->port->identify + 89) * 2) * 60000);
1060 + break;
1061 + case ATA_CMD_STANDBYNOW1:
1062 + *timeout = 120000; /* 2 minutes */
1063 + break;
1064 + case 0xF7:
1065 + case 0xFA:
1066 + *timeout = 60000; /* 60 seconds */
1067 + break;
1068 + case ATA_CMD_SMART:
1069 + *timeout = 15000; /* 15 seconds */
1070 + break;
1071 + default:
1072 + *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
1073 + break;
1074 + }
1075 +}
1076 +
1077 /*
1078 * Request the device identity information.
1079 *
1080 @@ -1644,6 +1675,7 @@ static int mtip_standby_immediate(struct mtip_port *port)
1081 int rv;
1082 struct host_to_dev_fis fis;
1083 unsigned long start;
1084 + unsigned int timeout;
1085
1086 /* Build the FIS. */
1087 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1088 @@ -1651,6 +1683,8 @@ static int mtip_standby_immediate(struct mtip_port *port)
1089 fis.opts = 1 << 7;
1090 fis.command = ATA_CMD_STANDBYNOW1;
1091
1092 + mtip_set_timeout(port->dd, &fis, &timeout, 0);
1093 +
1094 start = jiffies;
1095 rv = mtip_exec_internal_command(port,
1096 &fis,
1097 @@ -1659,7 +1693,7 @@ static int mtip_standby_immediate(struct mtip_port *port)
1098 0,
1099 0,
1100 GFP_ATOMIC,
1101 - 15000);
1102 + timeout);
1103 dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
1104 jiffies_to_msecs(jiffies - start));
1105 if (rv)
1106 @@ -2202,36 +2236,6 @@ static unsigned int implicit_sector(unsigned char command,
1107 }
1108 return rv;
1109 }
1110 -static void mtip_set_timeout(struct driver_data *dd,
1111 - struct host_to_dev_fis *fis,
1112 - unsigned int *timeout, u8 erasemode)
1113 -{
1114 - switch (fis->command) {
1115 - case ATA_CMD_DOWNLOAD_MICRO:
1116 - *timeout = 120000; /* 2 minutes */
1117 - break;
1118 - case ATA_CMD_SEC_ERASE_UNIT:
1119 - case 0xFC:
1120 - if (erasemode)
1121 - *timeout = ((*(dd->port->identify + 90) * 2) * 60000);
1122 - else
1123 - *timeout = ((*(dd->port->identify + 89) * 2) * 60000);
1124 - break;
1125 - case ATA_CMD_STANDBYNOW1:
1126 - *timeout = 120000; /* 2 minutes */
1127 - break;
1128 - case 0xF7:
1129 - case 0xFA:
1130 - *timeout = 60000; /* 60 seconds */
1131 - break;
1132 - case ATA_CMD_SMART:
1133 - *timeout = 15000; /* 15 seconds */
1134 - break;
1135 - default:
1136 - *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
1137 - break;
1138 - }
1139 -}
1140
1141 /*
1142 * Executes a taskfile
1143 @@ -4479,6 +4483,57 @@ static DEFINE_HANDLER(5);
1144 static DEFINE_HANDLER(6);
1145 static DEFINE_HANDLER(7);
1146
1147 +static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
1148 +{
1149 + int pos;
1150 + unsigned short pcie_dev_ctrl;
1151 +
1152 + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1153 + if (pos) {
1154 + pci_read_config_word(pdev,
1155 + pos + PCI_EXP_DEVCTL,
1156 + &pcie_dev_ctrl);
1157 + if (pcie_dev_ctrl & (1 << 11) ||
1158 + pcie_dev_ctrl & (1 << 4)) {
1159 + dev_info(&dd->pdev->dev,
1160 + "Disabling ERO/No-Snoop on bridge device %04x:%04x\n",
1161 + pdev->vendor, pdev->device);
1162 + pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN |
1163 + PCI_EXP_DEVCTL_RELAX_EN);
1164 + pci_write_config_word(pdev,
1165 + pos + PCI_EXP_DEVCTL,
1166 + pcie_dev_ctrl);
1167 + }
1168 + }
1169 +}
1170 +
1171 +static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev)
1172 +{
1173 + /*
1174 + * This workaround is specific to AMD/ATI chipset with a PCI upstream
1175 + * device with device id 0x5aXX
1176 + */
1177 + if (pdev->bus && pdev->bus->self) {
1178 + if (pdev->bus->self->vendor == PCI_VENDOR_ID_ATI &&
1179 + ((pdev->bus->self->device & 0xff00) == 0x5a00)) {
1180 + mtip_disable_link_opts(dd, pdev->bus->self);
1181 + } else {
1182 + /* Check further up the topology */
1183 + struct pci_dev *parent_dev = pdev->bus->self;
1184 + if (parent_dev->bus &&
1185 + parent_dev->bus->parent &&
1186 + parent_dev->bus->parent->self &&
1187 + parent_dev->bus->parent->self->vendor ==
1188 + PCI_VENDOR_ID_ATI &&
1189 + (parent_dev->bus->parent->self->device &
1190 + 0xff00) == 0x5a00) {
1191 + mtip_disable_link_opts(dd,
1192 + parent_dev->bus->parent->self);
1193 + }
1194 + }
1195 + }
1196 +}
1197 +
1198 /*
1199 * Called for each supported PCI device detected.
1200 *
1201 @@ -4630,6 +4685,8 @@ static int mtip_pci_probe(struct pci_dev *pdev,
1202 goto msi_initialize_err;
1203 }
1204
1205 + mtip_fix_ero_nosnoop(dd, pdev);
1206 +
1207 /* Initialize the block layer. */
1208 rv = mtip_block_initialize(dd);
1209 if (rv < 0) {
1210 @@ -4935,13 +4992,13 @@ static int __init mtip_init(void)
1211 */
1212 static void __exit mtip_exit(void)
1213 {
1214 - debugfs_remove_recursive(dfs_parent);
1215 -
1216 /* Release the allocated major block device number. */
1217 unregister_blkdev(mtip_major, MTIP_DRV_NAME);
1218
1219 /* Unregister the PCI driver. */
1220 pci_unregister_driver(&mtip_pci_driver);
1221 +
1222 + debugfs_remove_recursive(dfs_parent);
1223 }
1224
1225 MODULE_AUTHOR("Micron Technology, Inc");
1226 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
1227 index abda6609d3e7..558224cf55bf 100644
1228 --- a/drivers/cpufreq/cpufreq.c
1229 +++ b/drivers/cpufreq/cpufreq.c
1230 @@ -2166,10 +2166,8 @@ int cpufreq_update_policy(unsigned int cpu)
1231 struct cpufreq_policy new_policy;
1232 int ret;
1233
1234 - if (!policy) {
1235 - ret = -ENODEV;
1236 - goto no_policy;
1237 - }
1238 + if (!policy)
1239 + return -ENODEV;
1240
1241 down_write(&policy->rwsem);
1242
1243 @@ -2188,7 +2186,7 @@ int cpufreq_update_policy(unsigned int cpu)
1244 new_policy.cur = cpufreq_driver->get(cpu);
1245 if (WARN_ON(!new_policy.cur)) {
1246 ret = -EIO;
1247 - goto no_policy;
1248 + goto unlock;
1249 }
1250
1251 if (!policy->cur) {
1252 @@ -2203,10 +2201,10 @@ int cpufreq_update_policy(unsigned int cpu)
1253
1254 ret = cpufreq_set_policy(policy, &new_policy);
1255
1256 +unlock:
1257 up_write(&policy->rwsem);
1258
1259 cpufreq_cpu_put(policy);
1260 -no_policy:
1261 return ret;
1262 }
1263 EXPORT_SYMBOL(cpufreq_update_policy);
1264 diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
1265 index 0af618abebaf..3607070797af 100644
1266 --- a/drivers/cpufreq/ppc-corenet-cpufreq.c
1267 +++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
1268 @@ -138,7 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
1269 struct cpufreq_frequency_table *table;
1270 struct cpu_data *data;
1271 unsigned int cpu = policy->cpu;
1272 - u64 transition_latency_hz;
1273 + u64 u64temp;
1274
1275 np = of_get_cpu_node(cpu, NULL);
1276 if (!np)
1277 @@ -206,9 +206,10 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
1278 for_each_cpu(i, per_cpu(cpu_mask, cpu))
1279 per_cpu(cpu_data, i) = data;
1280
1281 - transition_latency_hz = 12ULL * NSEC_PER_SEC;
1282 - policy->cpuinfo.transition_latency =
1283 - do_div(transition_latency_hz, fsl_get_sys_freq());
1284 + /* Minimum transition latency is 12 platform clocks */
1285 + u64temp = 12ULL * NSEC_PER_SEC;
1286 + do_div(u64temp, fsl_get_sys_freq());
1287 + policy->cpuinfo.transition_latency = u64temp + 1;
1288
1289 of_node_put(np);
1290
1291 diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
1292 index f0d588f8859e..1acb99100556 100644
1293 --- a/drivers/infiniband/core/user_mad.c
1294 +++ b/drivers/infiniband/core/user_mad.c
1295 @@ -98,7 +98,7 @@ struct ib_umad_port {
1296
1297 struct ib_umad_device {
1298 int start_port, end_port;
1299 - struct kref ref;
1300 + struct kobject kobj;
1301 struct ib_umad_port port[0];
1302 };
1303
1304 @@ -134,14 +134,18 @@ static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
1305 static void ib_umad_add_one(struct ib_device *device);
1306 static void ib_umad_remove_one(struct ib_device *device);
1307
1308 -static void ib_umad_release_dev(struct kref *ref)
1309 +static void ib_umad_release_dev(struct kobject *kobj)
1310 {
1311 struct ib_umad_device *dev =
1312 - container_of(ref, struct ib_umad_device, ref);
1313 + container_of(kobj, struct ib_umad_device, kobj);
1314
1315 kfree(dev);
1316 }
1317
1318 +static struct kobj_type ib_umad_dev_ktype = {
1319 + .release = ib_umad_release_dev,
1320 +};
1321 +
1322 static int hdr_size(struct ib_umad_file *file)
1323 {
1324 return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
1325 @@ -780,27 +784,19 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
1326 {
1327 struct ib_umad_port *port;
1328 struct ib_umad_file *file;
1329 - int ret;
1330 + int ret = -ENXIO;
1331
1332 port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
1333 - if (port)
1334 - kref_get(&port->umad_dev->ref);
1335 - else
1336 - return -ENXIO;
1337
1338 mutex_lock(&port->file_mutex);
1339
1340 - if (!port->ib_dev) {
1341 - ret = -ENXIO;
1342 + if (!port->ib_dev)
1343 goto out;
1344 - }
1345
1346 + ret = -ENOMEM;
1347 file = kzalloc(sizeof *file, GFP_KERNEL);
1348 - if (!file) {
1349 - kref_put(&port->umad_dev->ref, ib_umad_release_dev);
1350 - ret = -ENOMEM;
1351 + if (!file)
1352 goto out;
1353 - }
1354
1355 mutex_init(&file->mutex);
1356 spin_lock_init(&file->send_lock);
1357 @@ -814,6 +810,13 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
1358 list_add_tail(&file->port_list, &port->file_list);
1359
1360 ret = nonseekable_open(inode, filp);
1361 + if (ret) {
1362 + list_del(&file->port_list);
1363 + kfree(file);
1364 + goto out;
1365 + }
1366 +
1367 + kobject_get(&port->umad_dev->kobj);
1368
1369 out:
1370 mutex_unlock(&port->file_mutex);
1371 @@ -852,7 +855,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
1372 mutex_unlock(&file->port->file_mutex);
1373
1374 kfree(file);
1375 - kref_put(&dev->ref, ib_umad_release_dev);
1376 + kobject_put(&dev->kobj);
1377
1378 return 0;
1379 }
1380 @@ -880,10 +883,6 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
1381 int ret;
1382
1383 port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);
1384 - if (port)
1385 - kref_get(&port->umad_dev->ref);
1386 - else
1387 - return -ENXIO;
1388
1389 if (filp->f_flags & O_NONBLOCK) {
1390 if (down_trylock(&port->sm_sem)) {
1391 @@ -898,17 +897,27 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
1392 }
1393
1394 ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
1395 - if (ret) {
1396 - up(&port->sm_sem);
1397 - goto fail;
1398 - }
1399 + if (ret)
1400 + goto err_up_sem;
1401
1402 filp->private_data = port;
1403
1404 - return nonseekable_open(inode, filp);
1405 + ret = nonseekable_open(inode, filp);
1406 + if (ret)
1407 + goto err_clr_sm_cap;
1408 +
1409 + kobject_get(&port->umad_dev->kobj);
1410 +
1411 + return 0;
1412 +
1413 +err_clr_sm_cap:
1414 + swap(props.set_port_cap_mask, props.clr_port_cap_mask);
1415 + ib_modify_port(port->ib_dev, port->port_num, 0, &props);
1416 +
1417 +err_up_sem:
1418 + up(&port->sm_sem);
1419
1420 fail:
1421 - kref_put(&port->umad_dev->ref, ib_umad_release_dev);
1422 return ret;
1423 }
1424
1425 @@ -927,7 +936,7 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
1426
1427 up(&port->sm_sem);
1428
1429 - kref_put(&port->umad_dev->ref, ib_umad_release_dev);
1430 + kobject_put(&port->umad_dev->kobj);
1431
1432 return ret;
1433 }
1434 @@ -995,6 +1004,7 @@ static int find_overflow_devnum(void)
1435 }
1436
1437 static int ib_umad_init_port(struct ib_device *device, int port_num,
1438 + struct ib_umad_device *umad_dev,
1439 struct ib_umad_port *port)
1440 {
1441 int devnum;
1442 @@ -1027,6 +1037,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
1443
1444 cdev_init(&port->cdev, &umad_fops);
1445 port->cdev.owner = THIS_MODULE;
1446 + port->cdev.kobj.parent = &umad_dev->kobj;
1447 kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
1448 if (cdev_add(&port->cdev, base, 1))
1449 goto err_cdev;
1450 @@ -1045,6 +1056,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
1451 base += IB_UMAD_MAX_PORTS;
1452 cdev_init(&port->sm_cdev, &umad_sm_fops);
1453 port->sm_cdev.owner = THIS_MODULE;
1454 + port->sm_cdev.kobj.parent = &umad_dev->kobj;
1455 kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
1456 if (cdev_add(&port->sm_cdev, base, 1))
1457 goto err_sm_cdev;
1458 @@ -1138,7 +1150,7 @@ static void ib_umad_add_one(struct ib_device *device)
1459 if (!umad_dev)
1460 return;
1461
1462 - kref_init(&umad_dev->ref);
1463 + kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype);
1464
1465 umad_dev->start_port = s;
1466 umad_dev->end_port = e;
1467 @@ -1146,7 +1158,8 @@ static void ib_umad_add_one(struct ib_device *device)
1468 for (i = s; i <= e; ++i) {
1469 umad_dev->port[i - s].umad_dev = umad_dev;
1470
1471 - if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
1472 + if (ib_umad_init_port(device, i, umad_dev,
1473 + &umad_dev->port[i - s]))
1474 goto err;
1475 }
1476
1477 @@ -1158,7 +1171,7 @@ err:
1478 while (--i >= s)
1479 ib_umad_kill_port(&umad_dev->port[i - s]);
1480
1481 - kref_put(&umad_dev->ref, ib_umad_release_dev);
1482 + kobject_put(&umad_dev->kobj);
1483 }
1484
1485 static void ib_umad_remove_one(struct ib_device *device)
1486 @@ -1172,7 +1185,7 @@ static void ib_umad_remove_one(struct ib_device *device)
1487 for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
1488 ib_umad_kill_port(&umad_dev->port[i]);
1489
1490 - kref_put(&umad_dev->ref, ib_umad_release_dev);
1491 + kobject_put(&umad_dev->kobj);
1492 }
1493
1494 static char *umad_devnode(struct device *dev, umode_t *mode)
1495 diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
1496 index cfaa56ada189..7151a02b4ebb 100644
1497 --- a/drivers/infiniband/hw/cxgb4/cq.c
1498 +++ b/drivers/infiniband/hw/cxgb4/cq.c
1499 @@ -940,7 +940,6 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
1500 if (!mm2)
1501 goto err4;
1502
1503 - memset(&uresp, 0, sizeof(uresp));
1504 uresp.qid_mask = rhp->rdev.cqmask;
1505 uresp.cqid = chp->cq.cqid;
1506 uresp.size = chp->cq.size;
1507 @@ -951,7 +950,8 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
1508 uresp.gts_key = ucontext->key;
1509 ucontext->key += PAGE_SIZE;
1510 spin_unlock(&ucontext->mmap_lock);
1511 - ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
1512 + ret = ib_copy_to_udata(udata, &uresp,
1513 + sizeof(uresp) - sizeof(uresp.reserved));
1514 if (ret)
1515 goto err5;
1516
1517 diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
1518 index f4fa50a609e2..8914ea90ddd9 100644
1519 --- a/drivers/infiniband/hw/cxgb4/device.c
1520 +++ b/drivers/infiniband/hw/cxgb4/device.c
1521 @@ -736,6 +736,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
1522 pci_resource_len(devp->rdev.lldi.pdev, 2));
1523 if (!devp->rdev.bar2_kva) {
1524 pr_err(MOD "Unable to ioremap BAR2\n");
1525 + ib_dealloc_device(&devp->ibdev);
1526 return ERR_PTR(-EINVAL);
1527 }
1528 } else if (ocqp_supported(infop)) {
1529 @@ -747,6 +748,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
1530 devp->rdev.lldi.vr->ocq.size);
1531 if (!devp->rdev.oc_mw_kva) {
1532 pr_err(MOD "Unable to ioremap onchip mem\n");
1533 + ib_dealloc_device(&devp->ibdev);
1534 return ERR_PTR(-EINVAL);
1535 }
1536 }
1537 diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
1538 index a94a3e12c349..c777e22bd8d5 100644
1539 --- a/drivers/infiniband/hw/cxgb4/provider.c
1540 +++ b/drivers/infiniband/hw/cxgb4/provider.c
1541 @@ -122,7 +122,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
1542 INIT_LIST_HEAD(&context->mmaps);
1543 spin_lock_init(&context->mmap_lock);
1544
1545 - if (udata->outlen < sizeof(uresp)) {
1546 + if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
1547 if (!warned++)
1548 pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
1549 rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
1550 @@ -140,7 +140,8 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
1551 context->key += PAGE_SIZE;
1552 spin_unlock(&context->mmap_lock);
1553
1554 - ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1555 + ret = ib_copy_to_udata(udata, &uresp,
1556 + sizeof(uresp) - sizeof(uresp.reserved));
1557 if (ret)
1558 goto err_mm;
1559
1560 diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h
1561 index 11ccd276e5d9..cbd0ce170728 100644
1562 --- a/drivers/infiniband/hw/cxgb4/user.h
1563 +++ b/drivers/infiniband/hw/cxgb4/user.h
1564 @@ -48,6 +48,7 @@ struct c4iw_create_cq_resp {
1565 __u32 cqid;
1566 __u32 size;
1567 __u32 qid_mask;
1568 + __u32 reserved; /* explicit padding (optional for i386) */
1569 };
1570
1571
1572 @@ -74,5 +75,6 @@ struct c4iw_create_qp_resp {
1573 struct c4iw_alloc_ucontext_resp {
1574 __u64 status_page_key;
1575 __u32 status_page_size;
1576 + __u32 reserved; /* explicit padding (optional for i386) */
1577 };
1578 #endif
1579 diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
1580 index e2f9a51f4a38..45802e97332e 100644
1581 --- a/drivers/infiniband/hw/ipath/ipath_diag.c
1582 +++ b/drivers/infiniband/hw/ipath/ipath_diag.c
1583 @@ -346,6 +346,10 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
1584 ret = -EFAULT;
1585 goto bail;
1586 }
1587 + dp.len = odp.len;
1588 + dp.unit = odp.unit;
1589 + dp.data = odp.data;
1590 + dp.pbc_wd = 0;
1591 } else {
1592 ret = -EINVAL;
1593 goto bail;
1594 diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
1595 index 62bb6b49dc1d..8ae4f896cb41 100644
1596 --- a/drivers/infiniband/hw/mlx5/cq.c
1597 +++ b/drivers/infiniband/hw/mlx5/cq.c
1598 @@ -32,6 +32,7 @@
1599
1600 #include <linux/kref.h>
1601 #include <rdma/ib_umem.h>
1602 +#include <rdma/ib_user_verbs.h>
1603 #include "mlx5_ib.h"
1604 #include "user.h"
1605
1606 @@ -602,14 +603,24 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
1607 int *cqe_size, int *index, int *inlen)
1608 {
1609 struct mlx5_ib_create_cq ucmd;
1610 + size_t ucmdlen;
1611 int page_shift;
1612 int npages;
1613 int ncont;
1614 int err;
1615
1616 - if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
1617 + ucmdlen =
1618 + (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
1619 + sizeof(ucmd)) ? (sizeof(ucmd) -
1620 + sizeof(ucmd.reserved)) : sizeof(ucmd);
1621 +
1622 + if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
1623 return -EFAULT;
1624
1625 + if (ucmdlen == sizeof(ucmd) &&
1626 + ucmd.reserved != 0)
1627 + return -EINVAL;
1628 +
1629 if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
1630 return -EINVAL;
1631
1632 diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
1633 index 210b3eaf188a..384af6dec5eb 100644
1634 --- a/drivers/infiniband/hw/mlx5/srq.c
1635 +++ b/drivers/infiniband/hw/mlx5/srq.c
1636 @@ -35,6 +35,7 @@
1637 #include <linux/mlx5/srq.h>
1638 #include <linux/slab.h>
1639 #include <rdma/ib_umem.h>
1640 +#include <rdma/ib_user_verbs.h>
1641
1642 #include "mlx5_ib.h"
1643 #include "user.h"
1644 @@ -78,16 +79,27 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
1645 {
1646 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1647 struct mlx5_ib_create_srq ucmd;
1648 + size_t ucmdlen;
1649 int err;
1650 int npages;
1651 int page_shift;
1652 int ncont;
1653 u32 offset;
1654
1655 - if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
1656 + ucmdlen =
1657 + (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
1658 + sizeof(ucmd)) ? (sizeof(ucmd) -
1659 + sizeof(ucmd.reserved)) : sizeof(ucmd);
1660 +
1661 + if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
1662 mlx5_ib_dbg(dev, "failed copy udata\n");
1663 return -EFAULT;
1664 }
1665 +
1666 + if (ucmdlen == sizeof(ucmd) &&
1667 + ucmd.reserved != 0)
1668 + return -EINVAL;
1669 +
1670 srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
1671
1672 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
1673 diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
1674 index 0f4f8e42a17f..d0ba264ac1ed 100644
1675 --- a/drivers/infiniband/hw/mlx5/user.h
1676 +++ b/drivers/infiniband/hw/mlx5/user.h
1677 @@ -91,6 +91,7 @@ struct mlx5_ib_create_cq {
1678 __u64 buf_addr;
1679 __u64 db_addr;
1680 __u32 cqe_size;
1681 + __u32 reserved; /* explicit padding (optional on i386) */
1682 };
1683
1684 struct mlx5_ib_create_cq_resp {
1685 @@ -109,6 +110,7 @@ struct mlx5_ib_create_srq {
1686 __u64 buf_addr;
1687 __u64 db_addr;
1688 __u32 flags;
1689 + __u32 reserved; /* explicit padding (optional on i386) */
1690 };
1691
1692 struct mlx5_ib_create_srq_resp {
1693 diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
1694 index edad991d60ed..22c720e5740d 100644
1695 --- a/drivers/infiniband/hw/qib/qib_mad.c
1696 +++ b/drivers/infiniband/hw/qib/qib_mad.c
1697 @@ -1028,7 +1028,7 @@ static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
1698
1699 event.event = IB_EVENT_PKEY_CHANGE;
1700 event.device = &dd->verbs_dev.ibdev;
1701 - event.element.port_num = 1;
1702 + event.element.port_num = port;
1703 ib_dispatch_event(&event);
1704 }
1705 return 0;
1706 diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
1707 index 66a908bf3fb9..5b2bed8fc493 100644
1708 --- a/drivers/infiniband/ulp/srp/ib_srp.c
1709 +++ b/drivers/infiniband/ulp/srp/ib_srp.c
1710 @@ -1594,6 +1594,12 @@ err_unmap:
1711 err_iu:
1712 srp_put_tx_iu(target, iu, SRP_IU_CMD);
1713
1714 + /*
1715 + * Avoid that the loops that iterate over the request ring can
1716 + * encounter a dangling SCSI command pointer.
1717 + */
1718 + req->scmnd = NULL;
1719 +
1720 spin_lock_irqsave(&target->lock, flags);
1721 list_add(&req->list, &target->free_reqs);
1722
1723 diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
1724 index b96e978a37b7..ee2a04d90d20 100644
1725 --- a/drivers/input/mouse/elantech.c
1726 +++ b/drivers/input/mouse/elantech.c
1727 @@ -473,8 +473,15 @@ static void elantech_report_absolute_v3(struct psmouse *psmouse,
1728 input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
1729 input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
1730 input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
1731 - input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
1732 - input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
1733 +
1734 + /* For clickpads map both buttons to BTN_LEFT */
1735 + if (etd->fw_version & 0x001000) {
1736 + input_report_key(dev, BTN_LEFT, packet[0] & 0x03);
1737 + } else {
1738 + input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
1739 + input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
1740 + }
1741 +
1742 input_report_abs(dev, ABS_PRESSURE, pres);
1743 input_report_abs(dev, ABS_TOOL_WIDTH, width);
1744
1745 @@ -484,10 +491,17 @@ static void elantech_report_absolute_v3(struct psmouse *psmouse,
1746 static void elantech_input_sync_v4(struct psmouse *psmouse)
1747 {
1748 struct input_dev *dev = psmouse->dev;
1749 + struct elantech_data *etd = psmouse->private;
1750 unsigned char *packet = psmouse->packet;
1751
1752 - input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
1753 - input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
1754 + /* For clickpads map both buttons to BTN_LEFT */
1755 + if (etd->fw_version & 0x001000) {
1756 + input_report_key(dev, BTN_LEFT, packet[0] & 0x03);
1757 + } else {
1758 + input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
1759 + input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
1760 + }
1761 +
1762 input_mt_report_pointer_emulation(dev, true);
1763 input_sync(dev);
1764 }
1765 @@ -835,7 +849,7 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
1766 if (etd->set_hw_resolution)
1767 etd->reg_10 = 0x0b;
1768 else
1769 - etd->reg_10 = 0x03;
1770 + etd->reg_10 = 0x01;
1771
1772 if (elantech_write_reg(psmouse, 0x10, etd->reg_10))
1773 rc = -1;
1774 @@ -1336,7 +1350,8 @@ static int elantech_reconnect(struct psmouse *psmouse)
1775 }
1776
1777 /*
1778 - * Some hw_version 3 models go into error state when we try to set bit 3 of r10
1779 + * Some hw_version 3 models go into error state when we try to set
1780 + * bit 3 and/or bit 1 of r10.
1781 */
1782 static const struct dmi_system_id no_hw_res_dmi_table[] = {
1783 #if defined(CONFIG_DMI) && defined(CONFIG_X86)
1784 diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
1785 index c5ec703c727e..ec772d962f06 100644
1786 --- a/drivers/input/mouse/synaptics.c
1787 +++ b/drivers/input/mouse/synaptics.c
1788 @@ -347,15 +347,6 @@ static int synaptics_resolution(struct psmouse *psmouse)
1789 unsigned char resp[3];
1790 int i;
1791
1792 - for (i = 0; min_max_pnpid_table[i].pnp_ids; i++)
1793 - if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
1794 - priv->x_min = min_max_pnpid_table[i].x_min;
1795 - priv->x_max = min_max_pnpid_table[i].x_max;
1796 - priv->y_min = min_max_pnpid_table[i].y_min;
1797 - priv->y_max = min_max_pnpid_table[i].y_max;
1798 - return 0;
1799 - }
1800 -
1801 if (SYN_ID_MAJOR(priv->identity) < 4)
1802 return 0;
1803
1804 @@ -366,6 +357,16 @@ static int synaptics_resolution(struct psmouse *psmouse)
1805 }
1806 }
1807
1808 + for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
1809 + if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
1810 + priv->x_min = min_max_pnpid_table[i].x_min;
1811 + priv->x_max = min_max_pnpid_table[i].x_max;
1812 + priv->y_min = min_max_pnpid_table[i].y_min;
1813 + priv->y_max = min_max_pnpid_table[i].y_max;
1814 + return 0;
1815 + }
1816 + }
1817 +
1818 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
1819 SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
1820 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) {
1821 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
1822 index 759475ef6ff3..83b01fa02400 100644
1823 --- a/drivers/pci/pci.c
1824 +++ b/drivers/pci/pci.c
1825 @@ -4126,7 +4126,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
1826 u16 cmd;
1827 int rc;
1828
1829 - WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
1830 + WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
1831
1832 /* ARCH specific VGA enables */
1833 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
1834 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
1835 index e7292065a1b1..0feb4a32a941 100644
1836 --- a/drivers/pci/quirks.c
1837 +++ b/drivers/pci/quirks.c
1838 @@ -2954,6 +2954,7 @@ static void disable_igfx_irq(struct pci_dev *dev)
1839 }
1840 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
1841 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
1842 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
1843
1844 /*
1845 * PCI devices which are on Intel chips can skip the 10ms delay
1846 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
1847 index 9a6e4a2cd072..fda6cf19fafe 100644
1848 --- a/drivers/scsi/hpsa.c
1849 +++ b/drivers/scsi/hpsa.c
1850 @@ -115,9 +115,15 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
1851 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
1852 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
1853 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
1854 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
1855 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
1856 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
1857 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
1858 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
1859 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
1860 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
1861 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
1862 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
1863 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
1864 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
1865 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
1866 @@ -165,9 +171,15 @@ static struct board_type products[] = {
1867 {0x21C3103C, "Smart Array", &SA5_access},
1868 {0x21C4103C, "Smart Array", &SA5_access},
1869 {0x21C5103C, "Smart Array", &SA5_access},
1870 + {0x21C6103C, "Smart Array", &SA5_access},
1871 {0x21C7103C, "Smart Array", &SA5_access},
1872 {0x21C8103C, "Smart Array", &SA5_access},
1873 {0x21C9103C, "Smart Array", &SA5_access},
1874 + {0x21CA103C, "Smart Array", &SA5_access},
1875 + {0x21CB103C, "Smart Array", &SA5_access},
1876 + {0x21CC103C, "Smart Array", &SA5_access},
1877 + {0x21CD103C, "Smart Array", &SA5_access},
1878 + {0x21CE103C, "Smart Array", &SA5_access},
1879 {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
1880 {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
1881 {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
1882 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
1883 index ca2bc348ef5b..e71e1840ac02 100644
1884 --- a/drivers/target/iscsi/iscsi_target.c
1885 +++ b/drivers/target/iscsi/iscsi_target.c
1886 @@ -1309,7 +1309,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
1887 if (cmd->data_direction != DMA_TO_DEVICE) {
1888 pr_err("Command ITT: 0x%08x received DataOUT for a"
1889 " NON-WRITE command.\n", cmd->init_task_tag);
1890 - return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
1891 + return iscsit_dump_data_payload(conn, payload_length, 1);
1892 }
1893 se_cmd = &cmd->se_cmd;
1894 iscsit_mod_dataout_timer(cmd);
1895 diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
1896 index d9b1d88e1ad3..621b56fcb877 100644
1897 --- a/drivers/target/iscsi/iscsi_target_login.c
1898 +++ b/drivers/target/iscsi/iscsi_target_login.c
1899 @@ -1216,7 +1216,7 @@ old_sess_out:
1900 static int __iscsi_target_login_thread(struct iscsi_np *np)
1901 {
1902 u8 *buffer, zero_tsih = 0;
1903 - int ret = 0, rc, stop;
1904 + int ret = 0, rc;
1905 struct iscsi_conn *conn = NULL;
1906 struct iscsi_login *login;
1907 struct iscsi_portal_group *tpg = NULL;
1908 @@ -1230,6 +1230,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1909 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
1910 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
1911 complete(&np->np_restart_comp);
1912 + } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
1913 + spin_unlock_bh(&np->np_thread_lock);
1914 + goto exit;
1915 } else {
1916 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
1917 }
1918 @@ -1422,10 +1425,8 @@ old_sess_out:
1919 }
1920
1921 out:
1922 - stop = kthread_should_stop();
1923 - /* Wait for another socket.. */
1924 - if (!stop)
1925 - return 1;
1926 + return 1;
1927 +
1928 exit:
1929 iscsi_stop_login_thread_timer(np);
1930 spin_lock_bh(&np->np_thread_lock);
1931 @@ -1442,7 +1443,7 @@ int iscsi_target_login_thread(void *arg)
1932
1933 allow_signal(SIGINT);
1934
1935 - while (!kthread_should_stop()) {
1936 + while (1) {
1937 ret = __iscsi_target_login_thread(np);
1938 /*
1939 * We break and exit here unless another sock_accept() call
1940 diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
1941 index 53e157cb8c54..fd90b28f1d94 100644
1942 --- a/drivers/target/iscsi/iscsi_target_util.c
1943 +++ b/drivers/target/iscsi/iscsi_target_util.c
1944 @@ -1295,6 +1295,8 @@ int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_deta
1945 login->login_failed = 1;
1946 iscsit_collect_login_stats(conn, status_class, status_detail);
1947
1948 + memset(&login->rsp[0], 0, ISCSI_HDR_LEN);
1949 +
1950 hdr = (struct iscsi_login_rsp *)&login->rsp[0];
1951 hdr->opcode = ISCSI_OP_LOGIN_RSP;
1952 hdr->status_class = status_class;
1953 diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
1954 index 26416c15d65c..6ea95d216eb8 100644
1955 --- a/drivers/target/target_core_device.c
1956 +++ b/drivers/target/target_core_device.c
1957 @@ -616,6 +616,7 @@ void core_dev_unexport(
1958 dev->export_count--;
1959 spin_unlock(&hba->device_lock);
1960
1961 + lun->lun_sep = NULL;
1962 lun->lun_se_dev = NULL;
1963 }
1964
1965 diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
1966 index 399c3fddecf6..0e67d96b3ebd 100644
1967 --- a/drivers/watchdog/ath79_wdt.c
1968 +++ b/drivers/watchdog/ath79_wdt.c
1969 @@ -20,6 +20,7 @@
1970 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1971
1972 #include <linux/bitops.h>
1973 +#include <linux/delay.h>
1974 #include <linux/errno.h>
1975 #include <linux/fs.h>
1976 #include <linux/io.h>
1977 @@ -90,6 +91,15 @@ static inline void ath79_wdt_keepalive(void)
1978 static inline void ath79_wdt_enable(void)
1979 {
1980 ath79_wdt_keepalive();
1981 +
1982 + /*
1983 + * Updating the TIMER register requires a few microseconds
1984 + * on the AR934x SoCs at least. Use a small delay to ensure
1985 + * that the TIMER register is updated within the hardware
1986 + * before enabling the watchdog.
1987 + */
1988 + udelay(2);
1989 +
1990 ath79_wdt_wr(WDOG_REG_CTRL, WDOG_CTRL_ACTION_FCR);
1991 /* flush write */
1992 ath79_wdt_rr(WDOG_REG_CTRL);
1993 diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
1994 index 20dc73844737..d9c1a1601926 100644
1995 --- a/drivers/watchdog/kempld_wdt.c
1996 +++ b/drivers/watchdog/kempld_wdt.c
1997 @@ -162,7 +162,7 @@ static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data,
1998 kempld_get_mutex(pld);
1999 stage_cfg = kempld_read8(pld, KEMPLD_WDT_STAGE_CFG(stage->id));
2000 stage_cfg &= ~STAGE_CFG_PRESCALER_MASK;
2001 - stage_cfg |= STAGE_CFG_SET_PRESCALER(prescaler);
2002 + stage_cfg |= STAGE_CFG_SET_PRESCALER(PRESCALER_21);
2003 kempld_write8(pld, KEMPLD_WDT_STAGE_CFG(stage->id), stage_cfg);
2004 kempld_write32(pld, KEMPLD_WDT_STAGE_TIMEOUT(stage->id),
2005 stage_timeout);
2006 diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
2007 index 47629d268e0a..c1b03f4235b9 100644
2008 --- a/drivers/watchdog/sp805_wdt.c
2009 +++ b/drivers/watchdog/sp805_wdt.c
2010 @@ -59,7 +59,6 @@
2011 * @adev: amba device structure of wdt
2012 * @status: current status of wdt
2013 * @load_val: load value to be set for current timeout
2014 - * @timeout: current programmed timeout
2015 */
2016 struct sp805_wdt {
2017 struct watchdog_device wdd;
2018 @@ -68,7 +67,6 @@ struct sp805_wdt {
2019 struct clk *clk;
2020 struct amba_device *adev;
2021 unsigned int load_val;
2022 - unsigned int timeout;
2023 };
2024
2025 static bool nowayout = WATCHDOG_NOWAYOUT;
2026 @@ -98,7 +96,7 @@ static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout)
2027 spin_lock(&wdt->lock);
2028 wdt->load_val = load;
2029 /* roundup timeout to closest positive integer value */
2030 - wdt->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
2031 + wdd->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
2032 spin_unlock(&wdt->lock);
2033
2034 return 0;
2035 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
2036 index 0c438973f3c8..c79f3e767c8c 100644
2037 --- a/fs/nfs/inode.c
2038 +++ b/fs/nfs/inode.c
2039 @@ -1575,18 +1575,20 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2040 inode->i_version = fattr->change_attr;
2041 }
2042 } else if (server->caps & NFS_CAP_CHANGE_ATTR)
2043 - invalid |= save_cache_validity;
2044 + nfsi->cache_validity |= save_cache_validity;
2045
2046 if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
2047 memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
2048 } else if (server->caps & NFS_CAP_MTIME)
2049 - invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
2050 + nfsi->cache_validity |= save_cache_validity &
2051 + (NFS_INO_INVALID_ATTR
2052 | NFS_INO_REVAL_FORCED);
2053
2054 if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
2055 memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
2056 } else if (server->caps & NFS_CAP_CTIME)
2057 - invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
2058 + nfsi->cache_validity |= save_cache_validity &
2059 + (NFS_INO_INVALID_ATTR
2060 | NFS_INO_REVAL_FORCED);
2061
2062 /* Check if our cached file size is stale */
2063 @@ -1608,7 +1610,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2064 (long long)new_isize);
2065 }
2066 } else
2067 - invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
2068 + nfsi->cache_validity |= save_cache_validity &
2069 + (NFS_INO_INVALID_ATTR
2070 | NFS_INO_REVAL_PAGECACHE
2071 | NFS_INO_REVAL_FORCED);
2072
2073 @@ -1616,7 +1619,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2074 if (fattr->valid & NFS_ATTR_FATTR_ATIME)
2075 memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
2076 else if (server->caps & NFS_CAP_ATIME)
2077 - invalid |= save_cache_validity & (NFS_INO_INVALID_ATIME
2078 + nfsi->cache_validity |= save_cache_validity &
2079 + (NFS_INO_INVALID_ATIME
2080 | NFS_INO_REVAL_FORCED);
2081
2082 if (fattr->valid & NFS_ATTR_FATTR_MODE) {
2083 @@ -1627,7 +1631,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2084 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
2085 }
2086 } else if (server->caps & NFS_CAP_MODE)
2087 - invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
2088 + nfsi->cache_validity |= save_cache_validity &
2089 + (NFS_INO_INVALID_ATTR
2090 | NFS_INO_INVALID_ACCESS
2091 | NFS_INO_INVALID_ACL
2092 | NFS_INO_REVAL_FORCED);
2093 @@ -1638,7 +1643,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2094 inode->i_uid = fattr->uid;
2095 }
2096 } else if (server->caps & NFS_CAP_OWNER)
2097 - invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
2098 + nfsi->cache_validity |= save_cache_validity &
2099 + (NFS_INO_INVALID_ATTR
2100 | NFS_INO_INVALID_ACCESS
2101 | NFS_INO_INVALID_ACL
2102 | NFS_INO_REVAL_FORCED);
2103 @@ -1649,7 +1655,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2104 inode->i_gid = fattr->gid;
2105 }
2106 } else if (server->caps & NFS_CAP_OWNER_GROUP)
2107 - invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
2108 + nfsi->cache_validity |= save_cache_validity &
2109 + (NFS_INO_INVALID_ATTR
2110 | NFS_INO_INVALID_ACCESS
2111 | NFS_INO_INVALID_ACL
2112 | NFS_INO_REVAL_FORCED);
2113 @@ -1662,7 +1669,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2114 set_nlink(inode, fattr->nlink);
2115 }
2116 } else if (server->caps & NFS_CAP_NLINK)
2117 - invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
2118 + nfsi->cache_validity |= save_cache_validity &
2119 + (NFS_INO_INVALID_ATTR
2120 | NFS_INO_REVAL_FORCED);
2121
2122 if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
2123 diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
2124 index b9a35c05b60f..5e992fc51e61 100644
2125 --- a/fs/nfs/nfs4filelayout.c
2126 +++ b/fs/nfs/nfs4filelayout.c
2127 @@ -1330,7 +1330,7 @@ filelayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
2128 struct nfs4_filelayout *flo;
2129
2130 flo = kzalloc(sizeof(*flo), gfp_flags);
2131 - return &flo->generic_hdr;
2132 + return flo != NULL ? &flo->generic_hdr : NULL;
2133 }
2134
2135 static void
2136 diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
2137 index 2349518eef2c..21275148fc13 100644
2138 --- a/fs/nfs/nfs4state.c
2139 +++ b/fs/nfs/nfs4state.c
2140 @@ -1456,7 +1456,7 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
2141 * server that doesn't support a grace period.
2142 */
2143 spin_lock(&sp->so_lock);
2144 - write_seqcount_begin(&sp->so_reclaim_seqcount);
2145 + raw_write_seqcount_begin(&sp->so_reclaim_seqcount);
2146 restart:
2147 list_for_each_entry(state, &sp->so_states, open_states) {
2148 if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
2149 @@ -1519,13 +1519,13 @@ restart:
2150 spin_lock(&sp->so_lock);
2151 goto restart;
2152 }
2153 - write_seqcount_end(&sp->so_reclaim_seqcount);
2154 + raw_write_seqcount_end(&sp->so_reclaim_seqcount);
2155 spin_unlock(&sp->so_lock);
2156 return 0;
2157 out_err:
2158 nfs4_put_open_state(state);
2159 spin_lock(&sp->so_lock);
2160 - write_seqcount_end(&sp->so_reclaim_seqcount);
2161 + raw_write_seqcount_end(&sp->so_reclaim_seqcount);
2162 spin_unlock(&sp->so_lock);
2163 return status;
2164 }
2165 diff --git a/fs/nfs/super.c b/fs/nfs/super.c
2166 index 2cb56943e232..104ef01d694d 100644
2167 --- a/fs/nfs/super.c
2168 +++ b/fs/nfs/super.c
2169 @@ -2248,6 +2248,7 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
2170 data->nfs_server.addrlen = nfss->nfs_client->cl_addrlen;
2171 data->version = nfsvers;
2172 data->minorversion = nfss->nfs_client->cl_minorversion;
2173 + data->net = current->nsproxy->net_ns;
2174 memcpy(&data->nfs_server.address, &nfss->nfs_client->cl_addr,
2175 data->nfs_server.addrlen);
2176
2177 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
2178 index 9a3b6a4cd6b9..aaa16b31e21e 100644
2179 --- a/fs/nfs/write.c
2180 +++ b/fs/nfs/write.c
2181 @@ -913,12 +913,14 @@ static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
2182
2183 if (nfs_have_delegated_attributes(inode))
2184 goto out;
2185 - if (nfsi->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE))
2186 + if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
2187 return false;
2188 smp_rmb();
2189 if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
2190 return false;
2191 out:
2192 + if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
2193 + return false;
2194 return PageUptodate(page) != 0;
2195 }
2196
2197 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
2198 index 9a77a5a21557..6134ee283798 100644
2199 --- a/fs/nfsd/nfs4state.c
2200 +++ b/fs/nfsd/nfs4state.c
2201 @@ -3726,7 +3726,7 @@ nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
2202 * correspondance, and we have to delete the lockowner when we
2203 * delete the lock stateid:
2204 */
2205 - unhash_lockowner(lo);
2206 + release_lockowner(lo);
2207 return nfs_ok;
2208 }
2209
2210 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
2211 index 18881f34737a..b4c49588eada 100644
2212 --- a/fs/nfsd/nfs4xdr.c
2213 +++ b/fs/nfsd/nfs4xdr.c
2214 @@ -2095,8 +2095,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
2215 err = vfs_getattr(&path, &stat);
2216 if (err)
2217 goto out_nfserr;
2218 - if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL |
2219 - FATTR4_WORD0_MAXNAME)) ||
2220 + if ((bmval0 & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
2221 + FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) ||
2222 (bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
2223 FATTR4_WORD1_SPACE_TOTAL))) {
2224 err = vfs_statfs(&path, &statfs);
2225 diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
2226 index bc8b8009897d..6e48eb0ff61d 100644
2227 --- a/fs/reiserfs/inode.c
2228 +++ b/fs/reiserfs/inode.c
2229 @@ -3220,8 +3220,14 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
2230 attr->ia_size != i_size_read(inode)) {
2231 error = inode_newsize_ok(inode, attr->ia_size);
2232 if (!error) {
2233 + /*
2234 + * Could race against reiserfs_file_release
2235 + * if called from NFS, so take tailpack mutex.
2236 + */
2237 + mutex_lock(&REISERFS_I(inode)->tailpack);
2238 truncate_setsize(inode, attr->ia_size);
2239 - reiserfs_vfs_truncate_file(inode);
2240 + reiserfs_truncate_file(inode, 1);
2241 + mutex_unlock(&REISERFS_I(inode)->tailpack);
2242 }
2243 }
2244
2245 diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
2246 index 4f34dbae823d..f7d48a08f443 100644
2247 --- a/fs/ubifs/file.c
2248 +++ b/fs/ubifs/file.c
2249 @@ -1525,8 +1525,7 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
2250 }
2251
2252 wait_for_stable_page(page);
2253 - unlock_page(page);
2254 - return 0;
2255 + return VM_FAULT_LOCKED;
2256
2257 out_unlock:
2258 unlock_page(page);
2259 diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
2260 index f35135e28e96..9a9fb94a41c6 100644
2261 --- a/fs/ubifs/shrinker.c
2262 +++ b/fs/ubifs/shrinker.c
2263 @@ -128,7 +128,6 @@ static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention)
2264 freed = ubifs_destroy_tnc_subtree(znode);
2265 atomic_long_sub(freed, &ubifs_clean_zn_cnt);
2266 atomic_long_sub(freed, &c->clean_zn_cnt);
2267 - ubifs_assert(atomic_long_read(&c->clean_zn_cnt) >= 0);
2268 total_freed += freed;
2269 znode = zprev;
2270 }
2271 diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
2272 index 944f3d9456a8..a9e29ea37620 100644
2273 --- a/fs/xfs/xfs_mount.c
2274 +++ b/fs/xfs/xfs_mount.c
2275 @@ -323,8 +323,19 @@ reread:
2276 /*
2277 * Initialize the mount structure from the superblock.
2278 */
2279 - xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
2280 - xfs_sb_quota_from_disk(&mp->m_sb);
2281 + xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
2282 + xfs_sb_quota_from_disk(sbp);
2283 +
2284 + /*
2285 + * If we haven't validated the superblock, do so now before we try
2286 + * to check the sector size and reread the superblock appropriately.
2287 + */
2288 + if (sbp->sb_magicnum != XFS_SB_MAGIC) {
2289 + if (loud)
2290 + xfs_warn(mp, "Invalid superblock magic number");
2291 + error = EINVAL;
2292 + goto release_buf;
2293 + }
2294
2295 /*
2296 * We must be able to do sector-sized and sector-aligned IO.
2297 @@ -337,11 +348,11 @@ reread:
2298 goto release_buf;
2299 }
2300
2301 - /*
2302 - * Re-read the superblock so the buffer is correctly sized,
2303 - * and properly verified.
2304 - */
2305 if (buf_ops == NULL) {
2306 + /*
2307 + * Re-read the superblock so the buffer is correctly sized,
2308 + * and properly verified.
2309 + */
2310 xfs_buf_relse(bp);
2311 sector_size = sbp->sb_sectsize;
2312 buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
2313 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
2314 index 077904c8b70d..cc79eff4a1ad 100644
2315 --- a/include/linux/ptrace.h
2316 +++ b/include/linux/ptrace.h
2317 @@ -334,6 +334,9 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
2318 * calling arch_ptrace_stop() when it would be superfluous. For example,
2319 * if the thread has not been back to user mode since the last stop, the
2320 * thread state might indicate that nothing needs to be done.
2321 + *
2322 + * This is guaranteed to be invoked once before a task stops for ptrace and
2323 + * may include arch-specific operations necessary prior to a ptrace stop.
2324 */
2325 #define arch_ptrace_stop_needed(code, info) (0)
2326 #endif
2327 diff --git a/include/trace/syscall.h b/include/trace/syscall.h
2328 index fed853f3d7aa..9674145e2f6a 100644
2329 --- a/include/trace/syscall.h
2330 +++ b/include/trace/syscall.h
2331 @@ -4,6 +4,7 @@
2332 #include <linux/tracepoint.h>
2333 #include <linux/unistd.h>
2334 #include <linux/ftrace_event.h>
2335 +#include <linux/thread_info.h>
2336
2337 #include <asm/ptrace.h>
2338
2339 @@ -32,4 +33,18 @@ struct syscall_metadata {
2340 struct ftrace_event_call *exit_event;
2341 };
2342
2343 +#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
2344 +static inline void syscall_tracepoint_update(struct task_struct *p)
2345 +{
2346 + if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
2347 + set_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
2348 + else
2349 + clear_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
2350 +}
2351 +#else
2352 +static inline void syscall_tracepoint_update(struct task_struct *p)
2353 +{
2354 +}
2355 +#endif
2356 +
2357 #endif /* _TRACE_SYSCALL_H */
2358 diff --git a/kernel/fork.c b/kernel/fork.c
2359 index 142904349fb5..68b92262dc45 100644
2360 --- a/kernel/fork.c
2361 +++ b/kernel/fork.c
2362 @@ -1487,7 +1487,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
2363
2364 total_forks++;
2365 spin_unlock(&current->sighand->siglock);
2366 + syscall_tracepoint_update(p);
2367 write_unlock_irq(&tasklist_lock);
2368 +
2369 proc_fork_connector(p);
2370 cgroup_post_fork(p);
2371 if (clone_flags & CLONE_THREAD)
2372 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2373 index 737b0efa1a62..e916972c6d87 100644
2374 --- a/kernel/trace/trace.c
2375 +++ b/kernel/trace/trace.c
2376 @@ -1461,12 +1461,12 @@ static void tracing_stop_tr(struct trace_array *tr)
2377
2378 void trace_stop_cmdline_recording(void);
2379
2380 -static void trace_save_cmdline(struct task_struct *tsk)
2381 +static int trace_save_cmdline(struct task_struct *tsk)
2382 {
2383 unsigned pid, idx;
2384
2385 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
2386 - return;
2387 + return 0;
2388
2389 /*
2390 * It's not the end of the world if we don't get
2391 @@ -1475,7 +1475,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
2392 * so if we miss here, then better luck next time.
2393 */
2394 if (!arch_spin_trylock(&trace_cmdline_lock))
2395 - return;
2396 + return 0;
2397
2398 idx = map_pid_to_cmdline[tsk->pid];
2399 if (idx == NO_CMDLINE_MAP) {
2400 @@ -1500,6 +1500,8 @@ static void trace_save_cmdline(struct task_struct *tsk)
2401 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
2402
2403 arch_spin_unlock(&trace_cmdline_lock);
2404 +
2405 + return 1;
2406 }
2407
2408 void trace_find_cmdline(int pid, char comm[])
2409 @@ -1541,9 +1543,8 @@ void tracing_record_cmdline(struct task_struct *tsk)
2410 if (!__this_cpu_read(trace_cmdline_save))
2411 return;
2412
2413 - __this_cpu_write(trace_cmdline_save, false);
2414 -
2415 - trace_save_cmdline(tsk);
2416 + if (trace_save_cmdline(tsk))
2417 + __this_cpu_write(trace_cmdline_save, false);
2418 }
2419
2420 void
2421 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
2422 index 516203e665fc..30e482240dae 100644
2423 --- a/kernel/watchdog.c
2424 +++ b/kernel/watchdog.c
2425 @@ -527,10 +527,8 @@ static void update_timers_all_cpus(void)
2426 int cpu;
2427
2428 get_online_cpus();
2429 - preempt_disable();
2430 for_each_online_cpu(cpu)
2431 update_timers(cpu);
2432 - preempt_enable();
2433 put_online_cpus();
2434 }
2435
2436 diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
2437 index b74da447e81e..7a85967060a5 100644
2438 --- a/lib/lz4/lz4_decompress.c
2439 +++ b/lib/lz4/lz4_decompress.c
2440 @@ -192,6 +192,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
2441 int s = 255;
2442 while ((ip < iend) && (s == 255)) {
2443 s = *ip++;
2444 + if (unlikely(length > (size_t)(length + s)))
2445 + goto _output_error;
2446 length += s;
2447 }
2448 }
2449 @@ -232,6 +234,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
2450 if (length == ML_MASK) {
2451 while (ip < iend) {
2452 int s = *ip++;
2453 + if (unlikely(length > (size_t)(length + s)))
2454 + goto _output_error;
2455 length += s;
2456 if (s == 255)
2457 continue;
2458 @@ -284,7 +288,7 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
2459
2460 /* write overflow error detected */
2461 _output_error:
2462 - return (int) (-(((char *) ip) - source));
2463 + return -1;
2464 }
2465
2466 int lz4_decompress(const unsigned char *src, size_t *src_len,
2467 diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
2468 index 06c6ff0cb911..a4acaf2bcf18 100644
2469 --- a/net/sunrpc/svc_xprt.c
2470 +++ b/net/sunrpc/svc_xprt.c
2471 @@ -730,6 +730,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
2472 newxpt = xprt->xpt_ops->xpo_accept(xprt);
2473 if (newxpt)
2474 svc_add_new_temp_xprt(serv, newxpt);
2475 + else
2476 + module_put(xprt->xpt_class->xcl_owner);
2477 } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) {
2478 /* XPT_DATA|XPT_DEFERRED case: */
2479 dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
2480 diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
2481 index 9d1421e63ff8..49b582a225b0 100644
2482 --- a/scripts/recordmcount.h
2483 +++ b/scripts/recordmcount.h
2484 @@ -163,11 +163,11 @@ static int mcount_adjust = 0;
2485
2486 static int MIPS_is_fake_mcount(Elf_Rel const *rp)
2487 {
2488 - static Elf_Addr old_r_offset;
2489 + static Elf_Addr old_r_offset = ~(Elf_Addr)0;
2490 Elf_Addr current_r_offset = _w(rp->r_offset);
2491 int is_fake;
2492
2493 - is_fake = old_r_offset &&
2494 + is_fake = (old_r_offset != ~(Elf_Addr)0) &&
2495 (current_r_offset - old_r_offset == MIPS_FAKEMCOUNT_OFFSET);
2496 old_r_offset = current_r_offset;
2497
2498 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
2499 index 6cc3cf285558..0176cf0e01d9 100644
2500 --- a/sound/pci/hda/hda_intel.c
2501 +++ b/sound/pci/hda/hda_intel.c
2502 @@ -282,6 +282,24 @@ static char *driver_short_names[] = {
2503 [AZX_DRIVER_GENERIC] = "HD-Audio Generic",
2504 };
2505
2506 +
2507 +/* Intel HSW/BDW display HDA controller Extended Mode registers.
2508 + * EM4 (M value) and EM5 (N Value) are used to convert CDClk (Core Display
2509 + * Clock) to 24MHz BCLK: BCLK = CDCLK * M / N
2510 + * The values will be lost when the display power well is disabled.
2511 + */
2512 +#define ICH6_REG_EM4 0x100c
2513 +#define ICH6_REG_EM5 0x1010
2514 +
2515 +struct hda_intel {
2516 + struct azx chip;
2517 +
2518 + /* HSW/BDW display HDA controller to restore BCLK from CDCLK */
2519 + unsigned int bclk_m;
2520 + unsigned int bclk_n;
2521 +};
2522 +
2523 +
2524 #ifdef CONFIG_X86
2525 static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on)
2526 {
2527 @@ -574,6 +592,22 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
2528 #define azx_del_card_list(chip) /* NOP */
2529 #endif /* CONFIG_PM */
2530
2531 +static void haswell_save_bclk(struct azx *chip)
2532 +{
2533 + struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
2534 +
2535 + hda->bclk_m = azx_readw(chip, EM4);
2536 + hda->bclk_n = azx_readw(chip, EM5);
2537 +}
2538 +
2539 +static void haswell_restore_bclk(struct azx *chip)
2540 +{
2541 + struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
2542 +
2543 + azx_writew(chip, EM4, hda->bclk_m);
2544 + azx_writew(chip, EM5, hda->bclk_n);
2545 +}
2546 +
2547 #if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
2548 /*
2549 * power management
2550 @@ -600,6 +634,13 @@ static int azx_suspend(struct device *dev)
2551 free_irq(chip->irq, chip);
2552 chip->irq = -1;
2553 }
2554 +
2555 + /* Save BCLK M/N values before they become invalid in D3.
2556 + * Will test if display power well can be released now.
2557 + */
2558 + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
2559 + haswell_save_bclk(chip);
2560 +
2561 if (chip->msi)
2562 pci_disable_msi(chip->pci);
2563 pci_disable_device(pci);
2564 @@ -619,8 +660,10 @@ static int azx_resume(struct device *dev)
2565 if (chip->disabled)
2566 return 0;
2567
2568 - if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
2569 + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
2570 hda_display_power(true);
2571 + haswell_restore_bclk(chip);
2572 + }
2573 pci_set_power_state(pci, PCI_D0);
2574 pci_restore_state(pci);
2575 if (pci_enable_device(pci) < 0) {
2576 @@ -664,8 +707,10 @@ static int azx_runtime_suspend(struct device *dev)
2577 azx_stop_chip(chip);
2578 azx_enter_link_reset(chip);
2579 azx_clear_irq_pending(chip);
2580 - if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
2581 + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
2582 + haswell_save_bclk(chip);
2583 hda_display_power(false);
2584 + }
2585 return 0;
2586 }
2587
2588 @@ -683,8 +728,10 @@ static int azx_runtime_resume(struct device *dev)
2589 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
2590 return 0;
2591
2592 - if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
2593 + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
2594 hda_display_power(true);
2595 + haswell_restore_bclk(chip);
2596 + }
2597
2598 /* Read STATESTS before controller reset */
2599 status = azx_readw(chip, STATESTS);
2600 @@ -877,6 +924,8 @@ static int register_vga_switcheroo(struct azx *chip)
2601 static int azx_free(struct azx *chip)
2602 {
2603 struct pci_dev *pci = chip->pci;
2604 + struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
2605 +
2606 int i;
2607
2608 if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
2609 @@ -924,7 +973,7 @@ static int azx_free(struct azx *chip)
2610 hda_display_power(false);
2611 hda_i915_exit();
2612 }
2613 - kfree(chip);
2614 + kfree(hda);
2615
2616 return 0;
2617 }
2618 @@ -1168,6 +1217,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
2619 static struct snd_device_ops ops = {
2620 .dev_free = azx_dev_free,
2621 };
2622 + struct hda_intel *hda;
2623 struct azx *chip;
2624 int err;
2625
2626 @@ -1177,13 +1227,14 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
2627 if (err < 0)
2628 return err;
2629
2630 - chip = kzalloc(sizeof(*chip), GFP_KERNEL);
2631 - if (!chip) {
2632 - dev_err(card->dev, "Cannot allocate chip\n");
2633 + hda = kzalloc(sizeof(*hda), GFP_KERNEL);
2634 + if (!hda) {
2635 + dev_err(card->dev, "Cannot allocate hda\n");
2636 pci_disable_device(pci);
2637 return -ENOMEM;
2638 }
2639
2640 + chip = &hda->chip;
2641 spin_lock_init(&chip->reg_lock);
2642 mutex_init(&chip->open_mutex);
2643 chip->card = card;
2644 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
2645 index 8867ab3a71d4..bce551293e2a 100644
2646 --- a/sound/pci/hda/patch_hdmi.c
2647 +++ b/sound/pci/hda/patch_hdmi.c
2648 @@ -2208,7 +2208,7 @@ static int generic_hdmi_resume(struct hda_codec *codec)
2649 struct hdmi_spec *spec = codec->spec;
2650 int pin_idx;
2651
2652 - generic_hdmi_init(codec);
2653 + codec->patch_ops.init(codec);
2654 snd_hda_codec_resume_amp(codec);
2655 snd_hda_codec_resume_cache(codec);
2656
2657 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
2658 index 75515b494034..37710495fa0a 100644
2659 --- a/sound/pci/hda/patch_sigmatel.c
2660 +++ b/sound/pci/hda/patch_sigmatel.c
2661 @@ -122,6 +122,12 @@ enum {
2662 };
2663
2664 enum {
2665 + STAC_92HD95_HP_LED,
2666 + STAC_92HD95_HP_BASS,
2667 + STAC_92HD95_MODELS
2668 +};
2669 +
2670 +enum {
2671 STAC_925x_REF,
2672 STAC_M1,
2673 STAC_M1_2,
2674 @@ -4128,6 +4134,48 @@ static const struct snd_pci_quirk stac9205_fixup_tbl[] = {
2675 {} /* terminator */
2676 };
2677
2678 +static void stac92hd95_fixup_hp_led(struct hda_codec *codec,
2679 + const struct hda_fixup *fix, int action)
2680 +{
2681 + struct sigmatel_spec *spec = codec->spec;
2682 +
2683 + if (action != HDA_FIXUP_ACT_PRE_PROBE)
2684 + return;
2685 +
2686 + if (find_mute_led_cfg(codec, spec->default_polarity))
2687 + codec_dbg(codec, "mute LED gpio %d polarity %d\n",
2688 + spec->gpio_led,
2689 + spec->gpio_led_polarity);
2690 +}
2691 +
2692 +static const struct hda_fixup stac92hd95_fixups[] = {
2693 + [STAC_92HD95_HP_LED] = {
2694 + .type = HDA_FIXUP_FUNC,
2695 + .v.func = stac92hd95_fixup_hp_led,
2696 + },
2697 + [STAC_92HD95_HP_BASS] = {
2698 + .type = HDA_FIXUP_VERBS,
2699 + .v.verbs = (const struct hda_verb[]) {
2700 + {0x1a, 0x795, 0x00}, /* HPF to 100Hz */
2701 + {}
2702 + },
2703 + .chained = true,
2704 + .chain_id = STAC_92HD95_HP_LED,
2705 + },
2706 +};
2707 +
2708 +static const struct snd_pci_quirk stac92hd95_fixup_tbl[] = {
2709 + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1911, "HP Spectre 13", STAC_92HD95_HP_BASS),
2710 + {} /* terminator */
2711 +};
2712 +
2713 +static const struct hda_model_fixup stac92hd95_models[] = {
2714 + { .id = STAC_92HD95_HP_LED, .name = "hp-led" },
2715 + { .id = STAC_92HD95_HP_BASS, .name = "hp-bass" },
2716 + {}
2717 +};
2718 +
2719 +
2720 static int stac_parse_auto_config(struct hda_codec *codec)
2721 {
2722 struct sigmatel_spec *spec = codec->spec;
2723 @@ -4580,10 +4628,16 @@ static int patch_stac92hd95(struct hda_codec *codec)
2724 spec->gen.beep_nid = 0x19; /* digital beep */
2725 spec->pwr_nids = stac92hd95_pwr_nids;
2726 spec->num_pwrs = ARRAY_SIZE(stac92hd95_pwr_nids);
2727 - spec->default_polarity = -1; /* no default cfg */
2728 + spec->default_polarity = 0;
2729
2730 codec->patch_ops = stac_patch_ops;
2731
2732 + snd_hda_pick_fixup(codec, stac92hd95_models, stac92hd95_fixup_tbl,
2733 + stac92hd95_fixups);
2734 + snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
2735 +
2736 + stac_setup_gpio(codec);
2737 +
2738 err = stac_parse_auto_config(codec);
2739 if (err < 0) {
2740 stac_free(codec);
2741 @@ -4592,6 +4646,8 @@ static int patch_stac92hd95(struct hda_codec *codec)
2742
2743 codec->proc_widget_hook = stac92hd_proc_hook;
2744
2745 + snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
2746 +
2747 return 0;
2748 }
2749
2750 diff --git a/sound/usb/card.c b/sound/usb/card.c
2751 index c3b5b7dca1c3..a09e5f3519e3 100644
2752 --- a/sound/usb/card.c
2753 +++ b/sound/usb/card.c
2754 @@ -307,6 +307,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
2755
2756 static int snd_usb_audio_free(struct snd_usb_audio *chip)
2757 {
2758 + struct list_head *p, *n;
2759 +
2760 + list_for_each_safe(p, n, &chip->ep_list)
2761 + snd_usb_endpoint_free(p);
2762 +
2763 mutex_destroy(&chip->mutex);
2764 kfree(chip);
2765 return 0;
2766 @@ -585,7 +590,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
2767 struct snd_usb_audio *chip)
2768 {
2769 struct snd_card *card;
2770 - struct list_head *p, *n;
2771 + struct list_head *p;
2772
2773 if (chip == (void *)-1L)
2774 return;
2775 @@ -598,14 +603,16 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
2776 mutex_lock(&register_mutex);
2777 chip->num_interfaces--;
2778 if (chip->num_interfaces <= 0) {
2779 + struct snd_usb_endpoint *ep;
2780 +
2781 snd_card_disconnect(card);
2782 /* release the pcm resources */
2783 list_for_each(p, &chip->pcm_list) {
2784 snd_usb_stream_disconnect(p);
2785 }
2786 /* release the endpoint resources */
2787 - list_for_each_safe(p, n, &chip->ep_list) {
2788 - snd_usb_endpoint_free(p);
2789 + list_for_each_entry(ep, &chip->ep_list, list) {
2790 + snd_usb_endpoint_release(ep);
2791 }
2792 /* release the midi resources */
2793 list_for_each(p, &chip->midi_list) {
2794 diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
2795 index 289f582c9130..114e3e7ff511 100644
2796 --- a/sound/usb/endpoint.c
2797 +++ b/sound/usb/endpoint.c
2798 @@ -987,19 +987,30 @@ void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
2799 }
2800
2801 /**
2802 + * snd_usb_endpoint_release: Tear down an snd_usb_endpoint
2803 + *
2804 + * @ep: the endpoint to release
2805 + *
2806 + * This function does not care for the endpoint's use count but will tear
2807 + * down all the streaming URBs immediately.
2808 + */
2809 +void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
2810 +{
2811 + release_urbs(ep, 1);
2812 +}
2813 +
2814 +/**
2815 * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
2816 *
2817 * @ep: the list header of the endpoint to free
2818 *
2819 - * This function does not care for the endpoint's use count but will tear
2820 - * down all the streaming URBs immediately and free all resources.
2821 + * This free all resources of the given ep.
2822 */
2823 void snd_usb_endpoint_free(struct list_head *head)
2824 {
2825 struct snd_usb_endpoint *ep;
2826
2827 ep = list_entry(head, struct snd_usb_endpoint, list);
2828 - release_urbs(ep, 1);
2829 kfree(ep);
2830 }
2831
2832 diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
2833 index 1c7e8ee48abc..e61ee5c356a3 100644
2834 --- a/sound/usb/endpoint.h
2835 +++ b/sound/usb/endpoint.h
2836 @@ -23,6 +23,7 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep);
2837 void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
2838 int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
2839 void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
2840 +void snd_usb_endpoint_release(struct snd_usb_endpoint *ep);
2841 void snd_usb_endpoint_free(struct list_head *head);
2842
2843 int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);