Magellan Linux

Contents of /trunk/kernel26-alx/patches-3.10/0154-3.10.55-all-fixes.patch



Revision 2672
Tue Jul 21 16:46:35 2015 UTC by niro
File size: 109260 bytes
-3.10.84-alx-r1
1 diff --git a/Makefile b/Makefile
2 index 9429aa5e89de..6141df04fcb5 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 10
8 -SUBLEVEL = 54
9 +SUBLEVEL = 55
10 EXTRAVERSION =
11 NAME = TOSSUG Baby Fish
12
13 diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
14 index 2a75ff249e71..6430e7acb1eb 100644
15 --- a/arch/mips/cavium-octeon/setup.c
16 +++ b/arch/mips/cavium-octeon/setup.c
17 @@ -463,6 +463,18 @@ static void octeon_halt(void)
18 octeon_kill_core(NULL);
19 }
20
21 +static char __read_mostly octeon_system_type[80];
22 +
23 +static int __init init_octeon_system_type(void)
24 +{
25 + snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
26 + cvmx_board_type_to_string(octeon_bootinfo->board_type),
27 + octeon_model_get_string(read_c0_prid()));
28 +
29 + return 0;
30 +}
31 +early_initcall(init_octeon_system_type);
32 +
33 /**
34 * Handle all the error condition interrupts that might occur.
35 *
36 @@ -482,11 +494,7 @@ static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id)
37 */
38 const char *octeon_board_type_string(void)
39 {
40 - static char name[80];
41 - sprintf(name, "%s (%s)",
42 - cvmx_board_type_to_string(octeon_bootinfo->board_type),
43 - octeon_model_get_string(read_c0_prid()));
44 - return name;
45 + return octeon_system_type;
46 }
47
48 const char *get_system_type(void)
49 diff --git a/arch/mips/include/asm/reg.h b/arch/mips/include/asm/reg.h
50 index 910e71a12466..b8343ccbc989 100644
51 --- a/arch/mips/include/asm/reg.h
52 +++ b/arch/mips/include/asm/reg.h
53 @@ -12,116 +12,194 @@
54 #ifndef __ASM_MIPS_REG_H
55 #define __ASM_MIPS_REG_H
56
57 -
58 -#if defined(CONFIG_32BIT) || defined(WANT_COMPAT_REG_H)
59 -
60 -#define EF_R0 6
61 -#define EF_R1 7
62 -#define EF_R2 8
63 -#define EF_R3 9
64 -#define EF_R4 10
65 -#define EF_R5 11
66 -#define EF_R6 12
67 -#define EF_R7 13
68 -#define EF_R8 14
69 -#define EF_R9 15
70 -#define EF_R10 16
71 -#define EF_R11 17
72 -#define EF_R12 18
73 -#define EF_R13 19
74 -#define EF_R14 20
75 -#define EF_R15 21
76 -#define EF_R16 22
77 -#define EF_R17 23
78 -#define EF_R18 24
79 -#define EF_R19 25
80 -#define EF_R20 26
81 -#define EF_R21 27
82 -#define EF_R22 28
83 -#define EF_R23 29
84 -#define EF_R24 30
85 -#define EF_R25 31
86 +#define MIPS32_EF_R0 6
87 +#define MIPS32_EF_R1 7
88 +#define MIPS32_EF_R2 8
89 +#define MIPS32_EF_R3 9
90 +#define MIPS32_EF_R4 10
91 +#define MIPS32_EF_R5 11
92 +#define MIPS32_EF_R6 12
93 +#define MIPS32_EF_R7 13
94 +#define MIPS32_EF_R8 14
95 +#define MIPS32_EF_R9 15
96 +#define MIPS32_EF_R10 16
97 +#define MIPS32_EF_R11 17
98 +#define MIPS32_EF_R12 18
99 +#define MIPS32_EF_R13 19
100 +#define MIPS32_EF_R14 20
101 +#define MIPS32_EF_R15 21
102 +#define MIPS32_EF_R16 22
103 +#define MIPS32_EF_R17 23
104 +#define MIPS32_EF_R18 24
105 +#define MIPS32_EF_R19 25
106 +#define MIPS32_EF_R20 26
107 +#define MIPS32_EF_R21 27
108 +#define MIPS32_EF_R22 28
109 +#define MIPS32_EF_R23 29
110 +#define MIPS32_EF_R24 30
111 +#define MIPS32_EF_R25 31
112
113 /*
114 * k0/k1 unsaved
115 */
116 -#define EF_R26 32
117 -#define EF_R27 33
118 +#define MIPS32_EF_R26 32
119 +#define MIPS32_EF_R27 33
120
121 -#define EF_R28 34
122 -#define EF_R29 35
123 -#define EF_R30 36
124 -#define EF_R31 37
125 +#define MIPS32_EF_R28 34
126 +#define MIPS32_EF_R29 35
127 +#define MIPS32_EF_R30 36
128 +#define MIPS32_EF_R31 37
129
130 /*
131 * Saved special registers
132 */
133 -#define EF_LO 38
134 -#define EF_HI 39
135 -
136 -#define EF_CP0_EPC 40
137 -#define EF_CP0_BADVADDR 41
138 -#define EF_CP0_STATUS 42
139 -#define EF_CP0_CAUSE 43
140 -#define EF_UNUSED0 44
141 -
142 -#define EF_SIZE 180
143 -
144 -#endif
145 -
146 -#if defined(CONFIG_64BIT) && !defined(WANT_COMPAT_REG_H)
147 -
148 -#define EF_R0 0
149 -#define EF_R1 1
150 -#define EF_R2 2
151 -#define EF_R3 3
152 -#define EF_R4 4
153 -#define EF_R5 5
154 -#define EF_R6 6
155 -#define EF_R7 7
156 -#define EF_R8 8
157 -#define EF_R9 9
158 -#define EF_R10 10
159 -#define EF_R11 11
160 -#define EF_R12 12
161 -#define EF_R13 13
162 -#define EF_R14 14
163 -#define EF_R15 15
164 -#define EF_R16 16
165 -#define EF_R17 17
166 -#define EF_R18 18
167 -#define EF_R19 19
168 -#define EF_R20 20
169 -#define EF_R21 21
170 -#define EF_R22 22
171 -#define EF_R23 23
172 -#define EF_R24 24
173 -#define EF_R25 25
174 +#define MIPS32_EF_LO 38
175 +#define MIPS32_EF_HI 39
176 +
177 +#define MIPS32_EF_CP0_EPC 40
178 +#define MIPS32_EF_CP0_BADVADDR 41
179 +#define MIPS32_EF_CP0_STATUS 42
180 +#define MIPS32_EF_CP0_CAUSE 43
181 +#define MIPS32_EF_UNUSED0 44
182 +
183 +#define MIPS32_EF_SIZE 180
184 +
185 +#define MIPS64_EF_R0 0
186 +#define MIPS64_EF_R1 1
187 +#define MIPS64_EF_R2 2
188 +#define MIPS64_EF_R3 3
189 +#define MIPS64_EF_R4 4
190 +#define MIPS64_EF_R5 5
191 +#define MIPS64_EF_R6 6
192 +#define MIPS64_EF_R7 7
193 +#define MIPS64_EF_R8 8
194 +#define MIPS64_EF_R9 9
195 +#define MIPS64_EF_R10 10
196 +#define MIPS64_EF_R11 11
197 +#define MIPS64_EF_R12 12
198 +#define MIPS64_EF_R13 13
199 +#define MIPS64_EF_R14 14
200 +#define MIPS64_EF_R15 15
201 +#define MIPS64_EF_R16 16
202 +#define MIPS64_EF_R17 17
203 +#define MIPS64_EF_R18 18
204 +#define MIPS64_EF_R19 19
205 +#define MIPS64_EF_R20 20
206 +#define MIPS64_EF_R21 21
207 +#define MIPS64_EF_R22 22
208 +#define MIPS64_EF_R23 23
209 +#define MIPS64_EF_R24 24
210 +#define MIPS64_EF_R25 25
211
212 /*
213 * k0/k1 unsaved
214 */
215 -#define EF_R26 26
216 -#define EF_R27 27
217 +#define MIPS64_EF_R26 26
218 +#define MIPS64_EF_R27 27
219
220
221 -#define EF_R28 28
222 -#define EF_R29 29
223 -#define EF_R30 30
224 -#define EF_R31 31
225 +#define MIPS64_EF_R28 28
226 +#define MIPS64_EF_R29 29
227 +#define MIPS64_EF_R30 30
228 +#define MIPS64_EF_R31 31
229
230 /*
231 * Saved special registers
232 */
233 -#define EF_LO 32
234 -#define EF_HI 33
235 -
236 -#define EF_CP0_EPC 34
237 -#define EF_CP0_BADVADDR 35
238 -#define EF_CP0_STATUS 36
239 -#define EF_CP0_CAUSE 37
240 -
241 -#define EF_SIZE 304 /* size in bytes */
242 +#define MIPS64_EF_LO 32
243 +#define MIPS64_EF_HI 33
244 +
245 +#define MIPS64_EF_CP0_EPC 34
246 +#define MIPS64_EF_CP0_BADVADDR 35
247 +#define MIPS64_EF_CP0_STATUS 36
248 +#define MIPS64_EF_CP0_CAUSE 37
249 +
250 +#define MIPS64_EF_SIZE 304 /* size in bytes */
251 +
252 +#if defined(CONFIG_32BIT)
253 +
254 +#define EF_R0 MIPS32_EF_R0
255 +#define EF_R1 MIPS32_EF_R1
256 +#define EF_R2 MIPS32_EF_R2
257 +#define EF_R3 MIPS32_EF_R3
258 +#define EF_R4 MIPS32_EF_R4
259 +#define EF_R5 MIPS32_EF_R5
260 +#define EF_R6 MIPS32_EF_R6
261 +#define EF_R7 MIPS32_EF_R7
262 +#define EF_R8 MIPS32_EF_R8
263 +#define EF_R9 MIPS32_EF_R9
264 +#define EF_R10 MIPS32_EF_R10
265 +#define EF_R11 MIPS32_EF_R11
266 +#define EF_R12 MIPS32_EF_R12
267 +#define EF_R13 MIPS32_EF_R13
268 +#define EF_R14 MIPS32_EF_R14
269 +#define EF_R15 MIPS32_EF_R15
270 +#define EF_R16 MIPS32_EF_R16
271 +#define EF_R17 MIPS32_EF_R17
272 +#define EF_R18 MIPS32_EF_R18
273 +#define EF_R19 MIPS32_EF_R19
274 +#define EF_R20 MIPS32_EF_R20
275 +#define EF_R21 MIPS32_EF_R21
276 +#define EF_R22 MIPS32_EF_R22
277 +#define EF_R23 MIPS32_EF_R23
278 +#define EF_R24 MIPS32_EF_R24
279 +#define EF_R25 MIPS32_EF_R25
280 +#define EF_R26 MIPS32_EF_R26
281 +#define EF_R27 MIPS32_EF_R27
282 +#define EF_R28 MIPS32_EF_R28
283 +#define EF_R29 MIPS32_EF_R29
284 +#define EF_R30 MIPS32_EF_R30
285 +#define EF_R31 MIPS32_EF_R31
286 +#define EF_LO MIPS32_EF_LO
287 +#define EF_HI MIPS32_EF_HI
288 +#define EF_CP0_EPC MIPS32_EF_CP0_EPC
289 +#define EF_CP0_BADVADDR MIPS32_EF_CP0_BADVADDR
290 +#define EF_CP0_STATUS MIPS32_EF_CP0_STATUS
291 +#define EF_CP0_CAUSE MIPS32_EF_CP0_CAUSE
292 +#define EF_UNUSED0 MIPS32_EF_UNUSED0
293 +#define EF_SIZE MIPS32_EF_SIZE
294 +
295 +#elif defined(CONFIG_64BIT)
296 +
297 +#define EF_R0 MIPS64_EF_R0
298 +#define EF_R1 MIPS64_EF_R1
299 +#define EF_R2 MIPS64_EF_R2
300 +#define EF_R3 MIPS64_EF_R3
301 +#define EF_R4 MIPS64_EF_R4
302 +#define EF_R5 MIPS64_EF_R5
303 +#define EF_R6 MIPS64_EF_R6
304 +#define EF_R7 MIPS64_EF_R7
305 +#define EF_R8 MIPS64_EF_R8
306 +#define EF_R9 MIPS64_EF_R9
307 +#define EF_R10 MIPS64_EF_R10
308 +#define EF_R11 MIPS64_EF_R11
309 +#define EF_R12 MIPS64_EF_R12
310 +#define EF_R13 MIPS64_EF_R13
311 +#define EF_R14 MIPS64_EF_R14
312 +#define EF_R15 MIPS64_EF_R15
313 +#define EF_R16 MIPS64_EF_R16
314 +#define EF_R17 MIPS64_EF_R17
315 +#define EF_R18 MIPS64_EF_R18
316 +#define EF_R19 MIPS64_EF_R19
317 +#define EF_R20 MIPS64_EF_R20
318 +#define EF_R21 MIPS64_EF_R21
319 +#define EF_R22 MIPS64_EF_R22
320 +#define EF_R23 MIPS64_EF_R23
321 +#define EF_R24 MIPS64_EF_R24
322 +#define EF_R25 MIPS64_EF_R25
323 +#define EF_R26 MIPS64_EF_R26
324 +#define EF_R27 MIPS64_EF_R27
325 +#define EF_R28 MIPS64_EF_R28
326 +#define EF_R29 MIPS64_EF_R29
327 +#define EF_R30 MIPS64_EF_R30
328 +#define EF_R31 MIPS64_EF_R31
329 +#define EF_LO MIPS64_EF_LO
330 +#define EF_HI MIPS64_EF_HI
331 +#define EF_CP0_EPC MIPS64_EF_CP0_EPC
332 +#define EF_CP0_BADVADDR MIPS64_EF_CP0_BADVADDR
333 +#define EF_CP0_STATUS MIPS64_EF_CP0_STATUS
334 +#define EF_CP0_CAUSE MIPS64_EF_CP0_CAUSE
335 +#define EF_SIZE MIPS64_EF_SIZE
336
337 #endif /* CONFIG_64BIT */
338
339 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
340 index 895320e25662..e6e5d9162213 100644
341 --- a/arch/mips/include/asm/thread_info.h
342 +++ b/arch/mips/include/asm/thread_info.h
343 @@ -131,6 +131,8 @@ static inline struct thread_info *current_thread_info(void)
344 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
345 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
346
347 +#define _TIF_WORK_SYSCALL_ENTRY (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP)
348 +
349 /* work to do in syscall_trace_leave() */
350 #define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
351
352 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
353 index 202e581e6096..7fdf1de0447f 100644
354 --- a/arch/mips/kernel/binfmt_elfo32.c
355 +++ b/arch/mips/kernel/binfmt_elfo32.c
356 @@ -58,12 +58,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
357
358 #include <asm/processor.h>
359
360 -/*
361 - * When this file is selected, we are definitely running a 64bit kernel.
362 - * So using the right regs define in asm/reg.h
363 - */
364 -#define WANT_COMPAT_REG_H
365 -
366 /* These MUST be defined before elf.h gets included */
367 extern void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs);
368 #define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
369 @@ -135,21 +129,21 @@ void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
370 {
371 int i;
372
373 - for (i = 0; i < EF_R0; i++)
374 + for (i = 0; i < MIPS32_EF_R0; i++)
375 grp[i] = 0;
376 - grp[EF_R0] = 0;
377 + grp[MIPS32_EF_R0] = 0;
378 for (i = 1; i <= 31; i++)
379 - grp[EF_R0 + i] = (elf_greg_t) regs->regs[i];
380 - grp[EF_R26] = 0;
381 - grp[EF_R27] = 0;
382 - grp[EF_LO] = (elf_greg_t) regs->lo;
383 - grp[EF_HI] = (elf_greg_t) regs->hi;
384 - grp[EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
385 - grp[EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
386 - grp[EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
387 - grp[EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
388 -#ifdef EF_UNUSED0
389 - grp[EF_UNUSED0] = 0;
390 + grp[MIPS32_EF_R0 + i] = (elf_greg_t) regs->regs[i];
391 + grp[MIPS32_EF_R26] = 0;
392 + grp[MIPS32_EF_R27] = 0;
393 + grp[MIPS32_EF_LO] = (elf_greg_t) regs->lo;
394 + grp[MIPS32_EF_HI] = (elf_greg_t) regs->hi;
395 + grp[MIPS32_EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
396 + grp[MIPS32_EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
397 + grp[MIPS32_EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
398 + grp[MIPS32_EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
399 +#ifdef MIPS32_EF_UNUSED0
400 + grp[MIPS32_EF_UNUSED0] = 0;
401 #endif
402 }
403
404 diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
405 index c01b307317a9..bffbbc557879 100644
406 --- a/arch/mips/kernel/irq-gic.c
407 +++ b/arch/mips/kernel/irq-gic.c
408 @@ -256,11 +256,13 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
409
410 /* Setup Intr to Pin mapping */
411 if (pin & GIC_MAP_TO_NMI_MSK) {
412 + int i;
413 +
414 GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
415 /* FIXME: hack to route NMI to all cpu's */
416 - for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
417 + for (i = 0; i < NR_CPUS; i += 32) {
418 GICWRITE(GIC_REG_ADDR(SHARED,
419 - GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
420 + GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)),
421 0xffffffff);
422 }
423 } else {
424 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
425 index 9c6299c733a3..1b95b2443221 100644
426 --- a/arch/mips/kernel/ptrace.c
427 +++ b/arch/mips/kernel/ptrace.c
428 @@ -161,6 +161,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
429 __get_user(fregs[i], i + (__u64 __user *) data);
430
431 __get_user(child->thread.fpu.fcr31, data + 64);
432 + child->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
433
434 /* FIR may not be written. */
435
436 @@ -451,7 +452,7 @@ long arch_ptrace(struct task_struct *child, long request,
437 break;
438 #endif
439 case FPC_CSR:
440 - child->thread.fpu.fcr31 = data;
441 + child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
442 break;
443 case DSP_BASE ... DSP_BASE + 5: {
444 dspreg_t *dregs;
445 diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
446 index 9b36424b03c5..ed5bafb5d637 100644
447 --- a/arch/mips/kernel/scall32-o32.S
448 +++ b/arch/mips/kernel/scall32-o32.S
449 @@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
450
451 stack_done:
452 lw t0, TI_FLAGS($28) # syscall tracing enabled?
453 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
454 + li t1, _TIF_WORK_SYSCALL_ENTRY
455 and t0, t1
456 bnez t0, syscall_trace_entry # -> yes
457
458 diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
459 index 97a5909a61cf..be6627ead619 100644
460 --- a/arch/mips/kernel/scall64-64.S
461 +++ b/arch/mips/kernel/scall64-64.S
462 @@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
463
464 sd a3, PT_R26(sp) # save a3 for syscall restarting
465
466 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
467 + li t1, _TIF_WORK_SYSCALL_ENTRY
468 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
469 and t0, t1, t0
470 bnez t0, syscall_trace_entry
471 diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
472 index edcb6594e7b5..cab150789c8d 100644
473 --- a/arch/mips/kernel/scall64-n32.S
474 +++ b/arch/mips/kernel/scall64-n32.S
475 @@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
476
477 sd a3, PT_R26(sp) # save a3 for syscall restarting
478
479 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
480 + li t1, _TIF_WORK_SYSCALL_ENTRY
481 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
482 and t0, t1, t0
483 bnez t0, n32_syscall_trace_entry
484 diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
485 index 74f485d3c0ef..37605dc8eef7 100644
486 --- a/arch/mips/kernel/scall64-o32.S
487 +++ b/arch/mips/kernel/scall64-o32.S
488 @@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
489 PTR 4b, bad_stack
490 .previous
491
492 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
493 + li t1, _TIF_WORK_SYSCALL_ENTRY
494 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
495 and t0, t1, t0
496 bnez t0, trace_a_syscall
497 diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
498 index 203d8857070d..2c81265bcf46 100644
499 --- a/arch/mips/kernel/unaligned.c
500 +++ b/arch/mips/kernel/unaligned.c
501 @@ -604,7 +604,6 @@ static void emulate_load_store_insn(struct pt_regs *regs,
502 case sdc1_op:
503 die_if_kernel("Unaligned FP access in kernel code", regs);
504 BUG_ON(!used_math());
505 - BUG_ON(!is_fpu_owner());
506
507 lose_fpu(1); /* Save FPU state for the emulator. */
508 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
509 diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
510 index 21813beec7a5..5495101d32c8 100644
511 --- a/arch/mips/mm/c-r4k.c
512 +++ b/arch/mips/mm/c-r4k.c
513 @@ -12,6 +12,7 @@
514 #include <linux/highmem.h>
515 #include <linux/kernel.h>
516 #include <linux/linkage.h>
517 +#include <linux/preempt.h>
518 #include <linux/sched.h>
519 #include <linux/smp.h>
520 #include <linux/mm.h>
521 @@ -601,6 +602,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
522 /* Catch bad driver code */
523 BUG_ON(size == 0);
524
525 + preempt_disable();
526 if (cpu_has_inclusive_pcaches) {
527 if (size >= scache_size)
528 r4k_blast_scache();
529 @@ -621,6 +623,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
530 R4600_HIT_CACHEOP_WAR_IMPL;
531 blast_dcache_range(addr, addr + size);
532 }
533 + preempt_enable();
534
535 bc_wback_inv(addr, size);
536 __sync();
537 @@ -631,6 +634,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
538 /* Catch bad driver code */
539 BUG_ON(size == 0);
540
541 + preempt_disable();
542 if (cpu_has_inclusive_pcaches) {
543 if (size >= scache_size)
544 r4k_blast_scache();
545 @@ -655,6 +659,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
546 R4600_HIT_CACHEOP_WAR_IMPL;
547 blast_inv_dcache_range(addr, addr + size);
548 }
549 + preempt_enable();
550
551 bc_inv(addr, size);
552 __sync();
553 diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
554 index afeef93f81a7..0e17e1352718 100644
555 --- a/arch/mips/mm/tlbex.c
556 +++ b/arch/mips/mm/tlbex.c
557 @@ -1329,6 +1329,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
558 }
559 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
560 uasm_l_tlb_huge_update(&l, p);
561 + UASM_i_LW(&p, K0, 0, K1);
562 build_huge_update_entries(&p, htlb_info.huge_pte, K1);
563 build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
564 htlb_info.restore_scratch);
565 diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
566 index d8a455ede5a7..fec8bf97d806 100644
567 --- a/arch/openrisc/kernel/entry.S
568 +++ b/arch/openrisc/kernel/entry.S
569 @@ -853,37 +853,44 @@ UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00)
570
571 /* ========================================================[ return ] === */
572
573 +_resume_userspace:
574 + DISABLE_INTERRUPTS(r3,r4)
575 + l.lwz r4,TI_FLAGS(r10)
576 + l.andi r13,r4,_TIF_WORK_MASK
577 + l.sfeqi r13,0
578 + l.bf _restore_all
579 + l.nop
580 +
581 _work_pending:
582 - /*
583 - * if (current_thread_info->flags & _TIF_NEED_RESCHED)
584 - * schedule();
585 - */
586 - l.lwz r5,TI_FLAGS(r10)
587 - l.andi r3,r5,_TIF_NEED_RESCHED
588 - l.sfnei r3,0
589 - l.bnf _work_notifysig
590 + l.lwz r5,PT_ORIG_GPR11(r1)
591 + l.sfltsi r5,0
592 + l.bnf 1f
593 l.nop
594 - l.jal schedule
595 + l.andi r5,r5,0
596 +1:
597 + l.jal do_work_pending
598 + l.ori r3,r1,0 /* pt_regs */
599 +
600 + l.sfeqi r11,0
601 + l.bf _restore_all
602 l.nop
603 - l.j _resume_userspace
604 + l.sfltsi r11,0
605 + l.bnf 1f
606 l.nop
607 -
608 -/* Handle pending signals and notify-resume requests.
609 - * do_notify_resume must be passed the latest pushed pt_regs, not
610 - * necessarily the "userspace" ones. Also, pt_regs->syscallno
611 - * must be set so that the syscall restart functionality works.
612 - */
613 -_work_notifysig:
614 - l.jal do_notify_resume
615 - l.ori r3,r1,0 /* pt_regs */
616 -
617 -_resume_userspace:
618 - DISABLE_INTERRUPTS(r3,r4)
619 - l.lwz r3,TI_FLAGS(r10)
620 - l.andi r3,r3,_TIF_WORK_MASK
621 - l.sfnei r3,0
622 - l.bf _work_pending
623 + l.and r11,r11,r0
624 + l.ori r11,r11,__NR_restart_syscall
625 + l.j _syscall_check_trace_enter
626 l.nop
627 +1:
628 + l.lwz r11,PT_ORIG_GPR11(r1)
629 + /* Restore arg registers */
630 + l.lwz r3,PT_GPR3(r1)
631 + l.lwz r4,PT_GPR4(r1)
632 + l.lwz r5,PT_GPR5(r1)
633 + l.lwz r6,PT_GPR6(r1)
634 + l.lwz r7,PT_GPR7(r1)
635 + l.j _syscall_check_trace_enter
636 + l.lwz r8,PT_GPR8(r1)
637
638 _restore_all:
639 RESTORE_ALL
640 diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
641 index ae167f7e081a..c277ec82783d 100644
642 --- a/arch/openrisc/kernel/signal.c
643 +++ b/arch/openrisc/kernel/signal.c
644 @@ -28,24 +28,24 @@
645 #include <linux/tracehook.h>
646
647 #include <asm/processor.h>
648 +#include <asm/syscall.h>
649 #include <asm/ucontext.h>
650 #include <asm/uaccess.h>
651
652 #define DEBUG_SIG 0
653
654 struct rt_sigframe {
655 - struct siginfo *pinfo;
656 - void *puc;
657 struct siginfo info;
658 struct ucontext uc;
659 unsigned char retcode[16]; /* trampoline code */
660 };
661
662 -static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
663 +static int restore_sigcontext(struct pt_regs *regs,
664 + struct sigcontext __user *sc)
665 {
666 - unsigned int err = 0;
667 + int err = 0;
668
669 - /* Alwys make any pending restarted system call return -EINTR */
670 + /* Always make any pending restarted system calls return -EINTR */
671 current_thread_info()->restart_block.fn = do_no_restart_syscall;
672
673 /*
674 @@ -53,25 +53,21 @@ static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
675 * (sc is already checked for VERIFY_READ since the sigframe was
676 * checked in sys_sigreturn previously)
677 */
678 - if (__copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)))
679 - goto badframe;
680 - if (__copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long)))
681 - goto badframe;
682 - if (__copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long)))
683 - goto badframe;
684 + err |= __copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long));
685 + err |= __copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long));
686 + err |= __copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long));
687
688 /* make sure the SM-bit is cleared so user-mode cannot fool us */
689 regs->sr &= ~SPR_SR_SM;
690
691 + regs->orig_gpr11 = -1; /* Avoid syscall restart checks */
692 +
693 /* TODO: the other ports use regs->orig_XX to disable syscall checks
694 * after this completes, but we don't use that mechanism. maybe we can
695 * use it now ?
696 */
697
698 return err;
699 -
700 -badframe:
701 - return 1;
702 }
703
704 asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
705 @@ -111,21 +107,18 @@ badframe:
706 * Set up a signal frame.
707 */
708
709 -static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
710 - unsigned long mask)
711 +static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
712 {
713 int err = 0;
714
715 /* copy the regs */
716 -
717 + /* There should be no need to save callee-saved registers here...
718 + * ...but we save them anyway. Revisit this
719 + */
720 err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long));
721 err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long));
722 err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long));
723
724 - /* then some other stuff */
725 -
726 - err |= __put_user(mask, &sc->oldmask);
727 -
728 return err;
729 }
730
731 @@ -181,24 +174,18 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
732 int err = 0;
733
734 frame = get_sigframe(ka, regs, sizeof(*frame));
735 -
736 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
737 goto give_sigsegv;
738
739 - err |= __put_user(&frame->info, &frame->pinfo);
740 - err |= __put_user(&frame->uc, &frame->puc);
741 -
742 + /* Create siginfo. */
743 if (ka->sa.sa_flags & SA_SIGINFO)
744 err |= copy_siginfo_to_user(&frame->info, info);
745 - if (err)
746 - goto give_sigsegv;
747
748 - /* Clear all the bits of the ucontext we don't use. */
749 - err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
750 + /* Create the ucontext. */
751 err |= __put_user(0, &frame->uc.uc_flags);
752 err |= __put_user(NULL, &frame->uc.uc_link);
753 err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
754 - err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
755 + err |= setup_sigcontext(regs, &frame->uc.uc_mcontext);
756
757 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
758
759 @@ -207,9 +194,12 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
760
761 /* trampoline - the desired return ip is the retcode itself */
762 return_ip = (unsigned long)&frame->retcode;
763 - /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
764 - err |= __put_user(0xa960, (short *)(frame->retcode + 0));
765 - err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2));
766 + /* This is:
767 + l.ori r11,r0,__NR_sigreturn
768 + l.sys 1
769 + */
770 + err |= __put_user(0xa960, (short *)(frame->retcode + 0));
771 + err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2));
772 err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
773 err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
774
775 @@ -262,82 +252,106 @@ handle_signal(unsigned long sig,
776 * mode below.
777 */
778
779 -void do_signal(struct pt_regs *regs)
780 +int do_signal(struct pt_regs *regs, int syscall)
781 {
782 siginfo_t info;
783 int signr;
784 struct k_sigaction ka;
785 -
786 - /*
787 - * We want the common case to go fast, which
788 - * is why we may in certain cases get here from
789 - * kernel mode. Just return without doing anything
790 - * if so.
791 - */
792 - if (!user_mode(regs))
793 - return;
794 -
795 - signr = get_signal_to_deliver(&info, &ka, regs, NULL);
796 -
797 - /* If we are coming out of a syscall then we need
798 - * to check if the syscall was interrupted and wants to be
799 - * restarted after handling the signal. If so, the original
800 - * syscall number is put back into r11 and the PC rewound to
801 - * point at the l.sys instruction that resulted in the
802 - * original syscall. Syscall results other than the four
803 - * below mean that the syscall executed to completion and no
804 - * restart is necessary.
805 - */
806 - if (regs->orig_gpr11) {
807 - int restart = 0;
808 -
809 - switch (regs->gpr[11]) {
810 + unsigned long continue_addr = 0;
811 + unsigned long restart_addr = 0;
812 + unsigned long retval = 0;
813 + int restart = 0;
814 +
815 + if (syscall) {
816 + continue_addr = regs->pc;
817 + restart_addr = continue_addr - 4;
818 + retval = regs->gpr[11];
819 +
820 + /*
821 + * Setup syscall restart here so that a debugger will
822 + * see the already changed PC.
823 + */
824 + switch (retval) {
825 case -ERESTART_RESTARTBLOCK:
826 + restart = -2;
827 + /* Fall through */
828 case -ERESTARTNOHAND:
829 - /* Restart if there is no signal handler */
830 - restart = (signr <= 0);
831 - break;
832 case -ERESTARTSYS:
833 - /* Restart if there no signal handler or
834 - * SA_RESTART flag is set */
835 - restart = (signr <= 0 || (ka.sa.sa_flags & SA_RESTART));
836 - break;
837 case -ERESTARTNOINTR:
838 - /* Always restart */
839 - restart = 1;
840 + restart++;
841 + regs->gpr[11] = regs->orig_gpr11;
842 + regs->pc = restart_addr;
843 break;
844 }
845 + }
846
847 - if (restart) {
848 - if (regs->gpr[11] == -ERESTART_RESTARTBLOCK)
849 - regs->gpr[11] = __NR_restart_syscall;
850 - else
851 - regs->gpr[11] = regs->orig_gpr11;
852 - regs->pc -= 4;
853 - } else {
854 - regs->gpr[11] = -EINTR;
855 + /*
856 + * Get the signal to deliver. When running under ptrace, at this
857 + * point the debugger may change all our registers ...
858 + */
859 + signr = get_signal_to_deliver(&info, &ka, regs, NULL);
860 + /*
861 + * Depending on the signal settings we may need to revert the
862 + * decision to restart the system call. But skip this if a
863 + * debugger has chosen to restart at a different PC.
864 + */
865 + if (signr > 0) {
866 + if (unlikely(restart) && regs->pc == restart_addr) {
867 + if (retval == -ERESTARTNOHAND ||
868 + retval == -ERESTART_RESTARTBLOCK
869 + || (retval == -ERESTARTSYS
870 + && !(ka.sa.sa_flags & SA_RESTART))) {
871 + /* No automatic restart */
872 + regs->gpr[11] = -EINTR;
873 + regs->pc = continue_addr;
874 + }
875 }
876 - }
877
878 - if (signr <= 0) {
879 - /* no signal to deliver so we just put the saved sigmask
880 - * back */
881 - restore_saved_sigmask();
882 - } else { /* signr > 0 */
883 - /* Whee! Actually deliver the signal. */
884 handle_signal(signr, &info, &ka, regs);
885 + } else {
886 + /* no handler */
887 + restore_saved_sigmask();
888 + /*
889 + * Restore pt_regs PC as syscall restart will be handled by
890 + * kernel without return to userspace
891 + */
892 + if (unlikely(restart) && regs->pc == restart_addr) {
893 + regs->pc = continue_addr;
894 + return restart;
895 + }
896 }
897
898 - return;
899 + return 0;
900 }
901
902 -asmlinkage void do_notify_resume(struct pt_regs *regs)
903 +asmlinkage int
904 +do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
905 {
906 - if (current_thread_info()->flags & _TIF_SIGPENDING)
907 - do_signal(regs);
908 -
909 - if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) {
910 - clear_thread_flag(TIF_NOTIFY_RESUME);
911 - tracehook_notify_resume(regs);
912 - }
913 + do {
914 + if (likely(thread_flags & _TIF_NEED_RESCHED)) {
915 + schedule();
916 + } else {
917 + if (unlikely(!user_mode(regs)))
918 + return 0;
919 + local_irq_enable();
920 + if (thread_flags & _TIF_SIGPENDING) {
921 + int restart = do_signal(regs, syscall);
922 + if (unlikely(restart)) {
923 + /*
924 + * Restart without handlers.
925 + * Deal with it without leaving
926 + * the kernel space.
927 + */
928 + return restart;
929 + }
930 + syscall = 0;
931 + } else {
932 + clear_thread_flag(TIF_NOTIFY_RESUME);
933 + tracehook_notify_resume(regs);
934 + }
935 + }
936 + local_irq_disable();
937 + thread_flags = current_thread_info()->flags;
938 + } while (thread_flags & _TIF_WORK_MASK);
939 + return 0;
940 }
941 diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
942 index d836d945068d..063fcadd1a00 100644
943 --- a/arch/powerpc/include/asm/pte-hash64-64k.h
944 +++ b/arch/powerpc/include/asm/pte-hash64-64k.h
945 @@ -40,17 +40,39 @@
946
947 #ifndef __ASSEMBLY__
948
949 +#include <asm/barrier.h> /* for smp_rmb() */
950 +
951 /*
952 * With 64K pages on hash table, we have a special PTE format that
953 * uses a second "half" of the page table to encode sub-page information
954 * in order to deal with 64K made of 4K HW pages. Thus we override the
955 * generic accessors and iterators here
956 */
957 -#define __real_pte(e,p) ((real_pte_t) { \
958 - (e), (pte_val(e) & _PAGE_COMBO) ? \
959 - (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
960 -#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
961 - (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
962 +#define __real_pte __real_pte
963 +static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
964 +{
965 + real_pte_t rpte;
966 +
967 + rpte.pte = pte;
968 + rpte.hidx = 0;
969 + if (pte_val(pte) & _PAGE_COMBO) {
970 + /*
971 + * Make sure we order the hidx load against the _PAGE_COMBO
972 + * check. The store side ordering is done in __hash_page_4K
973 + */
974 + smp_rmb();
975 + rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
976 + }
977 + return rpte;
978 +}
979 +
980 +static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
981 +{
982 + if ((pte_val(rpte.pte) & _PAGE_COMBO))
983 + return (rpte.hidx >> (index<<2)) & 0xf;
984 + return (pte_val(rpte.pte) >> 12) & 0xf;
985 +}
986 +
987 #define __rpte_to_pte(r) ((r).pte)
988 #define __rpte_sub_valid(rpte, index) \
989 (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
990 diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
991 index b7293bba0062..08c6f3185d45 100644
992 --- a/arch/powerpc/mm/numa.c
993 +++ b/arch/powerpc/mm/numa.c
994 @@ -586,8 +586,8 @@ static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
995 case CPU_UP_CANCELED:
996 case CPU_UP_CANCELED_FROZEN:
997 unmap_cpu_from_node(lcpu);
998 - break;
999 ret = NOTIFY_OK;
1000 + break;
1001 #endif
1002 }
1003 return ret;
1004 diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
1005 index 9a432de363b8..bebe64ed5dc3 100644
1006 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c
1007 +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
1008 @@ -158,7 +158,7 @@ static int pseries_remove_memory(struct device_node *np)
1009 static inline int pseries_remove_memblock(unsigned long base,
1010 unsigned int memblock_size)
1011 {
1012 - return -EOPNOTSUPP;
1013 + return 0;
1014 }
1015 static inline int pseries_remove_memory(struct device_node *np)
1016 {
1017 diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
1018 index b95219d2168d..1ff8e97f853a 100644
1019 --- a/block/blk-cgroup.c
1020 +++ b/block/blk-cgroup.c
1021 @@ -883,6 +883,13 @@ void blkcg_drain_queue(struct request_queue *q)
1022 if (!q->root_blkg)
1023 return;
1024
1025 + /*
1026 + * @q could be exiting and already have destroyed all blkgs as
1027 + * indicated by NULL root_blkg. If so, don't confuse policies.
1028 + */
1029 + if (!q->root_blkg)
1030 + return;
1031 +
1032 blk_throtl_drain(q);
1033 }
1034
1035 diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
1036 index e4c9291fc0a3..a63a4cdd2ce8 100644
1037 --- a/drivers/acpi/acpica/utcopy.c
1038 +++ b/drivers/acpi/acpica/utcopy.c
1039 @@ -998,5 +998,11 @@ acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
1040 status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
1041 }
1042
1043 + /* Delete the allocated object if copy failed */
1044 +
1045 + if (ACPI_FAILURE(status)) {
1046 + acpi_ut_remove_reference(*dest_desc);
1047 + }
1048 +
1049 return_ACPI_STATUS(status);
1050 }
1051 diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
1052 index 4056d3175178..a88894190e41 100644
1053 --- a/drivers/acpi/processor_idle.c
1054 +++ b/drivers/acpi/processor_idle.c
1055 @@ -1101,9 +1101,9 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1056
1057 if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
1058
1059 - cpuidle_pause_and_lock();
1060 /* Protect against cpu-hotplug */
1061 get_online_cpus();
1062 + cpuidle_pause_and_lock();
1063
1064 /* Disable all cpuidle devices */
1065 for_each_online_cpu(cpu) {
1066 @@ -1130,8 +1130,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1067 cpuidle_enable_device(dev);
1068 }
1069 }
1070 - put_online_cpus();
1071 cpuidle_resume_and_unlock();
1072 + put_online_cpus();
1073 }
1074
1075 return 0;
1076 diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
1077 index cca761e80d89..091682fb1617 100644
1078 --- a/drivers/acpi/scan.c
1079 +++ b/drivers/acpi/scan.c
1080 @@ -769,12 +769,17 @@ static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
1081 device->driver->ops.notify(device, event);
1082 }
1083
1084 -static acpi_status acpi_device_notify_fixed(void *data)
1085 +static void acpi_device_notify_fixed(void *data)
1086 {
1087 struct acpi_device *device = data;
1088
1089 /* Fixed hardware devices have no handles */
1090 acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
1091 +}
1092 +
1093 +static acpi_status acpi_device_fixed_event(void *data)
1094 +{
1095 + acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
1096 return AE_OK;
1097 }
1098
1099 @@ -785,12 +790,12 @@ static int acpi_device_install_notify_handler(struct acpi_device *device)
1100 if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
1101 status =
1102 acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
1103 - acpi_device_notify_fixed,
1104 + acpi_device_fixed_event,
1105 device);
1106 else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
1107 status =
1108 acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
1109 - acpi_device_notify_fixed,
1110 + acpi_device_fixed_event,
1111 device);
1112 else
1113 status = acpi_install_notify_handler(device->handle,
1114 @@ -807,10 +812,10 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device)
1115 {
1116 if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
1117 acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
1118 - acpi_device_notify_fixed);
1119 + acpi_device_fixed_event);
1120 else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
1121 acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
1122 - acpi_device_notify_fixed);
1123 + acpi_device_fixed_event);
1124 else
1125 acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
1126 acpi_device_notify);
1127 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
1128 index 7c3b3dcbfbc8..f659a571ad23 100644
1129 --- a/drivers/char/tpm/tpm.c
1130 +++ b/drivers/char/tpm/tpm.c
1131 @@ -533,11 +533,10 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
1132 int tpm_get_timeouts(struct tpm_chip *chip)
1133 {
1134 struct tpm_cmd_t tpm_cmd;
1135 - struct timeout_t *timeout_cap;
1136 + unsigned long new_timeout[4];
1137 + unsigned long old_timeout[4];
1138 struct duration_t *duration_cap;
1139 ssize_t rc;
1140 - u32 timeout;
1141 - unsigned int scale = 1;
1142
1143 tpm_cmd.header.in = tpm_getcap_header;
1144 tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
1145 @@ -571,25 +570,46 @@ int tpm_get_timeouts(struct tpm_chip *chip)
1146 != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
1147 return -EINVAL;
1148
1149 - timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
1150 - /* Don't overwrite default if value is 0 */
1151 - timeout = be32_to_cpu(timeout_cap->a);
1152 - if (timeout && timeout < 1000) {
1153 - /* timeouts in msec rather usec */
1154 - scale = 1000;
1155 - chip->vendor.timeout_adjusted = true;
1156 + old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
1157 + old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
1158 + old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
1159 + old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
1160 + memcpy(new_timeout, old_timeout, sizeof(new_timeout));
1161 +
1162 + /*
1163 + * Provide ability for vendor overrides of timeout values in case
1164 + * of misreporting.
1165 + */
1166 + if (chip->vendor.update_timeouts != NULL)
1167 + chip->vendor.timeout_adjusted =
1168 + chip->vendor.update_timeouts(chip, new_timeout);
1169 +
1170 + if (!chip->vendor.timeout_adjusted) {
1171 + /* Don't overwrite default if value is 0 */
1172 + if (new_timeout[0] != 0 && new_timeout[0] < 1000) {
1173 + int i;
1174 +
1175 + /* timeouts in msec rather usec */
1176 + for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
1177 + new_timeout[i] *= 1000;
1178 + chip->vendor.timeout_adjusted = true;
1179 + }
1180 }
1181 - if (timeout)
1182 - chip->vendor.timeout_a = usecs_to_jiffies(timeout * scale);
1183 - timeout = be32_to_cpu(timeout_cap->b);
1184 - if (timeout)
1185 - chip->vendor.timeout_b = usecs_to_jiffies(timeout * scale);
1186 - timeout = be32_to_cpu(timeout_cap->c);
1187 - if (timeout)
1188 - chip->vendor.timeout_c = usecs_to_jiffies(timeout * scale);
1189 - timeout = be32_to_cpu(timeout_cap->d);
1190 - if (timeout)
1191 - chip->vendor.timeout_d = usecs_to_jiffies(timeout * scale);
1192 +
1193 + /* Report adjusted timeouts */
1194 + if (chip->vendor.timeout_adjusted) {
1195 + dev_info(chip->dev,
1196 + HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
1197 + old_timeout[0], new_timeout[0],
1198 + old_timeout[1], new_timeout[1],
1199 + old_timeout[2], new_timeout[2],
1200 + old_timeout[3], new_timeout[3]);
1201 + }
1202 +
1203 + chip->vendor.timeout_a = usecs_to_jiffies(new_timeout[0]);
1204 + chip->vendor.timeout_b = usecs_to_jiffies(new_timeout[1]);
1205 + chip->vendor.timeout_c = usecs_to_jiffies(new_timeout[2]);
1206 + chip->vendor.timeout_d = usecs_to_jiffies(new_timeout[3]);
1207
1208 duration:
1209 tpm_cmd.header.in = tpm_getcap_header;
1210 @@ -1423,13 +1443,13 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
1211 int err, total = 0, retries = 5;
1212 u8 *dest = out;
1213
1214 + if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
1215 + return -EINVAL;
1216 +
1217 chip = tpm_chip_find_get(chip_num);
1218 if (chip == NULL)
1219 return -ENODEV;
1220
1221 - if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
1222 - return -EINVAL;
1223 -
1224 do {
1225 tpm_cmd.header.in = tpm_getrandom_header;
1226 tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes);
1227 @@ -1448,6 +1468,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
1228 num_bytes -= recd;
1229 } while (retries-- && total < max);
1230
1231 + tpm_chip_put(chip);
1232 return total ? total : -EIO;
1233 }
1234 EXPORT_SYMBOL_GPL(tpm_get_random);
1235 diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
1236 index 0770d1d79366..deffda7678a0 100644
1237 --- a/drivers/char/tpm/tpm.h
1238 +++ b/drivers/char/tpm/tpm.h
1239 @@ -95,6 +95,9 @@ struct tpm_vendor_specific {
1240 int (*send) (struct tpm_chip *, u8 *, size_t);
1241 void (*cancel) (struct tpm_chip *);
1242 u8 (*status) (struct tpm_chip *);
1243 + bool (*update_timeouts)(struct tpm_chip *chip,
1244 + unsigned long *timeout_cap);
1245 +
1246 void (*release) (struct device *);
1247 struct miscdevice miscdev;
1248 struct attribute_group *attr_group;
1249 diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
1250 index 8a41b6be23a0..72f21377fa02 100644
1251 --- a/drivers/char/tpm/tpm_tis.c
1252 +++ b/drivers/char/tpm/tpm_tis.c
1253 @@ -373,6 +373,36 @@ out_err:
1254 return rc;
1255 }
1256
1257 +struct tis_vendor_timeout_override {
1258 + u32 did_vid;
1259 + unsigned long timeout_us[4];
1260 +};
1261 +
1262 +static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
1263 + /* Atmel 3204 */
1264 + { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
1265 + (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
1266 +};
1267 +
1268 +static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
1269 + unsigned long *timeout_cap)
1270 +{
1271 + int i;
1272 + u32 did_vid;
1273 +
1274 + did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
1275 +
1276 + for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
1277 + if (vendor_timeout_overrides[i].did_vid != did_vid)
1278 + continue;
1279 + memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
1280 + sizeof(vendor_timeout_overrides[i].timeout_us));
1281 + return true;
1282 + }
1283 +
1284 + return false;
1285 +}
1286 +
1287 /*
1288 * Early probing for iTPM with STS_DATA_EXPECT flaw.
1289 * Try sending command without itpm flag set and if that
1290 @@ -475,6 +505,7 @@ static struct tpm_vendor_specific tpm_tis = {
1291 .recv = tpm_tis_recv,
1292 .send = tpm_tis_send,
1293 .cancel = tpm_tis_ready,
1294 + .update_timeouts = tpm_tis_update_timeouts,
1295 .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
1296 .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
1297 .req_canceled = tpm_tis_req_canceled,
1298 diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
1299 index 391c67b182d9..7dbc319e1cf5 100644
1300 --- a/drivers/firmware/efi/vars.c
1301 +++ b/drivers/firmware/efi/vars.c
1302 @@ -481,7 +481,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_remove);
1303 */
1304 static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
1305 {
1306 - WARN_ON(!spin_is_locked(&__efivars->lock));
1307 + lockdep_assert_held(&__efivars->lock);
1308
1309 list_del(&entry->list);
1310 spin_unlock_irq(&__efivars->lock);
1311 @@ -507,7 +507,7 @@ int __efivar_entry_delete(struct efivar_entry *entry)
1312 const struct efivar_operations *ops = __efivars->ops;
1313 efi_status_t status;
1314
1315 - WARN_ON(!spin_is_locked(&__efivars->lock));
1316 + lockdep_assert_held(&__efivars->lock);
1317
1318 status = ops->set_variable(entry->var.VariableName,
1319 &entry->var.VendorGuid,
1320 @@ -667,7 +667,7 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
1321 int strsize1, strsize2;
1322 bool found = false;
1323
1324 - WARN_ON(!spin_is_locked(&__efivars->lock));
1325 + lockdep_assert_held(&__efivars->lock);
1326
1327 list_for_each_entry_safe(entry, n, head, list) {
1328 strsize1 = ucs2_strsize(name, 1024);
1329 @@ -731,7 +731,7 @@ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
1330 const struct efivar_operations *ops = __efivars->ops;
1331 efi_status_t status;
1332
1333 - WARN_ON(!spin_is_locked(&__efivars->lock));
1334 + lockdep_assert_held(&__efivars->lock);
1335
1336 status = ops->get_variable(entry->var.VariableName,
1337 &entry->var.VendorGuid,
1338 diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
1339 index c47c2034ca71..4293e89bbbdd 100644
1340 --- a/drivers/infiniband/core/iwcm.c
1341 +++ b/drivers/infiniband/core/iwcm.c
1342 @@ -46,6 +46,7 @@
1343 #include <linux/completion.h>
1344 #include <linux/slab.h>
1345 #include <linux/module.h>
1346 +#include <linux/sysctl.h>
1347
1348 #include <rdma/iw_cm.h>
1349 #include <rdma/ib_addr.h>
1350 @@ -65,6 +66,20 @@ struct iwcm_work {
1351 struct list_head free_list;
1352 };
1353
1354 +static unsigned int default_backlog = 256;
1355 +
1356 +static struct ctl_table_header *iwcm_ctl_table_hdr;
1357 +static struct ctl_table iwcm_ctl_table[] = {
1358 + {
1359 + .procname = "default_backlog",
1360 + .data = &default_backlog,
1361 + .maxlen = sizeof(default_backlog),
1362 + .mode = 0644,
1363 + .proc_handler = proc_dointvec,
1364 + },
1365 + { }
1366 +};
1367 +
1368 /*
1369 * The following services provide a mechanism for pre-allocating iwcm_work
1370 * elements. The design pre-allocates them based on the cm_id type:
1371 @@ -419,6 +434,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
1372
1373 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
1374
1375 + if (!backlog)
1376 + backlog = default_backlog;
1377 +
1378 ret = alloc_work_entries(cm_id_priv, backlog);
1379 if (ret)
1380 return ret;
1381 @@ -1024,11 +1042,20 @@ static int __init iw_cm_init(void)
1382 if (!iwcm_wq)
1383 return -ENOMEM;
1384
1385 + iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
1386 + iwcm_ctl_table);
1387 + if (!iwcm_ctl_table_hdr) {
1388 + pr_err("iw_cm: couldn't register sysctl paths\n");
1389 + destroy_workqueue(iwcm_wq);
1390 + return -ENOMEM;
1391 + }
1392 +
1393 return 0;
1394 }
1395
1396 static void __exit iw_cm_cleanup(void)
1397 {
1398 + unregister_net_sysctl_table(iwcm_ctl_table_hdr);
1399 destroy_workqueue(iwcm_wq);
1400 }
1401
1402 diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
1403 index 1954daac0b59..35dd5ff662f1 100644
1404 --- a/drivers/infiniband/ulp/srp/ib_srp.c
1405 +++ b/drivers/infiniband/ulp/srp/ib_srp.c
1406 @@ -93,6 +93,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
1407 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
1408
1409 static struct scsi_transport_template *ib_srp_transport_template;
1410 +static struct workqueue_struct *srp_remove_wq;
1411
1412 static struct ib_client srp_client = {
1413 .name = "srp",
1414 @@ -456,7 +457,7 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
1415 spin_unlock_irq(&target->lock);
1416
1417 if (changed)
1418 - queue_work(system_long_wq, &target->remove_work);
1419 + queue_work(srp_remove_wq, &target->remove_work);
1420
1421 return changed;
1422 }
1423 @@ -2530,9 +2531,10 @@ static void srp_remove_one(struct ib_device *device)
1424 spin_unlock(&host->target_lock);
1425
1426 /*
1427 - * Wait for target port removal tasks.
1428 + * Wait for tl_err and target port removal tasks.
1429 */
1430 flush_workqueue(system_long_wq);
1431 + flush_workqueue(srp_remove_wq);
1432
1433 kfree(host);
1434 }
1435 @@ -2577,16 +2579,22 @@ static int __init srp_init_module(void)
1436 indirect_sg_entries = cmd_sg_entries;
1437 }
1438
1439 + srp_remove_wq = create_workqueue("srp_remove");
1440 + if (IS_ERR(srp_remove_wq)) {
1441 + ret = PTR_ERR(srp_remove_wq);
1442 + goto out;
1443 + }
1444 +
1445 + ret = -ENOMEM;
1446 ib_srp_transport_template =
1447 srp_attach_transport(&ib_srp_transport_functions);
1448 if (!ib_srp_transport_template)
1449 - return -ENOMEM;
1450 + goto destroy_wq;
1451
1452 ret = class_register(&srp_class);
1453 if (ret) {
1454 pr_err("couldn't register class infiniband_srp\n");
1455 - srp_release_transport(ib_srp_transport_template);
1456 - return ret;
1457 + goto release_tr;
1458 }
1459
1460 ib_sa_register_client(&srp_sa_client);
1461 @@ -2594,13 +2602,22 @@ static int __init srp_init_module(void)
1462 ret = ib_register_client(&srp_client);
1463 if (ret) {
1464 pr_err("couldn't register IB client\n");
1465 - srp_release_transport(ib_srp_transport_template);
1466 - ib_sa_unregister_client(&srp_sa_client);
1467 - class_unregister(&srp_class);
1468 - return ret;
1469 + goto unreg_sa;
1470 }
1471
1472 - return 0;
1473 +out:
1474 + return ret;
1475 +
1476 +unreg_sa:
1477 + ib_sa_unregister_client(&srp_sa_client);
1478 + class_unregister(&srp_class);
1479 +
1480 +release_tr:
1481 + srp_release_transport(ib_srp_transport_template);
1482 +
1483 +destroy_wq:
1484 + destroy_workqueue(srp_remove_wq);
1485 + goto out;
1486 }
1487
1488 static void __exit srp_cleanup_module(void)
1489 @@ -2609,6 +2626,7 @@ static void __exit srp_cleanup_module(void)
1490 ib_sa_unregister_client(&srp_sa_client);
1491 class_unregister(&srp_class);
1492 srp_release_transport(ib_srp_transport_template);
1493 + destroy_workqueue(srp_remove_wq);
1494 }
1495
1496 module_init(srp_init_module);
1497 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
1498 index 6f849cbcac6f..dfb401cba733 100644
1499 --- a/drivers/iommu/amd_iommu.c
1500 +++ b/drivers/iommu/amd_iommu.c
1501 @@ -3187,14 +3187,16 @@ free_domains:
1502
1503 static void cleanup_domain(struct protection_domain *domain)
1504 {
1505 - struct iommu_dev_data *dev_data, *next;
1506 + struct iommu_dev_data *entry;
1507 unsigned long flags;
1508
1509 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1510
1511 - list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
1512 - __detach_device(dev_data);
1513 - atomic_set(&dev_data->bind, 0);
1514 + while (!list_empty(&domain->dev_list)) {
1515 + entry = list_first_entry(&domain->dev_list,
1516 + struct iommu_dev_data, list);
1517 + __detach_device(entry);
1518 + atomic_set(&entry->bind, 0);
1519 }
1520
1521 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1522 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
1523 index 75771b2077c0..a176791509f6 100644
1524 --- a/drivers/md/raid1.c
1525 +++ b/drivers/md/raid1.c
1526 @@ -1406,12 +1406,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
1527 mddev->degraded++;
1528 set_bit(Faulty, &rdev->flags);
1529 spin_unlock_irqrestore(&conf->device_lock, flags);
1530 - /*
1531 - * if recovery is running, make sure it aborts.
1532 - */
1533 - set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1534 } else
1535 set_bit(Faulty, &rdev->flags);
1536 + /*
1537 + * if recovery is running, make sure it aborts.
1538 + */
1539 + set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1540 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1541 printk(KERN_ALERT
1542 "md/raid1:%s: Disk failure on %s, disabling device.\n"
1543 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
1544 index d2f8cd332b4a..a1ea2a753912 100644
1545 --- a/drivers/md/raid10.c
1546 +++ b/drivers/md/raid10.c
1547 @@ -1681,11 +1681,11 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
1548 spin_lock_irqsave(&conf->device_lock, flags);
1549 mddev->degraded++;
1550 spin_unlock_irqrestore(&conf->device_lock, flags);
1551 - /*
1552 - * if recovery is running, make sure it aborts.
1553 - */
1554 - set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1555 }
1556 + /*
1557 + * If recovery is running, make sure it aborts.
1558 + */
1559 + set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1560 set_bit(Blocked, &rdev->flags);
1561 set_bit(Faulty, &rdev->flags);
1562 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1563 @@ -2948,6 +2948,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
1564 */
1565 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
1566 end_reshape(conf);
1567 + close_sync(conf);
1568 return 0;
1569 }
1570
1571 @@ -4398,7 +4399,7 @@ read_more:
1572 read_bio->bi_private = r10_bio;
1573 read_bio->bi_end_io = end_sync_read;
1574 read_bio->bi_rw = READ;
1575 - read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
1576 + read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
1577 read_bio->bi_flags |= 1 << BIO_UPTODATE;
1578 read_bio->bi_vcnt = 0;
1579 read_bio->bi_size = 0;
1580 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1581 index 5e3c25d4562c..774f81423d78 100644
1582 --- a/drivers/md/raid5.c
1583 +++ b/drivers/md/raid5.c
1584 @@ -3561,6 +3561,8 @@ static void handle_stripe(struct stripe_head *sh)
1585 set_bit(R5_Wantwrite, &dev->flags);
1586 if (prexor)
1587 continue;
1588 + if (s.failed > 1)
1589 + continue;
1590 if (!test_bit(R5_Insync, &dev->flags) ||
1591 ((i == sh->pd_idx || i == sh->qd_idx) &&
1592 s.failed == 0))
1593 diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
1594 index 79715f9feb0a..fdb5840f034b 100644
1595 --- a/drivers/media/media-device.c
1596 +++ b/drivers/media/media-device.c
1597 @@ -106,8 +106,6 @@ static long media_device_enum_entities(struct media_device *mdev,
1598 if (ent->name) {
1599 strncpy(u_ent.name, ent->name, sizeof(u_ent.name));
1600 u_ent.name[sizeof(u_ent.name) - 1] = '\0';
1601 - } else {
1602 - memset(u_ent.name, 0, sizeof(u_ent.name));
1603 }
1604 u_ent.type = ent->type;
1605 u_ent.revision = ent->revision;
1606 diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
1607 index 2018befabb5a..e71decbfd0af 100644
1608 --- a/drivers/media/tuners/xc4000.c
1609 +++ b/drivers/media/tuners/xc4000.c
1610 @@ -93,7 +93,7 @@ struct xc4000_priv {
1611 struct firmware_description *firm;
1612 int firm_size;
1613 u32 if_khz;
1614 - u32 freq_hz;
1615 + u32 freq_hz, freq_offset;
1616 u32 bandwidth;
1617 u8 video_standard;
1618 u8 rf_mode;
1619 @@ -1157,14 +1157,14 @@ static int xc4000_set_params(struct dvb_frontend *fe)
1620 case SYS_ATSC:
1621 dprintk(1, "%s() VSB modulation\n", __func__);
1622 priv->rf_mode = XC_RF_MODE_AIR;
1623 - priv->freq_hz = c->frequency - 1750000;
1624 + priv->freq_offset = 1750000;
1625 priv->video_standard = XC4000_DTV6;
1626 type = DTV6;
1627 break;
1628 case SYS_DVBC_ANNEX_B:
1629 dprintk(1, "%s() QAM modulation\n", __func__);
1630 priv->rf_mode = XC_RF_MODE_CABLE;
1631 - priv->freq_hz = c->frequency - 1750000;
1632 + priv->freq_offset = 1750000;
1633 priv->video_standard = XC4000_DTV6;
1634 type = DTV6;
1635 break;
1636 @@ -1173,23 +1173,23 @@ static int xc4000_set_params(struct dvb_frontend *fe)
1637 dprintk(1, "%s() OFDM\n", __func__);
1638 if (bw == 0) {
1639 if (c->frequency < 400000000) {
1640 - priv->freq_hz = c->frequency - 2250000;
1641 + priv->freq_offset = 2250000;
1642 } else {
1643 - priv->freq_hz = c->frequency - 2750000;
1644 + priv->freq_offset = 2750000;
1645 }
1646 priv->video_standard = XC4000_DTV7_8;
1647 type = DTV78;
1648 } else if (bw <= 6000000) {
1649 priv->video_standard = XC4000_DTV6;
1650 - priv->freq_hz = c->frequency - 1750000;
1651 + priv->freq_offset = 1750000;
1652 type = DTV6;
1653 } else if (bw <= 7000000) {
1654 priv->video_standard = XC4000_DTV7;
1655 - priv->freq_hz = c->frequency - 2250000;
1656 + priv->freq_offset = 2250000;
1657 type = DTV7;
1658 } else {
1659 priv->video_standard = XC4000_DTV8;
1660 - priv->freq_hz = c->frequency - 2750000;
1661 + priv->freq_offset = 2750000;
1662 type = DTV8;
1663 }
1664 priv->rf_mode = XC_RF_MODE_AIR;
1665 @@ -1200,6 +1200,8 @@ static int xc4000_set_params(struct dvb_frontend *fe)
1666 goto fail;
1667 }
1668
1669 + priv->freq_hz = c->frequency - priv->freq_offset;
1670 +
1671 dprintk(1, "%s() frequency=%d (compensated)\n",
1672 __func__, priv->freq_hz);
1673
1674 @@ -1520,7 +1522,7 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
1675 {
1676 struct xc4000_priv *priv = fe->tuner_priv;
1677
1678 - *freq = priv->freq_hz;
1679 + *freq = priv->freq_hz + priv->freq_offset;
1680
1681 if (debug) {
1682 mutex_lock(&priv->lock);
1683 diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
1684 index 5cd09a681b6a..b2d9e9cb97f7 100644
1685 --- a/drivers/media/tuners/xc5000.c
1686 +++ b/drivers/media/tuners/xc5000.c
1687 @@ -55,7 +55,7 @@ struct xc5000_priv {
1688
1689 u32 if_khz;
1690 u16 xtal_khz;
1691 - u32 freq_hz;
1692 + u32 freq_hz, freq_offset;
1693 u32 bandwidth;
1694 u8 video_standard;
1695 u8 rf_mode;
1696 @@ -755,13 +755,13 @@ static int xc5000_set_params(struct dvb_frontend *fe)
1697 case SYS_ATSC:
1698 dprintk(1, "%s() VSB modulation\n", __func__);
1699 priv->rf_mode = XC_RF_MODE_AIR;
1700 - priv->freq_hz = freq - 1750000;
1701 + priv->freq_offset = 1750000;
1702 priv->video_standard = DTV6;
1703 break;
1704 case SYS_DVBC_ANNEX_B:
1705 dprintk(1, "%s() QAM modulation\n", __func__);
1706 priv->rf_mode = XC_RF_MODE_CABLE;
1707 - priv->freq_hz = freq - 1750000;
1708 + priv->freq_offset = 1750000;
1709 priv->video_standard = DTV6;
1710 break;
1711 case SYS_ISDBT:
1712 @@ -776,15 +776,15 @@ static int xc5000_set_params(struct dvb_frontend *fe)
1713 switch (bw) {
1714 case 6000000:
1715 priv->video_standard = DTV6;
1716 - priv->freq_hz = freq - 1750000;
1717 + priv->freq_offset = 1750000;
1718 break;
1719 case 7000000:
1720 priv->video_standard = DTV7;
1721 - priv->freq_hz = freq - 2250000;
1722 + priv->freq_offset = 2250000;
1723 break;
1724 case 8000000:
1725 priv->video_standard = DTV8;
1726 - priv->freq_hz = freq - 2750000;
1727 + priv->freq_offset = 2750000;
1728 break;
1729 default:
1730 printk(KERN_ERR "xc5000 bandwidth not set!\n");
1731 @@ -798,15 +798,15 @@ static int xc5000_set_params(struct dvb_frontend *fe)
1732 priv->rf_mode = XC_RF_MODE_CABLE;
1733 if (bw <= 6000000) {
1734 priv->video_standard = DTV6;
1735 - priv->freq_hz = freq - 1750000;
1736 + priv->freq_offset = 1750000;
1737 b = 6;
1738 } else if (bw <= 7000000) {
1739 priv->video_standard = DTV7;
1740 - priv->freq_hz = freq - 2250000;
1741 + priv->freq_offset = 2250000;
1742 b = 7;
1743 } else {
1744 priv->video_standard = DTV7_8;
1745 - priv->freq_hz = freq - 2750000;
1746 + priv->freq_offset = 2750000;
1747 b = 8;
1748 }
1749 dprintk(1, "%s() Bandwidth %dMHz (%d)\n", __func__,
1750 @@ -817,6 +817,8 @@ static int xc5000_set_params(struct dvb_frontend *fe)
1751 return -EINVAL;
1752 }
1753
1754 + priv->freq_hz = freq - priv->freq_offset;
1755 +
1756 dprintk(1, "%s() frequency=%d (compensated to %d)\n",
1757 __func__, freq, priv->freq_hz);
1758
1759 @@ -1067,7 +1069,7 @@ static int xc5000_get_frequency(struct dvb_frontend *fe, u32 *freq)
1760 {
1761 struct xc5000_priv *priv = fe->tuner_priv;
1762 dprintk(1, "%s()\n", __func__);
1763 - *freq = priv->freq_hz;
1764 + *freq = priv->freq_hz + priv->freq_offset;
1765 return 0;
1766 }
1767
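The xc4000/xc5000 hunks above split the IF compensation out into a separate freq_offset field so the get_frequency callbacks can report the frequency that was requested rather than the internally shifted value. A minimal sketch of the resulting round trip, with illustrative types and names only, not the drivers' full state:

	/* assumes u32 from <linux/types.h>; offset_hz would be e.g. 1750000 for DTV6 */
	struct tuner_state {
		u32 freq_hz;		/* value programmed into the tuner */
		u32 freq_offset;	/* IF compensation subtracted from the request */
	};

	static void tuner_set(struct tuner_state *t, u32 requested_hz, u32 offset_hz)
	{
		t->freq_offset = offset_hz;
		t->freq_hz = requested_hz - offset_hz;
	}

	static u32 tuner_get(const struct tuner_state *t)
	{
		/* undo the compensation so userspace sees its own request back */
		return t->freq_hz + t->freq_offset;
	}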
1768 diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
1769 index 75ac9947cdac..98e1b937b500 100644
1770 --- a/drivers/media/usb/au0828/au0828-video.c
1771 +++ b/drivers/media/usb/au0828/au0828-video.c
1772 @@ -788,11 +788,27 @@ static int au0828_i2s_init(struct au0828_dev *dev)
1773
1774 /*
1775 * Auvitek au0828 analog stream enable
1776 - * Please set interface0 to AS5 before enable the stream
1777 */
1778 static int au0828_analog_stream_enable(struct au0828_dev *d)
1779 {
1780 + struct usb_interface *iface;
1781 + int ret;
1782 +
1783 dprintk(1, "au0828_analog_stream_enable called\n");
1784 +
1785 + iface = usb_ifnum_to_if(d->usbdev, 0);
1786 + if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) {
1787 + dprintk(1, "Changing intf#0 to alt 5\n");
1788 + /* set au0828 interface0 to AS5 here again */
1789 + ret = usb_set_interface(d->usbdev, 0, 5);
1790 + if (ret < 0) {
1791 + printk(KERN_INFO "Au0828 can't set alt setting to 5!\n");
1792 + return -EBUSY;
1793 + }
1794 + }
1795 +
1796 + /* FIXME: size should be calculated using d->width, d->height */
1797 +
1798 au0828_writereg(d, AU0828_SENSORCTRL_VBI_103, 0x00);
1799 au0828_writereg(d, 0x106, 0x00);
1800 /* set x position */
1801 @@ -1003,15 +1019,6 @@ static int au0828_v4l2_open(struct file *filp)
1802 return -ERESTARTSYS;
1803 }
1804 if (dev->users == 0) {
1805 - /* set au0828 interface0 to AS5 here again */
1806 - ret = usb_set_interface(dev->usbdev, 0, 5);
1807 - if (ret < 0) {
1808 - mutex_unlock(&dev->lock);
1809 - printk(KERN_INFO "Au0828 can't set alternate to 5!\n");
1810 - kfree(fh);
1811 - return -EBUSY;
1812 - }
1813 -
1814 au0828_analog_stream_enable(dev);
1815 au0828_analog_stream_reset(dev);
1816
1817 @@ -1253,13 +1260,6 @@ static int au0828_set_format(struct au0828_dev *dev, unsigned int cmd,
1818 }
1819 }
1820
1821 - /* set au0828 interface0 to AS5 here again */
1822 - ret = usb_set_interface(dev->usbdev, 0, 5);
1823 - if (ret < 0) {
1824 - printk(KERN_INFO "Au0828 can't set alt setting to 5!\n");
1825 - return -EBUSY;
1826 - }
1827 -
1828 au0828_analog_stream_enable(dev);
1829
1830 return 0;
1831 diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
1832 index 759fae3ca7fb..a36f3f282ae7 100644
1833 --- a/drivers/mfd/omap-usb-host.c
1834 +++ b/drivers/mfd/omap-usb-host.c
1835 @@ -445,7 +445,7 @@ static unsigned omap_usbhs_rev1_hostconfig(struct usbhs_hcd_omap *omap,
1836
1837 for (i = 0; i < omap->nports; i++) {
1838 if (is_ehci_phy_mode(pdata->port_mode[i])) {
1839 - reg &= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
1840 + reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
1841 break;
1842 }
1843 }
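This hunk, like the omap2 NAND hunk further down, fixes a common bitwise slip: reg &= MASK keeps only the MASK bits, whereas reg &= ~MASK clears them and preserves everything else. A tiny worked example, with the bit position made up for illustration:

	#include <stdint.h>

	#define ULPI_BYPASS	(1u << 0)

	static uint32_t clear_bypass(uint32_t reg)
	{
		/* reg & ULPI_BYPASS would discard every other bit in the register */
		return reg & ~ULPI_BYPASS;	/* clears only the bypass bit */
	}
	/* clear_bypass(0x0d) == 0x0c, whereas 0x0d & ULPI_BYPASS == 0x01 */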
1844 diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
1845 index 19d637266fcd..71e4f6ccae2f 100644
1846 --- a/drivers/mtd/ftl.c
1847 +++ b/drivers/mtd/ftl.c
1848 @@ -1075,7 +1075,6 @@ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
1849 return;
1850 }
1851
1852 - ftl_freepart(partition);
1853 kfree(partition);
1854 }
1855
1856 diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
1857 index 8c4eb287bbdb..e9b1797cdb5f 100644
1858 --- a/drivers/mtd/nand/omap2.c
1859 +++ b/drivers/mtd/nand/omap2.c
1860 @@ -948,7 +948,7 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
1861 u32 val;
1862
1863 val = readl(info->reg.gpmc_ecc_config);
1864 - if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs)
1865 + if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
1866 return -EINVAL;
1867
1868 /* read ecc result */
1869 diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
1870 index 81d8681c3195..b1b35f38d11d 100644
1871 --- a/drivers/regulator/arizona-ldo1.c
1872 +++ b/drivers/regulator/arizona-ldo1.c
1873 @@ -141,8 +141,6 @@ static struct regulator_ops arizona_ldo1_ops = {
1874 .map_voltage = regulator_map_voltage_linear,
1875 .get_voltage_sel = regulator_get_voltage_sel_regmap,
1876 .set_voltage_sel = regulator_set_voltage_sel_regmap,
1877 - .get_bypass = regulator_get_bypass_regmap,
1878 - .set_bypass = regulator_set_bypass_regmap,
1879 };
1880
1881 static const struct regulator_desc arizona_ldo1 = {
1882 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
1883 index 23a90e7b7107..a119421cb324 100644
1884 --- a/drivers/scsi/bfa/bfa_ioc.h
1885 +++ b/drivers/scsi/bfa/bfa_ioc.h
1886 @@ -72,7 +72,7 @@ struct bfa_sge_s {
1887 } while (0)
1888
1889 #define bfa_swap_words(_x) ( \
1890 - ((_x) << 32) | ((_x) >> 32))
1891 + ((u64)(_x) << 32) | ((u64)(_x) >> 32))
1892
1893 #ifdef __BIG_ENDIAN
1894 #define bfa_sge_to_be(_x)
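The casts added here matter because the macro argument may have a 32-bit type, and shifting a 32-bit operand by 32 in either direction is undefined, so the swap could silently produce garbage. A standalone sketch of the corrected word swap; the function form mirrors the fixed macro, where the argument's type is not known in advance:

	#include <stdint.h>

	static inline uint64_t swap_words(uint64_t x)
	{
		/* force 64-bit arithmetic before the 32-bit shifts */
		return ((uint64_t)x << 32) | ((uint64_t)x >> 32);
	}
	/* swap_words(0x1122334455667788ULL) == 0x5566778811223344ULL */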
1895 diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
1896 index 91b76cea3e3c..87ca72d36d5b 100644
1897 --- a/drivers/scsi/storvsc_drv.c
1898 +++ b/drivers/scsi/storvsc_drv.c
1899 @@ -33,6 +33,7 @@
1900 #include <linux/device.h>
1901 #include <linux/hyperv.h>
1902 #include <linux/mempool.h>
1903 +#include <linux/blkdev.h>
1904 #include <scsi/scsi.h>
1905 #include <scsi/scsi_cmnd.h>
1906 #include <scsi/scsi_host.h>
1907 @@ -803,6 +804,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
1908 case ATA_12:
1909 set_host_byte(scmnd, DID_PASSTHROUGH);
1910 break;
1911 + /*
1912 + * On Some Windows hosts TEST_UNIT_READY command can return
1913 + * SRB_STATUS_ERROR, let the upper level code deal with it
1914 + * based on the sense information.
1915 + */
1916 + case TEST_UNIT_READY:
1917 + break;
1918 default:
1919 set_host_byte(scmnd, DID_TARGET_FAILURE);
1920 }
1921 @@ -1285,6 +1293,16 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
1922 return SUCCESS;
1923 }
1924
1925 +/*
1926 + * The host guarantees to respond to each command, although I/O latencies might
1927 + * be unbounded on Azure. Reset the timer unconditionally to give the host a
1928 + * chance to perform EH.
1929 + */
1930 +static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
1931 +{
1932 + return BLK_EH_RESET_TIMER;
1933 +}
1934 +
1935 static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
1936 {
1937 bool allowed = true;
1938 @@ -1444,6 +1462,7 @@ static struct scsi_host_template scsi_driver = {
1939 .bios_param = storvsc_get_chs,
1940 .queuecommand = storvsc_queuecommand,
1941 .eh_host_reset_handler = storvsc_host_reset_handler,
1942 + .eh_timed_out = storvsc_eh_timed_out,
1943 .slave_alloc = storvsc_device_alloc,
1944 .slave_destroy = storvsc_device_destroy,
1945 .slave_configure = storvsc_device_configure,
1946 diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
1947 index 86d2158946bb..798729eb6689 100644
1948 --- a/drivers/spi/spi-omap2-mcspi.c
1949 +++ b/drivers/spi/spi-omap2-mcspi.c
1950 @@ -136,6 +136,7 @@ struct omap2_mcspi_cs {
1951 void __iomem *base;
1952 unsigned long phys;
1953 int word_len;
1954 + u16 mode;
1955 struct list_head node;
1956 /* Context save and restore shadow register */
1957 u32 chconf0;
1958 @@ -801,6 +802,8 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
1959
1960 mcspi_write_chconf0(spi, l);
1961
1962 + cs->mode = spi->mode;
1963 +
1964 dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
1965 OMAP2_MCSPI_MAX_FREQ >> div,
1966 (spi->mode & SPI_CPHA) ? "trailing" : "leading",
1967 @@ -871,6 +874,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
1968 return -ENOMEM;
1969 cs->base = mcspi->base + spi->chip_select * 0x14;
1970 cs->phys = mcspi->phys + spi->chip_select * 0x14;
1971 + cs->mode = 0;
1972 cs->chconf0 = 0;
1973 spi->controller_state = cs;
1974 /* Link this to context save list */
1975 @@ -1043,6 +1047,16 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
1976 mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
1977 }
1978
1979 + /*
1980 + * The slave driver could have changed spi->mode in which case
1981 + * it will be different from cs->mode (the current hardware setup).
1982 + * If so, set par_override (even though its not a parity issue) so
1983 + * omap2_mcspi_setup_transfer will be called to configure the hardware
1984 + * with the correct mode on the first iteration of the loop below.
1985 + */
1986 + if (spi->mode != cs->mode)
1987 + par_override = 1;
1988 +
1989 omap2_mcspi_set_enable(spi, 0);
1990
1991 m->status = status;
1992 diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
1993 index 66a5f82cf138..183aa80c9017 100644
1994 --- a/drivers/spi/spi-orion.c
1995 +++ b/drivers/spi/spi-orion.c
1996 @@ -403,8 +403,6 @@ static int orion_spi_probe(struct platform_device *pdev)
1997 struct resource *r;
1998 unsigned long tclk_hz;
1999 int status = 0;
2000 - const u32 *iprop;
2001 - int size;
2002
2003 master = spi_alloc_master(&pdev->dev, sizeof *spi);
2004 if (master == NULL) {
2005 @@ -415,10 +413,10 @@ static int orion_spi_probe(struct platform_device *pdev)
2006 if (pdev->id != -1)
2007 master->bus_num = pdev->id;
2008 if (pdev->dev.of_node) {
2009 - iprop = of_get_property(pdev->dev.of_node, "cell-index",
2010 - &size);
2011 - if (iprop && size == sizeof(*iprop))
2012 - master->bus_num = *iprop;
2013 + u32 cell_index;
2014 + if (!of_property_read_u32(pdev->dev.of_node, "cell-index",
2015 + &cell_index))
2016 + master->bus_num = cell_index;
2017 }
2018
2019 /* we support only mode 0, and no options */
2020 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
2021 index e2c2d96491fa..52480240168e 100644
2022 --- a/fs/cifs/cifsglob.h
2023 +++ b/fs/cifs/cifsglob.h
2024 @@ -74,11 +74,6 @@
2025 #define SERVER_NAME_LENGTH 40
2026 #define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1)
2027
2028 -/* used to define string lengths for reversing unicode strings */
2029 -/* (256+1)*2 = 514 */
2030 -/* (max path length + 1 for null) * 2 for unicode */
2031 -#define MAX_NAME 514
2032 -
2033 /* SMB echo "timeout" -- FIXME: tunable? */
2034 #define SMB_ECHO_INTERVAL (60 * HZ)
2035
2036 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2037 index 8b0c656f2ab2..97b03895ac8c 100644
2038 --- a/fs/cifs/file.c
2039 +++ b/fs/cifs/file.c
2040 @@ -2809,7 +2809,7 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2041 total_read += result;
2042 }
2043
2044 - return total_read > 0 ? total_read : result;
2045 + return total_read > 0 && result != -EAGAIN ? total_read : result;
2046 }
2047
2048 static ssize_t
2049 @@ -3232,7 +3232,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
2050 total_read += result;
2051 }
2052
2053 - return total_read > 0 ? total_read : result;
2054 + return total_read > 0 && result != -EAGAIN ? total_read : result;
2055 }
2056
2057 static int cifs_readpages(struct file *file, struct address_space *mapping,
2058 diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
2059 index 9d463501348f..c9bce9b43855 100644
2060 --- a/fs/cifs/inode.c
2061 +++ b/fs/cifs/inode.c
2062 @@ -1647,6 +1647,12 @@ unlink_target:
2063 target_dentry, to_name);
2064 }
2065
2066 + /* force revalidate to go get info when needed */
2067 + CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
2068 +
2069 + source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime =
2070 + target_dir->i_mtime = current_fs_time(source_dir->i_sb);
2071 +
2072 cifs_rename_exit:
2073 kfree(info_buf_source);
2074 kfree(from_name);
2075 diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
2076 index 036279c064ff..87d125f682cd 100644
2077 --- a/fs/cifs/readdir.c
2078 +++ b/fs/cifs/readdir.c
2079 @@ -585,8 +585,8 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon,
2080 if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
2081 cfile->invalidHandle = true;
2082 spin_unlock(&cifs_file_list_lock);
2083 - if (server->ops->close)
2084 - server->ops->close(xid, tcon, &cfile->fid);
2085 + if (server->ops->close_dir)
2086 + server->ops->close_dir(xid, tcon, &cfile->fid);
2087 } else
2088 spin_unlock(&cifs_file_list_lock);
2089 if (cfile->srch_inf.ntwrk_buf_start) {
2090 diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
2091 index 5da1b55a2258..d801f63cddd0 100644
2092 --- a/fs/cifs/smb2file.c
2093 +++ b/fs/cifs/smb2file.c
2094 @@ -73,7 +73,7 @@ smb2_open_file(const unsigned int xid, struct cifs_tcon *tcon, const char *path,
2095 goto out;
2096 }
2097
2098 - smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
2099 + smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
2100 GFP_KERNEL);
2101 if (smb2_data == NULL) {
2102 rc = -ENOMEM;
2103 diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
2104 index fff6dfba6204..6d535797ec76 100644
2105 --- a/fs/cifs/smb2inode.c
2106 +++ b/fs/cifs/smb2inode.c
2107 @@ -123,7 +123,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
2108
2109 *adjust_tz = false;
2110
2111 - smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
2112 + smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
2113 GFP_KERNEL);
2114 if (smb2_data == NULL)
2115 return -ENOMEM;
2116 diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
2117 index 7c2f45c06fc2..824696fb24db 100644
2118 --- a/fs/cifs/smb2maperror.c
2119 +++ b/fs/cifs/smb2maperror.c
2120 @@ -605,7 +605,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
2121 {STATUS_MAPPED_FILE_SIZE_ZERO, -EIO, "STATUS_MAPPED_FILE_SIZE_ZERO"},
2122 {STATUS_TOO_MANY_OPENED_FILES, -EMFILE, "STATUS_TOO_MANY_OPENED_FILES"},
2123 {STATUS_CANCELLED, -EIO, "STATUS_CANCELLED"},
2124 - {STATUS_CANNOT_DELETE, -EIO, "STATUS_CANNOT_DELETE"},
2125 + {STATUS_CANNOT_DELETE, -EACCES, "STATUS_CANNOT_DELETE"},
2126 {STATUS_INVALID_COMPUTER_NAME, -EIO, "STATUS_INVALID_COMPUTER_NAME"},
2127 {STATUS_FILE_DELETED, -EIO, "STATUS_FILE_DELETED"},
2128 {STATUS_SPECIAL_ACCOUNT, -EIO, "STATUS_SPECIAL_ACCOUNT"},
2129 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2130 index e2756bb40b4d..fe7ac989c6c4 100644
2131 --- a/fs/cifs/smb2ops.c
2132 +++ b/fs/cifs/smb2ops.c
2133 @@ -243,7 +243,7 @@ smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
2134 int rc;
2135 struct smb2_file_all_info *smb2_data;
2136
2137 - smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
2138 + smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
2139 GFP_KERNEL);
2140 if (smb2_data == NULL)
2141 return -ENOMEM;
2142 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
2143 index c7a6fd87bb6e..e37790841446 100644
2144 --- a/fs/cifs/smb2pdu.c
2145 +++ b/fs/cifs/smb2pdu.c
2146 @@ -809,7 +809,8 @@ tcon_exit:
2147 tcon_error_exit:
2148 if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
2149 cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
2150 - tcon->bad_network_name = true;
2151 + if (tcon)
2152 + tcon->bad_network_name = true;
2153 }
2154 goto tcon_exit;
2155 }
2156 @@ -1203,7 +1204,7 @@ SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
2157 {
2158 return query_info(xid, tcon, persistent_fid, volatile_fid,
2159 FILE_ALL_INFORMATION,
2160 - sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
2161 + sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
2162 sizeof(struct smb2_file_all_info), data);
2163 }
2164
2165 diff --git a/fs/dcache.c b/fs/dcache.c
2166 index 9a59653d3449..25c0a1b5f6c0 100644
2167 --- a/fs/dcache.c
2168 +++ b/fs/dcache.c
2169 @@ -96,8 +96,6 @@ static struct kmem_cache *dentry_cache __read_mostly;
2170 * This hash-function tries to avoid losing too many bits of hash
2171 * information, yet avoid using a prime hash-size or similar.
2172 */
2173 -#define D_HASHBITS d_hash_shift
2174 -#define D_HASHMASK d_hash_mask
2175
2176 static unsigned int d_hash_mask __read_mostly;
2177 static unsigned int d_hash_shift __read_mostly;
2178 @@ -108,8 +106,7 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
2179 unsigned int hash)
2180 {
2181 hash += (unsigned long) parent / L1_CACHE_BYTES;
2182 - hash = hash + (hash >> D_HASHBITS);
2183 - return dentry_hashtable + (hash & D_HASHMASK);
2184 + return dentry_hashtable + hash_32(hash, d_hash_shift);
2185 }
2186
2187 /* Statistics gathering. */
2188 diff --git a/fs/namei.c b/fs/namei.c
2189 index 6ac16a37ded2..f7c4393f8535 100644
2190 --- a/fs/namei.c
2191 +++ b/fs/namei.c
2192 @@ -34,6 +34,7 @@
2193 #include <linux/device_cgroup.h>
2194 #include <linux/fs_struct.h>
2195 #include <linux/posix_acl.h>
2196 +#include <linux/hash.h>
2197 #include <asm/uaccess.h>
2198
2199 #include "internal.h"
2200 @@ -1647,8 +1648,7 @@ static inline int can_lookup(struct inode *inode)
2201
2202 static inline unsigned int fold_hash(unsigned long hash)
2203 {
2204 - hash += hash >> (8*sizeof(int));
2205 - return hash;
2206 + return hash_64(hash, 32);
2207 }
2208
2209 #else /* 32-bit case */
2210 diff --git a/fs/namespace.c b/fs/namespace.c
2211 index a45ba4f267fe..00409add4d96 100644
2212 --- a/fs/namespace.c
2213 +++ b/fs/namespace.c
2214 @@ -828,8 +828,21 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
2215
2216 mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
2217 /* Don't allow unprivileged users to change mount flags */
2218 - if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
2219 - mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
2220 + if (flag & CL_UNPRIVILEGED) {
2221 + mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
2222 +
2223 + if (mnt->mnt.mnt_flags & MNT_READONLY)
2224 + mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
2225 +
2226 + if (mnt->mnt.mnt_flags & MNT_NODEV)
2227 + mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;
2228 +
2229 + if (mnt->mnt.mnt_flags & MNT_NOSUID)
2230 + mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;
2231 +
2232 + if (mnt->mnt.mnt_flags & MNT_NOEXEC)
2233 + mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
2234 + }
2235
2236 atomic_inc(&sb->s_active);
2237 mnt->mnt.mnt_sb = sb;
2238 @@ -1764,9 +1777,6 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
2239 if (readonly_request == __mnt_is_readonly(mnt))
2240 return 0;
2241
2242 - if (mnt->mnt_flags & MNT_LOCK_READONLY)
2243 - return -EPERM;
2244 -
2245 if (readonly_request)
2246 error = mnt_make_readonly(real_mount(mnt));
2247 else
2248 @@ -1792,6 +1802,33 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
2249 if (path->dentry != path->mnt->mnt_root)
2250 return -EINVAL;
2251
2252 + /* Don't allow changing of locked mnt flags.
2253 + *
2254 + * No locks need to be held here while testing the various
2255 + * MNT_LOCK flags because those flags can never be cleared
2256 + * once they are set.
2257 + */
2258 + if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
2259 + !(mnt_flags & MNT_READONLY)) {
2260 + return -EPERM;
2261 + }
2262 + if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
2263 + !(mnt_flags & MNT_NODEV)) {
2264 + return -EPERM;
2265 + }
2266 + if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
2267 + !(mnt_flags & MNT_NOSUID)) {
2268 + return -EPERM;
2269 + }
2270 + if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
2271 + !(mnt_flags & MNT_NOEXEC)) {
2272 + return -EPERM;
2273 + }
2274 + if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
2275 + ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
2276 + return -EPERM;
2277 + }
2278 +
2279 err = security_sb_remount(sb, data);
2280 if (err)
2281 return err;
2282 @@ -1805,7 +1842,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
2283 err = do_remount_sb(sb, flags, data, 0);
2284 if (!err) {
2285 br_write_lock(&vfsmount_lock);
2286 - mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
2287 + mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
2288 mnt->mnt.mnt_flags = mnt_flags;
2289 br_write_unlock(&vfsmount_lock);
2290 }
2291 @@ -1991,7 +2028,7 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
2292 */
2293 if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
2294 flags |= MS_NODEV;
2295 - mnt_flags |= MNT_NODEV;
2296 + mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
2297 }
2298 }
2299
2300 @@ -2309,6 +2346,14 @@ long do_mount(const char *dev_name, const char *dir_name,
2301 if (flags & MS_RDONLY)
2302 mnt_flags |= MNT_READONLY;
2303
2304 + /* The default atime for remount is preservation */
2305 + if ((flags & MS_REMOUNT) &&
2306 + ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
2307 + MS_STRICTATIME)) == 0)) {
2308 + mnt_flags &= ~MNT_ATIME_MASK;
2309 + mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
2310 + }
2311 +
2312 flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
2313 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
2314 MS_STRICTATIME);
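Taken together, the new MNT_LOCK_* flags mean that once an option such as nodev has been locked on a mount (for example because it was forced on when mounting inside an unprivileged user namespace), a later MS_REMOUNT that tries to drop it is refused with EPERM; the selftest added at the end of this patch exercises that path. A hedged userspace sketch of hitting the check:

	#include <sys/mount.h>
	#include <errno.h>
	#include <stdio.h>

	/* Inside a user namespace where nodev is locked, remounting without
	 * MS_NODEV should now fail instead of silently clearing the flag. */
	static int try_drop_nodev(const char *path)
	{
		if (mount(NULL, path, NULL, MS_REMOUNT | MS_RDONLY, NULL) == 0)
			return 0;
		perror("remount");	/* expected: Operation not permitted */
		return -errno;
	}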
2315 diff --git a/fs/proc/array.c b/fs/proc/array.c
2316 index cbd0f1b324b9..09f0d9c374a3 100644
2317 --- a/fs/proc/array.c
2318 +++ b/fs/proc/array.c
2319 @@ -304,15 +304,11 @@ static void render_cap_t(struct seq_file *m, const char *header,
2320 seq_puts(m, header);
2321 CAP_FOR_EACH_U32(__capi) {
2322 seq_printf(m, "%08x",
2323 - a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
2324 + a->cap[CAP_LAST_U32 - __capi]);
2325 }
2326 seq_putc(m, '\n');
2327 }
2328
2329 -/* Remove non-existent capabilities */
2330 -#define NORM_CAPS(v) (v.cap[CAP_TO_INDEX(CAP_LAST_CAP)] &= \
2331 - CAP_TO_MASK(CAP_LAST_CAP + 1) - 1)
2332 -
2333 static inline void task_cap(struct seq_file *m, struct task_struct *p)
2334 {
2335 const struct cred *cred;
2336 @@ -326,11 +322,6 @@ static inline void task_cap(struct seq_file *m, struct task_struct *p)
2337 cap_bset = cred->cap_bset;
2338 rcu_read_unlock();
2339
2340 - NORM_CAPS(cap_inheritable);
2341 - NORM_CAPS(cap_permitted);
2342 - NORM_CAPS(cap_effective);
2343 - NORM_CAPS(cap_bset);
2344 -
2345 render_cap_t(m, "CapInh:\t", &cap_inheritable);
2346 render_cap_t(m, "CapPrm:\t", &cap_permitted);
2347 render_cap_t(m, "CapEff:\t", &cap_effective);
2348 diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
2349 index 41a695048be7..cfbb4c1b2f17 100644
2350 --- a/fs/xfs/xfs_aops.c
2351 +++ b/fs/xfs/xfs_aops.c
2352 @@ -1661,11 +1661,72 @@ xfs_vm_readpages(
2353 return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
2354 }
2355
2356 +/*
2357 + * This is basically a copy of __set_page_dirty_buffers() with one
2358 + * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
2359 + * dirty, we'll never be able to clean them because we don't write buffers
2360 + * beyond EOF, and that means we can't invalidate pages that span EOF
2361 + * that have been marked dirty. Further, the dirty state can leak into
2362 + * the file interior if the file is extended, resulting in all sorts of
2363 + * bad things happening as the state does not match the underlying data.
2364 + *
2365 + * XXX: this really indicates that bufferheads in XFS need to die. Warts like
2366 + * this only exist because of bufferheads and how the generic code manages them.
2367 + */
2368 +STATIC int
2369 +xfs_vm_set_page_dirty(
2370 + struct page *page)
2371 +{
2372 + struct address_space *mapping = page->mapping;
2373 + struct inode *inode = mapping->host;
2374 + loff_t end_offset;
2375 + loff_t offset;
2376 + int newly_dirty;
2377 +
2378 + if (unlikely(!mapping))
2379 + return !TestSetPageDirty(page);
2380 +
2381 + end_offset = i_size_read(inode);
2382 + offset = page_offset(page);
2383 +
2384 + spin_lock(&mapping->private_lock);
2385 + if (page_has_buffers(page)) {
2386 + struct buffer_head *head = page_buffers(page);
2387 + struct buffer_head *bh = head;
2388 +
2389 + do {
2390 + if (offset < end_offset)
2391 + set_buffer_dirty(bh);
2392 + bh = bh->b_this_page;
2393 + offset += 1 << inode->i_blkbits;
2394 + } while (bh != head);
2395 + }
2396 + newly_dirty = !TestSetPageDirty(page);
2397 + spin_unlock(&mapping->private_lock);
2398 +
2399 + if (newly_dirty) {
2400 + /* sigh - __set_page_dirty() is static, so copy it here, too */
2401 + unsigned long flags;
2402 +
2403 + spin_lock_irqsave(&mapping->tree_lock, flags);
2404 + if (page->mapping) { /* Race with truncate? */
2405 + WARN_ON_ONCE(!PageUptodate(page));
2406 + account_page_dirtied(page, mapping);
2407 + radix_tree_tag_set(&mapping->page_tree,
2408 + page_index(page), PAGECACHE_TAG_DIRTY);
2409 + }
2410 + spin_unlock_irqrestore(&mapping->tree_lock, flags);
2411 + __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
2412 + }
2413 + return newly_dirty;
2414 +}
2415 +
2416 const struct address_space_operations xfs_address_space_operations = {
2417 .readpage = xfs_vm_readpage,
2418 .readpages = xfs_vm_readpages,
2419 .writepage = xfs_vm_writepage,
2420 .writepages = xfs_vm_writepages,
2421 + .set_page_dirty = xfs_vm_set_page_dirty,
2422 .releasepage = xfs_vm_releasepage,
2423 .invalidatepage = xfs_vm_invalidatepage,
2424 .write_begin = xfs_vm_write_begin,
2425 diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
2426 index 044e97a33c8d..bac3e1635b7d 100644
2427 --- a/fs/xfs/xfs_dquot.c
2428 +++ b/fs/xfs/xfs_dquot.c
2429 @@ -1104,7 +1104,8 @@ xfs_qm_dqflush(
2430 * Get the buffer containing the on-disk dquot
2431 */
2432 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
2433 - mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL);
2434 + mp->m_quotainfo->qi_dqchunklen, 0, &bp,
2435 + &xfs_dquot_buf_ops);
2436 if (error)
2437 goto out_unlock;
2438
2439 diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
2440 index a5f2042aec8b..9f457fedbcfc 100644
2441 --- a/fs/xfs/xfs_file.c
2442 +++ b/fs/xfs/xfs_file.c
2443 @@ -298,7 +298,16 @@ xfs_file_aio_read(
2444 xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
2445 return ret;
2446 }
2447 - truncate_pagecache_range(VFS_I(ip), pos, -1);
2448 +
2449 + /*
2450 + * Invalidate whole pages. This can return an error if
2451 + * we fail to invalidate a page, but this should never
2452 + * happen on XFS. Warn if it does fail.
2453 + */
2454 + ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
2455 + pos >> PAGE_CACHE_SHIFT, -1);
2456 + WARN_ON_ONCE(ret);
2457 + ret = 0;
2458 }
2459 xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
2460 }
2461 @@ -677,7 +686,15 @@ xfs_file_dio_aio_write(
2462 pos, -1);
2463 if (ret)
2464 goto out;
2465 - truncate_pagecache_range(VFS_I(ip), pos, -1);
2466 + /*
2467 + * Invalidate whole pages. This can return an error if
2468 + * we fail to invalidate a page, but this should never
2469 + * happen on XFS. Warn if it does fail.
2470 + */
2471 + ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
2472 + pos >> PAGE_CACHE_SHIFT, -1);
2473 + WARN_ON_ONCE(ret);
2474 + ret = 0;
2475 }
2476
2477 /*
2478 diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
2479 index b75c9bb6e71e..29d1ca567ed3 100644
2480 --- a/fs/xfs/xfs_qm.c
2481 +++ b/fs/xfs/xfs_qm.c
2482 @@ -935,6 +935,12 @@ xfs_qm_dqiter_bufs(
2483 if (error)
2484 break;
2485
2486 + /*
2487 + * A corrupt buffer might not have a verifier attached, so
2488 + * make sure we have the correct one attached before writeback
2489 + * occurs.
2490 + */
2491 + bp->b_ops = &xfs_dquot_buf_ops;
2492 xfs_qm_reset_dqcounts(mp, bp, firstid, type);
2493 xfs_buf_delwri_queue(bp, buffer_list);
2494 xfs_buf_relse(bp);
2495 @@ -1018,7 +1024,7 @@ xfs_qm_dqiterate(
2496 xfs_buf_readahead(mp->m_ddev_targp,
2497 XFS_FSB_TO_DADDR(mp, rablkno),
2498 mp->m_quotainfo->qi_dqchunklen,
2499 - NULL);
2500 + &xfs_dquot_buf_ops);
2501 rablkno++;
2502 }
2503 }
2504 diff --git a/include/linux/capability.h b/include/linux/capability.h
2505 index 15f90929fb51..9b4378af414c 100644
2506 --- a/include/linux/capability.h
2507 +++ b/include/linux/capability.h
2508 @@ -78,8 +78,11 @@ extern const kernel_cap_t __cap_init_eff_set;
2509 # error Fix up hand-coded capability macro initializers
2510 #else /* HAND-CODED capability initializers */
2511
2512 +#define CAP_LAST_U32 ((_KERNEL_CAPABILITY_U32S) - 1)
2513 +#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) -1)
2514 +
2515 # define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }})
2516 -# define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }})
2517 +# define CAP_FULL_SET ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }})
2518 # define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
2519 | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
2520 CAP_FS_MASK_B1 } })
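CAP_LAST_U32 and CAP_LAST_U32_VALID_MASK exist so that no code ever sets capability bits beyond the last one the kernel defines. Working through the arithmetic, assuming CAP_LAST_CAP is 36 (CAP_BLOCK_SUSPEND) as in v3.10:

	/*
	 *   CAP_TO_MASK(n)           == 1 << (n & 31)
	 *   CAP_LAST_U32_VALID_MASK  == CAP_TO_MASK(CAP_LAST_CAP + 1) - 1
	 *                            == (1 << (37 & 31)) - 1
	 *                            == (1 << 5) - 1
	 *                            == 0x1f
	 *
	 * so CAP_FULL_SET is now {{ 0xffffffff, 0x0000001f }} and the undefined
	 * bits 37..63 can never appear in a "full" capability set.
	 */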
2521 diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
2522 index 7c1420bb1dce..6ade97de7a85 100644
2523 --- a/include/linux/ceph/messenger.h
2524 +++ b/include/linux/ceph/messenger.h
2525 @@ -157,7 +157,7 @@ struct ceph_msg {
2526 bool front_is_vmalloc;
2527 bool more_to_follow;
2528 bool needs_out_seq;
2529 - int front_max;
2530 + int front_alloc_len;
2531 unsigned long ack_stamp; /* tx: when we were acked */
2532
2533 struct ceph_msgpool *pool;
2534 diff --git a/include/linux/mount.h b/include/linux/mount.h
2535 index 73005f9957ea..8eeb8f6ab110 100644
2536 --- a/include/linux/mount.h
2537 +++ b/include/linux/mount.h
2538 @@ -42,11 +42,18 @@ struct mnt_namespace;
2539 * flag, consider how it interacts with shared mounts.
2540 */
2541 #define MNT_SHARED_MASK (MNT_UNBINDABLE)
2542 -#define MNT_PROPAGATION_MASK (MNT_SHARED | MNT_UNBINDABLE)
2543 +#define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \
2544 + | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \
2545 + | MNT_READONLY)
2546
2547 +#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
2548
2549 #define MNT_INTERNAL 0x4000
2550
2551 +#define MNT_LOCK_ATIME 0x040000
2552 +#define MNT_LOCK_NOEXEC 0x080000
2553 +#define MNT_LOCK_NOSUID 0x100000
2554 +#define MNT_LOCK_NODEV 0x200000
2555 #define MNT_LOCK_READONLY 0x400000
2556
2557 struct vfsmount {
2558 diff --git a/kernel/audit.c b/kernel/audit.c
2559 index a6c632757e57..4dd7529b0845 100644
2560 --- a/kernel/audit.c
2561 +++ b/kernel/audit.c
2562 @@ -1412,7 +1412,7 @@ void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
2563 audit_log_format(ab, " %s=", prefix);
2564 CAP_FOR_EACH_U32(i) {
2565 audit_log_format(ab, "%08x",
2566 - cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
2567 + cap->cap[CAP_LAST_U32 - i]);
2568 }
2569 }
2570
2571 diff --git a/kernel/capability.c b/kernel/capability.c
2572 index d52eecc0942b..1339806a8731 100644
2573 --- a/kernel/capability.c
2574 +++ b/kernel/capability.c
2575 @@ -268,6 +268,10 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
2576 i++;
2577 }
2578
2579 + effective.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
2580 + permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
2581 + inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
2582 +
2583 new = prepare_creds();
2584 if (!new)
2585 return -ENOMEM;
2586 diff --git a/kernel/smp.c b/kernel/smp.c
2587 index 4dba0f7b72ad..88797cb0d23a 100644
2588 --- a/kernel/smp.c
2589 +++ b/kernel/smp.c
2590 @@ -658,7 +658,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
2591 if (cond_func(cpu, info)) {
2592 ret = smp_call_function_single(cpu, func,
2593 info, wait);
2594 - WARN_ON_ONCE(!ret);
2595 + WARN_ON_ONCE(ret);
2596 }
2597 preempt_enable();
2598 }
2599 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
2600 index 4063d5fe5e44..5efbc122e5ce 100644
2601 --- a/kernel/trace/ring_buffer.c
2602 +++ b/kernel/trace/ring_buffer.c
2603 @@ -1980,7 +1980,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
2604
2605 /**
2606 * rb_update_event - update event type and data
2607 - * @event: the even to update
2608 + * @event: the event to update
2609 * @type: the type of event
2610 * @length: the size of the event field in the ring buffer
2611 *
2612 @@ -3353,21 +3353,16 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
2613 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2614
2615 /* Iterator usage is expected to have record disabled */
2616 - if (list_empty(&cpu_buffer->reader_page->list)) {
2617 - iter->head_page = rb_set_head_page(cpu_buffer);
2618 - if (unlikely(!iter->head_page))
2619 - return;
2620 - iter->head = iter->head_page->read;
2621 - } else {
2622 - iter->head_page = cpu_buffer->reader_page;
2623 - iter->head = cpu_buffer->reader_page->read;
2624 - }
2625 + iter->head_page = cpu_buffer->reader_page;
2626 + iter->head = cpu_buffer->reader_page->read;
2627 +
2628 + iter->cache_reader_page = iter->head_page;
2629 + iter->cache_read = iter->head;
2630 +
2631 if (iter->head)
2632 iter->read_stamp = cpu_buffer->read_stamp;
2633 else
2634 iter->read_stamp = iter->head_page->page->time_stamp;
2635 - iter->cache_reader_page = cpu_buffer->reader_page;
2636 - iter->cache_read = cpu_buffer->read;
2637 }
2638
2639 /**
2640 @@ -3760,12 +3755,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2641 return NULL;
2642
2643 /*
2644 - * We repeat when a time extend is encountered.
2645 - * Since the time extend is always attached to a data event,
2646 - * we should never loop more than once.
2647 - * (We never hit the following condition more than twice).
2648 + * We repeat when a time extend is encountered or we hit
2649 + * the end of the page. Since the time extend is always attached
2650 + * to a data event, we should never loop more than three times.
2651 + * Once for going to next page, once on time extend, and
2652 + * finally once to get the event.
2653 + * (We never hit the following condition more than thrice).
2654 */
2655 - if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
2656 + if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
2657 return NULL;
2658
2659 if (rb_per_cpu_empty(cpu_buffer))
2660 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
2661 index 302d29b3744d..5f36f70ce44d 100644
2662 --- a/net/bluetooth/l2cap_sock.c
2663 +++ b/net/bluetooth/l2cap_sock.c
2664 @@ -887,7 +887,8 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
2665 l2cap_chan_close(chan, 0);
2666 lock_sock(sk);
2667
2668 - if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2669 + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
2670 + !(current->flags & PF_EXITING))
2671 err = bt_sock_wait_state(sk, BT_CLOSED,
2672 sk->sk_lingertime);
2673 }
2674 diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
2675 index ca957d34b0c8..19ba192e9dbf 100644
2676 --- a/net/bluetooth/rfcomm/core.c
2677 +++ b/net/bluetooth/rfcomm/core.c
2678 @@ -1857,10 +1857,13 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
2679 /* Get data directly from socket receive queue without copying it. */
2680 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
2681 skb_orphan(skb);
2682 - if (!skb_linearize(skb))
2683 + if (!skb_linearize(skb)) {
2684 s = rfcomm_recv_frame(s, skb);
2685 - else
2686 + if (!s)
2687 + break;
2688 + } else {
2689 kfree_skb(skb);
2690 + }
2691 }
2692
2693 if (s && (sk->sk_state == BT_CLOSED))
2694 diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
2695 index c1c6028e389a..7ca014daa5ab 100644
2696 --- a/net/bluetooth/rfcomm/sock.c
2697 +++ b/net/bluetooth/rfcomm/sock.c
2698 @@ -887,7 +887,8 @@ static int rfcomm_sock_shutdown(struct socket *sock, int how)
2699 sk->sk_shutdown = SHUTDOWN_MASK;
2700 __rfcomm_sock_close(sk);
2701
2702 - if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2703 + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
2704 + !(current->flags & PF_EXITING))
2705 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
2706 }
2707 release_sock(sk);
2708 diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
2709 index 2bb1d3a5e76b..c9ae6b703c13 100644
2710 --- a/net/bluetooth/sco.c
2711 +++ b/net/bluetooth/sco.c
2712 @@ -858,7 +858,8 @@ static int sco_sock_shutdown(struct socket *sock, int how)
2713 sco_sock_clear_timer(sk);
2714 __sco_sock_close(sk);
2715
2716 - if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2717 + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
2718 + !(current->flags & PF_EXITING))
2719 err = bt_sock_wait_state(sk, BT_CLOSED,
2720 sk->sk_lingertime);
2721 }
2722 @@ -878,7 +879,8 @@ static int sco_sock_release(struct socket *sock)
2723
2724 sco_sock_close(sk);
2725
2726 - if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) {
2727 + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
2728 + !(current->flags & PF_EXITING)) {
2729 lock_sock(sk);
2730 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
2731 release_sock(sk);
2732 diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
2733 index 96238ba95f2b..de6662b14e1f 100644
2734 --- a/net/ceph/auth_x.c
2735 +++ b/net/ceph/auth_x.c
2736 @@ -13,8 +13,6 @@
2737 #include "auth_x.h"
2738 #include "auth_x_protocol.h"
2739
2740 -#define TEMP_TICKET_BUF_LEN 256
2741 -
2742 static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
2743
2744 static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
2745 @@ -64,7 +62,7 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret,
2746 }
2747
2748 static int ceph_x_decrypt(struct ceph_crypto_key *secret,
2749 - void **p, void *end, void *obuf, size_t olen)
2750 + void **p, void *end, void **obuf, size_t olen)
2751 {
2752 struct ceph_x_encrypt_header head;
2753 size_t head_len = sizeof(head);
2754 @@ -75,8 +73,14 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
2755 return -EINVAL;
2756
2757 dout("ceph_x_decrypt len %d\n", len);
2758 - ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
2759 - *p, len);
2760 + if (*obuf == NULL) {
2761 + *obuf = kmalloc(len, GFP_NOFS);
2762 + if (!*obuf)
2763 + return -ENOMEM;
2764 + olen = len;
2765 + }
2766 +
2767 + ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
2768 if (ret)
2769 return ret;
2770 if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
2771 @@ -129,139 +133,120 @@ static void remove_ticket_handler(struct ceph_auth_client *ac,
2772 kfree(th);
2773 }
2774
2775 -static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
2776 - struct ceph_crypto_key *secret,
2777 - void *buf, void *end)
2778 +static int process_one_ticket(struct ceph_auth_client *ac,
2779 + struct ceph_crypto_key *secret,
2780 + void **p, void *end)
2781 {
2782 struct ceph_x_info *xi = ac->private;
2783 - int num;
2784 - void *p = buf;
2785 + int type;
2786 + u8 tkt_struct_v, blob_struct_v;
2787 + struct ceph_x_ticket_handler *th;
2788 + void *dbuf = NULL;
2789 + void *dp, *dend;
2790 + int dlen;
2791 + char is_enc;
2792 + struct timespec validity;
2793 + struct ceph_crypto_key old_key;
2794 + void *ticket_buf = NULL;
2795 + void *tp, *tpend;
2796 + struct ceph_timespec new_validity;
2797 + struct ceph_crypto_key new_session_key;
2798 + struct ceph_buffer *new_ticket_blob;
2799 + unsigned long new_expires, new_renew_after;
2800 + u64 new_secret_id;
2801 int ret;
2802 - char *dbuf;
2803 - char *ticket_buf;
2804 - u8 reply_struct_v;
2805
2806 - dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
2807 - if (!dbuf)
2808 - return -ENOMEM;
2809 + ceph_decode_need(p, end, sizeof(u32) + 1, bad);
2810
2811 - ret = -ENOMEM;
2812 - ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
2813 - if (!ticket_buf)
2814 - goto out_dbuf;
2815 + type = ceph_decode_32(p);
2816 + dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
2817
2818 - ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
2819 - reply_struct_v = ceph_decode_8(&p);
2820 - if (reply_struct_v != 1)
2821 + tkt_struct_v = ceph_decode_8(p);
2822 + if (tkt_struct_v != 1)
2823 goto bad;
2824 - num = ceph_decode_32(&p);
2825 - dout("%d tickets\n", num);
2826 - while (num--) {
2827 - int type;
2828 - u8 tkt_struct_v, blob_struct_v;
2829 - struct ceph_x_ticket_handler *th;
2830 - void *dp, *dend;
2831 - int dlen;
2832 - char is_enc;
2833 - struct timespec validity;
2834 - struct ceph_crypto_key old_key;
2835 - void *tp, *tpend;
2836 - struct ceph_timespec new_validity;
2837 - struct ceph_crypto_key new_session_key;
2838 - struct ceph_buffer *new_ticket_blob;
2839 - unsigned long new_expires, new_renew_after;
2840 - u64 new_secret_id;
2841 -
2842 - ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
2843 -
2844 - type = ceph_decode_32(&p);
2845 - dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
2846 -
2847 - tkt_struct_v = ceph_decode_8(&p);
2848 - if (tkt_struct_v != 1)
2849 - goto bad;
2850 -
2851 - th = get_ticket_handler(ac, type);
2852 - if (IS_ERR(th)) {
2853 - ret = PTR_ERR(th);
2854 - goto out;
2855 - }
2856
2857 - /* blob for me */
2858 - dlen = ceph_x_decrypt(secret, &p, end, dbuf,
2859 - TEMP_TICKET_BUF_LEN);
2860 - if (dlen <= 0) {
2861 - ret = dlen;
2862 - goto out;
2863 - }
2864 - dout(" decrypted %d bytes\n", dlen);
2865 - dend = dbuf + dlen;
2866 - dp = dbuf;
2867 + th = get_ticket_handler(ac, type);
2868 + if (IS_ERR(th)) {
2869 + ret = PTR_ERR(th);
2870 + goto out;
2871 + }
2872
2873 - tkt_struct_v = ceph_decode_8(&dp);
2874 - if (tkt_struct_v != 1)
2875 - goto bad;
2876 + /* blob for me */
2877 + dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
2878 + if (dlen <= 0) {
2879 + ret = dlen;
2880 + goto out;
2881 + }
2882 + dout(" decrypted %d bytes\n", dlen);
2883 + dp = dbuf;
2884 + dend = dp + dlen;
2885
2886 - memcpy(&old_key, &th->session_key, sizeof(old_key));
2887 - ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
2888 - if (ret)
2889 - goto out;
2890 + tkt_struct_v = ceph_decode_8(&dp);
2891 + if (tkt_struct_v != 1)
2892 + goto bad;
2893
2894 - ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
2895 - ceph_decode_timespec(&validity, &new_validity);
2896 - new_expires = get_seconds() + validity.tv_sec;
2897 - new_renew_after = new_expires - (validity.tv_sec / 4);
2898 - dout(" expires=%lu renew_after=%lu\n", new_expires,
2899 - new_renew_after);
2900 + memcpy(&old_key, &th->session_key, sizeof(old_key));
2901 + ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
2902 + if (ret)
2903 + goto out;
2904
2905 - /* ticket blob for service */
2906 - ceph_decode_8_safe(&p, end, is_enc, bad);
2907 - tp = ticket_buf;
2908 - if (is_enc) {
2909 - /* encrypted */
2910 - dout(" encrypted ticket\n");
2911 - dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf,
2912 - TEMP_TICKET_BUF_LEN);
2913 - if (dlen < 0) {
2914 - ret = dlen;
2915 - goto out;
2916 - }
2917 - dlen = ceph_decode_32(&tp);
2918 - } else {
2919 - /* unencrypted */
2920 - ceph_decode_32_safe(&p, end, dlen, bad);
2921 - ceph_decode_need(&p, end, dlen, bad);
2922 - ceph_decode_copy(&p, ticket_buf, dlen);
2923 + ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
2924 + ceph_decode_timespec(&validity, &new_validity);
2925 + new_expires = get_seconds() + validity.tv_sec;
2926 + new_renew_after = new_expires - (validity.tv_sec / 4);
2927 + dout(" expires=%lu renew_after=%lu\n", new_expires,
2928 + new_renew_after);
2929 +
2930 + /* ticket blob for service */
2931 + ceph_decode_8_safe(p, end, is_enc, bad);
2932 + if (is_enc) {
2933 + /* encrypted */
2934 + dout(" encrypted ticket\n");
2935 + dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
2936 + if (dlen < 0) {
2937 + ret = dlen;
2938 + goto out;
2939 }
2940 - tpend = tp + dlen;
2941 - dout(" ticket blob is %d bytes\n", dlen);
2942 - ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
2943 - blob_struct_v = ceph_decode_8(&tp);
2944 - new_secret_id = ceph_decode_64(&tp);
2945 - ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
2946 - if (ret)
2947 + tp = ticket_buf;
2948 + dlen = ceph_decode_32(&tp);
2949 + } else {
2950 + /* unencrypted */
2951 + ceph_decode_32_safe(p, end, dlen, bad);
2952 + ticket_buf = kmalloc(dlen, GFP_NOFS);
2953 + if (!ticket_buf) {
2954 + ret = -ENOMEM;
2955 goto out;
2956 -
2957 - /* all is well, update our ticket */
2958 - ceph_crypto_key_destroy(&th->session_key);
2959 - if (th->ticket_blob)
2960 - ceph_buffer_put(th->ticket_blob);
2961 - th->session_key = new_session_key;
2962 - th->ticket_blob = new_ticket_blob;
2963 - th->validity = new_validity;
2964 - th->secret_id = new_secret_id;
2965 - th->expires = new_expires;
2966 - th->renew_after = new_renew_after;
2967 - dout(" got ticket service %d (%s) secret_id %lld len %d\n",
2968 - type, ceph_entity_type_name(type), th->secret_id,
2969 - (int)th->ticket_blob->vec.iov_len);
2970 - xi->have_keys |= th->service;
2971 + }
2972 + tp = ticket_buf;
2973 + ceph_decode_need(p, end, dlen, bad);
2974 + ceph_decode_copy(p, ticket_buf, dlen);
2975 }
2976 + tpend = tp + dlen;
2977 + dout(" ticket blob is %d bytes\n", dlen);
2978 + ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
2979 + blob_struct_v = ceph_decode_8(&tp);
2980 + new_secret_id = ceph_decode_64(&tp);
2981 + ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
2982 + if (ret)
2983 + goto out;
2984 +
2985 + /* all is well, update our ticket */
2986 + ceph_crypto_key_destroy(&th->session_key);
2987 + if (th->ticket_blob)
2988 + ceph_buffer_put(th->ticket_blob);
2989 + th->session_key = new_session_key;
2990 + th->ticket_blob = new_ticket_blob;
2991 + th->validity = new_validity;
2992 + th->secret_id = new_secret_id;
2993 + th->expires = new_expires;
2994 + th->renew_after = new_renew_after;
2995 + dout(" got ticket service %d (%s) secret_id %lld len %d\n",
2996 + type, ceph_entity_type_name(type), th->secret_id,
2997 + (int)th->ticket_blob->vec.iov_len);
2998 + xi->have_keys |= th->service;
2999
3000 - ret = 0;
3001 out:
3002 kfree(ticket_buf);
3003 -out_dbuf:
3004 kfree(dbuf);
3005 return ret;
3006
3007 @@ -270,6 +255,34 @@ bad:
3008 goto out;
3009 }
3010
3011 +static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
3012 + struct ceph_crypto_key *secret,
3013 + void *buf, void *end)
3014 +{
3015 + void *p = buf;
3016 + u8 reply_struct_v;
3017 + u32 num;
3018 + int ret;
3019 +
3020 + ceph_decode_8_safe(&p, end, reply_struct_v, bad);
3021 + if (reply_struct_v != 1)
3022 + return -EINVAL;
3023 +
3024 + ceph_decode_32_safe(&p, end, num, bad);
3025 + dout("%d tickets\n", num);
3026 +
3027 + while (num--) {
3028 + ret = process_one_ticket(ac, secret, &p, end);
3029 + if (ret)
3030 + return ret;
3031 + }
3032 +
3033 + return 0;
3034 +
3035 +bad:
3036 + return -EINVAL;
3037 +}
3038 +
3039 static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
3040 struct ceph_x_ticket_handler *th,
3041 struct ceph_x_authorizer *au)
3042 @@ -583,13 +596,14 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
3043 struct ceph_x_ticket_handler *th;
3044 int ret = 0;
3045 struct ceph_x_authorize_reply reply;
3046 + void *preply = &reply;
3047 void *p = au->reply_buf;
3048 void *end = p + sizeof(au->reply_buf);
3049
3050 th = get_ticket_handler(ac, au->service);
3051 if (IS_ERR(th))
3052 return PTR_ERR(th);
3053 - ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
3054 + ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
3055 if (ret < 0)
3056 return ret;
3057 if (ret != sizeof(reply))
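After this refactor, ceph_x_decrypt() takes a pointer to the output buffer pointer: pass a NULL buffer to have one kmalloc'd to the ciphertext's real size, or point it at a caller-supplied buffer of olen bytes. Roughly, the two call patterns visible in the hunks above:

	/* on-demand allocation (process_one_ticket); caller kfree()s dbuf */
	void *dbuf = NULL;
	int dlen = ceph_x_decrypt(secret, &p, end, &dbuf, 0);

	/* caller-supplied buffer (ceph_x_verify_authorizer_reply) */
	struct ceph_x_authorize_reply reply;
	void *preply = &reply;
	int ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));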
3058 diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
3059 index b9d7df175700..66e77f380fce 100644
3060 --- a/net/ceph/messenger.c
3061 +++ b/net/ceph/messenger.c
3062 @@ -904,7 +904,7 @@ static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
3063 BUG_ON(page_count > (int)USHRT_MAX);
3064 cursor->page_count = (unsigned short)page_count;
3065 BUG_ON(length > SIZE_MAX - cursor->page_offset);
3066 - cursor->last_piece = (size_t)cursor->page_offset + length <= PAGE_SIZE;
3067 + cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
3068 }
3069
3070 static struct page *
3071 @@ -3144,7 +3144,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
3072 INIT_LIST_HEAD(&m->data);
3073
3074 /* front */
3075 - m->front_max = front_len;
3076 + m->front_alloc_len = front_len;
3077 if (front_len) {
3078 if (front_len > PAGE_CACHE_SIZE) {
3079 m->front.iov_base = __vmalloc(front_len, flags,
3080 @@ -3319,8 +3319,8 @@ EXPORT_SYMBOL(ceph_msg_last_put);
3081
3082 void ceph_msg_dump(struct ceph_msg *msg)
3083 {
3084 - pr_debug("msg_dump %p (front_max %d length %zd)\n", msg,
3085 - msg->front_max, msg->data_length);
3086 + pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
3087 + msg->front_alloc_len, msg->data_length);
3088 print_hex_dump(KERN_DEBUG, "header: ",
3089 DUMP_PREFIX_OFFSET, 16, 1,
3090 &msg->hdr, sizeof(msg->hdr), true);
3091 diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
3092 index 1fe25cd29d0e..dbcbf5a4707f 100644
3093 --- a/net/ceph/mon_client.c
3094 +++ b/net/ceph/mon_client.c
3095 @@ -152,7 +152,7 @@ static int __open_session(struct ceph_mon_client *monc)
3096 /* initiatiate authentication handshake */
3097 ret = ceph_auth_build_hello(monc->auth,
3098 monc->m_auth->front.iov_base,
3099 - monc->m_auth->front_max);
3100 + monc->m_auth->front_alloc_len);
3101 __send_prepared_auth_request(monc, ret);
3102 } else {
3103 dout("open_session mon%d already open\n", monc->cur_mon);
3104 @@ -196,7 +196,7 @@ static void __send_subscribe(struct ceph_mon_client *monc)
3105 int num;
3106
3107 p = msg->front.iov_base;
3108 - end = p + msg->front_max;
3109 + end = p + msg->front_alloc_len;
3110
3111 num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap;
3112 ceph_encode_32(&p, num);
3113 @@ -897,7 +897,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
3114 ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
3115 msg->front.iov_len,
3116 monc->m_auth->front.iov_base,
3117 - monc->m_auth->front_max);
3118 + monc->m_auth->front_alloc_len);
3119 if (ret < 0) {
3120 monc->client->auth_err = ret;
3121 wake_up_all(&monc->client->auth_wq);
3122 @@ -939,7 +939,7 @@ static int __validate_auth(struct ceph_mon_client *monc)
3123 return 0;
3124
3125 ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
3126 - monc->m_auth->front_max);
3127 + monc->m_auth->front_alloc_len);
3128 if (ret <= 0)
3129 return ret; /* either an error, or no need to authenticate */
3130 __send_prepared_auth_request(monc, ret);
3131 @@ -1041,7 +1041,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
3132 if (!m) {
3133 pr_info("alloc_msg unknown type %d\n", type);
3134 *skip = 1;
3135 + } else if (front_len > m->front_alloc_len) {
3136 + pr_warning("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
3137 + front_len, m->front_alloc_len,
3138 + (unsigned int)con->peer_name.type,
3139 + le64_to_cpu(con->peer_name.num));
3140 + ceph_msg_put(m);
3141 + m = ceph_msg_new(type, front_len, GFP_NOFS, false);
3142 }
3143 +
3144 return m;
3145 }
3146
3147 diff --git a/security/commoncap.c b/security/commoncap.c
3148 index c44b6fe6648e..c9219a66b7c6 100644
3149 --- a/security/commoncap.c
3150 +++ b/security/commoncap.c
3151 @@ -421,6 +421,9 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
3152 cpu_caps->inheritable.cap[i] = le32_to_cpu(caps.data[i].inheritable);
3153 }
3154
3155 + cpu_caps->permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
3156 + cpu_caps->inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
3157 +
3158 return 0;
3159 }
3160
3161 diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
3162 index 9b7746c9546f..76bfeb3c3e30 100644
3163 --- a/sound/soc/codecs/max98090.c
3164 +++ b/sound/soc/codecs/max98090.c
3165 @@ -2234,7 +2234,7 @@ static int max98090_probe(struct snd_soc_codec *codec)
3166 /* Register for interrupts */
3167 dev_dbg(codec->dev, "irq = %d\n", max98090->irq);
3168
3169 - ret = request_threaded_irq(max98090->irq, NULL,
3170 + ret = devm_request_threaded_irq(codec->dev, max98090->irq, NULL,
3171 max98090_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
3172 "max98090_interrupt", codec);
3173 if (ret < 0) {
3174 diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
3175 index 6dbb17d050c9..ca1e999026e5 100644
3176 --- a/sound/soc/codecs/wm_adsp.c
3177 +++ b/sound/soc/codecs/wm_adsp.c
3178 @@ -1284,3 +1284,5 @@ int wm_adsp2_init(struct wm_adsp *adsp, bool dvfs)
3179 return 0;
3180 }
3181 EXPORT_SYMBOL_GPL(wm_adsp2_init);
3182 +
3183 +MODULE_LICENSE("GPL v2");
3184 diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
3185 index 6f4dd7543e82..95a9b07bbe96 100644
3186 --- a/sound/soc/pxa/pxa-ssp.c
3187 +++ b/sound/soc/pxa/pxa-ssp.c
3188 @@ -757,9 +757,7 @@ static int pxa_ssp_remove(struct snd_soc_dai *dai)
3189 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \
3190 SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
3191
3192 -#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
3193 - SNDRV_PCM_FMTBIT_S24_LE | \
3194 - SNDRV_PCM_FMTBIT_S32_LE)
3195 +#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
3196
3197 static const struct snd_soc_dai_ops pxa_ssp_dai_ops = {
3198 .startup = pxa_ssp_startup,
3199 diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
3200 index 82ebb1a51479..5c9b5e4f94c3 100644
3201 --- a/sound/soc/samsung/i2s.c
3202 +++ b/sound/soc/samsung/i2s.c
3203 @@ -853,11 +853,9 @@ static int i2s_suspend(struct snd_soc_dai *dai)
3204 {
3205 struct i2s_dai *i2s = to_info(dai);
3206
3207 - if (dai->active) {
3208 - i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
3209 - i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
3210 - i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
3211 - }
3212 + i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
3213 + i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
3214 + i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
3215
3216 return 0;
3217 }
3218 @@ -866,11 +864,9 @@ static int i2s_resume(struct snd_soc_dai *dai)
3219 {
3220 struct i2s_dai *i2s = to_info(dai);
3221
3222 - if (dai->active) {
3223 - writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
3224 - writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
3225 - writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
3226 - }
3227 + writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
3228 + writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
3229 + writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
3230
3231 return 0;
3232 }
3233 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
3234 index ccb6be4d658d..02d26915b61d 100644
3235 --- a/sound/soc/soc-pcm.c
3236 +++ b/sound/soc/soc-pcm.c
3237 @@ -1886,6 +1886,7 @@ int soc_dpcm_runtime_update(struct snd_soc_dapm_widget *widget)
3238 dpcm_be_disconnect(fe, SNDRV_PCM_STREAM_PLAYBACK);
3239 }
3240
3241 + dpcm_path_put(&list);
3242 capture:
3243 /* skip if FE doesn't have capture capability */
3244 if (!fe->cpu_dai->driver->capture.channels_min)
3245 diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
3246 index 0a63658065f0..2cee2b79b4de 100644
3247 --- a/tools/testing/selftests/Makefile
3248 +++ b/tools/testing/selftests/Makefile
3249 @@ -4,6 +4,7 @@ TARGETS += efivarfs
3250 TARGETS += kcmp
3251 TARGETS += memory-hotplug
3252 TARGETS += mqueue
3253 +TARGETS += mount
3254 TARGETS += net
3255 TARGETS += ptrace
3256 TARGETS += vm
3257 diff --git a/tools/testing/selftests/mount/Makefile b/tools/testing/selftests/mount/Makefile
3258 new file mode 100644
3259 index 000000000000..337d853c2b72
3260 --- /dev/null
3261 +++ b/tools/testing/selftests/mount/Makefile
3262 @@ -0,0 +1,17 @@
3263 +# Makefile for mount selftests.
3264 +
3265 +all: unprivileged-remount-test
3266 +
3267 +unprivileged-remount-test: unprivileged-remount-test.c
3268 + gcc -Wall -O2 unprivileged-remount-test.c -o unprivileged-remount-test
3269 +
3270 +# Allow specific tests to be selected.
3271 +test_unprivileged_remount: unprivileged-remount-test
3272 + @if [ -f /proc/self/uid_map ] ; then ./unprivileged-remount-test ; fi
3273 +
3274 +run_tests: all test_unprivileged_remount
3275 +
3276 +clean:
3277 + rm -f unprivileged-remount-test
3278 +
3279 +.PHONY: all test_unprivileged_remount
3280 diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c
3281 new file mode 100644
3282 index 000000000000..1b3ff2fda4d0
3283 --- /dev/null
3284 +++ b/tools/testing/selftests/mount/unprivileged-remount-test.c
3285 @@ -0,0 +1,242 @@
3286 +#define _GNU_SOURCE
3287 +#include <sched.h>
3288 +#include <stdio.h>
3289 +#include <errno.h>
3290 +#include <string.h>
3291 +#include <sys/types.h>
3292 +#include <sys/mount.h>
3293 +#include <sys/wait.h>
3294 +#include <stdlib.h>
3295 +#include <unistd.h>
3296 +#include <fcntl.h>
3297 +#include <grp.h>
3298 +#include <stdbool.h>
3299 +#include <stdarg.h>
3300 +
3301 +#ifndef CLONE_NEWNS
3302 +# define CLONE_NEWNS 0x00020000
3303 +#endif
3304 +#ifndef CLONE_NEWUTS
3305 +# define CLONE_NEWUTS 0x04000000
3306 +#endif
3307 +#ifndef CLONE_NEWIPC
3308 +# define CLONE_NEWIPC 0x08000000
3309 +#endif
3310 +#ifndef CLONE_NEWNET
3311 +# define CLONE_NEWNET 0x40000000
3312 +#endif
3313 +#ifndef CLONE_NEWUSER
3314 +# define CLONE_NEWUSER 0x10000000
3315 +#endif
3316 +#ifndef CLONE_NEWPID
3317 +# define CLONE_NEWPID 0x20000000
3318 +#endif
3319 +
3320 +#ifndef MS_RELATIME
3321 +#define MS_RELATIME (1 << 21)
3322 +#endif
3323 +#ifndef MS_STRICTATIME
3324 +#define MS_STRICTATIME (1 << 24)
3325 +#endif
3326 +
3327 +static void die(char *fmt, ...)
3328 +{
3329 + va_list ap;
3330 + va_start(ap, fmt);
3331 + vfprintf(stderr, fmt, ap);
3332 + va_end(ap);
3333 + exit(EXIT_FAILURE);
3334 +}
3335 +
3336 +static void write_file(char *filename, char *fmt, ...)
3337 +{
3338 + char buf[4096];
3339 + int fd;
3340 + ssize_t written;
3341 + int buf_len;
3342 + va_list ap;
3343 +
3344 + va_start(ap, fmt);
3345 + buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
3346 + va_end(ap);
3347 + if (buf_len < 0) {
3348 + die("vsnprintf failed: %s\n",
3349 + strerror(errno));
3350 + }
3351 + if (buf_len >= sizeof(buf)) {
3352 + die("vsnprintf output truncated\n");
3353 + }
3354 +
3355 + fd = open(filename, O_WRONLY);
3356 + if (fd < 0) {
3357 + die("open of %s failed: %s\n",
3358 + filename, strerror(errno));
3359 + }
3360 + written = write(fd, buf, buf_len);
3361 + if (written != buf_len) {
3362 + if (written >= 0) {
3363 + die("short write to %s\n", filename);
3364 + } else {
3365 + die("write to %s failed: %s\n",
3366 + filename, strerror(errno));
3367 + }
3368 + }
3369 + if (close(fd) != 0) {
3370 + die("close of %s failed: %s\n",
3371 + filename, strerror(errno));
3372 + }
3373 +}
3374 +
3375 +static void create_and_enter_userns(void)
3376 +{
3377 + uid_t uid;
3378 + gid_t gid;
3379 +
3380 + uid = getuid();
3381 + gid = getgid();
3382 +
3383 +	if (unshare(CLONE_NEWUSER) != 0) {
3384 + die("unshare(CLONE_NEWUSER) failed: %s\n",
3385 + strerror(errno));
3386 + }
3387 +
3388 + write_file("/proc/self/uid_map", "0 %d 1", uid);
3389 + write_file("/proc/self/gid_map", "0 %d 1", gid);
3390 +
3391 + if (setgroups(0, NULL) != 0) {
3392 + die("setgroups failed: %s\n",
3393 + strerror(errno));
3394 + }
3395 + if (setgid(0) != 0) {
3396 +		die("setgid(0) failed %s\n",
3397 + strerror(errno));
3398 + }
3399 + if (setuid(0) != 0) {
3400 + die("setuid(0) failed %s\n",
3401 + strerror(errno));
3402 + }
3403 +}
3404 +
3405 +static
3406 +bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
3407 +{
3408 + pid_t child;
3409 +
3410 + child = fork();
3411 + if (child == -1) {
3412 + die("fork failed: %s\n",
3413 + strerror(errno));
3414 + }
3415 + if (child != 0) { /* parent */
3416 + pid_t pid;
3417 + int status;
3418 + pid = waitpid(child, &status, 0);
3419 + if (pid == -1) {
3420 + die("waitpid failed: %s\n",
3421 + strerror(errno));
3422 + }
3423 + if (pid != child) {
3424 + die("waited for %d got %d\n",
3425 + child, pid);
3426 + }
3427 + if (!WIFEXITED(status)) {
3428 + die("child did not terminate cleanly\n");
3429 + }
3430 + return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false;
3431 + }
3432 +
3433 + create_and_enter_userns();
3434 + if (unshare(CLONE_NEWNS) != 0) {
3435 + die("unshare(CLONE_NEWNS) failed: %s\n",
3436 + strerror(errno));
3437 + }
3438 +
3439 + if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) {
3440 + die("mount of /tmp failed: %s\n",
3441 + strerror(errno));
3442 + }
3443 +
3444 + create_and_enter_userns();
3445 +
3446 + if (unshare(CLONE_NEWNS) != 0) {
3447 + die("unshare(CLONE_NEWNS) failed: %s\n",
3448 + strerror(errno));
3449 + }
3450 +
3451 + if (mount("/tmp", "/tmp", "none",
3452 + MS_REMOUNT | MS_BIND | remount_flags, NULL) != 0) {
3453 + /* system("cat /proc/self/mounts"); */
3454 + die("remount of /tmp failed: %s\n",
3455 + strerror(errno));
3456 + }
3457 +
3458 + if (mount("/tmp", "/tmp", "none",
3459 + MS_REMOUNT | MS_BIND | invalid_flags, NULL) == 0) {
3460 + /* system("cat /proc/self/mounts"); */
3461 + die("remount of /tmp with invalid flags "
3462 + "succeeded unexpectedly\n");
3463 + }
3464 + exit(EXIT_SUCCESS);
3465 +}
3466 +
3467 +static bool test_unpriv_remount_simple(int mount_flags)
3468 +{
3469 + return test_unpriv_remount(mount_flags, mount_flags, 0);
3470 +}
3471 +
3472 +static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags)
3473 +{
3474 + return test_unpriv_remount(mount_flags, mount_flags, invalid_flags);
3475 +}
3476 +
3477 +int main(int argc, char **argv)
3478 +{
3479 + if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) {
3480 + die("MS_RDONLY malfunctions\n");
3481 + }
3482 + if (!test_unpriv_remount_simple(MS_NODEV)) {
3483 + die("MS_NODEV malfunctions\n");
3484 + }
3485 + if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) {
3486 + die("MS_NOSUID malfunctions\n");
3487 + }
3488 + if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) {
3489 + die("MS_NOEXEC malfunctions\n");
3490 + }
3491 + if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV,
3492 + MS_NOATIME|MS_NODEV))
3493 + {
3494 + die("MS_RELATIME malfunctions\n");
3495 + }
3496 + if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV,
3497 + MS_NOATIME|MS_NODEV))
3498 + {
3499 + die("MS_STRICTATIME malfunctions\n");
3500 + }
3501 + if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV,
3502 + MS_STRICTATIME|MS_NODEV))
3503 + {
3504 +		die("MS_NOATIME malfunctions\n");
3505 + }
3506 + if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV,
3507 + MS_NOATIME|MS_NODEV))
3508 + {
3509 + die("MS_RELATIME malfunctions\n");
3510 + }
3511 + if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV,
3512 + MS_NOATIME|MS_NODEV))
3513 + {
3514 +		die("MS_STRICTATIME malfunctions\n");
3515 + }
3516 + if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV,
3517 + MS_STRICTATIME|MS_NODEV))
3518 + {
3519 +		die("MS_NOATIME malfunctions\n");
3520 + }
3521 + if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV,
3522 + MS_NOATIME|MS_NODEV))
3523 + {
3524 + die("Default atime malfunctions\n");
3525 + }
3526 + return EXIT_SUCCESS;
3527 +}