Magellan Linux

Annotation of /trunk/kernel-alx/patches-3.14/0118-3.14.19-all-fixes.patch



Revision 2506
Fri Oct 17 07:55:45 2014 UTC by niro
File size: 154080 bytes
-patches for 3.14
1 niro 2506 diff --git a/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt b/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
2     index 46f344965313..4eb7997674a0 100644
3     --- a/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
4     +++ b/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
5     @@ -1,7 +1,7 @@
6     ADI AXI-SPDIF controller
7    
8     Required properties:
9     - - compatible : Must be "adi,axi-spdif-1.00.a"
10     + - compatible : Must be "adi,axi-spdif-tx-1.00.a"
11     - reg : Must contain SPDIF core's registers location and length
12     - clocks : Pairs of phandle and specifier referencing the controller's clocks.
13     The controller expects two clocks, the clock used for the AXI interface and
14     diff --git a/Makefile b/Makefile
15     index 05279d4f44c9..b1746b486646 100644
16     --- a/Makefile
17     +++ b/Makefile
18     @@ -1,6 +1,6 @@
19     VERSION = 3
20     PATCHLEVEL = 14
21     -SUBLEVEL = 18
22     +SUBLEVEL = 19
23     EXTRAVERSION =
24     NAME = Remembering Coco
25    
26     diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
27     index 331b837cec57..270cb3c6c498 100644
28     --- a/arch/mips/cavium-octeon/setup.c
29     +++ b/arch/mips/cavium-octeon/setup.c
30     @@ -458,6 +458,18 @@ static void octeon_halt(void)
31     octeon_kill_core(NULL);
32     }
33    
34     +static char __read_mostly octeon_system_type[80];
35     +
36     +static int __init init_octeon_system_type(void)
37     +{
38     + snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
39     + cvmx_board_type_to_string(octeon_bootinfo->board_type),
40     + octeon_model_get_string(read_c0_prid()));
41     +
42     + return 0;
43     +}
44     +early_initcall(init_octeon_system_type);
45     +
46     /**
47     * Return a string representing the system type
48     *
49     @@ -465,11 +477,7 @@ static void octeon_halt(void)
50     */
51     const char *octeon_board_type_string(void)
52     {
53     - static char name[80];
54     - sprintf(name, "%s (%s)",
55     - cvmx_board_type_to_string(octeon_bootinfo->board_type),
56     - octeon_model_get_string(read_c0_prid()));
57     - return name;
58     + return octeon_system_type;
59     }
60    
61     const char *get_system_type(void)
62     diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
63     index 7bba9da110af..6d019ca1bead 100644
64     --- a/arch/mips/include/asm/ptrace.h
65     +++ b/arch/mips/include/asm/ptrace.h
66     @@ -23,7 +23,7 @@
67     struct pt_regs {
68     #ifdef CONFIG_32BIT
69     /* Pad bytes for argument save space on the stack. */
70     - unsigned long pad0[6];
71     + unsigned long pad0[8];
72     #endif
73    
74     /* Saved main processor registers. */
75     diff --git a/arch/mips/include/asm/reg.h b/arch/mips/include/asm/reg.h
76     index 910e71a12466..b8343ccbc989 100644
77     --- a/arch/mips/include/asm/reg.h
78     +++ b/arch/mips/include/asm/reg.h
79     @@ -12,116 +12,194 @@
80     #ifndef __ASM_MIPS_REG_H
81     #define __ASM_MIPS_REG_H
82    
83     -
84     -#if defined(CONFIG_32BIT) || defined(WANT_COMPAT_REG_H)
85     -
86     -#define EF_R0 6
87     -#define EF_R1 7
88     -#define EF_R2 8
89     -#define EF_R3 9
90     -#define EF_R4 10
91     -#define EF_R5 11
92     -#define EF_R6 12
93     -#define EF_R7 13
94     -#define EF_R8 14
95     -#define EF_R9 15
96     -#define EF_R10 16
97     -#define EF_R11 17
98     -#define EF_R12 18
99     -#define EF_R13 19
100     -#define EF_R14 20
101     -#define EF_R15 21
102     -#define EF_R16 22
103     -#define EF_R17 23
104     -#define EF_R18 24
105     -#define EF_R19 25
106     -#define EF_R20 26
107     -#define EF_R21 27
108     -#define EF_R22 28
109     -#define EF_R23 29
110     -#define EF_R24 30
111     -#define EF_R25 31
112     +#define MIPS32_EF_R0 6
113     +#define MIPS32_EF_R1 7
114     +#define MIPS32_EF_R2 8
115     +#define MIPS32_EF_R3 9
116     +#define MIPS32_EF_R4 10
117     +#define MIPS32_EF_R5 11
118     +#define MIPS32_EF_R6 12
119     +#define MIPS32_EF_R7 13
120     +#define MIPS32_EF_R8 14
121     +#define MIPS32_EF_R9 15
122     +#define MIPS32_EF_R10 16
123     +#define MIPS32_EF_R11 17
124     +#define MIPS32_EF_R12 18
125     +#define MIPS32_EF_R13 19
126     +#define MIPS32_EF_R14 20
127     +#define MIPS32_EF_R15 21
128     +#define MIPS32_EF_R16 22
129     +#define MIPS32_EF_R17 23
130     +#define MIPS32_EF_R18 24
131     +#define MIPS32_EF_R19 25
132     +#define MIPS32_EF_R20 26
133     +#define MIPS32_EF_R21 27
134     +#define MIPS32_EF_R22 28
135     +#define MIPS32_EF_R23 29
136     +#define MIPS32_EF_R24 30
137     +#define MIPS32_EF_R25 31
138    
139     /*
140     * k0/k1 unsaved
141     */
142     -#define EF_R26 32
143     -#define EF_R27 33
144     +#define MIPS32_EF_R26 32
145     +#define MIPS32_EF_R27 33
146    
147     -#define EF_R28 34
148     -#define EF_R29 35
149     -#define EF_R30 36
150     -#define EF_R31 37
151     +#define MIPS32_EF_R28 34
152     +#define MIPS32_EF_R29 35
153     +#define MIPS32_EF_R30 36
154     +#define MIPS32_EF_R31 37
155    
156     /*
157     * Saved special registers
158     */
159     -#define EF_LO 38
160     -#define EF_HI 39
161     -
162     -#define EF_CP0_EPC 40
163     -#define EF_CP0_BADVADDR 41
164     -#define EF_CP0_STATUS 42
165     -#define EF_CP0_CAUSE 43
166     -#define EF_UNUSED0 44
167     -
168     -#define EF_SIZE 180
169     -
170     -#endif
171     -
172     -#if defined(CONFIG_64BIT) && !defined(WANT_COMPAT_REG_H)
173     -
174     -#define EF_R0 0
175     -#define EF_R1 1
176     -#define EF_R2 2
177     -#define EF_R3 3
178     -#define EF_R4 4
179     -#define EF_R5 5
180     -#define EF_R6 6
181     -#define EF_R7 7
182     -#define EF_R8 8
183     -#define EF_R9 9
184     -#define EF_R10 10
185     -#define EF_R11 11
186     -#define EF_R12 12
187     -#define EF_R13 13
188     -#define EF_R14 14
189     -#define EF_R15 15
190     -#define EF_R16 16
191     -#define EF_R17 17
192     -#define EF_R18 18
193     -#define EF_R19 19
194     -#define EF_R20 20
195     -#define EF_R21 21
196     -#define EF_R22 22
197     -#define EF_R23 23
198     -#define EF_R24 24
199     -#define EF_R25 25
200     +#define MIPS32_EF_LO 38
201     +#define MIPS32_EF_HI 39
202     +
203     +#define MIPS32_EF_CP0_EPC 40
204     +#define MIPS32_EF_CP0_BADVADDR 41
205     +#define MIPS32_EF_CP0_STATUS 42
206     +#define MIPS32_EF_CP0_CAUSE 43
207     +#define MIPS32_EF_UNUSED0 44
208     +
209     +#define MIPS32_EF_SIZE 180
210     +
211     +#define MIPS64_EF_R0 0
212     +#define MIPS64_EF_R1 1
213     +#define MIPS64_EF_R2 2
214     +#define MIPS64_EF_R3 3
215     +#define MIPS64_EF_R4 4
216     +#define MIPS64_EF_R5 5
217     +#define MIPS64_EF_R6 6
218     +#define MIPS64_EF_R7 7
219     +#define MIPS64_EF_R8 8
220     +#define MIPS64_EF_R9 9
221     +#define MIPS64_EF_R10 10
222     +#define MIPS64_EF_R11 11
223     +#define MIPS64_EF_R12 12
224     +#define MIPS64_EF_R13 13
225     +#define MIPS64_EF_R14 14
226     +#define MIPS64_EF_R15 15
227     +#define MIPS64_EF_R16 16
228     +#define MIPS64_EF_R17 17
229     +#define MIPS64_EF_R18 18
230     +#define MIPS64_EF_R19 19
231     +#define MIPS64_EF_R20 20
232     +#define MIPS64_EF_R21 21
233     +#define MIPS64_EF_R22 22
234     +#define MIPS64_EF_R23 23
235     +#define MIPS64_EF_R24 24
236     +#define MIPS64_EF_R25 25
237    
238     /*
239     * k0/k1 unsaved
240     */
241     -#define EF_R26 26
242     -#define EF_R27 27
243     +#define MIPS64_EF_R26 26
244     +#define MIPS64_EF_R27 27
245    
246    
247     -#define EF_R28 28
248     -#define EF_R29 29
249     -#define EF_R30 30
250     -#define EF_R31 31
251     +#define MIPS64_EF_R28 28
252     +#define MIPS64_EF_R29 29
253     +#define MIPS64_EF_R30 30
254     +#define MIPS64_EF_R31 31
255    
256     /*
257     * Saved special registers
258     */
259     -#define EF_LO 32
260     -#define EF_HI 33
261     -
262     -#define EF_CP0_EPC 34
263     -#define EF_CP0_BADVADDR 35
264     -#define EF_CP0_STATUS 36
265     -#define EF_CP0_CAUSE 37
266     -
267     -#define EF_SIZE 304 /* size in bytes */
268     +#define MIPS64_EF_LO 32
269     +#define MIPS64_EF_HI 33
270     +
271     +#define MIPS64_EF_CP0_EPC 34
272     +#define MIPS64_EF_CP0_BADVADDR 35
273     +#define MIPS64_EF_CP0_STATUS 36
274     +#define MIPS64_EF_CP0_CAUSE 37
275     +
276     +#define MIPS64_EF_SIZE 304 /* size in bytes */
277     +
278     +#if defined(CONFIG_32BIT)
279     +
280     +#define EF_R0 MIPS32_EF_R0
281     +#define EF_R1 MIPS32_EF_R1
282     +#define EF_R2 MIPS32_EF_R2
283     +#define EF_R3 MIPS32_EF_R3
284     +#define EF_R4 MIPS32_EF_R4
285     +#define EF_R5 MIPS32_EF_R5
286     +#define EF_R6 MIPS32_EF_R6
287     +#define EF_R7 MIPS32_EF_R7
288     +#define EF_R8 MIPS32_EF_R8
289     +#define EF_R9 MIPS32_EF_R9
290     +#define EF_R10 MIPS32_EF_R10
291     +#define EF_R11 MIPS32_EF_R11
292     +#define EF_R12 MIPS32_EF_R12
293     +#define EF_R13 MIPS32_EF_R13
294     +#define EF_R14 MIPS32_EF_R14
295     +#define EF_R15 MIPS32_EF_R15
296     +#define EF_R16 MIPS32_EF_R16
297     +#define EF_R17 MIPS32_EF_R17
298     +#define EF_R18 MIPS32_EF_R18
299     +#define EF_R19 MIPS32_EF_R19
300     +#define EF_R20 MIPS32_EF_R20
301     +#define EF_R21 MIPS32_EF_R21
302     +#define EF_R22 MIPS32_EF_R22
303     +#define EF_R23 MIPS32_EF_R23
304     +#define EF_R24 MIPS32_EF_R24
305     +#define EF_R25 MIPS32_EF_R25
306     +#define EF_R26 MIPS32_EF_R26
307     +#define EF_R27 MIPS32_EF_R27
308     +#define EF_R28 MIPS32_EF_R28
309     +#define EF_R29 MIPS32_EF_R29
310     +#define EF_R30 MIPS32_EF_R30
311     +#define EF_R31 MIPS32_EF_R31
312     +#define EF_LO MIPS32_EF_LO
313     +#define EF_HI MIPS32_EF_HI
314     +#define EF_CP0_EPC MIPS32_EF_CP0_EPC
315     +#define EF_CP0_BADVADDR MIPS32_EF_CP0_BADVADDR
316     +#define EF_CP0_STATUS MIPS32_EF_CP0_STATUS
317     +#define EF_CP0_CAUSE MIPS32_EF_CP0_CAUSE
318     +#define EF_UNUSED0 MIPS32_EF_UNUSED0
319     +#define EF_SIZE MIPS32_EF_SIZE
320     +
321     +#elif defined(CONFIG_64BIT)
322     +
323     +#define EF_R0 MIPS64_EF_R0
324     +#define EF_R1 MIPS64_EF_R1
325     +#define EF_R2 MIPS64_EF_R2
326     +#define EF_R3 MIPS64_EF_R3
327     +#define EF_R4 MIPS64_EF_R4
328     +#define EF_R5 MIPS64_EF_R5
329     +#define EF_R6 MIPS64_EF_R6
330     +#define EF_R7 MIPS64_EF_R7
331     +#define EF_R8 MIPS64_EF_R8
332     +#define EF_R9 MIPS64_EF_R9
333     +#define EF_R10 MIPS64_EF_R10
334     +#define EF_R11 MIPS64_EF_R11
335     +#define EF_R12 MIPS64_EF_R12
336     +#define EF_R13 MIPS64_EF_R13
337     +#define EF_R14 MIPS64_EF_R14
338     +#define EF_R15 MIPS64_EF_R15
339     +#define EF_R16 MIPS64_EF_R16
340     +#define EF_R17 MIPS64_EF_R17
341     +#define EF_R18 MIPS64_EF_R18
342     +#define EF_R19 MIPS64_EF_R19
343     +#define EF_R20 MIPS64_EF_R20
344     +#define EF_R21 MIPS64_EF_R21
345     +#define EF_R22 MIPS64_EF_R22
346     +#define EF_R23 MIPS64_EF_R23
347     +#define EF_R24 MIPS64_EF_R24
348     +#define EF_R25 MIPS64_EF_R25
349     +#define EF_R26 MIPS64_EF_R26
350     +#define EF_R27 MIPS64_EF_R27
351     +#define EF_R28 MIPS64_EF_R28
352     +#define EF_R29 MIPS64_EF_R29
353     +#define EF_R30 MIPS64_EF_R30
354     +#define EF_R31 MIPS64_EF_R31
355     +#define EF_LO MIPS64_EF_LO
356     +#define EF_HI MIPS64_EF_HI
357     +#define EF_CP0_EPC MIPS64_EF_CP0_EPC
358     +#define EF_CP0_BADVADDR MIPS64_EF_CP0_BADVADDR
359     +#define EF_CP0_STATUS MIPS64_EF_CP0_STATUS
360     +#define EF_CP0_CAUSE MIPS64_EF_CP0_CAUSE
361     +#define EF_SIZE MIPS64_EF_SIZE
362    
363     #endif /* CONFIG_64BIT */
364    
365     diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
366     index 7faf5f2bee25..71df942fb77c 100644
367     --- a/arch/mips/kernel/binfmt_elfo32.c
368     +++ b/arch/mips/kernel/binfmt_elfo32.c
369     @@ -72,12 +72,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
370    
371     #include <asm/processor.h>
372    
373     -/*
374     - * When this file is selected, we are definitely running a 64bit kernel.
375     - * So using the right regs define in asm/reg.h
376     - */
377     -#define WANT_COMPAT_REG_H
378     -
379     /* These MUST be defined before elf.h gets included */
380     extern void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs);
381     #define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
382     @@ -149,21 +143,21 @@ void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
383     {
384     int i;
385    
386     - for (i = 0; i < EF_R0; i++)
387     + for (i = 0; i < MIPS32_EF_R0; i++)
388     grp[i] = 0;
389     - grp[EF_R0] = 0;
390     + grp[MIPS32_EF_R0] = 0;
391     for (i = 1; i <= 31; i++)
392     - grp[EF_R0 + i] = (elf_greg_t) regs->regs[i];
393     - grp[EF_R26] = 0;
394     - grp[EF_R27] = 0;
395     - grp[EF_LO] = (elf_greg_t) regs->lo;
396     - grp[EF_HI] = (elf_greg_t) regs->hi;
397     - grp[EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
398     - grp[EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
399     - grp[EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
400     - grp[EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
401     -#ifdef EF_UNUSED0
402     - grp[EF_UNUSED0] = 0;
403     + grp[MIPS32_EF_R0 + i] = (elf_greg_t) regs->regs[i];
404     + grp[MIPS32_EF_R26] = 0;
405     + grp[MIPS32_EF_R27] = 0;
406     + grp[MIPS32_EF_LO] = (elf_greg_t) regs->lo;
407     + grp[MIPS32_EF_HI] = (elf_greg_t) regs->hi;
408     + grp[MIPS32_EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
409     + grp[MIPS32_EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
410     + grp[MIPS32_EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
411     + grp[MIPS32_EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
412     +#ifdef MIPS32_EF_UNUSED0
413     + grp[MIPS32_EF_UNUSED0] = 0;
414     #endif
415     }
416    
417     diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
418     index 5b5ddb231f26..78f18436cdf2 100644
419     --- a/arch/mips/kernel/irq-gic.c
420     +++ b/arch/mips/kernel/irq-gic.c
421     @@ -255,11 +255,13 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
422    
423     /* Setup Intr to Pin mapping */
424     if (pin & GIC_MAP_TO_NMI_MSK) {
425     + int i;
426     +
427     GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
428     /* FIXME: hack to route NMI to all cpu's */
429     - for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
430     + for (i = 0; i < NR_CPUS; i += 32) {
431     GICWRITE(GIC_REG_ADDR(SHARED,
432     - GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
433     + GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)),
434     0xffffffff);
435     }
436     } else {
437     diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
438     index 7da9b76db4d9..60f48febe762 100644
439     --- a/arch/mips/kernel/ptrace.c
440     +++ b/arch/mips/kernel/ptrace.c
441     @@ -170,6 +170,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
442     __get_user(fregs[i], i + (__u64 __user *) data);
443    
444     __get_user(child->thread.fpu.fcr31, data + 64);
445     + child->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
446    
447     /* FIR may not be written. */
448    
449     @@ -265,36 +266,160 @@ int ptrace_set_watch_regs(struct task_struct *child,
450    
451     /* regset get/set implementations */
452    
453     -static int gpr_get(struct task_struct *target,
454     - const struct user_regset *regset,
455     - unsigned int pos, unsigned int count,
456     - void *kbuf, void __user *ubuf)
457     +#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
458     +
459     +static int gpr32_get(struct task_struct *target,
460     + const struct user_regset *regset,
461     + unsigned int pos, unsigned int count,
462     + void *kbuf, void __user *ubuf)
463     {
464     struct pt_regs *regs = task_pt_regs(target);
465     + u32 uregs[ELF_NGREG] = {};
466     + unsigned i;
467    
468     - return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
469     - regs, 0, sizeof(*regs));
470     + for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
471     + /* k0/k1 are copied as zero. */
472     + if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
473     + continue;
474     +
475     + uregs[i] = regs->regs[i - MIPS32_EF_R0];
476     + }
477     +
478     + uregs[MIPS32_EF_LO] = regs->lo;
479     + uregs[MIPS32_EF_HI] = regs->hi;
480     + uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
481     + uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
482     + uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
483     + uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
484     +
485     + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
486     + sizeof(uregs));
487     }
488    
489     -static int gpr_set(struct task_struct *target,
490     - const struct user_regset *regset,
491     - unsigned int pos, unsigned int count,
492     - const void *kbuf, const void __user *ubuf)
493     +static int gpr32_set(struct task_struct *target,
494     + const struct user_regset *regset,
495     + unsigned int pos, unsigned int count,
496     + const void *kbuf, const void __user *ubuf)
497     {
498     - struct pt_regs newregs;
499     - int ret;
500     + struct pt_regs *regs = task_pt_regs(target);
501     + u32 uregs[ELF_NGREG];
502     + unsigned start, num_regs, i;
503     + int err;
504    
505     - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
506     - &newregs,
507     - 0, sizeof(newregs));
508     - if (ret)
509     - return ret;
510     + start = pos / sizeof(u32);
511     + num_regs = count / sizeof(u32);
512    
513     - *task_pt_regs(target) = newregs;
514     + if (start + num_regs > ELF_NGREG)
515     + return -EIO;
516     +
517     + err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
518     + sizeof(uregs));
519     + if (err)
520     + return err;
521     +
522     + for (i = start; i < num_regs; i++) {
523     + /*
524     + * Cast all values to signed here so that if this is a 64-bit
525     + * kernel, the supplied 32-bit values will be sign extended.
526     + */
527     + switch (i) {
528     + case MIPS32_EF_R1 ... MIPS32_EF_R25:
529     + /* k0/k1 are ignored. */
530     + case MIPS32_EF_R28 ... MIPS32_EF_R31:
531     + regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
532     + break;
533     + case MIPS32_EF_LO:
534     + regs->lo = (s32)uregs[i];
535     + break;
536     + case MIPS32_EF_HI:
537     + regs->hi = (s32)uregs[i];
538     + break;
539     + case MIPS32_EF_CP0_EPC:
540     + regs->cp0_epc = (s32)uregs[i];
541     + break;
542     + }
543     + }
544    
545     return 0;
546     }
547    
548     +#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
549     +
550     +#ifdef CONFIG_64BIT
551     +
552     +static int gpr64_get(struct task_struct *target,
553     + const struct user_regset *regset,
554     + unsigned int pos, unsigned int count,
555     + void *kbuf, void __user *ubuf)
556     +{
557     + struct pt_regs *regs = task_pt_regs(target);
558     + u64 uregs[ELF_NGREG] = {};
559     + unsigned i;
560     +
561     + for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
562     + /* k0/k1 are copied as zero. */
563     + if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
564     + continue;
565     +
566     + uregs[i] = regs->regs[i - MIPS64_EF_R0];
567     + }
568     +
569     + uregs[MIPS64_EF_LO] = regs->lo;
570     + uregs[MIPS64_EF_HI] = regs->hi;
571     + uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
572     + uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
573     + uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
574     + uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
575     +
576     + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
577     + sizeof(uregs));
578     +}
579     +
580     +static int gpr64_set(struct task_struct *target,
581     + const struct user_regset *regset,
582     + unsigned int pos, unsigned int count,
583     + const void *kbuf, const void __user *ubuf)
584     +{
585     + struct pt_regs *regs = task_pt_regs(target);
586     + u64 uregs[ELF_NGREG];
587     + unsigned start, num_regs, i;
588     + int err;
589     +
590     + start = pos / sizeof(u64);
591     + num_regs = count / sizeof(u64);
592     +
593     + if (start + num_regs > ELF_NGREG)
594     + return -EIO;
595     +
596     + err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
597     + sizeof(uregs));
598     + if (err)
599     + return err;
600     +
601     + for (i = start; i < num_regs; i++) {
602     + switch (i) {
603     + case MIPS64_EF_R1 ... MIPS64_EF_R25:
604     + /* k0/k1 are ignored. */
605     + case MIPS64_EF_R28 ... MIPS64_EF_R31:
606     + regs->regs[i - MIPS64_EF_R0] = uregs[i];
607     + break;
608     + case MIPS64_EF_LO:
609     + regs->lo = uregs[i];
610     + break;
611     + case MIPS64_EF_HI:
612     + regs->hi = uregs[i];
613     + break;
614     + case MIPS64_EF_CP0_EPC:
615     + regs->cp0_epc = uregs[i];
616     + break;
617     + }
618     + }
619     +
620     + return 0;
621     +}
622     +
623     +#endif /* CONFIG_64BIT */
624     +
625     static int fpr_get(struct task_struct *target,
626     const struct user_regset *regset,
627     unsigned int pos, unsigned int count,
628     @@ -322,14 +447,16 @@ enum mips_regset {
629     REGSET_FPR,
630     };
631    
632     +#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
633     +
634     static const struct user_regset mips_regsets[] = {
635     [REGSET_GPR] = {
636     .core_note_type = NT_PRSTATUS,
637     .n = ELF_NGREG,
638     .size = sizeof(unsigned int),
639     .align = sizeof(unsigned int),
640     - .get = gpr_get,
641     - .set = gpr_set,
642     + .get = gpr32_get,
643     + .set = gpr32_set,
644     },
645     [REGSET_FPR] = {
646     .core_note_type = NT_PRFPREG,
647     @@ -349,14 +476,18 @@ static const struct user_regset_view user_mips_view = {
648     .n = ARRAY_SIZE(mips_regsets),
649     };
650    
651     +#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
652     +
653     +#ifdef CONFIG_64BIT
654     +
655     static const struct user_regset mips64_regsets[] = {
656     [REGSET_GPR] = {
657     .core_note_type = NT_PRSTATUS,
658     .n = ELF_NGREG,
659     .size = sizeof(unsigned long),
660     .align = sizeof(unsigned long),
661     - .get = gpr_get,
662     - .set = gpr_set,
663     + .get = gpr64_get,
664     + .set = gpr64_set,
665     },
666     [REGSET_FPR] = {
667     .core_note_type = NT_PRFPREG,
668     @@ -369,25 +500,26 @@ static const struct user_regset mips64_regsets[] = {
669     };
670    
671     static const struct user_regset_view user_mips64_view = {
672     - .name = "mips",
673     + .name = "mips64",
674     .e_machine = ELF_ARCH,
675     .ei_osabi = ELF_OSABI,
676     .regsets = mips64_regsets,
677     - .n = ARRAY_SIZE(mips_regsets),
678     + .n = ARRAY_SIZE(mips64_regsets),
679     };
680    
681     +#endif /* CONFIG_64BIT */
682     +
683     const struct user_regset_view *task_user_regset_view(struct task_struct *task)
684     {
685     #ifdef CONFIG_32BIT
686     return &user_mips_view;
687     -#endif
688     -
689     +#else
690     #ifdef CONFIG_MIPS32_O32
691     - if (test_thread_flag(TIF_32BIT_REGS))
692     - return &user_mips_view;
693     + if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
694     + return &user_mips_view;
695     #endif
696     -
697     return &user_mips64_view;
698     +#endif
699     }
700    
701     long arch_ptrace(struct task_struct *child, long request,
702     @@ -593,7 +725,7 @@ long arch_ptrace(struct task_struct *child, long request,
703     break;
704     #endif
705     case FPC_CSR:
706     - child->thread.fpu.fcr31 = data;
707     + child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
708     break;
709     case DSP_BASE ... DSP_BASE + 5: {
710     dspreg_t *dregs;
711     diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
712     index c369a5d35527..b897dde93e7a 100644
713     --- a/arch/mips/kernel/unaligned.c
714     +++ b/arch/mips/kernel/unaligned.c
715     @@ -605,7 +605,6 @@ static void emulate_load_store_insn(struct pt_regs *regs,
716     case sdc1_op:
717     die_if_kernel("Unaligned FP access in kernel code", regs);
718     BUG_ON(!used_math());
719     - BUG_ON(!is_fpu_owner());
720    
721     lose_fpu(1); /* Save FPU state for the emulator. */
722     res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
723     diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
724     index b234b1b5ccad..65d452aa1fda 100644
725     --- a/arch/mips/mm/tlbex.c
726     +++ b/arch/mips/mm/tlbex.c
727     @@ -1295,6 +1295,7 @@ static void build_r4000_tlb_refill_handler(void)
728     }
729     #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
730     uasm_l_tlb_huge_update(&l, p);
731     + UASM_i_LW(&p, K0, 0, K1);
732     build_huge_update_entries(&p, htlb_info.huge_pte, K1);
733     build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
734     htlb_info.restore_scratch);
735     diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
736     index ad3025d0880b..f20786825b8f 100644
737     --- a/arch/powerpc/include/asm/machdep.h
738     +++ b/arch/powerpc/include/asm/machdep.h
739     @@ -57,10 +57,10 @@ struct machdep_calls {
740     void (*hpte_removebolted)(unsigned long ea,
741     int psize, int ssize);
742     void (*flush_hash_range)(unsigned long number, int local);
743     - void (*hugepage_invalidate)(struct mm_struct *mm,
744     + void (*hugepage_invalidate)(unsigned long vsid,
745     + unsigned long addr,
746     unsigned char *hpte_slot_array,
747     - unsigned long addr, int psize);
748     -
749     + int psize, int ssize);
750     /* special for kexec, to be called in real mode, linear mapping is
751     * destroyed as well */
752     void (*hpte_clear_all)(void);
753     diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
754     index eb9261024f51..7b3d54fae46f 100644
755     --- a/arch/powerpc/include/asm/pgtable-ppc64.h
756     +++ b/arch/powerpc/include/asm/pgtable-ppc64.h
757     @@ -413,7 +413,7 @@ static inline char *get_hpte_slot_array(pmd_t *pmdp)
758     }
759    
760     extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
761     - pmd_t *pmdp);
762     + pmd_t *pmdp, unsigned long old_pmd);
763     #ifdef CONFIG_TRANSPARENT_HUGEPAGE
764     extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
765     extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
766     diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
767     index d836d945068d..9ecede1e124c 100644
768     --- a/arch/powerpc/include/asm/pte-hash64-64k.h
769     +++ b/arch/powerpc/include/asm/pte-hash64-64k.h
770     @@ -46,11 +46,31 @@
771     * in order to deal with 64K made of 4K HW pages. Thus we override the
772     * generic accessors and iterators here
773     */
774     -#define __real_pte(e,p) ((real_pte_t) { \
775     - (e), (pte_val(e) & _PAGE_COMBO) ? \
776     - (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
777     -#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
778     - (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
779     +#define __real_pte __real_pte
780     +static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
781     +{
782     + real_pte_t rpte;
783     +
784     + rpte.pte = pte;
785     + rpte.hidx = 0;
786     + if (pte_val(pte) & _PAGE_COMBO) {
787     + /*
788     + * Make sure we order the hidx load against the _PAGE_COMBO
789     + * check. The store side ordering is done in __hash_page_4K
790     + */
791     + smp_rmb();
792     + rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
793     + }
794     + return rpte;
795     +}
796     +
797     +static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
798     +{
799     + if ((pte_val(rpte.pte) & _PAGE_COMBO))
800     + return (rpte.hidx >> (index<<2)) & 0xf;
801     + return (pte_val(rpte.pte) >> 12) & 0xf;
802     +}
803     +
804     #define __rpte_to_pte(r) ((r).pte)
805     #define __rpte_sub_valid(rpte, index) \
806     (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
807     diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
808     index 3ea26c25590b..838de8e17dc5 100644
809     --- a/arch/powerpc/mm/hash_native_64.c
810     +++ b/arch/powerpc/mm/hash_native_64.c
811     @@ -418,18 +418,18 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
812     local_irq_restore(flags);
813     }
814    
815     -static void native_hugepage_invalidate(struct mm_struct *mm,
816     +static void native_hugepage_invalidate(unsigned long vsid,
817     + unsigned long addr,
818     unsigned char *hpte_slot_array,
819     - unsigned long addr, int psize)
820     + int psize, int ssize)
821     {
822     - int ssize = 0, i;
823     - int lock_tlbie;
824     + int i;
825     struct hash_pte *hptep;
826     int actual_psize = MMU_PAGE_16M;
827     unsigned int max_hpte_count, valid;
828     unsigned long flags, s_addr = addr;
829     unsigned long hpte_v, want_v, shift;
830     - unsigned long hidx, vpn = 0, vsid, hash, slot;
831     + unsigned long hidx, vpn = 0, hash, slot;
832    
833     shift = mmu_psize_defs[psize].shift;
834     max_hpte_count = 1U << (PMD_SHIFT - shift);
835     @@ -443,15 +443,6 @@ static void native_hugepage_invalidate(struct mm_struct *mm,
836    
837     /* get the vpn */
838     addr = s_addr + (i * (1ul << shift));
839     - if (!is_kernel_addr(addr)) {
840     - ssize = user_segment_size(addr);
841     - vsid = get_vsid(mm->context.id, addr, ssize);
842     - WARN_ON(vsid == 0);
843     - } else {
844     - vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
845     - ssize = mmu_kernel_ssize;
846     - }
847     -
848     vpn = hpt_vpn(addr, vsid, ssize);
849     hash = hpt_hash(vpn, shift, ssize);
850     if (hidx & _PTEIDX_SECONDARY)
851     @@ -471,22 +462,13 @@ static void native_hugepage_invalidate(struct mm_struct *mm,
852     else
853     /* Invalidate the hpte. NOTE: this also unlocks it */
854     hptep->v = 0;
855     + /*
856     + * We need to do tlb invalidate for all the address, tlbie
857     + * instruction compares entry_VA in tlb with the VA specified
858     + * here
859     + */
860     + tlbie(vpn, psize, actual_psize, ssize, 0);
861     }
862     - /*
863     - * Since this is a hugepage, we just need a single tlbie.
864     - * use the last vpn.
865     - */
866     - lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
867     - if (lock_tlbie)
868     - raw_spin_lock(&native_tlbie_lock);
869     -
870     - asm volatile("ptesync":::"memory");
871     - __tlbie(vpn, psize, actual_psize, ssize);
872     - asm volatile("eieio; tlbsync; ptesync":::"memory");
873     -
874     - if (lock_tlbie)
875     - raw_spin_unlock(&native_tlbie_lock);
876     -
877     local_irq_restore(flags);
878     }
879    
880     diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
881     index 826893fcb3a7..5f5e6328c21c 100644
882     --- a/arch/powerpc/mm/hugepage-hash64.c
883     +++ b/arch/powerpc/mm/hugepage-hash64.c
884     @@ -18,6 +18,57 @@
885     #include <linux/mm.h>
886     #include <asm/machdep.h>
887    
888     +static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
889     + pmd_t *pmdp, unsigned int psize, int ssize)
890     +{
891     + int i, max_hpte_count, valid;
892     + unsigned long s_addr;
893     + unsigned char *hpte_slot_array;
894     + unsigned long hidx, shift, vpn, hash, slot;
895     +
896     + s_addr = addr & HPAGE_PMD_MASK;
897     + hpte_slot_array = get_hpte_slot_array(pmdp);
898     + /*
899     + * IF we try to do a HUGE PTE update after a withdraw is done.
900     + * we will find the below NULL. This happens when we do
901     + * split_huge_page_pmd
902     + */
903     + if (!hpte_slot_array)
904     + return;
905     +
906     + if (ppc_md.hugepage_invalidate)
907     + return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
908     + psize, ssize);
909     + /*
910     + * No bluk hpte removal support, invalidate each entry
911     + */
912     + shift = mmu_psize_defs[psize].shift;
913     + max_hpte_count = HPAGE_PMD_SIZE >> shift;
914     + for (i = 0; i < max_hpte_count; i++) {
915     + /*
916     + * 8 bits per each hpte entries
917     + * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
918     + */
919     + valid = hpte_valid(hpte_slot_array, i);
920     + if (!valid)
921     + continue;
922     + hidx = hpte_hash_index(hpte_slot_array, i);
923     +
924     + /* get the vpn */
925     + addr = s_addr + (i * (1ul << shift));
926     + vpn = hpt_vpn(addr, vsid, ssize);
927     + hash = hpt_hash(vpn, shift, ssize);
928     + if (hidx & _PTEIDX_SECONDARY)
929     + hash = ~hash;
930     +
931     + slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
932     + slot += hidx & _PTEIDX_GROUP_IX;
933     + ppc_md.hpte_invalidate(slot, vpn, psize,
934     + MMU_PAGE_16M, ssize, 0);
935     + }
936     +}
937     +
938     +
939     int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
940     pmd_t *pmdp, unsigned long trap, int local, int ssize,
941     unsigned int psize)
942     @@ -33,7 +84,9 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
943     * atomically mark the linux large page PMD busy and dirty
944     */
945     do {
946     - old_pmd = pmd_val(*pmdp);
947     + pmd_t pmd = ACCESS_ONCE(*pmdp);
948     +
949     + old_pmd = pmd_val(pmd);
950     /* If PMD busy, retry the access */
951     if (unlikely(old_pmd & _PAGE_BUSY))
952     return 0;
953     @@ -85,6 +138,15 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
954     vpn = hpt_vpn(ea, vsid, ssize);
955     hash = hpt_hash(vpn, shift, ssize);
956     hpte_slot_array = get_hpte_slot_array(pmdp);
957     + if (psize == MMU_PAGE_4K) {
958     + /*
959     + * invalidate the old hpte entry if we have that mapped via 64K
960     + * base page size. This is because demote_segment won't flush
961     + * hash page table entries.
962     + */
963     + if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
964     + invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
965     + }
966    
967     valid = hpte_valid(hpte_slot_array, index);
968     if (valid) {
969     @@ -107,11 +169,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
970     * safely update this here.
971     */
972     valid = 0;
973     - new_pmd &= ~_PAGE_HPTEFLAGS;
974     hpte_slot_array[index] = 0;
975     - } else
976     - /* clear the busy bits and set the hash pte bits */
977     - new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
978     + }
979     }
980    
981     if (!valid) {
982     @@ -119,11 +178,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
983    
984     /* insert new entry */
985     pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
986     -repeat:
987     - hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
988     -
989     - /* clear the busy bits and set the hash pte bits */
990     - new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
991     + new_pmd |= _PAGE_HASHPTE;
992    
993     /* Add in WIMG bits */
994     rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
995     @@ -132,6 +187,8 @@ repeat:
996     * enable the memory coherence always
997     */
998     rflags |= HPTE_R_M;
999     +repeat:
1000     + hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
1001    
1002     /* Insert into the hash table, primary slot */
1003     slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
1004     @@ -172,8 +229,17 @@ repeat:
1005     mark_hpte_slot_valid(hpte_slot_array, index, slot);
1006     }
1007     /*
1008     - * No need to use ldarx/stdcx here
1009     + * Mark the pte with _PAGE_COMBO, if we are trying to hash it with
1010     + * base page size 4k.
1011     + */
1012     + if (psize == MMU_PAGE_4K)
1013     + new_pmd |= _PAGE_COMBO;
1014     + /*
1015     + * The hpte valid is stored in the pgtable whose address is in the
1016     + * second half of the PMD. Order this against clearing of the busy bit in
1017     + * huge pmd.
1018     */
1019     + smp_wmb();
1020     *pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
1021     return 0;
1022     }
1023     diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
1024     index 30a42e24bf14..a5fff173be4f 100644
1025     --- a/arch/powerpc/mm/numa.c
1026     +++ b/arch/powerpc/mm/numa.c
1027     @@ -610,8 +610,8 @@ static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
1028     case CPU_UP_CANCELED:
1029     case CPU_UP_CANCELED_FROZEN:
1030     unmap_cpu_from_node(lcpu);
1031     - break;
1032     ret = NOTIFY_OK;
1033     + break;
1034     #endif
1035     }
1036     return ret;
1037     diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
1038     index 62bf5e8e78da..c64da56d7582 100644
1039     --- a/arch/powerpc/mm/pgtable_64.c
1040     +++ b/arch/powerpc/mm/pgtable_64.c
1041     @@ -538,7 +538,7 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
1042     *pmdp = __pmd((old & ~clr) | set);
1043     #endif
1044     if (old & _PAGE_HASHPTE)
1045     - hpte_do_hugepage_flush(mm, addr, pmdp);
1046     + hpte_do_hugepage_flush(mm, addr, pmdp, old);
1047     return old;
1048     }
1049    
1050     @@ -645,7 +645,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
1051     if (!(old & _PAGE_SPLITTING)) {
1052     /* We need to flush the hpte */
1053     if (old & _PAGE_HASHPTE)
1054     - hpte_do_hugepage_flush(vma->vm_mm, address, pmdp);
1055     + hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
1056     }
1057     }
1058    
1059     @@ -718,7 +718,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
1060     * neesd to be flushed.
1061     */
1062     void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
1063     - pmd_t *pmdp)
1064     + pmd_t *pmdp, unsigned long old_pmd)
1065     {
1066     int ssize, i;
1067     unsigned long s_addr;
1068     @@ -740,12 +740,29 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
1069     if (!hpte_slot_array)
1070     return;
1071    
1072     - /* get the base page size */
1073     + /* get the base page size,vsid and segment size */
1074     +#ifdef CONFIG_DEBUG_VM
1075     psize = get_slice_psize(mm, s_addr);
1076     + BUG_ON(psize == MMU_PAGE_16M);
1077     +#endif
1078     + if (old_pmd & _PAGE_COMBO)
1079     + psize = MMU_PAGE_4K;
1080     + else
1081     + psize = MMU_PAGE_64K;
1082     +
1083     + if (!is_kernel_addr(s_addr)) {
1084     + ssize = user_segment_size(s_addr);
1085     + vsid = get_vsid(mm->context.id, s_addr, ssize);
1086     + WARN_ON(vsid == 0);
1087     + } else {
1088     + vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize);
1089     + ssize = mmu_kernel_ssize;
1090     + }
1091    
1092     if (ppc_md.hugepage_invalidate)
1093     - return ppc_md.hugepage_invalidate(mm, hpte_slot_array,
1094     - s_addr, psize);
1095     + return ppc_md.hugepage_invalidate(vsid, s_addr,
1096     + hpte_slot_array,
1097     + psize, ssize);
1098     /*
1099     * No bluk hpte removal support, invalidate each entry
1100     */
1101     @@ -763,15 +780,6 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
1102    
1103     /* get the vpn */
1104     addr = s_addr + (i * (1ul << shift));
1105     - if (!is_kernel_addr(addr)) {
1106     - ssize = user_segment_size(addr);
1107     - vsid = get_vsid(mm->context.id, addr, ssize);
1108     - WARN_ON(vsid == 0);
1109     - } else {
1110     - vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
1111     - ssize = mmu_kernel_ssize;
1112     - }
1113     -
1114     vpn = hpt_vpn(addr, vsid, ssize);
1115     hash = hpt_hash(vpn, shift, ssize);
1116     if (hidx & _PTEIDX_SECONDARY)
1117     diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
1118     index c99f6510a0b2..9adda5790463 100644
1119     --- a/arch/powerpc/mm/tlb_hash64.c
1120     +++ b/arch/powerpc/mm/tlb_hash64.c
1121     @@ -216,7 +216,7 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
1122     if (!(pte & _PAGE_HASHPTE))
1123     continue;
1124     if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
1125     - hpte_do_hugepage_flush(mm, start, (pmd_t *)pte);
1126     + hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
1127     else
1128     hpte_need_flush(mm, start, ptep, pte, 0);
1129     }
1130     diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
1131     index 9590dbb756f2..b9a82042760f 100644
1132     --- a/arch/powerpc/platforms/pseries/hotplug-memory.c
1133     +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
1134     @@ -160,7 +160,7 @@ static int pseries_remove_memory(struct device_node *np)
1135     static inline int pseries_remove_memblock(unsigned long base,
1136     unsigned int memblock_size)
1137     {
1138     - return -EOPNOTSUPP;
1139     + return 0;
1140     }
1141     static inline int pseries_remove_memory(struct device_node *np)
1142     {
1143     diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
1144     index 33b552ffbe57..4642d6a4d356 100644
1145     --- a/arch/powerpc/platforms/pseries/iommu.c
1146     +++ b/arch/powerpc/platforms/pseries/iommu.c
1147     @@ -721,13 +721,13 @@ static int __init disable_ddw_setup(char *str)
1148    
1149     early_param("disable_ddw", disable_ddw_setup);
1150    
1151     -static void remove_ddw(struct device_node *np)
1152     +static void remove_ddw(struct device_node *np, bool remove_prop)
1153     {
1154     struct dynamic_dma_window_prop *dwp;
1155     struct property *win64;
1156     const u32 *ddw_avail;
1157     u64 liobn;
1158     - int len, ret;
1159     + int len, ret = 0;
1160    
1161     ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
1162     win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
1163     @@ -761,7 +761,8 @@ static void remove_ddw(struct device_node *np)
1164     np->full_name, ret, ddw_avail[2], liobn);
1165    
1166     delprop:
1167     - ret = of_remove_property(np, win64);
1168     + if (remove_prop)
1169     + ret = of_remove_property(np, win64);
1170     if (ret)
1171     pr_warning("%s: failed to remove direct window property: %d\n",
1172     np->full_name, ret);
1173     @@ -805,7 +806,7 @@ static int find_existing_ddw_windows(void)
1174     window = kzalloc(sizeof(*window), GFP_KERNEL);
1175     if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
1176     kfree(window);
1177     - remove_ddw(pdn);
1178     + remove_ddw(pdn, true);
1179     continue;
1180     }
1181    
1182     @@ -1045,7 +1046,7 @@ out_free_window:
1183     kfree(window);
1184    
1185     out_clear_window:
1186     - remove_ddw(pdn);
1187     + remove_ddw(pdn, true);
1188    
1189     out_free_prop:
1190     kfree(win64->name);
1191     @@ -1255,7 +1256,14 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
1192    
1193     switch (action) {
1194     case OF_RECONFIG_DETACH_NODE:
1195     - remove_ddw(np);
1196     + /*
1197     + * Removing the property will invoke the reconfig
1198     + * notifier again, which causes dead-lock on the
1199     + * read-write semaphore of the notifier chain. So
1200     + * we have to remove the property when releasing
1201     + * the device node.
1202     + */
1203     + remove_ddw(np, false);
1204     if (pci && pci->iommu_table)
1205     iommu_free_table(pci->iommu_table, np->full_name);
1206    
1207     diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
1208     index b02af9ef3ff6..ccf6f162f69c 100644
1209     --- a/arch/powerpc/platforms/pseries/lpar.c
1210     +++ b/arch/powerpc/platforms/pseries/lpar.c
1211     @@ -430,16 +430,17 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
1212     spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
1213     }
1214    
1215     -static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
1216     - unsigned char *hpte_slot_array,
1217     - unsigned long addr, int psize)
1218     +static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
1219     + unsigned long addr,
1220     + unsigned char *hpte_slot_array,
1221     + int psize, int ssize)
1222     {
1223     - int ssize = 0, i, index = 0;
1224     + int i, index = 0;
1225     unsigned long s_addr = addr;
1226     unsigned int max_hpte_count, valid;
1227     unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
1228     unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
1229     - unsigned long shift, hidx, vpn = 0, vsid, hash, slot;
1230     + unsigned long shift, hidx, vpn = 0, hash, slot;
1231    
1232     shift = mmu_psize_defs[psize].shift;
1233     max_hpte_count = 1U << (PMD_SHIFT - shift);
1234     @@ -452,15 +453,6 @@ static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
1235    
1236     /* get the vpn */
1237     addr = s_addr + (i * (1ul << shift));
1238     - if (!is_kernel_addr(addr)) {
1239     - ssize = user_segment_size(addr);
1240     - vsid = get_vsid(mm->context.id, addr, ssize);
1241     - WARN_ON(vsid == 0);
1242     - } else {
1243     - vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
1244     - ssize = mmu_kernel_ssize;
1245     - }
1246     -
1247     vpn = hpt_vpn(addr, vsid, ssize);
1248     hash = hpt_hash(vpn, shift, ssize);
1249     if (hidx & _PTEIDX_SECONDARY)
1250     diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
1251     index bb74b21f007a..a0a3bed6e4dc 100644
1252     --- a/arch/s390/Kconfig
1253     +++ b/arch/s390/Kconfig
1254     @@ -93,6 +93,7 @@ config S390
1255     select ARCH_INLINE_WRITE_UNLOCK_IRQ
1256     select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
1257     select ARCH_SAVE_PAGE_KEYS if HIBERNATION
1258     + select ARCH_SUPPORTS_ATOMIC_RMW
1259     select ARCH_USE_CMPXCHG_LOCKREF
1260     select ARCH_WANT_IPC_PARSE_VERSION
1261     select BUILDTIME_EXTABLE_SORT
1262     diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
1263     index edff4e653d9a..c66bca17e736 100644
1264     --- a/drivers/acpi/acpica/utcopy.c
1265     +++ b/drivers/acpi/acpica/utcopy.c
1266     @@ -1001,5 +1001,11 @@ acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
1267     status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
1268     }
1269    
1270     + /* Delete the allocated object if copy failed */
1271     +
1272     + if (ACPI_FAILURE(status)) {
1273     + acpi_ut_remove_reference(*dest_desc);
1274     + }
1275     +
1276     return_ACPI_STATUS(status);
1277     }
1278     diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
1279     index 3dca36d4ad26..17f9ec501972 100644
1280     --- a/drivers/acpi/processor_idle.c
1281     +++ b/drivers/acpi/processor_idle.c
1282     @@ -1071,9 +1071,9 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1283    
1284     if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
1285    
1286     - cpuidle_pause_and_lock();
1287     /* Protect against cpu-hotplug */
1288     get_online_cpus();
1289     + cpuidle_pause_and_lock();
1290    
1291     /* Disable all cpuidle devices */
1292     for_each_online_cpu(cpu) {
1293     @@ -1100,8 +1100,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1294     cpuidle_enable_device(dev);
1295     }
1296     }
1297     - put_online_cpus();
1298     cpuidle_resume_and_unlock();
1299     + put_online_cpus();
1300     }
1301    
1302     return 0;
1303     diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
1304     index 57b053f424d1..92d5184e3654 100644
1305     --- a/drivers/acpi/scan.c
1306     +++ b/drivers/acpi/scan.c
1307     @@ -329,7 +329,8 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
1308     unsigned long long sta;
1309     acpi_status status;
1310    
1311     - if (device->handler->hotplug.demand_offline && !acpi_force_hot_remove) {
1312     + if (device->handler && device->handler->hotplug.demand_offline
1313     + && !acpi_force_hot_remove) {
1314     if (!acpi_scan_is_offline(device, true))
1315     return -EBUSY;
1316     } else {
1317     @@ -660,8 +661,14 @@ static ssize_t
1318     acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
1319     char *buf) {
1320     struct acpi_device *acpi_dev = to_acpi_device(dev);
1321     + acpi_status status;
1322     + unsigned long long sun;
1323     +
1324     + status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
1325     + if (ACPI_FAILURE(status))
1326     + return -ENODEV;
1327    
1328     - return sprintf(buf, "%lu\n", acpi_dev->pnp.sun);
1329     + return sprintf(buf, "%llu\n", sun);
1330     }
1331     static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
1332    
1333     @@ -683,7 +690,6 @@ static int acpi_device_setup_files(struct acpi_device *dev)
1334     {
1335     struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
1336     acpi_status status;
1337     - unsigned long long sun;
1338     int result = 0;
1339    
1340     /*
1341     @@ -724,14 +730,10 @@ static int acpi_device_setup_files(struct acpi_device *dev)
1342     if (dev->pnp.unique_id)
1343     result = device_create_file(&dev->dev, &dev_attr_uid);
1344    
1345     - status = acpi_evaluate_integer(dev->handle, "_SUN", NULL, &sun);
1346     - if (ACPI_SUCCESS(status)) {
1347     - dev->pnp.sun = (unsigned long)sun;
1348     + if (acpi_has_method(dev->handle, "_SUN")) {
1349     result = device_create_file(&dev->dev, &dev_attr_sun);
1350     if (result)
1351     goto end;
1352     - } else {
1353     - dev->pnp.sun = (unsigned long)-1;
1354     }
1355    
1356     if (acpi_has_method(dev->handle, "_STA")) {
1357     @@ -915,12 +917,17 @@ static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
1358     device->driver->ops.notify(device, event);
1359     }
1360    
1361     -static acpi_status acpi_device_notify_fixed(void *data)
1362     +static void acpi_device_notify_fixed(void *data)
1363     {
1364     struct acpi_device *device = data;
1365    
1366     /* Fixed hardware devices have no handles */
1367     acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
1368     +}
1369     +
1370     +static acpi_status acpi_device_fixed_event(void *data)
1371     +{
1372     + acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
1373     return AE_OK;
1374     }
1375    
1376     @@ -931,12 +938,12 @@ static int acpi_device_install_notify_handler(struct acpi_device *device)
1377     if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
1378     status =
1379     acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
1380     - acpi_device_notify_fixed,
1381     + acpi_device_fixed_event,
1382     device);
1383     else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
1384     status =
1385     acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
1386     - acpi_device_notify_fixed,
1387     + acpi_device_fixed_event,
1388     device);
1389     else
1390     status = acpi_install_notify_handler(device->handle,
1391     @@ -953,10 +960,10 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device)
1392     {
1393     if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
1394     acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
1395     - acpi_device_notify_fixed);
1396     + acpi_device_fixed_event);
1397     else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
1398     acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
1399     - acpi_device_notify_fixed);
1400     + acpi_device_fixed_event);
1401     else
1402     acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
1403     acpi_device_notify);
1404     diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
1405     index 7399303d7d99..9e81a3d01d2b 100644
1406     --- a/drivers/bluetooth/btmrvl_drv.h
1407     +++ b/drivers/bluetooth/btmrvl_drv.h
1408     @@ -66,6 +66,7 @@ struct btmrvl_adapter {
1409     u8 hs_state;
1410     u8 wakeup_tries;
1411     wait_queue_head_t cmd_wait_q;
1412     + wait_queue_head_t event_hs_wait_q;
1413     u8 cmd_complete;
1414     bool is_suspended;
1415     };
1416     diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
1417     index 1e0320af00c6..49d20989b45a 100644
1418     --- a/drivers/bluetooth/btmrvl_main.c
1419     +++ b/drivers/bluetooth/btmrvl_main.c
1420     @@ -112,6 +112,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
1421     adapter->hs_state = HS_ACTIVATED;
1422     if (adapter->psmode)
1423     adapter->ps_state = PS_SLEEP;
1424     + wake_up_interruptible(&adapter->event_hs_wait_q);
1425     BT_DBG("HS ACTIVATED!");
1426     } else {
1427     BT_DBG("HS Enable failed");
1428     @@ -251,11 +252,31 @@ EXPORT_SYMBOL_GPL(btmrvl_enable_ps);
1429    
1430     int btmrvl_enable_hs(struct btmrvl_private *priv)
1431     {
1432     + struct btmrvl_adapter *adapter = priv->adapter;
1433     int ret;
1434    
1435     ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_ENABLE, NULL, 0);
1436     - if (ret)
1437     + if (ret) {
1438     BT_ERR("Host sleep enable command failed\n");
1439     + return ret;
1440     + }
1441     +
1442     + ret = wait_event_interruptible_timeout(adapter->event_hs_wait_q,
1443     + adapter->hs_state,
1444     + msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED));
1445     + if (ret < 0) {
1446     + BT_ERR("event_hs_wait_q terminated (%d): %d,%d,%d",
1447     + ret, adapter->hs_state, adapter->ps_state,
1448     + adapter->wakeup_tries);
1449     + } else if (!ret) {
1450     + BT_ERR("hs_enable timeout: %d,%d,%d", adapter->hs_state,
1451     + adapter->ps_state, adapter->wakeup_tries);
1452     + ret = -ETIMEDOUT;
1453     + } else {
1454     + BT_DBG("host sleep enabled: %d,%d,%d", adapter->hs_state,
1455     + adapter->ps_state, adapter->wakeup_tries);
1456     + ret = 0;
1457     + }
1458    
1459     return ret;
1460     }
1461     @@ -341,6 +362,7 @@ static void btmrvl_init_adapter(struct btmrvl_private *priv)
1462     priv->adapter->ps_state = PS_AWAKE;
1463    
1464     init_waitqueue_head(&priv->adapter->cmd_wait_q);
1465     + init_waitqueue_head(&priv->adapter->event_hs_wait_q);
1466     }
1467    
1468     static void btmrvl_free_adapter(struct btmrvl_private *priv)
1469     @@ -648,6 +670,7 @@ int btmrvl_remove_card(struct btmrvl_private *priv)
1470     hdev = priv->btmrvl_dev.hcidev;
1471    
1472     wake_up_interruptible(&priv->adapter->cmd_wait_q);
1473     + wake_up_interruptible(&priv->adapter->event_hs_wait_q);
1474    
1475     kthread_stop(priv->main_thread.task);
1476    
1477     diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
1478     index 62e10fd1e1cb..6af17002a115 100644
1479     --- a/drivers/char/tpm/tpm-interface.c
1480     +++ b/drivers/char/tpm/tpm-interface.c
1481     @@ -491,11 +491,10 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
1482     int tpm_get_timeouts(struct tpm_chip *chip)
1483     {
1484     struct tpm_cmd_t tpm_cmd;
1485     - struct timeout_t *timeout_cap;
1486     + unsigned long new_timeout[4];
1487     + unsigned long old_timeout[4];
1488     struct duration_t *duration_cap;
1489     ssize_t rc;
1490     - u32 timeout;
1491     - unsigned int scale = 1;
1492    
1493     tpm_cmd.header.in = tpm_getcap_header;
1494     tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
1495     @@ -529,25 +528,46 @@ int tpm_get_timeouts(struct tpm_chip *chip)
1496     != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
1497     return -EINVAL;
1498    
1499     - timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
1500     - /* Don't overwrite default if value is 0 */
1501     - timeout = be32_to_cpu(timeout_cap->a);
1502     - if (timeout && timeout < 1000) {
1503     - /* timeouts in msec rather usec */
1504     - scale = 1000;
1505     - chip->vendor.timeout_adjusted = true;
1506     + old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
1507     + old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
1508     + old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
1509     + old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
1510     + memcpy(new_timeout, old_timeout, sizeof(new_timeout));
1511     +
1512     + /*
1513     + * Provide ability for vendor overrides of timeout values in case
1514     + * of misreporting.
1515     + */
1516     + if (chip->ops->update_timeouts != NULL)
1517     + chip->vendor.timeout_adjusted =
1518     + chip->ops->update_timeouts(chip, new_timeout);
1519     +
1520     + if (!chip->vendor.timeout_adjusted) {
1521     + /* Don't overwrite default if value is 0 */
1522     + if (new_timeout[0] != 0 && new_timeout[0] < 1000) {
1523     + int i;
1524     +
1525     + /* timeouts in msec rather usec */
1526     + for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
1527     + new_timeout[i] *= 1000;
1528     + chip->vendor.timeout_adjusted = true;
1529     + }
1530     + }
1531     +
1532     + /* Report adjusted timeouts */
1533     + if (chip->vendor.timeout_adjusted) {
1534     + dev_info(chip->dev,
1535     + HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
1536     + old_timeout[0], new_timeout[0],
1537     + old_timeout[1], new_timeout[1],
1538     + old_timeout[2], new_timeout[2],
1539     + old_timeout[3], new_timeout[3]);
1540     }
1541     - if (timeout)
1542     - chip->vendor.timeout_a = usecs_to_jiffies(timeout * scale);
1543     - timeout = be32_to_cpu(timeout_cap->b);
1544     - if (timeout)
1545     - chip->vendor.timeout_b = usecs_to_jiffies(timeout * scale);
1546     - timeout = be32_to_cpu(timeout_cap->c);
1547     - if (timeout)
1548     - chip->vendor.timeout_c = usecs_to_jiffies(timeout * scale);
1549     - timeout = be32_to_cpu(timeout_cap->d);
1550     - if (timeout)
1551     - chip->vendor.timeout_d = usecs_to_jiffies(timeout * scale);
1552     +
1553     + chip->vendor.timeout_a = usecs_to_jiffies(new_timeout[0]);
1554     + chip->vendor.timeout_b = usecs_to_jiffies(new_timeout[1]);
1555     + chip->vendor.timeout_c = usecs_to_jiffies(new_timeout[2]);
1556     + chip->vendor.timeout_d = usecs_to_jiffies(new_timeout[3]);
1557    
1558     duration:
1559     tpm_cmd.header.in = tpm_getcap_header;
1560     @@ -991,13 +1011,13 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
1561     int err, total = 0, retries = 5;
1562     u8 *dest = out;
1563    
1564     + if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
1565     + return -EINVAL;
1566     +
1567     chip = tpm_chip_find_get(chip_num);
1568     if (chip == NULL)
1569     return -ENODEV;
1570    
1571     - if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
1572     - return -EINVAL;
1573     -
1574     do {
1575     tpm_cmd.header.in = tpm_getrandom_header;
1576     tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes);
1577     @@ -1016,6 +1036,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
1578     num_bytes -= recd;
1579     } while (retries-- && total < max);
1580    
1581     + tpm_chip_put(chip);
1582     return total ? total : -EIO;
1583     }
1584     EXPORT_SYMBOL_GPL(tpm_get_random);
1585     @@ -1095,7 +1116,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
1586     goto del_misc;
1587    
1588     if (tpm_add_ppi(&dev->kobj))
1589     - goto del_misc;
1590     + goto del_sysfs;
1591    
1592     chip->bios_dir = tpm_bios_log_setup(chip->devname);
1593    
1594     @@ -1106,6 +1127,8 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
1595    
1596     return chip;
1597    
1598     +del_sysfs:
1599     + tpm_sysfs_del_device(chip);
1600     del_misc:
1601     tpm_dev_del_device(chip);
1602     put_device:
1603     diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
1604     index a9ed2270c25d..2c46734b266d 100644
1605     --- a/drivers/char/tpm/tpm_tis.c
1606     +++ b/drivers/char/tpm/tpm_tis.c
1607     @@ -373,6 +373,36 @@ out_err:
1608     return rc;
1609     }
1610    
1611     +struct tis_vendor_timeout_override {
1612     + u32 did_vid;
1613     + unsigned long timeout_us[4];
1614     +};
1615     +
1616     +static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
1617     + /* Atmel 3204 */
1618     + { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
1619     + (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
1620     +};
1621     +
1622     +static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
1623     + unsigned long *timeout_cap)
1624     +{
1625     + int i;
1626     + u32 did_vid;
1627     +
1628     + did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
1629     +
1630     + for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
1631     + if (vendor_timeout_overrides[i].did_vid != did_vid)
1632     + continue;
1633     + memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
1634     + sizeof(vendor_timeout_overrides[i].timeout_us));
1635     + return true;
1636     + }
1637     +
1638     + return false;
1639     +}
1640     +
1641     /*
1642     * Early probing for iTPM with STS_DATA_EXPECT flaw.
1643     * Try sending command without itpm flag set and if that
1644     @@ -437,6 +467,7 @@ static const struct tpm_class_ops tpm_tis = {
1645     .recv = tpm_tis_recv,
1646     .send = tpm_tis_send,
1647     .cancel = tpm_tis_ready,
1648     + .update_timeouts = tpm_tis_update_timeouts,
1649     .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
1650     .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
1651     .req_canceled = tpm_tis_req_canceled,
1652     diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
1653     index b22659cccca4..e6125522860a 100644
1654     --- a/drivers/firmware/efi/vars.c
1655     +++ b/drivers/firmware/efi/vars.c
1656     @@ -481,7 +481,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_remove);
1657     */
1658     static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
1659     {
1660     - WARN_ON(!spin_is_locked(&__efivars->lock));
1661     + lockdep_assert_held(&__efivars->lock);
1662    
1663     list_del(&entry->list);
1664     spin_unlock_irq(&__efivars->lock);
1665     @@ -507,7 +507,7 @@ int __efivar_entry_delete(struct efivar_entry *entry)
1666     const struct efivar_operations *ops = __efivars->ops;
1667     efi_status_t status;
1668    
1669     - WARN_ON(!spin_is_locked(&__efivars->lock));
1670     + lockdep_assert_held(&__efivars->lock);
1671    
1672     status = ops->set_variable(entry->var.VariableName,
1673     &entry->var.VendorGuid,
1674     @@ -667,7 +667,7 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
1675     int strsize1, strsize2;
1676     bool found = false;
1677    
1678     - WARN_ON(!spin_is_locked(&__efivars->lock));
1679     + lockdep_assert_held(&__efivars->lock);
1680    
1681     list_for_each_entry_safe(entry, n, head, list) {
1682     strsize1 = ucs2_strsize(name, 1024);
1683     @@ -739,7 +739,7 @@ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
1684     const struct efivar_operations *ops = __efivars->ops;
1685     efi_status_t status;
1686    
1687     - WARN_ON(!spin_is_locked(&__efivars->lock));
1688     + lockdep_assert_held(&__efivars->lock);
1689    
1690     status = ops->get_variable(entry->var.VariableName,
1691     &entry->var.VendorGuid,
1692     diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
1693     index 23ca7a517246..74ed08a750f4 100644
1694     --- a/drivers/gpu/drm/nouveau/nouveau_drm.h
1695     +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
1696     @@ -10,7 +10,7 @@
1697    
1698     #define DRIVER_MAJOR 1
1699     #define DRIVER_MINOR 1
1700     -#define DRIVER_PATCHLEVEL 1
1701     +#define DRIVER_PATCHLEVEL 2
1702    
1703     /*
1704     * 1.1.1:
1705     @@ -21,6 +21,8 @@
1706     * to control registers on the MPs to enable performance counters,
1707     * and to control the warp error enable mask (OpenGL requires out of
1708     * bounds access to local memory to be silently ignored / return 0).
1709     + * 1.1.2:
1710     + * - fixes multiple bugs in flip completion events and timestamping
1711     */
1712    
1713     #include <core/client.h>
1714     diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
1715     index bc9e56eb4e9c..7b3537c55c77 100644
1716     --- a/drivers/gpu/drm/radeon/cik.c
1717     +++ b/drivers/gpu/drm/radeon/cik.c
1718     @@ -7779,6 +7779,7 @@ restart_ih:
1719     static int cik_startup(struct radeon_device *rdev)
1720     {
1721     struct radeon_ring *ring;
1722     + u32 nop;
1723     int r;
1724    
1725     /* enable pcie gen2/3 link */
1726     @@ -7896,9 +7897,15 @@ static int cik_startup(struct radeon_device *rdev)
1727     }
1728     cik_irq_set(rdev);
1729    
1730     + if (rdev->family == CHIP_HAWAII) {
1731     + nop = RADEON_CP_PACKET2;
1732     + } else {
1733     + nop = PACKET3(PACKET3_NOP, 0x3FFF);
1734     + }
1735     +
1736     ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1737     r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
1738     - PACKET3(PACKET3_NOP, 0x3FFF));
1739     + nop);
1740     if (r)
1741     return r;
1742    
1743     @@ -7906,7 +7913,7 @@ static int cik_startup(struct radeon_device *rdev)
1744     /* type-2 packets are deprecated on MEC, use type-3 instead */
1745     ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
1746     r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
1747     - PACKET3(PACKET3_NOP, 0x3FFF));
1748     + nop);
1749     if (r)
1750     return r;
1751     ring->me = 1; /* first MEC */
1752     @@ -7917,7 +7924,7 @@ static int cik_startup(struct radeon_device *rdev)
1753     /* type-2 packets are deprecated on MEC, use type-3 instead */
1754     ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
1755     r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
1756     - PACKET3(PACKET3_NOP, 0x3FFF));
1757     + nop);
1758     if (r)
1759     return r;
1760     /* dGPU only have 1 MEC */
1761     diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
1762     index 3d2e489ab732..ff9163dc1596 100644
1763     --- a/drivers/infiniband/core/iwcm.c
1764     +++ b/drivers/infiniband/core/iwcm.c
1765     @@ -46,6 +46,7 @@
1766     #include <linux/completion.h>
1767     #include <linux/slab.h>
1768     #include <linux/module.h>
1769     +#include <linux/sysctl.h>
1770    
1771     #include <rdma/iw_cm.h>
1772     #include <rdma/ib_addr.h>
1773     @@ -65,6 +66,20 @@ struct iwcm_work {
1774     struct list_head free_list;
1775     };
1776    
1777     +static unsigned int default_backlog = 256;
1778     +
1779     +static struct ctl_table_header *iwcm_ctl_table_hdr;
1780     +static struct ctl_table iwcm_ctl_table[] = {
1781     + {
1782     + .procname = "default_backlog",
1783     + .data = &default_backlog,
1784     + .maxlen = sizeof(default_backlog),
1785     + .mode = 0644,
1786     + .proc_handler = proc_dointvec,
1787     + },
1788     + { }
1789     +};
1790     +
1791     /*
1792     * The following services provide a mechanism for pre-allocating iwcm_work
1793     * elements. The design pre-allocates them based on the cm_id type:
1794     @@ -425,6 +440,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
1795    
1796     cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
1797    
1798     + if (!backlog)
1799     + backlog = default_backlog;
1800     +
1801     ret = alloc_work_entries(cm_id_priv, backlog);
1802     if (ret)
1803     return ret;
1804     @@ -1030,11 +1048,20 @@ static int __init iw_cm_init(void)
1805     if (!iwcm_wq)
1806     return -ENOMEM;
1807    
1808     + iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
1809     + iwcm_ctl_table);
1810     + if (!iwcm_ctl_table_hdr) {
1811     + pr_err("iw_cm: couldn't register sysctl paths\n");
1812     + destroy_workqueue(iwcm_wq);
1813     + return -ENOMEM;
1814     + }
1815     +
1816     return 0;
1817     }
1818    
1819     static void __exit iw_cm_cleanup(void)
1820     {
1821     + unregister_net_sysctl_table(iwcm_ctl_table_hdr);
1822     destroy_workqueue(iwcm_wq);
1823     }
1824    
1825     diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
1826     index e96c07ee6756..ca0bc6c67abe 100644
1827     --- a/drivers/infiniband/ulp/srp/ib_srp.c
1828     +++ b/drivers/infiniband/ulp/srp/ib_srp.c
1829     @@ -120,6 +120,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
1830     static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
1831    
1832     static struct scsi_transport_template *ib_srp_transport_template;
1833     +static struct workqueue_struct *srp_remove_wq;
1834    
1835     static struct ib_client srp_client = {
1836     .name = "srp",
1837     @@ -539,7 +540,7 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
1838     spin_unlock_irq(&target->lock);
1839    
1840     if (changed)
1841     - queue_work(system_long_wq, &target->remove_work);
1842     + queue_work(srp_remove_wq, &target->remove_work);
1843    
1844     return changed;
1845     }
1846     @@ -2886,9 +2887,10 @@ static void srp_remove_one(struct ib_device *device)
1847     spin_unlock(&host->target_lock);
1848    
1849     /*
1850     - * Wait for target port removal tasks.
1851     + * Wait for tl_err and target port removal tasks.
1852     */
1853     flush_workqueue(system_long_wq);
1854     + flush_workqueue(srp_remove_wq);
1855    
1856     kfree(host);
1857     }
1858     @@ -2940,16 +2942,22 @@ static int __init srp_init_module(void)
1859     indirect_sg_entries = cmd_sg_entries;
1860     }
1861    
1862     + srp_remove_wq = create_workqueue("srp_remove");
1863     + if (IS_ERR(srp_remove_wq)) {
1864     + ret = PTR_ERR(srp_remove_wq);
1865     + goto out;
1866     + }
1867     +
1868     + ret = -ENOMEM;
1869     ib_srp_transport_template =
1870     srp_attach_transport(&ib_srp_transport_functions);
1871     if (!ib_srp_transport_template)
1872     - return -ENOMEM;
1873     + goto destroy_wq;
1874    
1875     ret = class_register(&srp_class);
1876     if (ret) {
1877     pr_err("couldn't register class infiniband_srp\n");
1878     - srp_release_transport(ib_srp_transport_template);
1879     - return ret;
1880     + goto release_tr;
1881     }
1882    
1883     ib_sa_register_client(&srp_sa_client);
1884     @@ -2957,13 +2965,22 @@ static int __init srp_init_module(void)
1885     ret = ib_register_client(&srp_client);
1886     if (ret) {
1887     pr_err("couldn't register IB client\n");
1888     - srp_release_transport(ib_srp_transport_template);
1889     - ib_sa_unregister_client(&srp_sa_client);
1890     - class_unregister(&srp_class);
1891     - return ret;
1892     + goto unreg_sa;
1893     }
1894    
1895     - return 0;
1896     +out:
1897     + return ret;
1898     +
1899     +unreg_sa:
1900     + ib_sa_unregister_client(&srp_sa_client);
1901     + class_unregister(&srp_class);
1902     +
1903     +release_tr:
1904     + srp_release_transport(ib_srp_transport_template);
1905     +
1906     +destroy_wq:
1907     + destroy_workqueue(srp_remove_wq);
1908     + goto out;
1909     }
1910    
1911     static void __exit srp_cleanup_module(void)
1912     @@ -2972,6 +2989,7 @@ static void __exit srp_cleanup_module(void)
1913     ib_sa_unregister_client(&srp_sa_client);
1914     class_unregister(&srp_class);
1915     srp_release_transport(ib_srp_transport_template);
1916     + destroy_workqueue(srp_remove_wq);
1917     }
1918    
1919     module_init(srp_init_module);
1920     diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
1921     index 71776ff5aedc..9cbef59d404a 100644
1922     --- a/drivers/iommu/amd_iommu.c
1923     +++ b/drivers/iommu/amd_iommu.c
1924     @@ -3227,14 +3227,16 @@ free_domains:
1925    
1926     static void cleanup_domain(struct protection_domain *domain)
1927     {
1928     - struct iommu_dev_data *dev_data, *next;
1929     + struct iommu_dev_data *entry;
1930     unsigned long flags;
1931    
1932     write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1933    
1934     - list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
1935     - __detach_device(dev_data);
1936     - atomic_set(&dev_data->bind, 0);
1937     + while (!list_empty(&domain->dev_list)) {
1938     + entry = list_first_entry(&domain->dev_list,
1939     + struct iommu_dev_data, list);
1940     + __detach_device(entry);
1941     + atomic_set(&entry->bind, 0);
1942     }
1943    
1944     write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1945     diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
1946     index 56e24c072b62..d7690f86fdb9 100644
1947     --- a/drivers/md/raid1.c
1948     +++ b/drivers/md/raid1.c
1949     @@ -1501,12 +1501,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
1950     mddev->degraded++;
1951     set_bit(Faulty, &rdev->flags);
1952     spin_unlock_irqrestore(&conf->device_lock, flags);
1953     - /*
1954     - * if recovery is running, make sure it aborts.
1955     - */
1956     - set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1957     } else
1958     set_bit(Faulty, &rdev->flags);
1959     + /*
1960     + * if recovery is running, make sure it aborts.
1961     + */
1962     + set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1963     set_bit(MD_CHANGE_DEVS, &mddev->flags);
1964     printk(KERN_ALERT
1965     "md/raid1:%s: Disk failure on %s, disabling device.\n"
1966     diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
1967     index cb882aae9e20..a46124ecafc7 100644
1968     --- a/drivers/md/raid10.c
1969     +++ b/drivers/md/raid10.c
1970     @@ -1684,13 +1684,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
1971     spin_unlock_irqrestore(&conf->device_lock, flags);
1972     return;
1973     }
1974     - if (test_and_clear_bit(In_sync, &rdev->flags)) {
1975     + if (test_and_clear_bit(In_sync, &rdev->flags))
1976     mddev->degraded++;
1977     - /*
1978     - * if recovery is running, make sure it aborts.
1979     - */
1980     - set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1981     - }
1982     + /*
1983     + * If recovery is running, make sure it aborts.
1984     + */
1985     + set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1986     set_bit(Blocked, &rdev->flags);
1987     set_bit(Faulty, &rdev->flags);
1988     set_bit(MD_CHANGE_DEVS, &mddev->flags);
1989     @@ -2954,6 +2953,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
1990     */
1991     if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
1992     end_reshape(conf);
1993     + close_sync(conf);
1994     return 0;
1995     }
1996    
1997     @@ -4411,7 +4411,7 @@ read_more:
1998     read_bio->bi_private = r10_bio;
1999     read_bio->bi_end_io = end_sync_read;
2000     read_bio->bi_rw = READ;
2001     - read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
2002     + read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
2003     read_bio->bi_flags |= 1 << BIO_UPTODATE;
2004     read_bio->bi_vcnt = 0;
2005     read_bio->bi_iter.bi_size = 0;
2006     diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2007     index 16f5c21963db..18cda77b4f79 100644
2008     --- a/drivers/md/raid5.c
2009     +++ b/drivers/md/raid5.c
2010     @@ -3779,6 +3779,8 @@ static void handle_stripe(struct stripe_head *sh)
2011     set_bit(R5_Wantwrite, &dev->flags);
2012     if (prexor)
2013     continue;
2014     + if (s.failed > 1)
2015     + continue;
2016     if (!test_bit(R5_Insync, &dev->flags) ||
2017     ((i == sh->pd_idx || i == sh->qd_idx) &&
2018     s.failed == 0))
2019     diff --git a/drivers/media/common/siano/Kconfig b/drivers/media/common/siano/Kconfig
2020     index f953d33ee151..4bfbd5f463d1 100644
2021     --- a/drivers/media/common/siano/Kconfig
2022     +++ b/drivers/media/common/siano/Kconfig
2023     @@ -22,8 +22,7 @@ config SMS_SIANO_DEBUGFS
2024     bool "Enable debugfs for smsdvb"
2025     depends on SMS_SIANO_MDTV
2026     depends on DEBUG_FS
2027     - depends on SMS_USB_DRV
2028     - depends on CONFIG_SMS_USB_DRV = CONFIG_SMS_SDIO_DRV
2029     + depends on SMS_USB_DRV = SMS_SDIO_DRV
2030    
2031     ---help---
2032     Choose Y to enable visualizing a dump of the frontend
2033     diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
2034     index 36c504b78f2c..008ac87a9031 100644
2035     --- a/drivers/media/i2c/mt9v032.c
2036     +++ b/drivers/media/i2c/mt9v032.c
2037     @@ -305,8 +305,8 @@ mt9v032_update_hblank(struct mt9v032 *mt9v032)
2038    
2039     if (mt9v032->version->version == MT9V034_CHIP_ID_REV1)
2040     min_hblank += (mt9v032->hratio - 1) * 10;
2041     - min_hblank = max_t(unsigned int, (int)mt9v032->model->data->min_row_time - crop->width,
2042     - (int)min_hblank);
2043     + min_hblank = max_t(int, mt9v032->model->data->min_row_time - crop->width,
2044     + min_hblank);
2045     hblank = max_t(unsigned int, mt9v032->hblank, min_hblank);
2046    
2047     return mt9v032_write(client, MT9V032_HORIZONTAL_BLANKING, hblank);
2048     diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
2049     index 703560fa5e73..88c1606fd555 100644
2050     --- a/drivers/media/media-device.c
2051     +++ b/drivers/media/media-device.c
2052     @@ -106,8 +106,6 @@ static long media_device_enum_entities(struct media_device *mdev,
2053     if (ent->name) {
2054     strncpy(u_ent.name, ent->name, sizeof(u_ent.name));
2055     u_ent.name[sizeof(u_ent.name) - 1] = '\0';
2056     - } else {
2057     - memset(u_ent.name, 0, sizeof(u_ent.name));
2058     }
2059     u_ent.type = ent->type;
2060     u_ent.revision = ent->revision;
2061     diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
2062     index b4687a834f85..7245cca89257 100644
2063     --- a/drivers/media/platform/vsp1/vsp1_video.c
2064     +++ b/drivers/media/platform/vsp1/vsp1_video.c
2065     @@ -635,8 +635,6 @@ static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
2066     if (vb->num_planes < format->num_planes)
2067     return -EINVAL;
2068    
2069     - buf->video = video;
2070     -
2071     for (i = 0; i < vb->num_planes; ++i) {
2072     buf->addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
2073     buf->length[i] = vb2_plane_size(vb, i);
2074     diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h
2075     index d8612a378345..47b7a8ab5e2f 100644
2076     --- a/drivers/media/platform/vsp1/vsp1_video.h
2077     +++ b/drivers/media/platform/vsp1/vsp1_video.h
2078     @@ -89,7 +89,6 @@ static inline struct vsp1_pipeline *to_vsp1_pipeline(struct media_entity *e)
2079     }
2080    
2081     struct vsp1_video_buffer {
2082     - struct vsp1_video *video;
2083     struct vb2_buffer buf;
2084     struct list_head queue;
2085    
2086     diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
2087     index 2018befabb5a..e71decbfd0af 100644
2088     --- a/drivers/media/tuners/xc4000.c
2089     +++ b/drivers/media/tuners/xc4000.c
2090     @@ -93,7 +93,7 @@ struct xc4000_priv {
2091     struct firmware_description *firm;
2092     int firm_size;
2093     u32 if_khz;
2094     - u32 freq_hz;
2095     + u32 freq_hz, freq_offset;
2096     u32 bandwidth;
2097     u8 video_standard;
2098     u8 rf_mode;
2099     @@ -1157,14 +1157,14 @@ static int xc4000_set_params(struct dvb_frontend *fe)
2100     case SYS_ATSC:
2101     dprintk(1, "%s() VSB modulation\n", __func__);
2102     priv->rf_mode = XC_RF_MODE_AIR;
2103     - priv->freq_hz = c->frequency - 1750000;
2104     + priv->freq_offset = 1750000;
2105     priv->video_standard = XC4000_DTV6;
2106     type = DTV6;
2107     break;
2108     case SYS_DVBC_ANNEX_B:
2109     dprintk(1, "%s() QAM modulation\n", __func__);
2110     priv->rf_mode = XC_RF_MODE_CABLE;
2111     - priv->freq_hz = c->frequency - 1750000;
2112     + priv->freq_offset = 1750000;
2113     priv->video_standard = XC4000_DTV6;
2114     type = DTV6;
2115     break;
2116     @@ -1173,23 +1173,23 @@ static int xc4000_set_params(struct dvb_frontend *fe)
2117     dprintk(1, "%s() OFDM\n", __func__);
2118     if (bw == 0) {
2119     if (c->frequency < 400000000) {
2120     - priv->freq_hz = c->frequency - 2250000;
2121     + priv->freq_offset = 2250000;
2122     } else {
2123     - priv->freq_hz = c->frequency - 2750000;
2124     + priv->freq_offset = 2750000;
2125     }
2126     priv->video_standard = XC4000_DTV7_8;
2127     type = DTV78;
2128     } else if (bw <= 6000000) {
2129     priv->video_standard = XC4000_DTV6;
2130     - priv->freq_hz = c->frequency - 1750000;
2131     + priv->freq_offset = 1750000;
2132     type = DTV6;
2133     } else if (bw <= 7000000) {
2134     priv->video_standard = XC4000_DTV7;
2135     - priv->freq_hz = c->frequency - 2250000;
2136     + priv->freq_offset = 2250000;
2137     type = DTV7;
2138     } else {
2139     priv->video_standard = XC4000_DTV8;
2140     - priv->freq_hz = c->frequency - 2750000;
2141     + priv->freq_offset = 2750000;
2142     type = DTV8;
2143     }
2144     priv->rf_mode = XC_RF_MODE_AIR;
2145     @@ -1200,6 +1200,8 @@ static int xc4000_set_params(struct dvb_frontend *fe)
2146     goto fail;
2147     }
2148    
2149     + priv->freq_hz = c->frequency - priv->freq_offset;
2150     +
2151     dprintk(1, "%s() frequency=%d (compensated)\n",
2152     __func__, priv->freq_hz);
2153    
2154     @@ -1520,7 +1522,7 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
2155     {
2156     struct xc4000_priv *priv = fe->tuner_priv;
2157    
2158     - *freq = priv->freq_hz;
2159     + *freq = priv->freq_hz + priv->freq_offset;
2160    
2161     if (debug) {
2162     mutex_lock(&priv->lock);
2163     diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
2164     index 5cd09a681b6a..b2d9e9cb97f7 100644
2165     --- a/drivers/media/tuners/xc5000.c
2166     +++ b/drivers/media/tuners/xc5000.c
2167     @@ -55,7 +55,7 @@ struct xc5000_priv {
2168    
2169     u32 if_khz;
2170     u16 xtal_khz;
2171     - u32 freq_hz;
2172     + u32 freq_hz, freq_offset;
2173     u32 bandwidth;
2174     u8 video_standard;
2175     u8 rf_mode;
2176     @@ -755,13 +755,13 @@ static int xc5000_set_params(struct dvb_frontend *fe)
2177     case SYS_ATSC:
2178     dprintk(1, "%s() VSB modulation\n", __func__);
2179     priv->rf_mode = XC_RF_MODE_AIR;
2180     - priv->freq_hz = freq - 1750000;
2181     + priv->freq_offset = 1750000;
2182     priv->video_standard = DTV6;
2183     break;
2184     case SYS_DVBC_ANNEX_B:
2185     dprintk(1, "%s() QAM modulation\n", __func__);
2186     priv->rf_mode = XC_RF_MODE_CABLE;
2187     - priv->freq_hz = freq - 1750000;
2188     + priv->freq_offset = 1750000;
2189     priv->video_standard = DTV6;
2190     break;
2191     case SYS_ISDBT:
2192     @@ -776,15 +776,15 @@ static int xc5000_set_params(struct dvb_frontend *fe)
2193     switch (bw) {
2194     case 6000000:
2195     priv->video_standard = DTV6;
2196     - priv->freq_hz = freq - 1750000;
2197     + priv->freq_offset = 1750000;
2198     break;
2199     case 7000000:
2200     priv->video_standard = DTV7;
2201     - priv->freq_hz = freq - 2250000;
2202     + priv->freq_offset = 2250000;
2203     break;
2204     case 8000000:
2205     priv->video_standard = DTV8;
2206     - priv->freq_hz = freq - 2750000;
2207     + priv->freq_offset = 2750000;
2208     break;
2209     default:
2210     printk(KERN_ERR "xc5000 bandwidth not set!\n");
2211     @@ -798,15 +798,15 @@ static int xc5000_set_params(struct dvb_frontend *fe)
2212     priv->rf_mode = XC_RF_MODE_CABLE;
2213     if (bw <= 6000000) {
2214     priv->video_standard = DTV6;
2215     - priv->freq_hz = freq - 1750000;
2216     + priv->freq_offset = 1750000;
2217     b = 6;
2218     } else if (bw <= 7000000) {
2219     priv->video_standard = DTV7;
2220     - priv->freq_hz = freq - 2250000;
2221     + priv->freq_offset = 2250000;
2222     b = 7;
2223     } else {
2224     priv->video_standard = DTV7_8;
2225     - priv->freq_hz = freq - 2750000;
2226     + priv->freq_offset = 2750000;
2227     b = 8;
2228     }
2229     dprintk(1, "%s() Bandwidth %dMHz (%d)\n", __func__,
2230     @@ -817,6 +817,8 @@ static int xc5000_set_params(struct dvb_frontend *fe)
2231     return -EINVAL;
2232     }
2233    
2234     + priv->freq_hz = freq - priv->freq_offset;
2235     +
2236     dprintk(1, "%s() frequency=%d (compensated to %d)\n",
2237     __func__, freq, priv->freq_hz);
2238    
2239     @@ -1067,7 +1069,7 @@ static int xc5000_get_frequency(struct dvb_frontend *fe, u32 *freq)
2240     {
2241     struct xc5000_priv *priv = fe->tuner_priv;
2242     dprintk(1, "%s()\n", __func__);
2243     - *freq = priv->freq_hz;
2244     + *freq = priv->freq_hz + priv->freq_offset;
2245     return 0;
2246     }
2247    
2248     diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
2249     index f6154546b5c0..7ed75efa1c36 100644
2250     --- a/drivers/media/usb/au0828/au0828-video.c
2251     +++ b/drivers/media/usb/au0828/au0828-video.c
2252     @@ -787,11 +787,27 @@ static int au0828_i2s_init(struct au0828_dev *dev)
2253    
2254     /*
2255     * Auvitek au0828 analog stream enable
2256     - * Please set interface0 to AS5 before enable the stream
2257     */
2258     static int au0828_analog_stream_enable(struct au0828_dev *d)
2259     {
2260     + struct usb_interface *iface;
2261     + int ret;
2262     +
2263     dprintk(1, "au0828_analog_stream_enable called\n");
2264     +
2265     + iface = usb_ifnum_to_if(d->usbdev, 0);
2266     + if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) {
2267     + dprintk(1, "Changing intf#0 to alt 5\n");
2268     + /* set au0828 interface0 to AS5 here again */
2269     + ret = usb_set_interface(d->usbdev, 0, 5);
2270     + if (ret < 0) {
2271     + printk(KERN_INFO "Au0828 can't set alt setting to 5!\n");
2272     + return -EBUSY;
2273     + }
2274     + }
2275     +
2276     + /* FIXME: size should be calculated using d->width, d->height */
2277     +
2278     au0828_writereg(d, AU0828_SENSORCTRL_VBI_103, 0x00);
2279     au0828_writereg(d, 0x106, 0x00);
2280     /* set x position */
2281     @@ -1002,15 +1018,6 @@ static int au0828_v4l2_open(struct file *filp)
2282     return -ERESTARTSYS;
2283     }
2284     if (dev->users == 0) {
2285     - /* set au0828 interface0 to AS5 here again */
2286     - ret = usb_set_interface(dev->usbdev, 0, 5);
2287     - if (ret < 0) {
2288     - mutex_unlock(&dev->lock);
2289     - printk(KERN_INFO "Au0828 can't set alternate to 5!\n");
2290     - kfree(fh);
2291     - return -EBUSY;
2292     - }
2293     -
2294     au0828_analog_stream_enable(dev);
2295     au0828_analog_stream_reset(dev);
2296    
2297     @@ -1252,13 +1259,6 @@ static int au0828_set_format(struct au0828_dev *dev, unsigned int cmd,
2298     }
2299     }
2300    
2301     - /* set au0828 interface0 to AS5 here again */
2302     - ret = usb_set_interface(dev->usbdev, 0, 5);
2303     - if (ret < 0) {
2304     - printk(KERN_INFO "Au0828 can't set alt setting to 5!\n");
2305     - return -EBUSY;
2306     - }
2307     -
2308     au0828_analog_stream_enable(dev);
2309    
2310     return 0;
2311     diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
2312     index 90b630ccc8bc..0aefe501fd3c 100644
2313     --- a/drivers/mfd/omap-usb-host.c
2314     +++ b/drivers/mfd/omap-usb-host.c
2315     @@ -445,7 +445,7 @@ static unsigned omap_usbhs_rev1_hostconfig(struct usbhs_hcd_omap *omap,
2316    
2317     for (i = 0; i < omap->nports; i++) {
2318     if (is_ehci_phy_mode(pdata->port_mode[i])) {
2319     - reg &= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
2320     + reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
2321     break;
2322     }
2323     }
2324     diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
2325     index 19d637266fcd..71e4f6ccae2f 100644
2326     --- a/drivers/mtd/ftl.c
2327     +++ b/drivers/mtd/ftl.c
2328     @@ -1075,7 +1075,6 @@ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
2329     return;
2330     }
2331    
2332     - ftl_freepart(partition);
2333     kfree(partition);
2334     }
2335    
2336     diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
2337     index 6f55d92dc233..64d8e32b6ca0 100644
2338     --- a/drivers/mtd/nand/omap2.c
2339     +++ b/drivers/mtd/nand/omap2.c
2340     @@ -933,7 +933,7 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
2341     u32 val;
2342    
2343     val = readl(info->reg.gpmc_ecc_config);
2344     - if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs)
2345     + if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
2346     return -EINVAL;
2347    
2348     /* read ecc result */
2349     diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
2350     index 79a37f6d3307..e384844a1ae1 100644
2351     --- a/drivers/power/bq2415x_charger.c
2352     +++ b/drivers/power/bq2415x_charger.c
2353     @@ -840,8 +840,7 @@ static int bq2415x_notifier_call(struct notifier_block *nb,
2354     if (bq->automode < 1)
2355     return NOTIFY_OK;
2356    
2357     - sysfs_notify(&bq->charger.dev->kobj, NULL, "reported_mode");
2358     - bq2415x_set_mode(bq, bq->reported_mode);
2359     + schedule_delayed_work(&bq->work, 0);
2360    
2361     return NOTIFY_OK;
2362     }
2363     @@ -892,6 +891,11 @@ static void bq2415x_timer_work(struct work_struct *work)
2364     int error;
2365     int boost;
2366    
2367     + if (bq->automode > 0 && (bq->reported_mode != bq->mode)) {
2368     + sysfs_notify(&bq->charger.dev->kobj, NULL, "reported_mode");
2369     + bq2415x_set_mode(bq, bq->reported_mode);
2370     + }
2371     +
2372     if (!bq->autotimer)
2373     return;
2374    
2375     diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
2376     index f0ea4fdfde87..8b963a757883 100644
2377     --- a/drivers/regulator/arizona-ldo1.c
2378     +++ b/drivers/regulator/arizona-ldo1.c
2379     @@ -141,8 +141,6 @@ static struct regulator_ops arizona_ldo1_ops = {
2380     .map_voltage = regulator_map_voltage_linear,
2381     .get_voltage_sel = regulator_get_voltage_sel_regmap,
2382     .set_voltage_sel = regulator_set_voltage_sel_regmap,
2383     - .get_bypass = regulator_get_bypass_regmap,
2384     - .set_bypass = regulator_set_bypass_regmap,
2385     };
2386    
2387     static const struct regulator_desc arizona_ldo1 = {
2388     diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
2389     index 2e28392c2fb6..a38aafa030b3 100644
2390     --- a/drivers/scsi/bfa/bfa_ioc.h
2391     +++ b/drivers/scsi/bfa/bfa_ioc.h
2392     @@ -72,7 +72,7 @@ struct bfa_sge_s {
2393     } while (0)
2394    
2395     #define bfa_swap_words(_x) ( \
2396     - ((_x) << 32) | ((_x) >> 32))
2397     + ((u64)(_x) << 32) | ((u64)(_x) >> 32))
2398    
2399     #ifdef __BIG_ENDIAN
2400     #define bfa_sge_to_be(_x)
2401     diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
2402     index f969aca0b54e..49014a143c6a 100644
2403     --- a/drivers/scsi/scsi_devinfo.c
2404     +++ b/drivers/scsi/scsi_devinfo.c
2405     @@ -222,6 +222,7 @@ static struct {
2406     {"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
2407     {"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
2408     {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
2409     + {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
2410     {"Promise", "", NULL, BLIST_SPARSELUN},
2411     {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
2412     {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
2413     diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
2414     index 4109530e92a0..054ec2c412a4 100644
2415     --- a/drivers/scsi/scsi_scan.c
2416     +++ b/drivers/scsi/scsi_scan.c
2417     @@ -922,6 +922,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
2418     if (*bflags & BLIST_USE_10_BYTE_MS)
2419     sdev->use_10_for_ms = 1;
2420    
2421     + /* some devices don't like REPORT SUPPORTED OPERATION CODES
2422     + * and will simply timeout causing sd_mod init to take a very
2423     + * very long time */
2424     + if (*bflags & BLIST_NO_RSOC)
2425     + sdev->no_report_opcodes = 1;
2426     +
2427     /* set the device running here so that slave configure
2428     * may do I/O */
2429     ret = scsi_device_set_state(sdev, SDEV_RUNNING);
2430     @@ -950,7 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
2431    
2432     sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
2433    
2434     - if (*bflags & BLIST_SKIP_VPD_PAGES)
2435     + if (*bflags & BLIST_TRY_VPD_PAGES)
2436     + sdev->try_vpd_pages = 1;
2437     + else if (*bflags & BLIST_SKIP_VPD_PAGES)
2438     sdev->skip_vpd_pages = 1;
2439    
2440     transport_configure_device(&sdev->sdev_gendev);
2441     @@ -1236,6 +1244,12 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
2442     max_dev_lun = min(8U, max_dev_lun);
2443    
2444     /*
2445     + * Stop scanning at 255 unless BLIST_SCSI3LUN
2446     + */
2447     + if (!(bflags & BLIST_SCSI3LUN))
2448     + max_dev_lun = min(256U, max_dev_lun);
2449     +
2450     + /*
2451     * We have already scanned LUN 0, so start at LUN 1. Keep scanning
2452     * until we reach the max, or no LUN is found and we are not
2453     * sparse_lun.
2454     diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
2455     index d47ffc8d3e43..e3e794ee7ddd 100644
2456     --- a/drivers/scsi/scsi_transport_srp.c
2457     +++ b/drivers/scsi/scsi_transport_srp.c
2458     @@ -473,7 +473,8 @@ static void __srp_start_tl_fail_timers(struct srp_rport *rport)
2459     if (delay > 0)
2460     queue_delayed_work(system_long_wq, &rport->reconnect_work,
2461     1UL * delay * HZ);
2462     - if (srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
2463     + if ((fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0) &&
2464     + srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
2465     pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev),
2466     rport->state);
2467     scsi_target_block(&shost->shost_gendev);
2468     diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2469     index 36d1a23f14be..e8abb731c7ec 100644
2470     --- a/drivers/scsi/sd.c
2471     +++ b/drivers/scsi/sd.c
2472     @@ -2686,6 +2686,11 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
2473    
2474     static int sd_try_extended_inquiry(struct scsi_device *sdp)
2475     {
2476     + /* Attempt VPD inquiry if the device blacklist explicitly calls
2477     + * for it.
2478     + */
2479     + if (sdp->try_vpd_pages)
2480     + return 1;
2481     /*
2482     * Although VPD inquiries can go to SCSI-2 type devices,
2483     * some USB ones crash on receiving them, and the pages
2484     diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
2485     index 9969fa1ef7c4..ed0f899e8aa5 100644
2486     --- a/drivers/scsi/storvsc_drv.c
2487     +++ b/drivers/scsi/storvsc_drv.c
2488     @@ -33,6 +33,7 @@
2489     #include <linux/device.h>
2490     #include <linux/hyperv.h>
2491     #include <linux/mempool.h>
2492     +#include <linux/blkdev.h>
2493     #include <scsi/scsi.h>
2494     #include <scsi/scsi_cmnd.h>
2495     #include <scsi/scsi_host.h>
2496     @@ -330,17 +331,17 @@ static int storvsc_timeout = 180;
2497    
2498     static void storvsc_on_channel_callback(void *context);
2499    
2500     -/*
2501     - * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
2502     - * reality, the path/target is not used (ie always set to 0) so our
2503     - * scsi host adapter essentially has 1 bus with 1 target that contains
2504     - * up to 256 luns.
2505     - */
2506     -#define STORVSC_MAX_LUNS_PER_TARGET 64
2507     -#define STORVSC_MAX_TARGETS 1
2508     -#define STORVSC_MAX_CHANNELS 1
2509     +#define STORVSC_MAX_LUNS_PER_TARGET 255
2510     +#define STORVSC_MAX_TARGETS 2
2511     +#define STORVSC_MAX_CHANNELS 8
2512    
2513     +#define STORVSC_FC_MAX_LUNS_PER_TARGET 255
2514     +#define STORVSC_FC_MAX_TARGETS 128
2515     +#define STORVSC_FC_MAX_CHANNELS 8
2516    
2517     +#define STORVSC_IDE_MAX_LUNS_PER_TARGET 64
2518     +#define STORVSC_IDE_MAX_TARGETS 1
2519     +#define STORVSC_IDE_MAX_CHANNELS 1
2520    
2521     struct storvsc_cmd_request {
2522     struct list_head entry;
2523     @@ -1017,6 +1018,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
2524     case ATA_12:
2525     set_host_byte(scmnd, DID_PASSTHROUGH);
2526     break;
2527     + /*
2528     + * On Some Windows hosts TEST_UNIT_READY command can return
2529     + * SRB_STATUS_ERROR, let the upper level code deal with it
2530     + * based on the sense information.
2531     + */
2532     + case TEST_UNIT_READY:
2533     + break;
2534     default:
2535     set_host_byte(scmnd, DID_TARGET_FAILURE);
2536     }
2537     @@ -1518,6 +1526,16 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
2538     return SUCCESS;
2539     }
2540    
2541     +/*
2542     + * The host guarantees to respond to each command, although I/O latencies might
2543     + * be unbounded on Azure. Reset the timer unconditionally to give the host a
2544     + * chance to perform EH.
2545     + */
2546     +static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
2547     +{
2548     + return BLK_EH_RESET_TIMER;
2549     +}
2550     +
2551     static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
2552     {
2553     bool allowed = true;
2554     @@ -1553,9 +1571,19 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
2555     struct vmscsi_request *vm_srb;
2556     struct stor_mem_pools *memp = scmnd->device->hostdata;
2557    
2558     - if (!storvsc_scsi_cmd_ok(scmnd)) {
2559     - scmnd->scsi_done(scmnd);
2560     - return 0;
2561     + if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
2562     + /*
2563     + * On legacy hosts filter unimplemented commands.
2564     + * Future hosts are expected to correctly handle
2565     + * unsupported commands. Furthermore, it is
2566     + * possible that some of the currently
2567     + * unsupported commands maybe supported in
2568     + * future versions of the host.
2569     + */
2570     + if (!storvsc_scsi_cmd_ok(scmnd)) {
2571     + scmnd->scsi_done(scmnd);
2572     + return 0;
2573     + }
2574     }
2575    
2576     request_size = sizeof(struct storvsc_cmd_request);
2577     @@ -1580,26 +1608,24 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
2578     vm_srb = &cmd_request->vstor_packet.vm_srb;
2579     vm_srb->win8_extension.time_out_value = 60;
2580    
2581     + vm_srb->win8_extension.srb_flags |=
2582     + (SRB_FLAGS_QUEUE_ACTION_ENABLE |
2583     + SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
2584    
2585     /* Build the SRB */
2586     switch (scmnd->sc_data_direction) {
2587     case DMA_TO_DEVICE:
2588     vm_srb->data_in = WRITE_TYPE;
2589     vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
2590     - vm_srb->win8_extension.srb_flags |=
2591     - (SRB_FLAGS_QUEUE_ACTION_ENABLE |
2592     - SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
2593     break;
2594     case DMA_FROM_DEVICE:
2595     vm_srb->data_in = READ_TYPE;
2596     vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
2597     - vm_srb->win8_extension.srb_flags |=
2598     - (SRB_FLAGS_QUEUE_ACTION_ENABLE |
2599     - SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
2600     break;
2601     default:
2602     vm_srb->data_in = UNKNOWN_TYPE;
2603     - vm_srb->win8_extension.srb_flags = 0;
2604     + vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN |
2605     + SRB_FLAGS_DATA_OUT);
2606     break;
2607     }
2608    
2609     @@ -1687,11 +1713,11 @@ static struct scsi_host_template scsi_driver = {
2610     .bios_param = storvsc_get_chs,
2611     .queuecommand = storvsc_queuecommand,
2612     .eh_host_reset_handler = storvsc_host_reset_handler,
2613     + .eh_timed_out = storvsc_eh_timed_out,
2614     .slave_alloc = storvsc_device_alloc,
2615     .slave_destroy = storvsc_device_destroy,
2616     .slave_configure = storvsc_device_configure,
2617     - .cmd_per_lun = 1,
2618     - /* 64 max_queue * 1 target */
2619     + .cmd_per_lun = 255,
2620     .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
2621     .this_id = -1,
2622     /* no use setting to 0 since ll_blk_rw reset it to 1 */
2623     @@ -1743,19 +1769,25 @@ static int storvsc_probe(struct hv_device *device,
2624     * set state to properly communicate with the host.
2625     */
2626    
2627     - if (vmbus_proto_version == VERSION_WIN8) {
2628     - sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
2629     - vmscsi_size_delta = 0;
2630     - vmstor_current_major = VMSTOR_WIN8_MAJOR;
2631     - vmstor_current_minor = VMSTOR_WIN8_MINOR;
2632     - } else {
2633     + switch (vmbus_proto_version) {
2634     + case VERSION_WS2008:
2635     + case VERSION_WIN7:
2636     sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
2637     vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
2638     vmstor_current_major = VMSTOR_WIN7_MAJOR;
2639     vmstor_current_minor = VMSTOR_WIN7_MINOR;
2640     + break;
2641     + default:
2642     + sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
2643     + vmscsi_size_delta = 0;
2644     + vmstor_current_major = VMSTOR_WIN8_MAJOR;
2645     + vmstor_current_minor = VMSTOR_WIN8_MINOR;
2646     + break;
2647     }
2648    
2649     -
2650     + if (dev_id->driver_data == SFC_GUID)
2651     + scsi_driver.can_queue = (STORVSC_MAX_IO_REQUESTS *
2652     + STORVSC_FC_MAX_TARGETS);
2653     host = scsi_host_alloc(&scsi_driver,
2654     sizeof(struct hv_host_device));
2655     if (!host)
2656     @@ -1789,12 +1821,25 @@ static int storvsc_probe(struct hv_device *device,
2657     host_dev->path = stor_device->path_id;
2658     host_dev->target = stor_device->target_id;
2659    
2660     - /* max # of devices per target */
2661     - host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
2662     - /* max # of targets per channel */
2663     - host->max_id = STORVSC_MAX_TARGETS;
2664     - /* max # of channels */
2665     - host->max_channel = STORVSC_MAX_CHANNELS - 1;
2666     + switch (dev_id->driver_data) {
2667     + case SFC_GUID:
2668     + host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET;
2669     + host->max_id = STORVSC_FC_MAX_TARGETS;
2670     + host->max_channel = STORVSC_FC_MAX_CHANNELS - 1;
2671     + break;
2672     +
2673     + case SCSI_GUID:
2674     + host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
2675     + host->max_id = STORVSC_MAX_TARGETS;
2676     + host->max_channel = STORVSC_MAX_CHANNELS - 1;
2677     + break;
2678     +
2679     + default:
2680     + host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET;
2681     + host->max_id = STORVSC_IDE_MAX_TARGETS;
2682     + host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1;
2683     + break;
2684     + }
2685     /* max cmd length */
2686     host->max_cmd_len = STORVSC_MAX_CMD_LEN;
2687    
2688     diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
2689     index a72127f08e39..a64f1557c156 100644
2690     --- a/drivers/spi/spi-omap2-mcspi.c
2691     +++ b/drivers/spi/spi-omap2-mcspi.c
2692     @@ -147,6 +147,7 @@ struct omap2_mcspi_cs {
2693     void __iomem *base;
2694     unsigned long phys;
2695     int word_len;
2696     + u16 mode;
2697     struct list_head node;
2698     /* Context save and restore shadow register */
2699     u32 chconf0;
2700     @@ -899,6 +900,8 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
2701    
2702     mcspi_write_chconf0(spi, l);
2703    
2704     + cs->mode = spi->mode;
2705     +
2706     dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
2707     OMAP2_MCSPI_MAX_FREQ >> div,
2708     (spi->mode & SPI_CPHA) ? "trailing" : "leading",
2709     @@ -971,6 +974,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
2710     return -ENOMEM;
2711     cs->base = mcspi->base + spi->chip_select * 0x14;
2712     cs->phys = mcspi->phys + spi->chip_select * 0x14;
2713     + cs->mode = 0;
2714     cs->chconf0 = 0;
2715     spi->controller_state = cs;
2716     /* Link this to context save list */
2717     @@ -1051,6 +1055,16 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
2718     cs = spi->controller_state;
2719     cd = spi->controller_data;
2720    
2721     + /*
2722     + * The slave driver could have changed spi->mode in which case
2723     + * it will be different from cs->mode (the current hardware setup).
2724     + * If so, set par_override (even though its not a parity issue) so
2725     + * omap2_mcspi_setup_transfer will be called to configure the hardware
2726     + * with the correct mode on the first iteration of the loop below.
2727     + */
2728     + if (spi->mode != cs->mode)
2729     + par_override = 1;
2730     +
2731     omap2_mcspi_set_enable(spi, 0);
2732     list_for_each_entry(t, &m->transfers, transfer_list) {
2733     if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
2734     diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
2735     index 7f2121fe2622..977b0619bb78 100644
2736     --- a/drivers/spi/spi-orion.c
2737     +++ b/drivers/spi/spi-orion.c
2738     @@ -404,8 +404,6 @@ static int orion_spi_probe(struct platform_device *pdev)
2739     struct resource *r;
2740     unsigned long tclk_hz;
2741     int status = 0;
2742     - const u32 *iprop;
2743     - int size;
2744    
2745     master = spi_alloc_master(&pdev->dev, sizeof(*spi));
2746     if (master == NULL) {
2747     @@ -416,10 +414,10 @@ static int orion_spi_probe(struct platform_device *pdev)
2748     if (pdev->id != -1)
2749     master->bus_num = pdev->id;
2750     if (pdev->dev.of_node) {
2751     - iprop = of_get_property(pdev->dev.of_node, "cell-index",
2752     - &size);
2753     - if (iprop && size == sizeof(*iprop))
2754     - master->bus_num = *iprop;
2755     + u32 cell_index;
2756     + if (!of_property_read_u32(pdev->dev.of_node, "cell-index",
2757     + &cell_index))
2758     + master->bus_num = cell_index;
2759     }
2760    
2761     /* we support only mode 0, and no options */
2762     diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
2763     index c702fc536a77..ced9ecffa163 100644
2764     --- a/drivers/spi/spi-pxa2xx.c
2765     +++ b/drivers/spi/spi-pxa2xx.c
2766     @@ -1078,6 +1078,7 @@ static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
2767     { "INT3430", 0 },
2768     { "INT3431", 0 },
2769     { "80860F0E", 0 },
2770     + { "8086228E", 0 },
2771     { },
2772     };
2773     MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
2774     diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
2775     index 172a8bc27abd..ef7d446e07ee 100644
2776     --- a/drivers/xen/events/events_fifo.c
2777     +++ b/drivers/xen/events/events_fifo.c
2778     @@ -99,6 +99,25 @@ static unsigned evtchn_fifo_nr_channels(void)
2779     return event_array_pages * EVENT_WORDS_PER_PAGE;
2780     }
2781    
2782     +static int init_control_block(int cpu,
2783     + struct evtchn_fifo_control_block *control_block)
2784     +{
2785     + struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
2786     + struct evtchn_init_control init_control;
2787     + unsigned int i;
2788     +
2789     + /* Reset the control block and the local HEADs. */
2790     + clear_page(control_block);
2791     + for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++)
2792     + q->head[i] = 0;
2793     +
2794     + init_control.control_gfn = virt_to_mfn(control_block);
2795     + init_control.offset = 0;
2796     + init_control.vcpu = cpu;
2797     +
2798     + return HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control);
2799     +}
2800     +
2801     static void free_unused_array_pages(void)
2802     {
2803     unsigned i;
2804     @@ -327,7 +346,6 @@ static void evtchn_fifo_resume(void)
2805    
2806     for_each_possible_cpu(cpu) {
2807     void *control_block = per_cpu(cpu_control_block, cpu);
2808     - struct evtchn_init_control init_control;
2809     int ret;
2810    
2811     if (!control_block)
2812     @@ -344,12 +362,7 @@ static void evtchn_fifo_resume(void)
2813     continue;
2814     }
2815    
2816     - init_control.control_gfn = virt_to_mfn(control_block);
2817     - init_control.offset = 0;
2818     - init_control.vcpu = cpu;
2819     -
2820     - ret = HYPERVISOR_event_channel_op(EVTCHNOP_init_control,
2821     - &init_control);
2822     + ret = init_control_block(cpu, control_block);
2823     if (ret < 0)
2824     BUG();
2825     }
2826     @@ -377,30 +390,25 @@ static const struct evtchn_ops evtchn_ops_fifo = {
2827     .resume = evtchn_fifo_resume,
2828     };
2829    
2830     -static int evtchn_fifo_init_control_block(unsigned cpu)
2831     +static int evtchn_fifo_alloc_control_block(unsigned cpu)
2832     {
2833     - struct page *control_block = NULL;
2834     - struct evtchn_init_control init_control;
2835     + void *control_block = NULL;
2836     int ret = -ENOMEM;
2837    
2838     - control_block = alloc_page(GFP_KERNEL|__GFP_ZERO);
2839     + control_block = (void *)__get_free_page(GFP_KERNEL);
2840     if (control_block == NULL)
2841     goto error;
2842    
2843     - init_control.control_gfn = virt_to_mfn(page_address(control_block));
2844     - init_control.offset = 0;
2845     - init_control.vcpu = cpu;
2846     -
2847     - ret = HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control);
2848     + ret = init_control_block(cpu, control_block);
2849     if (ret < 0)
2850     goto error;
2851    
2852     - per_cpu(cpu_control_block, cpu) = page_address(control_block);
2853     + per_cpu(cpu_control_block, cpu) = control_block;
2854    
2855     return 0;
2856    
2857     error:
2858     - __free_page(control_block);
2859     + free_page((unsigned long)control_block);
2860     return ret;
2861     }
2862    
2863     @@ -414,7 +422,7 @@ static int evtchn_fifo_cpu_notification(struct notifier_block *self,
2864     switch (action) {
2865     case CPU_UP_PREPARE:
2866     if (!per_cpu(cpu_control_block, cpu))
2867     - ret = evtchn_fifo_init_control_block(cpu);
2868     + ret = evtchn_fifo_alloc_control_block(cpu);
2869     break;
2870     default:
2871     break;
2872     @@ -431,7 +439,7 @@ int __init xen_evtchn_fifo_init(void)
2873     int cpu = get_cpu();
2874     int ret;
2875    
2876     - ret = evtchn_fifo_init_control_block(cpu);
2877     + ret = evtchn_fifo_alloc_control_block(cpu);
2878     if (ret < 0)
2879     goto out;
2880    
2881     diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
2882     index 30f6e9251a4a..f15d4353f30f 100644
2883     --- a/fs/cifs/cifsglob.h
2884     +++ b/fs/cifs/cifsglob.h
2885     @@ -70,11 +70,6 @@
2886     #define SERVER_NAME_LENGTH 40
2887     #define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1)
2888    
2889     -/* used to define string lengths for reversing unicode strings */
2890     -/* (256+1)*2 = 514 */
2891     -/* (max path length + 1 for null) * 2 for unicode */
2892     -#define MAX_NAME 514
2893     -
2894     /* SMB echo "timeout" -- FIXME: tunable? */
2895     #define SMB_ECHO_INTERVAL (60 * HZ)
2896    
2897     diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2898     index 87c4dd072cde..8175b18df819 100644
2899     --- a/fs/cifs/file.c
2900     +++ b/fs/cifs/file.c
2901     @@ -2844,7 +2844,7 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2902     total_read += result;
2903     }
2904    
2905     - return total_read > 0 ? total_read : result;
2906     + return total_read > 0 && result != -EAGAIN ? total_read : result;
2907     }
2908    
2909     static ssize_t
2910     @@ -3267,7 +3267,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
2911     total_read += result;
2912     }
2913    
2914     - return total_read > 0 ? total_read : result;
2915     + return total_read > 0 && result != -EAGAIN ? total_read : result;
2916     }
2917    
2918     static int cifs_readpages(struct file *file, struct address_space *mapping,
2919     diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
2920     index aadc2b68678b..f2ddcf7ac9c3 100644
2921     --- a/fs/cifs/inode.c
2922     +++ b/fs/cifs/inode.c
2923     @@ -1706,13 +1706,22 @@ cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
2924     unlink_target:
2925     /* Try unlinking the target dentry if it's not negative */
2926     if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) {
2927     - tmprc = cifs_unlink(target_dir, target_dentry);
2928     + if (d_is_dir(target_dentry))
2929     + tmprc = cifs_rmdir(target_dir, target_dentry);
2930     + else
2931     + tmprc = cifs_unlink(target_dir, target_dentry);
2932     if (tmprc)
2933     goto cifs_rename_exit;
2934     rc = cifs_do_rename(xid, source_dentry, from_name,
2935     target_dentry, to_name);
2936     }
2937    
2938     + /* force revalidate to go get info when needed */
2939     + CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
2940     +
2941     + source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime =
2942     + target_dir->i_mtime = current_fs_time(source_dir->i_sb);
2943     +
2944     cifs_rename_exit:
2945     kfree(info_buf_source);
2946     kfree(from_name);
2947     diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
2948     index b15862e0f68c..2bbf11b09214 100644
2949     --- a/fs/cifs/readdir.c
2950     +++ b/fs/cifs/readdir.c
2951     @@ -596,8 +596,8 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
2952     if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
2953     cfile->invalidHandle = true;
2954     spin_unlock(&cifs_file_list_lock);
2955     - if (server->ops->close)
2956     - server->ops->close(xid, tcon, &cfile->fid);
2957     + if (server->ops->close_dir)
2958     + server->ops->close_dir(xid, tcon, &cfile->fid);
2959     } else
2960     spin_unlock(&cifs_file_list_lock);
2961     if (cfile->srch_inf.ntwrk_buf_start) {
2962     diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
2963     index 3f17b4550831..45992944e238 100644
2964     --- a/fs/cifs/smb2file.c
2965     +++ b/fs/cifs/smb2file.c
2966     @@ -50,7 +50,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
2967     goto out;
2968     }
2969    
2970     - smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
2971     + smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
2972     GFP_KERNEL);
2973     if (smb2_data == NULL) {
2974     rc = -ENOMEM;
2975     diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
2976     index 84c012a6aba0..215f8d3e3e53 100644
2977     --- a/fs/cifs/smb2inode.c
2978     +++ b/fs/cifs/smb2inode.c
2979     @@ -131,7 +131,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
2980     *adjust_tz = false;
2981     *symlink = false;
2982    
2983     - smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
2984     + smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
2985     GFP_KERNEL);
2986     if (smb2_data == NULL)
2987     return -ENOMEM;
2988     diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
2989     index 94bd4fbb13d3..e31a9dfdcd39 100644
2990     --- a/fs/cifs/smb2maperror.c
2991     +++ b/fs/cifs/smb2maperror.c
2992     @@ -605,7 +605,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
2993     {STATUS_MAPPED_FILE_SIZE_ZERO, -EIO, "STATUS_MAPPED_FILE_SIZE_ZERO"},
2994     {STATUS_TOO_MANY_OPENED_FILES, -EMFILE, "STATUS_TOO_MANY_OPENED_FILES"},
2995     {STATUS_CANCELLED, -EIO, "STATUS_CANCELLED"},
2996     - {STATUS_CANNOT_DELETE, -EIO, "STATUS_CANNOT_DELETE"},
2997     + {STATUS_CANNOT_DELETE, -EACCES, "STATUS_CANNOT_DELETE"},
2998     {STATUS_INVALID_COMPUTER_NAME, -EIO, "STATUS_INVALID_COMPUTER_NAME"},
2999     {STATUS_FILE_DELETED, -EIO, "STATUS_FILE_DELETED"},
3000     {STATUS_SPECIAL_ACCOUNT, -EIO, "STATUS_SPECIAL_ACCOUNT"},
3001     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3002     index 35ddc3ed119d..f8977b2d9187 100644
3003     --- a/fs/cifs/smb2ops.c
3004     +++ b/fs/cifs/smb2ops.c
3005     @@ -339,7 +339,7 @@ smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
3006     int rc;
3007     struct smb2_file_all_info *smb2_data;
3008    
3009     - smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
3010     + smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
3011     GFP_KERNEL);
3012     if (smb2_data == NULL)
3013     return -ENOMEM;
3014     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3015     index 049a3f2693ba..9aab8fe0e508 100644
3016     --- a/fs/cifs/smb2pdu.c
3017     +++ b/fs/cifs/smb2pdu.c
3018     @@ -916,7 +916,8 @@ tcon_exit:
3019     tcon_error_exit:
3020     if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
3021     cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
3022     - tcon->bad_network_name = true;
3023     + if (tcon)
3024     + tcon->bad_network_name = true;
3025     }
3026     goto tcon_exit;
3027     }
3028     @@ -1539,7 +1540,7 @@ SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
3029     {
3030     return query_info(xid, tcon, persistent_fid, volatile_fid,
3031     FILE_ALL_INFORMATION,
3032     - sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
3033     + sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
3034     sizeof(struct smb2_file_all_info), data);
3035     }
3036    
3037     diff --git a/fs/dcache.c b/fs/dcache.c
3038     index 7f3b4004c6c3..58d57da91d2a 100644
3039     --- a/fs/dcache.c
3040     +++ b/fs/dcache.c
3041     @@ -106,8 +106,7 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
3042     unsigned int hash)
3043     {
3044     hash += (unsigned long) parent / L1_CACHE_BYTES;
3045     - hash = hash + (hash >> d_hash_shift);
3046     - return dentry_hashtable + (hash & d_hash_mask);
3047     + return dentry_hashtable + hash_32(hash, d_hash_shift);
3048     }
3049    
3050     /* Statistics gathering. */
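The d_hash() hunk above (and the fold_hash() hunk in fs/namei.c just below) replaces hand-rolled shift-and-add folding with the library helpers hash_32()/hash_64(). As a rough sketch of what hash_32() computes in this kernel series — the multiplier is assumed to be GOLDEN_RATIO_PRIME_32 from include/linux/hash.h, and hash_32_sketch is an illustrative stand-in, not kernel code:

	static inline unsigned int hash_32_sketch(unsigned int val, unsigned int bits)
	{
		/* Multiply by a golden-ratio-derived prime and keep the top
		 * "bits" bits, which are the best-mixed ones; the result
		 * indexes a table of 2^bits buckets. */
		return (val * 0x9e370001u) >> (32 - bits);
	}

So dentry_hashtable + hash_32_sketch(hash, d_hash_shift) picks a bucket the same way the new d_hash() does, which is intended to spread entries more evenly than the old add-and-mask fold.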
3051     diff --git a/fs/namei.c b/fs/namei.c
3052     index bdea10963aa5..d5a4faeb39a5 100644
3053     --- a/fs/namei.c
3054     +++ b/fs/namei.c
3055     @@ -34,6 +34,7 @@
3056     #include <linux/device_cgroup.h>
3057     #include <linux/fs_struct.h>
3058     #include <linux/posix_acl.h>
3059     +#include <linux/hash.h>
3060     #include <asm/uaccess.h>
3061    
3062     #include "internal.h"
3063     @@ -1624,8 +1625,7 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
3064    
3065     static inline unsigned int fold_hash(unsigned long hash)
3066     {
3067     - hash += hash >> (8*sizeof(int));
3068     - return hash;
3069     + return hash_64(hash, 32);
3070     }
3071    
3072     #else /* 32-bit case */
3073     @@ -1797,7 +1797,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
3074     if (err)
3075     return err;
3076     }
3077     - if (!d_is_directory(nd->path.dentry)) {
3078     + if (!d_can_lookup(nd->path.dentry)) {
3079     err = -ENOTDIR;
3080     break;
3081     }
3082     @@ -1818,7 +1818,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
3083     struct dentry *root = nd->root.dentry;
3084     struct inode *inode = root->d_inode;
3085     if (*name) {
3086     - if (!d_is_directory(root))
3087     + if (!d_can_lookup(root))
3088     return -ENOTDIR;
3089     retval = inode_permission(inode, MAY_EXEC);
3090     if (retval)
3091     @@ -1874,7 +1874,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
3092     dentry = f.file->f_path.dentry;
3093    
3094     if (*name) {
3095     - if (!d_is_directory(dentry)) {
3096     + if (!d_can_lookup(dentry)) {
3097     fdput(f);
3098     return -ENOTDIR;
3099     }
3100     @@ -1956,7 +1956,7 @@ static int path_lookupat(int dfd, const char *name,
3101     err = complete_walk(nd);
3102    
3103     if (!err && nd->flags & LOOKUP_DIRECTORY) {
3104     - if (!d_is_directory(nd->path.dentry)) {
3105     + if (!d_can_lookup(nd->path.dentry)) {
3106     path_put(&nd->path);
3107     err = -ENOTDIR;
3108     }
3109     @@ -2416,11 +2416,11 @@ static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
3110     IS_IMMUTABLE(inode) || IS_SWAPFILE(inode))
3111     return -EPERM;
3112     if (isdir) {
3113     - if (!d_is_directory(victim) && !d_is_autodir(victim))
3114     + if (!d_is_dir(victim))
3115     return -ENOTDIR;
3116     if (IS_ROOT(victim))
3117     return -EBUSY;
3118     - } else if (d_is_directory(victim) || d_is_autodir(victim))
3119     + } else if (d_is_dir(victim))
3120     return -EISDIR;
3121     if (IS_DEADDIR(dir))
3122     return -ENOENT;
3123     @@ -3018,11 +3018,10 @@ finish_open:
3124     }
3125     audit_inode(name, nd->path.dentry, 0);
3126     error = -EISDIR;
3127     - if ((open_flag & O_CREAT) &&
3128     - (d_is_directory(nd->path.dentry) || d_is_autodir(nd->path.dentry)))
3129     + if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
3130     goto out;
3131     error = -ENOTDIR;
3132     - if ((nd->flags & LOOKUP_DIRECTORY) && !d_is_directory(nd->path.dentry))
3133     + if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
3134     goto out;
3135     if (!S_ISREG(nd->inode->i_mode))
3136     will_truncate = false;
3137     @@ -3746,7 +3745,7 @@ exit1:
3138     slashes:
3139     if (d_is_negative(dentry))
3140     error = -ENOENT;
3141     - else if (d_is_directory(dentry) || d_is_autodir(dentry))
3142     + else if (d_is_dir(dentry))
3143     error = -EISDIR;
3144     else
3145     error = -ENOTDIR;
3146     @@ -4125,7 +4124,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
3147     struct inode **delegated_inode)
3148     {
3149     int error;
3150     - int is_dir = d_is_directory(old_dentry) || d_is_autodir(old_dentry);
3151     + int is_dir = d_is_dir(old_dentry);
3152     const unsigned char *old_name;
3153    
3154     if (old_dentry->d_inode == new_dentry->d_inode)
3155     @@ -4218,7 +4217,7 @@ retry_deleg:
3156     if (d_is_negative(old_dentry))
3157     goto exit4;
3158     /* unless the source is a directory trailing slashes give -ENOTDIR */
3159     - if (!d_is_directory(old_dentry) && !d_is_autodir(old_dentry)) {
3160     + if (!d_is_dir(old_dentry)) {
3161     error = -ENOTDIR;
3162     if (oldnd.last.name[oldnd.last.len])
3163     goto exit4;
3164     diff --git a/fs/namespace.c b/fs/namespace.c
3165     index 65233a5f390a..75536db4b69b 100644
3166     --- a/fs/namespace.c
3167     +++ b/fs/namespace.c
3168     @@ -777,6 +777,20 @@ static void attach_mnt(struct mount *mnt,
3169     list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
3170     }
3171    
3172     +static void attach_shadowed(struct mount *mnt,
3173     + struct mount *parent,
3174     + struct mount *shadows)
3175     +{
3176     + if (shadows) {
3177     + hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
3178     + list_add(&mnt->mnt_child, &shadows->mnt_child);
3179     + } else {
3180     + hlist_add_head_rcu(&mnt->mnt_hash,
3181     + m_hash(&parent->mnt, mnt->mnt_mountpoint));
3182     + list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
3183     + }
3184     +}
3185     +
3186     /*
3187     * vfsmount lock must be held for write
3188     */
3189     @@ -795,12 +809,7 @@ static void commit_tree(struct mount *mnt, struct mount *shadows)
3190    
3191     list_splice(&head, n->list.prev);
3192    
3193     - if (shadows)
3194     - hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
3195     - else
3196     - hlist_add_head_rcu(&mnt->mnt_hash,
3197     - m_hash(&parent->mnt, mnt->mnt_mountpoint));
3198     - list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
3199     + attach_shadowed(mnt, parent, shadows);
3200     touch_mnt_namespace(n);
3201     }
3202    
3203     @@ -887,8 +896,21 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
3204    
3205     mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
3206     /* Don't allow unprivileged users to change mount flags */
3207     - if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
3208     - mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
3209     + if (flag & CL_UNPRIVILEGED) {
3210     + mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
3211     +
3212     + if (mnt->mnt.mnt_flags & MNT_READONLY)
3213     + mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
3214     +
3215     + if (mnt->mnt.mnt_flags & MNT_NODEV)
3216     + mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;
3217     +
3218     + if (mnt->mnt.mnt_flags & MNT_NOSUID)
3219     + mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;
3220     +
3221     + if (mnt->mnt.mnt_flags & MNT_NOEXEC)
3222     + mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
3223     + }
3224    
3225     /* Don't allow unprivileged users to reveal what is under a mount */
3226     if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire))
3227     @@ -1204,6 +1226,11 @@ static void namespace_unlock(void)
3228     head.first->pprev = &head.first;
3229     INIT_HLIST_HEAD(&unmounted);
3230    
3231     + /* undo decrements we'd done in umount_tree() */
3232     + hlist_for_each_entry(mnt, &head, mnt_hash)
3233     + if (mnt->mnt_ex_mountpoint.mnt)
3234     + mntget(mnt->mnt_ex_mountpoint.mnt);
3235     +
3236     up_write(&namespace_sem);
3237    
3238     synchronize_rcu();
3239     @@ -1240,6 +1267,9 @@ void umount_tree(struct mount *mnt, int how)
3240     hlist_add_head(&p->mnt_hash, &tmp_list);
3241     }
3242    
3243     + hlist_for_each_entry(p, &tmp_list, mnt_hash)
3244     + list_del_init(&p->mnt_child);
3245     +
3246     if (how)
3247     propagate_umount(&tmp_list);
3248    
3249     @@ -1250,9 +1280,9 @@ void umount_tree(struct mount *mnt, int how)
3250     p->mnt_ns = NULL;
3251     if (how < 2)
3252     p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
3253     - list_del_init(&p->mnt_child);
3254     if (mnt_has_parent(p)) {
3255     put_mountpoint(p->mnt_mp);
3256     + mnt_add_count(p->mnt_parent, -1);
3257     /* move the reference to mountpoint into ->mnt_ex_mountpoint */
3258     p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint;
3259     p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt;
3260     @@ -1483,6 +1513,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
3261     continue;
3262    
3263     for (s = r; s; s = next_mnt(s, r)) {
3264     + struct mount *t = NULL;
3265     if (!(flag & CL_COPY_UNBINDABLE) &&
3266     IS_MNT_UNBINDABLE(s)) {
3267     s = skip_mnt_tree(s);
3268     @@ -1504,7 +1535,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
3269     goto out;
3270     lock_mount_hash();
3271     list_add_tail(&q->mnt_list, &res->mnt_list);
3272     - attach_mnt(q, parent, p->mnt_mp);
3273     + mnt_set_mountpoint(parent, p->mnt_mp, q);
3274     + if (!list_empty(&parent->mnt_mounts)) {
3275     + t = list_last_entry(&parent->mnt_mounts,
3276     + struct mount, mnt_child);
3277     + if (t->mnt_mp != p->mnt_mp)
3278     + t = NULL;
3279     + }
3280     + attach_shadowed(q, parent, t);
3281     unlock_mount_hash();
3282     }
3283     }
3284     @@ -1887,9 +1925,6 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
3285     if (readonly_request == __mnt_is_readonly(mnt))
3286     return 0;
3287    
3288     - if (mnt->mnt_flags & MNT_LOCK_READONLY)
3289     - return -EPERM;
3290     -
3291     if (readonly_request)
3292     error = mnt_make_readonly(real_mount(mnt));
3293     else
3294     @@ -1915,6 +1950,33 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
3295     if (path->dentry != path->mnt->mnt_root)
3296     return -EINVAL;
3297    
3298     + /* Don't allow changing of locked mnt flags.
3299     + *
3300     + * No locks need to be held here while testing the various
3301     + * MNT_LOCK flags because those flags can never be cleared
3302     + * once they are set.
3303     + */
3304     + if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
3305     + !(mnt_flags & MNT_READONLY)) {
3306     + return -EPERM;
3307     + }
3308     + if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
3309     + !(mnt_flags & MNT_NODEV)) {
3310     + return -EPERM;
3311     + }
3312     + if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
3313     + !(mnt_flags & MNT_NOSUID)) {
3314     + return -EPERM;
3315     + }
3316     + if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
3317     + !(mnt_flags & MNT_NOEXEC)) {
3318     + return -EPERM;
3319     + }
3320     + if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
3321     + ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
3322     + return -EPERM;
3323     + }
3324     +
3325     err = security_sb_remount(sb, data);
3326     if (err)
3327     return err;
3328     @@ -1928,7 +1990,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
3329     err = do_remount_sb(sb, flags, data, 0);
3330     if (!err) {
3331     lock_mount_hash();
3332     - mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
3333     + mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
3334     mnt->mnt.mnt_flags = mnt_flags;
3335     touch_mnt_namespace(mnt->mnt_ns);
3336     unlock_mount_hash();
3337     @@ -2113,7 +2175,7 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
3338     */
3339     if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
3340     flags |= MS_NODEV;
3341     - mnt_flags |= MNT_NODEV;
3342     + mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
3343     }
3344     }
3345    
3346     @@ -2427,6 +2489,14 @@ long do_mount(const char *dev_name, const char *dir_name,
3347     if (flags & MS_RDONLY)
3348     mnt_flags |= MNT_READONLY;
3349    
3350     + /* The default atime for remount is preservation */
3351     + if ((flags & MS_REMOUNT) &&
3352     + ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
3353     + MS_STRICTATIME)) == 0)) {
3354     + mnt_flags &= ~MNT_ATIME_MASK;
3355     + mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
3356     + }
3357     +
3358     flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
3359     MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
3360     MS_STRICTATIME);
3361     diff --git a/fs/pnode.c b/fs/pnode.c
3362     index a364a704333b..b7f831089500 100644
3363     --- a/fs/pnode.c
3364     +++ b/fs/pnode.c
3365     @@ -381,6 +381,7 @@ static void __propagate_umount(struct mount *mnt)
3366     * other children
3367     */
3368     if (child && list_empty(&child->mnt_mounts)) {
3369     + list_del_init(&child->mnt_child);
3370     hlist_del_init_rcu(&child->mnt_hash);
3371     hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
3372     }
3373     diff --git a/fs/proc/array.c b/fs/proc/array.c
3374     index 656e401794de..baf3464bbce0 100644
3375     --- a/fs/proc/array.c
3376     +++ b/fs/proc/array.c
3377     @@ -297,15 +297,11 @@ static void render_cap_t(struct seq_file *m, const char *header,
3378     seq_puts(m, header);
3379     CAP_FOR_EACH_U32(__capi) {
3380     seq_printf(m, "%08x",
3381     - a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
3382     + a->cap[CAP_LAST_U32 - __capi]);
3383     }
3384     seq_putc(m, '\n');
3385     }
3386    
3387     -/* Remove non-existent capabilities */
3388     -#define NORM_CAPS(v) (v.cap[CAP_TO_INDEX(CAP_LAST_CAP)] &= \
3389     - CAP_TO_MASK(CAP_LAST_CAP + 1) - 1)
3390     -
3391     static inline void task_cap(struct seq_file *m, struct task_struct *p)
3392     {
3393     const struct cred *cred;
3394     @@ -319,11 +315,6 @@ static inline void task_cap(struct seq_file *m, struct task_struct *p)
3395     cap_bset = cred->cap_bset;
3396     rcu_read_unlock();
3397    
3398     - NORM_CAPS(cap_inheritable);
3399     - NORM_CAPS(cap_permitted);
3400     - NORM_CAPS(cap_effective);
3401     - NORM_CAPS(cap_bset);
3402     -
3403     render_cap_t(m, "CapInh:\t", &cap_inheritable);
3404     render_cap_t(m, "CapPrm:\t", &cap_permitted);
3405     render_cap_t(m, "CapEff:\t", &cap_effective);
3406     diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
3407     index db2cfb067d0b..5d2518b24cea 100644
3408     --- a/fs/xfs/xfs_aops.c
3409     +++ b/fs/xfs/xfs_aops.c
3410     @@ -1660,11 +1660,72 @@ xfs_vm_readpages(
3411     return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
3412     }
3413    
3414     +/*
3415     + * This is basically a copy of __set_page_dirty_buffers() with one
3416     + * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
3417     + * dirty, we'll never be able to clean them because we don't write buffers
3418     + * beyond EOF, and that means we can't invalidate pages that span EOF
3419     + * that have been marked dirty. Further, the dirty state can leak into
3420     + * the file interior if the file is extended, resulting in all sorts of
3421     + * bad things happening as the state does not match the underlying data.
3422     + *
3423     + * XXX: this really indicates that bufferheads in XFS need to die. Warts like
3424     + * this only exist because of bufferheads and how the generic code manages them.
3425     + */
3426     +STATIC int
3427     +xfs_vm_set_page_dirty(
3428     + struct page *page)
3429     +{
3430     + struct address_space *mapping = page->mapping;
3431     + struct inode *inode = mapping->host;
3432     + loff_t end_offset;
3433     + loff_t offset;
3434     + int newly_dirty;
3435     +
3436     + if (unlikely(!mapping))
3437     + return !TestSetPageDirty(page);
3438     +
3439     + end_offset = i_size_read(inode);
3440     + offset = page_offset(page);
3441     +
3442     + spin_lock(&mapping->private_lock);
3443     + if (page_has_buffers(page)) {
3444     + struct buffer_head *head = page_buffers(page);
3445     + struct buffer_head *bh = head;
3446     +
3447     + do {
3448     + if (offset < end_offset)
3449     + set_buffer_dirty(bh);
3450     + bh = bh->b_this_page;
3451     + offset += 1 << inode->i_blkbits;
3452     + } while (bh != head);
3453     + }
3454     + newly_dirty = !TestSetPageDirty(page);
3455     + spin_unlock(&mapping->private_lock);
3456     +
3457     + if (newly_dirty) {
3458     + /* sigh - __set_page_dirty() is static, so copy it here, too */
3459     + unsigned long flags;
3460     +
3461     + spin_lock_irqsave(&mapping->tree_lock, flags);
3462     + if (page->mapping) { /* Race with truncate? */
3463     + WARN_ON_ONCE(!PageUptodate(page));
3464     + account_page_dirtied(page, mapping);
3465     + radix_tree_tag_set(&mapping->page_tree,
3466     + page_index(page), PAGECACHE_TAG_DIRTY);
3467     + }
3468     + spin_unlock_irqrestore(&mapping->tree_lock, flags);
3469     + __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
3470     + }
3471     + return newly_dirty;
3472     +}
3473     +
3474     const struct address_space_operations xfs_address_space_operations = {
3475     .readpage = xfs_vm_readpage,
3476     .readpages = xfs_vm_readpages,
3477     .writepage = xfs_vm_writepage,
3478     .writepages = xfs_vm_writepages,
3479     + .set_page_dirty = xfs_vm_set_page_dirty,
3480     .releasepage = xfs_vm_releasepage,
3481     .invalidatepage = xfs_vm_invalidatepage,
3482     .write_begin = xfs_vm_write_begin,
3483     diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
3484     index 7aeb4c895b32..95f94483c3d7 100644
3485     --- a/fs/xfs/xfs_dquot.c
3486     +++ b/fs/xfs/xfs_dquot.c
3487     @@ -1011,7 +1011,8 @@ xfs_qm_dqflush(
3488     * Get the buffer containing the on-disk dquot
3489     */
3490     error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
3491     - mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL);
3492     + mp->m_quotainfo->qi_dqchunklen, 0, &bp,
3493     + &xfs_dquot_buf_ops);
3494     if (error)
3495     goto out_unlock;
3496    
3497     diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
3498     index 64b48eade91d..f50def6018a9 100644
3499     --- a/fs/xfs/xfs_file.c
3500     +++ b/fs/xfs/xfs_file.c
3501     @@ -302,7 +302,16 @@ xfs_file_aio_read(
3502     xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
3503     return ret;
3504     }
3505     - truncate_pagecache_range(VFS_I(ip), pos, -1);
3506     +
3507     + /*
3508     + * Invalidate whole pages. This can return an error if
3509     + * we fail to invalidate a page, but this should never
3510     + * happen on XFS. Warn if it does fail.
3511     + */
3512     + ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
3513     + pos >> PAGE_CACHE_SHIFT, -1);
3514     + WARN_ON_ONCE(ret);
3515     + ret = 0;
3516     }
3517     xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
3518     }
3519     @@ -683,7 +692,15 @@ xfs_file_dio_aio_write(
3520     pos, -1);
3521     if (ret)
3522     goto out;
3523     - truncate_pagecache_range(VFS_I(ip), pos, -1);
3524     + /*
3525     + * Invalidate whole pages. This can return an error if
3526     + * we fail to invalidate a page, but this should never
3527     + * happen on XFS. Warn if it does fail.
3528     + */
3529     + ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
3530     + pos >> PAGE_CACHE_SHIFT, -1);
3531     + WARN_ON_ONCE(ret);
3532     + ret = 0;
3533     }
3534    
3535     /*
3536     diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
3537     index bce53ac81096..eb26418814fe 100644
3538     --- a/fs/xfs/xfs_log_recover.c
3539     +++ b/fs/xfs/xfs_log_recover.c
3540     @@ -2125,6 +2125,17 @@ xlog_recover_validate_buf_type(
3541     __uint16_t magic16;
3542     __uint16_t magicda;
3543    
3544     + /*
3545     + * We can only do post recovery validation on items on CRC enabled
3546     + * filesystems as we need to know when the buffer was written to be able
3547     + * to determine if we should have replayed the item. If we replay old
3548     + * metadata over a newer buffer, then it will enter a temporarily
3549     + * inconsistent state resulting in verification failures. Hence for now
3550     + * just avoid the verification stage for non-crc filesystems
3551     + */
3552     + if (!xfs_sb_version_hascrc(&mp->m_sb))
3553     + return;
3554     +
3555     magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
3556     magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
3557     magicda = be16_to_cpu(info->magic);
3558     @@ -2160,8 +2171,6 @@ xlog_recover_validate_buf_type(
3559     bp->b_ops = &xfs_agf_buf_ops;
3560     break;
3561     case XFS_BLFT_AGFL_BUF:
3562     - if (!xfs_sb_version_hascrc(&mp->m_sb))
3563     - break;
3564     if (magic32 != XFS_AGFL_MAGIC) {
3565     xfs_warn(mp, "Bad AGFL block magic!");
3566     ASSERT(0);
3567     @@ -2194,10 +2203,6 @@ xlog_recover_validate_buf_type(
3568     #endif
3569     break;
3570     case XFS_BLFT_DINO_BUF:
3571     - /*
3572     - * we get here with inode allocation buffers, not buffers that
3573     - * track unlinked list changes.
3574     - */
3575     if (magic16 != XFS_DINODE_MAGIC) {
3576     xfs_warn(mp, "Bad INODE block magic!");
3577     ASSERT(0);
3578     @@ -2277,8 +2282,6 @@ xlog_recover_validate_buf_type(
3579     bp->b_ops = &xfs_attr3_leaf_buf_ops;
3580     break;
3581     case XFS_BLFT_ATTR_RMT_BUF:
3582     - if (!xfs_sb_version_hascrc(&mp->m_sb))
3583     - break;
3584     if (magic32 != XFS_ATTR3_RMT_MAGIC) {
3585     xfs_warn(mp, "Bad attr remote magic!");
3586     ASSERT(0);
3587     @@ -2385,16 +2388,7 @@ xlog_recover_do_reg_buffer(
3588     /* Shouldn't be any more regions */
3589     ASSERT(i == item->ri_total);
3590    
3591     - /*
3592     - * We can only do post recovery validation on items on CRC enabled
3593     - * fielsystems as we need to know when the buffer was written to be able
3594     - * to determine if we should have replayed the item. If we replay old
3595     - * metadata over a newer buffer, then it will enter a temporarily
3596     - * inconsistent state resulting in verification failures. Hence for now
3597     - * just avoid the verification stage for non-crc filesystems
3598     - */
3599     - if (xfs_sb_version_hascrc(&mp->m_sb))
3600     - xlog_recover_validate_buf_type(mp, bp, buf_f);
3601     + xlog_recover_validate_buf_type(mp, bp, buf_f);
3602     }
3603    
3604     /*
3605     @@ -2502,12 +2496,29 @@ xlog_recover_buffer_pass2(
3606     }
3607    
3608     /*
3609     - * recover the buffer only if we get an LSN from it and it's less than
3610     + * Recover the buffer only if we get an LSN from it and it's less than
3611     * the lsn of the transaction we are replaying.
3612     + *
3613     + * Note that we have to be extremely careful of readahead here.
3614     + * Readahead does not attach verifiers to the buffers, so if we don't
3615     + * actually do any replay after readahead because the LSN we found
3616     + * in the buffer is more recent than the current transaction, then we
3617     + * need to attach the verifier directly. Failure to do so means that
3618     + * future recovery actions (e.g. EFI and unlinked list recovery) can
3619     + * operate on the buffers and they won't get the verifier attached. This
3620     + * can lead to blocks on disk having the correct content but a stale
3621     + * CRC.
3622     + *
3623     + * It is safe to assume these clean buffers are currently up to date.
3624     + * If the buffer is dirtied by a later transaction being replayed, then
3625     + * the verifier will be reset to match whatever recover turns that
3626     + * buffer into.
3627     */
3628     lsn = xlog_recover_get_buf_lsn(mp, bp);
3629     - if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0)
3630     + if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3631     + xlog_recover_validate_buf_type(mp, bp, buf_f);
3632     goto out_release;
3633     + }
3634    
3635     if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
3636     error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
3637     diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
3638     index 348e4d2ed6e6..6d7d1de13403 100644
3639     --- a/fs/xfs/xfs_qm.c
3640     +++ b/fs/xfs/xfs_qm.c
3641     @@ -1176,6 +1176,12 @@ xfs_qm_dqiter_bufs(
3642     if (error)
3643     break;
3644    
3645     + /*
3646     + * A corrupt buffer might not have a verifier attached, so
3647     + * make sure we have the correct one attached before writeback
3648     + * occurs.
3649     + */
3650     + bp->b_ops = &xfs_dquot_buf_ops;
3651     xfs_qm_reset_dqcounts(mp, bp, firstid, type);
3652     xfs_buf_delwri_queue(bp, buffer_list);
3653     xfs_buf_relse(bp);
3654     @@ -1261,7 +1267,7 @@ xfs_qm_dqiterate(
3655     xfs_buf_readahead(mp->m_ddev_targp,
3656     XFS_FSB_TO_DADDR(mp, rablkno),
3657     mp->m_quotainfo->qi_dqchunklen,
3658     - NULL);
3659     + &xfs_dquot_buf_ops);
3660     rablkno++;
3661     }
3662     }
3663     diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
3664     index 8256eb4ad057..e9c4f190ffae 100644
3665     --- a/include/acpi/acpi_bus.h
3666     +++ b/include/acpi/acpi_bus.h
3667     @@ -228,7 +228,6 @@ struct acpi_device_pnp {
3668     acpi_device_name device_name; /* Driver-determined */
3669     acpi_device_class device_class; /* " */
3670     union acpi_object *str_obj; /* unicode string for _STR method */
3671     - unsigned long sun; /* _SUN */
3672     };
3673    
3674     #define acpi_device_bid(d) ((d)->pnp.bus_id)
3675     diff --git a/include/linux/capability.h b/include/linux/capability.h
3676     index 84b13ad67c1c..aa93e5ef594c 100644
3677     --- a/include/linux/capability.h
3678     +++ b/include/linux/capability.h
3679     @@ -78,8 +78,11 @@ extern const kernel_cap_t __cap_init_eff_set;
3680     # error Fix up hand-coded capability macro initializers
3681     #else /* HAND-CODED capability initializers */
3682    
3683     +#define CAP_LAST_U32 ((_KERNEL_CAPABILITY_U32S) - 1)
3684     +#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) -1)
3685     +
3686     # define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }})
3687     -# define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }})
3688     +# define CAP_FULL_SET ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }})
3689     # define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
3690     | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
3691     CAP_FS_MASK_B1 } })
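To see what the new mask works out to, here is a small stand-alone sketch, assuming a two-word kernel_cap_t and CAP_LAST_CAP == 36 as in the 3.14 series; the macro definitions are copied from the hunk above, while the program around them is purely illustrative:

	#include <stdio.h>

	#define CAP_LAST_CAP            36   /* assumed value for v3.14 */
	#define _KERNEL_CAPABILITY_U32S 2
	#define CAP_TO_MASK(x)          (1u << ((x) & 31))
	#define CAP_LAST_U32            ((_KERNEL_CAPABILITY_U32S) - 1)
	#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) - 1)

	int main(void)
	{
		/* Capabilities 32..36 live in the last u32, so only bits 0..4
		 * of that word correspond to real capabilities. */
		printf("CAP_LAST_U32 = %d\n", CAP_LAST_U32);                         /* 1    */
		printf("CAP_LAST_U32_VALID_MASK = 0x%x\n", CAP_LAST_U32_VALID_MASK); /* 0x1f */
		return 0;
	}

The old CAP_FULL_SET filled that last word with ~0, advertising capability bits that do not exist; the capset() and get_vfs_caps_from_disk() hunks later in this patch apply the same mask, and render_cap_t()/audit_log_cap() now index the words with CAP_LAST_U32.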
3692     diff --git a/include/linux/dcache.h b/include/linux/dcache.h
3693     index bf72e9ac6de0..3b50cac7ccb3 100644
3694     --- a/include/linux/dcache.h
3695     +++ b/include/linux/dcache.h
3696     @@ -429,7 +429,7 @@ static inline unsigned __d_entry_type(const struct dentry *dentry)
3697     return dentry->d_flags & DCACHE_ENTRY_TYPE;
3698     }
3699    
3700     -static inline bool d_is_directory(const struct dentry *dentry)
3701     +static inline bool d_can_lookup(const struct dentry *dentry)
3702     {
3703     return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE;
3704     }
3705     @@ -439,6 +439,11 @@ static inline bool d_is_autodir(const struct dentry *dentry)
3706     return __d_entry_type(dentry) == DCACHE_AUTODIR_TYPE;
3707     }
3708    
3709     +static inline bool d_is_dir(const struct dentry *dentry)
3710     +{
3711     + return d_can_lookup(dentry) || d_is_autodir(dentry);
3712     +}
3713     +
3714     static inline bool d_is_symlink(const struct dentry *dentry)
3715     {
3716     return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE;
3717     diff --git a/include/linux/mount.h b/include/linux/mount.h
3718     index 839bac270904..b0c1e6574e7f 100644
3719     --- a/include/linux/mount.h
3720     +++ b/include/linux/mount.h
3721     @@ -42,13 +42,20 @@ struct mnt_namespace;
3722     * flag, consider how it interacts with shared mounts.
3723     */
3724     #define MNT_SHARED_MASK (MNT_UNBINDABLE)
3725     -#define MNT_PROPAGATION_MASK (MNT_SHARED | MNT_UNBINDABLE)
3726     +#define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \
3727     + | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \
3728     + | MNT_READONLY)
3729     +#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
3730    
3731     #define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
3732     MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED)
3733    
3734     #define MNT_INTERNAL 0x4000
3735    
3736     +#define MNT_LOCK_ATIME 0x040000
3737     +#define MNT_LOCK_NOEXEC 0x080000
3738     +#define MNT_LOCK_NOSUID 0x100000
3739     +#define MNT_LOCK_NODEV 0x200000
3740     #define MNT_LOCK_READONLY 0x400000
3741     #define MNT_LOCKED 0x800000
3742     #define MNT_DOOMED 0x1000000
3743     diff --git a/include/linux/tpm.h b/include/linux/tpm.h
3744     index fff1d0976f80..8350c538b486 100644
3745     --- a/include/linux/tpm.h
3746     +++ b/include/linux/tpm.h
3747     @@ -39,6 +39,9 @@ struct tpm_class_ops {
3748     int (*send) (struct tpm_chip *chip, u8 *buf, size_t len);
3749     void (*cancel) (struct tpm_chip *chip);
3750     u8 (*status) (struct tpm_chip *chip);
3751     + bool (*update_timeouts)(struct tpm_chip *chip,
3752     + unsigned long *timeout_cap);
3753     +
3754     };
3755    
3756     #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
3757     diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
3758     index b4f1effc9216..409fafb63f63 100644
3759     --- a/include/scsi/scsi_device.h
3760     +++ b/include/scsi/scsi_device.h
3761     @@ -149,6 +149,7 @@ struct scsi_device {
3762     unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */
3763     unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */
3764     unsigned skip_vpd_pages:1; /* do not read VPD pages */
3765     + unsigned try_vpd_pages:1; /* attempt to read VPD pages */
3766     unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
3767     unsigned no_start_on_add:1; /* do not issue start on add */
3768     unsigned allow_restart:1; /* issue START_UNIT in error handler */
3769     diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
3770     index 447d2d7466fc..183eaab7c380 100644
3771     --- a/include/scsi/scsi_devinfo.h
3772     +++ b/include/scsi/scsi_devinfo.h
3773     @@ -32,4 +32,9 @@
3774     #define BLIST_ATTACH_PQ3 0x1000000 /* Scan: Attach to PQ3 devices */
3775     #define BLIST_NO_DIF 0x2000000 /* Disable T10 PI (DIF) */
3776     #define BLIST_SKIP_VPD_PAGES 0x4000000 /* Ignore SBC-3 VPD pages */
3777     +#define BLIST_SCSI3LUN 0x8000000 /* Scan more than 256 LUNs
3778     + for sequential scan */
3779     +#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
3780     +#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
3781     +
3782     #endif
3783     diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
3784     index 99b80abf360a..3066718eb120 100644
3785     --- a/include/uapi/rdma/rdma_user_cm.h
3786     +++ b/include/uapi/rdma/rdma_user_cm.h
3787     @@ -34,6 +34,7 @@
3788     #define RDMA_USER_CM_H
3789    
3790     #include <linux/types.h>
3791     +#include <linux/socket.h>
3792     #include <linux/in6.h>
3793     #include <rdma/ib_user_verbs.h>
3794     #include <rdma/ib_user_sa.h>
3795     diff --git a/kernel/audit.c b/kernel/audit.c
3796     index 0c9dc860cc15..2c0ecd1753de 100644
3797     --- a/kernel/audit.c
3798     +++ b/kernel/audit.c
3799     @@ -1628,7 +1628,7 @@ void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
3800     audit_log_format(ab, " %s=", prefix);
3801     CAP_FOR_EACH_U32(i) {
3802     audit_log_format(ab, "%08x",
3803     - cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
3804     + cap->cap[CAP_LAST_U32 - i]);
3805     }
3806     }
3807    
3808     diff --git a/kernel/capability.c b/kernel/capability.c
3809     index 1191a44786df..00adb2193d01 100644
3810     --- a/kernel/capability.c
3811     +++ b/kernel/capability.c
3812     @@ -268,6 +268,10 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
3813     i++;
3814     }
3815    
3816     + effective.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
3817     + permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
3818     + inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
3819     +
3820     new = prepare_creds();
3821     if (!new)
3822     return -ENOMEM;
3823     diff --git a/kernel/smp.c b/kernel/smp.c
3824     index ffee35bef179..ff87d4479558 100644
3825     --- a/kernel/smp.c
3826     +++ b/kernel/smp.c
3827     @@ -617,7 +617,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
3828     if (cond_func(cpu, info)) {
3829     ret = smp_call_function_single(cpu, func,
3830     info, wait);
3831     - WARN_ON_ONCE(!ret);
3832     + WARN_ON_ONCE(ret);
3833     }
3834     preempt_enable();
3835     }
3836     diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
3837     index 0954450df7dc..a53f1bbc546b 100644
3838     --- a/kernel/trace/ring_buffer.c
3839     +++ b/kernel/trace/ring_buffer.c
3840     @@ -1981,7 +1981,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
3841    
3842     /**
3843     * rb_update_event - update event type and data
3844     - * @event: the even to update
3845     + * @event: the event to update
3846     * @type: the type of event
3847     * @length: the size of the event field in the ring buffer
3848     *
3849     @@ -3354,21 +3354,16 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
3850     struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3851    
3852     /* Iterator usage is expected to have record disabled */
3853     - if (list_empty(&cpu_buffer->reader_page->list)) {
3854     - iter->head_page = rb_set_head_page(cpu_buffer);
3855     - if (unlikely(!iter->head_page))
3856     - return;
3857     - iter->head = iter->head_page->read;
3858     - } else {
3859     - iter->head_page = cpu_buffer->reader_page;
3860     - iter->head = cpu_buffer->reader_page->read;
3861     - }
3862     + iter->head_page = cpu_buffer->reader_page;
3863     + iter->head = cpu_buffer->reader_page->read;
3864     +
3865     + iter->cache_reader_page = iter->head_page;
3866     + iter->cache_read = iter->head;
3867     +
3868     if (iter->head)
3869     iter->read_stamp = cpu_buffer->read_stamp;
3870     else
3871     iter->read_stamp = iter->head_page->page->time_stamp;
3872     - iter->cache_reader_page = cpu_buffer->reader_page;
3873     - iter->cache_read = cpu_buffer->read;
3874     }
3875    
3876     /**
3877     @@ -3761,12 +3756,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3878     return NULL;
3879    
3880     /*
3881     - * We repeat when a time extend is encountered.
3882     - * Since the time extend is always attached to a data event,
3883     - * we should never loop more than once.
3884     - * (We never hit the following condition more than twice).
3885     + * We repeat when a time extend is encountered or we hit
3886     + * the end of the page. Since the time extend is always attached
3887     + * to a data event, we should never loop more than three times.
3888     + * Once for going to next page, once on time extend, and
3889     + * finally once to get the event.
3890     + * (We never hit the following condition more than thrice).
3891     */
3892     - if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3893     + if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
3894     return NULL;
3895    
3896     if (rb_per_cpu_empty(cpu_buffer))
3897     diff --git a/lib/assoc_array.c b/lib/assoc_array.c
3898     index c0b1007011e1..2404d03e251a 100644
3899     --- a/lib/assoc_array.c
3900     +++ b/lib/assoc_array.c
3901     @@ -1723,11 +1723,13 @@ ascend_old_tree:
3902     shortcut = assoc_array_ptr_to_shortcut(ptr);
3903     slot = shortcut->parent_slot;
3904     cursor = shortcut->back_pointer;
3905     + if (!cursor)
3906     + goto gc_complete;
3907     } else {
3908     slot = node->parent_slot;
3909     cursor = ptr;
3910     }
3911     - BUG_ON(!ptr);
3912     + BUG_ON(!cursor);
3913     node = assoc_array_ptr_to_node(cursor);
3914     slot++;
3915     goto continue_node;
3916     @@ -1735,7 +1737,7 @@ ascend_old_tree:
3917     gc_complete:
3918     edit->set[0].to = new_root;
3919     assoc_array_apply_edit(edit);
3920     - edit->array->nr_leaves_on_tree = nr_leaves_on_tree;
3921     + array->nr_leaves_on_tree = nr_leaves_on_tree;
3922     return 0;
3923    
3924     enomem:
3925     diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
3926     index 27ae84154586..06a7a769737f 100644
3927     --- a/net/bluetooth/l2cap_sock.c
3928     +++ b/net/bluetooth/l2cap_sock.c
3929     @@ -1112,7 +1112,8 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
3930     l2cap_chan_close(chan, 0);
3931     lock_sock(sk);
3932    
3933     - if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
3934     + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
3935     + !(current->flags & PF_EXITING))
3936     err = bt_sock_wait_state(sk, BT_CLOSED,
3937     sk->sk_lingertime);
3938     }
3939     diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
3940     index facd8a79c038..b08865111024 100644
3941     --- a/net/bluetooth/rfcomm/core.c
3942     +++ b/net/bluetooth/rfcomm/core.c
3943     @@ -1859,10 +1859,13 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
3944     /* Get data directly from socket receive queue without copying it. */
3945     while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
3946     skb_orphan(skb);
3947     - if (!skb_linearize(skb))
3948     + if (!skb_linearize(skb)) {
3949     s = rfcomm_recv_frame(s, skb);
3950     - else
3951     + if (!s)
3952     + break;
3953     + } else {
3954     kfree_skb(skb);
3955     + }
3956     }
3957    
3958     if (s && (sk->sk_state == BT_CLOSED))
3959     diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
3960     index 3c2d3e4aa2f5..a0050de6f1f1 100644
3961     --- a/net/bluetooth/rfcomm/sock.c
3962     +++ b/net/bluetooth/rfcomm/sock.c
3963     @@ -898,7 +898,8 @@ static int rfcomm_sock_shutdown(struct socket *sock, int how)
3964     sk->sk_shutdown = SHUTDOWN_MASK;
3965     __rfcomm_sock_close(sk);
3966    
3967     - if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
3968     + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
3969     + !(current->flags & PF_EXITING))
3970     err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
3971     }
3972     release_sock(sk);
3973     diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
3974     index 24fa3964b3c8..316dd4e0af39 100644
3975     --- a/net/bluetooth/sco.c
3976     +++ b/net/bluetooth/sco.c
3977     @@ -909,7 +909,8 @@ static int sco_sock_shutdown(struct socket *sock, int how)
3978     sco_sock_clear_timer(sk);
3979     __sco_sock_close(sk);
3980    
3981     - if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
3982     + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
3983     + !(current->flags & PF_EXITING))
3984     err = bt_sock_wait_state(sk, BT_CLOSED,
3985     sk->sk_lingertime);
3986     }
3987     @@ -929,7 +930,8 @@ static int sco_sock_release(struct socket *sock)
3988    
3989     sco_sock_close(sk);
3990    
3991     - if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) {
3992     + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
3993     + !(current->flags & PF_EXITING)) {
3994     lock_sock(sk);
3995     err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
3996     release_sock(sk);
3997     diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
3998     index 96238ba95f2b..de6662b14e1f 100644
3999     --- a/net/ceph/auth_x.c
4000     +++ b/net/ceph/auth_x.c
4001     @@ -13,8 +13,6 @@
4002     #include "auth_x.h"
4003     #include "auth_x_protocol.h"
4004    
4005     -#define TEMP_TICKET_BUF_LEN 256
4006     -
4007     static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
4008    
4009     static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
4010     @@ -64,7 +62,7 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret,
4011     }
4012    
4013     static int ceph_x_decrypt(struct ceph_crypto_key *secret,
4014     - void **p, void *end, void *obuf, size_t olen)
4015     + void **p, void *end, void **obuf, size_t olen)
4016     {
4017     struct ceph_x_encrypt_header head;
4018     size_t head_len = sizeof(head);
4019     @@ -75,8 +73,14 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
4020     return -EINVAL;
4021    
4022     dout("ceph_x_decrypt len %d\n", len);
4023     - ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
4024     - *p, len);
4025     + if (*obuf == NULL) {
4026     + *obuf = kmalloc(len, GFP_NOFS);
4027     + if (!*obuf)
4028     + return -ENOMEM;
4029     + olen = len;
4030     + }
4031     +
4032     + ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
4033     if (ret)
4034     return ret;
4035     if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
4036     @@ -129,139 +133,120 @@ static void remove_ticket_handler(struct ceph_auth_client *ac,
4037     kfree(th);
4038     }
4039    
4040     -static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
4041     - struct ceph_crypto_key *secret,
4042     - void *buf, void *end)
4043     +static int process_one_ticket(struct ceph_auth_client *ac,
4044     + struct ceph_crypto_key *secret,
4045     + void **p, void *end)
4046     {
4047     struct ceph_x_info *xi = ac->private;
4048     - int num;
4049     - void *p = buf;
4050     + int type;
4051     + u8 tkt_struct_v, blob_struct_v;
4052     + struct ceph_x_ticket_handler *th;
4053     + void *dbuf = NULL;
4054     + void *dp, *dend;
4055     + int dlen;
4056     + char is_enc;
4057     + struct timespec validity;
4058     + struct ceph_crypto_key old_key;
4059     + void *ticket_buf = NULL;
4060     + void *tp, *tpend;
4061     + struct ceph_timespec new_validity;
4062     + struct ceph_crypto_key new_session_key;
4063     + struct ceph_buffer *new_ticket_blob;
4064     + unsigned long new_expires, new_renew_after;
4065     + u64 new_secret_id;
4066     int ret;
4067     - char *dbuf;
4068     - char *ticket_buf;
4069     - u8 reply_struct_v;
4070    
4071     - dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
4072     - if (!dbuf)
4073     - return -ENOMEM;
4074     + ceph_decode_need(p, end, sizeof(u32) + 1, bad);
4075    
4076     - ret = -ENOMEM;
4077     - ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
4078     - if (!ticket_buf)
4079     - goto out_dbuf;
4080     + type = ceph_decode_32(p);
4081     + dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
4082    
4083     - ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
4084     - reply_struct_v = ceph_decode_8(&p);
4085     - if (reply_struct_v != 1)
4086     + tkt_struct_v = ceph_decode_8(p);
4087     + if (tkt_struct_v != 1)
4088     goto bad;
4089     - num = ceph_decode_32(&p);
4090     - dout("%d tickets\n", num);
4091     - while (num--) {
4092     - int type;
4093     - u8 tkt_struct_v, blob_struct_v;
4094     - struct ceph_x_ticket_handler *th;
4095     - void *dp, *dend;
4096     - int dlen;
4097     - char is_enc;
4098     - struct timespec validity;
4099     - struct ceph_crypto_key old_key;
4100     - void *tp, *tpend;
4101     - struct ceph_timespec new_validity;
4102     - struct ceph_crypto_key new_session_key;
4103     - struct ceph_buffer *new_ticket_blob;
4104     - unsigned long new_expires, new_renew_after;
4105     - u64 new_secret_id;
4106     -
4107     - ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
4108     -
4109     - type = ceph_decode_32(&p);
4110     - dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
4111     -
4112     - tkt_struct_v = ceph_decode_8(&p);
4113     - if (tkt_struct_v != 1)
4114     - goto bad;
4115     -
4116     - th = get_ticket_handler(ac, type);
4117     - if (IS_ERR(th)) {
4118     - ret = PTR_ERR(th);
4119     - goto out;
4120     - }
4121    
4122     - /* blob for me */
4123     - dlen = ceph_x_decrypt(secret, &p, end, dbuf,
4124     - TEMP_TICKET_BUF_LEN);
4125     - if (dlen <= 0) {
4126     - ret = dlen;
4127     - goto out;
4128     - }
4129     - dout(" decrypted %d bytes\n", dlen);
4130     - dend = dbuf + dlen;
4131     - dp = dbuf;
4132     + th = get_ticket_handler(ac, type);
4133     + if (IS_ERR(th)) {
4134     + ret = PTR_ERR(th);
4135     + goto out;
4136     + }
4137    
4138     - tkt_struct_v = ceph_decode_8(&dp);
4139     - if (tkt_struct_v != 1)
4140     - goto bad;
4141     + /* blob for me */
4142     + dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
4143     + if (dlen <= 0) {
4144     + ret = dlen;
4145     + goto out;
4146     + }
4147     + dout(" decrypted %d bytes\n", dlen);
4148     + dp = dbuf;
4149     + dend = dp + dlen;
4150    
4151     - memcpy(&old_key, &th->session_key, sizeof(old_key));
4152     - ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
4153     - if (ret)
4154     - goto out;
4155     + tkt_struct_v = ceph_decode_8(&dp);
4156     + if (tkt_struct_v != 1)
4157     + goto bad;
4158    
4159     - ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
4160     - ceph_decode_timespec(&validity, &new_validity);
4161     - new_expires = get_seconds() + validity.tv_sec;
4162     - new_renew_after = new_expires - (validity.tv_sec / 4);
4163     - dout(" expires=%lu renew_after=%lu\n", new_expires,
4164     - new_renew_after);
4165     + memcpy(&old_key, &th->session_key, sizeof(old_key));
4166     + ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
4167     + if (ret)
4168     + goto out;
4169    
4170     - /* ticket blob for service */
4171     - ceph_decode_8_safe(&p, end, is_enc, bad);
4172     - tp = ticket_buf;
4173     - if (is_enc) {
4174     - /* encrypted */
4175     - dout(" encrypted ticket\n");
4176     - dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf,
4177     - TEMP_TICKET_BUF_LEN);
4178     - if (dlen < 0) {
4179     - ret = dlen;
4180     - goto out;
4181     - }
4182     - dlen = ceph_decode_32(&tp);
4183     - } else {
4184     - /* unencrypted */
4185     - ceph_decode_32_safe(&p, end, dlen, bad);
4186     - ceph_decode_need(&p, end, dlen, bad);
4187     - ceph_decode_copy(&p, ticket_buf, dlen);
4188     + ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
4189     + ceph_decode_timespec(&validity, &new_validity);
4190     + new_expires = get_seconds() + validity.tv_sec;
4191     + new_renew_after = new_expires - (validity.tv_sec / 4);
4192     + dout(" expires=%lu renew_after=%lu\n", new_expires,
4193     + new_renew_after);
4194     +
4195     + /* ticket blob for service */
4196     + ceph_decode_8_safe(p, end, is_enc, bad);
4197     + if (is_enc) {
4198     + /* encrypted */
4199     + dout(" encrypted ticket\n");
4200     + dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
4201     + if (dlen < 0) {
4202     + ret = dlen;
4203     + goto out;
4204     }
4205     - tpend = tp + dlen;
4206     - dout(" ticket blob is %d bytes\n", dlen);
4207     - ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
4208     - blob_struct_v = ceph_decode_8(&tp);
4209     - new_secret_id = ceph_decode_64(&tp);
4210     - ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
4211     - if (ret)
4212     + tp = ticket_buf;
4213     + dlen = ceph_decode_32(&tp);
4214     + } else {
4215     + /* unencrypted */
4216     + ceph_decode_32_safe(p, end, dlen, bad);
4217     + ticket_buf = kmalloc(dlen, GFP_NOFS);
4218     + if (!ticket_buf) {
4219     + ret = -ENOMEM;
4220     goto out;
4221     -
4222     - /* all is well, update our ticket */
4223     - ceph_crypto_key_destroy(&th->session_key);
4224     - if (th->ticket_blob)
4225     - ceph_buffer_put(th->ticket_blob);
4226     - th->session_key = new_session_key;
4227     - th->ticket_blob = new_ticket_blob;
4228     - th->validity = new_validity;
4229     - th->secret_id = new_secret_id;
4230     - th->expires = new_expires;
4231     - th->renew_after = new_renew_after;
4232     - dout(" got ticket service %d (%s) secret_id %lld len %d\n",
4233     - type, ceph_entity_type_name(type), th->secret_id,
4234     - (int)th->ticket_blob->vec.iov_len);
4235     - xi->have_keys |= th->service;
4236     + }
4237     + tp = ticket_buf;
4238     + ceph_decode_need(p, end, dlen, bad);
4239     + ceph_decode_copy(p, ticket_buf, dlen);
4240     }
4241     + tpend = tp + dlen;
4242     + dout(" ticket blob is %d bytes\n", dlen);
4243     + ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
4244     + blob_struct_v = ceph_decode_8(&tp);
4245     + new_secret_id = ceph_decode_64(&tp);
4246     + ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
4247     + if (ret)
4248     + goto out;
4249     +
4250     + /* all is well, update our ticket */
4251     + ceph_crypto_key_destroy(&th->session_key);
4252     + if (th->ticket_blob)
4253     + ceph_buffer_put(th->ticket_blob);
4254     + th->session_key = new_session_key;
4255     + th->ticket_blob = new_ticket_blob;
4256     + th->validity = new_validity;
4257     + th->secret_id = new_secret_id;
4258     + th->expires = new_expires;
4259     + th->renew_after = new_renew_after;
4260     + dout(" got ticket service %d (%s) secret_id %lld len %d\n",
4261     + type, ceph_entity_type_name(type), th->secret_id,
4262     + (int)th->ticket_blob->vec.iov_len);
4263     + xi->have_keys |= th->service;
4264    
4265     - ret = 0;
4266     out:
4267     kfree(ticket_buf);
4268     -out_dbuf:
4269     kfree(dbuf);
4270     return ret;
4271    
4272     @@ -270,6 +255,34 @@ bad:
4273     goto out;
4274     }
4275    
4276     +static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
4277     + struct ceph_crypto_key *secret,
4278     + void *buf, void *end)
4279     +{
4280     + void *p = buf;
4281     + u8 reply_struct_v;
4282     + u32 num;
4283     + int ret;
4284     +
4285     + ceph_decode_8_safe(&p, end, reply_struct_v, bad);
4286     + if (reply_struct_v != 1)
4287     + return -EINVAL;
4288     +
4289     + ceph_decode_32_safe(&p, end, num, bad);
4290     + dout("%d tickets\n", num);
4291     +
4292     + while (num--) {
4293     + ret = process_one_ticket(ac, secret, &p, end);
4294     + if (ret)
4295     + return ret;
4296     + }
4297     +
4298     + return 0;
4299     +
4300     +bad:
4301     + return -EINVAL;
4302     +}
4303     +
4304     static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
4305     struct ceph_x_ticket_handler *th,
4306     struct ceph_x_authorizer *au)
4307     @@ -583,13 +596,14 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
4308     struct ceph_x_ticket_handler *th;
4309     int ret = 0;
4310     struct ceph_x_authorize_reply reply;
4311     + void *preply = &reply;
4312     void *p = au->reply_buf;
4313     void *end = p + sizeof(au->reply_buf);
4314    
4315     th = get_ticket_handler(ac, au->service);
4316     if (IS_ERR(th))
4317     return PTR_ERR(th);
4318     - ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
4319     + ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
4320     if (ret < 0)
4321     return ret;
4322     if (ret != sizeof(reply))
4323     diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
4324     index 988721a629eb..0a31298737ac 100644
4325     --- a/net/ceph/messenger.c
4326     +++ b/net/ceph/messenger.c
4327     @@ -900,7 +900,7 @@ static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
4328     BUG_ON(page_count > (int)USHRT_MAX);
4329     cursor->page_count = (unsigned short)page_count;
4330     BUG_ON(length > SIZE_MAX - cursor->page_offset);
4331     - cursor->last_piece = (size_t)cursor->page_offset + length <= PAGE_SIZE;
4332     + cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
4333     }
4334    
4335     static struct page *
4336     diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
4337     index 2ac9ef35110b..dbcbf5a4707f 100644
4338     --- a/net/ceph/mon_client.c
4339     +++ b/net/ceph/mon_client.c
4340     @@ -1041,7 +1041,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
4341     if (!m) {
4342     pr_info("alloc_msg unknown type %d\n", type);
4343     *skip = 1;
4344     + } else if (front_len > m->front_alloc_len) {
4345     + pr_warning("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
4346     + front_len, m->front_alloc_len,
4347     + (unsigned int)con->peer_name.type,
4348     + le64_to_cpu(con->peer_name.num));
4349     + ceph_msg_put(m);
4350     + m = ceph_msg_new(type, front_len, GFP_NOFS, false);
4351     }
4352     +
4353     return m;
4354     }
4355    
4356     diff --git a/security/commoncap.c b/security/commoncap.c
4357     index b9d613e0ef14..963dc5981661 100644
4358     --- a/security/commoncap.c
4359     +++ b/security/commoncap.c
4360     @@ -421,6 +421,9 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
4361     cpu_caps->inheritable.cap[i] = le32_to_cpu(caps.data[i].inheritable);
4362     }
4363    
4364     + cpu_caps->permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
4365     + cpu_caps->inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
4366     +
4367     return 0;
4368     }
4369    
4370     diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
4371     index a3881c4381c9..bcf591373a7a 100644
4372     --- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
4373     +++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
4374     @@ -290,19 +290,19 @@ static int bf5xx_pcm_silence(struct snd_pcm_substream *substream,
4375     unsigned int sample_size = runtime->sample_bits / 8;
4376     void *buf = runtime->dma_area;
4377     struct bf5xx_i2s_pcm_data *dma_data;
4378     - unsigned int offset, size;
4379     + unsigned int offset, samples;
4380    
4381     dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
4382    
4383     if (dma_data->tdm_mode) {
4384     offset = pos * 8 * sample_size;
4385     - size = count * 8 * sample_size;
4386     + samples = count * 8;
4387     } else {
4388     offset = frames_to_bytes(runtime, pos);
4389     - size = frames_to_bytes(runtime, count);
4390     + samples = count * runtime->channels;
4391     }
4392    
4393     - snd_pcm_format_set_silence(runtime->format, buf + offset, size);
4394     + snd_pcm_format_set_silence(runtime->format, buf + offset, samples);
4395    
4396     return 0;
4397     }
4398     diff --git a/sound/soc/codecs/adau1701.c b/sound/soc/codecs/adau1701.c
4399     index d71c59cf7bdd..370b742117ef 100644
4400     --- a/sound/soc/codecs/adau1701.c
4401     +++ b/sound/soc/codecs/adau1701.c
4402     @@ -230,8 +230,10 @@ static int adau1701_reg_read(void *context, unsigned int reg,
4403    
4404     *value = 0;
4405    
4406     - for (i = 0; i < size; i++)
4407     - *value |= recv_buf[i] << (i * 8);
4408     + for (i = 0; i < size; i++) {
4409     + *value <<= 8;
4410     + *value |= recv_buf[i];
4411     + }
4412    
4413     return 0;
4414     }
4415     diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
4416     index b3f7c9026a29..ddfb0fddd030 100644
4417     --- a/sound/soc/codecs/max98090.c
4418     +++ b/sound/soc/codecs/max98090.c
4419     @@ -2250,7 +2250,7 @@ static int max98090_probe(struct snd_soc_codec *codec)
4420     /* Register for interrupts */
4421     dev_dbg(codec->dev, "irq = %d\n", max98090->irq);
4422    
4423     - ret = request_threaded_irq(max98090->irq, NULL,
4424     + ret = devm_request_threaded_irq(codec->dev, max98090->irq, NULL,
4425     max98090_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
4426     "max98090_interrupt", codec);
4427     if (ret < 0) {
4428     diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
4429     index 886924934aa5..5cb515b08a32 100644
4430     --- a/sound/soc/codecs/rt5640.c
4431     +++ b/sound/soc/codecs/rt5640.c
4432     @@ -2071,6 +2071,7 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5640 = {
4433     static const struct regmap_config rt5640_regmap = {
4434     .reg_bits = 8,
4435     .val_bits = 16,
4436     + .use_single_rw = true,
4437    
4438     .max_register = RT5640_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5640_ranges) *
4439     RT5640_PR_SPACING),
4440     diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
4441     index adb72063d44e..d98e52f647d2 100644
4442     --- a/sound/soc/codecs/wm8994.c
4443     +++ b/sound/soc/codecs/wm8994.c
4444     @@ -3497,6 +3497,7 @@ static irqreturn_t wm8994_mic_irq(int irq, void *data)
4445     return IRQ_HANDLED;
4446     }
4447    
4448     +/* Should be called with accdet_lock held */
4449     static void wm1811_micd_stop(struct snd_soc_codec *codec)
4450     {
4451     struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
4452     @@ -3504,14 +3505,10 @@ static void wm1811_micd_stop(struct snd_soc_codec *codec)
4453     if (!wm8994->jackdet)
4454     return;
4455    
4456     - mutex_lock(&wm8994->accdet_lock);
4457     -
4458     snd_soc_update_bits(codec, WM8958_MIC_DETECT_1, WM8958_MICD_ENA, 0);
4459    
4460     wm1811_jackdet_set_mode(codec, WM1811_JACKDET_MODE_JACK);
4461    
4462     - mutex_unlock(&wm8994->accdet_lock);
4463     -
4464     if (wm8994->wm8994->pdata.jd_ext_cap)
4465     snd_soc_dapm_disable_pin(&codec->dapm,
4466     "MICBIAS2");
4467     @@ -3552,10 +3549,10 @@ static void wm8958_open_circuit_work(struct work_struct *work)
4468     open_circuit_work.work);
4469     struct device *dev = wm8994->wm8994->dev;
4470    
4471     - wm1811_micd_stop(wm8994->hubs.codec);
4472     -
4473     mutex_lock(&wm8994->accdet_lock);
4474    
4475     + wm1811_micd_stop(wm8994->hubs.codec);
4476     +
4477     dev_dbg(dev, "Reporting open circuit\n");
4478    
4479     wm8994->jack_mic = false;
4480     diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
4481     index 444626fcab40..53c03aff762e 100644
4482     --- a/sound/soc/codecs/wm_adsp.c
4483     +++ b/sound/soc/codecs/wm_adsp.c
4484     @@ -1745,3 +1745,5 @@ int wm_adsp2_init(struct wm_adsp *adsp, bool dvfs)
4485     return 0;
4486     }
4487     EXPORT_SYMBOL_GPL(wm_adsp2_init);
4488     +
4489     +MODULE_LICENSE("GPL v2");
4490     diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
4491     index a3119a00d8fa..6c6b35e471c8 100644
4492     --- a/sound/soc/pxa/pxa-ssp.c
4493     +++ b/sound/soc/pxa/pxa-ssp.c
4494     @@ -725,7 +725,8 @@ static int pxa_ssp_probe(struct snd_soc_dai *dai)
4495     ssp_handle = of_parse_phandle(dev->of_node, "port", 0);
4496     if (!ssp_handle) {
4497     dev_err(dev, "unable to get 'port' phandle\n");
4498     - return -ENODEV;
4499     + ret = -ENODEV;
4500     + goto err_priv;
4501     }
4502    
4503     priv->ssp = pxa_ssp_request_of(ssp_handle, "SoC audio");
4504     @@ -766,9 +767,7 @@ static int pxa_ssp_remove(struct snd_soc_dai *dai)
4505     SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \
4506     SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
4507    
4508     -#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
4509     - SNDRV_PCM_FMTBIT_S24_LE | \
4510     - SNDRV_PCM_FMTBIT_S32_LE)
4511     +#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
4512    
4513     static const struct snd_soc_dai_ops pxa_ssp_dai_ops = {
4514     .startup = pxa_ssp_startup,
4515     diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
4516     index 0a9b44c940ce..5dae66002a11 100644
4517     --- a/sound/soc/samsung/i2s.c
4518     +++ b/sound/soc/samsung/i2s.c
4519     @@ -915,11 +915,9 @@ static int i2s_suspend(struct snd_soc_dai *dai)
4520     {
4521     struct i2s_dai *i2s = to_info(dai);
4522    
4523     - if (dai->active) {
4524     - i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
4525     - i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
4526     - i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
4527     - }
4528     + i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
4529     + i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
4530     + i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
4531    
4532     return 0;
4533     }
4534     @@ -928,11 +926,9 @@ static int i2s_resume(struct snd_soc_dai *dai)
4535     {
4536     struct i2s_dai *i2s = to_info(dai);
4537    
4538     - if (dai->active) {
4539     - writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
4540     - writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
4541     - writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
4542     - }
4543     + writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
4544     + writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
4545     + writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
4546    
4547     return 0;
4548     }
4549     diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
4550     index 47e1ce771e65..02733ded2cb1 100644
4551     --- a/sound/soc/soc-pcm.c
4552     +++ b/sound/soc/soc-pcm.c
4553     @@ -2011,6 +2011,7 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card)
4554     dpcm_be_disconnect(fe, SNDRV_PCM_STREAM_PLAYBACK);
4555     }
4556    
4557     + dpcm_path_put(&list);
4558     capture:
4559     /* skip if FE doesn't have capture capability */
4560     if (!fe->cpu_dai->driver->capture.channels_min)
4561     diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
4562     index 32487ed18354..3d5979b23e50 100644
4563     --- a/tools/testing/selftests/Makefile
4564     +++ b/tools/testing/selftests/Makefile
4565     @@ -4,6 +4,7 @@ TARGETS += efivarfs
4566     TARGETS += kcmp
4567     TARGETS += memory-hotplug
4568     TARGETS += mqueue
4569     +TARGETS += mount
4570     TARGETS += net
4571     TARGETS += ptrace
4572     TARGETS += timers
4573     diff --git a/tools/testing/selftests/mount/Makefile b/tools/testing/selftests/mount/Makefile
4574     new file mode 100644
4575     index 000000000000..337d853c2b72
4576     --- /dev/null
4577     +++ b/tools/testing/selftests/mount/Makefile
4578     @@ -0,0 +1,17 @@
4579     +# Makefile for mount selftests.
4580     +
4581     +all: unprivileged-remount-test
4582     +
4583     +unprivileged-remount-test: unprivileged-remount-test.c
4584     + gcc -Wall -O2 unprivileged-remount-test.c -o unprivileged-remount-test
4585     +
4586     +# Allow specific tests to be selected.
4587     +test_unprivileged_remount: unprivileged-remount-test
4588     + @if [ -f /proc/self/uid_map ] ; then ./unprivileged-remount-test ; fi
4589     +
4590     +run_tests: all test_unprivileged_remount
4591     +
4592     +clean:
4593     + rm -f unprivileged-remount-test
4594     +
4595     +.PHONY: all test_unprivileged_remount
4596     diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c
4597     new file mode 100644
4598     index 000000000000..1b3ff2fda4d0
4599     --- /dev/null
4600     +++ b/tools/testing/selftests/mount/unprivileged-remount-test.c
4601     @@ -0,0 +1,242 @@
4602     +#define _GNU_SOURCE
4603     +#include <sched.h>
4604     +#include <stdio.h>
4605     +#include <errno.h>
4606     +#include <string.h>
4607     +#include <sys/types.h>
4608     +#include <sys/mount.h>
4609     +#include <sys/wait.h>
4610     +#include <stdlib.h>
4611     +#include <unistd.h>
4612     +#include <fcntl.h>
4613     +#include <grp.h>
4614     +#include <stdbool.h>
4615     +#include <stdarg.h>
4616     +
4617     +#ifndef CLONE_NEWNS
4618     +# define CLONE_NEWNS 0x00020000
4619     +#endif
4620     +#ifndef CLONE_NEWUTS
4621     +# define CLONE_NEWUTS 0x04000000
4622     +#endif
4623     +#ifndef CLONE_NEWIPC
4624     +# define CLONE_NEWIPC 0x08000000
4625     +#endif
4626     +#ifndef CLONE_NEWNET
4627     +# define CLONE_NEWNET 0x40000000
4628     +#endif
4629     +#ifndef CLONE_NEWUSER
4630     +# define CLONE_NEWUSER 0x10000000
4631     +#endif
4632     +#ifndef CLONE_NEWPID
4633     +# define CLONE_NEWPID 0x20000000
4634     +#endif
4635     +
4636     +#ifndef MS_RELATIME
4637     +#define MS_RELATIME (1 << 21)
4638     +#endif
4639     +#ifndef MS_STRICTATIME
4640     +#define MS_STRICTATIME (1 << 24)
4641     +#endif
4642     +
4643     +static void die(char *fmt, ...)
4644     +{
4645     + va_list ap;
4646     + va_start(ap, fmt);
4647     + vfprintf(stderr, fmt, ap);
4648     + va_end(ap);
4649     + exit(EXIT_FAILURE);
4650     +}
4651     +
4652     +static void write_file(char *filename, char *fmt, ...)
4653     +{
4654     + char buf[4096];
4655     + int fd;
4656     + ssize_t written;
4657     + int buf_len;
4658     + va_list ap;
4659     +
4660     + va_start(ap, fmt);
4661     + buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
4662     + va_end(ap);
4663     + if (buf_len < 0) {
4664     + die("vsnprintf failed: %s\n",
4665     + strerror(errno));
4666     + }
4667     + if (buf_len >= sizeof(buf)) {
4668     + die("vsnprintf output truncated\n");
4669     + }
4670     +
4671     + fd = open(filename, O_WRONLY);
4672     + if (fd < 0) {
4673     + die("open of %s failed: %s\n",
4674     + filename, strerror(errno));
4675     + }
4676     + written = write(fd, buf, buf_len);
4677     + if (written != buf_len) {
4678     + if (written >= 0) {
4679     + die("short write to %s\n", filename);
4680     + } else {
4681     + die("write to %s failed: %s\n",
4682     + filename, strerror(errno));
4683     + }
4684     + }
4685     + if (close(fd) != 0) {
4686     + die("close of %s failed: %s\n",
4687     + filename, strerror(errno));
4688     + }
4689     +}
4690     +
4691     +static void create_and_enter_userns(void)
4692     +{
4693     + uid_t uid;
4694     + gid_t gid;
4695     +
4696     + uid = getuid();
4697     + gid = getgid();
4698     +
4699     + if (unshare(CLONE_NEWUSER) != 0) {
4700     + die("unshare(CLONE_NEWUSER) failed: %s\n",
4701     + strerror(errno));
4702     + }
4703     +
4704     + write_file("/proc/self/uid_map", "0 %d 1", uid);
4705     + write_file("/proc/self/gid_map", "0 %d 1", gid);
4706     +
4707     + if (setgroups(0, NULL) != 0) {
4708     + die("setgroups failed: %s\n",
4709     + strerror(errno));
4710     + }
4711     + if (setgid(0) != 0) {
4712     + die("setgid(0) failed %s\n",
4713     + strerror(errno));
4714     + }
4715     + if (setuid(0) != 0) {
4716     + die("setuid(0) failed %s\n",
4717     + strerror(errno));
4718     + }
4719     +}
4720     +
4721     +static
4722     +bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
4723     +{
4724     + pid_t child;
4725     +
4726     + child = fork();
4727     + if (child == -1) {
4728     + die("fork failed: %s\n",
4729     + strerror(errno));
4730     + }
4731     + if (child != 0) { /* parent */
4732     + pid_t pid;
4733     + int status;
4734     + pid = waitpid(child, &status, 0);
4735     + if (pid == -1) {
4736     + die("waitpid failed: %s\n",
4737     + strerror(errno));
4738     + }
4739     + if (pid != child) {
4740     + die("waited for %d got %d\n",
4741     + child, pid);
4742     + }
4743     + if (!WIFEXITED(status)) {
4744     + die("child did not terminate cleanly\n");
4745     + }
4746     + return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false;
4747     + }
4748     +
4749     + create_and_enter_userns();
4750     + if (unshare(CLONE_NEWNS) != 0) {
4751     + die("unshare(CLONE_NEWNS) failed: %s\n",
4752     + strerror(errno));
4753     + }
4754     +
4755     + if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) {
4756     + die("mount of /tmp failed: %s\n",
4757     + strerror(errno));
4758     + }
4759     +
4760     + create_and_enter_userns();
4761     +
4762     + if (unshare(CLONE_NEWNS) != 0) {
4763     + die("unshare(CLONE_NEWNS) failed: %s\n",
4764     + strerror(errno));
4765     + }
4766     +
4767     + if (mount("/tmp", "/tmp", "none",
4768     + MS_REMOUNT | MS_BIND | remount_flags, NULL) != 0) {
4769     + /* system("cat /proc/self/mounts"); */
4770     + die("remount of /tmp failed: %s\n",
4771     + strerror(errno));
4772     + }
4773     +
4774     + if (mount("/tmp", "/tmp", "none",
4775     + MS_REMOUNT | MS_BIND | invalid_flags, NULL) == 0) {
4776     + /* system("cat /proc/self/mounts"); */
4777     + die("remount of /tmp with invalid flags "
4778     + "succeeded unexpectedly\n");
4779     + }
4780     + exit(EXIT_SUCCESS);
4781     +}
4782     +
4783     +static bool test_unpriv_remount_simple(int mount_flags)
4784     +{
4785     + return test_unpriv_remount(mount_flags, mount_flags, 0);
4786     +}
4787     +
4788     +static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags)
4789     +{
4790     + return test_unpriv_remount(mount_flags, mount_flags, invalid_flags);
4791     +}
4792     +
4793     +int main(int argc, char **argv)
4794     +{
4795     + if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) {
4796     + die("MS_RDONLY malfunctions\n");
4797     + }
4798     + if (!test_unpriv_remount_simple(MS_NODEV)) {
4799     + die("MS_NODEV malfunctions\n");
4800     + }
4801     + if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) {
4802     + die("MS_NOSUID malfunctions\n");
4803     + }
4804     + if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) {
4805     + die("MS_NOEXEC malfunctions\n");
4806     + }
4807     + if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV,
4808     + MS_NOATIME|MS_NODEV))
4809     + {
4810     + die("MS_RELATIME malfunctions\n");
4811     + }
4812     + if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV,
4813     + MS_NOATIME|MS_NODEV))
4814     + {
4815     + die("MS_STRICTATIME malfunctions\n");
4816     + }
4817     + if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV,
4818     + MS_STRICTATIME|MS_NODEV))
4819     + {
4820     + die("MS_NOATIME malfunctions\n");
4821     + }
4822     + if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV,
4823     + MS_NOATIME|MS_NODEV))
4824     + {
4825     + die("MS_RELATIME|MS_NODIRATIME malfunctions\n");
4826     + }
4827     + if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV,
4828     + MS_NOATIME|MS_NODEV))
4829     + {
4830     + die("MS_STRICTATIME|MS_NODIRATIME malfunctions\n");
4831     + }
4832     + if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV,
4833     + MS_STRICTATIME|MS_NODEV))
4834     + {
4835     + die("MS_NOATIME|MS_NODIRATIME malfunctions\n");
4836     + }
4837     + if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV,
4838     + MS_NOATIME|MS_NODEV))
4839     + {
4840     + die("Default atime malfunctions\n");
4841     + }
4842     + return EXIT_SUCCESS;
4843     +}