Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.18/0101-3.18.2-all-fixes.patch



Revision 2528 - Tue Jan 13 09:11:52 2015 UTC by niro
File size: 112824 byte(s)
-patches for linux-3.18.2
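
This is a single cumulative patch carrying the upstream 3.18.1 -> 3.18.2 stable fixes; the first hunk below bumps SUBLEVEL in the top-level Makefile to match. As a minimal usage sketch (the tree name and working directory are illustrative assumptions, not part of this repository), it would be applied with -p1 from the top of a vanilla linux-3.18.1 source tree:

$ cd linux-3.18.1
$ patch -p1 --dry-run < 0101-3.18.2-all-fixes.patch   # check that it applies cleanly first
$ patch -p1 < 0101-3.18.2-all-fixes.patch
$ make kernelversion                                  # should now print 3.18.2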
diff --git a/Makefile b/Makefile
index 3f84029f2b31..8f73b417dc1a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 18
-SUBLEVEL = 1
+SUBLEVEL = 2
EXTRAVERSION =
NAME = Diseased Newt

diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index a495e5821ab8..d5051358fb1b 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -102,30 +102,6 @@
broken-cd;
};

- pinctrl {
- /*
- * These pins might be muxed as I2S by
- * the bootloader, but it conflicts
- * with the real I2S pins that are
- * muxed using i2s_pins. We must mux
- * those pins to a function other than
- * I2S.
- */
- pinctrl-0 = <&hog_pins1 &hog_pins2>;
- pinctrl-names = "default";
-
- hog_pins1: hog-pins1 {
- marvell,pins = "mpp6", "mpp8", "mpp10",
- "mpp12", "mpp13";
- marvell,function = "gpio";
- };
-
- hog_pins2: hog-pins2 {
- marvell,pins = "mpp5", "mpp7", "mpp9";
- marvell,function = "gpo";
- };
- };
-
usb@50000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
index 6b3c23b1e138..7513410f7b89 100644
--- a/arch/arm/boot/dts/armada-370.dtsi
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -106,11 +106,6 @@
reg = <0x11100 0x20>;
};

- system-controller@18200 {
- compatible = "marvell,armada-370-xp-system-controller";
- reg = <0x18200 0x100>;
- };
-
pinctrl {
compatible = "marvell,mv88f6710-pinctrl";
reg = <0x18000 0x38>;
@@ -205,6 +200,11 @@
interrupts = <91>;
};

+ system-controller@18200 {
+ compatible = "marvell,armada-370-xp-system-controller";
+ reg = <0x18200 0x100>;
+ };
+
gateclk: clock-gating-control@18220 {
compatible = "marvell,armada-370-gating-clock";
reg = <0x18220 0x4>;
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 92793ba69c40..d4ebf5679f1f 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -78,6 +78,15 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}

+static inline u64 arch_counter_get_cntpct(void)
+{
+ u64 cval;
+
+ isb();
+ asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
+ return cval;
+}
+
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 044b51185fcc..c31f4c00b1fc 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -361,25 +361,41 @@ static int coherency_type(void)
{
struct device_node *np;
const struct of_device_id *match;
+ int type;

- np = of_find_matching_node_and_match(NULL, of_coherency_table, &match);
- if (np) {
- int type = (int) match->data;
+ /*
+ * The coherency fabric is needed:
+ * - For coherency between processors on Armada XP, so only
+ * when SMP is enabled.
+ * - For coherency between the processor and I/O devices, but
+ * this coherency requires many pre-requisites (write
+ * allocate cache policy, shareable pages, SMP bit set) that
+ * are only meant in SMP situations.
+ *
+ * Note that this means that on Armada 370, there is currently
+ * no way to use hardware I/O coherency, because even when
+ * CONFIG_SMP is enabled, is_smp() returns false due to the
+ * Armada 370 being a single-core processor. To lift this
+ * limitation, we would have to find a way to make the cache
+ * policy set to write-allocate (on all Armada SoCs), and to
+ * set the shareable attribute in page tables (on all Armada
+ * SoCs except the Armada 370). Unfortunately, such decisions
+ * are taken very early in the kernel boot process, at a point
+ * where we don't know yet on which SoC we are running.

- /* Armada 370/XP coherency works in both UP and SMP */
- if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP)
- return type;
+ */
+ if (!is_smp())
+ return COHERENCY_FABRIC_TYPE_NONE;

- /* Armada 375 coherency works only on SMP */
- else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375 && is_smp())
- return type;
+ np = of_find_matching_node_and_match(NULL, of_coherency_table, &match);
+ if (!np)
+ return COHERENCY_FABRIC_TYPE_NONE;

- /* Armada 380 coherency works only on SMP */
- else if (type == COHERENCY_FABRIC_TYPE_ARMADA_380 && is_smp())
- return type;
- }
+ type = (int) match->data;
+
+ of_node_put(np);

- return COHERENCY_FABRIC_TYPE_NONE;
+ return type;
}

int coherency_available(void)
diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S
index f5d881b5d0f7..8b2fbc8b6bc6 100644
--- a/arch/arm/mach-mvebu/coherency_ll.S
+++ b/arch/arm/mach-mvebu/coherency_ll.S
@@ -24,7 +24,10 @@
#include <asm/cp15.h>

.text
-/* Returns the coherency base address in r1 (r0 is untouched) */
+/*
+ * Returns the coherency base address in r1 (r0 is untouched), or 0 if
+ * the coherency fabric is not enabled.
+ */
ENTRY(ll_get_coherency_base)
mrc p15, 0, r1, c1, c0, 0
tst r1, #CR_M @ Check MMU bit enabled
@@ -32,8 +35,13 @@ ENTRY(ll_get_coherency_base)

/*
* MMU is disabled, use the physical address of the coherency
- * base address.
+ * base address. However, if the coherency fabric isn't mapped
+ * (i.e its virtual address is zero), it means coherency is
+ * not enabled, so we return 0.
*/
+ ldr r1, =coherency_base
+ cmp r1, #0
+ beq 2f
adr r1, 3f
ldr r3, [r1]
ldr r1, [r1, r3]
@@ -85,6 +93,9 @@ ENTRY(ll_add_cpu_to_smp_group)
*/
mov r0, lr
bl ll_get_coherency_base
+ /* Bail out if the coherency is not enabled */
+ cmp r1, #0
+ reteq r0
bl ll_get_coherency_cpumask
mov lr, r0
add r0, r1, #ARMADA_XP_CFB_CFG_REG_OFFSET
@@ -107,6 +118,9 @@ ENTRY(ll_enable_coherency)
*/
mov r0, lr
bl ll_get_coherency_base
+ /* Bail out if the coherency is not enabled */
+ cmp r1, #0
+ reteq r0
bl ll_get_coherency_cpumask
mov lr, r0
add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
@@ -131,6 +145,9 @@ ENTRY(ll_disable_coherency)
*/
mov r0, lr
bl ll_get_coherency_base
+ /* Bail out if the coherency is not enabled */
+ cmp r1, #0
+ reteq r0
bl ll_get_coherency_cpumask
mov lr, r0
add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
index 7b2baab0f0bd..71be4af5e975 100644
--- a/arch/arm/mach-tegra/reset-handler.S
+++ b/arch/arm/mach-tegra/reset-handler.S
@@ -51,6 +51,7 @@ ENTRY(tegra_resume)
THUMB( it ne )
bne cpu_resume @ no

+ tegra_get_soc_id TEGRA_APB_MISC_BASE, r6
/* Are we on Tegra20? */
cmp r6, #TEGRA20
beq 1f @ Yes
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index f19097134b02..b1fa4e614718 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -104,6 +104,15 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
}

+static inline u64 arch_counter_get_cntpct(void)
+{
+ /*
+ * AArch64 kernel and user space mandate the use of CNTVCT.
+ */
+ BUG();
+ return 0;
+}
+
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 024c46183c3c..0ad735166d9f 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -30,6 +30,7 @@
#define COMPAT_HWCAP_IDIVA (1 << 17)
#define COMPAT_HWCAP_IDIVT (1 << 18)
#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
+#define COMPAT_HWCAP_LPAE (1 << 20)
#define COMPAT_HWCAP_EVTSTRM (1 << 21)

#define COMPAT_HWCAP2_AES (1 << 0)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 2437196cc5d4..f9620154bfb0 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -72,7 +72,8 @@ EXPORT_SYMBOL_GPL(elf_hwcap);
COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
+ COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
+ COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 41f1e3e2ea24..edba042b2325 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -60,7 +60,7 @@ struct jit_ctx {
const struct bpf_prog *prog;
int idx;
int tmp_used;
- int body_offset;
+ int epilogue_offset;
int *offset;
u32 *image;
};
@@ -130,8 +130,8 @@ static void jit_fill_hole(void *area, unsigned int size)

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
- int to = ctx->offset[ctx->prog->len - 1];
- int from = ctx->idx - ctx->body_offset;
+ int to = ctx->epilogue_offset;
+ int from = ctx->idx;

return to - from;
}
@@ -463,6 +463,8 @@ emit_cond_jmp:
}
/* function return */
case BPF_JMP | BPF_EXIT:
+ /* Optimization: when last instruction is EXIT,
+ simply fallthrough to epilogue. */
if (i == ctx->prog->len - 1)
break;
jmp_offset = epilogue_offset(ctx);
@@ -685,11 +687,13 @@ void bpf_int_jit_compile(struct bpf_prog *prog)

/* 1. Initial fake pass to compute ctx->idx. */

- /* Fake pass to fill in ctx->offset. */
+ /* Fake pass to fill in ctx->offset and ctx->tmp_used. */
if (build_body(&ctx))
goto out;

build_prologue(&ctx);
+
+ ctx.epilogue_offset = ctx.idx;
build_epilogue(&ctx);

/* Now we know the actual image size. */
@@ -706,7 +710,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)

build_prologue(&ctx);

- ctx.body_offset = ctx.idx;
if (build_body(&ctx)) {
bpf_jit_binary_free(header);
goto out;
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index ca38139423ae..437e61159279 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -249,7 +249,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis
struct group_info *group_info;
int retval;

- if (!capable(CAP_SETGID))
+ if (!may_setgroups())
return -EPERM;
if ((unsigned)gidsetsize > NGROUPS_MAX)
return -EINVAL;
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 64dc362506b7..201b520521ed 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -78,6 +78,7 @@ static inline void __exit exit_amd_microcode(void) {}
extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
extern int __init save_microcode_in_initrd(void);
+void reload_early_microcode(void);
#else
static inline void __init load_ucode_bsp(void) {}
static inline void load_ucode_ap(void) {}
@@ -85,6 +86,7 @@ static inline int __init save_microcode_in_initrd(void)
{
return 0;
}
+static inline void reload_early_microcode(void) {}
#endif

#endif /* _ASM_X86_MICROCODE_H */
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index b7b10b82d3e5..af935397e053 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,

extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
extern int apply_microcode_amd(int cpu);
-extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
+extern enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);

#define PATCH_MAX_SIZE PAGE_SIZE
extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
@@ -68,10 +68,12 @@ extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
extern void __init load_ucode_amd_bsp(void);
extern void load_ucode_amd_ap(void);
extern int __init save_microcode_in_initrd_amd(void);
+void reload_ucode_amd(void);
#else
static inline void __init load_ucode_amd_bsp(void) {}
static inline void load_ucode_amd_ap(void) {}
static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
+void reload_ucode_amd(void) {}
#endif

#endif /* _ASM_X86_MICROCODE_AMD_H */
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index bbe296e0bce1..dd4c20043ce7 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -68,11 +68,13 @@ extern void __init load_ucode_intel_bsp(void);
extern void load_ucode_intel_ap(void);
extern void show_ucode_info_early(void);
extern int __init save_microcode_in_initrd_intel(void);
+void reload_ucode_intel(void);
#else
static inline __init void load_ucode_intel_bsp(void) {}
static inline void load_ucode_intel_ap(void) {}
static inline void show_ucode_info_early(void) {}
static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; }
+static inline void reload_ucode_intel(void) {}
#endif

#if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
diff --git a/arch/x86/include/uapi/asm/ldt.h b/arch/x86/include/uapi/asm/ldt.h
index 46727eb37bfe..6e1aaf73852a 100644
--- a/arch/x86/include/uapi/asm/ldt.h
+++ b/arch/x86/include/uapi/asm/ldt.h
@@ -28,6 +28,13 @@ struct user_desc {
unsigned int seg_not_present:1;
unsigned int useable:1;
#ifdef __x86_64__
+ /*
+ * Because this bit is not present in 32-bit user code, user
+ * programs can pass uninitialized values here. Therefore, in
+ * any context in which a user_desc comes from a 32-bit program,
+ * the kernel must act as though lm == 0, regardless of the
+ * actual value.
+ */
unsigned int lm:1;
#endif
};
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 8fffd845e22b..bfbbe6195e2d 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -376,7 +376,7 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
return UCODE_OK;
}

-enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
+enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
{
enum ucode_state ret;

@@ -390,8 +390,8 @@ enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)

#if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32)
/* save BSP's matching patch for early load */
- if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) {
- struct ucode_patch *p = find_patch(smp_processor_id());
+ if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
+ struct ucode_patch *p = find_patch(cpu);
if (p) {
memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
@@ -444,7 +444,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
goto fw_release;
}

- ret = load_microcode_amd(c->x86, fw->data, fw->size);
+ ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);

fw_release:
release_firmware(fw);
diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c
index 06674473b0e6..737737edbd1e 100644
--- a/arch/x86/kernel/cpu/microcode/amd_early.c
+++ b/arch/x86/kernel/cpu/microcode/amd_early.c
@@ -389,7 +389,7 @@ int __init save_microcode_in_initrd_amd(void)
eax = cpuid_eax(0x00000001);
eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

- ret = load_microcode_amd(eax, container, container_size);
+ ret = load_microcode_amd(smp_processor_id(), eax, container, container_size);
if (ret != UCODE_OK)
retval = -EINVAL;

@@ -402,3 +402,21 @@ int __init save_microcode_in_initrd_amd(void)

return retval;
}
+
+void reload_ucode_amd(void)
+{
+ struct microcode_amd *mc;
+ u32 rev, eax;
+
+ rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
+
+ mc = (struct microcode_amd *)amd_ucode_patch;
+
+ if (mc && rev < mc->hdr.patch_id) {
+ if (!__apply_microcode_amd(mc)) {
+ ucode_new_rev = mc->hdr.patch_id;
+ pr_info("microcode: reload patch_level=0x%08x\n",
+ ucode_new_rev);
+ }
+ }
+}
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 08fe6e8a726e..15c29096136b 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -465,16 +465,8 @@ static void mc_bp_resume(void)

if (uci->valid && uci->mc)
microcode_ops->apply_microcode(cpu);
-#ifdef CONFIG_X86_64
else if (!uci->mc)
- /*
- * We might resume and not have applied late microcode but still
- * have a newer patch stashed from the early loader. We don't
- * have it in uci->mc so we have to load it the same way we're
- * applying patches early on the APs.
- */
- load_ucode_ap();
-#endif
+ reload_early_microcode();
}

static struct syscore_ops mc_syscore_ops = {
@@ -559,7 +551,7 @@ static int __init microcode_init(void)
struct cpuinfo_x86 *c = &cpu_data(0);
int error;

- if (dis_ucode_ldr)
+ if (paravirt_enabled() || dis_ucode_ldr)
return 0;

if (c->x86_vendor == X86_VENDOR_INTEL)
diff --git a/arch/x86/kernel/cpu/microcode/core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c
index 2c017f242a78..d45df4bd16ab 100644
--- a/arch/x86/kernel/cpu/microcode/core_early.c
+++ b/arch/x86/kernel/cpu/microcode/core_early.c
@@ -176,3 +176,24 @@ int __init save_microcode_in_initrd(void)

return 0;
}
+
+void reload_early_microcode(void)
+{
+ int vendor, x86;
+
+ vendor = x86_vendor();
+ x86 = x86_family();
+
+ switch (vendor) {
+ case X86_VENDOR_INTEL:
+ if (x86 >= 6)
+ reload_ucode_intel();
+ break;
+ case X86_VENDOR_AMD:
+ if (x86 >= 0x10)
+ reload_ucode_amd();
+ break;
+ default:
+ break;
+ }
+}
diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
index b88343f7a3b3..ec9df6f9cd47 100644
--- a/arch/x86/kernel/cpu/microcode/intel_early.c
+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
@@ -650,8 +650,7 @@ static inline void print_ucode(struct ucode_cpu_info *uci)
}
#endif

-static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
- struct ucode_cpu_info *uci)
+static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
struct microcode_intel *mc_intel;
unsigned int val[2];
@@ -680,7 +679,10 @@ static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
#endif
uci->cpu_sig.rev = val[1];

- print_ucode(uci);
+ if (early)
+ print_ucode(uci);
+ else
+ print_ucode_info(uci, mc_intel->hdr.date);

return 0;
}
@@ -715,12 +717,17 @@ _load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data,
unsigned long initrd_end_early,
struct ucode_cpu_info *uci)
{
+ enum ucode_state ret;
+
collect_cpu_info_early(uci);
scan_microcode(initrd_start_early, initrd_end_early, mc_saved_data,
mc_saved_in_initrd, uci);
- load_microcode(mc_saved_data, mc_saved_in_initrd,
- initrd_start_early, uci);
- apply_microcode_early(mc_saved_data, uci);
+
+ ret = load_microcode(mc_saved_data, mc_saved_in_initrd,
+ initrd_start_early, uci);
+
+ if (ret == UCODE_OK)
+ apply_microcode_early(uci, true);
}

void __init
@@ -749,7 +756,8 @@ load_ucode_intel_bsp(void)
initrd_end_early = initrd_start_early + ramdisk_size;

_load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd,
- initrd_start_early, initrd_end_early, &uci);
+ initrd_start_early, initrd_end_early,
+ &uci);
#endif
}

@@ -783,5 +791,23 @@ void load_ucode_intel_ap(void)
collect_cpu_info_early(&uci);
load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
initrd_start_addr, &uci);
- apply_microcode_early(mc_saved_data_p, &uci);
+ apply_microcode_early(&uci, true);
+}
+
+void reload_ucode_intel(void)
+{
+ struct ucode_cpu_info uci;
+ enum ucode_state ret;
+
+ if (!mc_saved_data.mc_saved_count)
+ return;
+
+ collect_cpu_info_early(&uci);
+
+ ret = generic_load_microcode_early(mc_saved_data.mc_saved,
+ mc_saved_data.mc_saved_count, &uci);
+ if (ret != UCODE_OK)
+ return;
+
+ apply_microcode_early(&uci, false);
}
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index f6945bef2cd1..94f643484300 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -283,7 +283,14 @@ NOKPROBE_SYMBOL(do_async_page_fault);
static void __init paravirt_ops_setup(void)
{
pv_info.name = "KVM";
- pv_info.paravirt_enabled = 1;
+
+ /*
+ * KVM isn't paravirt in the sense of paravirt_enabled. A KVM
+ * guest kernel works like a bare metal kernel with additional
+ * features, and paravirt_enabled is about features that are
+ * missing.
+ */
+ pv_info.paravirt_enabled = 0;

if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
pv_cpu_ops.io_delay = kvm_io_delay;
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index d9156ceecdff..a2de9bc7ac0b 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -263,7 +263,6 @@ void __init kvmclock_init(void)
#endif
kvm_get_preset_lpj();
clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
- pv_info.paravirt_enabled = 1;
pv_info.name = "KVM";

if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3ed4a68d4013..5a2c02913af3 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -283,24 +283,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)

fpu = switch_fpu_prepare(prev_p, next_p, cpu);

- /*
- * Reload esp0, LDT and the page table pointer:
- */
+ /* Reload esp0 and ss1. */
load_sp0(tss, next);

- /*
- * Switch DS and ES.
- * This won't pick up thread selector changes, but I guess that is ok.
- */
- savesegment(es, prev->es);
- if (unlikely(next->es | prev->es))
- loadsegment(es, next->es);
-
- savesegment(ds, prev->ds);
- if (unlikely(next->ds | prev->ds))
- loadsegment(ds, next->ds);
-
-
/* We must save %fs and %gs before load_TLS() because
* %fs and %gs may be cleared by load_TLS().
*
@@ -309,41 +294,101 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
savesegment(fs, fsindex);
savesegment(gs, gsindex);

+ /*
+ * Load TLS before restoring any segments so that segment loads
+ * reference the correct GDT entries.
+ */
load_TLS(next, cpu);

/*
- * Leave lazy mode, flushing any hypercalls made here.
- * This must be done before restoring TLS segments so
- * the GDT and LDT are properly updated, and must be
- * done before math_state_restore, so the TS bit is up
- * to date.
+ * Leave lazy mode, flushing any hypercalls made here. This
+ * must be done after loading TLS entries in the GDT but before
+ * loading segments that might reference them, and and it must
+ * be done before math_state_restore, so the TS bit is up to
+ * date.
*/
arch_end_context_switch(next_p);

+ /* Switch DS and ES.
+ *
+ * Reading them only returns the selectors, but writing them (if
+ * nonzero) loads the full descriptor from the GDT or LDT. The
+ * LDT for next is loaded in switch_mm, and the GDT is loaded
+ * above.
+ *
+ * We therefore need to write new values to the segment
+ * registers on every context switch unless both the new and old
+ * values are zero.
+ *
+ * Note that we don't need to do anything for CS and SS, as
+ * those are saved and restored as part of pt_regs.
+ */
+ savesegment(es, prev->es);
+ if (unlikely(next->es | prev->es))
+ loadsegment(es, next->es);
+
+ savesegment(ds, prev->ds);
+ if (unlikely(next->ds | prev->ds))
+ loadsegment(ds, next->ds);
+
/*
* Switch FS and GS.
*
- * Segment register != 0 always requires a reload. Also
- * reload when it has changed. When prev process used 64bit
- * base always reload to avoid an information leak.
+ * These are even more complicated than FS and GS: they have
+ * 64-bit bases are that controlled by arch_prctl. Those bases
+ * only differ from the values in the GDT or LDT if the selector
+ * is 0.
+ *
+ * Loading the segment register resets the hidden base part of
+ * the register to 0 or the value from the GDT / LDT. If the
+ * next base address zero, writing 0 to the segment register is
+ * much faster than using wrmsr to explicitly zero the base.
+ *
+ * The thread_struct.fs and thread_struct.gs values are 0
+ * if the fs and gs bases respectively are not overridden
+ * from the values implied by fsindex and gsindex. They
+ * are nonzero, and store the nonzero base addresses, if
+ * the bases are overridden.
+ *
+ * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should
+ * be impossible.
+ *
+ * Therefore we need to reload the segment registers if either
+ * the old or new selector is nonzero, and we need to override
+ * the base address if next thread expects it to be overridden.
+ *
+ * This code is unnecessarily slow in the case where the old and
+ * new indexes are zero and the new base is nonzero -- it will
+ * unnecessarily write 0 to the selector before writing the new
+ * base address.
+ *
+ * Note: This all depends on arch_prctl being the only way that
+ * user code can override the segment base. Once wrfsbase and
+ * wrgsbase are enabled, most of this code will need to change.
*/
if (unlikely(fsindex | next->fsindex | prev->fs)) {
loadsegment(fs, next->fsindex);
+
/*
- * Check if the user used a selector != 0; if yes
- * clear 64bit base, since overloaded base is always
- * mapped to the Null selector
+ * If user code wrote a nonzero value to FS, then it also
+ * cleared the overridden base address.
+ *
+ * XXX: if user code wrote 0 to FS and cleared the base
+ * address itself, we won't notice and we'll incorrectly
+ * restore the prior base address next time we reschdule
+ * the process.
*/
if (fsindex)
prev->fs = 0;
}
- /* when next process has a 64bit base use it */
if (next->fs)
wrmsrl(MSR_FS_BASE, next->fs);
prev->fsindex = fsindex;

if (unlikely(gsindex | next->gsindex | prev->gs)) {
load_gs_index(next->gsindex);
+
+ /* This works (and fails) the same way as fsindex above. */
if (gsindex)
prev->gs = 0;
}
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index f7fec09e3e3a..4e942f31b1a7 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -27,6 +27,37 @@ static int get_free_idx(void)
return -ESRCH;
}

+static bool tls_desc_okay(const struct user_desc *info)
+{
+ if (LDT_empty(info))
+ return true;
+
+ /*
+ * espfix is required for 16-bit data segments, but espfix
+ * only works for LDT segments.
+ */
+ if (!info->seg_32bit)
+ return false;
+
+ /* Only allow data segments in the TLS array. */
+ if (info->contents > 1)
+ return false;
+
+ /*
+ * Non-present segments with DPL 3 present an interesting attack
+ * surface. The kernel should handle such segments correctly,
+ * but TLS is very difficult to protect in a sandbox, so prevent
+ * such segments from being created.
+ *
+ * If userspace needs to remove a TLS entry, it can still delete
+ * it outright.
+ */
+ if (info->seg_not_present)
+ return false;
+
+ return true;
+}
+
static void set_tls_desc(struct task_struct *p, int idx,
const struct user_desc *info, int n)
{
@@ -66,6 +97,9 @@ int do_set_thread_area(struct task_struct *p, int idx,
if (copy_from_user(&info, u_info, sizeof(info)))
return -EFAULT;

+ if (!tls_desc_okay(&info))
+ return -EINVAL;
+
if (idx == -1)
idx = info.entry_number;

@@ -192,6 +226,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
{
struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
const struct user_desc *info;
+ int i;

if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
(pos % sizeof(struct user_desc)) != 0 ||
@@ -205,6 +240,10 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
else
info = infobuf;

+ for (i = 0; i < count / sizeof(struct user_desc); i++)
+ if (!tls_desc_okay(info + i))
+ return -EINVAL;
+
set_tls_desc(target,
GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
info, count / sizeof(struct user_desc));
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index de801f22128a..07ab8e9733c5 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -387,7 +387,7 @@ NOKPROBE_SYMBOL(do_int3);
* for scheduling or signal handling. The actual stack switch is done in
* entry.S
*/
-asmlinkage __visible struct pt_regs *sync_regs(struct pt_regs *eregs)
+asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
{
struct pt_regs *regs = eregs;
/* Did already sync */
@@ -413,7 +413,7 @@ struct bad_iret_stack {
struct pt_regs regs;
};

-asmlinkage __visible
+asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
/*
@@ -436,6 +436,7 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
BUG_ON(!user_mode_vm(&new_stack->regs));
return new_stack;
}
+NOKPROBE_SYMBOL(fixup_bad_iret);
#endif

/*
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 6a3ad8011585..1de4beeb25f8 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -449,6 +449,9 @@ void af_alg_complete(struct crypto_async_request *req, int err)
{
struct af_alg_completion *completion = req->data;

+ if (err == -EINPROGRESS)
+ return;
+
completion->err = err;
complete(&completion->completion);
}
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 43005d4d3348..1fa2af957b18 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -462,7 +462,10 @@ static void __init arch_counter_register(unsigned type)

/* Register the CP15 based counter if we have one */
if (type & ARCH_CP15_TIMER) {
- arch_timer_read_counter = arch_counter_get_cntvct;
+ if (arch_timer_use_virtual)
+ arch_timer_read_counter = arch_counter_get_cntvct;
+ else
+ arch_timer_read_counter = arch_counter_get_cntpct;
} else {
arch_timer_read_counter = arch_counter_get_cntvct_mem;

diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index ce023fa3e8ae..ab9a4539a446 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -259,16 +259,12 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
- int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
struct tegra_drm *tegra = drm->dev_private;
struct tegra_bo *bo;

- min_pitch = round_up(min_pitch, tegra->pitch_align);
- if (args->pitch < min_pitch)
- args->pitch = min_pitch;
-
- if (args->size < args->pitch * args->height)
- args->size = args->pitch * args->height;
+ args->pitch = round_up(min_pitch, tegra->pitch_align);
+ args->size = args->pitch * args->height;

bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
&args->handle);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index afe79719ea32..ecbd3ffcf359 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -532,6 +532,19 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
end_io(&b->bio, r);
}

+static void inline_endio(struct bio *bio, int error)
+{
+ bio_end_io_t *end_fn = bio->bi_private;
+
+ /*
+ * Reset the bio to free any attached resources
+ * (e.g. bio integrity profiles).
+ */
+ bio_reset(bio);
+
+ end_fn(bio, error);
+}
+
static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
bio_end_io_t *end_io)
{
@@ -543,7 +556,12 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
b->bio.bi_bdev = b->c->bdev;
- b->bio.bi_end_io = end_io;
+ b->bio.bi_end_io = inline_endio;
+ /*
+ * Use of .bi_private isn't a problem here because
+ * the dm_buffer's inline bio is local to bufio.
+ */
+ b->bio.bi_private = end_io;

/*
* We assume that if len >= PAGE_SIZE ptr is page-aligned.
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 7130505c2425..da496cfb458d 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -951,10 +951,14 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
}

} else {
- clear_dirty(cache, mg->new_oblock, mg->cblock);
- if (mg->requeue_holder)
+ if (mg->requeue_holder) {
+ clear_dirty(cache, mg->new_oblock, mg->cblock);
cell_defer(cache, mg->new_ocell, true);
- else {
+ } else {
+ /*
+ * The block was promoted via an overwrite, so it's dirty.
+ */
+ set_dirty(cache, mg->new_oblock, mg->cblock);
bio_endio(mg->new_ocell->holder, 0);
cell_defer(cache, mg->new_ocell, false);
}
@@ -1070,7 +1074,8 @@ static void issue_copy(struct dm_cache_migration *mg)

avoid = is_discarded_oblock(cache, mg->new_oblock);

- if (!avoid && bio_writes_complete_block(cache, bio)) {
+ if (writeback_mode(&cache->features) &&
+ !avoid && bio_writes_complete_block(cache, bio)) {
issue_overwrite(mg, bio);
return;
}
@@ -2549,11 +2554,11 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
static int cache_map(struct dm_target *ti, struct bio *bio)
{
int r;
- struct dm_bio_prison_cell *cell;
+ struct dm_bio_prison_cell *cell = NULL;
struct cache *cache = ti->private;

r = __cache_map(cache, bio, &cell);
- if (r == DM_MAPIO_REMAPPED) {
+ if (r == DM_MAPIO_REMAPPED && cell) {
inc_ds(cache, bio, cell);
cell_defer(cache, cell, false);
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index fc93b9330af4..08981be7baa1 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -705,7 +705,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
crypto_xor(data + i * 8, buf, 8);
out:
- memset(buf, 0, sizeof(buf));
+ memzero_explicit(buf, sizeof(buf));
return r;
}

diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 0f86d802b533..aae19133cfac 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -990,6 +990,24 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
schedule_zero(tc, virt_block, data_dest, cell, bio);
}

+static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+
+static void check_for_space(struct pool *pool)
+{
+ int r;
+ dm_block_t nr_free;
+
+ if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
+ return;
+
+ r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
+ if (r)
+ return;
+
+ if (nr_free)
+ set_pool_mode(pool, PM_WRITE);
+}
+
/*
* A non-zero return indicates read_only or fail_io mode.
* Many callers don't care about the return value.
@@ -1004,6 +1022,8 @@ static int commit(struct pool *pool)
r = dm_pool_commit_metadata(pool->pmd);
if (r)
metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
+ else
+ check_for_space(pool);

return r;
}
@@ -1022,8 +1042,6 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
}
}

-static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
-
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
int r;
@@ -1824,7 +1842,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
pool->process_bio = process_bio_read_only;
pool->process_discard = process_discard;
pool->process_prepared_mapping = process_prepared_mapping;
- pool->process_prepared_discard = process_prepared_discard_passdown;
+ pool->process_prepared_discard = process_prepared_discard;

if (!pool->pf.error_if_no_space && no_space_timeout)
queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
@@ -3248,14 +3266,14 @@ static void thin_dtr(struct dm_target *ti)
struct thin_c *tc = ti->private;
unsigned long flags;

- thin_put(tc);
- wait_for_completion(&tc->can_destroy);
-
spin_lock_irqsave(&tc->pool->lock, flags);
list_del_rcu(&tc->list);
spin_unlock_irqrestore(&tc->pool->lock, flags);
synchronize_rcu();

+ thin_put(tc);
+ wait_for_completion(&tc->can_destroy);
+
mutex_lock(&dm_thin_pool_table.mutex);

__pool_dec(tc->pool);
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 786b689bdfc7..f4e22bcc7fb8 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -564,7 +564,9 @@ static int sm_bootstrap_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count
{
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

- return smm->ll.nr_blocks;
+ *count = smm->ll.nr_blocks;
+
+ return 0;
}

static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 4fac16bcd732..0afddf6c37af 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -263,6 +263,17 @@ static int tc6393xb_ohci_disable(struct platform_device *dev)
return 0;
}

+static int tc6393xb_ohci_suspend(struct platform_device *dev)
+{
+ struct tc6393xb_platform_data *tcpd = dev_get_platdata(dev->dev.parent);
+
+ /* We can't properly store/restore OHCI state, so fail here */
+ if (tcpd->resume_restore)
+ return -EBUSY;
+
+ return tc6393xb_ohci_disable(dev);
+}
+
static int tc6393xb_fb_enable(struct platform_device *dev)
{
struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
@@ -403,7 +414,7 @@ static struct mfd_cell tc6393xb_cells[] = {
.num_resources = ARRAY_SIZE(tc6393xb_ohci_resources),
.resources = tc6393xb_ohci_resources,
.enable = tc6393xb_ohci_enable,
- .suspend = tc6393xb_ohci_disable,
+ .suspend = tc6393xb_ohci_suspend,
.resume = tc6393xb_ohci_enable,
.disable = tc6393xb_ohci_disable,
},
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 50f9091bcd38..7d63e324e6a8 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -831,6 +831,9 @@ static struct twl4030_power_data osc_off_idle = {

static struct of_device_id twl4030_power_of_match[] = {
{
+ .compatible = "ti,twl4030-power",
+ },
+ {
.compatible = "ti,twl4030-power-reset",
.data = &omap3_reset,
},
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 1fa4c80ff886..a11451f4f408 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -260,7 +260,7 @@ static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
int ret;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

- ret = snprintf(buf, PAGE_SIZE, "%d",
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
get_disk_ro(dev_to_disk(dev)) ^
md->read_only);
mmc_blk_put(md);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 69f0cc68d5b2..f7c95abc8c11 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -626,6 +626,13 @@ static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)

WARN_ON(!(data->flags & MMC_DATA_READ));

+ /*
+ * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
+ * in the FIFO region, so we really shouldn't access it).
+ */
+ if (host->verid < DW_MMC_240A)
+ return;
+
if (host->timing != MMC_TIMING_MMC_HS200 &&
host->timing != MMC_TIMING_UHS_SDR104)
goto disable;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index df27bb4fc098..9c2b9cbcbce0 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -609,6 +609,7 @@ static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
*/
if ((mmc_slot(host).features & HSMMC_HAS_HSPE_SUPPORT) &&
(ios->timing != MMC_TIMING_MMC_DDR52) &&
+ (ios->timing != MMC_TIMING_UHS_DDR50) &&
((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) {
regval = OMAP_HSMMC_READ(host->base, HCTL);
if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000)
@@ -628,7 +629,8 @@ static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
u32 con;

con = OMAP_HSMMC_READ(host->base, CON);
- if (ios->timing == MMC_TIMING_MMC_DDR52)
+ if (ios->timing == MMC_TIMING_MMC_DDR52 ||
+ ios->timing == MMC_TIMING_UHS_DDR50)
con |= DDR; /* configure in DDR mode */
else
con &= ~DDR;
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 5670e381b0cf..e2ec108dba0e 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -127,8 +127,6 @@ void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
return;
scratch_32 &= ~((1 << 21) | (1 << 30));

- /* Set RTD3 function disabled */
- scratch_32 |= ((1 << 29) | (1 << 28));
pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG3, scratch_32);

/* Set L1 Entrance Timer */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 11cc051f97cd..8079a9ddcba9 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -1355,6 +1355,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
}
INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
count = BITS_TO_LONGS(if_msgbuf->nrof_flowrings);
+ count = count * sizeof(unsigned long);
msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
if (!msgbuf->flow_map)
goto fail;
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 4f730af70e7c..30e8d7ad5813 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -283,6 +283,14 @@ static int anatop_regulator_probe(struct platform_device *pdev)
sreg->sel = 0;
sreg->bypass = true;
}
+
+ /*
+ * In case vddpu was disabled by the bootloader, we need to set
+ * a sane default until imx6-cpufreq was probed and changes the
+ * voltage to the correct value. In this case we set 1.25V.
+ */
+ if (!sreg->sel && !strcmp(sreg->name, "vddpu"))
+ sreg->sel = 22;
} else {
rdesc->ops = &anatop_rops;
}
1295     diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
1296     index 45da3c823322..ab1c09eaa5b8 100644
1297     --- a/drivers/scsi/NCR5380.c
1298     +++ b/drivers/scsi/NCR5380.c
1299     @@ -2647,14 +2647,14 @@ static void NCR5380_dma_complete(NCR5380_instance * instance) {
1300     *
1301     * Purpose : abort a command
1302     *
1303     - * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the
1304     - * host byte of the result field to, if zero DID_ABORTED is
1305     + * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the
1306     + * host byte of the result field to, if zero DID_ABORTED is
1307     * used.
1308     *
1309     - * Returns : 0 - success, -1 on failure.
1310     + * Returns : SUCCESS - success, FAILED on failure.
1311     *
1312     - * XXX - there is no way to abort the command that is currently
1313     - * connected, you have to wait for it to complete. If this is
1314     + * XXX - there is no way to abort the command that is currently
1315     + * connected, you have to wait for it to complete. If this is
1316     * a problem, we could implement longjmp() / setjmp(), setjmp()
1317     * called where the loop started in NCR5380_main().
1318     *
1319     @@ -2704,7 +2704,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
1320     * aborted flag and get back into our main loop.
1321     */
1322    
1323     - return 0;
1324     + return SUCCESS;
1325     }
1326     #endif
1327    
1328     diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
1329     index 5f3101797c93..31ace4bef8fe 100644
1330     --- a/drivers/scsi/aha1740.c
1331     +++ b/drivers/scsi/aha1740.c
1332     @@ -531,7 +531,7 @@ static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy)
1333     * quiet as possible...
1334     */
1335    
1336     - return 0;
1337     + return SUCCESS;
1338     }
1339    
1340     static struct scsi_host_template aha1740_template = {
1341     diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
1342     index 79e6f045c2a9..e3bbc0a0f9f1 100644
1343     --- a/drivers/scsi/atari_NCR5380.c
1344     +++ b/drivers/scsi/atari_NCR5380.c
1345     @@ -2607,7 +2607,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
1346     * host byte of the result field to, if zero DID_ABORTED is
1347     * used.
1348     *
1349     - * Returns : 0 - success, -1 on failure.
1350     + * Returns : SUCCESS - success, FAILED on failure.
1351     *
1352     * XXX - there is no way to abort the command that is currently
1353     * connected, you have to wait for it to complete. If this is
1354     diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
1355     index 6504a195c874..45aa684f8b74 100644
1356     --- a/drivers/scsi/esas2r/esas2r_main.c
1357     +++ b/drivers/scsi/esas2r/esas2r_main.c
1358     @@ -1057,7 +1057,7 @@ int esas2r_eh_abort(struct scsi_cmnd *cmd)
1359    
1360     cmd->scsi_done(cmd);
1361    
1362     - return 0;
1363     + return SUCCESS;
1364     }
1365    
1366     spin_lock_irqsave(&a->queue_lock, flags);
1367     diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
1368     index ac5d94cfd52f..2485255f3414 100644
1369     --- a/drivers/scsi/megaraid.c
1370     +++ b/drivers/scsi/megaraid.c
1371     @@ -1945,7 +1945,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
1372     cmd->device->id, (u32)cmd->device->lun);
1373    
1374     if(list_empty(&adapter->pending_list))
1375     - return FALSE;
1376     + return FAILED;
1377    
1378     list_for_each_safe(pos, next, &adapter->pending_list) {
1379    
1380     @@ -1968,7 +1968,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
1381     (aor==SCB_ABORT) ? "ABORTING":"RESET",
1382     scb->idx);
1383    
1384     - return FALSE;
1385     + return FAILED;
1386     }
1387     else {
1388    
1389     @@ -1993,12 +1993,12 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
1390     list_add_tail(SCSI_LIST(cmd),
1391     &adapter->completed_list);
1392    
1393     - return TRUE;
1394     + return SUCCESS;
1395     }
1396     }
1397     }
1398    
1399     - return FALSE;
1400     + return FAILED;
1401     }
1402    
1403     static inline int
1404     diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
1405     index 5640ad1c8214..5e881e5e67b6 100644
1406     --- a/drivers/scsi/megaraid/megaraid_sas_base.c
1407     +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
1408     @@ -1008,7 +1008,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1409     cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1410    
1411     cmd->sync_cmd = 1;
1412     - cmd->cmd_status = 0xFF;
1413     + cmd->cmd_status = ENODATA;
1414    
1415     instance->instancet->issue_dcmd(instance, cmd);
1416    
1417     diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
1418     index 685e6f391fe4..0f66d0ef0b26 100644
1419     --- a/drivers/scsi/megaraid/megaraid_sas_fp.c
1420     +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
1421     @@ -183,14 +183,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
1422     /* New Raid map will not set totalSize, so keep expected value
1423     * for legacy code in ValidateMapInfo
1424     */
1425     - pDrvRaidMap->totalSize = sizeof(struct MR_FW_RAID_MAP_EXT);
1426     + pDrvRaidMap->totalSize =
1427     + cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
1428     } else {
1429     fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
1430     fusion->ld_map[(instance->map_id & 1)];
1431     pFwRaidMap = &fw_map_old->raidMap;
1432    
1433     #if VD_EXT_DEBUG
1434     - for (i = 0; i < pFwRaidMap->ldCount; i++) {
1435     + for (i = 0; i < le16_to_cpu(pFwRaidMap->ldCount); i++) {
1436     dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x "
1437     "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n",
1438     instance->unique_id, i,
1439     @@ -202,12 +203,12 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
1440    
1441     memset(drv_map, 0, fusion->drv_map_sz);
1442     pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
1443     - pDrvRaidMap->ldCount = pFwRaidMap->ldCount;
1444     + pDrvRaidMap->ldCount = (__le16)pFwRaidMap->ldCount;
1445     pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
1446     for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
1447     pDrvRaidMap->ldTgtIdToLd[i] =
1448     (u8)pFwRaidMap->ldTgtIdToLd[i];
1449     - for (i = 0; i < pDrvRaidMap->ldCount; i++) {
1450     + for (i = 0; i < le16_to_cpu(pDrvRaidMap->ldCount); i++) {
1451     pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
1452     #if VD_EXT_DEBUG
1453     dev_dbg(&instance->pdev->dev,
1454     @@ -268,7 +269,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
1455     else
1456     expected_size =
1457     (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
1458     - (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pDrvRaidMap->ldCount)));
1459     + (sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));
1460    
1461     if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
1462     dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n",
1463     @@ -284,7 +285,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
1464    
1465     mr_update_load_balance_params(drv_map, lbInfo);
1466    
1467     - num_lds = le32_to_cpu(drv_map->raidMap.ldCount);
1468     + num_lds = le16_to_cpu(drv_map->raidMap.ldCount);
1469    
1470     /*Convert Raid capability values to CPU arch */
1471     for (ldCount = 0; ldCount < num_lds; ldCount++) {
1472     @@ -457,7 +458,7 @@ u32 mr_spanset_get_span_block(struct megasas_instance *instance,
1473     quad = &map->raidMap.ldSpanMap[ld].
1474     spanBlock[span].
1475     block_span_info.quad[info];
1476     - if (le32_to_cpu(quad->diff == 0))
1477     + if (le32_to_cpu(quad->diff) == 0)
1478     return SPAN_INVALID;
1479     if (le64_to_cpu(quad->logStart) <= row &&
1480     row <= le64_to_cpu(quad->logEnd) &&
1481     @@ -520,7 +521,7 @@ static u64 get_row_from_strip(struct megasas_instance *instance,
1482     span_set->span_row_data_width) * span_set->diff;
1483     for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
1484     if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
1485     - block_span_info.noElements >= info+1)) {
1486     + block_span_info.noElements) >= info+1) {
1487     if (strip_offset >=
1488     span_set->strip_offset[span])
1489     span_offset++;
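The megaraid_sas_fp.c hunks are endianness repairs: ldCount is stored by firmware as a 16-bit little-endian field, so reading it with le32_to_cpu() pulls in two adjacent bytes on big-endian hosts, and a misplaced parenthesis like le32_to_cpu(quad->diff == 0) byte-swaps a boolean instead of the field. The safe pattern converts exactly once, at the field's declared width, then compares in CPU order:

    u16 num_lds = le16_to_cpu(map->raidMap.ldCount); /* 16-bit field */

    if (le32_to_cpu(quad->diff) == 0)   /* convert, then compare */
        return SPAN_INVALID;
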
1490     diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
1491     index f37eed682c75..9d9c27cd4687 100644
1492     --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
1493     +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
1494     @@ -880,7 +880,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
1495    
1496     map = fusion->ld_drv_map[instance->map_id & 1];
1497    
1498     - num_lds = le32_to_cpu(map->raidMap.ldCount);
1499     + num_lds = le16_to_cpu(map->raidMap.ldCount);
1500    
1501     dcmd = &cmd->frame->dcmd;
1502    
1503     @@ -1173,9 +1173,10 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
1504     struct megasas_register_set __iomem *regs)
1505     {
1506     #if defined(writeq) && defined(CONFIG_64BIT)
1507     - u64 req_data = (((u64)req_desc_hi << 32) | (u32)req_desc_lo);
1508     + u64 req_data = (((u64)le32_to_cpu(req_desc_hi) << 32) |
1509     + le32_to_cpu(req_desc_lo));
1510    
1511     - writeq(le64_to_cpu(req_data), &(regs)->inbound_low_queue_port);
1512     + writeq(req_data, &(regs)->inbound_low_queue_port);
1513     #else
1514     unsigned long flags;
1515    
1516     @@ -1373,7 +1374,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1517     /* Logical block reference tag */
1518     io_request->CDB.EEDP32.PrimaryReferenceTag =
1519     cpu_to_be32(ref_tag);
1520     - io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
1521     + io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff);
1522     io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
1523    
1524     /* Transfer length */
1525     @@ -1769,7 +1770,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
1526    
1527     /* set RAID context values */
1528     pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
1529     - pRAID_Context->timeoutValue = raid->fpIoTimeoutForLd;
1530     + pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
1531     pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
1532     pRAID_Context->regLockRowLBA = 0;
1533     pRAID_Context->regLockLength = 0;
1534     @@ -2254,7 +2255,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
1535     * megasas_complete_cmd
1536     */
1537    
1538     - if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
1539     + if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
1540     cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1541    
1542     fusion = instance->ctrl_context;
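Note the direction of the conversion in the frame_hdr->flags test above: instead of converting the __le16 field on every check, the constant is converted once with cpu_to_le16(), which the compiler folds at build time, so the test costs no runtime swap on either endianness:

    /* flags is __le16 on the wire; swap the constant, not the field */
    if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
        cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
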
1543     diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
1544     index 1a2367a1b1f2..6d248a299bc4 100644
1545     --- a/drivers/scsi/sun3_NCR5380.c
1546     +++ b/drivers/scsi/sun3_NCR5380.c
1547     @@ -2590,15 +2590,15 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
1548     * Purpose : abort a command
1549     *
1550     * Inputs : cmd - the struct scsi_cmnd to abort, code - code to set the
1551     - * host byte of the result field to, if zero DID_ABORTED is
1552     + * host byte of the result field to, if zero DID_ABORTED is
1553     * used.
1554     *
1555     - * Returns : 0 - success, -1 on failure.
1556     + * Returns : SUCCESS - success, FAILED on failure.
1557     *
1558     - * XXX - there is no way to abort the command that is currently
1559     - * connected, you have to wait for it to complete. If this is
1560     + * XXX - there is no way to abort the command that is currently
1561     + * connected, you have to wait for it to complete. If this is
1562     * a problem, we could implement longjmp() / setjmp(), setjmp()
1563     - * called where the loop started in NCR5380_main().
1564     + * called where the loop started in NCR5380_main().
1565     */
1566    
1567     static int NCR5380_abort(struct scsi_cmnd *cmd)
1568     diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
1569     index 43b90709585f..488e9bfd996b 100644
1570     --- a/drivers/thermal/thermal_core.c
1571     +++ b/drivers/thermal/thermal_core.c
1572     @@ -1835,10 +1835,10 @@ static int __init thermal_init(void)
1573    
1574     exit_netlink:
1575     genetlink_exit();
1576     -unregister_governors:
1577     - thermal_unregister_governors();
1578     unregister_class:
1579     class_unregister(&thermal_class);
1580     +unregister_governors:
1581     + thermal_unregister_governors();
1582     error:
1583     idr_destroy(&thermal_tz_idr);
1584     idr_destroy(&thermal_cdev_idr);
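The thermal_init() reorder restores the rule that cleanup labels undo work in reverse order of setup: governors are registered before the class, so a later failure must unregister the class first, then the governors. The general shape of the idiom (step names hypothetical):

    static int __init example_init(void)
    {
        int ret;

        ret = register_a();             /* step 1 */
        if (ret)
            return ret;
        ret = register_b();             /* step 2 */
        if (ret)
            goto err_a;
        ret = register_c();             /* step 3 */
        if (ret)
            goto err_b;
        return 0;

    err_b:                              /* unwind in reverse order */
        unregister_b();
    err_a:
        unregister_a();
        return ret;
    }
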
1585     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
1586     index 1bf9f897065d..97676731190c 100644
1587     --- a/fs/btrfs/disk-io.c
1588     +++ b/fs/btrfs/disk-io.c
1589     @@ -4106,12 +4106,6 @@ again:
1590     if (ret)
1591     break;
1592    
1593     - /* opt_discard */
1594     - if (btrfs_test_opt(root, DISCARD))
1595     - ret = btrfs_error_discard_extent(root, start,
1596     - end + 1 - start,
1597     - NULL);
1598     -
1599     clear_extent_dirty(unpin, start, end, GFP_NOFS);
1600     btrfs_error_unpin_extent_range(root, start, end);
1601     cond_resched();
1602     @@ -4129,6 +4123,25 @@ again:
1603     return 0;
1604     }
1605    
1606     +static void btrfs_free_pending_ordered(struct btrfs_transaction *cur_trans,
1607     + struct btrfs_fs_info *fs_info)
1608     +{
1609     + struct btrfs_ordered_extent *ordered;
1610     +
1611     + spin_lock(&fs_info->trans_lock);
1612     + while (!list_empty(&cur_trans->pending_ordered)) {
1613     + ordered = list_first_entry(&cur_trans->pending_ordered,
1614     + struct btrfs_ordered_extent,
1615     + trans_list);
1616     + list_del_init(&ordered->trans_list);
1617     + spin_unlock(&fs_info->trans_lock);
1618     +
1619     + btrfs_put_ordered_extent(ordered);
1620     + spin_lock(&fs_info->trans_lock);
1621     + }
1622     + spin_unlock(&fs_info->trans_lock);
1623     +}
1624     +
1625     void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
1626     struct btrfs_root *root)
1627     {
1628     @@ -4140,6 +4153,7 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
1629     cur_trans->state = TRANS_STATE_UNBLOCKED;
1630     wake_up(&root->fs_info->transaction_wait);
1631    
1632     + btrfs_free_pending_ordered(cur_trans, root->fs_info);
1633     btrfs_destroy_delayed_inodes(root);
1634     btrfs_assert_delayed_root_empty(root);
1635    
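btrfs_free_pending_ordered() above follows the standard "drain a list under a spinlock" shape: detach one entry, drop the lock for work that must not run under it (the final btrfs_put_ordered_extent() can free the extent), then re-take the lock and re-test emptiness. Generic form, with hypothetical names:

    spin_lock(&lock);
    while (!list_empty(&head)) {
        entry = list_first_entry(&head, struct entry, list);
        list_del_init(&entry->list);
        spin_unlock(&lock);     /* the put may sleep or take locks */

        put_entry(entry);

        spin_lock(&lock);       /* re-acquire before re-checking */
    }
    spin_unlock(&lock);
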
1636     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
1637     index 47c1ba141082..4bd5e06fa5ab 100644
1638     --- a/fs/btrfs/extent-tree.c
1639     +++ b/fs/btrfs/extent-tree.c
1640     @@ -5715,7 +5715,8 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
1641     update_global_block_rsv(fs_info);
1642     }
1643    
1644     -static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
1645     +static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
1646     + const bool return_free_space)
1647     {
1648     struct btrfs_fs_info *fs_info = root->fs_info;
1649     struct btrfs_block_group_cache *cache = NULL;
1650     @@ -5739,7 +5740,8 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
1651    
1652     if (start < cache->last_byte_to_unpin) {
1653     len = min(len, cache->last_byte_to_unpin - start);
1654     - btrfs_add_free_space(cache, start, len);
1655     + if (return_free_space)
1656     + btrfs_add_free_space(cache, start, len);
1657     }
1658    
1659     start += len;
1660     @@ -5803,7 +5805,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
1661     end + 1 - start, NULL);
1662    
1663     clear_extent_dirty(unpin, start, end, GFP_NOFS);
1664     - unpin_extent_range(root, start, end);
1665     + unpin_extent_range(root, start, end, true);
1666     cond_resched();
1667     }
1668    
1669     @@ -9585,7 +9587,7 @@ out:
1670    
1671     int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
1672     {
1673     - return unpin_extent_range(root, start, end);
1674     + return unpin_extent_range(root, start, end, false);
1675     }
1676    
1677     int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
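unpin_extent_range() gains a return_free_space flag so both callers share the walk while differing in one side effect: the commit path passes true and returns the range to the free-space cache, while the error-cleanup path passes false, since repopulating the cache during teardown is wasted work (the matching error-path discard was dropped from disk-io.c above for the same reason). The call-site contrast:

    unpin_extent_range(root, start, end, true);  /* commit: refill cache */
    unpin_extent_range(root, start, end, false); /* error cleanup: skip */
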
1678     diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
1679     index 225302b39afb..6a98bddd8f33 100644
1680     --- a/fs/btrfs/extent_map.c
1681     +++ b/fs/btrfs/extent_map.c
1682     @@ -287,8 +287,6 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
1683     if (!em)
1684     goto out;
1685    
1686     - if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
1687     - list_move(&em->list, &tree->modified_extents);
1688     em->generation = gen;
1689     clear_bit(EXTENT_FLAG_PINNED, &em->flags);
1690     em->mod_start = em->start;
1691     diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
1692     index ac734ec4cc20..269e21dd1506 100644
1693     --- a/fs/btrfs/ordered-data.c
1694     +++ b/fs/btrfs/ordered-data.c
1695     @@ -220,6 +220,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
1696     INIT_LIST_HEAD(&entry->work_list);
1697     init_completion(&entry->completion);
1698     INIT_LIST_HEAD(&entry->log_list);
1699     + INIT_LIST_HEAD(&entry->trans_list);
1700    
1701     trace_btrfs_ordered_extent_add(inode, entry);
1702    
1703     @@ -443,6 +444,8 @@ void btrfs_get_logged_extents(struct inode *inode,
1704     ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
1705     if (!list_empty(&ordered->log_list))
1706     continue;
1707     + if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
1708     + continue;
1709     list_add_tail(&ordered->log_list, logged_list);
1710     atomic_inc(&ordered->refs);
1711     }
1712     @@ -472,7 +475,8 @@ void btrfs_submit_logged_extents(struct list_head *logged_list,
1713     spin_unlock_irq(&log->log_extents_lock[index]);
1714     }
1715    
1716     -void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
1717     +void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
1718     + struct btrfs_root *log, u64 transid)
1719     {
1720     struct btrfs_ordered_extent *ordered;
1721     int index = transid % 2;
1722     @@ -497,7 +501,8 @@ void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
1723     wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
1724     &ordered->flags));
1725    
1726     - btrfs_put_ordered_extent(ordered);
1727     + if (!test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
1728     + list_add_tail(&ordered->trans_list, &trans->ordered);
1729     spin_lock_irq(&log->log_extents_lock[index]);
1730     }
1731     spin_unlock_irq(&log->log_extents_lock[index]);
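The BTRFS_ORDERED_LOGGED bit turns "wait, then hand the extent to the transaction" into a once-only operation: test_and_set_bit() atomically claims the bit, so even if two log commits race past the wait, only the first queues the extent on trans->ordered and the list reference is transferred exactly once:

    /* claim-once handoff; losers of the race simply skip the add */
    if (!test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
        list_add_tail(&ordered->trans_list, &trans->ordered);
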
1732     diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
1733     index d81a274d621e..0124bffc775f 100644
1734     --- a/fs/btrfs/ordered-data.h
1735     +++ b/fs/btrfs/ordered-data.h
1736     @@ -71,6 +71,8 @@ struct btrfs_ordered_sum {
1737     ordered extent */
1738     #define BTRFS_ORDERED_TRUNCATED 9 /* Set when we have to truncate an extent */
1739    
1740     +#define BTRFS_ORDERED_LOGGED 10 /* Set when we've waited on this ordered extent
1741     + * in the logging code. */
1742     struct btrfs_ordered_extent {
1743     /* logical offset in the file */
1744     u64 file_offset;
1745     @@ -121,6 +123,9 @@ struct btrfs_ordered_extent {
1746     /* If we need to wait on this to be done */
1747     struct list_head log_list;
1748    
1749     + /* If the transaction needs to wait on this ordered extent */
1750     + struct list_head trans_list;
1751     +
1752     /* used to wait for the BTRFS_ORDERED_COMPLETE bit */
1753     wait_queue_head_t wait;
1754    
1755     @@ -197,7 +202,8 @@ void btrfs_get_logged_extents(struct inode *inode,
1756     void btrfs_put_logged_extents(struct list_head *logged_list);
1757     void btrfs_submit_logged_extents(struct list_head *logged_list,
1758     struct btrfs_root *log);
1759     -void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid);
1760     +void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
1761     + struct btrfs_root *log, u64 transid);
1762     void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
1763     int __init ordered_data_init(void);
1764     void ordered_data_exit(void);
1765     diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
1766     index 54bd91ece35b..cde9c03e3913 100644
1767     --- a/fs/btrfs/super.c
1768     +++ b/fs/btrfs/super.c
1769     @@ -1824,7 +1824,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1770     buf->f_bfree -= block_rsv->size >> bits;
1771     spin_unlock(&block_rsv->lock);
1772    
1773     - buf->f_bavail = total_free_data;
1774     + buf->f_bavail = div_u64(total_free_data, factor);
1775     ret = btrfs_calc_avail_data_space(fs_info->tree_root, &total_free_data);
1776     if (ret) {
1777     mutex_unlock(&fs_info->chunk_mutex);
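The statfs fix scales the raw free-data byte count down by the replication factor computed earlier in btrfs_statfs(), and the spelling matters: on 32-bit kernels a plain '/' on a u64 would emit a call to the libgcc helper __udivdi3, which the kernel does not provide, so 64-bit division goes through div_u64(). A standalone illustration:

    #include <linux/math64.h>

    /* divide a 64-bit count by a 32-bit factor, portably */
    static inline u64 scale_down(u64 count, u32 factor)
    {
        return div_u64(count, factor);  /* safe on 32- and 64-bit */
    }
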
1778     diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
1779     index dcaae3616728..63c6d05950f2 100644
1780     --- a/fs/btrfs/transaction.c
1781     +++ b/fs/btrfs/transaction.c
1782     @@ -220,6 +220,7 @@ loop:
1783     INIT_LIST_HEAD(&cur_trans->pending_snapshots);
1784     INIT_LIST_HEAD(&cur_trans->pending_chunks);
1785     INIT_LIST_HEAD(&cur_trans->switch_commits);
1786     + INIT_LIST_HEAD(&cur_trans->pending_ordered);
1787     list_add_tail(&cur_trans->list, &fs_info->trans_list);
1788     extent_io_tree_init(&cur_trans->dirty_pages,
1789     fs_info->btree_inode->i_mapping);
1790     @@ -488,6 +489,7 @@ again:
1791     h->sync = false;
1792     INIT_LIST_HEAD(&h->qgroup_ref_list);
1793     INIT_LIST_HEAD(&h->new_bgs);
1794     + INIT_LIST_HEAD(&h->ordered);
1795    
1796     smp_mb();
1797     if (cur_trans->state >= TRANS_STATE_BLOCKED &&
1798     @@ -719,6 +721,12 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
1799     if (!list_empty(&trans->new_bgs))
1800     btrfs_create_pending_block_groups(trans, root);
1801    
1802     + if (!list_empty(&trans->ordered)) {
1803     + spin_lock(&info->trans_lock);
1804     + list_splice(&trans->ordered, &cur_trans->pending_ordered);
1805     + spin_unlock(&info->trans_lock);
1806     + }
1807     +
1808     trans->delayed_ref_updates = 0;
1809     if (!trans->sync) {
1810     must_run_delayed_refs =
1811     @@ -1652,6 +1660,28 @@ static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
1812     btrfs_wait_ordered_roots(fs_info, -1);
1813     }
1814    
1815     +static inline void
1816     +btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans,
1817     + struct btrfs_fs_info *fs_info)
1818     +{
1819     + struct btrfs_ordered_extent *ordered;
1820     +
1821     + spin_lock(&fs_info->trans_lock);
1822     + while (!list_empty(&cur_trans->pending_ordered)) {
1823     + ordered = list_first_entry(&cur_trans->pending_ordered,
1824     + struct btrfs_ordered_extent,
1825     + trans_list);
1826     + list_del_init(&ordered->trans_list);
1827     + spin_unlock(&fs_info->trans_lock);
1828     +
1829     + wait_event(ordered->wait, test_bit(BTRFS_ORDERED_COMPLETE,
1830     + &ordered->flags));
1831     + btrfs_put_ordered_extent(ordered);
1832     + spin_lock(&fs_info->trans_lock);
1833     + }
1834     + spin_unlock(&fs_info->trans_lock);
1835     +}
1836     +
1837     int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1838     struct btrfs_root *root)
1839     {
1840     @@ -1702,6 +1732,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1841     }
1842    
1843     spin_lock(&root->fs_info->trans_lock);
1844     + list_splice(&trans->ordered, &cur_trans->pending_ordered);
1845     if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
1846     spin_unlock(&root->fs_info->trans_lock);
1847     atomic_inc(&cur_trans->use_count);
1848     @@ -1754,6 +1785,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1849    
1850     btrfs_wait_delalloc_flush(root->fs_info);
1851    
1852     + btrfs_wait_pending_ordered(cur_trans, root->fs_info);
1853     +
1854     btrfs_scrub_pause(root);
1855     /*
1856     * Ok now we need to make sure to block out any other joins while we
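btrfs_wait_pending_ordered() pairs the drain pattern shown earlier with wait_event(): the condition is re-evaluated after every wakeup, so the caller sleeps until BTRFS_ORDERED_COMPLETE is genuinely set rather than returning on the first spurious wake. The waiting step in isolation:

    wait_event(ordered->wait,
               test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags));
    btrfs_put_ordered_extent(ordered);  /* drop the list's reference */
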
1857     diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
1858     index d8f40e1a5d2d..1ba9c3e04191 100644
1859     --- a/fs/btrfs/transaction.h
1860     +++ b/fs/btrfs/transaction.h
1861     @@ -56,6 +56,7 @@ struct btrfs_transaction {
1862     wait_queue_head_t commit_wait;
1863     struct list_head pending_snapshots;
1864     struct list_head pending_chunks;
1865     + struct list_head pending_ordered;
1866     struct list_head switch_commits;
1867     struct btrfs_delayed_ref_root delayed_refs;
1868     int aborted;
1869     @@ -105,6 +106,7 @@ struct btrfs_trans_handle {
1870     */
1871     struct btrfs_root *root;
1872     struct seq_list delayed_ref_elem;
1873     + struct list_head ordered;
1874     struct list_head qgroup_ref_list;
1875     struct list_head new_bgs;
1876     };
1877     diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
1878     index 286213cec861..7d96cc961663 100644
1879     --- a/fs/btrfs/tree-log.c
1880     +++ b/fs/btrfs/tree-log.c
1881     @@ -2600,9 +2600,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
1882     if (atomic_read(&log_root_tree->log_commit[index2])) {
1883     blk_finish_plug(&plug);
1884     btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
1885     + btrfs_wait_logged_extents(trans, log, log_transid);
1886     wait_log_commit(trans, log_root_tree,
1887     root_log_ctx.log_transid);
1888     - btrfs_free_logged_extents(log, log_transid);
1889     mutex_unlock(&log_root_tree->log_mutex);
1890     ret = root_log_ctx.log_ret;
1891     goto out;
1892     @@ -2645,7 +2645,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
1893     btrfs_wait_marked_extents(log_root_tree,
1894     &log_root_tree->dirty_log_pages,
1895     EXTENT_NEW | EXTENT_DIRTY);
1896     - btrfs_wait_logged_extents(log, log_transid);
1897     + btrfs_wait_logged_extents(trans, log, log_transid);
1898    
1899     btrfs_set_super_log_root(root->fs_info->super_for_commit,
1900     log_root_tree->node->start);
1901     @@ -3766,7 +3766,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
1902     fi = btrfs_item_ptr(leaf, path->slots[0],
1903     struct btrfs_file_extent_item);
1904    
1905     - btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
1906     + btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
1907     &token);
1908     if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
1909     btrfs_set_token_file_extent_type(leaf, fi,
1910     diff --git a/fs/dcache.c b/fs/dcache.c
1911     index 71acf8d6f2be..03dca3cad918 100644
1912     --- a/fs/dcache.c
1913     +++ b/fs/dcache.c
1914     @@ -2393,6 +2393,8 @@ static void swap_names(struct dentry *dentry, struct dentry *target)
1915     */
1916     unsigned int i;
1917     BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
1918     + kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN);
1919     + kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN);
1920     for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
1921     swap(((long *) &dentry->d_iname)[i],
1922     ((long *) &target->d_iname)[i]);
1923     diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
1924     index 2f6735dbf1a9..31b148f3e772 100644
1925     --- a/fs/ecryptfs/crypto.c
1926     +++ b/fs/ecryptfs/crypto.c
1927     @@ -1917,7 +1917,6 @@ ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size,
1928     break;
1929     case 2:
1930     dst[dst_byte_offset++] |= (src_byte);
1931     - dst[dst_byte_offset] = 0;
1932     current_bit_offset = 0;
1933     break;
1934     }
1935     diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
1936     index f5bce9096555..54742f9a67a8 100644
1937     --- a/fs/ecryptfs/file.c
1938     +++ b/fs/ecryptfs/file.c
1939     @@ -190,23 +190,11 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
1940     {
1941     int rc = 0;
1942     struct ecryptfs_crypt_stat *crypt_stat = NULL;
1943     - struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
1944     struct dentry *ecryptfs_dentry = file->f_path.dentry;
1945     /* Private value of ecryptfs_dentry allocated in
1946     * ecryptfs_lookup() */
1947     struct ecryptfs_file_info *file_info;
1948    
1949     - mount_crypt_stat = &ecryptfs_superblock_to_private(
1950     - ecryptfs_dentry->d_sb)->mount_crypt_stat;
1951     - if ((mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
1952     - && ((file->f_flags & O_WRONLY) || (file->f_flags & O_RDWR)
1953     - || (file->f_flags & O_CREAT) || (file->f_flags & O_TRUNC)
1954     - || (file->f_flags & O_APPEND))) {
1955     - printk(KERN_WARNING "Mount has encrypted view enabled; "
1956     - "files may only be read\n");
1957     - rc = -EPERM;
1958     - goto out;
1959     - }
1960     /* Released in ecryptfs_release or end of function if failure */
1961     file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
1962     ecryptfs_set_file_private(file, file_info);
1963     diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
1964     index c4cd1fd86cc2..d9eb84bda559 100644
1965     --- a/fs/ecryptfs/main.c
1966     +++ b/fs/ecryptfs/main.c
1967     @@ -493,6 +493,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
1968     {
1969     struct super_block *s;
1970     struct ecryptfs_sb_info *sbi;
1971     + struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
1972     struct ecryptfs_dentry_info *root_info;
1973     const char *err = "Getting sb failed";
1974     struct inode *inode;
1975     @@ -511,6 +512,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
1976     err = "Error parsing options";
1977     goto out;
1978     }
1979     + mount_crypt_stat = &sbi->mount_crypt_stat;
1980    
1981     s = sget(fs_type, NULL, set_anon_super, flags, NULL);
1982     if (IS_ERR(s)) {
1983     @@ -557,11 +559,19 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
1984    
1985     /**
1986     * Set the POSIX ACL flag based on whether they're enabled in the lower
1987     - * mount. Force a read-only eCryptfs mount if the lower mount is ro.
1988     - * Allow a ro eCryptfs mount even when the lower mount is rw.
1989     + * mount.
1990     */
1991     s->s_flags = flags & ~MS_POSIXACL;
1992     - s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL);
1993     + s->s_flags |= path.dentry->d_sb->s_flags & MS_POSIXACL;
1994     +
1995     + /**
1996     + * Force a read-only eCryptfs mount when:
1997     + * 1) The lower mount is ro
1998     + * 2) The ecryptfs_encrypted_view mount option is specified
1999     + */
2000     + if (path.dentry->d_sb->s_flags & MS_RDONLY ||
2001     + mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
2002     + s->s_flags |= MS_RDONLY;
2003    
2004     s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
2005     s->s_blocksize = path.dentry->d_sb->s_blocksize;
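The eCryptfs pair of hunks moves "encrypted view implies read-only" enforcement from per-open checks in ecryptfs_open() to mount time: rather than rejecting writable opens with -EPERM, the superblock itself is marked MS_RDONLY when the lower mount is ro or ecryptfs_encrypted_view is set, so the VFS refuses writes uniformly and no open-time gap remains. The mount-time condition, as added above:

    if (path.dentry->d_sb->s_flags & MS_RDONLY ||
        mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
        s->s_flags |= MS_RDONLY;
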
2006     diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
2007     index 8e58c4cc2cb9..f988b01b6f89 100644
2008     --- a/fs/f2fs/data.c
2009     +++ b/fs/f2fs/data.c
2010     @@ -1007,21 +1007,19 @@ inline_data:
2011     goto out;
2012     }
2013    
2014     - if (dn.data_blkaddr == NEW_ADDR) {
2015     + if (f2fs_has_inline_data(inode)) {
2016     + err = f2fs_read_inline_data(inode, page);
2017     + if (err) {
2018     + page_cache_release(page);
2019     + goto fail;
2020     + }
2021     + } else if (dn.data_blkaddr == NEW_ADDR) {
2022     zero_user_segment(page, 0, PAGE_CACHE_SIZE);
2023     } else {
2024     - if (f2fs_has_inline_data(inode)) {
2025     - err = f2fs_read_inline_data(inode, page);
2026     - if (err) {
2027     - page_cache_release(page);
2028     - goto fail;
2029     - }
2030     - } else {
2031     - err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
2032     - READ_SYNC);
2033     - if (err)
2034     - goto fail;
2035     - }
2036     + err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
2037     + READ_SYNC);
2038     + if (err)
2039     + goto fail;
2040    
2041     lock_page(page);
2042     if (unlikely(!PageUptodate(page))) {
2043     diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
2044     index 923cb76fdc46..3c31221affe6 100644
2045     --- a/fs/f2fs/segment.c
2046     +++ b/fs/f2fs/segment.c
2047     @@ -1004,6 +1004,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
2048     range->len < sbi->blocksize)
2049     return -EINVAL;
2050    
2051     + cpc.trimmed = 0;
2052     if (end <= MAIN_BLKADDR(sbi))
2053     goto out;
2054    
2055     @@ -1015,7 +1016,6 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
2056     cpc.trim_start = start_segno;
2057     cpc.trim_end = end_segno;
2058     cpc.trim_minlen = range->minlen >> sbi->log_blocksize;
2059     - cpc.trimmed = 0;
2060    
2061     /* do checkpoint to issue discard commands safely */
2062     write_checkpoint(sbi, &cpc);
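The f2fs_trim_fs() fix hoists cpc.trimmed = 0 above the early "goto out", so a trim range that never reaches the checkpoint reports zero trimmed blocks instead of whatever the on-stack cp_control happened to contain; the out label appears to fold cpc.trimmed back into range->len. General rule: initialize any field an early-exit path reads before the first such exit:

    struct cp_control cpc;

    cpc.trimmed = 0;                /* init before any early exit */
    if (end <= MAIN_BLKADDR(sbi))
        goto out;                   /* 'out' reads cpc.trimmed */
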
2063     diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
2064     index f488bbae541a..735d7522a3a9 100644
2065     --- a/fs/isofs/rock.c
2066     +++ b/fs/isofs/rock.c
2067     @@ -30,6 +30,7 @@ struct rock_state {
2068     int cont_size;
2069     int cont_extent;
2070     int cont_offset;
2071     + int cont_loops;
2072     struct inode *inode;
2073     };
2074    
2075     @@ -73,6 +74,9 @@ static void init_rock_state(struct rock_state *rs, struct inode *inode)
2076     rs->inode = inode;
2077     }
2078    
2079     +/* Maximum number of Rock Ridge continuation entries */
2080     +#define RR_MAX_CE_ENTRIES 32
2081     +
2082     /*
2083     * Returns 0 if the caller should continue scanning, 1 if the scan must end
2084     * and -ve on error.
2085     @@ -105,6 +109,8 @@ static int rock_continue(struct rock_state *rs)
2086     goto out;
2087     }
2088     ret = -EIO;
2089     + if (++rs->cont_loops >= RR_MAX_CE_ENTRIES)
2090     + goto out;
2091     bh = sb_bread(rs->inode->i_sb, rs->cont_extent);
2092     if (bh) {
2093     memcpy(rs->buffer, bh->b_data + rs->cont_offset,
2094     @@ -356,6 +362,9 @@ repeat:
2095     rs.cont_size = isonum_733(rr->u.CE.size);
2096     break;
2097     case SIG('E', 'R'):
2098     + /* Invalid length of ER tag id? */
2099     + if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len)
2100     + goto out;
2101     ISOFS_SB(inode->i_sb)->s_rock = 1;
2102     printk(KERN_DEBUG "ISO 9660 Extensions: ");
2103     {
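The isofs hunks harden the Rock Ridge parser in two independent ways: continuation (CE) entries on crafted media can chain into a cycle, so the walk is capped at RR_MAX_CE_ENTRIES, and an ER tag whose len_id exceeds the enclosing record is rejected before it can drive an out-of-bounds read. The loop bound in isolation:

    ret = -EIO;
    if (++rs->cont_loops >= RR_MAX_CE_ENTRIES)
        goto out;                   /* refuse CE cycles on bad media */
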
2104     diff --git a/fs/namespace.c b/fs/namespace.c
2105     index 5b66b2b3624d..bbde14719655 100644
2106     --- a/fs/namespace.c
2107     +++ b/fs/namespace.c
2108     @@ -1369,6 +1369,8 @@ void umount_tree(struct mount *mnt, int how)
2109     }
2110     if (last) {
2111     last->mnt_hash.next = unmounted.first;
2112     + if (unmounted.first)
2113     + unmounted.first->pprev = &last->mnt_hash.next;
2114     unmounted.first = tmp_list.first;
2115     unmounted.first->pprev = &unmounted.first;
2116     }
2117     @@ -1544,6 +1546,9 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
2118     goto dput_and_out;
2119     if (mnt->mnt.mnt_flags & MNT_LOCKED)
2120     goto dput_and_out;
2121     + retval = -EPERM;
2122     + if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
2123     + goto dput_and_out;
2124    
2125     retval = do_umount(mnt, flags);
2126     dput_and_out:
2127     @@ -2098,7 +2103,13 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
2128     }
2129     if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
2130     !(mnt_flags & MNT_NODEV)) {
2131     - return -EPERM;
2132     + /* Was the nodev implicitly added in mount? */
2133     + if ((mnt->mnt_ns->user_ns != &init_user_ns) &&
2134     + !(sb->s_type->fs_flags & FS_USERNS_DEV_MOUNT)) {
2135     + mnt_flags |= MNT_NODEV;
2136     + } else {
2137     + return -EPERM;
2138     + }
2139     }
2140     if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
2141     !(mnt_flags & MNT_NOSUID)) {
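Two separate policy fixes sit in the namespace.c hunks: MNT_FORCE is gated on capable(CAP_SYS_ADMIN), since a forced unmount can abort other users' in-flight I/O, and do_remount() re-adds an implicitly applied MNT_NODEV instead of failing, so mounts inside user namespaces that received nodev automatically can still be remounted. The capability gate in isolation:

    retval = -EPERM;
    if ((flags & MNT_FORCE) && !capable(CAP_SYS_ADMIN))
        goto dput_and_out;          /* forced umount is privileged */
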
2142     diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
2143     index d5659d96ee7f..cf7e043a9447 100644
2144     --- a/fs/ncpfs/ioctl.c
2145     +++ b/fs/ncpfs/ioctl.c
2146     @@ -447,7 +447,6 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg
2147     result = -EIO;
2148     }
2149     }
2150     - result = 0;
2151     }
2152     mutex_unlock(&server->root_setup_lock);
2153    
2154     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2155     index 69dc20a743f9..83f3a7d7466e 100644
2156     --- a/fs/nfs/nfs4proc.c
2157     +++ b/fs/nfs/nfs4proc.c
2158     @@ -7704,6 +7704,9 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
2159    
2160     dprintk("--> %s\n", __func__);
2161    
2162     + /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
2163     + pnfs_get_layout_hdr(NFS_I(inode)->layout);
2164     +
2165     lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
2166     if (!lgp->args.layout.pages) {
2167     nfs4_layoutget_release(lgp);
2168     @@ -7716,9 +7719,6 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
2169     lgp->res.seq_res.sr_slot = NULL;
2170     nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
2171    
2172     - /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
2173     - pnfs_get_layout_hdr(NFS_I(inode)->layout);
2174     -
2175     task = rpc_run_task(&task_setup_data);
2176     if (IS_ERR(task))
2177     return ERR_CAST(task);
2178     diff --git a/fs/proc/base.c b/fs/proc/base.c
2179     index 772efa45a452..7dc3ea89ef1a 100644
2180     --- a/fs/proc/base.c
2181     +++ b/fs/proc/base.c
2182     @@ -2464,6 +2464,57 @@ static const struct file_operations proc_projid_map_operations = {
2183     .llseek = seq_lseek,
2184     .release = proc_id_map_release,
2185     };
2186     +
2187     +static int proc_setgroups_open(struct inode *inode, struct file *file)
2188     +{
2189     + struct user_namespace *ns = NULL;
2190     + struct task_struct *task;
2191     + int ret;
2192     +
2193     + ret = -ESRCH;
2194     + task = get_proc_task(inode);
2195     + if (task) {
2196     + rcu_read_lock();
2197     + ns = get_user_ns(task_cred_xxx(task, user_ns));
2198     + rcu_read_unlock();
2199     + put_task_struct(task);
2200     + }
2201     + if (!ns)
2202     + goto err;
2203     +
2204     + if (file->f_mode & FMODE_WRITE) {
2205     + ret = -EACCES;
2206     + if (!ns_capable(ns, CAP_SYS_ADMIN))
2207     + goto err_put_ns;
2208     + }
2209     +
2210     + ret = single_open(file, &proc_setgroups_show, ns);
2211     + if (ret)
2212     + goto err_put_ns;
2213     +
2214     + return 0;
2215     +err_put_ns:
2216     + put_user_ns(ns);
2217     +err:
2218     + return ret;
2219     +}
2220     +
2221     +static int proc_setgroups_release(struct inode *inode, struct file *file)
2222     +{
2223     + struct seq_file *seq = file->private_data;
2224     + struct user_namespace *ns = seq->private;
2225     + int ret = single_release(inode, file);
2226     + put_user_ns(ns);
2227     + return ret;
2228     +}
2229     +
2230     +static const struct file_operations proc_setgroups_operations = {
2231     + .open = proc_setgroups_open,
2232     + .write = proc_setgroups_write,
2233     + .read = seq_read,
2234     + .llseek = seq_lseek,
2235     + .release = proc_setgroups_release,
2236     +};
2237     #endif /* CONFIG_USER_NS */
2238    
2239     static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
2240     @@ -2572,6 +2623,7 @@ static const struct pid_entry tgid_base_stuff[] = {
2241     REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
2242     REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
2243     REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
2244     + REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations),
2245     #endif
2246     #ifdef CONFIG_CHECKPOINT_RESTORE
2247     REG("timers", S_IRUGO, proc_timers_operations),
2248     @@ -2913,6 +2965,7 @@ static const struct pid_entry tid_base_stuff[] = {
2249     REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
2250     REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
2251     REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
2252     + REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations),
2253     #endif
2254     };
2255    
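The new /proc/<pid>/setgroups file follows the usual seq_file lifetime pattern: open takes a reference on the task's user namespace and stores it as the seq_file private pointer, a write is permission-checked against that namespace, and release drops the reference after single_release() so the namespace cannot outlive the open file. The release half, restated with a hypothetical name:

    static int example_release(struct inode *inode, struct file *file)
    {
        struct seq_file *seq = file->private_data;
        struct user_namespace *ns = seq->private;
        int ret = single_release(inode, file);

        put_user_ns(ns);    /* pairs with get_user_ns() in open */
        return ret;
    }
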
2256     diff --git a/fs/udf/dir.c b/fs/udf/dir.c
2257     index a012c51caffd..a7690b46ce0a 100644
2258     --- a/fs/udf/dir.c
2259     +++ b/fs/udf/dir.c
2260     @@ -167,7 +167,8 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
2261     continue;
2262     }
2263    
2264     - flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
2265     + flen = udf_get_filename(dir->i_sb, nameptr, lfi, fname,
2266     + UDF_NAME_LEN);
2267     if (!flen)
2268     continue;
2269    
2270     diff --git a/fs/udf/inode.c b/fs/udf/inode.c
2271     index c9b4df5810d5..5bc71d9a674a 100644
2272     --- a/fs/udf/inode.c
2273     +++ b/fs/udf/inode.c
2274     @@ -1489,6 +1489,20 @@ reread:
2275     }
2276     inode->i_generation = iinfo->i_unique;
2277    
2278     + /* Sanity checks for files in ICB so that we don't get confused later */
2279     + if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
2280     + /*
2281     + * For file in ICB data is stored in allocation descriptor
2282     + * so sizes should match
2283     + */
2284     + if (iinfo->i_lenAlloc != inode->i_size)
2285     + goto out;
2286     + /* File in ICB has to fit in there... */
2287     + if (inode->i_size > inode->i_sb->s_blocksize -
2288     + udf_file_entry_alloc_offset(inode))
2289     + goto out;
2290     + }
2291     +
2292     switch (fe->icbTag.fileType) {
2293     case ICBTAG_FILE_TYPE_DIRECTORY:
2294     inode->i_op = &udf_dir_inode_operations;
2295     diff --git a/fs/udf/namei.c b/fs/udf/namei.c
2296     index c12e260fd6c4..6ff19b54b51f 100644
2297     --- a/fs/udf/namei.c
2298     +++ b/fs/udf/namei.c
2299     @@ -233,7 +233,8 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
2300     if (!lfi)
2301     continue;
2302    
2303     - flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
2304     + flen = udf_get_filename(dir->i_sb, nameptr, lfi, fname,
2305     + UDF_NAME_LEN);
2306     if (flen && udf_match(flen, fname, child->len, child->name))
2307     goto out_ok;
2308     }
2309     diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
2310     index 6fb7945c1e6e..ac10ca939f26 100644
2311     --- a/fs/udf/symlink.c
2312     +++ b/fs/udf/symlink.c
2313     @@ -30,49 +30,73 @@
2314     #include <linux/buffer_head.h>
2315     #include "udf_i.h"
2316    
2317     -static void udf_pc_to_char(struct super_block *sb, unsigned char *from,
2318     - int fromlen, unsigned char *to)
2319     +static int udf_pc_to_char(struct super_block *sb, unsigned char *from,
2320     + int fromlen, unsigned char *to, int tolen)
2321     {
2322     struct pathComponent *pc;
2323     int elen = 0;
2324     + int comp_len;
2325     unsigned char *p = to;
2326    
2327     + /* Reserve one byte for terminating \0 */
2328     + tolen--;
2329     while (elen < fromlen) {
2330     pc = (struct pathComponent *)(from + elen);
2331     + elen += sizeof(struct pathComponent);
2332     switch (pc->componentType) {
2333     case 1:
2334     /*
2335     * Symlink points to some place which should be agreed
2336     * upon between originator and receiver of the media. Ignore.
2337     */
2338     - if (pc->lengthComponentIdent > 0)
2339     + if (pc->lengthComponentIdent > 0) {
2340     + elen += pc->lengthComponentIdent;
2341     break;
2342     + }
2343     /* Fall through */
2344     case 2:
2345     + if (tolen == 0)
2346     + return -ENAMETOOLONG;
2347     p = to;
2348     *p++ = '/';
2349     + tolen--;
2350     break;
2351     case 3:
2352     + if (tolen < 3)
2353     + return -ENAMETOOLONG;
2354     memcpy(p, "../", 3);
2355     p += 3;
2356     + tolen -= 3;
2357     break;
2358     case 4:
2359     + if (tolen < 2)
2360     + return -ENAMETOOLONG;
2361     memcpy(p, "./", 2);
2362     p += 2;
2363     + tolen -= 2;
2364     /* that would be . - just ignore */
2365     break;
2366     case 5:
2367     - p += udf_get_filename(sb, pc->componentIdent, p,
2368     - pc->lengthComponentIdent);
2369     + elen += pc->lengthComponentIdent;
2370     + if (elen > fromlen)
2371     + return -EIO;
2372     + comp_len = udf_get_filename(sb, pc->componentIdent,
2373     + pc->lengthComponentIdent,
2374     + p, tolen);
2375     + p += comp_len;
2376     + tolen -= comp_len;
2377     + if (tolen == 0)
2378     + return -ENAMETOOLONG;
2379     *p++ = '/';
2380     + tolen--;
2381     break;
2382     }
2383     - elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
2384     }
2385     if (p > to + 1)
2386     p[-1] = '\0';
2387     else
2388     p[0] = '\0';
2389     + return 0;
2390     }
2391    
2392     static int udf_symlink_filler(struct file *file, struct page *page)
2393     @@ -80,11 +104,17 @@ static int udf_symlink_filler(struct file *file, struct page *page)
2394     struct inode *inode = page->mapping->host;
2395     struct buffer_head *bh = NULL;
2396     unsigned char *symlink;
2397     - int err = -EIO;
2398     + int err;
2399     unsigned char *p = kmap(page);
2400     struct udf_inode_info *iinfo;
2401     uint32_t pos;
2402    
2403     + /* We don't support symlinks longer than one block */
2404     + if (inode->i_size > inode->i_sb->s_blocksize) {
2405     + err = -ENAMETOOLONG;
2406     + goto out_unmap;
2407     + }
2408     +
2409     iinfo = UDF_I(inode);
2410     pos = udf_block_map(inode, 0);
2411    
2412     @@ -94,14 +124,18 @@ static int udf_symlink_filler(struct file *file, struct page *page)
2413     } else {
2414     bh = sb_bread(inode->i_sb, pos);
2415    
2416     - if (!bh)
2417     - goto out;
2418     + if (!bh) {
2419     + err = -EIO;
2420     + goto out_unlock_inode;
2421     + }
2422    
2423     symlink = bh->b_data;
2424     }
2425    
2426     - udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
2427     + err = udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p, PAGE_SIZE);
2428     brelse(bh);
2429     + if (err)
2430     + goto out_unlock_inode;
2431    
2432     up_read(&iinfo->i_data_sem);
2433     SetPageUptodate(page);
2434     @@ -109,9 +143,10 @@ static int udf_symlink_filler(struct file *file, struct page *page)
2435     unlock_page(page);
2436     return 0;
2437    
2438     -out:
2439     +out_unlock_inode:
2440     up_read(&iinfo->i_data_sem);
2441     SetPageError(page);
2442     +out_unmap:
2443     kunmap(page);
2444     unlock_page(page);
2445     return err;
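udf_pc_to_char() is converted from "trust the caller's buffer" to fully length-checked: one byte is reserved up front for the terminating NUL, every emission first verifies and then decrements the remaining space, and overflow returns -ENAMETOOLONG while a component that runs past the source returns -EIO. Each copy step follows the same three-part shape:

    if (tolen < 3)
        return -ENAMETOOLONG;       /* no room for this component */
    memcpy(p, "../", 3);
    p += 3;
    tolen -= 3;                     /* account for what was written */
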
2446     diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
2447     index 1cc3c993ebd0..47bb3f5ca360 100644
2448     --- a/fs/udf/udfdecl.h
2449     +++ b/fs/udf/udfdecl.h
2450     @@ -211,7 +211,8 @@ udf_get_lb_pblock(struct super_block *sb, struct kernel_lb_addr *loc,
2451     }
2452    
2453     /* unicode.c */
2454     -extern int udf_get_filename(struct super_block *, uint8_t *, uint8_t *, int);
2455     +extern int udf_get_filename(struct super_block *, uint8_t *, int, uint8_t *,
2456     + int);
2457     extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *,
2458     int);
2459     extern int udf_build_ustr(struct ustr *, dstring *, int);
2460     diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
2461     index afd470e588ff..b84fee372734 100644
2462     --- a/fs/udf/unicode.c
2463     +++ b/fs/udf/unicode.c
2464     @@ -28,7 +28,8 @@
2465    
2466     #include "udf_sb.h"
2467    
2468     -static int udf_translate_to_linux(uint8_t *, uint8_t *, int, uint8_t *, int);
2469     +static int udf_translate_to_linux(uint8_t *, int, uint8_t *, int, uint8_t *,
2470     + int);
2471    
2472     static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen)
2473     {
2474     @@ -333,8 +334,8 @@ try_again:
2475     return u_len + 1;
2476     }
2477    
2478     -int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
2479     - int flen)
2480     +int udf_get_filename(struct super_block *sb, uint8_t *sname, int slen,
2481     + uint8_t *dname, int dlen)
2482     {
2483     struct ustr *filename, *unifilename;
2484     int len = 0;
2485     @@ -347,7 +348,7 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
2486     if (!unifilename)
2487     goto out1;
2488    
2489     - if (udf_build_ustr_exact(unifilename, sname, flen))
2490     + if (udf_build_ustr_exact(unifilename, sname, slen))
2491     goto out2;
2492    
2493     if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
2494     @@ -366,7 +367,8 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
2495     } else
2496     goto out2;
2497    
2498     - len = udf_translate_to_linux(dname, filename->u_name, filename->u_len,
2499     + len = udf_translate_to_linux(dname, dlen,
2500     + filename->u_name, filename->u_len,
2501     unifilename->u_name, unifilename->u_len);
2502     out2:
2503     kfree(unifilename);
2504     @@ -403,10 +405,12 @@ int udf_put_filename(struct super_block *sb, const uint8_t *sname,
2505     #define EXT_MARK '.'
2506     #define CRC_MARK '#'
2507     #define EXT_SIZE 5
2508     +/* Number of chars we need to store generated CRC to make filename unique */
2509     +#define CRC_LEN 5
2510    
2511     -static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
2512     - int udfLen, uint8_t *fidName,
2513     - int fidNameLen)
2514     +static int udf_translate_to_linux(uint8_t *newName, int newLen,
2515     + uint8_t *udfName, int udfLen,
2516     + uint8_t *fidName, int fidNameLen)
2517     {
2518     int index, newIndex = 0, needsCRC = 0;
2519     int extIndex = 0, newExtIndex = 0, hasExt = 0;
2520     @@ -439,7 +443,7 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
2521     newExtIndex = newIndex;
2522     }
2523     }
2524     - if (newIndex < 256)
2525     + if (newIndex < newLen)
2526     newName[newIndex++] = curr;
2527     else
2528     needsCRC = 1;
2529     @@ -467,13 +471,13 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
2530     }
2531     ext[localExtIndex++] = curr;
2532     }
2533     - maxFilenameLen = 250 - localExtIndex;
2534     + maxFilenameLen = newLen - CRC_LEN - localExtIndex;
2535     if (newIndex > maxFilenameLen)
2536     newIndex = maxFilenameLen;
2537     else
2538     newIndex = newExtIndex;
2539     - } else if (newIndex > 250)
2540     - newIndex = 250;
2541     + } else if (newIndex > newLen - CRC_LEN)
2542     + newIndex = newLen - CRC_LEN;
2543     newName[newIndex++] = CRC_MARK;
2544     valueCRC = crc_itu_t(0, fidName, fidNameLen);
2545     newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8);
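udf_translate_to_linux() used to clamp against hard-coded 250/256 limits; with the new newLen parameter the clamp tracks the caller's actual buffer, and CRC_LEN documents why five characters are held back: a '#' marker plus four hex CRC digits are appended to keep truncated names unique. The reserve computation:

    /* keep room for "#XXXX" (CRC_LEN) plus any preserved extension */
    maxFilenameLen = newLen - CRC_LEN - localExtIndex;
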
2546     diff --git a/include/linux/audit.h b/include/linux/audit.h
2547     index e58fe7df8b9c..10f155b7daf6 100644
2548     --- a/include/linux/audit.h
2549     +++ b/include/linux/audit.h
2550     @@ -47,6 +47,7 @@ struct sk_buff;
2551    
2552     struct audit_krule {
2553     int vers_ops;
2554     + u32 pflags;
2555     u32 flags;
2556     u32 listnr;
2557     u32 action;
2558     @@ -64,6 +65,9 @@ struct audit_krule {
2559     u64 prio;
2560     };
2561    
2562     +/* Flag to indicate legacy AUDIT_LOGINUID unset usage */
2563     +#define AUDIT_LOGINUID_LEGACY 0x1
2564     +
2565     struct audit_field {
2566     u32 type;
2567     union {
2568     diff --git a/include/linux/cred.h b/include/linux/cred.h
2569     index b2d0820837c4..2fb2ca2127ed 100644
2570     --- a/include/linux/cred.h
2571     +++ b/include/linux/cred.h
2572     @@ -68,6 +68,7 @@ extern void groups_free(struct group_info *);
2573     extern int set_current_groups(struct group_info *);
2574     extern void set_groups(struct cred *, struct group_info *);
2575     extern int groups_search(const struct group_info *, kgid_t);
2576     +extern bool may_setgroups(void);
2577    
2578     /* access the groups "array" with this macro */
2579     #define GROUP_AT(gi, i) \
2580     diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
2581     index e95372654f09..9f3579ff543d 100644
2582     --- a/include/linux/user_namespace.h
2583     +++ b/include/linux/user_namespace.h
2584     @@ -17,6 +17,10 @@ struct uid_gid_map { /* 64 bytes -- 1 cache line */
2585     } extent[UID_GID_MAP_MAX_EXTENTS];
2586     };
2587    
2588     +#define USERNS_SETGROUPS_ALLOWED 1UL
2589     +
2590     +#define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED
2591     +
2592     struct user_namespace {
2593     struct uid_gid_map uid_map;
2594     struct uid_gid_map gid_map;
2595     @@ -27,6 +31,7 @@ struct user_namespace {
2596     kuid_t owner;
2597     kgid_t group;
2598     unsigned int proc_inum;
2599     + unsigned long flags;
2600    
2601     /* Register of per-UID persistent keyrings for this namespace */
2602     #ifdef CONFIG_PERSISTENT_KEYRINGS
2603     @@ -63,6 +68,9 @@ extern const struct seq_operations proc_projid_seq_operations;
2604     extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *);
2605     extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *);
2606     extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *);
2607     +extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *);
2608     +extern int proc_setgroups_show(struct seq_file *m, void *v);
2609     +extern bool userns_may_setgroups(const struct user_namespace *ns);
2610     #else
2611    
2612     static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
2613     @@ -87,6 +95,10 @@ static inline void put_user_ns(struct user_namespace *ns)
2614     {
2615     }
2616    
2617     +static inline bool userns_may_setgroups(const struct user_namespace *ns)
2618     +{
2619     + return true;
2620     +}
2621     #endif
2622    
2623     #endif /* _LINUX_USER_H */
2624     diff --git a/kernel/audit.c b/kernel/audit.c
2625     index cebb11db4d34..c6df9905f1c6 100644
2626     --- a/kernel/audit.c
2627     +++ b/kernel/audit.c
2628     @@ -429,7 +429,7 @@ static void kauditd_send_skb(struct sk_buff *skb)
2629     * This function doesn't consume an skb as might be expected since it has to
2630     * copy it anyways.
2631     */
2632     -static void kauditd_send_multicast_skb(struct sk_buff *skb)
2633     +static void kauditd_send_multicast_skb(struct sk_buff *skb, gfp_t gfp_mask)
2634     {
2635     struct sk_buff *copy;
2636     struct audit_net *aunet = net_generic(&init_net, audit_net_id);
2637     @@ -448,11 +448,11 @@ static void kauditd_send_multicast_skb(struct sk_buff *skb)
2638     * no reason for new multicast clients to continue with this
2639     * non-compliance.
2640     */
2641     - copy = skb_copy(skb, GFP_KERNEL);
2642     + copy = skb_copy(skb, gfp_mask);
2643     if (!copy)
2644     return;
2645    
2646     - nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, GFP_KERNEL);
2647     + nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, gfp_mask);
2648     }
2649    
2650     /*
2651     @@ -1949,7 +1949,7 @@ void audit_log_end(struct audit_buffer *ab)
2652     struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
2653    
2654     nlh->nlmsg_len = ab->skb->len;
2655     - kauditd_send_multicast_skb(ab->skb);
2656     + kauditd_send_multicast_skb(ab->skb, ab->gfp_mask);
2657    
2658     /*
2659     * The original kaudit unicast socket sends up messages with
2660     diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
2661     index 3598e13f2a65..4f68a326d92e 100644
2662     --- a/kernel/auditfilter.c
2663     +++ b/kernel/auditfilter.c
2664     @@ -442,19 +442,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
2665     if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
2666     f->type = AUDIT_LOGINUID_SET;
2667     f->val = 0;
2668     - }
2669     -
2670     - if ((f->type == AUDIT_PID) || (f->type == AUDIT_PPID)) {
2671     - struct pid *pid;
2672     - rcu_read_lock();
2673     - pid = find_vpid(f->val);
2674     - if (!pid) {
2675     - rcu_read_unlock();
2676     - err = -ESRCH;
2677     - goto exit_free;
2678     - }
2679     - f->val = pid_nr(pid);
2680     - rcu_read_unlock();
2681     + entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
2682     }
2683    
2684     err = audit_field_valid(entry, f);
2685     @@ -630,6 +618,13 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
2686     data->buflen += data->values[i] =
2687     audit_pack_string(&bufp, krule->filterkey);
2688     break;
2689     + case AUDIT_LOGINUID_SET:
2690     + if (krule->pflags & AUDIT_LOGINUID_LEGACY && !f->val) {
2691     + data->fields[i] = AUDIT_LOGINUID;
2692     + data->values[i] = AUDIT_UID_UNSET;
2693     + break;
2694     + }
2695     + /* fallthrough if set */
2696     default:
2697     data->values[i] = f->val;
2698     }
2699     @@ -646,6 +641,7 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)
2700     int i;
2701    
2702     if (a->flags != b->flags ||
2703     + a->pflags != b->pflags ||
2704     a->listnr != b->listnr ||
2705     a->action != b->action ||
2706     a->field_count != b->field_count)
2707     @@ -764,6 +760,7 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old)
2708     new = &entry->rule;
2709     new->vers_ops = old->vers_ops;
2710     new->flags = old->flags;
2711     + new->pflags = old->pflags;
2712     new->listnr = old->listnr;
2713     new->action = old->action;
2714     for (i = 0; i < AUDIT_BITMASK_SIZE; i++)
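The auditfilter changes keep the userspace-visible rule encoding stable while dropping the PID-translation misfeature: a legacy "AUDIT_LOGINUID unset" rule is canonicalized to AUDIT_LOGINUID_SET internally, but the new pflags bit AUDIT_LOGINUID_LEGACY remembers the original spelling so audit_krule_to_data() can reproduce it byte-for-byte when rules are listed, and rule comparison/duplication carry pflags along. The export-side round trip:

    case AUDIT_LOGINUID_SET:
        if (krule->pflags & AUDIT_LOGINUID_LEGACY && !f->val) {
            data->fields[i] = AUDIT_LOGINUID;  /* legacy spelling */
            data->values[i] = AUDIT_UID_UNSET;
            break;
        }
        /* fallthrough if set */
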
2715     diff --git a/kernel/groups.c b/kernel/groups.c
2716     index 451698f86cfa..664411f171b5 100644
2717     --- a/kernel/groups.c
2718     +++ b/kernel/groups.c
2719     @@ -6,6 +6,7 @@
2720     #include <linux/slab.h>
2721     #include <linux/security.h>
2722     #include <linux/syscalls.h>
2723     +#include <linux/user_namespace.h>
2724     #include <asm/uaccess.h>
2725    
2726     /* init to 2 - one for init_task, one to ensure it is never freed */
2727     @@ -213,6 +214,14 @@ out:
2728     return i;
2729     }
2730    
2731     +bool may_setgroups(void)
2732     +{
2733     + struct user_namespace *user_ns = current_user_ns();
2734     +
2735     + return ns_capable(user_ns, CAP_SETGID) &&
2736     + userns_may_setgroups(user_ns);
2737     +}
2738     +
2739     /*
2740     * SMP: Our groups are copy-on-write. We can set them safely
2741     * without another task interfering.
2742     @@ -223,7 +232,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
2743     struct group_info *group_info;
2744     int retval;
2745    
2746     - if (!ns_capable(current_user_ns(), CAP_SETGID))
2747     + if (!may_setgroups())
2748     return -EPERM;
2749     if ((unsigned)gidsetsize > NGROUPS_MAX)
2750     return -EINVAL;
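may_setgroups() centralizes the setgroups(2) gate: the caller must hold CAP_SETGID in its own user namespace and the namespace itself must still permit setgroups, which the new USERNS_SETGROUPS_ALLOWED flag (surfaced through /proc/<pid>/setgroups as "allow"/"deny") controls. Both setgroups() and the 16-bit setgroups16() path now share the helper:

    if (!may_setgroups())
        return -EPERM;      /* capability check AND namespace policy */
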
2751     diff --git a/kernel/pid.c b/kernel/pid.c
2752     index 9b9a26698144..82430c858d69 100644
2753     --- a/kernel/pid.c
2754     +++ b/kernel/pid.c
2755     @@ -341,6 +341,8 @@ out:
2756    
2757     out_unlock:
2758     spin_unlock_irq(&pidmap_lock);
2759     + put_pid_ns(ns);
2760     +
2761     out_free:
2762     while (++i <= ns->level)
2763     free_pidmap(pid->numbers + i);
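The pid.c hunk plugs a reference leak: alloc_pid() takes a pid-namespace reference early, and the late out_unlock failure path freed the partially built struct pid without dropping it. The general rule, sketched with hypothetical names (obj, ns, get_ns/put_ns, setup), is that an error path releases references in reverse order of acquisition:

    static struct obj *alloc_obj(struct ns *ns)
    {
        struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

        if (!o)
            return NULL;
        o->ns = get_ns(ns);         /* reference taken on the way in */
        if (setup(o))
            goto out_put;
        return o;

    out_put:
        put_ns(o->ns);              /* error path balances the get */
        kfree(o);
        return NULL;
    }
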
2764     diff --git a/kernel/uid16.c b/kernel/uid16.c
2765     index 602e5bbbceff..d58cc4d8f0d1 100644
2766     --- a/kernel/uid16.c
2767     +++ b/kernel/uid16.c
2768     @@ -176,7 +176,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
2769     struct group_info *group_info;
2770     int retval;
2771    
2772     - if (!ns_capable(current_user_ns(), CAP_SETGID))
2773     + if (!may_setgroups())
2774     return -EPERM;
2775     if ((unsigned)gidsetsize > NGROUPS_MAX)
2776     return -EINVAL;
2777     diff --git a/kernel/user.c b/kernel/user.c
2778     index 4efa39350e44..2d09940c9632 100644
2779     --- a/kernel/user.c
2780     +++ b/kernel/user.c
2781     @@ -51,6 +51,7 @@ struct user_namespace init_user_ns = {
2782     .owner = GLOBAL_ROOT_UID,
2783     .group = GLOBAL_ROOT_GID,
2784     .proc_inum = PROC_USER_INIT_INO,
2785     + .flags = USERNS_INIT_FLAGS,
2786     #ifdef CONFIG_PERSISTENT_KEYRINGS
2787     .persistent_keyring_register_sem =
2788     __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
2789     diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
2790     index aa312b0dc3ec..a2e37c5d2f63 100644
2791     --- a/kernel/user_namespace.c
2792     +++ b/kernel/user_namespace.c
2793     @@ -24,6 +24,7 @@
2794     #include <linux/fs_struct.h>
2795    
2796     static struct kmem_cache *user_ns_cachep __read_mostly;
2797     +static DEFINE_MUTEX(userns_state_mutex);
2798    
2799     static bool new_idmap_permitted(const struct file *file,
2800     struct user_namespace *ns, int cap_setid,
2801     @@ -99,6 +100,11 @@ int create_user_ns(struct cred *new)
2802     ns->owner = owner;
2803     ns->group = group;
2804    
2805     + /* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
2806     + mutex_lock(&userns_state_mutex);
2807     + ns->flags = parent_ns->flags;
2808     + mutex_unlock(&userns_state_mutex);
2809     +
2810     set_cred_user_ns(new, ns);
2811    
2812     #ifdef CONFIG_PERSISTENT_KEYRINGS
2813     @@ -583,9 +589,6 @@ static bool mappings_overlap(struct uid_gid_map *new_map,
2814     return false;
2815     }
2816    
2817     -
2818     -static DEFINE_MUTEX(id_map_mutex);
2819     -
2820     static ssize_t map_write(struct file *file, const char __user *buf,
2821     size_t count, loff_t *ppos,
2822     int cap_setid,
2823     @@ -602,7 +605,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
2824     ssize_t ret = -EINVAL;
2825    
2826     /*
2827     - * The id_map_mutex serializes all writes to any given map.
2828     + * The userns_state_mutex serializes all writes to any given map.
2829     *
2830     * Any map is only ever written once.
2831     *
2832     @@ -620,7 +623,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
2833     * order and smp_rmb() is guaranteed that we don't have crazy
2834     * architectures returning stale data.
2835     */
2836     - mutex_lock(&id_map_mutex);
2837     + mutex_lock(&userns_state_mutex);
2838    
2839     ret = -EPERM;
2840     /* Only allow one successful write to the map */
2841     @@ -750,7 +753,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
2842     *ppos = count;
2843     ret = count;
2844     out:
2845     - mutex_unlock(&id_map_mutex);
2846     + mutex_unlock(&userns_state_mutex);
2847     if (page)
2848     free_page(page);
2849     return ret;
2850     @@ -812,16 +815,21 @@ static bool new_idmap_permitted(const struct file *file,
2851     struct user_namespace *ns, int cap_setid,
2852     struct uid_gid_map *new_map)
2853     {
2854     - /* Allow mapping to your own filesystem ids */
2855     - if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1)) {
2856     + const struct cred *cred = file->f_cred;
2857     + /* Don't allow a mapping to grant anything that the caller could
2858     + * not already do before the unprivileged mapping was established.
2859     + */
2860     + if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
2861     + uid_eq(ns->owner, cred->euid)) {
2862     u32 id = new_map->extent[0].lower_first;
2863     if (cap_setid == CAP_SETUID) {
2864     kuid_t uid = make_kuid(ns->parent, id);
2865     - if (uid_eq(uid, file->f_cred->fsuid))
2866     + if (uid_eq(uid, cred->euid))
2867     return true;
2868     } else if (cap_setid == CAP_SETGID) {
2869     kgid_t gid = make_kgid(ns->parent, id);
2870     - if (gid_eq(gid, file->f_cred->fsgid))
2871     + if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
2872     + gid_eq(gid, cred->egid))
2873     return true;
2874     }
2875     }
2876     @@ -841,6 +849,100 @@ static bool new_idmap_permitted(const struct file *file,
2877     return false;
2878     }
2879    
2880     +int proc_setgroups_show(struct seq_file *seq, void *v)
2881     +{
2882     + struct user_namespace *ns = seq->private;
2883     + unsigned long userns_flags = ACCESS_ONCE(ns->flags);
2884     +
2885     + seq_printf(seq, "%s\n",
2886     + (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
2887     + "allow" : "deny");
2888     + return 0;
2889     +}
2890     +
2891     +ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
2892     + size_t count, loff_t *ppos)
2893     +{
2894     + struct seq_file *seq = file->private_data;
2895     + struct user_namespace *ns = seq->private;
2896     + char kbuf[8], *pos;
2897     + bool setgroups_allowed;
2898     + ssize_t ret;
2899     +
2900     + /* Only allow a very narrow range of strings to be written */
2901     + ret = -EINVAL;
2902     + if ((*ppos != 0) || (count >= sizeof(kbuf)))
2903     + goto out;
2904     +
2905     + /* What was written? */
2906     + ret = -EFAULT;
2907     + if (copy_from_user(kbuf, buf, count))
2908     + goto out;
2909     + kbuf[count] = '\0';
2910     + pos = kbuf;
2911     +
2912     + /* What is being requested? */
2913     + ret = -EINVAL;
2914     + if (strncmp(pos, "allow", 5) == 0) {
2915     + pos += 5;
2916     + setgroups_allowed = true;
2917     + }
2918     + else if (strncmp(pos, "deny", 4) == 0) {
2919     + pos += 4;
2920     + setgroups_allowed = false;
2921     + }
2922     + else
2923     + goto out;
2924     +
2925     + /* Verify there is no trailing junk on the line */
2926     + pos = skip_spaces(pos);
2927     + if (*pos != '\0')
2928     + goto out;
2929     +
2930     + ret = -EPERM;
2931     + mutex_lock(&userns_state_mutex);
2932     + if (setgroups_allowed) {
2933     + /* Enabling setgroups after setgroups has been disabled
2934     + * is not allowed.
2935     + */
2936     + if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
2937     + goto out_unlock;
2938     + } else {
2939     + /* Permanently disabling setgroups after setgroups has
2940     + * been enabled by writing the gid_map is not allowed.
2941     + */
2942     + if (ns->gid_map.nr_extents != 0)
2943     + goto out_unlock;
2944     + ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
2945     + }
2946     + mutex_unlock(&userns_state_mutex);
2947     +
2948     + /* Report a successful write */
2949     + *ppos = count;
2950     + ret = count;
2951     +out:
2952     + return ret;
2953     +out_unlock:
2954     + mutex_unlock(&userns_state_mutex);
2955     + goto out;
2956     +}
2957     +
2958     +bool userns_may_setgroups(const struct user_namespace *ns)
2959     +{
2960     + bool allowed;
2961     +
2962     + mutex_lock(&userns_state_mutex);
2963     + /* It is not safe to use setgroups until a gid mapping in
2964     + * the user namespace has been established.
2965     + */
2966     + allowed = ns->gid_map.nr_extents != 0;
2967     + /* Is setgroups allowed? */
2968     + allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
2969     + mutex_unlock(&userns_state_mutex);
2970     +
2971     + return allowed;
2972     +}
2973     +
2974     static void *userns_get(struct task_struct *task)
2975     {
2976     struct user_namespace *user_ns;
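
Taken together, the kernel/user_namespace.c changes add the /proc/<pid>/setgroups knob and tighten new_idmap_permitted() so that an unprivileged gid_map write is only honoured once setgroups has been denied. A sketch of the resulting userspace workflow, mirroring the selftest change at the end of this patch (hypothetical demo code, not part of the patch):

/* demo-userns-setup.c - hypothetical sketch of the sequence the
 * updated selftest uses: write "deny" to /proc/self/setgroups first,
 * which is what new_idmap_permitted() requires before honouring the
 * unprivileged gid self-mapping below.
 * Build: gcc -Wall -o demo-userns-setup demo-userns-setup.c
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, s, strlen(s)) != (ssize_t)strlen(s)) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	close(fd);
}

int main(void)
{
	char buf[64];
	int uid = getuid();
	int gid = getgid();

	if (unshare(CLONE_NEWUSER) != 0) {
		perror("unshare(CLONE_NEWUSER)");
		return 1;
	}
	/* Permanently disable setgroups() in this namespace... */
	write_str("/proc/self/setgroups", "deny");
	/* ...then write the uid/gid self-mappings. */
	snprintf(buf, sizeof(buf), "0 %d 1", uid);
	write_str("/proc/self/uid_map", buf);
	snprintf(buf, sizeof(buf), "0 %d 1", gid);
	write_str("/proc/self/gid_map", buf);

	printf("uid=%d gid=%d inside the new user namespace\n",
	       (int)getuid(), (int)getgid());
	return 0;
}
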
2977     diff --git a/mm/cma.c b/mm/cma.c
2978     index fde706e1284f..8e9ec13d31db 100644
2979     --- a/mm/cma.c
2980     +++ b/mm/cma.c
2981     @@ -215,9 +215,21 @@ int __init cma_declare_contiguous(phys_addr_t base,
2982     bool fixed, struct cma **res_cma)
2983     {
2984     phys_addr_t memblock_end = memblock_end_of_DRAM();
2985     - phys_addr_t highmem_start = __pa(high_memory);
2986     + phys_addr_t highmem_start;
2987     int ret = 0;
2988    
2989     +#ifdef CONFIG_X86
2990     + /*
2991     + * high_memory is not in the direct mapping, so taking its physical
2992     + * address is not strictly valid. We only want the physical address
2993     + * of the highmem boundary, though, so it is justifiable to derive
2994     + * it from high_memory. On x86 there is a validation check for this
2995     + * case, so the following workaround is needed to avoid it.
2996     + */
2997     + highmem_start = __pa_nodebug(high_memory);
2998     +#else
2999     + highmem_start = __pa(high_memory);
3000     +#endif
3001     pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
3002     __func__, &size, &base, &limit, &alignment);
3003    
3004     diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
3005     index 4c74e8da64b9..5ce13a76d0fd 100644
3006     --- a/net/mac80211/chan.c
3007     +++ b/net/mac80211/chan.c
3008     @@ -929,6 +929,21 @@ ieee80211_vif_chanctx_reservation_complete(struct ieee80211_sub_if_data *sdata)
3009     }
3010     }
3011    
3012     +static void
3013     +ieee80211_vif_update_chandef(struct ieee80211_sub_if_data *sdata,
3014     + const struct cfg80211_chan_def *chandef)
3015     +{
3016     + struct ieee80211_sub_if_data *vlan;
3017     +
3018     + sdata->vif.bss_conf.chandef = *chandef;
3019     +
3020     + if (sdata->vif.type != NL80211_IFTYPE_AP)
3021     + return;
3022     +
3023     + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
3024     + vlan->vif.bss_conf.chandef = *chandef;
3025     +}
3026     +
3027     static int
3028     ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
3029     {
3030     @@ -991,7 +1006,7 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
3031     if (sdata->vif.bss_conf.chandef.width != sdata->reserved_chandef.width)
3032     changed = BSS_CHANGED_BANDWIDTH;
3033    
3034     - sdata->vif.bss_conf.chandef = sdata->reserved_chandef;
3035     + ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef);
3036    
3037     if (changed)
3038     ieee80211_bss_info_change_notify(sdata, changed);
3039     @@ -1333,7 +1348,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
3040     sdata->reserved_chandef.width)
3041     changed = BSS_CHANGED_BANDWIDTH;
3042    
3043     - sdata->vif.bss_conf.chandef = sdata->reserved_chandef;
3044     + ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef);
3045     if (changed)
3046     ieee80211_bss_info_change_notify(sdata,
3047     changed);
3048     @@ -1504,7 +1519,7 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
3049     goto out;
3050     }
3051    
3052     - sdata->vif.bss_conf.chandef = *chandef;
3053     + ieee80211_vif_update_chandef(sdata, chandef);
3054    
3055     ret = ieee80211_assign_vif_chanctx(sdata, ctx);
3056     if (ret) {
3057     @@ -1646,7 +1661,7 @@ int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
3058     break;
3059     }
3060    
3061     - sdata->vif.bss_conf.chandef = *chandef;
3062     + ieee80211_vif_update_chandef(sdata, chandef);
3063    
3064     ieee80211_recalc_chanctx_chantype(local, ctx);
3065    
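
The new ieee80211_vif_update_chandef() helper replaces four open-coded assignments so that AP_VLAN interfaces can no longer drift out of sync with their master AP's channel definition. A toy model of the pattern, using invented types rather than mac80211's:

/* toy-propagate.c - hypothetical toy model of the pattern above:
 * writes to a master object's shared state go through one helper
 * that also updates every dependent object, so the two can never
 * drift apart. All types here are invented for illustration.
 */
#include <stdio.h>

struct chandef { int center_freq; int width; };

struct vlan { struct chandef chandef; struct vlan *next; };

struct ap {
	struct chandef chandef;
	struct vlan *vlans;	/* singly linked list of dependents */
};

static void ap_update_chandef(struct ap *ap, const struct chandef *new)
{
	struct vlan *v;

	ap->chandef = *new;
	for (v = ap->vlans; v; v = v->next)
		v->chandef = *new;	/* keep dependents in sync */
}

int main(void)
{
	struct vlan v1 = { { 0, 0 }, NULL };
	struct vlan v0 = { { 0, 0 }, &v1 };
	struct ap ap = { { 2412, 20 }, &v0 };
	struct chandef wide = { 5180, 80 };

	ap_update_chandef(&ap, &wide);
	printf("ap=%d/%d vlan0=%d/%d vlan1=%d/%d\n",
	       ap.chandef.center_freq, ap.chandef.width,
	       v0.chandef.center_freq, v0.chandef.width,
	       v1.chandef.center_freq, v1.chandef.width);
	return 0;
}
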
3066     diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
3067     index 653f5eb07a27..eeae0abd01de 100644
3068     --- a/net/mac80211/iface.c
3069     +++ b/net/mac80211/iface.c
3070     @@ -511,6 +511,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
3071     sdata->vif.cab_queue = master->vif.cab_queue;
3072     memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
3073     sizeof(sdata->vif.hw_queue));
3074     + sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
3075     break;
3076     }
3077     case NL80211_IFTYPE_AP:
3078     diff --git a/net/mac80211/key.c b/net/mac80211/key.c
3079     index 4712150dc210..d66c6443164c 100644
3080     --- a/net/mac80211/key.c
3081     +++ b/net/mac80211/key.c
3082     @@ -647,7 +647,7 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local,
3083     int i;
3084    
3085     mutex_lock(&local->key_mtx);
3086     - for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
3087     + for (i = 0; i < ARRAY_SIZE(sta->gtk); i++) {
3088     key = key_mtx_dereference(local, sta->gtk[i]);
3089     if (!key)
3090     continue;
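
The key.c fix matters because sta->gtk[] also holds the management-key slots, so it is larger than NUM_DEFAULT_KEYS and the old loop bound left the extra entries unfreed; ARRAY_SIZE() ties the bound to the array itself. A standalone illustration of the idiom (constants invented):

/* toy-arraysize.c - hypothetical sketch of the idiom used in the
 * net/mac80211/key.c fix: derive a loop bound from the array itself
 * so it cannot drift from the array's real size.
 */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

#define NUM_DEFAULT 4	/* invented stand-ins for the mac80211 constants */
#define NUM_EXTRA   2

int main(void)
{
	int keys[NUM_DEFAULT + NUM_EXTRA] = { 1, 2, 3, 4, 5, 6 };
	unsigned int i;

	/* Buggy pattern: bounding the loop with NUM_DEFAULT would stop
	 * at keys[3], silently skipping keys[4] and keys[5].
	 *
	 * Fixed pattern: the bound follows the array definition.
	 */
	for (i = 0; i < ARRAY_SIZE(keys); i++)
		printf("freeing key %d\n", keys[i]);
	return 0;
}
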
3091     diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
3092     index 93af0f1c9d99..da1f639ecfb6 100644
3093     --- a/net/mac80211/mlme.c
3094     +++ b/net/mac80211/mlme.c
3095     @@ -174,6 +174,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
3096     if (!(ht_cap->cap_info &
3097     cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40))) {
3098     ret = IEEE80211_STA_DISABLE_40MHZ;
3099     + vht_chandef = *chandef;
3100     goto out;
3101     }
3102    
3103     diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
3104     index a37f9af634cb..e60da9a062c2 100644
3105     --- a/net/mac80211/rx.c
3106     +++ b/net/mac80211/rx.c
3107     @@ -1678,14 +1678,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
3108     sc = le16_to_cpu(hdr->seq_ctrl);
3109     frag = sc & IEEE80211_SCTL_FRAG;
3110    
3111     - if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
3112     - goto out;
3113     -
3114     if (is_multicast_ether_addr(hdr->addr1)) {
3115     rx->local->dot11MulticastReceivedFrameCount++;
3116     - goto out;
3117     + goto out_no_led;
3118     }
3119    
3120     + if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
3121     + goto out;
3122     +
3123     I802_DEBUG_INC(rx->local->rx_handlers_fragments);
3124    
3125     if (skb_linearize(rx->skb))
3126     @@ -1776,9 +1776,10 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
3127     status->rx_flags |= IEEE80211_RX_FRAGMENTED;
3128    
3129     out:
3130     + ieee80211_led_rx(rx->local);
3131     + out_no_led:
3132     if (rx->sta)
3133     rx->sta->rx_packets++;
3134     - ieee80211_led_rx(rx->local);
3135     return RX_CONTINUE;
3136     }
3137    
3138     diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
3139     index db9675db1026..7bed4ad7cd76 100644
3140     --- a/security/keys/encrypted-keys/encrypted.c
3141     +++ b/security/keys/encrypted-keys/encrypted.c
3142     @@ -1017,10 +1017,13 @@ static int __init init_encrypted(void)
3143     ret = encrypted_shash_alloc();
3144     if (ret < 0)
3145     return ret;
3146     + ret = aes_get_sizes();
3147     + if (ret < 0)
3148     + goto out;
3149     ret = register_key_type(&key_type_encrypted);
3150     if (ret < 0)
3151     goto out;
3152     - return aes_get_sizes();
3153     + return 0;
3154     out:
3155     encrypted_shash_release();
3156     return ret;
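
The init_encrypted() reordering follows the usual rule that the externally visible step, registering the key type, comes last: previously a late aes_get_sizes() failure returned an error while leaving key_type_encrypted registered and the shash allocations unreleased. A standalone sketch of the pattern (step names invented):

/* toy-init.c - hypothetical sketch of the init-ordering rule the fix
 * above restores: do the externally visible registration last, so any
 * earlier failure can unwind without leaving stale state behind.
 */
#include <stdio.h>

static int step_a(void)         { return 0; }	/* e.g. allocate hashes */
static void step_a_undo(void)   { }
static int step_b(void)         { return 0; }	/* e.g. probe cipher sizes */
static int register_thing(void) { return 0; }	/* visible side effect */

static int init_thing(void)
{
	int ret;

	ret = step_a();
	if (ret < 0)
		return ret;
	ret = step_b();			/* fallible checks first... */
	if (ret < 0)
		goto out;		/* nothing registered yet: safe */
	ret = register_thing();		/* ...visible step last */
	if (ret < 0)
		goto out;
	return 0;
out:
	step_a_undo();
	return ret;
}

int main(void)
{
	printf("init_thing() = %d\n", init_thing());
	return 0;
}
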
3157     diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c
3158     index 1b3ff2fda4d0..517785052f1c 100644
3159     --- a/tools/testing/selftests/mount/unprivileged-remount-test.c
3160     +++ b/tools/testing/selftests/mount/unprivileged-remount-test.c
3161     @@ -6,6 +6,8 @@
3162     #include <sys/types.h>
3163     #include <sys/mount.h>
3164     #include <sys/wait.h>
3165     +#include <sys/vfs.h>
3166     +#include <sys/statvfs.h>
3167     #include <stdlib.h>
3168     #include <unistd.h>
3169     #include <fcntl.h>
3170     @@ -32,11 +34,14 @@
3171     # define CLONE_NEWPID 0x20000000
3172     #endif
3173    
3174     +#ifndef MS_REC
3175     +# define MS_REC 16384
3176     +#endif
3177     #ifndef MS_RELATIME
3178     -#define MS_RELATIME (1 << 21)
3179     +# define MS_RELATIME (1 << 21)
3180     #endif
3181     #ifndef MS_STRICTATIME
3182     -#define MS_STRICTATIME (1 << 24)
3183     +# define MS_STRICTATIME (1 << 24)
3184     #endif
3185    
3186     static void die(char *fmt, ...)
3187     @@ -48,17 +53,14 @@ static void die(char *fmt, ...)
3188     exit(EXIT_FAILURE);
3189     }
3190    
3191     -static void write_file(char *filename, char *fmt, ...)
3192     +static void vmaybe_write_file(bool enoent_ok, char *filename, char *fmt, va_list ap)
3193     {
3194     char buf[4096];
3195     int fd;
3196     ssize_t written;
3197     int buf_len;
3198     - va_list ap;
3199    
3200     - va_start(ap, fmt);
3201     buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
3202     - va_end(ap);
3203     if (buf_len < 0) {
3204     die("vsnprintf failed: %s\n",
3205     strerror(errno));
3206     @@ -69,6 +71,8 @@ static void write_file(char *filename, char *fmt, ...)
3207    
3208     fd = open(filename, O_WRONLY);
3209     if (fd < 0) {
3210     + if ((errno == ENOENT) && enoent_ok)
3211     + return;
3212     die("open of %s failed: %s\n",
3213     filename, strerror(errno));
3214     }
3215     @@ -87,6 +91,65 @@ static void write_file(char *filename, char *fmt, ...)
3216     }
3217     }
3218    
3219     +static void maybe_write_file(char *filename, char *fmt, ...)
3220     +{
3221     + va_list ap;
3222     +
3223     + va_start(ap, fmt);
3224     + vmaybe_write_file(true, filename, fmt, ap);
3225     + va_end(ap);
3226     +
3227     +}
3228     +
3229     +static void write_file(char *filename, char *fmt, ...)
3230     +{
3231     + va_list ap;
3232     +
3233     + va_start(ap, fmt);
3234     + vmaybe_write_file(false, filename, fmt, ap);
3235     + va_end(ap);
3236     +
3237     +}
3238     +
3239     +static int read_mnt_flags(const char *path)
3240     +{
3241     + int ret;
3242     + struct statvfs stat;
3243     + int mnt_flags;
3244     +
3245     + ret = statvfs(path, &stat);
3246     + if (ret != 0) {
3247     + die("statvfs of %s failed: %s\n",
3248     + path, strerror(errno));
3249     + }
3250     + if (stat.f_flag & ~(ST_RDONLY | ST_NOSUID | ST_NODEV |
3251     + ST_NOEXEC | ST_NOATIME | ST_NODIRATIME | ST_RELATIME |
3252     + ST_SYNCHRONOUS | ST_MANDLOCK)) {
3253     + die("Unrecognized mount flags\n");
3254     + }
3255     + mnt_flags = 0;
3256     + if (stat.f_flag & ST_RDONLY)
3257     + mnt_flags |= MS_RDONLY;
3258     + if (stat.f_flag & ST_NOSUID)
3259     + mnt_flags |= MS_NOSUID;
3260     + if (stat.f_flag & ST_NODEV)
3261     + mnt_flags |= MS_NODEV;
3262     + if (stat.f_flag & ST_NOEXEC)
3263     + mnt_flags |= MS_NOEXEC;
3264     + if (stat.f_flag & ST_NOATIME)
3265     + mnt_flags |= MS_NOATIME;
3266     + if (stat.f_flag & ST_NODIRATIME)
3267     + mnt_flags |= MS_NODIRATIME;
3268     + if (stat.f_flag & ST_RELATIME)
3269     + mnt_flags |= MS_RELATIME;
3270     + if (stat.f_flag & ST_SYNCHRONOUS)
3271     + mnt_flags |= MS_SYNCHRONOUS;
3272     + if (stat.f_flag & ST_MANDLOCK)
3273     + mnt_flags |= MS_MANDLOCK;
3274     +
3275     + return mnt_flags;
3276     +}
3277     +
3278     static void create_and_enter_userns(void)
3279     {
3280     uid_t uid;
3281     @@ -100,13 +163,10 @@ static void create_and_enter_userns(void)
3282     strerror(errno));
3283     }
3284    
3285     + maybe_write_file("/proc/self/setgroups", "deny");
3286     write_file("/proc/self/uid_map", "0 %d 1", uid);
3287     write_file("/proc/self/gid_map", "0 %d 1", gid);
3288    
3289     - if (setgroups(0, NULL) != 0) {
3290     - die("setgroups failed: %s\n",
3291     - strerror(errno));
3292     - }
3293     if (setgid(0) != 0) {
3294     die ("setgid(0) failed %s\n",
3295     strerror(errno));
3296     @@ -118,7 +178,8 @@ static void create_and_enter_userns(void)
3297     }
3298    
3299     static
3300     -bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
3301     +bool test_unpriv_remount(const char *fstype, const char *mount_options,
3302     + int mount_flags, int remount_flags, int invalid_flags)
3303     {
3304     pid_t child;
3305    
3306     @@ -151,9 +212,11 @@ bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
3307     strerror(errno));
3308     }
3309    
3310     - if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) {
3311     - die("mount of /tmp failed: %s\n",
3312     - strerror(errno));
3313     + if (mount("testing", "/tmp", fstype, mount_flags, mount_options) != 0) {
3314     + die("mount of %s with options '%s' on /tmp failed: %s\n",
3315     + fstype,
3316     + mount_options ? mount_options : "",
3317     + strerror(errno));
3318     }
3319    
3320     create_and_enter_userns();
3321     @@ -181,62 +244,127 @@ bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
3322    
3323     static bool test_unpriv_remount_simple(int mount_flags)
3324     {
3325     - return test_unpriv_remount(mount_flags, mount_flags, 0);
3326     + return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags, 0);
3327     }
3328    
3329     static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags)
3330     {
3331     - return test_unpriv_remount(mount_flags, mount_flags, invalid_flags);
3332     + return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags,
3333     + invalid_flags);
3334     +}
3335     +
3336     +static bool test_priv_mount_unpriv_remount(void)
3337     +{
3338     + pid_t child;
3339     + int ret;
3340     + const char *orig_path = "/dev";
3341     + const char *dest_path = "/tmp";
3342     + int orig_mnt_flags, remount_mnt_flags;
3343     +
3344     + child = fork();
3345     + if (child == -1) {
3346     + die("fork failed: %s\n",
3347     + strerror(errno));
3348     + }
3349     + if (child != 0) { /* parent */
3350     + pid_t pid;
3351     + int status;
3352     + pid = waitpid(child, &status, 0);
3353     + if (pid == -1) {
3354     + die("waitpid failed: %s\n",
3355     + strerror(errno));
3356     + }
3357     + if (pid != child) {
3358     + die("waited for %d got %d\n",
3359     + child, pid);
3360     + }
3361     + if (!WIFEXITED(status)) {
3362     + die("child did not terminate cleanly\n");
3363     + }
3364     + return WEXITSTATUS(status) == EXIT_SUCCESS;
3365     + }
3366     +
3367     + orig_mnt_flags = read_mnt_flags(orig_path);
3368     +
3369     + create_and_enter_userns();
3370     + ret = unshare(CLONE_NEWNS);
3371     + if (ret != 0) {
3372     + die("unshare(CLONE_NEWNS) failed: %s\n",
3373     + strerror(errno));
3374     + }
3375     +
3376     + ret = mount(orig_path, dest_path, "bind", MS_BIND | MS_REC, NULL);
3377     + if (ret != 0) {
3378     + die("recursive bind mount of %s onto %s failed: %s\n",
3379     + orig_path, dest_path, strerror(errno));
3380     + }
3381     +
3382     + ret = mount(dest_path, dest_path, "none",
3383     + MS_REMOUNT | MS_BIND | orig_mnt_flags, NULL);
3384     + if (ret != 0) {
3385     + /* system("cat /proc/self/mounts"); */
3386     + die("remount of /tmp failed: %s\n",
3387     + strerror(errno));
3388     + }
3389     +
3390     + remount_mnt_flags = read_mnt_flags(dest_path);
3391     + if (orig_mnt_flags != remount_mnt_flags) {
3392     + die("Mount flags unexpectedly changed during remount of %s originally mounted on %s\n",
3393     + dest_path, orig_path);
3394     + }
3395     + exit(EXIT_SUCCESS);
3396     }
3397    
3398     int main(int argc, char **argv)
3399     {
3400     - if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) {
3401     + if (!test_unpriv_remount_simple(MS_RDONLY)) {
3402     die("MS_RDONLY malfunctions\n");
3403     }
3404     - if (!test_unpriv_remount_simple(MS_NODEV)) {
3405     + if (!test_unpriv_remount("devpts", "newinstance", MS_NODEV, MS_NODEV, 0)) {
3406     die("MS_NODEV malfunctions\n");
3407     }
3408     - if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) {
3409     + if (!test_unpriv_remount_simple(MS_NOSUID)) {
3410     die("MS_NOSUID malfunctions\n");
3411     }
3412     - if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) {
3413     + if (!test_unpriv_remount_simple(MS_NOEXEC)) {
3414     die("MS_NOEXEC malfunctions\n");
3415     }
3416     - if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV,
3417     - MS_NOATIME|MS_NODEV))
3418     + if (!test_unpriv_remount_atime(MS_RELATIME,
3419     + MS_NOATIME))
3420     {
3421     die("MS_RELATIME malfunctions\n");
3422     }
3423     - if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV,
3424     - MS_NOATIME|MS_NODEV))
3425     + if (!test_unpriv_remount_atime(MS_STRICTATIME,
3426     + MS_NOATIME))
3427     {
3428     die("MS_STRICTATIME malfunctions\n");
3429     }
3430     - if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV,
3431     - MS_STRICTATIME|MS_NODEV))
3432     + if (!test_unpriv_remount_atime(MS_NOATIME,
3433     + MS_STRICTATIME))
3434     {
3435     - die("MS_RELATIME malfunctions\n");
3436     + die("MS_NOATIME malfunctions\n");
3437     }
3438     - if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV,
3439     - MS_NOATIME|MS_NODEV))
3440     + if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME,
3441     + MS_NOATIME))
3442     {
3443     - die("MS_RELATIME malfunctions\n");
3444     + die("MS_RELATIME|MS_NODIRATIME malfunctions\n");
3445     }
3446     - if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV,
3447     - MS_NOATIME|MS_NODEV))
3448     + if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME,
3449     + MS_NOATIME))
3450     {
3451     - die("MS_RELATIME malfunctions\n");
3452     + die("MS_STRICTATIME|MS_NODIRATIME malfunctions\n");
3453     }
3454     - if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV,
3455     - MS_STRICTATIME|MS_NODEV))
3456     + if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME,
3457     + MS_STRICTATIME))
3458     {
3459     - die("MS_RELATIME malfunctions\n");
3460     + die("MS_NOATIME|MS_NODIRATIME malfunctions\n");
3461     }
3462     - if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV,
3463     - MS_NOATIME|MS_NODEV))
3464     + if (!test_unpriv_remount("ramfs", NULL, MS_STRICTATIME, 0, MS_NOATIME))
3465     {
3466     die("Default atime malfunctions\n");
3467     }
3468     + if (!test_priv_mount_unpriv_remount()) {
3469     + die("Mount flags unexpectedly changed after remount\n");
3470     + }
3471     return EXIT_SUCCESS;
3472     }
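
For reference, the updated selftest still builds standalone (something like gcc -Wall unprivileged-remount-test.c, exact flags being an assumption rather than part of the patch) and must run on a kernel with user namespaces enabled. The new maybe_write_file() wrapper is what keeps the same binary working on older kernels: there /proc/self/setgroups does not exist, the open() fails with ENOENT, and the write is silently skipped.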