Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0131-4.9.32-all-fixes.patch



Revision 2956
Mon Jul 24 12:03:46 2017 UTC by niro
File size: 152437 bytes
-added patches-4.9
1 niro 2956 diff --git a/Makefile b/Makefile
2     index 3601995f63f9..3d8781997968 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 31
9     +SUBLEVEL = 32
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
14     index 34e3f3c45634..f4e54503afa9 100644
15     --- a/arch/arm/kernel/setup.c
16     +++ b/arch/arm/kernel/setup.c
17     @@ -81,7 +81,7 @@ __setup("fpe=", fpe_setup);
18     extern void init_default_cache_policy(unsigned long);
19     extern void paging_init(const struct machine_desc *desc);
20     extern void early_paging_init(const struct machine_desc *);
21     -extern void sanity_check_meminfo(void);
22     +extern void adjust_lowmem_bounds(void);
23     extern enum reboot_mode reboot_mode;
24     extern void setup_dma_zone(const struct machine_desc *desc);
25    
26     @@ -1093,8 +1093,14 @@ void __init setup_arch(char **cmdline_p)
27     setup_dma_zone(mdesc);
28     xen_early_init();
29     efi_init();
30     - sanity_check_meminfo();
31     + /*
32     + * Make sure the calculation for lowmem/highmem is set appropriately
33     + * before reserving/allocating any memory
34     + */
35     + adjust_lowmem_bounds();
36     arm_memblock_init(mdesc);
37     + /* Memory may have been removed so recalculate the bounds. */
38     + adjust_lowmem_bounds();
39    
40     early_ioremap_reset();
41    
42     diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
43     index bf89c919efc1..bd0ee7fc304c 100644
44     --- a/arch/arm/kvm/init.S
45     +++ b/arch/arm/kvm/init.S
46     @@ -95,7 +95,6 @@ __do_hyp_init:
47     @ - Write permission implies XN: disabled
48     @ - Instruction cache: enabled
49     @ - Data/Unified cache: enabled
50     - @ - Memory alignment checks: enabled
51     @ - MMU: enabled (this code must be run from an identity mapping)
52     mrc p15, 4, r0, c1, c0, 0 @ HSCR
53     ldr r2, =HSCTLR_MASK
54     @@ -103,8 +102,8 @@ __do_hyp_init:
55     mrc p15, 0, r1, c1, c0, 0 @ SCTLR
56     ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
57     and r1, r1, r2
58     - ARM( ldr r2, =(HSCTLR_M | HSCTLR_A) )
59     - THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) )
60     + ARM( ldr r2, =(HSCTLR_M) )
61     + THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) )
62     orr r1, r1, r2
63     orr r0, r0, r1
64     mcr p15, 4, r0, c1, c0, 0 @ HSCR
65     diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
66     index 2fd5c135e8a4..332ce3b5a34f 100644
67     --- a/arch/arm/kvm/mmu.c
68     +++ b/arch/arm/kvm/mmu.c
69     @@ -872,6 +872,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
70     pmd_t *pmd;
71    
72     pud = stage2_get_pud(kvm, cache, addr);
73     + if (!pud)
74     + return NULL;
75     +
76     if (stage2_pud_none(*pud)) {
77     if (!cache)
78     return NULL;
79     diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
80     index 4001dd15818d..5cbfd9f86412 100644
81     --- a/arch/arm/mm/mmu.c
82     +++ b/arch/arm/mm/mmu.c
83     @@ -1152,13 +1152,12 @@ early_param("vmalloc", early_vmalloc);
84    
85     phys_addr_t arm_lowmem_limit __initdata = 0;
86    
87     -void __init sanity_check_meminfo(void)
88     +void __init adjust_lowmem_bounds(void)
89     {
90     phys_addr_t memblock_limit = 0;
91     - int highmem = 0;
92     u64 vmalloc_limit;
93     struct memblock_region *reg;
94     - bool should_use_highmem = false;
95     + phys_addr_t lowmem_limit = 0;
96    
97     /*
98     * Let's use our own (unoptimized) equivalent of __pa() that is
99     @@ -1172,43 +1171,18 @@ void __init sanity_check_meminfo(void)
100     for_each_memblock(memory, reg) {
101     phys_addr_t block_start = reg->base;
102     phys_addr_t block_end = reg->base + reg->size;
103     - phys_addr_t size_limit = reg->size;
104    
105     - if (reg->base >= vmalloc_limit)
106     - highmem = 1;
107     - else
108     - size_limit = vmalloc_limit - reg->base;
109     -
110     -
111     - if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
112     -
113     - if (highmem) {
114     - pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
115     - &block_start, &block_end);
116     - memblock_remove(reg->base, reg->size);
117     - should_use_highmem = true;
118     - continue;
119     - }
120     -
121     - if (reg->size > size_limit) {
122     - phys_addr_t overlap_size = reg->size - size_limit;
123     -
124     - pr_notice("Truncating RAM at %pa-%pa",
125     - &block_start, &block_end);
126     - block_end = vmalloc_limit;
127     - pr_cont(" to -%pa", &block_end);
128     - memblock_remove(vmalloc_limit, overlap_size);
129     - should_use_highmem = true;
130     - }
131     - }
132     -
133     - if (!highmem) {
134     - if (block_end > arm_lowmem_limit) {
135     - if (reg->size > size_limit)
136     - arm_lowmem_limit = vmalloc_limit;
137     - else
138     - arm_lowmem_limit = block_end;
139     - }
140     + if (reg->base < vmalloc_limit) {
141     + if (block_end > lowmem_limit)
142     + /*
143     + * Compare as u64 to ensure vmalloc_limit does
144     + * not get truncated. block_end should always
145     + * fit in phys_addr_t so there should be no
146     + * issue with assignment.
147     + */
148     + lowmem_limit = min_t(u64,
149     + vmalloc_limit,
150     + block_end);
151    
152     /*
153     * Find the first non-pmd-aligned page, and point
154     @@ -1227,14 +1201,13 @@ void __init sanity_check_meminfo(void)
155     if (!IS_ALIGNED(block_start, PMD_SIZE))
156     memblock_limit = block_start;
157     else if (!IS_ALIGNED(block_end, PMD_SIZE))
158     - memblock_limit = arm_lowmem_limit;
159     + memblock_limit = lowmem_limit;
160     }
161    
162     }
163     }
164    
165     - if (should_use_highmem)
166     - pr_notice("Consider using a HIGHMEM enabled kernel.\n");
167     + arm_lowmem_limit = lowmem_limit;
168    
169     high_memory = __va(arm_lowmem_limit - 1) + 1;
170    
171     @@ -1248,6 +1221,18 @@ void __init sanity_check_meminfo(void)
172     if (!memblock_limit)
173     memblock_limit = arm_lowmem_limit;
174    
175     + if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
176     + if (memblock_end_of_DRAM() > arm_lowmem_limit) {
177     + phys_addr_t end = memblock_end_of_DRAM();
178     +
179     + pr_notice("Ignoring RAM at %pa-%pa\n",
180     + &memblock_limit, &end);
181     + pr_notice("Consider using a HIGHMEM enabled kernel.\n");
182     +
183     + memblock_remove(memblock_limit, end - memblock_limit);
184     + }
185     + }
186     +
187     memblock_set_current_limit(memblock_limit);
188     }
189    
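The min_t(u64, ...) comment in the hunk above is worth unpacking: vmalloc_limit is a u64, while phys_addr_t may be only 32 bits on non-LPAE ARM, so a plain assignment could truncate before the comparison happens. A sketch of the hazard, with illustrative values (not part of the patch):

	/* Illustrative only: if vmalloc_limit were narrowed to a 32-bit
	 * phys_addr_t before comparing, 0x1_0000_0000 would wrap to 0 and
	 * lowmem_limit would collapse. Comparing as u64 keeps the compare
	 * exact; the chosen value is <= block_end, so it fits afterwards.
	 */
	u64 vmalloc_limit = 0x100000000ULL;	/* 4 GB: needs 33 bits */
	phys_addr_t block_end = 0xc0000000;	/* fits in 32 bits */
	phys_addr_t lowmem_limit = min_t(u64, vmalloc_limit, block_end);
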
190     diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
191     index 2740967727e2..13a25d6282f8 100644
192     --- a/arch/arm/mm/nommu.c
193     +++ b/arch/arm/mm/nommu.c
194     @@ -85,7 +85,7 @@ static unsigned long irbar_read(void)
195     }
196    
197     /* MPU initialisation functions */
198     -void __init sanity_check_meminfo_mpu(void)
199     +void __init adjust_lowmem_bounds_mpu(void)
200     {
201     phys_addr_t phys_offset = PHYS_OFFSET;
202     phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
203     @@ -274,7 +274,7 @@ void __init mpu_setup(void)
204     }
205     }
206     #else
207     -static void sanity_check_meminfo_mpu(void) {}
208     +static void adjust_lowmem_bounds_mpu(void) {}
209     static void __init mpu_setup(void) {}
210     #endif /* CONFIG_ARM_MPU */
211    
212     @@ -295,10 +295,10 @@ void __init arm_mm_memblock_reserve(void)
213     #endif
214     }
215    
216     -void __init sanity_check_meminfo(void)
217     +void __init adjust_lowmem_bounds(void)
218     {
219     phys_addr_t end;
220     - sanity_check_meminfo_mpu();
221     + adjust_lowmem_bounds_mpu();
222     end = memblock_end_of_DRAM();
223     high_memory = __va(end - 1) + 1;
224     memblock_set_current_limit(end);
225     diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
226     new file mode 100644
227     index 000000000000..be2d2347d995
228     --- /dev/null
229     +++ b/arch/arm64/include/asm/asm-uaccess.h
230     @@ -0,0 +1,13 @@
231     +#ifndef __ASM_ASM_UACCESS_H
232     +#define __ASM_ASM_UACCESS_H
233     +
234     +/*
235     + * Remove the address tag from a virtual address, if present.
236     + */
237     + .macro clear_address_tag, dst, addr
238     + tst \addr, #(1 << 55)
239     + bic \dst, \addr, #(0xff << 56)
240     + csel \dst, \dst, \addr, eq
241     + .endm
242     +
243     +#endif
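For readers who don't speak A64, a C equivalent of the tst/bic/csel sequence in clear_address_tag (a sketch, not part of the patch):

	/* Sketch: user-space pointers have bit 55 clear and may carry a
	 * tag in bits 63..56, which is stripped; kernel pointers (bit 55
	 * set, top byte 0xff) are passed through unchanged.
	 */
	static inline unsigned long clear_address_tag(unsigned long addr)
	{
		return (addr & (1UL << 55)) ? addr : (addr & ~(0xffUL << 56));
	}
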
244     diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
245     index 6c80b3699cb8..7393cc767edb 100644
246     --- a/arch/arm64/include/asm/sysreg.h
247     +++ b/arch/arm64/include/asm/sysreg.h
248     @@ -94,6 +94,10 @@
249     #define SCTLR_ELx_A (1 << 1)
250     #define SCTLR_ELx_M 1
251    
252     +#define SCTLR_EL2_RES1 ((1 << 4) | (1 << 5) | (1 << 11) | (1 << 16) | \
253     + (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
254     + (1 << 28) | (1 << 29))
255     +
256     #define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
257     SCTLR_ELx_SA | SCTLR_ELx_I)
258    
259     diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
260     index 14cca10aeb4e..811cf16a65f9 100644
261     --- a/arch/arm64/include/asm/uaccess.h
262     +++ b/arch/arm64/include/asm/uaccess.h
263     @@ -105,9 +105,9 @@ static inline void set_fs(mm_segment_t fs)
264     })
265    
266     /*
267     - * When dealing with data aborts or instruction traps we may end up with
268     - * a tagged userland pointer. Clear the tag to get a sane pointer to pass
269     - * on to access_ok(), for instance.
270     + * When dealing with data aborts, watchpoints, or instruction traps we may end
271     + * up with a tagged userland pointer. Clear the tag to get a sane pointer to
272     + * pass on to access_ok(), for instance.
273     */
274     #define untagged_addr(addr) sign_extend64(addr, 55)
275    
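untagged_addr() reaches the same result arithmetically; roughly (a sketch of what sign_extend64(addr, 55) does, not the kernel implementation):

	/* Sketch: shifting bits 55..0 up to the top and arithmetic-
	 * shifting back replicates bit 55 through bits 63..56, zeroing a
	 * user pointer's tag byte and regenerating 0xff for kernel ones.
	 */
	#define untagged_addr_sketch(addr) \
		((u64)((s64)((u64)(addr) << 8) >> 8))
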
276     diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
277     index 79b0fe24d5b7..b4c7db434654 100644
278     --- a/arch/arm64/kernel/entry.S
279     +++ b/arch/arm64/kernel/entry.S
280     @@ -30,6 +30,7 @@
281     #include <asm/irq.h>
282     #include <asm/memory.h>
283     #include <asm/thread_info.h>
284     +#include <asm/asm-uaccess.h>
285     #include <asm/unistd.h>
286    
287     /*
288     @@ -369,12 +370,13 @@ el1_da:
289     /*
290     * Data abort handling
291     */
292     - mrs x0, far_el1
293     + mrs x3, far_el1
294     enable_dbg
295     // re-enable interrupts if they were enabled in the aborted context
296     tbnz x23, #7, 1f // PSR_I_BIT
297     enable_irq
298     1:
299     + clear_address_tag x0, x3
300     mov x2, sp // struct pt_regs
301     bl do_mem_abort
302    
303     @@ -535,7 +537,7 @@ el0_da:
304     // enable interrupts before calling the main handler
305     enable_dbg_and_irq
306     ct_user_exit
307     - bic x0, x26, #(0xff << 56)
308     + clear_address_tag x0, x26
309     mov x1, x25
310     mov x2, sp
311     bl do_mem_abort
312     diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
313     index 948b73148d56..0b9e5f6290f9 100644
314     --- a/arch/arm64/kernel/hw_breakpoint.c
315     +++ b/arch/arm64/kernel/hw_breakpoint.c
316     @@ -36,6 +36,7 @@
317     #include <asm/traps.h>
318     #include <asm/cputype.h>
319     #include <asm/system_misc.h>
320     +#include <asm/uaccess.h>
321    
322     /* Breakpoint currently in use for each BRP. */
323     static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
324     @@ -696,7 +697,7 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
325    
326     /* Check if the watchpoint value matches. */
327     val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
328     - if (val != (addr & ~alignment_mask))
329     + if (val != (untagged_addr(addr) & ~alignment_mask))
330     goto unlock;
331    
332     /* Possible match, check the byte address select to confirm. */
333     diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
334     index 11e5eae088ab..f22826135c73 100644
335     --- a/arch/arm64/kernel/traps.c
336     +++ b/arch/arm64/kernel/traps.c
337     @@ -435,7 +435,7 @@ int cpu_enable_cache_maint_trap(void *__unused)
338     }
339    
340     #define __user_cache_maint(insn, address, res) \
341     - if (untagged_addr(address) >= user_addr_max()) \
342     + if (address >= user_addr_max()) \
343     res = -EFAULT; \
344     else \
345     asm volatile ( \
346     @@ -458,7 +458,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
347     int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
348     int ret = 0;
349    
350     - address = (rt == 31) ? 0 : regs->regs[rt];
351     + address = (rt == 31) ? 0 : untagged_addr(regs->regs[rt]);
352    
353     switch (crm) {
354     case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */
355     diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
356     index 6b29d3d9e1f2..4bbff904169d 100644
357     --- a/arch/arm64/kvm/hyp-init.S
358     +++ b/arch/arm64/kvm/hyp-init.S
359     @@ -102,10 +102,13 @@ __do_hyp_init:
360     tlbi alle2
361     dsb sy
362    
363     - mrs x4, sctlr_el2
364     - and x4, x4, #SCTLR_ELx_EE // preserve endianness of EL2
365     - ldr x5, =SCTLR_ELx_FLAGS
366     - orr x4, x4, x5
367     + /*
368     + * Preserve all the RES1 bits while setting the default flags,
369     + * as well as the EE bit on BE. Drop the A flag since the compiler
370     + * is allowed to generate unaligned accesses.
371     + */
372     + ldr x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
373     +CPU_BE( orr x4, x4, #SCTLR_ELx_EE)
374     msr sctlr_el2, x4
375     isb
376    
377     diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
378     index 8b3b46b7b0f2..329771559cbb 100644
379     --- a/arch/powerpc/include/asm/topology.h
380     +++ b/arch/powerpc/include/asm/topology.h
381     @@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
382     extern int sysfs_add_device_to_node(struct device *dev, int nid);
383     extern void sysfs_remove_device_from_node(struct device *dev, int nid);
384    
385     +static inline int early_cpu_to_node(int cpu)
386     +{
387     + int nid;
388     +
389     + nid = numa_cpu_lookup_table[cpu];
390     +
391     + /*
392     + * Fall back to node 0 if nid is unset (it should be, except bugs).
393     + * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
394     + */
395     + return (nid < 0) ? 0 : nid;
396     +}
397     #else
398    
399     +static inline int early_cpu_to_node(int cpu) { return 0; }
400     +
401     static inline void dump_numa_cpu_topology(void) {}
402    
403     static inline int sysfs_add_device_to_node(struct device *dev, int nid)
404     diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
405     index c7164739dc75..b249c2fb99c8 100644
406     --- a/arch/powerpc/kernel/process.c
407     +++ b/arch/powerpc/kernel/process.c
408     @@ -1659,6 +1659,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
409     #ifdef CONFIG_VSX
410     current->thread.used_vsr = 0;
411     #endif
412     + current->thread.load_fp = 0;
413     memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
414     current->thread.fp_save_area = NULL;
415     #ifdef CONFIG_ALTIVEC
416     @@ -1667,6 +1668,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
417     current->thread.vr_save_area = NULL;
418     current->thread.vrsave = 0;
419     current->thread.used_vr = 0;
420     + current->thread.load_vec = 0;
421     #endif /* CONFIG_ALTIVEC */
422     #ifdef CONFIG_SPE
423     memset(current->thread.evr, 0, sizeof(current->thread.evr));
424     @@ -1678,6 +1680,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
425     current->thread.tm_tfhar = 0;
426     current->thread.tm_texasr = 0;
427     current->thread.tm_tfiar = 0;
428     + current->thread.load_tm = 0;
429     #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
430     }
431     EXPORT_SYMBOL(start_thread);
432     diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
433     index a12be60181bf..ada71bee176d 100644
434     --- a/arch/powerpc/kernel/setup_64.c
435     +++ b/arch/powerpc/kernel/setup_64.c
436     @@ -595,7 +595,7 @@ void __init emergency_stack_init(void)
437    
438     static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
439     {
440     - return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
441     + return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
442     __pa(MAX_DMA_ADDRESS));
443     }
444    
445     @@ -606,7 +606,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
446    
447     static int pcpu_cpu_distance(unsigned int from, unsigned int to)
448     {
449     - if (cpu_to_node(from) == cpu_to_node(to))
450     + if (early_cpu_to_node(from) == early_cpu_to_node(to))
451     return LOCAL_DISTANCE;
452     else
453     return REMOTE_DISTANCE;
454     diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
455     index 76ec104e88be..c0a0947f43bb 100644
456     --- a/arch/powerpc/platforms/pseries/hotplug-memory.c
457     +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
458     @@ -124,6 +124,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
459     for (i = 0; i < num_lmbs; i++) {
460     lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
461     lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
462     + lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
463     lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
464     }
465    
466     @@ -147,6 +148,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
467     for (i = 0; i < num_lmbs; i++) {
468     lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
469     lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
470     + lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
471     lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
472     }
473    
474     diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c
475     index ef470b470b04..6afddae2fb47 100644
476     --- a/arch/powerpc/sysdev/simple_gpio.c
477     +++ b/arch/powerpc/sysdev/simple_gpio.c
478     @@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
479    
480     static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
481     {
482     - struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
483     + struct u8_gpio_chip *u8_gc =
484     + container_of(mm_gc, struct u8_gpio_chip, mm_gc);
485    
486     u8_gc->data = in_8(mm_gc->regs);
487     }
488     diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
489     index 165ecdd24d22..b27e48e25841 100644
490     --- a/arch/sparc/Kconfig
491     +++ b/arch/sparc/Kconfig
492     @@ -187,9 +187,9 @@ config NR_CPUS
493     int "Maximum number of CPUs"
494     depends on SMP
495     range 2 32 if SPARC32
496     - range 2 1024 if SPARC64
497     + range 2 4096 if SPARC64
498     default 32 if SPARC32
499     - default 64 if SPARC64
500     + default 4096 if SPARC64
501    
502     source kernel/Kconfig.hz
503    
504     diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
505     index f7de0dbc38af..83b36a5371ff 100644
506     --- a/arch/sparc/include/asm/mmu_64.h
507     +++ b/arch/sparc/include/asm/mmu_64.h
508     @@ -52,7 +52,7 @@
509     #define CTX_NR_MASK TAG_CONTEXT_BITS
510     #define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK)
511    
512     -#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
513     +#define CTX_FIRST_VERSION BIT(CTX_VERSION_SHIFT)
514     #define CTX_VALID(__ctx) \
515     (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
516     #define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
517     diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
518     index b84be675e507..349dd23e2876 100644
519     --- a/arch/sparc/include/asm/mmu_context_64.h
520     +++ b/arch/sparc/include/asm/mmu_context_64.h
521     @@ -17,13 +17,8 @@ extern spinlock_t ctx_alloc_lock;
522     extern unsigned long tlb_context_cache;
523     extern unsigned long mmu_context_bmap[];
524    
525     +DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
526     void get_new_mmu_context(struct mm_struct *mm);
527     -#ifdef CONFIG_SMP
528     -void smp_new_mmu_context_version(void);
529     -#else
530     -#define smp_new_mmu_context_version() do { } while (0)
531     -#endif
532     -
533     int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
534     void destroy_context(struct mm_struct *mm);
535    
536     @@ -74,8 +69,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
537     static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
538     {
539     unsigned long ctx_valid, flags;
540     - int cpu;
541     + int cpu = smp_processor_id();
542    
543     + per_cpu(per_cpu_secondary_mm, cpu) = mm;
544     if (unlikely(mm == &init_mm))
545     return;
546    
547     @@ -121,7 +117,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
548     * for the first time, we must flush that context out of the
549     * local TLB.
550     */
551     - cpu = smp_processor_id();
552     if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
553     cpumask_set_cpu(cpu, mm_cpumask(mm));
554     __flush_tlb_mm(CTX_HWBITS(mm->context),
555     @@ -131,26 +126,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
556     }
557    
558     #define deactivate_mm(tsk,mm) do { } while (0)
559     -
560     -/* Activate a new MM instance for the current task. */
561     -static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
562     -{
563     - unsigned long flags;
564     - int cpu;
565     -
566     - spin_lock_irqsave(&mm->context.lock, flags);
567     - if (!CTX_VALID(mm->context))
568     - get_new_mmu_context(mm);
569     - cpu = smp_processor_id();
570     - if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
571     - cpumask_set_cpu(cpu, mm_cpumask(mm));
572     -
573     - load_secondary_context(mm);
574     - __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
575     - tsb_context_switch(mm);
576     - spin_unlock_irqrestore(&mm->context.lock, flags);
577     -}
578     -
579     +#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
580     #endif /* !(__ASSEMBLY__) */
581    
582     #endif /* !(__SPARC64_MMU_CONTEXT_H) */
583     diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
584     index 266937030546..522b43db2ed3 100644
585     --- a/arch/sparc/include/asm/pil.h
586     +++ b/arch/sparc/include/asm/pil.h
587     @@ -20,7 +20,6 @@
588     #define PIL_SMP_CALL_FUNC 1
589     #define PIL_SMP_RECEIVE_SIGNAL 2
590     #define PIL_SMP_CAPTURE 3
591     -#define PIL_SMP_CTX_NEW_VERSION 4
592     #define PIL_DEVICE_IRQ 5
593     #define PIL_SMP_CALL_FUNC_SNGL 6
594     #define PIL_DEFERRED_PCR_WORK 7
595     diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
596     index 8174f6cdbbbb..9dca7a892978 100644
597     --- a/arch/sparc/include/asm/vio.h
598     +++ b/arch/sparc/include/asm/vio.h
599     @@ -327,6 +327,7 @@ struct vio_dev {
600     int compat_len;
601    
602     u64 dev_no;
603     + u64 id;
604    
605     unsigned long channel_id;
606    
607     diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
608     index 34a7930b76ef..e1b1ce63a328 100644
609     --- a/arch/sparc/kernel/irq_64.c
610     +++ b/arch/sparc/kernel/irq_64.c
611     @@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
612     {
613     #ifdef CONFIG_SMP
614     unsigned long page;
615     + void *mondo, *p;
616    
617     - BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
618     + BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
619     +
620     + /* Make sure mondo block is 64byte aligned */
621     + p = kzalloc(127, GFP_KERNEL);
622     + if (!p) {
623     + prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
624     + prom_halt();
625     + }
626     + mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
627     + tb->cpu_mondo_block_pa = __pa(mondo);
628    
629     page = get_zeroed_page(GFP_KERNEL);
630     if (!page) {
631     - prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
632     + prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
633     prom_halt();
634     }
635    
636     - tb->cpu_mondo_block_pa = __pa(page);
637     - tb->cpu_list_pa = __pa(page + 64);
638     + tb->cpu_list_pa = __pa(page);
639     #endif
640     }
641    
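The 127-byte allocation above is the classic manual-alignment idiom: over-allocate by align - 1, then round the pointer up. Spelled out with the 64-byte case used here (a sketch):

	/* Sketch: a 64-byte-aligned, 64-byte mondo block carved from an
	 * allocation with a weaker alignment guarantee; 64 + 63 = 127
	 * bytes always contains an aligned 64-byte window.
	 */
	void *p = kzalloc(64 + 63, GFP_KERNEL);
	void *mondo = (void *)(((unsigned long)p + 63) & ~0x3fUL);
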
642     diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
643     index c9804551262c..6ae1e77be0bf 100644
644     --- a/arch/sparc/kernel/kernel.h
645     +++ b/arch/sparc/kernel/kernel.h
646     @@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
647     /* smp_64.c */
648     void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
649     void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
650     -void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
651     void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
652     void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
653    
654     diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
655     index 8182f7caf5b1..d5807d24b98f 100644
656     --- a/arch/sparc/kernel/smp_64.c
657     +++ b/arch/sparc/kernel/smp_64.c
658     @@ -963,37 +963,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
659     preempt_enable();
660     }
661    
662     -void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
663     -{
664     - struct mm_struct *mm;
665     - unsigned long flags;
666     -
667     - clear_softint(1 << irq);
668     -
669     - /* See if we need to allocate a new TLB context because
670     - * the version of the one we are using is now out of date.
671     - */
672     - mm = current->active_mm;
673     - if (unlikely(!mm || (mm == &init_mm)))
674     - return;
675     -
676     - spin_lock_irqsave(&mm->context.lock, flags);
677     -
678     - if (unlikely(!CTX_VALID(mm->context)))
679     - get_new_mmu_context(mm);
680     -
681     - spin_unlock_irqrestore(&mm->context.lock, flags);
682     -
683     - load_secondary_context(mm);
684     - __flush_tlb_mm(CTX_HWBITS(mm->context),
685     - SECONDARY_CONTEXT);
686     -}
687     -
688     -void smp_new_mmu_context_version(void)
689     -{
690     - smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
691     -}
692     -
693     #ifdef CONFIG_KGDB
694     void kgdb_roundup_cpus(unsigned long flags)
695     {
696     diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
697     index d568c8207af7..395ec1800530 100644
698     --- a/arch/sparc/kernel/tsb.S
699     +++ b/arch/sparc/kernel/tsb.S
700     @@ -470,13 +470,16 @@ __tsb_context_switch:
701     .type copy_tsb,#function
702     copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
703     * %o2=new_tsb_base, %o3=new_tsb_size
704     + * %o4=page_size_shift
705     */
706     sethi %uhi(TSB_PASS_BITS), %g7
707     srlx %o3, 4, %o3
708     - add %o0, %o1, %g1 /* end of old tsb */
709     + add %o0, %o1, %o1 /* end of old tsb */
710     sllx %g7, 32, %g7
711     sub %o3, 1, %o3 /* %o3 == new tsb hash mask */
712    
713     + mov %o4, %g1 /* page_size_shift */
714     +
715     661: prefetcha [%o0] ASI_N, #one_read
716     .section .tsb_phys_patch, "ax"
717     .word 661b
718     @@ -501,9 +504,9 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
719     /* This can definitely be computed faster... */
720     srlx %o0, 4, %o5 /* Build index */
721     and %o5, 511, %o5 /* Mask index */
722     - sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
723     + sllx %o5, %g1, %o5 /* Put into vaddr position */
724     or %o4, %o5, %o4 /* Full VADDR. */
725     - srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */
726     + srlx %o4, %g1, %o4 /* Shift down to create index */
727     and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */
728     sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */
729     TSB_STORE(%o2 + %o4, %g2) /* Store TAG */
730     @@ -511,7 +514,7 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
731     TSB_STORE(%o2 + %o4, %g3) /* Store TTE */
732    
733     80: add %o0, 16, %o0
734     - cmp %o0, %g1
735     + cmp %o0, %o1
736     bne,pt %xcc, 90b
737     nop
738    
739     diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S
740     index c6dfdaa29e20..170ead662f2a 100644
741     --- a/arch/sparc/kernel/ttable_64.S
742     +++ b/arch/sparc/kernel/ttable_64.S
743     @@ -50,7 +50,7 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
744     tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
745     tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
746     tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
747     -tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4)
748     +tl0_irq4: BTRAP(0x44)
749     #else
750     tl0_irq1: BTRAP(0x41)
751     tl0_irq2: BTRAP(0x42)
752     diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
753     index f6bb857254fc..075d38980dee 100644
754     --- a/arch/sparc/kernel/vio.c
755     +++ b/arch/sparc/kernel/vio.c
756     @@ -302,13 +302,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
757     if (!id) {
758     dev_set_name(&vdev->dev, "%s", bus_id_name);
759     vdev->dev_no = ~(u64)0;
760     + vdev->id = ~(u64)0;
761     } else if (!cfg_handle) {
762     dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
763     vdev->dev_no = *id;
764     + vdev->id = ~(u64)0;
765     } else {
766     dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
767     *cfg_handle, *id);
768     vdev->dev_no = *cfg_handle;
769     + vdev->id = *id;
770     }
771    
772     vdev->dev.parent = parent;
773     @@ -351,27 +354,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node)
774     (void) vio_create_one(hp, node, &root_vdev->dev);
775     }
776    
777     +struct vio_md_node_query {
778     + const char *type;
779     + u64 dev_no;
780     + u64 id;
781     +};
782     +
783     static int vio_md_node_match(struct device *dev, void *arg)
784     {
785     + struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
786     struct vio_dev *vdev = to_vio_dev(dev);
787    
788     - if (vdev->mp == (u64) arg)
789     - return 1;
790     + if (vdev->dev_no != query->dev_no)
791     + return 0;
792     + if (vdev->id != query->id)
793     + return 0;
794     + if (strcmp(vdev->type, query->type))
795     + return 0;
796    
797     - return 0;
798     + return 1;
799     }
800    
801     static void vio_remove(struct mdesc_handle *hp, u64 node)
802     {
803     + const char *type;
804     + const u64 *id, *cfg_handle;
805     + u64 a;
806     + struct vio_md_node_query query;
807     struct device *dev;
808    
809     - dev = device_find_child(&root_vdev->dev, (void *) node,
810     + type = mdesc_get_property(hp, node, "device-type", NULL);
811     + if (!type) {
812     + type = mdesc_get_property(hp, node, "name", NULL);
813     + if (!type)
814     + type = mdesc_node_name(hp, node);
815     + }
816     +
817     + query.type = type;
818     +
819     + id = mdesc_get_property(hp, node, "id", NULL);
820     + cfg_handle = NULL;
821     + mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
822     + u64 target;
823     +
824     + target = mdesc_arc_target(hp, a);
825     + cfg_handle = mdesc_get_property(hp, target,
826     + "cfg-handle", NULL);
827     + if (cfg_handle)
828     + break;
829     + }
830     +
831     + if (!id) {
832     + query.dev_no = ~(u64)0;
833     + query.id = ~(u64)0;
834     + } else if (!cfg_handle) {
835     + query.dev_no = *id;
836     + query.id = ~(u64)0;
837     + } else {
838     + query.dev_no = *cfg_handle;
839     + query.id = *id;
840     + }
841     +
842     + dev = device_find_child(&root_vdev->dev, &query,
843     vio_md_node_match);
844     if (dev) {
845     printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
846    
847     device_unregister(dev);
848     put_device(dev);
849     + } else {
850     + if (!id)
851     + printk(KERN_ERR "VIO: Removed unknown %s node.\n",
852     + type);
853     + else if (!cfg_handle)
854     + printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
855     + type, *id);
856     + else
857     + printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
858     + type, *cfg_handle, *id);
859     }
860     }
861    
862     diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
863     index 69912d2f8b54..07c03e72d812 100644
864     --- a/arch/sparc/lib/Makefile
865     +++ b/arch/sparc/lib/Makefile
866     @@ -15,6 +15,7 @@ lib-$(CONFIG_SPARC32) += copy_user.o locks.o
867     lib-$(CONFIG_SPARC64) += atomic_64.o
868     lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
869     lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
870     +lib-$(CONFIG_SPARC64) += multi3.o
871    
872     lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
873     lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
874     diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
875     new file mode 100644
876     index 000000000000..d6b6c97fe3c7
877     --- /dev/null
878     +++ b/arch/sparc/lib/multi3.S
879     @@ -0,0 +1,35 @@
880     +#include <linux/linkage.h>
881     +#include <asm/export.h>
882     +
883     + .text
884     + .align 4
885     +ENTRY(__multi3) /* %o0 = u, %o1 = v */
886     + mov %o1, %g1
887     + srl %o3, 0, %g4
888     + mulx %g4, %g1, %o1
889     + srlx %g1, 0x20, %g3
890     + mulx %g3, %g4, %g5
891     + sllx %g5, 0x20, %o5
892     + srl %g1, 0, %g4
893     + sub %o1, %o5, %o5
894     + srlx %o5, 0x20, %o5
895     + addcc %g5, %o5, %g5
896     + srlx %o3, 0x20, %o5
897     + mulx %g4, %o5, %g4
898     + mulx %g3, %o5, %o5
899     + sethi %hi(0x80000000), %g3
900     + addcc %g5, %g4, %g5
901     + srlx %g5, 0x20, %g5
902     + add %g3, %g3, %g3
903     + movcc %xcc, %g0, %g3
904     + addcc %o5, %g5, %o5
905     + sllx %g4, 0x20, %g4
906     + add %o1, %g4, %o1
907     + add %o5, %g3, %g2
908     + mulx %g1, %o2, %g1
909     + add %g1, %g2, %g1
910     + mulx %o0, %o3, %o0
911     + retl
912     + add %g1, %o0, %o0
913     +ENDPROC(__multi3)
914     +EXPORT_SYMBOL(__multi3)
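__multi3 is the libgcc helper the compiler emits for 128-bit multiplies; a hedged C model of the contract the assembly above satisfies (names illustrative, not the kernel code):

	/* Sketch: (hi:lo) result of a 128x128->128 multiply built from
	 * 64x64 partial products, as the assembly does with mulx.
	 */
	struct u128 { unsigned long lo, hi; };

	static unsigned long umulhi64(unsigned long a, unsigned long b)
	{
		unsigned long a0 = a & 0xffffffffUL, a1 = a >> 32;
		unsigned long b0 = b & 0xffffffffUL, b1 = b >> 32;
		unsigned long t = a1 * b0 + ((a0 * b0) >> 32);
		unsigned long w = a0 * b1 + (t & 0xffffffffUL);

		return a1 * b1 + (t >> 32) + (w >> 32);
	}

	static struct u128 multi3(struct u128 u, struct u128 v)
	{
		struct u128 r = {
			.lo = u.lo * v.lo,
			.hi = umulhi64(u.lo, v.lo) + u.lo * v.hi + u.hi * v.lo,
		};
		return r;
	}
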
915     diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
916     index bd7e2aa86c45..57154c638e71 100644
917     --- a/arch/sparc/mm/init_64.c
918     +++ b/arch/sparc/mm/init_64.c
919     @@ -658,10 +658,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
920    
921     /* get_new_mmu_context() uses "cache + 1". */
922     DEFINE_SPINLOCK(ctx_alloc_lock);
923     -unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
924     +unsigned long tlb_context_cache = CTX_FIRST_VERSION;
925     #define MAX_CTX_NR (1UL << CTX_NR_BITS)
926     #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
927     DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
928     +DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
929     +
930     +static void mmu_context_wrap(void)
931     +{
932     + unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
933     + unsigned long new_ver, new_ctx, old_ctx;
934     + struct mm_struct *mm;
935     + int cpu;
936     +
937     + bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
938     +
939     + /* Reserve kernel context */
940     + set_bit(0, mmu_context_bmap);
941     +
942     + new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
943     + if (unlikely(new_ver == 0))
944     + new_ver = CTX_FIRST_VERSION;
945     + tlb_context_cache = new_ver;
946     +
947     + /*
948     + * Make sure that any new mm that are added into per_cpu_secondary_mm,
949     + * are going to go through get_new_mmu_context() path.
950     + */
951     + mb();
952     +
953     + /*
954     + * Updated versions to current on those CPUs that had valid secondary
955     + * contexts
956     + */
957     + for_each_online_cpu(cpu) {
958     + /*
959     + * If a new mm is stored after we took this mm from the array,
960     + * it will go into get_new_mmu_context() path, because we
961     + * already bumped the version in tlb_context_cache.
962     + */
963     + mm = per_cpu(per_cpu_secondary_mm, cpu);
964     +
965     + if (unlikely(!mm || mm == &init_mm))
966     + continue;
967     +
968     + old_ctx = mm->context.sparc64_ctx_val;
969     + if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
970     + new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
971     + set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
972     + mm->context.sparc64_ctx_val = new_ctx;
973     + }
974     + }
975     +}
976    
977     /* Caller does TLB context flushing on local CPU if necessary.
978     * The caller also ensures that CTX_VALID(mm->context) is false.
979     @@ -677,48 +725,30 @@ void get_new_mmu_context(struct mm_struct *mm)
980     {
981     unsigned long ctx, new_ctx;
982     unsigned long orig_pgsz_bits;
983     - int new_version;
984    
985     spin_lock(&ctx_alloc_lock);
986     +retry:
987     + /* wrap might have happened, test again if our context became valid */
988     + if (unlikely(CTX_VALID(mm->context)))
989     + goto out;
990     orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
991     ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
992     new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
993     - new_version = 0;
994     if (new_ctx >= (1 << CTX_NR_BITS)) {
995     new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
996     if (new_ctx >= ctx) {
997     - int i;
998     - new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
999     - CTX_FIRST_VERSION;
1000     - if (new_ctx == 1)
1001     - new_ctx = CTX_FIRST_VERSION;
1002     -
1003     - /* Don't call memset, for 16 entries that's just
1004     - * plain silly...
1005     - */
1006     - mmu_context_bmap[0] = 3;
1007     - mmu_context_bmap[1] = 0;
1008     - mmu_context_bmap[2] = 0;
1009     - mmu_context_bmap[3] = 0;
1010     - for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
1011     - mmu_context_bmap[i + 0] = 0;
1012     - mmu_context_bmap[i + 1] = 0;
1013     - mmu_context_bmap[i + 2] = 0;
1014     - mmu_context_bmap[i + 3] = 0;
1015     - }
1016     - new_version = 1;
1017     - goto out;
1018     + mmu_context_wrap();
1019     + goto retry;
1020     }
1021     }
1022     + if (mm->context.sparc64_ctx_val)
1023     + cpumask_clear(mm_cpumask(mm));
1024     mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
1025     new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
1026     -out:
1027     tlb_context_cache = new_ctx;
1028     mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
1029     +out:
1030     spin_unlock(&ctx_alloc_lock);
1031     -
1032     - if (unlikely(new_version))
1033     - smp_new_mmu_context_version();
1034     }
1035    
1036     static int numa_enabled = 1;
1037     diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
1038     index e20fbbafb0b0..84cd593117a6 100644
1039     --- a/arch/sparc/mm/tsb.c
1040     +++ b/arch/sparc/mm/tsb.c
1041     @@ -451,7 +451,8 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
1042     extern void copy_tsb(unsigned long old_tsb_base,
1043     unsigned long old_tsb_size,
1044     unsigned long new_tsb_base,
1045     - unsigned long new_tsb_size);
1046     + unsigned long new_tsb_size,
1047     + unsigned long page_size_shift);
1048     unsigned long old_tsb_base = (unsigned long) old_tsb;
1049     unsigned long new_tsb_base = (unsigned long) new_tsb;
1050    
1051     @@ -459,7 +460,9 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
1052     old_tsb_base = __pa(old_tsb_base);
1053     new_tsb_base = __pa(new_tsb_base);
1054     }
1055     - copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
1056     + copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
1057     + tsb_index == MM_TSB_BASE ?
1058     + PAGE_SHIFT : REAL_HPAGE_SHIFT);
1059     }
1060    
1061     mm->context.tsb_block[tsb_index].tsb = new_tsb;
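The new page_size_shift argument exists because each TSB hashes virtual addresses with its own page-size shift; hard-coding PAGE_SHIFT, as the old copy_tsb did, rehashed huge-page entries into the wrong slots. Schematically (a sketch, not kernel code):

	/* Sketch: a TSB entry index is the page frame number masked to
	 * the table size, so the base-page TSB indexes with PAGE_SHIFT
	 * and the huge-page TSB with REAL_HPAGE_SHIFT.
	 */
	static unsigned long tsb_index(unsigned long vaddr,
				       unsigned long shift,
				       unsigned long nentries)
	{
		return (vaddr >> shift) & (nentries - 1);
	}
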
1062     diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
1063     index 5d2fd6cd3189..fcf4d27a38fb 100644
1064     --- a/arch/sparc/mm/ultra.S
1065     +++ b/arch/sparc/mm/ultra.S
1066     @@ -971,11 +971,6 @@ xcall_capture:
1067     wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
1068     retry
1069    
1070     - .globl xcall_new_mmu_context_version
1071     -xcall_new_mmu_context_version:
1072     - wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
1073     - retry
1074     -
1075     #ifdef CONFIG_KGDB
1076     .globl xcall_kgdb_capture
1077     xcall_kgdb_capture:
1078     diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
1079     index edbbfc854e39..9cf697ceedbf 100644
1080     --- a/arch/x86/kernel/kvm.c
1081     +++ b/arch/x86/kernel/kvm.c
1082     @@ -162,8 +162,8 @@ void kvm_async_pf_task_wait(u32 token)
1083     */
1084     rcu_irq_exit();
1085     native_safe_halt();
1086     - rcu_irq_enter();
1087     local_irq_disable();
1088     + rcu_irq_enter();
1089     }
1090     }
1091     if (!n.halted)
1092     diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
1093     index 967e459ff1e6..649d8f2c1e40 100644
1094     --- a/arch/x86/kvm/cpuid.c
1095     +++ b/arch/x86/kvm/cpuid.c
1096     @@ -765,18 +765,20 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
1097     static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
1098     {
1099     struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
1100     - int j, nent = vcpu->arch.cpuid_nent;
1101     + struct kvm_cpuid_entry2 *ej;
1102     + int j = i;
1103     + int nent = vcpu->arch.cpuid_nent;
1104    
1105     e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
1106     /* when no next entry is found, the current entry[i] is reselected */
1107     - for (j = i + 1; ; j = (j + 1) % nent) {
1108     - struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
1109     - if (ej->function == e->function) {
1110     - ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
1111     - return j;
1112     - }
1113     - }
1114     - return 0; /* silence gcc, even though control never reaches here */
1115     + do {
1116     + j = (j + 1) % nent;
1117     + ej = &vcpu->arch.cpuid_entries[j];
1118     + } while (ej->function != e->function);
1119     +
1120     + ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
1121     +
1122     + return j;
1123     }
1124    
1125     /* find an entry with matching function, matching index (if needed), and that
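The rewritten loop keeps the old one's termination guarantee while making it visible to the compiler: scanning circularly from i, the search can at worst come back around to entry i itself, which trivially matches its own function. Schematically (a sketch):

	/* Sketch: circular successor search; always terminates at entry i
	 * in the worst case, so the unreachable "return 0" the old
	 * version carried to silence gcc is no longer needed.
	 */
	j = i;
	do {
		j = (j + 1) % nent;
	} while (entries[j].function != entries[i].function);
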
1126     diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
1127     index d9c7e986b4e4..5f2412704b81 100644
1128     --- a/arch/x86/kvm/mmu.c
1129     +++ b/arch/x86/kvm/mmu.c
1130     @@ -3489,12 +3489,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
1131     return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
1132     }
1133    
1134     -static bool can_do_async_pf(struct kvm_vcpu *vcpu)
1135     +bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
1136     {
1137     if (unlikely(!lapic_in_kernel(vcpu) ||
1138     kvm_event_needs_reinjection(vcpu)))
1139     return false;
1140    
1141     + if (is_guest_mode(vcpu))
1142     + return false;
1143     +
1144     return kvm_x86_ops->interrupt_allowed(vcpu);
1145     }
1146    
1147     @@ -3510,7 +3513,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
1148     if (!async)
1149     return false; /* *pfn has correct page already */
1150    
1151     - if (!prefault && can_do_async_pf(vcpu)) {
1152     + if (!prefault && kvm_can_do_async_pf(vcpu)) {
1153     trace_kvm_try_async_get_page(gva, gfn);
1154     if (kvm_find_async_pf_gfn(vcpu, gfn)) {
1155     trace_kvm_async_pf_doublefault(gva, gfn);
1156     diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
1157     index ddc56e91f2e4..c92834c55c59 100644
1158     --- a/arch/x86/kvm/mmu.h
1159     +++ b/arch/x86/kvm/mmu.h
1160     @@ -75,6 +75,7 @@ enum {
1161     int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
1162     void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
1163     void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
1164     +bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
1165    
1166     static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
1167     {
1168     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1169     index 81bba3c2137d..62cde4f67c72 100644
1170     --- a/arch/x86/kvm/x86.c
1171     +++ b/arch/x86/kvm/x86.c
1172     @@ -8444,8 +8444,7 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
1173     if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
1174     return true;
1175     else
1176     - return !kvm_event_needs_reinjection(vcpu) &&
1177     - kvm_x86_ops->interrupt_allowed(vcpu);
1178     + return kvm_can_do_async_pf(vcpu);
1179     }
1180    
1181     void kvm_arch_start_assignment(struct kvm *kvm)
1182     diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
1183     index cdfe8c628959..393a0c0288d1 100644
1184     --- a/arch/x86/platform/efi/quirks.c
1185     +++ b/arch/x86/platform/efi/quirks.c
1186     @@ -358,6 +358,9 @@ void __init efi_free_boot_services(void)
1187     free_bootmem_late(start, size);
1188     }
1189    
1190     + if (!num_entries)
1191     + return;
1192     +
1193     new_size = efi.memmap.desc_size * num_entries;
1194     new_phys = efi_memmap_alloc(num_entries);
1195     if (!new_phys) {
1196     diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
1197     index 3ab6807773ee..c7c3d4e6bc27 100644
1198     --- a/block/cfq-iosched.c
1199     +++ b/block/cfq-iosched.c
1200     @@ -36,9 +36,13 @@ static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
1201     static const int cfq_hist_divisor = 4;
1202    
1203     /*
1204     - * offset from end of service tree
1205     + * offset from end of queue service tree for idle class
1206     */
1207     #define CFQ_IDLE_DELAY (NSEC_PER_SEC / 5)
1208     +/* offset from end of group service tree under time slice mode */
1209     +#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
1210     +/* offset from end of group service under IOPS mode */
1211     +#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
1212    
1213     /*
1214     * below this threshold, we consider thinktime immediate
1215     @@ -1370,6 +1374,14 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1216     cfqg->vfraction = max_t(unsigned, vfr, 1);
1217     }
1218    
1219     +static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
1220     +{
1221     + if (!iops_mode(cfqd))
1222     + return CFQ_SLICE_MODE_GROUP_DELAY;
1223     + else
1224     + return CFQ_IOPS_MODE_GROUP_DELAY;
1225     +}
1226     +
1227     static void
1228     cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1229     {
1230     @@ -1389,7 +1401,8 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1231     n = rb_last(&st->rb);
1232     if (n) {
1233     __cfqg = rb_entry_cfqg(n);
1234     - cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
1235     + cfqg->vdisktime = __cfqg->vdisktime +
1236     + cfq_get_cfqg_vdisktime_delay(cfqd);
1237     } else
1238     cfqg->vdisktime = st->min_vdisktime;
1239     cfq_group_service_tree_add(st, cfqg);
1240     diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
1241     index fd76b5fc3b3a..4955eb66e361 100644
1242     --- a/crypto/asymmetric_keys/public_key.c
1243     +++ b/crypto/asymmetric_keys/public_key.c
1244     @@ -140,7 +140,7 @@ int public_key_verify_signature(const struct public_key *pkey,
1245     * signature and returns that to us.
1246     */
1247     ret = crypto_akcipher_verify(req);
1248     - if (ret == -EINPROGRESS) {
1249     + if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
1250     wait_for_completion(&compl.completion);
1251     ret = compl.err;
1252     }
1253     diff --git a/crypto/drbg.c b/crypto/drbg.c
1254     index 053035b5c8f8..123d211efa12 100644
1255     --- a/crypto/drbg.c
1256     +++ b/crypto/drbg.c
1257     @@ -1768,9 +1768,8 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
1258     break;
1259     case -EINPROGRESS:
1260     case -EBUSY:
1261     - ret = wait_for_completion_interruptible(
1262     - &drbg->ctr_completion);
1263     - if (!ret && !drbg->ctr_async_err) {
1264     + wait_for_completion(&drbg->ctr_completion);
1265     + if (!drbg->ctr_async_err) {
1266     reinit_completion(&drbg->ctr_completion);
1267     break;
1268     }
1269     diff --git a/crypto/gcm.c b/crypto/gcm.c
1270     index f624ac98c94e..dd33fbd2d868 100644
1271     --- a/crypto/gcm.c
1272     +++ b/crypto/gcm.c
1273     @@ -152,10 +152,8 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
1274    
1275     err = crypto_skcipher_encrypt(&data->req);
1276     if (err == -EINPROGRESS || err == -EBUSY) {
1277     - err = wait_for_completion_interruptible(
1278     - &data->result.completion);
1279     - if (!err)
1280     - err = data->result.err;
1281     + wait_for_completion(&data->result.completion);
1282     + err = data->result.err;
1283     }
1284    
1285     if (err)
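The three crypto hunks above apply one pattern: -EBUSY, like -EINPROGRESS, means the request was accepted (onto the backlog) and must be awaited, and the wait must be uninterruptible because a signal would otherwise let the caller free a request the driver still owns. The common shape (a sketch):

	/* Sketch of the async-crypto completion idiom these fixes share. */
	err = crypto_skcipher_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&result.completion);  /* not _interruptible */
		err = result.err;
	}
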
1286     diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
1287     index 74f4c662f776..c94038206c3a 100644
1288     --- a/drivers/ata/ahci.c
1289     +++ b/drivers/ata/ahci.c
1290     @@ -1362,6 +1362,40 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
1291     {}
1292     #endif
1293    
1294     +/*
1295     + * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected
1296     + * as DUMMY, or detected but eventually get a "link down" and never get up
1297     + * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the
1298     + * port_map may hold a value of 0x00.
1299     + *
1300     + * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports
1301     + * and can significantly reduce the occurrence of the problem.
1302     + *
1303     + * https://bugzilla.kernel.org/show_bug.cgi?id=189471
1304     + */
1305     +static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv,
1306     + struct pci_dev *pdev)
1307     +{
1308     + static const struct dmi_system_id sysids[] = {
1309     + {
1310     + .ident = "Acer Switch Alpha 12",
1311     + .matches = {
1312     + DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1313     + DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271")
1314     + },
1315     + },
1316     + { }
1317     + };
1318     +
1319     + if (dmi_check_system(sysids)) {
1320     + dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n");
1321     + if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) {
1322     + hpriv->port_map = 0x7;
1323     + hpriv->cap = 0xC734FF02;
1324     + }
1325     + }
1326     +}
1327     +
1328     #ifdef CONFIG_ARM64
1329     /*
1330     * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
1331     @@ -1597,6 +1631,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1332     "online status unreliable, applying workaround\n");
1333     }
1334    
1335     +
1336     + /* Acer SA5-271 workaround modifies private_data */
1337     + acer_sa5_271_workaround(hpriv, pdev);
1338     +
1339     /* CAP.NP sometimes indicate the index of the last enabled
1340     * port, at other times, that of the last possible port, so
1341     * determining the maximum port number requires looking at
1342     diff --git a/drivers/char/mem.c b/drivers/char/mem.c
1343     index 6e0cbe092220..593a8818aca9 100644
1344     --- a/drivers/char/mem.c
1345     +++ b/drivers/char/mem.c
1346     @@ -343,7 +343,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
1347     phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
1348    
1349     /* It's illegal to wrap around the end of the physical address space. */
1350     - if (offset + (phys_addr_t)size < offset)
1351     + if (offset + (phys_addr_t)size - 1 < offset)
1352     return -EINVAL;
1353    
1354     if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
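The - 1 matters for a mapping that ends exactly at the top of the physical address space; with illustrative 32-bit values (a sketch):

	/* Sketch, assuming a 32-bit phys_addr_t:
	 *   offset = 0xfffff000, size = 0x1000  =>  offset + size == 0
	 *   old: offset + size     < offset  -> true,  valid map rejected
	 *   new: offset + size - 1 < offset  -> false, accepted; only a
	 *        range genuinely wrapping past the end still trips it
	 */
	static bool range_wraps(phys_addr_t offset, phys_addr_t size)
	{
		return offset + size - 1 < offset;
	}
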
1355     diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
1356     index 6153b66139d5..286d4d61bd0b 100644
1357     --- a/drivers/cpufreq/cpufreq.c
1358     +++ b/drivers/cpufreq/cpufreq.c
1359     @@ -2474,6 +2474,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1360     if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
1361     list_empty(&cpufreq_policy_list)) {
1362     /* if all ->init() calls failed, unregister */
1363     + ret = -ENODEV;
1364     pr_debug("%s: No CPU initialized for driver %s\n", __func__,
1365     driver_data->name);
1366     goto err_if_unreg;
1367     diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
1368     index d37e8dda8079..ec240592f5c8 100644
1369     --- a/drivers/dma/ep93xx_dma.c
1370     +++ b/drivers/dma/ep93xx_dma.c
1371     @@ -201,6 +201,7 @@ struct ep93xx_dma_engine {
1372     struct dma_device dma_dev;
1373     bool m2m;
1374     int (*hw_setup)(struct ep93xx_dma_chan *);
1375     + void (*hw_synchronize)(struct ep93xx_dma_chan *);
1376     void (*hw_shutdown)(struct ep93xx_dma_chan *);
1377     void (*hw_submit)(struct ep93xx_dma_chan *);
1378     int (*hw_interrupt)(struct ep93xx_dma_chan *);
1379     @@ -323,6 +324,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
1380     | M2P_CONTROL_ENABLE;
1381     m2p_set_control(edmac, control);
1382    
1383     + edmac->buffer = 0;
1384     +
1385     return 0;
1386     }
1387    
1388     @@ -331,21 +334,27 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
1389     return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
1390     }
1391    
1392     -static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
1393     +static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
1394     {
1395     + unsigned long flags;
1396     u32 control;
1397    
1398     + spin_lock_irqsave(&edmac->lock, flags);
1399     control = readl(edmac->regs + M2P_CONTROL);
1400     control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
1401     m2p_set_control(edmac, control);
1402     + spin_unlock_irqrestore(&edmac->lock, flags);
1403    
1404     while (m2p_channel_state(edmac) >= M2P_STATE_ON)
1405     - cpu_relax();
1406     + schedule();
1407     +}
1408    
1409     +static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
1410     +{
1411     m2p_set_control(edmac, 0);
1412    
1413     - while (m2p_channel_state(edmac) == M2P_STATE_STALL)
1414     - cpu_relax();
1415     + while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
1416     + dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
1417     }
1418    
1419     static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
1420     @@ -1161,6 +1170,26 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
1421     }
1422    
1423     /**
1424     + * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
1425     + * current context.
1426     + * @chan: channel
1427     + *
1428     + * Synchronizes the DMA channel termination to the current context. When this
1429     + * function returns it is guaranteed that all transfers for previously issued
1430     + * descriptors have stopped and it is safe to free the memory associated
1431     + * with them. Furthermore it is guaranteed that all complete callback functions
1432     + * for a previously submitted descriptor have finished running and it is safe to
1433     + * free resources accessed from within the complete callbacks.
1434     + */
1435     +static void ep93xx_dma_synchronize(struct dma_chan *chan)
1436     +{
1437     + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1438     +
1439     + if (edmac->edma->hw_synchronize)
1440     + edmac->edma->hw_synchronize(edmac);
1441     +}
1442     +
1443     +/**
1444     * ep93xx_dma_terminate_all - terminate all transactions
1445     * @chan: channel
1446     *
1447     @@ -1323,6 +1352,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
1448     dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
1449     dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
1450     dma_dev->device_config = ep93xx_dma_slave_config;
1451     + dma_dev->device_synchronize = ep93xx_dma_synchronize;
1452     dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
1453     dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
1454     dma_dev->device_tx_status = ep93xx_dma_tx_status;
1455     @@ -1340,6 +1370,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
1456     } else {
1457     dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
1458    
1459     + edma->hw_synchronize = m2p_hw_synchronize;
1460     edma->hw_setup = m2p_hw_setup;
1461     edma->hw_shutdown = m2p_hw_shutdown;
1462     edma->hw_submit = m2p_hw_submit;
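The hw_synchronize hook added above is what the generic dmaengine device_synchronize callback lands on. A minimal consumer-side sketch of the teardown sequence this enables (assuming a slave channel obtained elsewhere via dma_request_chan(); illustration only, not part of the patch):

    /* May be called from atomic context: stop issuing new transfers. */
    dmaengine_terminate_async(chan);

    /* Later, from sleepable context: wait until all descriptors and
     * completion callbacks have finished.  For this driver the call
     * ends up in ep93xx_dma_synchronize() via device_synchronize. */
    dmaengine_synchronize(chan);

Consistent with synchronize being a sleepable operation, the driver-side wait loop above now uses schedule() instead of cpu_relax().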
1463     diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
1464     index a28a01fcba67..f3e211f8f6c5 100644
1465     --- a/drivers/dma/mv_xor_v2.c
1466     +++ b/drivers/dma/mv_xor_v2.c
1467     @@ -161,6 +161,7 @@ struct mv_xor_v2_device {
1468     struct mv_xor_v2_sw_desc *sw_desq;
1469     int desc_size;
1470     unsigned int npendings;
1471     + unsigned int hw_queue_idx;
1472     };
1473    
1474     /**
1475     @@ -214,18 +215,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
1476     }
1477    
1478     /*
1479     - * Return the next available index in the DESQ.
1480     - */
1481     -static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
1482     -{
1483     - /* read the index for the next available descriptor in the DESQ */
1484     - u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
1485     -
1486     - return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
1487     - & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
1488     -}
1489     -
1490     -/*
1491     * notify the engine of new descriptors, and update the available index.
1492     */
1493     static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
1494     @@ -257,22 +246,6 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
1495     return MV_XOR_V2_EXT_DESC_SIZE;
1496     }
1497    
1498     -/*
1499     - * Set the IMSG threshold
1500     - */
1501     -static inline
1502     -void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
1503     -{
1504     - u32 reg;
1505     -
1506     - reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
1507     -
1508     - reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
1509     - reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
1510     -
1511     - writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
1512     -}
1513     -
1514     static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
1515     {
1516     struct mv_xor_v2_device *xor_dev = data;
1517     @@ -288,12 +261,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
1518     if (!ndescs)
1519     return IRQ_NONE;
1520    
1521     - /*
1522     - * Update IMSG threshold, to disable new IMSG interrupts until
1523     - * end of the tasklet
1524     - */
1525     - mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
1526     -
1527     /* schedule a tasklet to handle descriptors callbacks */
1528     tasklet_schedule(&xor_dev->irq_tasklet);
1529    
1530     @@ -306,7 +273,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
1531     static dma_cookie_t
1532     mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
1533     {
1534     - int desq_ptr;
1535     void *dest_hw_desc;
1536     dma_cookie_t cookie;
1537     struct mv_xor_v2_sw_desc *sw_desc =
1538     @@ -322,15 +288,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
1539     spin_lock_bh(&xor_dev->lock);
1540     cookie = dma_cookie_assign(tx);
1541    
1542     - /* get the next available slot in the DESQ */
1543     - desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
1544     -
1545     /* copy the HW descriptor from the SW descriptor to the DESQ */
1546     - dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
1547     + dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
1548    
1549     memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
1550    
1551     xor_dev->npendings++;
1552     + xor_dev->hw_queue_idx++;
1553     + if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
1554     + xor_dev->hw_queue_idx = 0;
1555    
1556     spin_unlock_bh(&xor_dev->lock);
1557    
1558     @@ -344,6 +310,7 @@ static struct mv_xor_v2_sw_desc *
1559     mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
1560     {
1561     struct mv_xor_v2_sw_desc *sw_desc;
1562     + bool found = false;
1563    
1564     /* Lock the channel */
1565     spin_lock_bh(&xor_dev->lock);
1566     @@ -355,19 +322,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
1567     return NULL;
1568     }
1569    
1570     - /* get a free SW descriptor from the SW DESQ */
1571     - sw_desc = list_first_entry(&xor_dev->free_sw_desc,
1572     - struct mv_xor_v2_sw_desc, free_list);
1573     + list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
1574     + if (async_tx_test_ack(&sw_desc->async_tx)) {
1575     + found = true;
1576     + break;
1577     + }
1578     + }
1579     +
1580     + if (!found) {
1581     + spin_unlock_bh(&xor_dev->lock);
1582     + return NULL;
1583     + }
1584     +
1585     list_del(&sw_desc->free_list);
1586    
1587     /* Release the channel */
1588     spin_unlock_bh(&xor_dev->lock);
1589    
1590     - /* set the async tx descriptor */
1591     - dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
1592     - sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
1593     - async_tx_ack(&sw_desc->async_tx);
1594     -
1595     return sw_desc;
1596     }
1597    
1598     @@ -389,6 +360,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
1599     __func__, len, &src, &dest, flags);
1600    
1601     sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
1602     + if (!sw_desc)
1603     + return NULL;
1604    
1605     sw_desc->async_tx.flags = flags;
1606    
1607     @@ -443,6 +416,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
1608     __func__, src_cnt, len, &dest, flags);
1609    
1610     sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
1611     + if (!sw_desc)
1612     + return NULL;
1613    
1614     sw_desc->async_tx.flags = flags;
1615    
1616     @@ -491,6 +466,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
1617     container_of(chan, struct mv_xor_v2_device, dmachan);
1618    
1619     sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
1620     + if (!sw_desc)
1621     + return NULL;
1622    
1623     /* set the HW descriptor */
1624     hw_descriptor = &sw_desc->hw_desc;
1625     @@ -554,7 +531,6 @@ static void mv_xor_v2_tasklet(unsigned long data)
1626     {
1627     struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
1628     int pending_ptr, num_of_pending, i;
1629     - struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
1630     struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
1631    
1632     dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
1633     @@ -562,17 +538,10 @@ static void mv_xor_v2_tasklet(unsigned long data)
1634     /* get the pending descriptors parameters */
1635     num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
1636    
1637     - /* next HW descriptor */
1638     - next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
1639     -
1640     /* loop over free descriptors */
1641     for (i = 0; i < num_of_pending; i++) {
1642     -
1643     - if (pending_ptr > MV_XOR_V2_DESC_NUM)
1644     - pending_ptr = 0;
1645     -
1646     - if (next_pending_sw_desc != NULL)
1647     - next_pending_hw_desc++;
1648     + struct mv_xor_v2_descriptor *next_pending_hw_desc =
1649     + xor_dev->hw_desq_virt + pending_ptr;
1650    
1651     /* get the SW descriptor related to the HW descriptor */
1652     next_pending_sw_desc =
1653     @@ -608,15 +577,14 @@ static void mv_xor_v2_tasklet(unsigned long data)
1654    
1655     /* increment the next descriptor */
1656     pending_ptr++;
1657     + if (pending_ptr >= MV_XOR_V2_DESC_NUM)
1658     + pending_ptr = 0;
1659     }
1660    
1661     if (num_of_pending != 0) {
1662     /* free the descriptors */
1663     mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
1664     }
1665     -
1666     - /* Update IMSG threshold, to enable new IMSG interrupts */
1667     - mv_xor_v2_set_imsg_thrd(xor_dev, 0);
1668     }
1669    
1670     /*
1671     @@ -648,9 +616,6 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
1672     writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
1673     xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
1674    
1675     - /* enable the DMA engine */
1676     - writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
1677     -
1678     /*
1679     * This is a temporary solution, until we activate the
1680     * SMMU. Set the attributes for reading & writing data buffers
1681     @@ -694,6 +659,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
1682     reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
1683     writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
1684    
1685     + /* enable the DMA engine */
1686     + writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
1687     +
1688     return 0;
1689     }
1690    
1691     @@ -725,6 +693,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
1692    
1693     platform_set_drvdata(pdev, xor_dev);
1694    
1695     + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
1696     + if (ret)
1697     + return ret;
1698     +
1699     xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
1700     if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
1701     return -EPROBE_DEFER;
1702     @@ -785,8 +757,15 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
1703    
1704     /* add all SW descriptors to the free list */
1705     for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
1706     - xor_dev->sw_desq[i].idx = i;
1707     - list_add(&xor_dev->sw_desq[i].free_list,
1708     + struct mv_xor_v2_sw_desc *sw_desc =
1709     + xor_dev->sw_desq + i;
1710     + sw_desc->idx = i;
1711     + dma_async_tx_descriptor_init(&sw_desc->async_tx,
1712     + &xor_dev->dmachan);
1713     + sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
1714     + async_tx_ack(&sw_desc->async_tx);
1715     +
1716     + list_add(&sw_desc->free_list,
1717     &xor_dev->free_sw_desc);
1718     }
1719    
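Several of the hunks above replace the hardware DESQ write-pointer readback with a software index, hw_queue_idx, advanced under the channel lock. The wrap logic is the usual fixed-size ring pattern; in isolation (sketch, names from the driver):

    /* Advance the ring index over MV_XOR_V2_DESC_NUM slots, wrapping. */
    xor_dev->hw_queue_idx++;
    if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
        xor_dev->hw_queue_idx = 0;

The same >= wrap (rather than the old off-by-one > test) is applied to pending_ptr in the tasklet, and the SW descriptors are now initialized once at probe time and recycled via async_tx_test_ack() instead of being re-initialized on every allocation.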
1720     diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
1721     index 06ecdc38cee0..6682b3eec2b6 100644
1722     --- a/drivers/dma/sh/usb-dmac.c
1723     +++ b/drivers/dma/sh/usb-dmac.c
1724     @@ -117,7 +117,7 @@ struct usb_dmac {
1725     #define USB_DMASWR 0x0008
1726     #define USB_DMASWR_SWR (1 << 0)
1727     #define USB_DMAOR 0x0060
1728     -#define USB_DMAOR_AE (1 << 2)
1729     +#define USB_DMAOR_AE (1 << 1)
1730     #define USB_DMAOR_DME (1 << 0)
1731    
1732     #define USB_DMASAR 0x0000
1733     diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
1734     index 5be788b269e2..1679727c22ef 100644
1735     --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
1736     +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
1737     @@ -900,6 +900,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
1738     u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
1739     u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
1740    
1741     + /* disable mclk switching if the refresh is >120Hz, even if the
1742     + * blanking period would allow it
1743     + */
1744     + if (amdgpu_dpm_get_vrefresh(adev) > 120)
1745     + return true;
1746     +
1747     if (vblank_time < switch_limit)
1748     return true;
1749     else
1750     diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
1751     index 6efdba4993fc..0f2fa9044668 100644
1752     --- a/drivers/gpu/drm/drm_drv.c
1753     +++ b/drivers/gpu/drm/drm_drv.c
1754     @@ -379,7 +379,12 @@ EXPORT_SYMBOL(drm_put_dev);
1755     void drm_unplug_dev(struct drm_device *dev)
1756     {
1757     /* for a USB device */
1758     - drm_dev_unregister(dev);
1759     + if (drm_core_check_feature(dev, DRIVER_MODESET))
1760     + drm_modeset_unregister_all(dev);
1761     +
1762     + drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
1763     + drm_minor_unregister(dev, DRM_MINOR_RENDER);
1764     + drm_minor_unregister(dev, DRM_MINOR_CONTROL);
1765    
1766     mutex_lock(&drm_global_mutex);
1767    
1768     diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
1769     index 923150de46cb..ca6efb69ef66 100644
1770     --- a/drivers/gpu/drm/i915/i915_drv.c
1771     +++ b/drivers/gpu/drm/i915/i915_drv.c
1772     @@ -573,9 +573,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
1773     if (i915_inject_load_failure())
1774     return -ENODEV;
1775    
1776     - ret = intel_bios_init(dev_priv);
1777     - if (ret)
1778     - DRM_INFO("failed to find VBIOS tables\n");
1779     + intel_bios_init(dev_priv);
1780    
1781     /* If we have > 1 VGA cards, then we need to arbitrate access
1782     * to the common VGA resources.
1783     diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1784     index e0d72457b23c..36a665f0e5c9 100644
1785     --- a/drivers/gpu/drm/i915/i915_drv.h
1786     +++ b/drivers/gpu/drm/i915/i915_drv.h
1787     @@ -3584,7 +3584,7 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
1788     extern void intel_i2c_reset(struct drm_device *dev);
1789    
1790     /* intel_bios.c */
1791     -int intel_bios_init(struct drm_i915_private *dev_priv);
1792     +void intel_bios_init(struct drm_i915_private *dev_priv);
1793     bool intel_bios_is_valid_vbt(const void *buf, size_t size);
1794     bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
1795     bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
1796     diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
1797     index cf2560708e03..4ac36e3c341f 100644
1798     --- a/drivers/gpu/drm/i915/intel_bios.c
1799     +++ b/drivers/gpu/drm/i915/intel_bios.c
1800     @@ -1332,6 +1332,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
1801     return;
1802     }
1803    
1804     +/* Common defaults which may be overridden by VBT. */
1805     static void
1806     init_vbt_defaults(struct drm_i915_private *dev_priv)
1807     {
1808     @@ -1368,6 +1369,18 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
1809     &dev_priv->vbt.ddi_port_info[port];
1810    
1811     info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
1812     + }
1813     +}
1814     +
1815     +/* Defaults to initialize only if there is no VBT. */
1816     +static void
1817     +init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
1818     +{
1819     + enum port port;
1820     +
1821     + for (port = PORT_A; port < I915_MAX_PORTS; port++) {
1822     + struct ddi_vbt_port_info *info =
1823     + &dev_priv->vbt.ddi_port_info[port];
1824    
1825     info->supports_dvi = (port != PORT_A && port != PORT_E);
1826     info->supports_hdmi = info->supports_dvi;
1827     @@ -1450,36 +1463,35 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
1828     * intel_bios_init - find VBT and initialize settings from the BIOS
1829     * @dev_priv: i915 device instance
1830     *
1831     - * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
1832     - * to appropriate values.
1833     - *
1834     - * Returns 0 on success, nonzero on failure.
1835     + * Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT
1836     + * was not found in ACPI OpRegion, try to find it in PCI ROM first. Also
1837     + * initialize some defaults if the VBT is not present at all.
1838     */
1839     -int
1840     -intel_bios_init(struct drm_i915_private *dev_priv)
1841     +void intel_bios_init(struct drm_i915_private *dev_priv)
1842     {
1843     struct pci_dev *pdev = dev_priv->drm.pdev;
1844     const struct vbt_header *vbt = dev_priv->opregion.vbt;
1845     const struct bdb_header *bdb;
1846     u8 __iomem *bios = NULL;
1847    
1848     - if (HAS_PCH_NOP(dev_priv))
1849     - return -ENODEV;
1850     + if (HAS_PCH_NOP(dev_priv)) {
1851     + DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
1852     + return;
1853     + }
1854    
1855     init_vbt_defaults(dev_priv);
1856    
1857     + /* If the OpRegion does not have VBT, look in PCI ROM. */
1858     if (!vbt) {
1859     size_t size;
1860    
1861     bios = pci_map_rom(pdev, &size);
1862     if (!bios)
1863     - return -1;
1864     + goto out;
1865    
1866     vbt = find_vbt(bios, size);
1867     - if (!vbt) {
1868     - pci_unmap_rom(pdev, bios);
1869     - return -1;
1870     - }
1871     + if (!vbt)
1872     + goto out;
1873    
1874     DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
1875     }
1876     @@ -1504,10 +1516,14 @@ intel_bios_init(struct drm_i915_private *dev_priv)
1877     parse_mipi_sequence(dev_priv, bdb);
1878     parse_ddi_ports(dev_priv, bdb);
1879    
1880     +out:
1881     + if (!vbt) {
1882     + DRM_INFO("Failed to find VBIOS tables (VBT)\n");
1883     + init_vbt_missing_defaults(dev_priv);
1884     + }
1885     +
1886     if (bios)
1887     pci_unmap_rom(pdev, bios);
1888     -
1889     - return 0;
1890     }
1891    
1892     /**
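The rewritten intel_bios_init() now follows a parse-or-fall-back shape: common defaults first, VBT overrides if a table is found, and a separate set of defaults only when no VBT exists at all. Reduced to its skeleton (names hypothetical, sketch only):

    static void table_init(struct priv *p)
    {
        const struct tbl *t = find_table(p);   /* may be NULL */

        init_common_defaults(p);               /* always applied    */
        if (t)
            parse_table(p, t);                 /* table overrides   */
        else
            init_missing_defaults(p);          /* no-table fallback */
    }

Splitting init_vbt_missing_defaults() out of init_vbt_defaults() is what keeps VBT-provided port information from being clobbered by the no-VBT assumptions.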
1893     diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
1894     index 46568fc80848..6abf315fd6da 100644
1895     --- a/drivers/gpu/drm/msm/msm_drv.c
1896     +++ b/drivers/gpu/drm/msm/msm_drv.c
1897     @@ -801,6 +801,7 @@ static struct drm_driver msm_driver = {
1898     .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1899     .gem_prime_export = drm_gem_prime_export,
1900     .gem_prime_import = drm_gem_prime_import,
1901     + .gem_prime_res_obj = msm_gem_prime_res_obj,
1902     .gem_prime_pin = msm_gem_prime_pin,
1903     .gem_prime_unpin = msm_gem_prime_unpin,
1904     .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
1905     diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
1906     index d0da52f2a806..bc98d48c47f8 100644
1907     --- a/drivers/gpu/drm/msm/msm_drv.h
1908     +++ b/drivers/gpu/drm/msm/msm_drv.h
1909     @@ -203,6 +203,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
1910     void *msm_gem_prime_vmap(struct drm_gem_object *obj);
1911     void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
1912     int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
1913     +struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
1914     struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
1915     struct dma_buf_attachment *attach, struct sg_table *sg);
1916     int msm_gem_prime_pin(struct drm_gem_object *obj);
1917     diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
1918     index 60bb290700ce..13403c6da6c7 100644
1919     --- a/drivers/gpu/drm/msm/msm_gem_prime.c
1920     +++ b/drivers/gpu/drm/msm/msm_gem_prime.c
1921     @@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
1922     if (!obj->import_attach)
1923     msm_gem_put_pages(obj);
1924     }
1925     +
1926     +struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
1927     +{
1928     + struct msm_gem_object *msm_obj = to_msm_bo(obj);
1929     +
1930     + return msm_obj->resv;
1931     +}
1932     diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
1933     index 82d3e28918fd..7e4f24ae7de8 100644
1934     --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
1935     +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
1936     @@ -4,6 +4,7 @@
1937    
1938     struct nvkm_alarm {
1939     struct list_head head;
1940     + struct list_head exec;
1941     u64 timestamp;
1942     void (*func)(struct nvkm_alarm *);
1943     };
1944     diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
1945     index f2a86eae0a0d..2437f7d41ca2 100644
1946     --- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
1947     +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
1948     @@ -50,7 +50,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
1949     /* Move to completed list. We'll drop the lock before
1950     * executing the callback so it can reschedule itself.
1951     */
1952     - list_move_tail(&alarm->head, &exec);
1953     + list_del_init(&alarm->head);
1954     + list_add(&alarm->exec, &exec);
1955     }
1956    
1957     /* Shut down interrupt if no more pending alarms. */
1958     @@ -59,8 +60,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
1959     spin_unlock_irqrestore(&tmr->lock, flags);
1960    
1961     /* Execute completed callbacks. */
1962     - list_for_each_entry_safe(alarm, atemp, &exec, head) {
1963     - list_del_init(&alarm->head);
1964     + list_for_each_entry_safe(alarm, atemp, &exec, exec) {
1965     + list_del(&alarm->exec);
1966     alarm->func(alarm);
1967     }
1968     }
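Giving each nvkm_alarm a dedicated exec list head is what makes it safe for a callback to re-arm itself: the pending-list linkage (head) is free for reuse while the trigger loop is still walking its private run list. The shape, reduced to essentials (sketch):

    struct item {
        struct list_head head;   /* linkage on the shared pending list */
        struct list_head exec;   /* linkage on a local run list        */
        void (*func)(struct item *);
    };

    /* with the lock held: */
    list_del_init(&it->head);
    list_add(&it->exec, &run);

    /* after dropping the lock: */
    list_for_each_entry_safe(it, tmp, &run, exec) {
        list_del(&it->exec);
        it->func(it);            /* may retake the lock and re-add head */
    }

With a single list_head, a callback that rescheduled itself would corrupt the iteration still in progress over the local list.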
1969     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
1970     index b6a0806b06bf..a1c68e6a689e 100644
1971     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
1972     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
1973     @@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
1974     return fifo_state->static_buffer;
1975     else {
1976     fifo_state->dynamic_buffer = vmalloc(bytes);
1977     + if (!fifo_state->dynamic_buffer)
1978     + goto out_err;
1979     return fifo_state->dynamic_buffer;
1980     }
1981     }
1982     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
1983     index 05fa092c942b..56b803384ea2 100644
1984     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
1985     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
1986     @@ -1275,11 +1275,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1987     struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1988     int ret;
1989     uint32_t size;
1990     - uint32_t backup_handle;
1991     + uint32_t backup_handle = 0;
1992    
1993     if (req->multisample_count != 0)
1994     return -EINVAL;
1995    
1996     + if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
1997     + return -EINVAL;
1998     +
1999     if (unlikely(vmw_user_surface_size == 0))
2000     vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
2001     128;
2002     @@ -1315,12 +1318,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
2003     ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
2004     &res->backup,
2005     &user_srf->backup_base);
2006     - if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
2007     - res->backup_size) {
2008     - DRM_ERROR("Surface backup buffer is too small.\n");
2009     - vmw_dmabuf_unreference(&res->backup);
2010     - ret = -EINVAL;
2011     - goto out_unlock;
2012     + if (ret == 0) {
2013     + if (res->backup->base.num_pages * PAGE_SIZE <
2014     + res->backup_size) {
2015     + DRM_ERROR("Surface backup buffer is too small.\n");
2016     + vmw_dmabuf_unreference(&res->backup);
2017     + ret = -EINVAL;
2018     + goto out_unlock;
2019     + } else {
2020     + backup_handle = req->buffer_handle;
2021     + }
2022     }
2023     } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
2024     ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
2025     diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
2026     index 21d38c8af21e..7f4f9c4150e3 100644
2027     --- a/drivers/iio/adc/bcm_iproc_adc.c
2028     +++ b/drivers/iio/adc/bcm_iproc_adc.c
2029     @@ -143,7 +143,7 @@ static void iproc_adc_reg_dump(struct iio_dev *indio_dev)
2030     iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA);
2031     }
2032    
2033     -static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
2034     +static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
2035     {
2036     u32 channel_intr_status;
2037     u32 intr_status;
2038     @@ -167,7 +167,7 @@ static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
2039     return IRQ_NONE;
2040     }
2041    
2042     -static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
2043     +static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
2044     {
2045     irqreturn_t retval = IRQ_NONE;
2046     struct iproc_adc_priv *adc_priv;
2047     @@ -181,7 +181,7 @@ static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
2048     adc_priv = iio_priv(indio_dev);
2049    
2050     regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status);
2051     - dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_thread(),INTRPT_STS:%x\n",
2052     + dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_handler(),INTRPT_STS:%x\n",
2053     intr_status);
2054    
2055     intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR;
2056     @@ -566,8 +566,8 @@ static int iproc_adc_probe(struct platform_device *pdev)
2057     }
2058    
2059     ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno,
2060     - iproc_adc_interrupt_thread,
2061     iproc_adc_interrupt_handler,
2062     + iproc_adc_interrupt_thread,
2063     IRQF_SHARED, "iproc-adc", indio_dev);
2064     if (ret) {
2065     dev_err(&pdev->dev, "request_irq error %d\n", ret);
2066     diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
2067     index 3afc53a3d0b6..c298fd86ed86 100644
2068     --- a/drivers/iio/light/ltr501.c
2069     +++ b/drivers/iio/light/ltr501.c
2070     @@ -74,9 +74,9 @@ static const int int_time_mapping[] = {100000, 50000, 200000, 400000};
2071     static const struct reg_field reg_field_it =
2072     REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4);
2073     static const struct reg_field reg_field_als_intr =
2074     - REG_FIELD(LTR501_INTR, 0, 0);
2075     -static const struct reg_field reg_field_ps_intr =
2076     REG_FIELD(LTR501_INTR, 1, 1);
2077     +static const struct reg_field reg_field_ps_intr =
2078     + REG_FIELD(LTR501_INTR, 0, 0);
2079     static const struct reg_field reg_field_als_rate =
2080     REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2);
2081     static const struct reg_field reg_field_ps_rate =
2082     diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
2083     index 020459513384..268210ea4990 100644
2084     --- a/drivers/iio/proximity/as3935.c
2085     +++ b/drivers/iio/proximity/as3935.c
2086     @@ -40,9 +40,9 @@
2087     #define AS3935_AFE_PWR_BIT BIT(0)
2088    
2089     #define AS3935_INT 0x03
2090     -#define AS3935_INT_MASK 0x07
2091     +#define AS3935_INT_MASK 0x0f
2092     #define AS3935_EVENT_INT BIT(3)
2093     -#define AS3935_NOISE_INT BIT(1)
2094     +#define AS3935_NOISE_INT BIT(0)
2095    
2096     #define AS3935_DATA 0x07
2097     #define AS3935_DATA_MASK 0x3F
2098     @@ -215,7 +215,7 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
2099    
2100     st->buffer[0] = val & AS3935_DATA_MASK;
2101     iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
2102     - pf->timestamp);
2103     + iio_get_time_ns(indio_dev));
2104     err_read:
2105     iio_trigger_notify_done(indio_dev->trig);
2106    
2107     @@ -244,7 +244,7 @@ static void as3935_event_work(struct work_struct *work)
2108    
2109     switch (val) {
2110     case AS3935_EVENT_INT:
2111     - iio_trigger_poll(st->trig);
2112     + iio_trigger_poll_chained(st->trig);
2113     break;
2114     case AS3935_NOISE_INT:
2115     dev_warn(&st->spi->dev, "noise level is too high\n");
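Two IIO conventions are restored here: a trigger fired from sleepable context (this handler runs from a workqueue) should use iio_trigger_poll_chained() rather than the hard-IRQ-only iio_trigger_poll(), and a sample pushed from the trigger handler takes its timestamp via iio_get_time_ns(). In outline (sketch; st and indio_dev as in the driver):

    /* sleepable context, e.g. a work handler */
    iio_trigger_poll_chained(st->trig);

    /* buffer handler */
    iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
                                       iio_get_time_ns(indio_dev));

The AS3935_INT_MASK and AS3935_NOISE_INT changes above additionally correct the decoding of the interrupt source register.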
2116     diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
2117     index 7826994c45bf..cd834da5934a 100644
2118     --- a/drivers/input/mouse/elantech.c
2119     +++ b/drivers/input/mouse/elantech.c
2120     @@ -1118,8 +1118,10 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
2121     * Asus UX32VD 0x361f02 00, 15, 0e clickpad
2122     * Avatar AVIU-145A2 0x361f00 ? clickpad
2123     * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
2124     + * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
2125     * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
2126     * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
2127     + * Fujitsu LIFEBOOK E557 0x570f01 40, 14, 0c 2 hw buttons
2128     * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
2129     * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
2130     * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
2131     @@ -1525,6 +1527,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
2132     },
2133     },
2134     {
2135     + /* Fujitsu LIFEBOOK E546 does not work with crc_enabled == 0 */
2136     + .matches = {
2137     + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
2138     + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"),
2139     + },
2140     + },
2141     + {
2142     /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
2143     .matches = {
2144     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
2145     @@ -1546,6 +1555,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
2146     },
2147     },
2148     {
2149     + /* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */
2150     + .matches = {
2151     + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
2152     + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"),
2153     + },
2154     + },
2155     + {
2156     /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
2157     .matches = {
2158     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
2159     diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
2160     index 77080cc5fa0a..afa211397048 100644
2161     --- a/drivers/misc/cxl/file.c
2162     +++ b/drivers/misc/cxl/file.c
2163     @@ -155,11 +155,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
2164    
2165     /* Do this outside the status_mutex to avoid a circular dependency with
2166     * the locking in cxl_mmap_fault() */
2167     - if (copy_from_user(&work, uwork,
2168     - sizeof(struct cxl_ioctl_start_work))) {
2169     - rc = -EFAULT;
2170     - goto out;
2171     - }
2172     + if (copy_from_user(&work, uwork, sizeof(work)))
2173     + return -EFAULT;
2174    
2175     mutex_lock(&ctx->status_mutex);
2176     if (ctx->status != OPENED) {
2177     diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
2178     index a217a74ccc98..224c7103890c 100644
2179     --- a/drivers/misc/cxl/native.c
2180     +++ b/drivers/misc/cxl/native.c
2181     @@ -1066,13 +1066,16 @@ int cxl_native_register_psl_err_irq(struct cxl *adapter)
2182    
2183     void cxl_native_release_psl_err_irq(struct cxl *adapter)
2184     {
2185     - if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
2186     + if (adapter->native->err_virq == 0 ||
2187     + adapter->native->err_virq !=
2188     + irq_find_mapping(NULL, adapter->native->err_hwirq))
2189     return;
2190    
2191     cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
2192     cxl_unmap_irq(adapter->native->err_virq, adapter);
2193     cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
2194     kfree(adapter->irq_name);
2195     + adapter->native->err_virq = 0;
2196     }
2197    
2198     int cxl_native_register_serr_irq(struct cxl_afu *afu)
2199     @@ -1102,13 +1105,15 @@ int cxl_native_register_serr_irq(struct cxl_afu *afu)
2200    
2201     void cxl_native_release_serr_irq(struct cxl_afu *afu)
2202     {
2203     - if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
2204     + if (afu->serr_virq == 0 ||
2205     + afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
2206     return;
2207    
2208     cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
2209     cxl_unmap_irq(afu->serr_virq, afu);
2210     cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
2211     kfree(afu->err_irq_name);
2212     + afu->serr_virq = 0;
2213     }
2214    
2215     int cxl_native_register_psl_irq(struct cxl_afu *afu)
2216     @@ -1131,12 +1136,15 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu)
2217    
2218     void cxl_native_release_psl_irq(struct cxl_afu *afu)
2219     {
2220     - if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
2221     + if (afu->native->psl_virq == 0 ||
2222     + afu->native->psl_virq !=
2223     + irq_find_mapping(NULL, afu->native->psl_hwirq))
2224     return;
2225    
2226     cxl_unmap_irq(afu->native->psl_virq, afu);
2227     cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
2228     kfree(afu->psl_irq_name);
2229     + afu->native->psl_virq = 0;
2230     }
2231    
2232     static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
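Each release path now zeroes the saved virq after freeing it and bails out early when it is already zero, which makes release idempotent across repeated teardown calls. The pattern in isolation (struct and function names hypothetical, sketch):

    static void release_once(struct ctx *c)
    {
        if (!c->virq)            /* already released: no-op */
            return;
        cxl_unmap_irq(c->virq, c);
        c->virq = 0;             /* arm the guard for the next call */
    }

Without the reset, a second release attempt would operate on a stale mapping.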
2233     diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
2234     index dbe676de7a19..0c98ed44df05 100644
2235     --- a/drivers/misc/mei/bus.c
2236     +++ b/drivers/misc/mei/bus.c
2237     @@ -678,8 +678,10 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
2238     {
2239     struct mei_cl_device *cldev = to_mei_cl_device(dev);
2240     const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
2241     + u8 version = mei_me_cl_ver(cldev->me_cl);
2242    
2243     - return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid);
2244     + return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
2245     + cldev->name, uuid, version);
2246     }
2247     static DEVICE_ATTR_RO(modalias);
2248    
2249     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
2250     index 0a9108cd4c45..0a5ee1d973ac 100644
2251     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
2252     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
2253     @@ -1931,7 +1931,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
2254     }
2255    
2256     /* select a non-FCoE queue */
2257     - return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
2258     + return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
2259     }
2260    
2261     void bnx2x_set_num_queues(struct bnx2x *bp)
2262     diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2263     index 19dc9e25aa72..f9c2feb4a4e7 100644
2264     --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2265     +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2266     @@ -2226,10 +2226,14 @@ static int cxgb_up(struct adapter *adap)
2267     if (err)
2268     goto irq_err;
2269     }
2270     +
2271     + mutex_lock(&uld_mutex);
2272     enable_rx(adap);
2273     t4_sge_start(adap);
2274     t4_intr_enable(adap);
2275     adap->flags |= FULL_INIT_DONE;
2276     + mutex_unlock(&uld_mutex);
2277     +
2278     notify_ulds(adap, CXGB4_STATE_UP);
2279     #if IS_ENABLED(CONFIG_IPV6)
2280     update_clip(adap);
2281     diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
2282     index c044667a0a25..e31199f3048c 100644
2283     --- a/drivers/net/ethernet/ethoc.c
2284     +++ b/drivers/net/ethernet/ethoc.c
2285     @@ -710,6 +710,8 @@ static int ethoc_open(struct net_device *dev)
2286     if (ret)
2287     return ret;
2288    
2289     + napi_enable(&priv->napi);
2290     +
2291     ethoc_init_ring(priv, dev->mem_start);
2292     ethoc_reset(priv);
2293    
2294     @@ -722,7 +724,6 @@ static int ethoc_open(struct net_device *dev)
2295     }
2296    
2297     phy_start(dev->phydev);
2298     - napi_enable(&priv->napi);
2299    
2300     if (netif_msg_ifup(priv)) {
2301     dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
2302     diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2303     index b2893fbe25e5..ef6bff820cf6 100644
2304     --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2305     +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2306     @@ -1953,7 +1953,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2307    
2308     priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2309     0, 1,
2310     - (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
2311     + (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2312     0, 0);
2313    
2314     tmp_len -= TSO_MAX_BUFF_SIZE;
2315     diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
2316     index 3c4c2cf6d444..55c4408892be 100644
2317     --- a/drivers/net/vxlan.c
2318     +++ b/drivers/net/vxlan.c
2319     @@ -59,6 +59,8 @@ static const u8 all_zeros_mac[ETH_ALEN + 2];
2320    
2321     static int vxlan_sock_add(struct vxlan_dev *vxlan);
2322    
2323     +static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
2324     +
2325     /* per-network namespace private data for this module */
2326     struct vxlan_net {
2327     struct list_head vxlan_list;
2328     @@ -717,6 +719,22 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
2329     call_rcu(&f->rcu, vxlan_fdb_free);
2330     }
2331    
2332     +static void vxlan_dst_free(struct rcu_head *head)
2333     +{
2334     + struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
2335     +
2336     + dst_cache_destroy(&rd->dst_cache);
2337     + kfree(rd);
2338     +}
2339     +
2340     +static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
2341     + struct vxlan_rdst *rd)
2342     +{
2343     + list_del_rcu(&rd->list);
2344     + vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
2345     + call_rcu(&rd->rcu, vxlan_dst_free);
2346     +}
2347     +
2348     static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
2349     union vxlan_addr *ip, __be16 *port, __be32 *vni,
2350     u32 *ifindex)
2351     @@ -847,9 +865,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
2352     * otherwise destroy the fdb entry
2353     */
2354     if (rd && !list_is_singular(&f->remotes)) {
2355     - list_del_rcu(&rd->list);
2356     - vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
2357     - kfree_rcu(rd, rcu);
2358     + vxlan_fdb_dst_destroy(vxlan, f, rd);
2359     goto out;
2360     }
2361    
2362     @@ -1026,6 +1042,8 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
2363     rcu_assign_pointer(vxlan->vn4_sock, NULL);
2364     synchronize_net();
2365    
2366     + vxlan_vs_del_dev(vxlan);
2367     +
2368     if (__vxlan_sock_release_prep(sock4)) {
2369     udp_tunnel_sock_release(sock4->sock);
2370     kfree(sock4);
2371     @@ -2286,6 +2304,15 @@ static void vxlan_cleanup(unsigned long arg)
2372     mod_timer(&vxlan->age_timer, next_timer);
2373     }
2374    
2375     +static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
2376     +{
2377     + struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2378     +
2379     + spin_lock(&vn->sock_lock);
2380     + hlist_del_init_rcu(&vxlan->hlist);
2381     + spin_unlock(&vn->sock_lock);
2382     +}
2383     +
2384     static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
2385     {
2386     struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2387     @@ -3056,12 +3083,6 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
2388     static void vxlan_dellink(struct net_device *dev, struct list_head *head)
2389     {
2390     struct vxlan_dev *vxlan = netdev_priv(dev);
2391     - struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2392     -
2393     - spin_lock(&vn->sock_lock);
2394     - if (!hlist_unhashed(&vxlan->hlist))
2395     - hlist_del_rcu(&vxlan->hlist);
2396     - spin_unlock(&vn->sock_lock);
2397    
2398     gro_cells_destroy(&vxlan->gro_cells);
2399     list_del(&vxlan->next);
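Two things change together here: the remote-destination free moves from kfree_rcu() to call_rcu() so the grace-period callback can destroy the embedded dst_cache before freeing, and the device is unhashed from the socket's VNI table in vxlan_sock_release() instead of vxlan_dellink(). The deferred-free shape, for reference (this mirrors the added helpers):

    static void rd_free(struct rcu_head *head)
    {
        struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);

        dst_cache_destroy(&rd->dst_cache);   /* extra teardown step */
        kfree(rd);
    }

    list_del_rcu(&rd->list);                 /* unlink under writer lock */
    call_rcu(&rd->rcu, rd_free);             /* free after readers drain */

kfree_rcu() alone cannot run the dst_cache_destroy() step, which is why the explicit callback is needed.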
2400     diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
2401     index 643014f82f7d..4a6e086279f9 100644
2402     --- a/drivers/scsi/qla2xxx/qla_bsg.c
2403     +++ b/drivers/scsi/qla2xxx/qla_bsg.c
2404     @@ -721,6 +721,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
2405     return -EIO;
2406     }
2407    
2408     + memset(&elreq, 0, sizeof(elreq));
2409     +
2410     elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
2411     bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
2412     DMA_TO_DEVICE);
2413     @@ -786,10 +788,9 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
2414    
2415     if (atomic_read(&vha->loop_state) == LOOP_READY &&
2416     (ha->current_topology == ISP_CFG_F ||
2417     - ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
2418     - le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
2419     - && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
2420     - elreq.options == EXTERNAL_LOOPBACK) {
2421     + (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
2422     + req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
2423     + elreq.options == EXTERNAL_LOOPBACK) {
2424     type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
2425     ql_dbg(ql_dbg_user, vha, 0x701e,
2426     "BSG request type: %s.\n", type);
2427     diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
2428     index 45af34ddc432..658e4d15cb71 100644
2429     --- a/drivers/scsi/qla2xxx/qla_dbg.c
2430     +++ b/drivers/scsi/qla2xxx/qla_dbg.c
2431     @@ -1131,7 +1131,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
2432    
2433     /* Mailbox registers. */
2434     mbx_reg = &reg->mailbox0;
2435     - for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++)
2436     + for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
2437     fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
2438    
2439     /* Transfer sequence registers. */
2440     @@ -2090,7 +2090,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
2441    
2442     /* Mailbox registers. */
2443     mbx_reg = &reg->mailbox0;
2444     - for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++)
2445     + for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
2446     fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
2447    
2448     /* Transfer sequence registers. */
2449     diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
2450     index 23698c998699..a1b01d66c9ab 100644
2451     --- a/drivers/scsi/qla2xxx/qla_mbx.c
2452     +++ b/drivers/scsi/qla2xxx/qla_mbx.c
2453     @@ -4783,9 +4783,9 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
2454    
2455     memset(mcp->mb, 0 , sizeof(mcp->mb));
2456     mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
2457     - mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
2458     + /* BIT_6 specifies 64bit address */
2459     + mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
2460     if (IS_CNA_CAPABLE(ha)) {
2461     - mcp->mb[1] |= BIT_15;
2462     mcp->mb[2] = vha->fcoe_fcf_idx;
2463     }
2464     mcp->mb[16] = LSW(mreq->rcv_dma);
2465     diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
2466     index f9b52a4b8c55..94630d4738e6 100644
2467     --- a/drivers/scsi/qla2xxx/qla_os.c
2468     +++ b/drivers/scsi/qla2xxx/qla_os.c
2469     @@ -2420,10 +2420,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2470    
2471     if (mem_only) {
2472     if (pci_enable_device_mem(pdev))
2473     - goto probe_out;
2474     + return ret;
2475     } else {
2476     if (pci_enable_device(pdev))
2477     - goto probe_out;
2478     + return ret;
2479     }
2480    
2481     /* This may fail but that's ok */
2482     @@ -2433,7 +2433,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2483     if (!ha) {
2484     ql_log_pci(ql_log_fatal, pdev, 0x0009,
2485     "Unable to allocate memory for ha.\n");
2486     - goto probe_out;
2487     + goto disable_device;
2488     }
2489     ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
2490     "Memory allocated for ha=%p.\n", ha);
2491     @@ -3039,7 +3039,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2492     kfree(ha);
2493     ha = NULL;
2494    
2495     -probe_out:
2496     +disable_device:
2497     pci_disable_device(pdev);
2498     return ret;
2499     }
2500     diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
2501     index 36935c9ed669..9c2c7fe61280 100644
2502     --- a/drivers/scsi/qla2xxx/qla_tmpl.c
2503     +++ b/drivers/scsi/qla2xxx/qla_tmpl.c
2504     @@ -371,7 +371,7 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
2505     goto done;
2506     }
2507    
2508     - if (end <= start || start == 0 || end == 0) {
2509     + if (end < start || start == 0 || end == 0) {
2510     ql_dbg(ql_dbg_misc, vha, 0xd023,
2511     "%s: unusable range (start=%x end=%x)\n", __func__,
2512     ent->t262.end_addr, ent->t262.start_addr);
2513     diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
2514     index be6e9857ce2a..a6d1cc804647 100644
2515     --- a/drivers/staging/lustre/lustre/lov/lov_pack.c
2516     +++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
2517     @@ -387,18 +387,10 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
2518     struct lov_mds_md *lmmk = NULL;
2519     int rc, lmmk_size, lmm_size;
2520     int lum_size;
2521     - mm_segment_t seg;
2522    
2523     if (!lsm)
2524     return -ENODATA;
2525    
2526     - /*
2527     - * "Switch to kernel segment" to allow copying from kernel space by
2528     - * copy_{to,from}_user().
2529     - */
2530     - seg = get_fs();
2531     - set_fs(KERNEL_DS);
2532     -
2533     /* we only need the header part from user space to get lmm_magic and
2534     * lmm_stripe_count, (the header part is common to v1 and v3)
2535     */
2536     @@ -478,6 +470,5 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
2537     out_free:
2538     kfree(lmmk);
2539     out:
2540     - set_fs(seg);
2541     return rc;
2542     }
2543     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2544     index cae4dea6464e..077344cc819f 100644
2545     --- a/drivers/target/target_core_transport.c
2546     +++ b/drivers/target/target_core_transport.c
2547     @@ -1182,15 +1182,28 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
2548     if (cmd->unknown_data_length) {
2549     cmd->data_length = size;
2550     } else if (size != cmd->data_length) {
2551     - pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
2552     + pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
2553     " %u does not match SCSI CDB Length: %u for SAM Opcode:"
2554     " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
2555     cmd->data_length, size, cmd->t_task_cdb[0]);
2556    
2557     - if (cmd->data_direction == DMA_TO_DEVICE &&
2558     - cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
2559     - pr_err("Rejecting underflow/overflow WRITE data\n");
2560     - return TCM_INVALID_CDB_FIELD;
2561     + if (cmd->data_direction == DMA_TO_DEVICE) {
2562     + if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
2563     + pr_err_ratelimited("Rejecting underflow/overflow"
2564     + " for WRITE data CDB\n");
2565     + return TCM_INVALID_CDB_FIELD;
2566     + }
2567     + /*
2568     + * Some fabric drivers like iscsi-target still expect to
2569     + * always reject overflow writes. Reject this case until
2570     + * full fabric driver level support for overflow writes
2571     + * is introduced tree-wide.
2572     + */
2573     + if (size > cmd->data_length) {
2574     + pr_err_ratelimited("Rejecting overflow for"
2575     + " WRITE control CDB\n");
2576     + return TCM_INVALID_CDB_FIELD;
2577     + }
2578     }
2579     /*
2580     * Reject READ_* or WRITE_* with overflow/underflow for
2581     diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
2582     index d386346248de..91d2ddd6ef88 100644
2583     --- a/drivers/tty/serial/ifx6x60.c
2584     +++ b/drivers/tty/serial/ifx6x60.c
2585     @@ -1381,9 +1381,9 @@ static struct spi_driver ifx_spi_driver = {
2586     static void __exit ifx_spi_exit(void)
2587     {
2588     /* unregister */
2589     + spi_unregister_driver(&ifx_spi_driver);
2590     tty_unregister_driver(tty_drv);
2591     put_tty_driver(tty_drv);
2592     - spi_unregister_driver(&ifx_spi_driver);
2593     unregister_reboot_notifier(&ifx_modem_reboot_notifier_block);
2594     }
2595    
2596     diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
2597     index 4b26252c2885..ee84f89391ca 100644
2598     --- a/drivers/tty/serial/sh-sci.c
2599     +++ b/drivers/tty/serial/sh-sci.c
2600     @@ -1976,11 +1976,13 @@ static int sci_startup(struct uart_port *port)
2601    
2602     dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
2603    
2604     + sci_request_dma(port);
2605     +
2606     ret = sci_request_irq(s);
2607     - if (unlikely(ret < 0))
2608     + if (unlikely(ret < 0)) {
2609     + sci_free_dma(port);
2610     return ret;
2611     -
2612     - sci_request_dma(port);
2613     + }
2614    
2615     return 0;
2616     }
2617     @@ -2012,8 +2014,8 @@ static void sci_shutdown(struct uart_port *port)
2618     }
2619     #endif
2620    
2621     - sci_free_dma(port);
2622     sci_free_irq(s);
2623     + sci_free_dma(port);
2624     }
2625    
2626     static int sci_sck_calc(struct sci_port *s, unsigned int bps,
2627     diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
2628     index 6d23eede4d8c..1c31e8a08810 100644
2629     --- a/drivers/usb/chipidea/debug.c
2630     +++ b/drivers/usb/chipidea/debug.c
2631     @@ -294,7 +294,8 @@ static int ci_role_show(struct seq_file *s, void *data)
2632     {
2633     struct ci_hdrc *ci = s->private;
2634    
2635     - seq_printf(s, "%s\n", ci_role(ci)->name);
2636     + if (ci->role != CI_ROLE_END)
2637     + seq_printf(s, "%s\n", ci_role(ci)->name);
2638    
2639     return 0;
2640     }
2641     diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
2642     index c9e80ad48fdc..6a15b7250e9c 100644
2643     --- a/drivers/usb/chipidea/udc.c
2644     +++ b/drivers/usb/chipidea/udc.c
2645     @@ -1987,6 +1987,7 @@ static void udc_id_switch_for_host(struct ci_hdrc *ci)
2646     int ci_hdrc_gadget_init(struct ci_hdrc *ci)
2647     {
2648     struct ci_role_driver *rdrv;
2649     + int ret;
2650    
2651     if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
2652     return -ENXIO;
2653     @@ -1999,7 +2000,10 @@ int ci_hdrc_gadget_init(struct ci_hdrc *ci)
2654     rdrv->stop = udc_id_switch_for_host;
2655     rdrv->irq = udc_irq;
2656     rdrv->name = "gadget";
2657     - ci->roles[CI_ROLE_GADGET] = rdrv;
2658    
2659     - return udc_start(ci);
2660     + ret = udc_start(ci);
2661     + if (!ret)
2662     + ci->roles[CI_ROLE_GADGET] = rdrv;
2663     +
2664     + return ret;
2665     }
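The role driver is now published in ci->roles[] only after udc_start() succeeds, so a failed gadget init never leaves a half-initialized role visible; the debugfs hunk above guards ci_role_show() against the same no-valid-role state. The publish-on-success shape (sketch):

    ret = udc_start(ci);
    if (!ret)
        ci->roles[CI_ROLE_GADGET] = rdrv;   /* visible only once usable */

    return ret;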
2666     diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
2667     index 8f3659b65f53..ccd93c9e26ab 100644
2668     --- a/drivers/usb/gadget/function/f_mass_storage.c
2669     +++ b/drivers/usb/gadget/function/f_mass_storage.c
2670     @@ -395,7 +395,11 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
2671     /* Caller must hold fsg->lock */
2672     static void wakeup_thread(struct fsg_common *common)
2673     {
2674     - smp_wmb(); /* ensure the write of bh->state is complete */
2675     + /*
2676     + * Ensure the reading of thread_wakeup_needed
2677     + * and the writing of bh->state are completed
2678     + */
2679     + smp_mb();
2680     /* Tell the main thread that something has happened */
2681     common->thread_wakeup_needed = 1;
2682     if (common->thread_task)
2683     @@ -626,7 +630,12 @@ static int sleep_thread(struct fsg_common *common, bool can_freeze)
2684     }
2685     __set_current_state(TASK_RUNNING);
2686     common->thread_wakeup_needed = 0;
2687     - smp_rmb(); /* ensure the latest bh->state is visible */
2688     +
2689     + /*
2690     + * Ensure the writing of thread_wakeup_needed
2691     + * and the reading of bh->state are completed
2692     + */
2693     + smp_mb();
2694     return rc;
2695     }
2696    
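Both barriers become full smp_mb() because each side must order its own write against a subsequent read of the other side's variable, which the one-sided smp_wmb()/smp_rmb() pair cannot do. Schematically (sketch; BUF_STATE_FULL is this driver's buffer state, the exact value is incidental):

    /* waker (wakeup_thread) */
    bh->state = BUF_STATE_FULL;          /* write A */
    smp_mb();                            /* order A before B */
    common->thread_wakeup_needed = 1;    /* write B */

    /* sleeper (sleep_thread) */
    common->thread_wakeup_needed = 0;    /* write C */
    smp_mb();                            /* order C before D */
    state = bh->state;                   /* read D  */

If either barrier were missing, the sleeper could clear thread_wakeup_needed after the waker set it yet still read a stale bh->state, and go back to sleep on work that is already queued.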
2697     diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
2698     index 702040fe2001..0e6061496972 100644
2699     --- a/drivers/xen/privcmd.c
2700     +++ b/drivers/xen/privcmd.c
2701     @@ -335,8 +335,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
2702     st->global_error = 1;
2703     }
2704     }
2705     - st->va += PAGE_SIZE * nr;
2706     - st->index += nr;
2707     + st->va += XEN_PAGE_SIZE * nr;
2708     + st->index += nr / XEN_PFN_PER_PAGE;
2709    
2710     return 0;
2711     }
2712     diff --git a/fs/block_dev.c b/fs/block_dev.c
2713     index 2924bddb4a94..07e46b786500 100644
2714     --- a/fs/block_dev.c
2715     +++ b/fs/block_dev.c
2716     @@ -713,7 +713,7 @@ struct block_device *bdget(dev_t dev)
2717     bdev->bd_contains = NULL;
2718     bdev->bd_super = NULL;
2719     bdev->bd_inode = inode;
2720     - bdev->bd_block_size = (1 << inode->i_blkbits);
2721     + bdev->bd_block_size = i_blocksize(inode);
2722     bdev->bd_part_count = 0;
2723     bdev->bd_invalidated = 0;
2724     inode->i_mode = S_IFBLK;
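This and the filesystem hunks that follow replace open-coded 1 << inode->i_blkbits with the i_blocksize() helper. In mainline the helper is the trivial inline below, from include/linux/fs.h (shown for reference):

    static inline unsigned int i_blocksize(const struct inode *node)
    {
        return (1 << node->i_blkbits);
    }

Besides readability, the unsigned int return type avoids the plain signed int some of the old call sites used.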
2725     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2726     index 5909ae8c6731..e46e7fbe1b34 100644
2727     --- a/fs/btrfs/extent-tree.c
2728     +++ b/fs/btrfs/extent-tree.c
2729     @@ -3984,6 +3984,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2730     info->space_info_kobj, "%s",
2731     alloc_name(found->flags));
2732     if (ret) {
2733     + percpu_counter_destroy(&found->total_bytes_pinned);
2734     kfree(found);
2735     return ret;
2736     }
2737     diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
2738     index 3a14c87d9c92..3286a6e47ff0 100644
2739     --- a/fs/btrfs/file.c
2740     +++ b/fs/btrfs/file.c
2741     @@ -2842,7 +2842,7 @@ static long btrfs_fallocate(struct file *file, int mode,
2742     if (!ret)
2743     ret = btrfs_prealloc_file_range(inode, mode,
2744     range->start,
2745     - range->len, 1 << inode->i_blkbits,
2746     + range->len, i_blocksize(inode),
2747     offset + len, &alloc_hint);
2748     else
2749     btrfs_free_reserved_data_space(inode, range->start,
2750     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2751     index be4da91d880f..bddbae796941 100644
2752     --- a/fs/btrfs/inode.c
2753     +++ b/fs/btrfs/inode.c
2754     @@ -7435,8 +7435,8 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
2755     int found = false;
2756     void **pagep = NULL;
2757     struct page *page = NULL;
2758     - int start_idx;
2759     - int end_idx;
2760     + unsigned long start_idx;
2761     + unsigned long end_idx;
2762    
2763     start_idx = start >> PAGE_SHIFT;
2764    
2765     diff --git a/fs/buffer.c b/fs/buffer.c
2766     index b205a629001d..5d8f496d624e 100644
2767     --- a/fs/buffer.c
2768     +++ b/fs/buffer.c
2769     @@ -2353,7 +2353,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
2770     loff_t pos, loff_t *bytes)
2771     {
2772     struct inode *inode = mapping->host;
2773     - unsigned blocksize = 1 << inode->i_blkbits;
2774     + unsigned int blocksize = i_blocksize(inode);
2775     struct page *page;
2776     void *fsdata;
2777     pgoff_t index, curidx;
2778     @@ -2433,8 +2433,8 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
2779     get_block_t *get_block, loff_t *bytes)
2780     {
2781     struct inode *inode = mapping->host;
2782     - unsigned blocksize = 1 << inode->i_blkbits;
2783     - unsigned zerofrom;
2784     + unsigned int blocksize = i_blocksize(inode);
2785     + unsigned int zerofrom;
2786     int err;
2787    
2788     err = cont_expand_zero(file, mapping, pos, bytes);
2789     @@ -2796,7 +2796,7 @@ int nobh_truncate_page(struct address_space *mapping,
2790     struct buffer_head map_bh;
2791     int err;
2792    
2793     - blocksize = 1 << inode->i_blkbits;
2794     + blocksize = i_blocksize(inode);
2795     length = offset & (blocksize - 1);
2796    
2797     /* Block boundary? Nothing to do */
2798     @@ -2874,7 +2874,7 @@ int block_truncate_page(struct address_space *mapping,
2799     struct buffer_head *bh;
2800     int err;
2801    
2802     - blocksize = 1 << inode->i_blkbits;
2803     + blocksize = i_blocksize(inode);
2804     length = offset & (blocksize - 1);
2805    
2806     /* Block boundary? Nothing to do */
2807     @@ -2986,7 +2986,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2808     struct inode *inode = mapping->host;
2809     tmp.b_state = 0;
2810     tmp.b_blocknr = 0;
2811     - tmp.b_size = 1 << inode->i_blkbits;
2812     + tmp.b_size = i_blocksize(inode);
2813     get_block(inode, block, &tmp, 0);
2814     return tmp.b_blocknr;
2815     }
2816     diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
2817     index 18dc18f8af2c..900ffafb85ca 100644
2818     --- a/fs/ceph/addr.c
2819     +++ b/fs/ceph/addr.c
2820     @@ -745,7 +745,7 @@ static int ceph_writepages_start(struct address_space *mapping,
2821     struct pagevec pvec;
2822     int done = 0;
2823     int rc = 0;
2824     - unsigned wsize = 1 << inode->i_blkbits;
2825     + unsigned int wsize = i_blocksize(inode);
2826     struct ceph_osd_request *req = NULL;
2827     int do_sync = 0;
2828     loff_t snap_size, i_size;
2829     diff --git a/fs/direct-io.c b/fs/direct-io.c
2830     index fb9aa16a7727..c60756e89833 100644
2831     --- a/fs/direct-io.c
2832     +++ b/fs/direct-io.c
2833     @@ -587,7 +587,7 @@ static int dio_set_defer_completion(struct dio *dio)
2834     /*
2835     * Call into the fs to map some more disk blocks. We record the current number
2836     * of available blocks at sdio->blocks_available. These are in units of the
2837     - * fs blocksize, (1 << inode->i_blkbits).
2838     + * fs blocksize, i_blocksize(inode).
2839     *
2840     * The fs is allowed to map lots of blocks at once. If it wants to do that,
2841     * it uses the passed inode-relative block number as the file offset, as usual.
2842     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
2843     index 9fbf92ca358c..a3e0b3b7441d 100644
2844     --- a/fs/ext4/extents.c
2845     +++ b/fs/ext4/extents.c
2846     @@ -3413,13 +3413,13 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2847     struct ext4_sb_info *sbi;
2848     struct ext4_extent_header *eh;
2849     struct ext4_map_blocks split_map;
2850     - struct ext4_extent zero_ex;
2851     + struct ext4_extent zero_ex1, zero_ex2;
2852     struct ext4_extent *ex, *abut_ex;
2853     ext4_lblk_t ee_block, eof_block;
2854     unsigned int ee_len, depth, map_len = map->m_len;
2855     int allocated = 0, max_zeroout = 0;
2856     int err = 0;
2857     - int split_flag = 0;
2858     + int split_flag = EXT4_EXT_DATA_VALID2;
2859    
2860     ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
2861     "block %llu, max_blocks %u\n", inode->i_ino,
2862     @@ -3436,7 +3436,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2863     ex = path[depth].p_ext;
2864     ee_block = le32_to_cpu(ex->ee_block);
2865     ee_len = ext4_ext_get_actual_len(ex);
2866     - zero_ex.ee_len = 0;
2867     + zero_ex1.ee_len = 0;
2868     + zero_ex2.ee_len = 0;
2869    
2870     trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
2871    
2872     @@ -3576,62 +3577,52 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2873     if (ext4_encrypted_inode(inode))
2874     max_zeroout = 0;
2875    
2876     - /* If extent is less than s_max_zeroout_kb, zeroout directly */
2877     - if (max_zeroout && (ee_len <= max_zeroout)) {
2878     - err = ext4_ext_zeroout(inode, ex);
2879     - if (err)
2880     - goto out;
2881     - zero_ex.ee_block = ex->ee_block;
2882     - zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
2883     - ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
2884     -
2885     - err = ext4_ext_get_access(handle, inode, path + depth);
2886     - if (err)
2887     - goto out;
2888     - ext4_ext_mark_initialized(ex);
2889     - ext4_ext_try_to_merge(handle, inode, path, ex);
2890     - err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2891     - goto out;
2892     - }
2893     -
2894     /*
2895     - * four cases:
2896     + * five cases:
2897     * 1. split the extent into three extents.
2898     - * 2. split the extent into two extents, zeroout the first half.
2899     - * 3. split the extent into two extents, zeroout the second half.
2900     + * 2. split the extent into two extents, zeroout the head of the first
2901     + * extent.
2902     + * 3. split the extent into two extents, zeroout the tail of the second
2903     + * extent.
2904     * 4. split the extent into two extents without zeroout.
2905     + * 5. no splitting needed, just possibly zeroout the head and / or the
2906     + * tail of the extent.
2907     */
2908     split_map.m_lblk = map->m_lblk;
2909     split_map.m_len = map->m_len;
2910    
2911     - if (max_zeroout && (allocated > map->m_len)) {
2912     + if (max_zeroout && (allocated > split_map.m_len)) {
2913     if (allocated <= max_zeroout) {
2914     - /* case 3 */
2915     - zero_ex.ee_block =
2916     - cpu_to_le32(map->m_lblk);
2917     - zero_ex.ee_len = cpu_to_le16(allocated);
2918     - ext4_ext_store_pblock(&zero_ex,
2919     - ext4_ext_pblock(ex) + map->m_lblk - ee_block);
2920     - err = ext4_ext_zeroout(inode, &zero_ex);
2921     + /* case 3 or 5 */
2922     + zero_ex1.ee_block =
2923     + cpu_to_le32(split_map.m_lblk +
2924     + split_map.m_len);
2925     + zero_ex1.ee_len =
2926     + cpu_to_le16(allocated - split_map.m_len);
2927     + ext4_ext_store_pblock(&zero_ex1,
2928     + ext4_ext_pblock(ex) + split_map.m_lblk +
2929     + split_map.m_len - ee_block);
2930     + err = ext4_ext_zeroout(inode, &zero_ex1);
2931     if (err)
2932     goto out;
2933     - split_map.m_lblk = map->m_lblk;
2934     split_map.m_len = allocated;
2935     - } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
2936     - /* case 2 */
2937     - if (map->m_lblk != ee_block) {
2938     - zero_ex.ee_block = ex->ee_block;
2939     - zero_ex.ee_len = cpu_to_le16(map->m_lblk -
2940     + }
2941     + if (split_map.m_lblk - ee_block + split_map.m_len <
2942     + max_zeroout) {
2943     + /* case 2 or 5 */
2944     + if (split_map.m_lblk != ee_block) {
2945     + zero_ex2.ee_block = ex->ee_block;
2946     + zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
2947     ee_block);
2948     - ext4_ext_store_pblock(&zero_ex,
2949     + ext4_ext_store_pblock(&zero_ex2,
2950     ext4_ext_pblock(ex));
2951     - err = ext4_ext_zeroout(inode, &zero_ex);
2952     + err = ext4_ext_zeroout(inode, &zero_ex2);
2953     if (err)
2954     goto out;
2955     }
2956    
2957     + split_map.m_len += split_map.m_lblk - ee_block;
2958     split_map.m_lblk = ee_block;
2959     - split_map.m_len = map->m_lblk - ee_block + map->m_len;
2960     allocated = map->m_len;
2961     }
2962     }
2963     @@ -3642,8 +3633,11 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2964     err = 0;
2965     out:
2966     /* If we have gotten a failure, don't zero out status tree */
2967     - if (!err)
2968     - err = ext4_zeroout_es(inode, &zero_ex);
2969     + if (!err) {
2970     + err = ext4_zeroout_es(inode, &zero_ex1);
2971     + if (!err)
2972     + err = ext4_zeroout_es(inode, &zero_ex2);
2973     + }
2974     return err ? err : allocated;
2975     }
2976    
2977     @@ -4893,6 +4887,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
2978    
2979     /* Zero out partial block at the edges of the range */
2980     ret = ext4_zero_partial_blocks(handle, inode, offset, len);
2981     + if (ret >= 0)
2982     + ext4_update_inode_fsync_trans(handle, inode, 1);
2983    
2984     if (file->f_flags & O_SYNC)
2985     ext4_handle_sync(handle);
2986     @@ -5579,6 +5575,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
2987     ext4_handle_sync(handle);
2988     inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
2989     ext4_mark_inode_dirty(handle, inode);
2990     + ext4_update_inode_fsync_trans(handle, inode, 1);
2991    
2992     out_stop:
2993     ext4_journal_stop(handle);
2994     @@ -5752,6 +5749,8 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
2995     up_write(&EXT4_I(inode)->i_data_sem);
2996     if (IS_SYNC(inode))
2997     ext4_handle_sync(handle);
2998     + if (ret >= 0)
2999     + ext4_update_inode_fsync_trans(handle, inode, 1);
3000    
3001     out_stop:
3002     ext4_journal_stop(handle);
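
The convert-to-initialized rewrite is the core of this file's changes: with two
zeroout extents it can zero the head and the tail of one unwritten extent
independently (the new case 5) instead of splitting it or zeroing it whole. A worked
example with assumed numbers, not taken from the patch: ee_block == 100,
ee_len == 10, the write maps m_lblk == 103, m_len == 4, and max_zeroout is large
enough for both branches to fire:

    /* allocated = ee_len - (m_lblk - ee_block) = 10 - 3 = 7        */
    /* tail: zero_ex1 covers 103 + 4 = 107 .. 109 (len 7 - 4 = 3)   */
    /* head: zero_ex2 covers 100 .. 102 (len 103 - 100 = 3)         */
    /* split_map grows to 100 .. 109, so the whole extent is marked */
    /* initialized with no physical split at all (case 5)           */

Both zeroout ranges must then reach the extent status tree, which is why the out:
label now calls ext4_zeroout_es() for zero_ex1 and zero_ex2 in turn.
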
3003     diff --git a/fs/ext4/file.c b/fs/ext4/file.c
3004     index 2a822d30e73f..9e77c089e8cb 100644
3005     --- a/fs/ext4/file.c
3006     +++ b/fs/ext4/file.c
3007     @@ -432,47 +432,27 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
3008     num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
3009     nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
3010     (pgoff_t)num);
3011     - if (nr_pages == 0) {
3012     - if (whence == SEEK_DATA)
3013     - break;
3014     -
3015     - BUG_ON(whence != SEEK_HOLE);
3016     - /*
3017     - * If this is the first time to go into the loop and
3018     - * offset is not beyond the end offset, it will be a
3019     - * hole at this offset
3020     - */
3021     - if (lastoff == startoff || lastoff < endoff)
3022     - found = 1;
3023     - break;
3024     - }
3025     -
3026     - /*
3027     - * If this is the first time to go into the loop and
3028     - * offset is smaller than the first page offset, it will be a
3029     - * hole at this offset.
3030     - */
3031     - if (lastoff == startoff && whence == SEEK_HOLE &&
3032     - lastoff < page_offset(pvec.pages[0])) {
3033     - found = 1;
3034     + if (nr_pages == 0)
3035     break;
3036     - }
3037    
3038     for (i = 0; i < nr_pages; i++) {
3039     struct page *page = pvec.pages[i];
3040     struct buffer_head *bh, *head;
3041    
3042     /*
3043     - * If the current offset is not beyond the end of given
3044     - * range, it will be a hole.
3045     + * If current offset is smaller than the page offset,
3046     + * there is a hole at this offset.
3047     */
3048     - if (lastoff < endoff && whence == SEEK_HOLE &&
3049     - page->index > end) {
3050     + if (whence == SEEK_HOLE && lastoff < endoff &&
3051     + lastoff < page_offset(pvec.pages[i])) {
3052     found = 1;
3053     *offset = lastoff;
3054     goto out;
3055     }
3056    
3057     + if (page->index > end)
3058     + goto out;
3059     +
3060     lock_page(page);
3061    
3062     if (unlikely(page->mapping != inode->i_mapping)) {
3063     @@ -512,20 +492,18 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
3064     unlock_page(page);
3065     }
3066    
3067     - /*
3068     - * The no. of pages is less than our desired, that would be a
3069     - * hole in there.
3070     - */
3071     - if (nr_pages < num && whence == SEEK_HOLE) {
3072     - found = 1;
3073     - *offset = lastoff;
3074     + /* The number of pages is less than we asked for; we are done. */
3075     + if (nr_pages < num)
3076     break;
3077     - }
3078    
3079     index = pvec.pages[i - 1]->index + 1;
3080     pagevec_release(&pvec);
3081     } while (index <= end);
3082    
3083     + if (whence == SEEK_HOLE && lastoff < endoff) {
3084     + found = 1;
3085     + *offset = lastoff;
3086     + }
3087     out:
3088     pagevec_release(&pvec);
3089     return found;
3090     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3091     index 01329688fb9e..1b29efcab3dc 100644
3092     --- a/fs/ext4/inode.c
3093     +++ b/fs/ext4/inode.c
3094     @@ -2205,7 +2205,7 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
3095     {
3096     struct inode *inode = mpd->inode;
3097     int err;
3098     - ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
3099     + ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
3100     >> inode->i_blkbits;
3101    
3102     do {
3103     @@ -3454,14 +3454,14 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
3104     * writes need zeroing either because they can race with page
3105     * faults or because they use partial blocks.
3106     */
3107     - if (round_down(offset, 1<<inode->i_blkbits) >= inode->i_size &&
3108     + if (round_down(offset, i_blocksize(inode)) >= inode->i_size &&
3109     ext4_aligned_io(inode, offset, count))
3110     get_block_func = ext4_dio_get_block;
3111     else
3112     get_block_func = ext4_dax_get_block;
3113     dio_flags = DIO_LOCKING;
3114     } else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
3115     - round_down(offset, 1 << inode->i_blkbits) >= inode->i_size) {
3116     + round_down(offset, i_blocksize(inode)) >= inode->i_size) {
3117     get_block_func = ext4_dio_get_block;
3118     dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
3119     } else if (is_sync_kiocb(iocb)) {
3120     @@ -4044,6 +4044,8 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
3121    
3122     inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
3123     ext4_mark_inode_dirty(handle, inode);
3124     + if (ret >= 0)
3125     + ext4_update_inode_fsync_trans(handle, inode, 1);
3126     out_stop:
3127     ext4_journal_stop(handle);
3128     out_dio:
3129     @@ -5046,7 +5048,7 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
3130     * do. We do the check mainly to optimize the common PAGE_SIZE ==
3131     * blocksize case
3132     */
3133     - if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
3134     + if (offset > PAGE_SIZE - i_blocksize(inode))
3135     return;
3136     while (1) {
3137     page = find_lock_page(inode->i_mapping,
3138     @@ -5441,8 +5443,9 @@ static int ext4_expand_extra_isize(struct inode *inode,
3139     /* No extended attributes present */
3140     if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
3141     header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
3142     - memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
3143     - new_extra_isize);
3144     + memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
3145     + EXT4_I(inode)->i_extra_isize, 0,
3146     + new_extra_isize - EXT4_I(inode)->i_extra_isize);
3147     EXT4_I(inode)->i_extra_isize = new_extra_isize;
3148     return 0;
3149     }
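
The old memset started at EXT4_GOOD_OLD_INODE_SIZE and so wiped the extra-isize
bytes that were already live before the expansion. Worked arithmetic, with
EXT4_GOOD_OLD_INODE_SIZE == 128, an existing i_extra_isize of 32, and
new_extra_isize == 64:

    /* old: memset(raw + 128, 0, 64)      -> zeroes bytes 128..191,  */
    /*      clobbering the 32 live bytes at 128..159                 */
    /* new: memset(raw + 128 + 32, 0, 32) -> zeroes only 160..191,   */
    /*      exactly the region being added                           */
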
3150     diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
3151     index 2e9fc7a61048..846b57ff58de 100644
3152     --- a/fs/ext4/mballoc.c
3153     +++ b/fs/ext4/mballoc.c
3154     @@ -838,7 +838,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
3155     inode = page->mapping->host;
3156     sb = inode->i_sb;
3157     ngroups = ext4_get_groups_count(sb);
3158     - blocksize = 1 << inode->i_blkbits;
3159     + blocksize = i_blocksize(inode);
3160     blocks_per_page = PAGE_SIZE / blocksize;
3161    
3162     groups_per_page = blocks_per_page >> 1;
3163     diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
3164     index 6fc14def0c70..578f8c33fb44 100644
3165     --- a/fs/ext4/move_extent.c
3166     +++ b/fs/ext4/move_extent.c
3167     @@ -187,7 +187,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
3168     if (PageUptodate(page))
3169     return 0;
3170    
3171     - blocksize = 1 << inode->i_blkbits;
3172     + blocksize = i_blocksize(inode);
3173     if (!page_has_buffers(page))
3174     create_empty_buffers(page, blocksize, 0);
3175    
3176     diff --git a/fs/iomap.c b/fs/iomap.c
3177     index 814ae8f9587d..798c291cbc75 100644
3178     --- a/fs/iomap.c
3179     +++ b/fs/iomap.c
3180     @@ -419,8 +419,8 @@ int
3181     iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
3182     struct iomap_ops *ops)
3183     {
3184     - unsigned blocksize = (1 << inode->i_blkbits);
3185     - unsigned off = pos & (blocksize - 1);
3186     + unsigned int blocksize = i_blocksize(inode);
3187     + unsigned int off = pos & (blocksize - 1);
3188    
3189     /* Block boundary? Nothing to do */
3190     if (!off)
3191     diff --git a/fs/jfs/super.c b/fs/jfs/super.c
3192     index 85671f7f8518..14be95bc0bcd 100644
3193     --- a/fs/jfs/super.c
3194     +++ b/fs/jfs/super.c
3195     @@ -758,7 +758,7 @@ static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
3196     sb->s_blocksize - offset : toread;
3197    
3198     tmp_bh.b_state = 0;
3199     - tmp_bh.b_size = 1 << inode->i_blkbits;
3200     + tmp_bh.b_size = i_blocksize(inode);
3201     err = jfs_get_block(inode, blk, &tmp_bh, 0);
3202     if (err)
3203     return err;
3204     @@ -798,7 +798,7 @@ static ssize_t jfs_quota_write(struct super_block *sb, int type,
3205     sb->s_blocksize - offset : towrite;
3206    
3207     tmp_bh.b_state = 0;
3208     - tmp_bh.b_size = 1 << inode->i_blkbits;
3209     + tmp_bh.b_size = i_blocksize(inode);
3210     err = jfs_get_block(inode, blk, &tmp_bh, 1);
3211     if (err)
3212     goto out;
3213     diff --git a/fs/mpage.c b/fs/mpage.c
3214     index d2413af0823a..d2fcb149720d 100644
3215     --- a/fs/mpage.c
3216     +++ b/fs/mpage.c
3217     @@ -115,7 +115,7 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
3218     SetPageUptodate(page);
3219     return;
3220     }
3221     - create_empty_buffers(page, 1 << inode->i_blkbits, 0);
3222     + create_empty_buffers(page, i_blocksize(inode), 0);
3223     }
3224     head = page_buffers(page);
3225     page_bh = head;
3226     diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
3227     index 0780ff864539..3e396dbb1eb9 100644
3228     --- a/fs/nfsd/blocklayout.c
3229     +++ b/fs/nfsd/blocklayout.c
3230     @@ -23,7 +23,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
3231     {
3232     struct nfsd4_layout_seg *seg = &args->lg_seg;
3233     struct super_block *sb = inode->i_sb;
3234     - u32 block_size = (1 << inode->i_blkbits);
3235     + u32 block_size = i_blocksize(inode);
3236     struct pnfs_block_extent *bex;
3237     struct iomap iomap;
3238     u32 device_generation = 0;
3239     @@ -180,7 +180,7 @@ nfsd4_block_proc_layoutcommit(struct inode *inode,
3240     int nr_iomaps;
3241    
3242     nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout,
3243     - lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits);
3244     + lcp->lc_up_len, &iomaps, i_blocksize(inode));
3245     if (nr_iomaps < 0)
3246     return nfserrno(nr_iomaps);
3247    
3248     @@ -372,7 +372,7 @@ nfsd4_scsi_proc_layoutcommit(struct inode *inode,
3249     int nr_iomaps;
3250    
3251     nr_iomaps = nfsd4_scsi_decode_layoutupdate(lcp->lc_up_layout,
3252     - lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits);
3253     + lcp->lc_up_len, &iomaps, i_blocksize(inode));
3254     if (nr_iomaps < 0)
3255     return nfserrno(nr_iomaps);
3256    
3257     diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
3258     index 650226f33298..022d95886d66 100644
3259     --- a/fs/nfsd/nfs4proc.c
3260     +++ b/fs/nfsd/nfs4proc.c
3261     @@ -1783,6 +1783,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
3262     opdesc->op_get_currentstateid(cstate, &op->u);
3263     op->status = opdesc->op_func(rqstp, cstate, &op->u);
3264    
3265     + /* Only from SEQUENCE */
3266     + if (cstate->status == nfserr_replay_cache) {
3267     + dprintk("%s NFS4.1 replay from cache\n", __func__);
3268     + status = op->status;
3269     + goto out;
3270     + }
3271     if (!op->status) {
3272     if (opdesc->op_set_currentstateid)
3273     opdesc->op_set_currentstateid(cstate, &op->u);
3274     @@ -1793,14 +1799,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
3275     if (need_wrongsec_check(rqstp))
3276     op->status = check_nfsd_access(current_fh->fh_export, rqstp);
3277     }
3278     -
3279     encode_op:
3280     - /* Only from SEQUENCE */
3281     - if (cstate->status == nfserr_replay_cache) {
3282     - dprintk("%s NFS4.1 replay from cache\n", __func__);
3283     - status = op->status;
3284     - goto out;
3285     - }
3286     if (op->status == nfserr_replay_me) {
3287     op->replay = &cstate->replay_owner->so_replay;
3288     nfsd4_encode_replay(&resp->xdr, op);
3289     diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
3290     index 2ee80e1f5230..4e7a56a0a9b6 100644
3291     --- a/fs/nfsd/nfs4xdr.c
3292     +++ b/fs/nfsd/nfs4xdr.c
3293     @@ -2793,9 +2793,16 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
3294     }
3295     #endif /* CONFIG_NFSD_PNFS */
3296     if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
3297     - status = nfsd4_encode_bitmap(xdr, NFSD_SUPPATTR_EXCLCREAT_WORD0,
3298     - NFSD_SUPPATTR_EXCLCREAT_WORD1,
3299     - NFSD_SUPPATTR_EXCLCREAT_WORD2);
3300     + u32 supp[3];
3301     +
3302     + supp[0] = nfsd_suppattrs0(minorversion);
3303     + supp[1] = nfsd_suppattrs1(minorversion);
3304     + supp[2] = nfsd_suppattrs2(minorversion);
3305     + supp[0] &= NFSD_SUPPATTR_EXCLCREAT_WORD0;
3306     + supp[1] &= NFSD_SUPPATTR_EXCLCREAT_WORD1;
3307     + supp[2] &= NFSD_SUPPATTR_EXCLCREAT_WORD2;
3308     +
3309     + status = nfsd4_encode_bitmap(xdr, supp[0], supp[1], supp[2]);
3310     if (status)
3311     goto out;
3312     }
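
Instead of replying with the compile-time EXCLCREAT masks, the encoder now
intersects them with the attribute words the negotiated minor version supports, so
a client is never offered an attribute its protocol version lacks. The shape of the
computation, using the helpers from this tree:

    u32 supp0 = nfsd_suppattrs0(minorversion) & NFSD_SUPPATTR_EXCLCREAT_WORD0;
    /* likewise words 1 and 2, then encode the three-word bitmap */
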
3313     diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
3314     index d5c23da43513..c21e0b4454a6 100644
3315     --- a/fs/nilfs2/btnode.c
3316     +++ b/fs/nilfs2/btnode.c
3317     @@ -50,7 +50,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
3318     brelse(bh);
3319     BUG();
3320     }
3321     - memset(bh->b_data, 0, 1 << inode->i_blkbits);
3322     + memset(bh->b_data, 0, i_blocksize(inode));
3323     bh->b_bdev = inode->i_sb->s_bdev;
3324     bh->b_blocknr = blocknr;
3325     set_buffer_mapped(bh);
3326     diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
3327     index c7f4fef9ebf5..7ffe71a8dfb9 100644
3328     --- a/fs/nilfs2/inode.c
3329     +++ b/fs/nilfs2/inode.c
3330     @@ -51,7 +51,7 @@ void nilfs_inode_add_blocks(struct inode *inode, int n)
3331     {
3332     struct nilfs_root *root = NILFS_I(inode)->i_root;
3333    
3334     - inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
3335     + inode_add_bytes(inode, i_blocksize(inode) * n);
3336     if (root)
3337     atomic64_add(n, &root->blocks_count);
3338     }
3339     @@ -60,7 +60,7 @@ void nilfs_inode_sub_blocks(struct inode *inode, int n)
3340     {
3341     struct nilfs_root *root = NILFS_I(inode)->i_root;
3342    
3343     - inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
3344     + inode_sub_bytes(inode, i_blocksize(inode) * n);
3345     if (root)
3346     atomic64_sub(n, &root->blocks_count);
3347     }
3348     diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
3349     index d56d3a5bea88..98835ed6bef4 100644
3350     --- a/fs/nilfs2/mdt.c
3351     +++ b/fs/nilfs2/mdt.c
3352     @@ -57,7 +57,7 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
3353     set_buffer_mapped(bh);
3354    
3355     kaddr = kmap_atomic(bh->b_page);
3356     - memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
3357     + memset(kaddr + bh_offset(bh), 0, i_blocksize(inode));
3358     if (init_block)
3359     init_block(inode, bh, kaddr);
3360     flush_dcache_page(bh->b_page);
3361     @@ -501,7 +501,7 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned int entry_size,
3362     struct nilfs_mdt_info *mi = NILFS_MDT(inode);
3363    
3364     mi->mi_entry_size = entry_size;
3365     - mi->mi_entries_per_block = (1 << inode->i_blkbits) / entry_size;
3366     + mi->mi_entries_per_block = i_blocksize(inode) / entry_size;
3367     mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
3368     }
3369    
3370     diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
3371     index bedcae2c28e6..7d18d62e8e07 100644
3372     --- a/fs/nilfs2/segment.c
3373     +++ b/fs/nilfs2/segment.c
3374     @@ -723,7 +723,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
3375    
3376     lock_page(page);
3377     if (!page_has_buffers(page))
3378     - create_empty_buffers(page, 1 << inode->i_blkbits, 0);
3379     + create_empty_buffers(page, i_blocksize(inode), 0);
3380     unlock_page(page);
3381    
3382     bh = head = page_buffers(page);
3383     diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
3384     index c5c5b9748ea3..f2961b13e8c5 100644
3385     --- a/fs/ocfs2/aops.c
3386     +++ b/fs/ocfs2/aops.c
3387     @@ -599,7 +599,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
3388     int ret = 0;
3389     struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
3390     unsigned int block_end, block_start;
3391     - unsigned int bsize = 1 << inode->i_blkbits;
3392     + unsigned int bsize = i_blocksize(inode);
3393    
3394     if (!page_has_buffers(page))
3395     create_empty_buffers(page, bsize, 0);
3396     diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
3397     index 000c234d7bbd..0db6f83fdea1 100644
3398     --- a/fs/ocfs2/file.c
3399     +++ b/fs/ocfs2/file.c
3400     @@ -808,7 +808,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
3401     /* We know that zero_from is block aligned */
3402     for (block_start = zero_from; block_start < zero_to;
3403     block_start = block_end) {
3404     - block_end = block_start + (1 << inode->i_blkbits);
3405     + block_end = block_start + i_blocksize(inode);
3406    
3407     /*
3408     * block_start is block-aligned. Bump it by one to force
3409     diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
3410     index 06af81f71e10..9b96b99539d6 100644
3411     --- a/fs/orangefs/orangefs-utils.c
3412     +++ b/fs/orangefs/orangefs-utils.c
3413     @@ -306,7 +306,7 @@ int orangefs_inode_getattr(struct inode *inode, int new, int bypass)
3414     break;
3415     case S_IFDIR:
3416     inode->i_size = PAGE_SIZE;
3417     - orangefs_inode->blksize = (1 << inode->i_blkbits);
3418     + orangefs_inode->blksize = i_blocksize(inode);
3419     spin_lock(&inode->i_lock);
3420     inode_set_bytes(inode, inode->i_size);
3421     spin_unlock(&inode->i_lock);
3422     @@ -316,7 +316,7 @@ int orangefs_inode_getattr(struct inode *inode, int new, int bypass)
3423     if (new) {
3424     inode->i_size = (loff_t)strlen(new_op->
3425     downcall.resp.getattr.link_target);
3426     - orangefs_inode->blksize = (1 << inode->i_blkbits);
3427     + orangefs_inode->blksize = i_blocksize(inode);
3428     ret = strscpy(orangefs_inode->link_target,
3429     new_op->downcall.resp.getattr.link_target,
3430     ORANGEFS_NAME_MAX);
3431     diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
3432     index 2f8c5c9bdaf6..b396eb09f288 100644
3433     --- a/fs/reiserfs/file.c
3434     +++ b/fs/reiserfs/file.c
3435     @@ -189,7 +189,7 @@ int reiserfs_commit_page(struct inode *inode, struct page *page,
3436     int ret = 0;
3437    
3438     th.t_trans_id = 0;
3439     - blocksize = 1 << inode->i_blkbits;
3440     + blocksize = i_blocksize(inode);
3441    
3442     if (logit) {
3443     reiserfs_write_lock(s);
3444     diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
3445     index 58b2dedb2a3a..bd4c727f4610 100644
3446     --- a/fs/reiserfs/inode.c
3447     +++ b/fs/reiserfs/inode.c
3448     @@ -524,7 +524,7 @@ static int reiserfs_get_blocks_direct_io(struct inode *inode,
3449     * referenced in convert_tail_for_hole() that may be called from
3450     * reiserfs_get_block()
3451     */
3452     - bh_result->b_size = (1 << inode->i_blkbits);
3453     + bh_result->b_size = i_blocksize(inode);
3454    
3455     ret = reiserfs_get_block(inode, iblock, bh_result,
3456     create | GET_BLOCK_NO_DANGLE);
3457     diff --git a/fs/stat.c b/fs/stat.c
3458     index bc045c7994e1..068fdbcc9e26 100644
3459     --- a/fs/stat.c
3460     +++ b/fs/stat.c
3461     @@ -31,7 +31,7 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
3462     stat->atime = inode->i_atime;
3463     stat->mtime = inode->i_mtime;
3464     stat->ctime = inode->i_ctime;
3465     - stat->blksize = (1 << inode->i_blkbits);
3466     + stat->blksize = i_blocksize(inode);
3467     stat->blocks = inode->i_blocks;
3468     }
3469    
3470     @@ -454,6 +454,7 @@ void __inode_add_bytes(struct inode *inode, loff_t bytes)
3471     inode->i_bytes -= 512;
3472     }
3473     }
3474     +EXPORT_SYMBOL(__inode_add_bytes);
3475    
3476     void inode_add_bytes(struct inode *inode, loff_t bytes)
3477     {
3478     diff --git a/fs/udf/inode.c b/fs/udf/inode.c
3479     index aad46401ede5..129b18a29c8f 100644
3480     --- a/fs/udf/inode.c
3481     +++ b/fs/udf/inode.c
3482     @@ -1214,7 +1214,7 @@ int udf_setsize(struct inode *inode, loff_t newsize)
3483     {
3484     int err;
3485     struct udf_inode_info *iinfo;
3486     - int bsize = 1 << inode->i_blkbits;
3487     + int bsize = i_blocksize(inode);
3488    
3489     if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3490     S_ISLNK(inode->i_mode)))
3491     diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
3492     index 67e085d591d8..a81b97013021 100644
3493     --- a/fs/ufs/balloc.c
3494     +++ b/fs/ufs/balloc.c
3495     @@ -81,7 +81,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
3496     ufs_error (sb, "ufs_free_fragments",
3497     "bit already cleared for fragment %u", i);
3498     }
3499     -
3500     +
3501     + inode_sub_bytes(inode, count << uspi->s_fshift);
3502     fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
3503     uspi->cs_total.cs_nffree += count;
3504     fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
3505     @@ -183,6 +184,7 @@ void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
3506     ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
3507     }
3508     ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
3509     + inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
3510     if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
3511     ufs_clusteracct (sb, ucpi, blkno, 1);
3512    
3513     @@ -494,6 +496,20 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
3514     return 0;
3515     }
3516    
3517     +static bool try_add_frags(struct inode *inode, unsigned frags)
3518     +{
3519     + unsigned size = frags * i_blocksize(inode);
3520     + spin_lock(&inode->i_lock);
3521     + __inode_add_bytes(inode, size);
3522     + if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
3523     + __inode_sub_bytes(inode, size);
3524     + spin_unlock(&inode->i_lock);
3525     + return false;
3526     + }
3527     + spin_unlock(&inode->i_lock);
3528     + return true;
3529     +}
3530     +
3531     static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
3532     unsigned oldcount, unsigned newcount)
3533     {
3534     @@ -530,6 +546,9 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
3535     for (i = oldcount; i < newcount; i++)
3536     if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
3537     return 0;
3538     +
3539     + if (!try_add_frags(inode, count))
3540     + return 0;
3541     /*
3542     * Block can be extended
3543     */
3544     @@ -647,6 +666,7 @@ static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
3545     ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
3546     i = uspi->s_fpb - count;
3547    
3548     + inode_sub_bytes(inode, i << uspi->s_fshift);
3549     fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
3550     uspi->cs_total.cs_nffree += i;
3551     fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
3552     @@ -657,6 +677,8 @@ static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
3553     result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
3554     if (result == INVBLOCK)
3555     return 0;
3556     + if (!try_add_frags(inode, count))
3557     + return 0;
3558     for (i = 0; i < count; i++)
3559     ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
3560    
3561     @@ -716,6 +738,8 @@ static u64 ufs_alloccg_block(struct inode *inode,
3562     return INVBLOCK;
3563     ucpi->c_rotor = result;
3564     gotit:
3565     + if (!try_add_frags(inode, uspi->s_fpb))
3566     + return 0;
3567     blkno = ufs_fragstoblks(result);
3568     ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
3569     if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
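
try_add_frags() charges an allocation to the inode's byte count under i_lock and
immediately backs it out if the resulting i_blocks no longer fits in 32 bits; that
is what the (u32)inode->i_blocks != inode->i_blocks comparison detects. The same
truncation test in isolation, as a user-space sketch with hypothetical values:

    #include <stdint.h>

    static int fits_in_u32(uint64_t blocks)
    {
            /* false once blocks >= 2^32: low 32 bits != whole value */
            return (uint32_t)blocks == blocks;
    }
    /* fits_in_u32(0xffffffffULL) == 1, fits_in_u32(0x100000000ULL) == 0 */

The inode_sub_bytes() calls added to the free paths keep that accounting symmetric.
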
3570     diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
3571     index 190d64be22ed..a2760a2869f4 100644
3572     --- a/fs/ufs/inode.c
3573     +++ b/fs/ufs/inode.c
3574     @@ -235,7 +235,8 @@ ufs_extend_tail(struct inode *inode, u64 writes_to,
3575    
3576     p = ufs_get_direct_data_ptr(uspi, ufsi, block);
3577     tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
3578     - new_size, err, locked_page);
3579     + new_size - (lastfrag & uspi->s_fpbmask), err,
3580     + locked_page);
3581     return tmp != 0;
3582     }
3583    
3584     @@ -284,7 +285,7 @@ ufs_inode_getfrag(struct inode *inode, unsigned index,
3585     goal += uspi->s_fpb;
3586     }
3587     tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
3588     - goal, uspi->s_fpb, err, locked_page);
3589     + goal, nfrags, err, locked_page);
3590    
3591     if (!tmp) {
3592     *err = -ENOSPC;
3593     @@ -402,7 +403,9 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff
3594    
3595     if (!create) {
3596     phys64 = ufs_frag_map(inode, offsets, depth);
3597     - goto out;
3598     + if (phys64)
3599     + map_bh(bh_result, sb, phys64 + frag);
3600     + return 0;
3601     }
3602    
3603     /* This code entered only while writing ....? */
3604     diff --git a/fs/ufs/super.c b/fs/ufs/super.c
3605     index f3469ad0fef2..351162ff1bfd 100644
3606     --- a/fs/ufs/super.c
3607     +++ b/fs/ufs/super.c
3608     @@ -746,6 +746,23 @@ static void ufs_put_super(struct super_block *sb)
3609     return;
3610     }
3611    
3612     +static u64 ufs_max_bytes(struct super_block *sb)
3613     +{
3614     + struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
3615     + int bits = uspi->s_apbshift;
3616     + u64 res;
3617     +
3618     + if (bits > 21)
3619     + res = ~0ULL;
3620     + else
3621     + res = UFS_NDADDR + (1LL << bits) + (1LL << (2*bits)) +
3622     + (1LL << (3*bits));
3623     +
3624     + if (res >= (MAX_LFS_FILESIZE >> uspi->s_bshift))
3625     + return MAX_LFS_FILESIZE;
3626     + return res << uspi->s_bshift;
3627     +}
3628     +
3629     static int ufs_fill_super(struct super_block *sb, void *data, int silent)
3630     {
3631     struct ufs_sb_info * sbi;
3632     @@ -1211,6 +1228,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
3633     "fast symlink size (%u)\n", uspi->s_maxsymlinklen);
3634     uspi->s_maxsymlinklen = maxsymlen;
3635     }
3636     + sb->s_maxbytes = ufs_max_bytes(sb);
3637     sb->s_max_links = UFS_LINK_MAX;
3638    
3639     inode = ufs_iget(sb, UFS_ROOTINO);
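
ufs_max_bytes() derives the largest reachable offset from the indirection geometry:
UFS_NDADDR direct pointers plus single, double and triple indirect levels with
2^s_apbshift addresses per block. A worked instance under assumed geometry, 8 KiB
blocks with 32-bit pointers (s_apbshift == 11, s_bshift == 13):

    /* res = 12 + 2^11 + 2^22 + 2^33 blocks  (~8.6e9 blocks)        */
    /* res << 13 ~= 2^46 bytes, then clamped to MAX_LFS_FILESIZE;   */
    /* on 32-bit hosts the clamp is the tighter of the two limits   */

Setting sb->s_maxbytes lets the VFS refuse I/O beyond what the block pointers can
actually address.
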
3640     diff --git a/fs/ufs/util.h b/fs/ufs/util.h
3641     index b7fbf53dbc81..398019fb1448 100644
3642     --- a/fs/ufs/util.h
3643     +++ b/fs/ufs/util.h
3644     @@ -473,15 +473,19 @@ static inline unsigned _ubh_find_last_zero_bit_(
3645     static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
3646     struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
3647     {
3648     + u8 mask;
3649     switch (uspi->s_fpb) {
3650     case 8:
3651     return (*ubh_get_addr (ubh, begin + block) == 0xff);
3652     case 4:
3653     - return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2)));
3654     + mask = 0x0f << ((block & 0x01) << 2);
3655     + return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask;
3656     case 2:
3657     - return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1)));
3658     + mask = 0x03 << ((block & 0x03) << 1);
3659     + return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask;
3660     case 1:
3661     - return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07)));
3662     + mask = 0x01 << (block & 0x07);
3663     + return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask;
3664     }
3665     return 0;
3666     }
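
The old _ubh_isblockset_ tests compared the whole bitmap byte against the block's
mask, so they only returned true when every other fragment bit in the same byte
happened to be clear. Masking first fixes that. A concrete case with a hypothetical
byte value and s_fpb == 4 (two blocks per byte):

    u8 byte = 0xff;                       /* both blocks in this byte set */
    u8 mask = 0x0f << ((0 & 0x01) << 2);  /* block 0 -> mask 0x0f         */
    /* old: byte == mask           -> 0xff == 0x0f -> false (wrong)   */
    /* new: (byte & mask) == mask  -> 0x0f == 0x0f -> true  (correct) */
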
3667     diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
3668     index 6df0a7ce3e8a..578981412615 100644
3669     --- a/fs/xfs/xfs_aops.c
3670     +++ b/fs/xfs/xfs_aops.c
3671     @@ -108,9 +108,9 @@ xfs_finish_page_writeback(
3672     unsigned int bsize;
3673    
3674     ASSERT(bvec->bv_offset < PAGE_SIZE);
3675     - ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
3676     + ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
3677     ASSERT(end < PAGE_SIZE);
3678     - ASSERT((bvec->bv_len & ((1 << inode->i_blkbits) - 1)) == 0);
3679     + ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);
3680    
3681     bh = head = page_buffers(bvec->bv_page);
3682    
3683     @@ -349,7 +349,7 @@ xfs_map_blocks(
3684     {
3685     struct xfs_inode *ip = XFS_I(inode);
3686     struct xfs_mount *mp = ip->i_mount;
3687     - ssize_t count = 1 << inode->i_blkbits;
3688     + ssize_t count = i_blocksize(inode);
3689     xfs_fileoff_t offset_fsb, end_fsb;
3690     int error = 0;
3691     int bmapi_flags = XFS_BMAPI_ENTIRE;
3692     @@ -759,7 +759,7 @@ xfs_aops_discard_page(
3693     break;
3694     }
3695     next_buffer:
3696     - offset += 1 << inode->i_blkbits;
3697     + offset += i_blocksize(inode);
3698    
3699     } while ((bh = bh->b_this_page) != head);
3700    
3701     @@ -847,7 +847,7 @@ xfs_writepage_map(
3702     LIST_HEAD(submit_list);
3703     struct xfs_ioend *ioend, *next;
3704     struct buffer_head *bh, *head;
3705     - ssize_t len = 1 << inode->i_blkbits;
3706     + ssize_t len = i_blocksize(inode);
3707     int error = 0;
3708     int count = 0;
3709     int uptodate = 1;
3710     @@ -1250,7 +1250,7 @@ xfs_map_trim_size(
3711     offset + mapping_size >= i_size_read(inode)) {
3712     /* limit mapping to block that spans EOF */
3713     mapping_size = roundup_64(i_size_read(inode) - offset,
3714     - 1 << inode->i_blkbits);
3715     + i_blocksize(inode));
3716     }
3717     if (mapping_size > LONG_MAX)
3718     mapping_size = LONG_MAX;
3719     @@ -1286,7 +1286,7 @@ __xfs_get_blocks(
3720     return -EIO;
3721    
3722     offset = (xfs_off_t)iblock << inode->i_blkbits;
3723     - ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
3724     + ASSERT(bh_result->b_size >= i_blocksize(inode));
3725     size = bh_result->b_size;
3726    
3727     if (!create && offset >= i_size_read(inode))
3728     @@ -1634,7 +1634,7 @@ xfs_vm_set_page_dirty(
3729     if (offset < end_offset)
3730     set_buffer_dirty(bh);
3731     bh = bh->b_this_page;
3732     - offset += 1 << inode->i_blkbits;
3733     + offset += i_blocksize(inode);
3734     } while (bh != head);
3735     }
3736     /*
3737     diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
3738     index a90ec3fad69f..df206cfc21f7 100644
3739     --- a/fs/xfs/xfs_file.c
3740     +++ b/fs/xfs/xfs_file.c
3741     @@ -823,7 +823,7 @@ xfs_file_fallocate(
3742     if (error)
3743     goto out_unlock;
3744     } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
3745     - unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
3746     + unsigned int blksize_mask = i_blocksize(inode) - 1;
3747    
3748     if (offset & blksize_mask || len & blksize_mask) {
3749     error = -EINVAL;
3750     @@ -845,7 +845,7 @@ xfs_file_fallocate(
3751     if (error)
3752     goto out_unlock;
3753     } else if (mode & FALLOC_FL_INSERT_RANGE) {
3754     - unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
3755     + unsigned int blksize_mask = i_blocksize(inode) - 1;
3756    
3757     new_size = i_size_read(inode) + len;
3758     if (offset & blksize_mask || len & blksize_mask) {
3759     diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
3760     index 5b17de62c962..6fb1c34cf805 100644
3761     --- a/include/linux/cgroup-defs.h
3762     +++ b/include/linux/cgroup-defs.h
3763     @@ -46,6 +46,7 @@ enum {
3764     CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
3765     CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
3766     CSS_VISIBLE = (1 << 3), /* css is visible to userland */
3767     + CSS_DYING = (1 << 4), /* css is dying */
3768     };
3769    
3770     /* bits in struct cgroup flags field */
3771     diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
3772     index 307ae63ef262..7620a8bc0493 100644
3773     --- a/include/linux/cgroup.h
3774     +++ b/include/linux/cgroup.h
3775     @@ -344,6 +344,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css)
3776     }
3777    
3778     /**
3779     + * css_is_dying - test whether the specified css is dying
3780     + * @css: target css
3781     + *
3782     + * Test whether @css is in the process of offlining or already offline. In
3783     + * most cases, ->css_online() and ->css_offline() callbacks should be
3784     + * enough; however, the actual offline operations are RCU delayed and this
3785     + * test returns %true also when @css is scheduled to be offlined.
3786     + *
3787     + * This is useful, for example, when the use case requires synchronous
3788     + * behavior with respect to cgroup removal. cgroup removal schedules css
3789     + * offlining but the css can seem alive while the operation is being
3790     + * delayed. If the delay affects user visible semantics, this test can be
3791     + * used to resolve the situation.
3792     + */
3793     +static inline bool css_is_dying(struct cgroup_subsys_state *css)
3794     +{
3795     + return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
3796     +}
3797     +
3798     +/**
3799     * css_put - put a css reference
3800     * @css: target css
3801     *
3802     diff --git a/include/linux/fs.h b/include/linux/fs.h
3803     index dc0478c07b2a..2f63d44368bd 100644
3804     --- a/include/linux/fs.h
3805     +++ b/include/linux/fs.h
3806     @@ -705,6 +705,11 @@ struct inode {
3807     void *i_private; /* fs or device private pointer */
3808     };
3809    
3810     +static inline unsigned int i_blocksize(const struct inode *node)
3811     +{
3812     + return (1 << node->i_blkbits);
3813     +}
3814     +
3815     static inline int inode_unhashed(struct inode *inode)
3816     {
3817     return hlist_unhashed(&inode->i_hash);
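
This helper is the target of every 1 << inode->i_blkbits conversion earlier in the
patch: one definition pins the result type to unsigned int and makes the call sites
self-describing. Usage is mechanical:

    unsigned int bsize = i_blocksize(inode);  /* == 1 << inode->i_blkbits */
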
3818     diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
3819     index e0e539321ab9..d53a23100401 100644
3820     --- a/include/linux/ptrace.h
3821     +++ b/include/linux/ptrace.h
3822     @@ -53,7 +53,8 @@ extern int ptrace_request(struct task_struct *child, long request,
3823     unsigned long addr, unsigned long data);
3824     extern void ptrace_notify(int exit_code);
3825     extern void __ptrace_link(struct task_struct *child,
3826     - struct task_struct *new_parent);
3827     + struct task_struct *new_parent,
3828     + const struct cred *ptracer_cred);
3829     extern void __ptrace_unlink(struct task_struct *child);
3830     extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
3831     #define PTRACE_MODE_READ 0x01
3832     @@ -205,7 +206,7 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
3833    
3834     if (unlikely(ptrace) && current->ptrace) {
3835     child->ptrace = current->ptrace;
3836     - __ptrace_link(child, current->parent);
3837     + __ptrace_link(child, current->parent, current->ptracer_cred);
3838    
3839     if (child->ptrace & PT_SEIZED)
3840     task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
3841     @@ -214,6 +215,8 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
3842    
3843     set_tsk_thread_flag(child, TIF_SIGPENDING);
3844     }
3845     + else
3846     + child->ptracer_cred = NULL;
3847     }
3848    
3849     /**
3850     diff --git a/include/net/ipv6.h b/include/net/ipv6.h
3851     index 7f15f95625e7..91afb4aadaa6 100644
3852     --- a/include/net/ipv6.h
3853     +++ b/include/net/ipv6.h
3854     @@ -1001,6 +1001,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
3855     */
3856     extern const struct proto_ops inet6_stream_ops;
3857     extern const struct proto_ops inet6_dgram_ops;
3858     +extern const struct proto_ops inet6_sockraw_ops;
3859    
3860     struct group_source_req;
3861     struct group_filter;
3862     diff --git a/kernel/cgroup.c b/kernel/cgroup.c
3863     index a3d2aad2443f..1fde8eec9529 100644
3864     --- a/kernel/cgroup.c
3865     +++ b/kernel/cgroup.c
3866     @@ -5407,6 +5407,11 @@ static void kill_css(struct cgroup_subsys_state *css)
3867     {
3868     lockdep_assert_held(&cgroup_mutex);
3869    
3870     + if (css->flags & CSS_DYING)
3871     + return;
3872     +
3873     + css->flags |= CSS_DYING;
3874     +
3875     /*
3876     * This must happen before css is disassociated with its cgroup.
3877     * See seq_css() for details.
3878     diff --git a/kernel/cpu.c b/kernel/cpu.c
3879     index 99c6c568bc55..8f52977aad59 100644
3880     --- a/kernel/cpu.c
3881     +++ b/kernel/cpu.c
3882     @@ -1765,13 +1765,13 @@ static ssize_t write_cpuhp_target(struct device *dev,
3883     ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
3884     mutex_unlock(&cpuhp_state_mutex);
3885     if (ret)
3886     - return ret;
3887     + goto out;
3888    
3889     if (st->state < target)
3890     ret = do_cpu_up(dev->id, target);
3891     else
3892     ret = do_cpu_down(dev->id, target);
3893     -
3894     +out:
3895     unlock_device_hotplug();
3896     return ret ? ret : count;
3897     }
3898     diff --git a/kernel/cpuset.c b/kernel/cpuset.c
3899     index 29f815d2ef7e..24d175d2b62d 100644
3900     --- a/kernel/cpuset.c
3901     +++ b/kernel/cpuset.c
3902     @@ -174,9 +174,9 @@ typedef enum {
3903     } cpuset_flagbits_t;
3904    
3905     /* convenient tests for these bits */
3906     -static inline bool is_cpuset_online(const struct cpuset *cs)
3907     +static inline bool is_cpuset_online(struct cpuset *cs)
3908     {
3909     - return test_bit(CS_ONLINE, &cs->flags);
3910     + return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
3911     }
3912    
3913     static inline int is_cpu_exclusive(const struct cpuset *cs)
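
Combined with CSS_DYING and css_is_dying() above, this closes the window in which a
cpuset whose css has been killed, but whose RCU-delayed offline has not yet cleared
CS_ONLINE, still passed the online test. The const qualifier has to go because
css_is_dying() inspects the live percpu refcount. A hypothetical caller-side guard
in the same spirit:

    if (css_is_dying(&cs->css))
            return -ENODEV;   /* cgroup is being removed: reject the write */
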
3914     diff --git a/kernel/events/core.c b/kernel/events/core.c
3915     index 07c0dc806dfc..11cc1d83c770 100644
3916     --- a/kernel/events/core.c
3917     +++ b/kernel/events/core.c
3918     @@ -7062,6 +7062,21 @@ static void perf_log_itrace_start(struct perf_event *event)
3919     perf_output_end(&handle);
3920     }
3921    
3922     +static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
3923     +{
3924     + /*
3925     + * Due to interrupt latency (AKA "skid"), we may enter the
3926     + * kernel before taking an overflow, even if the PMU is only
3927     + * counting user events.
3928     + * To avoid leaking information to userspace, we must always
3929     + * reject kernel samples when exclude_kernel is set.
3930     + */
3931     + if (event->attr.exclude_kernel && !user_mode(regs))
3932     + return false;
3933     +
3934     + return true;
3935     +}
3936     +
3937     /*
3938     * Generic event overflow handling, sampling.
3939     */
3940     @@ -7109,6 +7124,12 @@ static int __perf_event_overflow(struct perf_event *event,
3941     }
3942    
3943     /*
3944     + * For security, drop the skid kernel samples if necessary.
3945     + */
3946     + if (!sample_is_allowed(event, regs))
3947     + return ret;
3948     +
3949     + /*
3950     * XXX event_limit might not quite work as expected on inherited
3951     * events
3952     */
3953     diff --git a/kernel/ptrace.c b/kernel/ptrace.c
3954     index a5caecef88be..f39a7be98fc1 100644
3955     --- a/kernel/ptrace.c
3956     +++ b/kernel/ptrace.c
3957     @@ -57,19 +57,25 @@ int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
3958     }
3959    
3960    
3961     +void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
3962     + const struct cred *ptracer_cred)
3963     +{
3964     + BUG_ON(!list_empty(&child->ptrace_entry));
3965     + list_add(&child->ptrace_entry, &new_parent->ptraced);
3966     + child->parent = new_parent;
3967     + child->ptracer_cred = get_cred(ptracer_cred);
3968     +}
3969     +
3970     /*
3971     * ptrace a task: make the debugger its new parent and
3972     * move it to the ptrace list.
3973     *
3974     * Must be called with the tasklist lock write-held.
3975     */
3976     -void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
3977     +static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
3978     {
3979     - BUG_ON(!list_empty(&child->ptrace_entry));
3980     - list_add(&child->ptrace_entry, &new_parent->ptraced);
3981     - child->parent = new_parent;
3982     rcu_read_lock();
3983     - child->ptracer_cred = get_cred(__task_cred(new_parent));
3984     + __ptrace_link(child, new_parent, __task_cred(new_parent));
3985     rcu_read_unlock();
3986     }
3987    
3988     @@ -383,7 +389,7 @@ static int ptrace_attach(struct task_struct *task, long request,
3989     flags |= PT_SEIZED;
3990     task->ptrace = flags;
3991    
3992     - __ptrace_link(task, current);
3993     + ptrace_link(task, current);
3994    
3995     /* SEIZE doesn't trap tracee on attach */
3996     if (!seize)
3997     @@ -456,7 +462,7 @@ static int ptrace_traceme(void)
3998     */
3999     if (!ret && !(current->real_parent->flags & PF_EXITING)) {
4000     current->ptrace = PT_PTRACED;
4001     - __ptrace_link(current, current->real_parent);
4002     + ptrace_link(current, current->real_parent);
4003     }
4004     }
4005     write_unlock_irq(&tasklist_lock);
4006     diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
4007     index 69e06898997d..cb771c76682e 100644
4008     --- a/kernel/sched/cpufreq_schedutil.c
4009     +++ b/kernel/sched/cpufreq_schedutil.c
4010     @@ -32,6 +32,7 @@ struct sugov_policy {
4011     u64 last_freq_update_time;
4012     s64 freq_update_delay_ns;
4013     unsigned int next_freq;
4014     + unsigned int cached_raw_freq;
4015    
4016     /* The next fields are only needed if fast switch cannot be used. */
4017     struct irq_work irq_work;
4018     @@ -46,7 +47,6 @@ struct sugov_cpu {
4019     struct update_util_data update_util;
4020     struct sugov_policy *sg_policy;
4021    
4022     - unsigned int cached_raw_freq;
4023     unsigned long iowait_boost;
4024     unsigned long iowait_boost_max;
4025     u64 last_update;
4026     @@ -140,9 +140,9 @@ static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util,
4027    
4028     freq = (freq + (freq >> 2)) * util / max;
4029    
4030     - if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
4031     + if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
4032     return sg_policy->next_freq;
4033     - sg_cpu->cached_raw_freq = freq;
4034     + sg_policy->cached_raw_freq = freq;
4035     return cpufreq_driver_resolve_freq(policy, freq);
4036     }
4037    
4038     @@ -502,25 +502,19 @@ static int sugov_start(struct cpufreq_policy *policy)
4039     sg_policy->next_freq = UINT_MAX;
4040     sg_policy->work_in_progress = false;
4041     sg_policy->need_freq_update = false;
4042     + sg_policy->cached_raw_freq = 0;
4043    
4044     for_each_cpu(cpu, policy->cpus) {
4045     struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
4046    
4047     + memset(sg_cpu, 0, sizeof(*sg_cpu));
4048     sg_cpu->sg_policy = sg_policy;
4049     - if (policy_is_shared(policy)) {
4050     - sg_cpu->util = 0;
4051     - sg_cpu->max = 0;
4052     - sg_cpu->flags = SCHED_CPUFREQ_RT;
4053     - sg_cpu->last_update = 0;
4054     - sg_cpu->cached_raw_freq = 0;
4055     - sg_cpu->iowait_boost = 0;
4056     - sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
4057     - cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
4058     - sugov_update_shared);
4059     - } else {
4060     - cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
4061     - sugov_update_single);
4062     - }
4063     + sg_cpu->flags = SCHED_CPUFREQ_RT;
4064     + sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
4065     + cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
4066     + policy_is_shared(policy) ?
4067     + sugov_update_shared :
4068     + sugov_update_single);
4069     }
4070     return 0;
4071     }
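
cached_raw_freq memoizes the last raw frequency handed to
cpufreq_driver_resolve_freq() so an unchanged input can reuse the resolved result.
That is per-policy state: stored per CPU, each CPU of a shared policy kept its own
stale copy, so the comparison could match (or miss) against a value that no longer
corresponded to sg_policy->next_freq. A hypothetical two-CPU trace of the old hazard:

    /* shared policy, old per-CPU cache:
     *   CPU0: raw 1.2 GHz -> cached on CPU0, policy switches to it
     *   CPU1: computes raw 1.2 GHz, compares against CPU1's cache
     *         (still 1.0 GHz) -> needless re-resolution; or a stale
     *         1.2 GHz hit on CPU1 skips an update the policy needs
     */

The memset() in sugov_start() also zeroes every sg_cpu field on (re)start rather
than only those the old shared-policy branch listed.
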
4072     diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
4073     index 0ecef3e4690e..5e6db6b1e3bd 100644
4074     --- a/lib/test_user_copy.c
4075     +++ b/lib/test_user_copy.c
4076     @@ -58,7 +58,9 @@ static int __init test_user_copy_init(void)
4077     usermem = (char __user *)user_addr;
4078     bad_usermem = (char *)user_addr;
4079    
4080     - /* Legitimate usage: none of these should fail. */
4081     + /*
4082     + * Legitimate usage: none of these copies should fail.
4083     + */
4084     ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
4085     "legitimate copy_from_user failed");
4086     ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
4087     @@ -68,19 +70,33 @@ static int __init test_user_copy_init(void)
4088     ret |= test(put_user(value, (unsigned long __user *)usermem),
4089     "legitimate put_user failed");
4090    
4091     - /* Invalid usage: none of these should succeed. */
4092     + /*
4093     + * Invalid usage: none of these copies should succeed.
4094     + */
4095     +
4096     + /* Reject kernel-to-kernel copies through copy_from_user(). */
4097     ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
4098     PAGE_SIZE),
4099     "illegal all-kernel copy_from_user passed");
4100     +
4101     +#if 0
4102     + /*
4103     + * When running with SMAP/PAN/etc, this will Oops the kernel
4104     + * due to the zeroing of userspace memory on failure. This needs
4105     + * to be tested in LKDTM instead, since this test module does not
4106     + * expect to explode.
4107     + */
4108     ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
4109     PAGE_SIZE),
4110     "illegal reversed copy_from_user passed");
4111     +#endif
4112     ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
4113     PAGE_SIZE),
4114     "illegal all-kernel copy_to_user passed");
4115     ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
4116     PAGE_SIZE),
4117     "illegal reversed copy_to_user passed");
4118     +
4119     ret |= test(!get_user(value, (unsigned long __user *)kmem),
4120     "illegal get_user passed");
4121     ret |= test(!put_user(value, (unsigned long __user *)kmem),
4122     diff --git a/mm/truncate.c b/mm/truncate.c
4123     index 8d8c62d89e6d..9c809e7d73c3 100644
4124     --- a/mm/truncate.c
4125     +++ b/mm/truncate.c
4126     @@ -753,7 +753,7 @@ EXPORT_SYMBOL(truncate_setsize);
4127     */
4128     void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
4129     {
4130     - int bsize = 1 << inode->i_blkbits;
4131     + int bsize = i_blocksize(inode);
4132     loff_t rounded_from;
4133     struct page *page;
4134     pgoff_t index;
4135     diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
4136     index 5a782f543aff..16b5aa9a91f1 100644
4137     --- a/net/bridge/br_stp_if.c
4138     +++ b/net/bridge/br_stp_if.c
4139     @@ -185,7 +185,8 @@ static void br_stp_start(struct net_bridge *br)
4140     br_debug(br, "using kernel STP\n");
4141    
4142     /* To start timers on any ports left in blocking */
4143     - mod_timer(&br->hello_timer, jiffies + br->hello_time);
4144     + if (br->dev->flags & IFF_UP)
4145     + mod_timer(&br->hello_timer, jiffies + br->hello_time);
4146     br_port_state_selection(br);
4147     }
4148    
4149     diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
4150     index 971b9471d427..f60fe82c2c1e 100644
4151     --- a/net/ipv4/af_inet.c
4152     +++ b/net/ipv4/af_inet.c
4153     @@ -1015,7 +1015,7 @@ static struct inet_protosw inetsw_array[] =
4154     .type = SOCK_DGRAM,
4155     .protocol = IPPROTO_ICMP,
4156     .prot = &ping_prot,
4157     - .ops = &inet_dgram_ops,
4158     + .ops = &inet_sockraw_ops,
4159     .flags = INET_PROTOSW_REUSE,
4160     },
4161    
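The ops swap matters because inet_dgram_ops routes ->poll to udp_poll(), which assumes a genuine UDP socket, while unprivileged ICMP "ping" sockets are SOCK_DGRAM without being UDP; inet_sockraw_ops differs only in using the generic datagram_poll(). A hedged userspace sketch of the trigger (needs net.ipv4.ping_group_range to admit the caller; error handling elided):

    #include <netinet/in.h>
    #include <poll.h>
    #include <sys/socket.h>

    int main(void)
    {
            /* Unprivileged ICMP datagram ("ping") socket. */
            int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
            struct pollfd p = { .fd = fd, .events = POLLIN };

            /* Before this fix, poll() here went through udp_poll(). */
            poll(&p, 1, 0);
            return 0;
    }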
4162     diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
4163     index baea5df43598..0cdbea9b9288 100644
4164     --- a/net/ipv4/tcp_cong.c
4165     +++ b/net/ipv4/tcp_cong.c
4166     @@ -179,6 +179,7 @@ void tcp_init_congestion_control(struct sock *sk)
4167     {
4168     const struct inet_connection_sock *icsk = inet_csk(sk);
4169    
4170     + tcp_sk(sk)->prior_ssthresh = 0;
4171     if (icsk->icsk_ca_ops->init)
4172     icsk->icsk_ca_ops->init(sk);
4173     if (tcp_ca_needs_ecn(sk))
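Resetting prior_ssthresh here matters because congestion control can be switched on a live socket (e.g. via setsockopt(TCP_CONGESTION)); prior_ssthresh is a snapshot taken before a loss episode, and the undo path restores from it. A hedged paraphrase of the 4.9 undo logic, not a verbatim quote:

    /* Sketch: on a spurious-loss undo the stack does approximately this,
     * so a snapshot left over from the previous CC module could be
     * replayed under the new one; zeroing it at init disarms that. */
    if (tp->prior_ssthresh) {
            tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
            if (tp->prior_ssthresh > tp->snd_ssthresh)
                    tp->snd_ssthresh = tp->prior_ssthresh;
    }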
4174     diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
4175     index 37ac9de713c6..8d772fea1dde 100644
4176     --- a/net/ipv6/calipso.c
4177     +++ b/net/ipv6/calipso.c
4178     @@ -1319,7 +1319,7 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
4179     struct ipv6hdr *ip6_hdr;
4180     struct ipv6_opt_hdr *hop;
4181     unsigned char buf[CALIPSO_MAX_BUFFER];
4182     - int len_delta, new_end, pad;
4183     + int len_delta, new_end, pad, payload;
4184     unsigned int start, end;
4185    
4186     ip6_hdr = ipv6_hdr(skb);
4187     @@ -1346,6 +1346,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
4188     if (ret_val < 0)
4189     return ret_val;
4190    
4191     + ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */
4192     +
4193     if (len_delta) {
4194     if (len_delta > 0)
4195     skb_push(skb, len_delta);
4196     @@ -1355,6 +1357,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
4197     sizeof(*ip6_hdr) + start);
4198     skb_reset_network_header(skb);
4199     ip6_hdr = ipv6_hdr(skb);
4200     + payload = ntohs(ip6_hdr->payload_len);
4201     + ip6_hdr->payload_len = htons(payload + len_delta);
4202     }
4203    
4204     hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);
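Two fixes ride in these hunks: ip6_hdr is re-read because skb_cow() may have reallocated the packet head, and payload_len is adjusted because the IPv6 payload length counts everything after the fixed 40-byte header, so growing or shrinking the options block by len_delta bytes must be reflected there. A runnable model of just the length arithmetic, in wire order via htons/ntohs:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t payload_len = htons(64); /* 64 bytes follow the fixed header */
            int len_delta = 8;                /* CALIPSO grew the options by 8 */

            int payload = ntohs(payload_len);
            payload_len = htons(payload + len_delta);

            printf("payload_len is now %u\n", ntohs(payload_len)); /* 72 */
            return 0;
    }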
4205     diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
4206     index 013086b248e2..424fbe1f8978 100644
4207     --- a/net/ipv6/ip6_offload.c
4208     +++ b/net/ipv6/ip6_offload.c
4209     @@ -116,8 +116,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
4210    
4211     if (udpfrag) {
4212     int err = ip6_find_1stfragopt(skb, &prevhdr);
4213     - if (err < 0)
4214     + if (err < 0) {
4215     + kfree_skb_list(segs);
4216     return ERR_PTR(err);
4217     + }
4218     fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
4219     fptr->frag_off = htons(offset);
4220     if (skb->next)
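Without the added kfree_skb_list(), returning ERR_PTR() after segmentation has already produced 'segs' leaks the entire segment chain. A userspace model of the shape of that cleanup (the real kfree_skb_list() also honours skb reference counts):

    #include <stdlib.h>

    struct sk_buff { struct sk_buff *next; };

    /* Model: release a whole singly linked segment list on an error path. */
    static void kfree_skb_list_model(struct sk_buff *segs)
    {
            while (segs) {
                    struct sk_buff *next = segs->next;

                    free(segs);
                    segs = next;
            }
    }

    int main(void)
    {
            struct sk_buff *a = calloc(1, sizeof(*a));

            if (a)
                    a->next = calloc(1, sizeof(*a));
            kfree_skb_list_model(a);
            return 0;
    }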
4221     diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
4222     index 66e2d9dfc43a..982868193dbb 100644
4223     --- a/net/ipv6/ping.c
4224     +++ b/net/ipv6/ping.c
4225     @@ -198,7 +198,7 @@ static struct inet_protosw pingv6_protosw = {
4226     .type = SOCK_DGRAM,
4227     .protocol = IPPROTO_ICMPV6,
4228     .prot = &pingv6_prot,
4229     - .ops = &inet6_dgram_ops,
4230     + .ops = &inet6_sockraw_ops,
4231     .flags = INET_PROTOSW_REUSE,
4232     };
4233    
4234     diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
4235     index 1a2fe5c3a366..71ffa526cb23 100644
4236     --- a/net/ipv6/raw.c
4237     +++ b/net/ipv6/raw.c
4238     @@ -1330,7 +1330,7 @@ void raw6_proc_exit(void)
4239     #endif /* CONFIG_PROC_FS */
4240    
4241     /* Same as inet6_dgram_ops, sans udp_poll. */
4242     -static const struct proto_ops inet6_sockraw_ops = {
4243     +const struct proto_ops inet6_sockraw_ops = {
4244     .family = PF_INET6,
4245     .owner = THIS_MODULE,
4246     .release = inet6_release,
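Dropping the static here pairs with the ping.c hunk above: pingv6 now reuses this table, which is identical to inet6_dgram_ops except that ->poll is the generic datagram_poll() rather than udp_poll(). The matching declaration is added in a header outside this excerpt, presumably along the lines of:

    /* Declaration as added elsewhere in this series (exact header assumed). */
    extern const struct proto_ops inet6_sockraw_ops;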
4247     diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c
4248     index 0e015906f9ca..07d36573f50b 100644
4249     --- a/net/ipv6/xfrm6_mode_ro.c
4250     +++ b/net/ipv6/xfrm6_mode_ro.c
4251     @@ -47,6 +47,8 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
4252     iph = ipv6_hdr(skb);
4253    
4254     hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
4255     + if (hdr_len < 0)
4256     + return hdr_len;
4257     skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
4258     skb_set_network_header(skb, -x->props.header_len);
4259     skb->transport_header = skb->network_header + hdr_len;
4260     diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
4261     index 4e344105b3fd..1d3bbe6e1183 100644
4262     --- a/net/ipv6/xfrm6_mode_transport.c
4263     +++ b/net/ipv6/xfrm6_mode_transport.c
4264     @@ -28,6 +28,8 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
4265     iph = ipv6_hdr(skb);
4266    
4267     hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
4268     + if (hdr_len < 0)
4269     + return hdr_len;
4270     skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
4271     skb_set_network_header(skb, -x->props.header_len);
4272     skb->transport_header = skb->network_header + hdr_len;
4273     diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
4274     index 36493a7cae88..93820e0d8814 100644
4275     --- a/net/netfilter/nft_set_rbtree.c
4276     +++ b/net/netfilter/nft_set_rbtree.c
4277     @@ -118,17 +118,17 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
4278     else if (d > 0)
4279     p = &parent->rb_right;
4280     else {
4281     - if (nft_set_elem_active(&rbe->ext, genmask)) {
4282     - if (nft_rbtree_interval_end(rbe) &&
4283     - !nft_rbtree_interval_end(new))
4284     - p = &parent->rb_left;
4285     - else if (!nft_rbtree_interval_end(rbe) &&
4286     - nft_rbtree_interval_end(new))
4287     - p = &parent->rb_right;
4288     - else {
4289     - *ext = &rbe->ext;
4290     - return -EEXIST;
4291     - }
4292     + if (nft_rbtree_interval_end(rbe) &&
4293     + !nft_rbtree_interval_end(new)) {
4294     + p = &parent->rb_left;
4295     + } else if (!nft_rbtree_interval_end(rbe) &&
4296     + nft_rbtree_interval_end(new)) {
4297     + p = &parent->rb_right;
4298     + } else if (nft_set_elem_active(&rbe->ext, genmask)) {
4299     + *ext = &rbe->ext;
4300     + return -EEXIST;
4301     + } else {
4302     + p = &parent->rb_left;
4303     }
4304     }
4305     }
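The rewrite hoists the interval-end/interval-start comparison out of the genmask test and only reports -EEXIST for an element that is active in the current generation; an equal key of the same kind that belongs to a stale generation now descends left, so old and new elements can coexist until the transaction commits. A hedged summary of the new ladder for an equal key (d == 0):

    /*
     * existing is interval-end, new is interval-start -> descend left
     * existing is interval-start, new is interval-end -> descend right
     * same kind, existing active in this generation   -> -EEXIST
     * same kind, existing inactive (stale generation) -> descend left
     */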
4306     diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
4307     index 17a06105ccb6..56c458dd16a2 100644
4308     --- a/security/keys/encrypted-keys/encrypted.c
4309     +++ b/security/keys/encrypted-keys/encrypted.c
4310     @@ -480,12 +480,9 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
4311     struct skcipher_request *req;
4312     unsigned int encrypted_datalen;
4313     u8 iv[AES_BLOCK_SIZE];
4314     - unsigned int padlen;
4315     - char pad[16];
4316     int ret;
4317    
4318     encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
4319     - padlen = encrypted_datalen - epayload->decrypted_datalen;
4320    
4321     req = init_skcipher_req(derived_key, derived_keylen);
4322     ret = PTR_ERR(req);
4323     @@ -493,11 +490,10 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
4324     goto out;
4325     dump_decrypted_data(epayload);
4326    
4327     - memset(pad, 0, sizeof pad);
4328     sg_init_table(sg_in, 2);
4329     sg_set_buf(&sg_in[0], epayload->decrypted_data,
4330     epayload->decrypted_datalen);
4331     - sg_set_buf(&sg_in[1], pad, padlen);
4332     + sg_set_page(&sg_in[1], ZERO_PAGE(0), AES_BLOCK_SIZE, 0);
4333    
4334     sg_init_table(sg_out, 1);
4335     sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
4336     @@ -584,9 +580,14 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
4337     struct skcipher_request *req;
4338     unsigned int encrypted_datalen;
4339     u8 iv[AES_BLOCK_SIZE];
4340     - char pad[16];
4341     + u8 *pad;
4342     int ret;
4343    
4344     + /* Throwaway buffer to hold the unused zero padding at the end */
4345     + pad = kmalloc(AES_BLOCK_SIZE, GFP_KERNEL);
4346     + if (!pad)
4347     + return -ENOMEM;
4348     +
4349     encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
4350     req = init_skcipher_req(derived_key, derived_keylen);
4351     ret = PTR_ERR(req);
4352     @@ -594,13 +595,12 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
4353     goto out;
4354     dump_encrypted_data(epayload, encrypted_datalen);
4355    
4356     - memset(pad, 0, sizeof pad);
4357     sg_init_table(sg_in, 1);
4358     sg_init_table(sg_out, 2);
4359     sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen);
4360     sg_set_buf(&sg_out[0], epayload->decrypted_data,
4361     epayload->decrypted_datalen);
4362     - sg_set_buf(&sg_out[1], pad, sizeof pad);
4363     + sg_set_buf(&sg_out[1], pad, AES_BLOCK_SIZE);
4364    
4365     memcpy(iv, epayload->iv, sizeof(iv));
4366     skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
4367     @@ -612,6 +612,7 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
4368     goto out;
4369     dump_decrypted_data(epayload);
4370     out:
4371     + kfree(pad);
4372     return ret;
4373     }
4374    
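The common thread in these hunks is that on-stack buffers must not be handed to the scatterlist API: sg_set_buf() resolves its pointer through virt_to_page(), which is only valid for linearly mapped memory, and with CONFIG_VMAP_STACK (introduced in 4.9) the kernel stack is vmalloc'd. ZERO_PAGE(0) therefore supplies the all-zero padding on encrypt, and decrypt writes its unused padding into a kmalloc'd throwaway buffer. The broken-versus-fixed contrast, illustrative only:

    u8 pad_on_stack[AES_BLOCK_SIZE];

    /* Broken under VMAP_STACK: virt_to_page() on a vmalloc'd address. */
    sg_set_buf(&sg, pad_on_stack, AES_BLOCK_SIZE);

    /* Safe source of zeroes: a real, linearly mapped page. */
    sg_set_page(&sg, ZERO_PAGE(0), AES_BLOCK_SIZE, 0);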
4375     diff --git a/security/keys/key.c b/security/keys/key.c
4376     index 346fbf201c22..2f4ce35ae2aa 100644
4377     --- a/security/keys/key.c
4378     +++ b/security/keys/key.c
4379     @@ -962,12 +962,11 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
4380     /* the key must be writable */
4381     ret = key_permission(key_ref, KEY_NEED_WRITE);
4382     if (ret < 0)
4383     - goto error;
4384     + return ret;
4385    
4386     /* attempt to update it if supported */
4387     - ret = -EOPNOTSUPP;
4388     if (!key->type->update)
4389     - goto error;
4390     + return -EOPNOTSUPP;
4391    
4392     memset(&prep, 0, sizeof(prep));
4393     prep.data = payload;
4394     diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
4395     index dbbfd7735ce5..ada12c3e3ac4 100644
4396     --- a/security/keys/keyctl.c
4397     +++ b/security/keys/keyctl.c
4398     @@ -97,7 +97,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
4399     /* pull the payload in if one was supplied */
4400     payload = NULL;
4401    
4402     - if (_payload) {
4403     + if (plen) {
4404     ret = -ENOMEM;
4405     payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
4406     if (!payload) {
4407     @@ -327,7 +327,7 @@ long keyctl_update_key(key_serial_t id,
4408    
4409     /* pull the payload in if one was supplied */
4410     payload = NULL;
4411     - if (_payload) {
4412     + if (plen) {
4413     ret = -ENOMEM;
4414     payload = kmalloc(plen, GFP_KERNEL);
4415     if (!payload)
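Testing plen instead of the pointer closes a NULL dereference: _payload == NULL with plen > 0 used to skip the allocation entirely, so key-type parsers saw a nonzero datalen alongside a NULL data pointer. Keyed on plen, the copy_from_user() from a NULL user pointer now simply fails with -EFAULT. A hedged userspace repro sketch (on a fixed kernel this just gets an error back):

    #include <stddef.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define KEY_SPEC_SESSION_KEYRING -3

    int main(void)
    {
            /* NULL payload with nonzero length: used to oops, now -EFAULT. */
            long ret = syscall(SYS_add_key, "user", "repro", NULL,
                               (size_t)1, KEY_SPEC_SESSION_KEYRING);
            return ret < 0 ? 0 : 1;
    }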
4416     diff --git a/sound/core/timer.c b/sound/core/timer.c
4417     index ad153149b231..e5ddc475dca4 100644
4418     --- a/sound/core/timer.c
4419     +++ b/sound/core/timer.c
4420     @@ -1622,6 +1622,7 @@ static int snd_timer_user_tselect(struct file *file,
4421     if (err < 0)
4422     goto __err;
4423    
4424     + tu->qhead = tu->qtail = tu->qused = 0;
4425     kfree(tu->queue);
4426     tu->queue = NULL;
4427     kfree(tu->tqueue);
4428     @@ -1963,6 +1964,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
4429    
4430     tu = file->private_data;
4431     unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
4432     + mutex_lock(&tu->ioctl_lock);
4433     spin_lock_irq(&tu->qlock);
4434     while ((long)count - result >= unit) {
4435     while (!tu->qused) {
4436     @@ -1978,7 +1980,9 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
4437     add_wait_queue(&tu->qchange_sleep, &wait);
4438    
4439     spin_unlock_irq(&tu->qlock);
4440     + mutex_unlock(&tu->ioctl_lock);
4441     schedule();
4442     + mutex_lock(&tu->ioctl_lock);
4443     spin_lock_irq(&tu->qlock);
4444    
4445     remove_wait_queue(&tu->qchange_sleep, &wait);
4446     @@ -1998,7 +2002,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
4447     tu->qused--;
4448     spin_unlock_irq(&tu->qlock);
4449    
4450     - mutex_lock(&tu->ioctl_lock);
4451     if (tu->tread) {
4452     if (copy_to_user(buffer, &tu->tqueue[qhead],
4453     sizeof(struct snd_timer_tread)))
4454     @@ -2008,7 +2011,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
4455     sizeof(struct snd_timer_read)))
4456     err = -EFAULT;
4457     }
4458     - mutex_unlock(&tu->ioctl_lock);
4459    
4460     spin_lock_irq(&tu->qlock);
4461     if (err < 0)
4462     @@ -2018,6 +2020,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
4463     }
4464     _error:
4465     spin_unlock_irq(&tu->qlock);
4466     + mutex_unlock(&tu->ioctl_lock);
4467     return result > 0 ? result : err;
4468     }
4469    
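Both timer.c changes close one race: snd_timer_user_tselect() frees and reallocates tu->queue/tu->tqueue, so a reader copying out of those buffers could use freed memory, and stale qhead/qtail/qused indices could then index the smaller replacement queue. The read side now holds ioctl_lock for its whole duration, and tselect resets the indices before freeing. The one subtlety is the sleep: both locks are dropped around schedule() so a blocked reader cannot stall a concurrent ioctl, then retaken mutex-first to preserve lock order:

    spin_unlock_irq(&tu->qlock);
    mutex_unlock(&tu->ioctl_lock);
    schedule();
    mutex_lock(&tu->ioctl_lock);
    spin_lock_irq(&tu->qlock);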
4470     diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
4471     index c0bbcd903261..4e3de566809c 100644
4472     --- a/sound/soc/soc-core.c
4473     +++ b/sound/soc/soc-core.c
4474     @@ -2076,6 +2076,9 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
4475     list_for_each_entry(rtd, &card->rtd_list, list)
4476     flush_delayed_work(&rtd->delayed_work);
4477    
4478     + /* free the ALSA card first; this syncs with pending operations */
4479     + snd_card_free(card->snd_card);
4480     +
4481     /* remove and free each DAI */
4482     soc_remove_dai_links(card);
4483     soc_remove_pcm_runtimes(card);
4484     @@ -2090,9 +2093,7 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
4485     if (card->remove)
4486     card->remove(card);
4487    
4488     - snd_card_free(card->snd_card);
4489     return 0;
4490     -
4491     }
4492    
4493     /* removes a socdev */
4494     diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
4495     index 834137e7b83f..1ab58f7b5d74 100644
4496     --- a/virt/kvm/arm/vgic/vgic-v2.c
4497     +++ b/virt/kvm/arm/vgic/vgic-v2.c
4498     @@ -168,6 +168,13 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
4499     if (irq->hw) {
4500     val |= GICH_LR_HW;
4501     val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
4502     + /*
4503     + * Never set pending+active on a HW interrupt, as the
4504     + * pending state is kept at the physical distributor
4505     + * level.
4506     + */
4507     + if (irq->active && irq->pending)
4508     + val &= ~GICH_LR_PENDING_BIT;
4509     } else {
4510     if (irq->config == VGIC_CONFIG_LEVEL)
4511     val |= GICH_LR_EOI;
4512     diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
4513     index e6b03fd8c374..f1320063db28 100644
4514     --- a/virt/kvm/arm/vgic/vgic-v3.c
4515     +++ b/virt/kvm/arm/vgic/vgic-v3.c
4516     @@ -151,6 +151,13 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
4517     if (irq->hw) {
4518     val |= ICH_LR_HW;
4519     val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
4520     + /*
4521     + * Never set pending+active on a HW interrupt, as the
4522     + * pending state is kept at the physical distributor
4523     + * level.
4524     + */
4525     + if (irq->active && irq->pending)
4526     + val &= ~ICH_LR_PENDING_BIT;
4527     } else {
4528     if (irq->config == VGIC_CONFIG_LEVEL)
4529     val |= ICH_LR_EOI;