Magellan Linux

Annotation of /trunk/kernel-alx/patches-3.10/0167-3.10.68-all-fixes.patch



Revision 2655
Tue Jul 21 16:20:22 2015 UTC by niro
File size: 73928 bytes
-linux-3.10.68
1 niro 2655 diff --git a/Makefile b/Makefile
2     index 7c6711fa3c3f..dd67be657716 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 3
7     PATCHLEVEL = 10
8     -SUBLEVEL = 67
9     +SUBLEVEL = 68
10     EXTRAVERSION =
11     NAME = TOSSUG Baby Fish
12    
13     diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
14     index da1c77d39327..9ee7e01066f9 100644
15     --- a/arch/arm/include/asm/atomic.h
16     +++ b/arch/arm/include/asm/atomic.h
17     @@ -114,7 +114,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
18    
19     static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
20     {
21     - unsigned long oldval, res;
22     + int oldval;
23     + unsigned long res;
24    
25     smp_mb();
26    
27     @@ -238,15 +239,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
28    
29     #ifndef CONFIG_GENERIC_ATOMIC64
30     typedef struct {
31     - u64 __aligned(8) counter;
32     + long long counter;
33     } atomic64_t;
34    
35     #define ATOMIC64_INIT(i) { (i) }
36    
37     #ifdef CONFIG_ARM_LPAE
38     -static inline u64 atomic64_read(const atomic64_t *v)
39     +static inline long long atomic64_read(const atomic64_t *v)
40     {
41     - u64 result;
42     + long long result;
43    
44     __asm__ __volatile__("@ atomic64_read\n"
45     " ldrd %0, %H0, [%1]"
46     @@ -257,7 +258,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
47     return result;
48     }
49    
50     -static inline void atomic64_set(atomic64_t *v, u64 i)
51     +static inline void atomic64_set(atomic64_t *v, long long i)
52     {
53     __asm__ __volatile__("@ atomic64_set\n"
54     " strd %2, %H2, [%1]"
55     @@ -266,9 +267,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
56     );
57     }
58     #else
59     -static inline u64 atomic64_read(const atomic64_t *v)
60     +static inline long long atomic64_read(const atomic64_t *v)
61     {
62     - u64 result;
63     + long long result;
64    
65     __asm__ __volatile__("@ atomic64_read\n"
66     " ldrexd %0, %H0, [%1]"
67     @@ -279,9 +280,9 @@ static inline u64 atomic64_read(const atomic64_t *v)
68     return result;
69     }
70    
71     -static inline void atomic64_set(atomic64_t *v, u64 i)
72     +static inline void atomic64_set(atomic64_t *v, long long i)
73     {
74     - u64 tmp;
75     + long long tmp;
76    
77     __asm__ __volatile__("@ atomic64_set\n"
78     "1: ldrexd %0, %H0, [%2]\n"
79     @@ -294,9 +295,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
80     }
81     #endif
82    
83     -static inline void atomic64_add(u64 i, atomic64_t *v)
84     +static inline void atomic64_add(long long i, atomic64_t *v)
85     {
86     - u64 result;
87     + long long result;
88     unsigned long tmp;
89    
90     __asm__ __volatile__("@ atomic64_add\n"
91     @@ -311,9 +312,9 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
92     : "cc");
93     }
94    
95     -static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
96     +static inline long long atomic64_add_return(long long i, atomic64_t *v)
97     {
98     - u64 result;
99     + long long result;
100     unsigned long tmp;
101    
102     smp_mb();
103     @@ -334,9 +335,9 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
104     return result;
105     }
106    
107     -static inline void atomic64_sub(u64 i, atomic64_t *v)
108     +static inline void atomic64_sub(long long i, atomic64_t *v)
109     {
110     - u64 result;
111     + long long result;
112     unsigned long tmp;
113    
114     __asm__ __volatile__("@ atomic64_sub\n"
115     @@ -351,9 +352,9 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
116     : "cc");
117     }
118    
119     -static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
120     +static inline long long atomic64_sub_return(long long i, atomic64_t *v)
121     {
122     - u64 result;
123     + long long result;
124     unsigned long tmp;
125    
126     smp_mb();
127     @@ -374,9 +375,10 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
128     return result;
129     }
130    
131     -static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
132     +static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
133     + long long new)
134     {
135     - u64 oldval;
136     + long long oldval;
137     unsigned long res;
138    
139     smp_mb();
140     @@ -398,9 +400,9 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
141     return oldval;
142     }
143    
144     -static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
145     +static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
146     {
147     - u64 result;
148     + long long result;
149     unsigned long tmp;
150    
151     smp_mb();
152     @@ -419,9 +421,9 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
153     return result;
154     }
155    
156     -static inline u64 atomic64_dec_if_positive(atomic64_t *v)
157     +static inline long long atomic64_dec_if_positive(atomic64_t *v)
158     {
159     - u64 result;
160     + long long result;
161     unsigned long tmp;
162    
163     smp_mb();
164     @@ -445,9 +447,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
165     return result;
166     }
167    
168     -static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
169     +static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
170     {
171     - u64 val;
172     + long long val;
173     unsigned long tmp;
174     int ret = 1;
175    
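The u64 -> long long changes above bring the ARM atomic64 helpers in line with the signed 64-bit (long long) type used by the generic atomic64_t API; signedness matters once a result is compared against zero, as atomic64_dec_if_positive() does. A minimal plain-C sketch of that difference (illustration only, not the kernel's atomic code):

    #include <stdio.h>
    #include <stdint.h>

    /* Non-atomic model of dec-if-positive: the "went negative" test
     * only works if the counter type is signed. */
    static long long dec_if_positive(long long *v)
    {
        long long val = *v - 1;
        if (val >= 0)
            *v = val;
        return val;
    }

    int main(void)
    {
        long long s = 0;
        uint64_t u = 0;

        printf("signed:   %lld\n", dec_if_positive(&s));         /* -1, s left at 0 */
        printf("unsigned: %llu\n", (unsigned long long)(u - 1)); /* wraps to 2^64-1 */
        return 0;
    }
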
176     diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
177     index 57870ab313c5..d847cbbcee45 100644
178     --- a/arch/arm/include/asm/memory.h
179     +++ b/arch/arm/include/asm/memory.h
180     @@ -98,23 +98,19 @@
181     #define TASK_UNMAPPED_BASE UL(0x00000000)
182     #endif
183    
184     -#ifndef PHYS_OFFSET
185     -#define PHYS_OFFSET UL(CONFIG_DRAM_BASE)
186     -#endif
187     -
188     #ifndef END_MEM
189     #define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
190     #endif
191    
192     #ifndef PAGE_OFFSET
193     -#define PAGE_OFFSET (PHYS_OFFSET)
194     +#define PAGE_OFFSET PLAT_PHYS_OFFSET
195     #endif
196    
197     /*
198     * The module can be at any place in ram in nommu mode.
199     */
200     #define MODULES_END (END_MEM)
201     -#define MODULES_VADDR (PHYS_OFFSET)
202     +#define MODULES_VADDR PAGE_OFFSET
203    
204     #define XIP_VIRT_ADDR(physaddr) (physaddr)
205    
206     @@ -141,6 +137,16 @@
207     #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
208     #define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
209    
210     +/*
211     + * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
212     + * memory. This is used for XIP and NoMMU kernels, or by kernels which
213     + * have their own mach/memory.h. Assembly code must always use
214     + * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
215     + */
216     +#ifndef PLAT_PHYS_OFFSET
217     +#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
218     +#endif
219     +
220     #ifndef __ASSEMBLY__
221    
222     /*
223     @@ -183,22 +189,15 @@ static inline unsigned long __phys_to_virt(unsigned long x)
224     return t;
225     }
226     #else
227     +
228     +#define PHYS_OFFSET PLAT_PHYS_OFFSET
229     +
230     #define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
231     #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
232     -#endif
233     -#endif
234     -#endif /* __ASSEMBLY__ */
235    
236     -#ifndef PHYS_OFFSET
237     -#ifdef PLAT_PHYS_OFFSET
238     -#define PHYS_OFFSET PLAT_PHYS_OFFSET
239     -#else
240     -#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
241     #endif
242     #endif
243    
244     -#ifndef __ASSEMBLY__
245     -
246     /*
247     * PFNs are used to describe any physical page; this means
248     * PFN 0 == physical address 0.
249     @@ -207,7 +206,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
250     * direct-mapped view. We assume this is the first page
251     * of RAM in the mem_map as well.
252     */
253     -#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
254     +#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
255    
256     /*
257     * These are *only* valid on the kernel direct mapped RAM memory.
258     @@ -275,7 +274,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
259     #define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
260    
261     #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
262     -#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
263     +#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
264     + && pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
265    
266     #endif
267    
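For orientation, PHYS_OFFSET and PAGE_OFFSET define the linear translation between kernel virtual and physical addresses that the reshuffled #ifdef blocks above feed. A small sketch with made-up constants (real kernels take PLAT_PHYS_OFFSET from CONFIG_PHYS_OFFSET or a mach/memory.h):

    #include <stdio.h>

    #define PLAT_PHYS_OFFSET 0x80000000UL /* illustrative start of RAM */
    #define PAGE_OFFSET      0xC0000000UL /* illustrative kernel virtual base */
    #define PHYS_OFFSET      PLAT_PHYS_OFFSET

    /* The same linear +/- offset the header implements. */
    static unsigned long virt_to_phys(unsigned long x) { return x - PAGE_OFFSET + PHYS_OFFSET; }
    static unsigned long phys_to_virt(unsigned long x) { return x - PHYS_OFFSET + PAGE_OFFSET; }

    int main(void)
    {
        unsigned long va = PAGE_OFFSET + 0x1000;
        unsigned long pa = virt_to_phys(va);

        printf("va 0x%lx -> pa 0x%lx -> va 0x%lx\n", va, pa, phys_to_virt(pa));
        return 0;
    }
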
268     diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
269     index 0d3a28dbc8e5..ed690c49ef93 100644
270     --- a/arch/arm/include/asm/module.h
271     +++ b/arch/arm/include/asm/module.h
272     @@ -12,6 +12,8 @@ enum {
273     ARM_SEC_CORE,
274     ARM_SEC_EXIT,
275     ARM_SEC_DEVEXIT,
276     + ARM_SEC_HOT,
277     + ARM_SEC_UNLIKELY,
278     ARM_SEC_MAX,
279     };
280    
281     diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
282     index cbdc7a21f869..4355f0ec44d6 100644
283     --- a/arch/arm/include/asm/page.h
284     +++ b/arch/arm/include/asm/page.h
285     @@ -13,7 +13,7 @@
286     /* PAGE_SHIFT determines the page size */
287     #define PAGE_SHIFT 12
288     #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
289     -#define PAGE_MASK (~(PAGE_SIZE-1))
290     +#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
291    
292     #ifndef __ASSEMBLY__
293    
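The PAGE_MASK rewrite above is presumably about integer promotion on LPAE: a mask built from a 32-bit unsigned long zero-extends when applied to a 64-bit physical address and silently drops the high bits, while ~((1 << PAGE_SHIFT) - 1) is a signed int that sign-extends. A userspace demonstration, simulating ARM's 32-bit unsigned long with uint32_t:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    /* uint32_t stands in for ARM's 32-bit unsigned long. */
    #define OLD_PAGE_MASK (~(uint32_t)((1u << PAGE_SHIFT) - 1)) /* 0xfffff000, zero-extends */
    #define NEW_PAGE_MASK (~((1 << PAGE_SHIFT) - 1))            /* int -4096, sign-extends  */

    int main(void)
    {
        uint64_t phys = 0x100003456ULL; /* above 4GB, reachable with LPAE */

        printf("old: 0x%llx\n", (unsigned long long)(phys & OLD_PAGE_MASK)); /* 0x3000: top bits lost */
        printf("new: 0x%llx\n", (unsigned long long)(phys & NEW_PAGE_MASK)); /* 0x100003000           */
        return 0;
    }
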
294     diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
295     index 18f5cef82ad5..add785b1ec0a 100644
296     --- a/arch/arm/include/asm/pgtable-3level-hwdef.h
297     +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
298     @@ -68,6 +68,7 @@
299     #define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0)
300     #define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
301     #define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
302     +#define PTE_AP2 (_AT(pteval_t, 1) << 7) /* AP[2] */
303     #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
304     #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
305     #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
306     diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
307     index 86b8fe398b95..bb017328c5bd 100644
308     --- a/arch/arm/include/asm/pgtable-3level.h
309     +++ b/arch/arm/include/asm/pgtable-3level.h
310     @@ -33,7 +33,7 @@
311     #define PTRS_PER_PMD 512
312     #define PTRS_PER_PGD 4
313    
314     -#define PTE_HWTABLE_PTRS (PTRS_PER_PTE)
315     +#define PTE_HWTABLE_PTRS (0)
316     #define PTE_HWTABLE_OFF (0)
317     #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64))
318    
319     @@ -48,16 +48,16 @@
320     #define PMD_SHIFT 21
321    
322     #define PMD_SIZE (1UL << PMD_SHIFT)
323     -#define PMD_MASK (~(PMD_SIZE-1))
324     +#define PMD_MASK (~((1 << PMD_SHIFT) - 1))
325     #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
326     -#define PGDIR_MASK (~(PGDIR_SIZE-1))
327     +#define PGDIR_MASK (~((1 << PGDIR_SHIFT) - 1))
328    
329     /*
330     * section address mask and size definitions.
331     */
332     #define SECTION_SHIFT 21
333     #define SECTION_SIZE (1UL << SECTION_SHIFT)
334     -#define SECTION_MASK (~(SECTION_SIZE-1))
335     +#define SECTION_MASK (~((1 << SECTION_SHIFT) - 1))
336    
337     #define USER_PTRS_PER_PGD (PAGE_OFFSET / PGDIR_SIZE)
338    
339     @@ -71,13 +71,13 @@
340     #define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Present */
341     #define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
342     #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
343     -#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
344     #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
345     #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
346     #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
347     -#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
348     -#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
349     +#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
350     +#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
351     #define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */
352     +#define L_PTE_RDONLY (_AT(pteval_t, 1) << 58) /* READ ONLY */
353    
354     /*
355     * To be used in assembly code with the upper page attributes.
356     @@ -166,6 +166,23 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
357     clean_pmd_entry(pmdp); \
358     } while (0)
359    
360     +/*
361     + * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
362     + * that are written to a page table but not for ptes created with mk_pte.
363     + *
364     + * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
365     + * hugetlb_cow, where it is compared with an entry in a page table.
366     + * This comparison test fails erroneously leading ultimately to a memory leak.
367     + *
368     + * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
369     + * present before running the comparison.
370     + */
371     +#define __HAVE_ARCH_PTE_SAME
372     +#define pte_same(pte_a,pte_b) ((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG \
373     + : pte_val(pte_a)) \
374     + == (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG \
375     + : pte_val(pte_b)))
376     +
377     #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
378    
379     #endif /* __ASSEMBLY__ */
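The comment block in the hunk above explains the PTE_EXT_NG problem that motivates the pte_same() override. As a rough userspace model of what the masking does (bit values taken from the definitions in this patch; everything else simplified):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t pteval_t;

    #define L_PTE_PRESENT (3ULL << 0)
    #define PTE_EXT_NG    (1ULL << 11) /* nG: set when written to a table, not by mk_pte */

    static int pte_present(pteval_t pte) { return (pte & L_PTE_PRESENT) != 0; }

    /* Model of the overridden pte_same(): ignore nG on present entries
     * so a freshly built pte compares equal to the stored one. */
    static int pte_same(pteval_t a, pteval_t b)
    {
        if (pte_present(a)) a &= ~PTE_EXT_NG;
        if (pte_present(b)) b &= ~PTE_EXT_NG;
        return a == b;
    }

    int main(void)
    {
        pteval_t built  = L_PTE_PRESENT;              /* as from mk_pte() */
        pteval_t stored = L_PTE_PRESENT | PTE_EXT_NG; /* as written to the table */

        printf("pte_same: %d\n", pte_same(built, stored)); /* 1 with the masking */
        return 0;
    }
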
380     diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
381     index 5aac06fcc97e..4043d7f4bc92 100644
382     --- a/arch/arm/include/asm/pgtable.h
383     +++ b/arch/arm/include/asm/pgtable.h
384     @@ -211,12 +211,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
385    
386     #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
387    
388     +#define pte_isset(pte, val) ((u32)(val) == (val) ? pte_val(pte) & (val) \
389     + : !!(pte_val(pte) & (val)))
390     +#define pte_isclear(pte, val) (!(pte_val(pte) & (val)))
391     +
392     #define pte_none(pte) (!pte_val(pte))
393     -#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
394     -#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
395     -#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
396     -#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
397     -#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
398     +#define pte_present(pte) (pte_isset((pte), L_PTE_PRESENT))
399     +#define pte_write(pte) (pte_isclear((pte), L_PTE_RDONLY))
400     +#define pte_dirty(pte) (pte_isset((pte), L_PTE_DIRTY))
401     +#define pte_young(pte) (pte_isset((pte), L_PTE_YOUNG))
402     +#define pte_exec(pte) (pte_isclear((pte), L_PTE_XN))
403     #define pte_special(pte) (0)
404    
405     #define pte_present_user(pte) (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
406     diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
407     index 8bac553fe213..f935b5f651f0 100644
408     --- a/arch/arm/kernel/head.S
409     +++ b/arch/arm/kernel/head.S
410     @@ -109,7 +109,7 @@ ENTRY(stext)
411     sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
412     add r8, r8, r4 @ PHYS_OFFSET
413     #else
414     - ldr r8, =PHYS_OFFSET @ always constant in this case
415     + ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case
416     #endif
417    
418     /*
419     diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
420     index 1e9be5d25e56..af60478f54d0 100644
421     --- a/arch/arm/kernel/module.c
422     +++ b/arch/arm/kernel/module.c
423     @@ -296,6 +296,10 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
424     maps[ARM_SEC_EXIT].unw_sec = s;
425     else if (strcmp(".ARM.exidx.devexit.text", secname) == 0)
426     maps[ARM_SEC_DEVEXIT].unw_sec = s;
427     + else if (strcmp(".ARM.exidx.text.unlikely", secname) == 0)
428     + maps[ARM_SEC_UNLIKELY].unw_sec = s;
429     + else if (strcmp(".ARM.exidx.text.hot", secname) == 0)
430     + maps[ARM_SEC_HOT].unw_sec = s;
431     else if (strcmp(".init.text", secname) == 0)
432     maps[ARM_SEC_INIT].txt_sec = s;
433     else if (strcmp(".devinit.text", secname) == 0)
434     @@ -306,6 +310,10 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
435     maps[ARM_SEC_EXIT].txt_sec = s;
436     else if (strcmp(".devexit.text", secname) == 0)
437     maps[ARM_SEC_DEVEXIT].txt_sec = s;
438     + else if (strcmp(".text.unlikely", secname) == 0)
439     + maps[ARM_SEC_UNLIKELY].txt_sec = s;
440     + else if (strcmp(".text.hot", secname) == 0)
441     + maps[ARM_SEC_HOT].txt_sec = s;
442     }
443    
444     for (i = 0; i < ARM_SEC_MAX; i++)
445     diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
446     index 6c9d7054d997..8ca636cf8618 100644
447     --- a/arch/arm/mm/dma-mapping.c
448     +++ b/arch/arm/mm/dma-mapping.c
449     @@ -429,12 +429,21 @@ void __init dma_contiguous_remap(void)
450     map.type = MT_MEMORY_DMA_READY;
451    
452     /*
453     - * Clear previous low-memory mapping
454     + * Clear previous low-memory mapping to ensure that the
455     + * TLB does not see any conflicting entries, then flush
456     + * the TLB of the old entries before creating new mappings.
457     + *
458     + * This ensures that any speculatively loaded TLB entries
459     + * (even though they may be rare) can not cause any problems,
460     + * and ensures that this code is architecturally compliant.
461     */
462     for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
463     addr += PMD_SIZE)
464     pmd_clear(pmd_off_k(addr));
465    
466     + flush_tlb_kernel_range(__phys_to_virt(start),
467     + __phys_to_virt(end));
468     +
469     iotable_init(&map, 1);
470     }
471     }
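The dma_contiguous_remap() hunk above enforces a clear -> TLB flush -> remap ordering ("break before make"). A stub sketch of that ordering pattern, with stand-in functions rather than kernel APIs:

    #include <stdio.h>

    #define PMD_SIZE 0x200000UL

    /* Stand-ins for the kernel primitives; illustration only. */
    static void clear_mapping(unsigned long a) { printf("clear 0x%lx\n", a); }
    static void flush_tlb(unsigned long s, unsigned long e) { printf("flush 0x%lx-0x%lx\n", s, e); }
    static void map_new(unsigned long s, unsigned long e) { printf("map   0x%lx-0x%lx\n", s, e); }

    int main(void)
    {
        unsigned long start = 0xC0000000UL, end = start + 2 * PMD_SIZE, addr;

        for (addr = start; addr < end; addr += PMD_SIZE)
            clear_mapping(addr);  /* 1. tear down the old entries        */
        flush_tlb(start, end);    /* 2. evict stale/speculative TLB hits */
        map_new(start, end);      /* 3. only then install the new map    */
        return 0;
    }
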
472     diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
473     index fb3c446af9e5..4c7d5cddef35 100644
474     --- a/arch/arm/mm/mmu.c
475     +++ b/arch/arm/mm/mmu.c
476     @@ -685,7 +685,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
477     }
478    
479     static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
480     - unsigned long end, unsigned long phys, const struct mem_type *type)
481     + unsigned long end, phys_addr_t phys,
482     + const struct mem_type *type)
483     {
484     pud_t *pud = pud_offset(pgd, addr);
485     unsigned long next;
486     diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
487     index 6f3b0476b729..110e738bc970 100644
488     --- a/arch/arm/mm/proc-v7-3level.S
489     +++ b/arch/arm/mm/proc-v7-3level.S
490     @@ -78,8 +78,13 @@ ENTRY(cpu_v7_set_pte_ext)
491     tst rh, #1 << (57 - 32) @ L_PTE_NONE
492     bicne rl, #L_PTE_VALID
493     bne 1f
494     - tst rh, #1 << (55 - 32) @ L_PTE_DIRTY
495     - orreq rl, #L_PTE_RDONLY
496     +
497     + eor ip, rh, #1 << (55 - 32) @ toggle L_PTE_DIRTY in temp reg to
498     + @ test for !L_PTE_DIRTY || L_PTE_RDONLY
499     + tst ip, #1 << (55 - 32) | 1 << (58 - 32)
500     + orrne rl, #PTE_AP2
501     + biceq rl, #PTE_AP2
502     +
503     1: strd r2, r3, [r0]
504     ALT_SMP(W(nop))
505     ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
506     diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
507     index 94599a65cc66..89e57280d2e2 100644
508     --- a/arch/powerpc/xmon/xmon.c
509     +++ b/arch/powerpc/xmon/xmon.c
510     @@ -288,6 +288,7 @@ static inline void disable_surveillance(void)
511     args.token = rtas_token("set-indicator");
512     if (args.token == RTAS_UNKNOWN_SERVICE)
513     return;
514     + args.token = cpu_to_be32(args.token);
515     args.nargs = cpu_to_be32(3);
516     args.nret = cpu_to_be32(1);
517     args.rets = &args.args[3];
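The one-line xmon fix above converts the RTAS token to big-endian like the other args fields. To see what would otherwise go out, a small sketch using htonl() as a stand-in for cpu_to_be32() (the token value is made up):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h> /* htonl() stands in for cpu_to_be32() here */

    int main(void)
    {
        uint32_t token = 9; /* made-up rtas_token() result */

        /* On a little-endian box the raw value and the big-endian value
         * differ; RTAS expects the latter for every args field. */
        printf("host order:       0x%08x\n", token);
        printf("big-endian order: 0x%08x\n", htonl(token));
        return 0;
    }
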
518     diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
519     index 202d2c85ba2e..9b2622e0a07e 100644
520     --- a/drivers/firmware/efi/efi-pstore.c
521     +++ b/drivers/firmware/efi/efi-pstore.c
522     @@ -38,6 +38,12 @@ struct pstore_read_data {
523     char **buf;
524     };
525    
526     +static inline u64 generic_id(unsigned long timestamp,
527     + unsigned int part, int count)
528     +{
529     + return (timestamp * 100 + part) * 1000 + count;
530     +}
531     +
532     static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
533     {
534     efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
535     @@ -56,7 +62,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
536    
537     if (sscanf(name, "dump-type%u-%u-%d-%lu",
538     cb_data->type, &part, &cnt, &time) == 4) {
539     - *cb_data->id = part;
540     + *cb_data->id = generic_id(time, part, cnt);
541     *cb_data->count = cnt;
542     cb_data->timespec->tv_sec = time;
543     cb_data->timespec->tv_nsec = 0;
544     @@ -67,7 +73,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
545     * which doesn't support holding
546     * multiple logs, remains.
547     */
548     - *cb_data->id = part;
549     + *cb_data->id = generic_id(time, part, 0);
550     *cb_data->count = 0;
551     cb_data->timespec->tv_sec = time;
552     cb_data->timespec->tv_nsec = 0;
553     @@ -185,14 +191,16 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
554     char name[DUMP_NAME_LEN];
555     efi_char16_t efi_name[DUMP_NAME_LEN];
556     int found, i;
557     + unsigned int part;
558    
559     - sprintf(name, "dump-type%u-%u-%d-%lu", type, (unsigned int)id, count,
560     - time.tv_sec);
561     + do_div(id, 1000);
562     + part = do_div(id, 100);
563     + sprintf(name, "dump-type%u-%u-%d-%lu", type, part, count, time.tv_sec);
564    
565     for (i = 0; i < DUMP_NAME_LEN; i++)
566     efi_name[i] = name[i];
567    
568     - edata.id = id;
569     + edata.id = part;
570     edata.type = type;
571     edata.count = count;
572     edata.time = time;
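The new generic_id() packs timestamp, part and count into one pstore record ID, and efi_pstore_erase() unpacks it with two do_div() calls. A plain-C round trip of that encoding (it stays reversible while part < 100 and count < 1000):

    #include <stdio.h>
    #include <stdint.h>

    /* The encoding the hunk introduces. */
    static uint64_t generic_id(unsigned long timestamp, unsigned int part, int count)
    {
        return ((uint64_t)timestamp * 100 + part) * 1000 + count;
    }

    int main(void)
    {
        uint64_t id = generic_id(1421683200UL, 7, 3); /* made-up record */

        unsigned int count = id % 1000; id /= 1000;   /* first do_div(id, 1000) */
        unsigned int part  = id % 100;  id /= 100;    /* then  do_div(id, 100)  */

        printf("timestamp=%llu part=%u count=%u\n",
               (unsigned long long)id, part, count);
        return 0;
    }
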
573     diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
574     index af909a20dd70..74769724c94a 100644
575     --- a/drivers/gpio/gpiolib-of.c
576     +++ b/drivers/gpio/gpiolib-of.c
577     @@ -48,7 +48,7 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
578     * Return true to stop looking and return the translation
579     * error via out_gpio
580     */
581     - gg_data->out_gpio = ERR_PTR(ret);
582     + gg_data->out_gpio = ret;
583     return true;
584     }
585    
586     diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
587     index 5d4a4583d2df..8019e642d2f5 100644
588     --- a/drivers/infiniband/ulp/isert/ib_isert.c
589     +++ b/drivers/infiniband/ulp/isert/ib_isert.c
590     @@ -40,8 +40,15 @@ static DEFINE_MUTEX(device_list_mutex);
591     static LIST_HEAD(device_list);
592     static struct workqueue_struct *isert_rx_wq;
593     static struct workqueue_struct *isert_comp_wq;
594     +static struct workqueue_struct *isert_release_wq;
595     static struct kmem_cache *isert_cmd_cache;
596    
597     +static int
598     +isert_rdma_post_recvl(struct isert_conn *isert_conn);
599     +static int
600     +isert_rdma_accept(struct isert_conn *isert_conn);
601     +struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
602     +
603     static void
604     isert_qp_event_callback(struct ib_event *e, void *context)
605     {
606     @@ -107,9 +114,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
607     attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
608     /*
609     * FIXME: Use devattr.max_sge - 2 for max_send_sge as
610     - * work-around for RDMA_READ..
611     + * work-around for RDMA_READs with ConnectX-2.
612     + *
613     + * Also, still make sure to have at least two SGEs for
614     + * outgoing control PDU responses.
615     */
616     - attr.cap.max_send_sge = devattr.max_sge - 2;
617     + attr.cap.max_send_sge = max(2, devattr.max_sge - 2);
618     isert_conn->max_sge = attr.cap.max_send_sge;
619    
620     attr.cap.max_recv_sge = 1;
621     @@ -124,12 +134,18 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
622     ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
623     if (ret) {
624     pr_err("rdma_create_qp failed for cma_id %d\n", ret);
625     - return ret;
626     + goto err;
627     }
628     isert_conn->conn_qp = cma_id->qp;
629     pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
630    
631     return 0;
632     +err:
633     + mutex_lock(&device_list_mutex);
634     + device->cq_active_qps[min_index]--;
635     + mutex_unlock(&device_list_mutex);
636     +
637     + return ret;
638     }
639    
640     static void
641     @@ -212,6 +228,13 @@ isert_create_device_ib_res(struct isert_device *device)
642     struct ib_device *ib_dev = device->ib_device;
643     struct isert_cq_desc *cq_desc;
644     int ret = 0, i, j;
645     + int max_rx_cqe, max_tx_cqe;
646     + struct ib_device_attr dev_attr;
647     +
648     + memset(&dev_attr, 0, sizeof(struct ib_device_attr));
649     + ret = isert_query_device(device->ib_device, &dev_attr);
650     + if (ret)
651     + return ret;
652    
653     device->cqs_used = min_t(int, num_online_cpus(),
654     device->ib_device->num_comp_vectors);
655     @@ -234,6 +257,9 @@ isert_create_device_ib_res(struct isert_device *device)
656     goto out_cq_desc;
657     }
658    
659     + max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr.max_cqe);
660     + max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr.max_cqe);
661     +
662     for (i = 0; i < device->cqs_used; i++) {
663     cq_desc[i].device = device;
664     cq_desc[i].cq_index = i;
665     @@ -242,7 +268,7 @@ isert_create_device_ib_res(struct isert_device *device)
666     isert_cq_rx_callback,
667     isert_cq_event_callback,
668     (void *)&cq_desc[i],
669     - ISER_MAX_RX_CQ_LEN, i);
670     + max_rx_cqe, i);
671     if (IS_ERR(device->dev_rx_cq[i])) {
672     ret = PTR_ERR(device->dev_rx_cq[i]);
673     device->dev_rx_cq[i] = NULL;
674     @@ -253,7 +279,7 @@ isert_create_device_ib_res(struct isert_device *device)
675     isert_cq_tx_callback,
676     isert_cq_event_callback,
677     (void *)&cq_desc[i],
678     - ISER_MAX_TX_CQ_LEN, i);
679     + max_tx_cqe, i);
680     if (IS_ERR(device->dev_tx_cq[i])) {
681     ret = PTR_ERR(device->dev_tx_cq[i]);
682     device->dev_tx_cq[i] = NULL;
683     @@ -375,8 +401,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
684     static int
685     isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
686     {
687     - struct iscsi_np *np = cma_id->context;
688     - struct isert_np *isert_np = np->np_context;
689     + struct isert_np *isert_np = cma_id->context;
690     + struct iscsi_np *np = isert_np->np;
691     struct isert_conn *isert_conn;
692     struct isert_device *device;
693     struct ib_device *ib_dev = cma_id->device;
694     @@ -401,12 +427,12 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
695     isert_conn->state = ISER_CONN_INIT;
696     INIT_LIST_HEAD(&isert_conn->conn_accept_node);
697     init_completion(&isert_conn->conn_login_comp);
698     + init_completion(&isert_conn->login_req_comp);
699     init_completion(&isert_conn->conn_wait);
700     init_completion(&isert_conn->conn_wait_comp_err);
701     kref_init(&isert_conn->conn_kref);
702     mutex_init(&isert_conn->conn_mutex);
703    
704     - cma_id->context = isert_conn;
705     isert_conn->conn_cm_id = cma_id;
706     isert_conn->responder_resources = event->param.conn.responder_resources;
707     isert_conn->initiator_depth = event->param.conn.initiator_depth;
708     @@ -466,6 +492,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
709     if (ret)
710     goto out_conn_dev;
711    
712     + ret = isert_rdma_post_recvl(isert_conn);
713     + if (ret)
714     + goto out_conn_dev;
715     +
716     + ret = isert_rdma_accept(isert_conn);
717     + if (ret)
718     + goto out_conn_dev;
719     +
720     mutex_lock(&isert_np->np_accept_mutex);
721     list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
722     mutex_unlock(&isert_np->np_accept_mutex);
723     @@ -486,6 +520,7 @@ out_login_buf:
724     kfree(isert_conn->login_buf);
725     out:
726     kfree(isert_conn);
727     + rdma_reject(cma_id, NULL, 0);
728     return ret;
729     }
730    
731     @@ -498,18 +533,20 @@ isert_connect_release(struct isert_conn *isert_conn)
732    
733     pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
734    
735     + isert_free_rx_descriptors(isert_conn);
736     + rdma_destroy_id(isert_conn->conn_cm_id);
737     +
738     if (isert_conn->conn_qp) {
739     cq_index = ((struct isert_cq_desc *)
740     isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
741     pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
742     + mutex_lock(&device_list_mutex);
743     isert_conn->conn_device->cq_active_qps[cq_index]--;
744     + mutex_unlock(&device_list_mutex);
745    
746     - rdma_destroy_qp(isert_conn->conn_cm_id);
747     + ib_destroy_qp(isert_conn->conn_qp);
748     }
749    
750     - isert_free_rx_descriptors(isert_conn);
751     - rdma_destroy_id(isert_conn->conn_cm_id);
752     -
753     if (isert_conn->login_buf) {
754     ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
755     ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
756     @@ -529,9 +566,19 @@ isert_connect_release(struct isert_conn *isert_conn)
757     static void
758     isert_connected_handler(struct rdma_cm_id *cma_id)
759     {
760     - struct isert_conn *isert_conn = cma_id->context;
761     + struct isert_conn *isert_conn = cma_id->qp->qp_context;
762     +
763     + pr_info("conn %p\n", isert_conn);
764    
765     - kref_get(&isert_conn->conn_kref);
766     + if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
767     + pr_warn("conn %p connect_release is running\n", isert_conn);
768     + return;
769     + }
770     +
771     + mutex_lock(&isert_conn->conn_mutex);
772     + if (isert_conn->state != ISER_CONN_FULL_FEATURE)
773     + isert_conn->state = ISER_CONN_UP;
774     + mutex_unlock(&isert_conn->conn_mutex);
775     }
776    
777     static void
778     @@ -552,65 +599,108 @@ isert_put_conn(struct isert_conn *isert_conn)
779     kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
780     }
781    
782     +/**
783     + * isert_conn_terminate() - Initiate connection termination
784     + * @isert_conn: isert connection struct
785     + *
786     + * Notes:
787     + * In case the connection state is FULL_FEATURE, move state
788     + * to TEMINATING and start teardown sequence (rdma_disconnect).
789     + * In case the connection state is UP, complete flush as well.
790     + *
791     + * This routine must be called with conn_mutex held. Thus it is
792     + * safe to call multiple times.
793     + */
794     static void
795     -isert_disconnect_work(struct work_struct *work)
796     +isert_conn_terminate(struct isert_conn *isert_conn)
797     {
798     - struct isert_conn *isert_conn = container_of(work,
799     - struct isert_conn, conn_logout_work);
800     + int err;
801    
802     - pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
803     - mutex_lock(&isert_conn->conn_mutex);
804     - if (isert_conn->state == ISER_CONN_UP)
805     + switch (isert_conn->state) {
806     + case ISER_CONN_TERMINATING:
807     + break;
808     + case ISER_CONN_UP:
809     + /*
810     + * No flush completions will occur as we didn't
811     + * get to ISER_CONN_FULL_FEATURE yet, complete
812     + * to allow teardown progress.
813     + */
814     + complete(&isert_conn->conn_wait_comp_err);
815     + case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
816     + pr_info("Terminating conn %p state %d\n",
817     + isert_conn, isert_conn->state);
818     isert_conn->state = ISER_CONN_TERMINATING;
819     -
820     - if (isert_conn->post_recv_buf_count == 0 &&
821     - atomic_read(&isert_conn->post_send_buf_count) == 0) {
822     - mutex_unlock(&isert_conn->conn_mutex);
823     - goto wake_up;
824     - }
825     - if (!isert_conn->conn_cm_id) {
826     - mutex_unlock(&isert_conn->conn_mutex);
827     - isert_put_conn(isert_conn);
828     - return;
829     + err = rdma_disconnect(isert_conn->conn_cm_id);
830     + if (err)
831     + pr_warn("Failed rdma_disconnect isert_conn %p\n",
832     + isert_conn);
833     + break;
834     + default:
835     + pr_warn("conn %p teminating in state %d\n",
836     + isert_conn, isert_conn->state);
837     }
838     +}
839    
840     - if (isert_conn->disconnect) {
841     - /* Send DREQ/DREP towards our initiator */
842     - rdma_disconnect(isert_conn->conn_cm_id);
843     - }
844     +static int
845     +isert_np_cma_handler(struct isert_np *isert_np,
846     + enum rdma_cm_event_type event)
847     +{
848     + pr_debug("isert np %p, handling event %d\n", isert_np, event);
849    
850     - mutex_unlock(&isert_conn->conn_mutex);
851     + switch (event) {
852     + case RDMA_CM_EVENT_DEVICE_REMOVAL:
853     + isert_np->np_cm_id = NULL;
854     + break;
855     + case RDMA_CM_EVENT_ADDR_CHANGE:
856     + isert_np->np_cm_id = isert_setup_id(isert_np);
857     + if (IS_ERR(isert_np->np_cm_id)) {
858     + pr_err("isert np %p setup id failed: %ld\n",
859     + isert_np, PTR_ERR(isert_np->np_cm_id));
860     + isert_np->np_cm_id = NULL;
861     + }
862     + break;
863     + default:
864     + pr_err("isert np %p Unexpected event %d\n",
865     + isert_np, event);
866     + }
867    
868     -wake_up:
869     - complete(&isert_conn->conn_wait);
870     + return -1;
871     }
872    
873     static int
874     -isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
875     +isert_disconnected_handler(struct rdma_cm_id *cma_id,
876     + enum rdma_cm_event_type event)
877     {
878     + struct isert_np *isert_np = cma_id->context;
879     struct isert_conn *isert_conn;
880    
881     - if (!cma_id->qp) {
882     - struct isert_np *isert_np = cma_id->context;
883     + if (isert_np->np_cm_id == cma_id)
884     + return isert_np_cma_handler(cma_id->context, event);
885    
886     - isert_np->np_cm_id = NULL;
887     - return -1;
888     - }
889     + isert_conn = cma_id->qp->qp_context;
890    
891     - isert_conn = (struct isert_conn *)cma_id->context;
892     + mutex_lock(&isert_conn->conn_mutex);
893     + isert_conn_terminate(isert_conn);
894     + mutex_unlock(&isert_conn->conn_mutex);
895    
896     - isert_conn->disconnect = disconnect;
897     - INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
898     - schedule_work(&isert_conn->conn_logout_work);
899     + pr_info("conn %p completing conn_wait\n", isert_conn);
900     + complete(&isert_conn->conn_wait);
901    
902     return 0;
903     }
904    
905     +static void
906     +isert_connect_error(struct rdma_cm_id *cma_id)
907     +{
908     + struct isert_conn *isert_conn = cma_id->qp->qp_context;
909     +
910     + isert_put_conn(isert_conn);
911     +}
912     +
913     static int
914     isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
915     {
916     int ret = 0;
917     - bool disconnect = false;
918    
919     pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
920     event->event, event->status, cma_id->context, cma_id);
921     @@ -628,11 +718,14 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
922     case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
923     case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
924     case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
925     - disconnect = true;
926     case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
927     - ret = isert_disconnected_handler(cma_id, disconnect);
928     + ret = isert_disconnected_handler(cma_id, event->event);
929     break;
930     + case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
931     + case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
932     case RDMA_CM_EVENT_CONNECT_ERROR:
933     + isert_connect_error(cma_id);
934     + break;
935     default:
936     pr_err("Unhandled RDMA CMA event: %d\n", event->event);
937     break;
938     @@ -834,7 +927,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
939     if (ret)
940     return ret;
941    
942     - isert_conn->state = ISER_CONN_UP;
943     + /* Now we are in FULL_FEATURE phase */
944     + mutex_lock(&isert_conn->conn_mutex);
945     + isert_conn->state = ISER_CONN_FULL_FEATURE;
946     + mutex_unlock(&isert_conn->conn_mutex);
947     goto post_send;
948     }
949    
950     @@ -851,18 +947,17 @@ post_send:
951     }
952    
953     static void
954     -isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
955     - struct isert_conn *isert_conn)
956     +isert_rx_login_req(struct isert_conn *isert_conn)
957     {
958     + struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
959     + int rx_buflen = isert_conn->login_req_len;
960     struct iscsi_conn *conn = isert_conn->conn;
961     struct iscsi_login *login = conn->conn_login;
962     int size;
963    
964     - if (!login) {
965     - pr_err("conn->conn_login is NULL\n");
966     - dump_stack();
967     - return;
968     - }
969     + pr_info("conn %p\n", isert_conn);
970     +
971     + WARN_ON_ONCE(!login);
972    
973     if (login->first_request) {
974     struct iscsi_login_req *login_req =
975     @@ -892,7 +987,8 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
976     size, rx_buflen, MAX_KEY_VALUE_PAIRS);
977     memcpy(login->req_buf, &rx_desc->data[0], size);
978    
979     - complete(&isert_conn->conn_login_comp);
980     + if (login->first_request)
981     + complete(&isert_conn->conn_login_comp);
982     }
983    
984     static void
985     @@ -1169,11 +1265,20 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
986     hdr->opcode, hdr->itt, hdr->flags,
987     (int)(xfer_len - ISER_HEADERS_LEN));
988    
989     - if ((char *)desc == isert_conn->login_req_buf)
990     - isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
991     - isert_conn);
992     - else
993     + if ((char *)desc == isert_conn->login_req_buf) {
994     + isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
995     + if (isert_conn->conn) {
996     + struct iscsi_login *login = isert_conn->conn->conn_login;
997     +
998     + if (login && !login->first_request)
999     + isert_rx_login_req(isert_conn);
1000     + }
1001     + mutex_lock(&isert_conn->conn_mutex);
1002     + complete(&isert_conn->login_req_comp);
1003     + mutex_unlock(&isert_conn->conn_mutex);
1004     + } else {
1005     isert_rx_do_work(desc, isert_conn);
1006     + }
1007    
1008     ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
1009     DMA_FROM_DEVICE);
1010     @@ -1483,7 +1588,7 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
1011     msleep(3000);
1012    
1013     mutex_lock(&isert_conn->conn_mutex);
1014     - isert_conn->state = ISER_CONN_DOWN;
1015     + isert_conn_terminate(isert_conn);
1016     mutex_unlock(&isert_conn->conn_mutex);
1017    
1018     iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1019     @@ -2044,13 +2149,51 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
1020     return ret;
1021     }
1022    
1023     +struct rdma_cm_id *
1024     +isert_setup_id(struct isert_np *isert_np)
1025     +{
1026     + struct iscsi_np *np = isert_np->np;
1027     + struct rdma_cm_id *id;
1028     + struct sockaddr *sa;
1029     + int ret;
1030     +
1031     + sa = (struct sockaddr *)&np->np_sockaddr;
1032     + pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
1033     +
1034     + id = rdma_create_id(isert_cma_handler, isert_np,
1035     + RDMA_PS_TCP, IB_QPT_RC);
1036     + if (IS_ERR(id)) {
1037     + pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
1038     + ret = PTR_ERR(id);
1039     + goto out;
1040     + }
1041     + pr_debug("id %p context %p\n", id, id->context);
1042     +
1043     + ret = rdma_bind_addr(id, sa);
1044     + if (ret) {
1045     + pr_err("rdma_bind_addr() failed: %d\n", ret);
1046     + goto out_id;
1047     + }
1048     +
1049     + ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
1050     + if (ret) {
1051     + pr_err("rdma_listen() failed: %d\n", ret);
1052     + goto out_id;
1053     + }
1054     +
1055     + return id;
1056     +out_id:
1057     + rdma_destroy_id(id);
1058     +out:
1059     + return ERR_PTR(ret);
1060     +}
1061     +
1062     static int
1063     isert_setup_np(struct iscsi_np *np,
1064     struct __kernel_sockaddr_storage *ksockaddr)
1065     {
1066     struct isert_np *isert_np;
1067     struct rdma_cm_id *isert_lid;
1068     - struct sockaddr *sa;
1069     int ret;
1070    
1071     isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
1072     @@ -2062,9 +2205,8 @@ isert_setup_np(struct iscsi_np *np,
1073     mutex_init(&isert_np->np_accept_mutex);
1074     INIT_LIST_HEAD(&isert_np->np_accept_list);
1075     init_completion(&isert_np->np_login_comp);
1076     + isert_np->np = np;
1077    
1078     - sa = (struct sockaddr *)ksockaddr;
1079     - pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
1080     /*
1081     * Setup the np->np_sockaddr from the passed sockaddr setup
1082     * in iscsi_target_configfs.c code..
1083     @@ -2072,37 +2214,20 @@ isert_setup_np(struct iscsi_np *np,
1084     memcpy(&np->np_sockaddr, ksockaddr,
1085     sizeof(struct __kernel_sockaddr_storage));
1086    
1087     - isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
1088     - IB_QPT_RC);
1089     + isert_lid = isert_setup_id(isert_np);
1090     if (IS_ERR(isert_lid)) {
1091     - pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
1092     - PTR_ERR(isert_lid));
1093     ret = PTR_ERR(isert_lid);
1094     goto out;
1095     }
1096    
1097     - ret = rdma_bind_addr(isert_lid, sa);
1098     - if (ret) {
1099     - pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
1100     - goto out_lid;
1101     - }
1102     -
1103     - ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
1104     - if (ret) {
1105     - pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
1106     - goto out_lid;
1107     - }
1108     -
1109     isert_np->np_cm_id = isert_lid;
1110     np->np_context = isert_np;
1111     - pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
1112    
1113     return 0;
1114    
1115     -out_lid:
1116     - rdma_destroy_id(isert_lid);
1117     out:
1118     kfree(isert_np);
1119     +
1120     return ret;
1121     }
1122    
1123     @@ -2138,13 +2263,27 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
1124     struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1125     int ret;
1126    
1127     - pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
1128     + pr_info("before login_req comp conn: %p\n", isert_conn);
1129     + ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
1130     + if (ret) {
1131     + pr_err("isert_conn %p interrupted before got login req\n",
1132     + isert_conn);
1133     + return ret;
1134     + }
1135     + isert_conn->login_req_comp.done = 0;
1136     +
1137     + if (!login->first_request)
1138     + return 0;
1139     +
1140     + isert_rx_login_req(isert_conn);
1141     +
1142     + pr_info("before conn_login_comp conn: %p\n", conn);
1143    
1144     ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
1145     if (ret)
1146     return ret;
1147    
1148     - pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
1149     + pr_info("processing login->req: %p\n", login->req);
1150     return 0;
1151     }
1152    
1153     @@ -2222,17 +2361,10 @@ accept_wait:
1154     isert_conn->conn = conn;
1155     max_accept = 0;
1156    
1157     - ret = isert_rdma_post_recvl(isert_conn);
1158     - if (ret)
1159     - return ret;
1160     -
1161     - ret = isert_rdma_accept(isert_conn);
1162     - if (ret)
1163     - return ret;
1164     -
1165     isert_set_conn_info(np, conn, isert_conn);
1166    
1167     - pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
1168     + pr_debug("Processing isert_conn: %p\n", isert_conn);
1169     +
1170     return 0;
1171     }
1172    
1173     @@ -2248,6 +2380,24 @@ isert_free_np(struct iscsi_np *np)
1174     kfree(isert_np);
1175     }
1176    
1177     +static void isert_release_work(struct work_struct *work)
1178     +{
1179     + struct isert_conn *isert_conn = container_of(work,
1180     + struct isert_conn,
1181     + release_work);
1182     +
1183     + pr_info("Starting release conn %p\n", isert_conn);
1184     +
1185     + wait_for_completion(&isert_conn->conn_wait);
1186     +
1187     + mutex_lock(&isert_conn->conn_mutex);
1188     + isert_conn->state = ISER_CONN_DOWN;
1189     + mutex_unlock(&isert_conn->conn_mutex);
1190     +
1191     + pr_info("Destroying conn %p\n", isert_conn);
1192     + isert_put_conn(isert_conn);
1193     +}
1194     +
1195     static void isert_wait_conn(struct iscsi_conn *conn)
1196     {
1197     struct isert_conn *isert_conn = conn->context;
1198     @@ -2255,10 +2405,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
1199     pr_debug("isert_wait_conn: Starting \n");
1200    
1201     mutex_lock(&isert_conn->conn_mutex);
1202     - if (isert_conn->conn_cm_id) {
1203     - pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
1204     - rdma_disconnect(isert_conn->conn_cm_id);
1205     - }
1206     /*
1207     * Only wait for conn_wait_comp_err if the isert_conn made it
1208     * into full feature phase..
1209     @@ -2267,14 +2413,13 @@ static void isert_wait_conn(struct iscsi_conn *conn)
1210     mutex_unlock(&isert_conn->conn_mutex);
1211     return;
1212     }
1213     - if (isert_conn->state == ISER_CONN_UP)
1214     - isert_conn->state = ISER_CONN_TERMINATING;
1215     + isert_conn_terminate(isert_conn);
1216     mutex_unlock(&isert_conn->conn_mutex);
1217    
1218     wait_for_completion(&isert_conn->conn_wait_comp_err);
1219    
1220     - wait_for_completion(&isert_conn->conn_wait);
1221     - isert_put_conn(isert_conn);
1222     + INIT_WORK(&isert_conn->release_work, isert_release_work);
1223     + queue_work(isert_release_wq, &isert_conn->release_work);
1224     }
1225    
1226     static void isert_free_conn(struct iscsi_conn *conn)
1227     @@ -2320,20 +2465,30 @@ static int __init isert_init(void)
1228     goto destroy_rx_wq;
1229     }
1230    
1231     + isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
1232     + WQ_UNBOUND_MAX_ACTIVE);
1233     + if (!isert_release_wq) {
1234     + pr_err("Unable to allocate isert_release_wq\n");
1235     + ret = -ENOMEM;
1236     + goto destroy_comp_wq;
1237     + }
1238     +
1239     isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
1240     sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
1241     0, NULL);
1242     if (!isert_cmd_cache) {
1243     pr_err("Unable to create isert_cmd_cache\n");
1244     ret = -ENOMEM;
1245     - goto destroy_tx_cq;
1246     + goto destroy_release_wq;
1247     }
1248    
1249     iscsit_register_transport(&iser_target_transport);
1250     - pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
1251     + pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
1252     return 0;
1253    
1254     -destroy_tx_cq:
1255     +destroy_release_wq:
1256     + destroy_workqueue(isert_release_wq);
1257     +destroy_comp_wq:
1258     destroy_workqueue(isert_comp_wq);
1259     destroy_rx_wq:
1260     destroy_workqueue(isert_rx_wq);
1261     @@ -2344,6 +2499,7 @@ static void __exit isert_exit(void)
1262     {
1263     flush_scheduled_work();
1264     kmem_cache_destroy(isert_cmd_cache);
1265     + destroy_workqueue(isert_release_wq);
1266     destroy_workqueue(isert_comp_wq);
1267     destroy_workqueue(isert_rx_wq);
1268     iscsit_unregister_transport(&iser_target_transport);
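Among the isert changes above, isert_connected_handler() now uses kref_get_unless_zero() so a connection whose refcount has already dropped to zero is left alone while connect_release runs. A simplified, non-atomic userspace model of that pattern (the real kref helpers are atomic):

    #include <stdio.h>

    struct obj { int refcount; };

    /* Only take a reference if release has not already started
     * (refcount still above zero). */
    static int get_unless_zero(struct obj *o)
    {
        if (o->refcount == 0)
            return 0;
        o->refcount++;
        return 1;
    }

    int main(void)
    {
        struct obj live = { .refcount = 1 }, dying = { .refcount = 0 };

        printf("live:  %d\n", get_unless_zero(&live));  /* 1: reference taken */
        printf("dying: %d\n", get_unless_zero(&dying)); /* 0: leave it alone  */
        return 0;
    }
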
1269     diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
1270     index 032f65abee36..b233ee5e46b0 100644
1271     --- a/drivers/infiniband/ulp/isert/ib_isert.h
1272     +++ b/drivers/infiniband/ulp/isert/ib_isert.h
1273     @@ -21,6 +21,7 @@ enum iser_ib_op_code {
1274     enum iser_conn_state {
1275     ISER_CONN_INIT,
1276     ISER_CONN_UP,
1277     + ISER_CONN_FULL_FEATURE,
1278     ISER_CONN_TERMINATING,
1279     ISER_CONN_DOWN,
1280     };
1281     @@ -87,6 +88,7 @@ struct isert_conn {
1282     char *login_req_buf;
1283     char *login_rsp_buf;
1284     u64 login_req_dma;
1285     + int login_req_len;
1286     u64 login_rsp_dma;
1287     unsigned int conn_rx_desc_head;
1288     struct iser_rx_desc *conn_rx_descs;
1289     @@ -94,18 +96,18 @@ struct isert_conn {
1290     struct iscsi_conn *conn;
1291     struct list_head conn_accept_node;
1292     struct completion conn_login_comp;
1293     + struct completion login_req_comp;
1294     struct iser_tx_desc conn_login_tx_desc;
1295     struct rdma_cm_id *conn_cm_id;
1296     struct ib_pd *conn_pd;
1297     struct ib_mr *conn_mr;
1298     struct ib_qp *conn_qp;
1299     struct isert_device *conn_device;
1300     - struct work_struct conn_logout_work;
1301     struct mutex conn_mutex;
1302     struct completion conn_wait;
1303     struct completion conn_wait_comp_err;
1304     struct kref conn_kref;
1305     - bool disconnect;
1306     + struct work_struct release_work;
1307     };
1308    
1309     #define ISERT_MAX_CQ 64
1310     @@ -131,6 +133,7 @@ struct isert_device {
1311     };
1312    
1313     struct isert_np {
1314     + struct iscsi_np *np;
1315     struct semaphore np_sem;
1316     struct rdma_cm_id *np_cm_id;
1317     struct mutex np_accept_mutex;
1318     diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
1319     index 78227f32d6fa..4de2571938b8 100644
1320     --- a/drivers/input/serio/i8042-x86ia64io.h
1321     +++ b/drivers/input/serio/i8042-x86ia64io.h
1322     @@ -152,6 +152,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
1323     },
1324     },
1325     {
1326     + /* Medion Akoya E7225 */
1327     + .matches = {
1328     + DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
1329     + DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
1330     + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1331     + },
1332     + },
1333     + {
1334     /* Blue FB5601 */
1335     .matches = {
1336     DMI_MATCH(DMI_SYS_VENDOR, "blue"),
1337     diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
1338     index b361ce4ce511..4a10c1562d0f 100644
1339     --- a/drivers/md/dm-cache-metadata.c
1340     +++ b/drivers/md/dm-cache-metadata.c
1341     @@ -648,7 +648,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
1342     cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1343     if (!cmd) {
1344     DMERR("could not allocate metadata struct");
1345     - return NULL;
1346     + return ERR_PTR(-ENOMEM);
1347     }
1348    
1349     atomic_set(&cmd->ref_count, 1);
1350     @@ -710,7 +710,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
1351     return cmd;
1352    
1353     cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
1354     - if (cmd) {
1355     + if (!IS_ERR(cmd)) {
1356     mutex_lock(&table_lock);
1357     cmd2 = lookup(bdev);
1358     if (cmd2) {
1359     @@ -745,9 +745,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
1360     {
1361     struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
1362     may_format_device, policy_hint_size);
1363     - if (cmd && !same_params(cmd, data_block_size)) {
1364     +
1365     + if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
1366     dm_cache_metadata_close(cmd);
1367     - return NULL;
1368     + return ERR_PTR(-EINVAL);
1369     }
1370    
1371     return cmd;
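The dm-cache-metadata hunks switch from returning NULL to the kernel's ERR_PTR()/IS_ERR() convention, so callers see the specific errno instead of a bare failure. A self-contained userspace rendition of the convention (mirroring the kernel macros, with a fake open function standing in for metadata_open()):

    #include <stdio.h>
    #include <errno.h>

    /* Userspace rendition of the kernel's error-pointer helpers. */
    #define MAX_ERRNO 4095
    static inline void *ERR_PTR(long error)    { return (void *)error; }
    static inline long  PTR_ERR(const void *p) { return (long)p; }
    static inline int   IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    /* Fake open function; returns either a handle or an encoded errno. */
    static void *fake_open(int fail)
    {
        return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
    }

    int main(void)
    {
        void *cmd = fake_open(1);

        if (IS_ERR(cmd))
            printf("open failed: %ld\n", PTR_ERR(cmd)); /* -12 == -ENOMEM */
        return 0;
    }
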
1372     diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1373     index 86a2a5e3b26b..39996ca58ce6 100644
1374     --- a/drivers/md/dm-thin.c
1375     +++ b/drivers/md/dm-thin.c
1376     @@ -2457,6 +2457,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
1377     struct pool_c *pt = ti->private;
1378     struct pool *pool = pt->pool;
1379    
1380     + if (get_pool_mode(pool) >= PM_READ_ONLY) {
1381     + DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
1382     + dm_device_name(pool->pool_md));
1383     + return -EINVAL;
1384     + }
1385     +
1386     if (!strcasecmp(argv[0], "create_thin"))
1387     r = process_create_thin_mesg(argc, argv, pool);
1388    
1389     diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
1390     index 63fb90b006ba..a3fb8b51038a 100644
1391     --- a/drivers/net/can/usb/kvaser_usb.c
1392     +++ b/drivers/net/can/usb/kvaser_usb.c
1393     @@ -579,7 +579,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
1394     usb_sndbulkpipe(dev->udev,
1395     dev->bulk_out->bEndpointAddress),
1396     buf, msg->len,
1397     - kvaser_usb_simple_msg_callback, priv);
1398     + kvaser_usb_simple_msg_callback, netdev);
1399     usb_anchor_urb(urb, &priv->tx_submitted);
1400    
1401     err = usb_submit_urb(urb, GFP_ATOMIC);
1402     @@ -654,11 +654,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
1403     priv = dev->nets[channel];
1404     stats = &priv->netdev->stats;
1405    
1406     - if (status & M16C_STATE_BUS_RESET) {
1407     - kvaser_usb_unlink_tx_urbs(priv);
1408     - return;
1409     - }
1410     -
1411     skb = alloc_can_err_skb(priv->netdev, &cf);
1412     if (!skb) {
1413     stats->rx_dropped++;
1414     @@ -669,7 +664,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
1415    
1416     netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
1417    
1418     - if (status & M16C_STATE_BUS_OFF) {
1419     + if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
1420     cf->can_id |= CAN_ERR_BUSOFF;
1421    
1422     priv->can.can_stats.bus_off++;
1423     @@ -695,9 +690,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
1424     }
1425    
1426     new_state = CAN_STATE_ERROR_PASSIVE;
1427     - }
1428     -
1429     - if (status == M16C_STATE_BUS_ERROR) {
1430     + } else if (status & M16C_STATE_BUS_ERROR) {
1431     if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
1432     ((txerr >= 96) || (rxerr >= 96))) {
1433     cf->can_id |= CAN_ERR_CRTL;
1434     @@ -707,7 +700,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
1435    
1436     priv->can.can_stats.error_warning++;
1437     new_state = CAN_STATE_ERROR_WARNING;
1438     - } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
1439     + } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
1440     + ((txerr < 96) && (rxerr < 96))) {
1441     cf->can_id |= CAN_ERR_PROT;
1442     cf->data[2] = CAN_ERR_PROT_ACTIVE;
1443    
1444     @@ -1583,7 +1577,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
1445     {
1446     struct kvaser_usb *dev;
1447     int err = -ENOMEM;
1448     - int i;
1449     + int i, retry = 3;
1450    
1451     dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
1452     if (!dev)
1453     @@ -1601,7 +1595,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
1454    
1455     usb_set_intfdata(intf, dev);
1456    
1457     - err = kvaser_usb_get_software_info(dev);
1458     + /* On some x86 laptops, plugging a Kvaser device again after
1459     + * an unplug makes the firmware always ignore the very first
1460     + * command. For such a case, provide some room for retries
1461     + * instead of completely exiting the driver.
1462     + */
1463     + do {
1464     + err = kvaser_usb_get_software_info(dev);
1465     + } while (--retry && err == -ETIMEDOUT);
1466     +
1467     if (err) {
1468     dev_err(&intf->dev,
1469     "Cannot get software infos, error %d\n", err);
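
The probe change wraps the first firmware query in a small bounded-retry loop that retries only on -ETIMEDOUT, matching the comment about the device ignoring its very first command after a replug. A self-contained sketch of the idiom, with flaky_query() as a hypothetical stand-in for kvaser_usb_get_software_info():

    #include <errno.h>
    #include <stdio.h>

    static int attempts;

    /* Stand-in for a command the device ignores on first contact. */
    static int flaky_query(void)
    {
        return (++attempts < 2) ? -ETIMEDOUT : 0;
    }

    int main(void)
    {
        int retry = 3, err;

        /* Retry only the timeout case; any other error exits at once. */
        do {
            err = flaky_query();
        } while (--retry && err == -ETIMEDOUT);

        printf("err=%d after %d attempt(s)\n", err, attempts);
        return err ? 1 : 0;
    }
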
1470     diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
1471     index b1ab3a4956a5..e18240de159c 100644
1472     --- a/drivers/net/ethernet/ti/cpsw.c
1473     +++ b/drivers/net/ethernet/ti/cpsw.c
1474     @@ -1293,6 +1293,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
1475     if (vid == priv->data.default_vlan)
1476     return 0;
1477    
1478     + if (priv->data.dual_emac) {
1479     + /* In dual EMAC, reserved VLAN id should not be used for
1480     + * creating VLAN interfaces as this can break the dual
1481     + * EMAC port separation
1482     + */
1483     + int i;
1484     +
1485     + for (i = 0; i < priv->data.slaves; i++) {
1486     + if (vid == priv->slaves[i].port_vlan)
1487     + return -EINVAL;
1488     + }
1489     + }
1490     +
1491     dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
1492     return cpsw_add_vlan_ale_entry(priv, vid);
1493     }
1494     @@ -1306,6 +1319,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
1495     if (vid == priv->data.default_vlan)
1496     return 0;
1497    
1498     + if (priv->data.dual_emac) {
1499     + int i;
1500     +
1501     + for (i = 0; i < priv->data.slaves; i++) {
1502     + if (vid == priv->slaves[i].port_vlan)
1503     + return -EINVAL;
1504     + }
1505     + }
1506     +
1507     dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
1508     ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
1509     if (ret != 0)
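
Both the add and kill paths now scan the per-slave reserved port VLANs and refuse a colliding VID, since reusing a reserved VID would merge traffic the dual-EMAC setup is meant to keep apart. A minimal sketch of the check (the slave count and VIDs are made up for illustration):

    #include <errno.h>
    #include <stdio.h>

    #define NUM_SLAVES 2

    static const unsigned short port_vlan[NUM_SLAVES] = { 1, 2 };

    static int vlan_vid_allowed(unsigned short vid)
    {
        int i;

        /* Reserved per-port VIDs keep dual-EMAC traffic separated. */
        for (i = 0; i < NUM_SLAVES; i++)
            if (vid == port_vlan[i])
                return -EINVAL;
        return 0;
    }

    int main(void)
    {
        printf("vid 2   -> %d\n", vlan_vid_allowed(2));    /* -EINVAL */
        printf("vid 100 -> %d\n", vlan_vid_allowed(100));  /* 0 */
        return 0;
    }
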
1510     diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
1511     index a86d12326137..e873e8f0070d 100644
1512     --- a/drivers/regulator/core.c
1513     +++ b/drivers/regulator/core.c
1514     @@ -1410,7 +1410,7 @@ struct regulator *regulator_get_exclusive(struct device *dev, const char *id)
1515     }
1516     EXPORT_SYMBOL_GPL(regulator_get_exclusive);
1517    
1518     -/* Locks held by regulator_put() */
1519     +/* regulator_list_mutex lock held by regulator_put() */
1520     static void _regulator_put(struct regulator *regulator)
1521     {
1522     struct regulator_dev *rdev;
1523     @@ -1425,12 +1425,14 @@ static void _regulator_put(struct regulator *regulator)
1524     /* remove any sysfs entries */
1525     if (regulator->dev)
1526     sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
1527     + mutex_lock(&rdev->mutex);
1528     kfree(regulator->supply_name);
1529     list_del(&regulator->list);
1530     kfree(regulator);
1531    
1532     rdev->open_count--;
1533     rdev->exclusive = 0;
1534     + mutex_unlock(&rdev->mutex);
1535    
1536     module_put(rdev->owner);
1537     }
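
The fix extends rdev->mutex to cover the supply-name free, the list removal, and the open_count/exclusive update, so a concurrent consumer cannot observe the list and counters mid-update. A pthread sketch of the widened critical section (the state shown is a simplified stand-in for the regulator fields):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t rdev_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int open_count = 1;
    static int exclusive = 1;

    static void put_consumer(void)
    {
        /* Mutate the shared consumer state only while holding the lock. */
        pthread_mutex_lock(&rdev_mutex);
        open_count--;
        exclusive = 0;
        pthread_mutex_unlock(&rdev_mutex);
    }

    int main(void)
    {
        put_consumer();
        printf("open_count=%d exclusive=%d\n", open_count, exclusive);
        return 0;
    }
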
1538     diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
1539     index 0791c92e8c50..1389fefe8814 100644
1540     --- a/drivers/spi/spi-dw-mid.c
1541     +++ b/drivers/spi/spi-dw-mid.c
1542     @@ -222,7 +222,6 @@ int dw_spi_mid_init(struct dw_spi *dws)
1543     iounmap(clk_reg);
1544    
1545     dws->num_cs = 16;
1546     - dws->fifo_len = 40; /* FIFO has 40 words buffer */
1547    
1548     #ifdef CONFIG_SPI_DW_MID_DMA
1549     dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
1550     diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
1551     index d26a2d195d21..cc42ee5e19fb 100644
1552     --- a/drivers/spi/spi-pxa2xx.c
1553     +++ b/drivers/spi/spi-pxa2xx.c
1554     @@ -393,8 +393,8 @@ static void giveback(struct driver_data *drv_data)
1555     cs_deassert(drv_data);
1556     }
1557    
1558     - spi_finalize_current_message(drv_data->master);
1559     drv_data->cur_chip = NULL;
1560     + spi_finalize_current_message(drv_data->master);
1561     }
1562    
1563     static void reset_sccr1(struct driver_data *drv_data)
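
The one-line swap matters because spi_finalize_current_message() can let the core start the next message immediately, setting up new per-message state; clearing cur_chip after finalizing could then wipe state that already belongs to the next transfer. A toy sketch of the ordering (names hypothetical):

    #include <stddef.h>
    #include <stdio.h>

    static const char *cur_chip;

    /* Finalizing may kick off the next message straight away. */
    static void finalize_current_message(void)
    {
        cur_chip = "chip-for-next-message";
    }

    int main(void)
    {
        cur_chip = "chip-for-this-message";

        cur_chip = NULL;              /* clear our state first... */
        finalize_current_message();   /* ...then let the next one begin */

        printf("cur_chip=%s\n", cur_chip ? cur_chip : "(null)");
        return 0;
    }
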
1564     diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
1565     index e14e105acff8..0493e8b1ba8f 100644
1566     --- a/drivers/target/iscsi/iscsi_target_login.c
1567     +++ b/drivers/target/iscsi/iscsi_target_login.c
1568     @@ -1360,6 +1360,9 @@ old_sess_out:
1569     conn->sock = NULL;
1570     }
1571    
1572     + if (conn->conn_transport->iscsit_wait_conn)
1573     + conn->conn_transport->iscsit_wait_conn(conn);
1574     +
1575     if (conn->conn_transport->iscsit_free_conn)
1576     conn->conn_transport->iscsit_free_conn(conn);
1577    
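
The hunk follows the kernel's optional-ops convention: a transport may or may not provide iscsit_wait_conn, so the pointer is checked before the call, and waiting happens before freeing. A sketch of an ops table with optional hooks:

    #include <stdio.h>

    struct conn_transport_ops {
        void (*wait_conn)(void);    /* optional */
        void (*free_conn)(void);    /* optional */
    };

    static void my_wait(void) { puts("waiting for conn to quiesce"); }
    static void my_free(void) { puts("freeing conn"); }

    static void teardown(const struct conn_transport_ops *ops)
    {
        if (ops->wait_conn)
            ops->wait_conn();    /* quiesce first... */
        if (ops->free_conn)
            ops->free_conn();    /* ...then release */
    }

    int main(void)
    {
        struct conn_transport_ops full = { my_wait, my_free };
        struct conn_transport_ops bare = { 0 };    /* no hooks: both skipped */

        teardown(&full);
        teardown(&bare);
        return 0;
    }
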
1578     diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
1579     index 7c908141cc8a..9c02eb41ea90 100644
1580     --- a/drivers/target/loopback/tcm_loop.c
1581     +++ b/drivers/target/loopback/tcm_loop.c
1582     @@ -179,7 +179,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
1583     goto out_done;
1584     }
1585    
1586     - tl_nexus = tl_hba->tl_nexus;
1587     + tl_nexus = tl_tpg->tl_nexus;
1588     if (!tl_nexus) {
1589     scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
1590     " does not exist\n");
1591     @@ -258,20 +258,20 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
1592     */
1593     tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
1594     /*
1595     + * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
1596     + */
1597     + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
1598     + se_tpg = &tl_tpg->tl_se_tpg;
1599     + /*
1600     * Locate the tl_nexus and se_sess pointers
1601     */
1602     - tl_nexus = tl_hba->tl_nexus;
1603     + tl_nexus = tl_tpg->tl_nexus;
1604     if (!tl_nexus) {
1605     pr_err("Unable to perform device reset without"
1606     " active I_T Nexus\n");
1607     return FAILED;
1608     }
1609     se_sess = tl_nexus->se_sess;
1610     - /*
1611     - * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
1612     - */
1613     - tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
1614     - se_tpg = &tl_tpg->tl_se_tpg;
1615    
1616     tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
1617     if (!tl_cmd) {
1618     @@ -879,8 +879,8 @@ static int tcm_loop_make_nexus(
1619     struct tcm_loop_nexus *tl_nexus;
1620     int ret = -ENOMEM;
1621    
1622     - if (tl_tpg->tl_hba->tl_nexus) {
1623     - pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
1624     + if (tl_tpg->tl_nexus) {
1625     + pr_debug("tl_tpg->tl_nexus already exists\n");
1626     return -EEXIST;
1627     }
1628     se_tpg = &tl_tpg->tl_se_tpg;
1629     @@ -915,7 +915,7 @@ static int tcm_loop_make_nexus(
1630     */
1631     __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
1632     tl_nexus->se_sess, tl_nexus);
1633     - tl_tpg->tl_hba->tl_nexus = tl_nexus;
1634     + tl_tpg->tl_nexus = tl_nexus;
1635     pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
1636     " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
1637     name);
1638     @@ -931,9 +931,8 @@ static int tcm_loop_drop_nexus(
1639     {
1640     struct se_session *se_sess;
1641     struct tcm_loop_nexus *tl_nexus;
1642     - struct tcm_loop_hba *tl_hba = tpg->tl_hba;
1643    
1644     - tl_nexus = tpg->tl_hba->tl_nexus;
1645     + tl_nexus = tpg->tl_nexus;
1646     if (!tl_nexus)
1647     return -ENODEV;
1648    
1649     @@ -949,13 +948,13 @@ static int tcm_loop_drop_nexus(
1650     }
1651    
1652     pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
1653     - " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
1654     + " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
1655     tl_nexus->se_sess->se_node_acl->initiatorname);
1656     /*
1657     * Release the SCSI I_T Nexus to the emulated SAS Target Port
1658     */
1659     transport_deregister_session(tl_nexus->se_sess);
1660     - tpg->tl_hba->tl_nexus = NULL;
1661     + tpg->tl_nexus = NULL;
1662     kfree(tl_nexus);
1663     return 0;
1664     }
1665     @@ -971,7 +970,7 @@ static ssize_t tcm_loop_tpg_show_nexus(
1666     struct tcm_loop_nexus *tl_nexus;
1667     ssize_t ret;
1668    
1669     - tl_nexus = tl_tpg->tl_hba->tl_nexus;
1670     + tl_nexus = tl_tpg->tl_nexus;
1671     if (!tl_nexus)
1672     return -ENODEV;
1673    
1674     diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
1675     index dd7a84ee78e1..4ed85886a1ee 100644
1676     --- a/drivers/target/loopback/tcm_loop.h
1677     +++ b/drivers/target/loopback/tcm_loop.h
1678     @@ -25,11 +25,6 @@ struct tcm_loop_tmr {
1679     };
1680    
1681     struct tcm_loop_nexus {
1682     - int it_nexus_active;
1683     - /*
1684     - * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
1685     - */
1686     - struct scsi_host *sh;
1687     /*
1688     * Pointer to TCM session for I_T Nexus
1689     */
1690     @@ -45,6 +40,7 @@ struct tcm_loop_tpg {
1691     atomic_t tl_tpg_port_count;
1692     struct se_portal_group tl_se_tpg;
1693     struct tcm_loop_hba *tl_hba;
1694     + struct tcm_loop_nexus *tl_nexus;
1695     };
1696    
1697     struct tcm_loop_hba {
1698     @@ -53,7 +49,6 @@ struct tcm_loop_hba {
1699     struct se_hba_s *se_hba;
1700     struct se_lun *tl_hba_lun;
1701     struct se_port *tl_hba_lun_sep;
1702     - struct tcm_loop_nexus *tl_nexus;
1703     struct device dev;
1704     struct Scsi_Host *sh;
1705     struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
1706     diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
1707     index 2be407e22eb4..4deb0c997b1b 100644
1708     --- a/drivers/target/target_core_device.c
1709     +++ b/drivers/target/target_core_device.c
1710     @@ -1037,10 +1037,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1711     " changed for TCM/pSCSI\n", dev);
1712     return -EINVAL;
1713     }
1714     - if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
1715     + if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
1716     pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1717     - " greater than fabric_max_sectors: %u\n", dev,
1718     - optimal_sectors, dev->dev_attrib.fabric_max_sectors);
1719     + " greater than hw_max_sectors: %u\n", dev,
1720     + optimal_sectors, dev->dev_attrib.hw_max_sectors);
1721     return -EINVAL;
1722     }
1723    
1724     @@ -1442,7 +1442,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1725     DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
1726     dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
1727     dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
1728     - dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
1729    
1730     return dev;
1731     }
1732     @@ -1475,6 +1474,7 @@ int target_configure_device(struct se_device *dev)
1733     dev->dev_attrib.hw_max_sectors =
1734     se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
1735     dev->dev_attrib.hw_block_size);
1736     + dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
1737    
1738     dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1739     dev->creation_time = get_jiffies_64();
1740     diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
1741     index 3b2879316b87..8baaa0a26d70 100644
1742     --- a/drivers/target/target_core_file.c
1743     +++ b/drivers/target/target_core_file.c
1744     @@ -554,7 +554,16 @@ fd_execute_rw(struct se_cmd *cmd)
1745     enum dma_data_direction data_direction = cmd->data_direction;
1746     struct se_device *dev = cmd->se_dev;
1747     int ret = 0;
1748     -
1749     + /*
1750     + * We are currently limited by the number of iovecs (2048) per
1751     + * single vfs_[writev,readv] call.
1752     + */
1753     + if (cmd->data_length > FD_MAX_BYTES) {
1754     + pr_err("FILEIO: Not able to process I/O of %u bytes due to"
1755     + " FD_MAX_BYTES: %u iovec count limitation\n",
1756     + cmd->data_length, FD_MAX_BYTES);
1757     + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1758     + }
1759     /*
1760     * Call vectorized fileio functions to map struct scatterlist
1761     * physical memory addresses to struct iovec virtual memory.
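
The new check fails oversized commands up front rather than letting a single vfs_writev()/vfs_readv() call run out of iovecs partway through the I/O. Note the leading space added to the second string fragment above, without which the concatenated message would read "due toFD_MAX_BYTES". A sketch of the early bounds check (the byte limit and errno are stand-ins for FD_MAX_BYTES and the TCM sense code):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_IO_BYTES (2048u * 4096u)  /* hypothetical iovec budget */

    static int execute_rw(unsigned int data_length)
    {
        if (data_length > MAX_IO_BYTES) {
            fprintf(stderr, "I/O of %u bytes exceeds %u byte limit\n",
                    data_length, MAX_IO_BYTES);
            return -EFBIG;
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", execute_rw(4096));              /* accepted */
        printf("%d\n", execute_rw(MAX_IO_BYTES + 1));  /* rejected */
        return 0;
    }
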
1762     diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
1763     index aa1620abec6d..b358b3d6c201 100644
1764     --- a/drivers/target/target_core_iblock.c
1765     +++ b/drivers/target/target_core_iblock.c
1766     @@ -122,7 +122,7 @@ static int iblock_configure_device(struct se_device *dev)
1767     q = bdev_get_queue(bd);
1768    
1769     dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
1770     - dev->dev_attrib.hw_max_sectors = UINT_MAX;
1771     + dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
1772     dev->dev_attrib.hw_queue_depth = q->nr_requests;
1773    
1774     /*
1775     diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
1776     index 0ef75fb0ecba..92e6c510e5d0 100644
1777     --- a/drivers/target/target_core_sbc.c
1778     +++ b/drivers/target/target_core_sbc.c
1779     @@ -561,21 +561,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
1780     if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
1781     unsigned long long end_lba;
1782    
1783     - if (sectors > dev->dev_attrib.fabric_max_sectors) {
1784     - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
1785     - " big sectors %u exceeds fabric_max_sectors:"
1786     - " %u\n", cdb[0], sectors,
1787     - dev->dev_attrib.fabric_max_sectors);
1788     - return TCM_INVALID_CDB_FIELD;
1789     - }
1790     - if (sectors > dev->dev_attrib.hw_max_sectors) {
1791     - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
1792     - " big sectors %u exceeds backend hw_max_sectors:"
1793     - " %u\n", cdb[0], sectors,
1794     - dev->dev_attrib.hw_max_sectors);
1795     - return TCM_INVALID_CDB_FIELD;
1796     - }
1797     -
1798     end_lba = dev->transport->get_blocks(dev) + 1;
1799     if (cmd->t_task_lba + sectors > end_lba) {
1800     pr_err("cmd exceeds last lba %llu "
1801     diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
1802     index 34254b2ec466..9998ae23cc7c 100644
1803     --- a/drivers/target/target_core_spc.c
1804     +++ b/drivers/target/target_core_spc.c
1805     @@ -444,7 +444,6 @@ static sense_reason_t
1806     spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
1807     {
1808     struct se_device *dev = cmd->se_dev;
1809     - u32 max_sectors;
1810     int have_tp = 0;
1811    
1812     /*
1813     @@ -469,9 +468,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
1814     /*
1815     * Set MAXIMUM TRANSFER LENGTH
1816     */
1817     - max_sectors = min(dev->dev_attrib.fabric_max_sectors,
1818     - dev->dev_attrib.hw_max_sectors);
1819     - put_unaligned_be32(max_sectors, &buf[8]);
1820     + put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
1821    
1822     /*
1823     * Set OPTIMAL TRANSFER LENGTH
1824     diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
1825     index 962c7e3c3baa..fb97bc0b80e7 100644
1826     --- a/drivers/vhost/scsi.c
1827     +++ b/drivers/vhost/scsi.c
1828     @@ -820,6 +820,23 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
1829     return 0;
1830     }
1831    
1832     +static int vhost_scsi_to_tcm_attr(int attr)
1833     +{
1834     + switch (attr) {
1835     + case VIRTIO_SCSI_S_SIMPLE:
1836     + return MSG_SIMPLE_TAG;
1837     + case VIRTIO_SCSI_S_ORDERED:
1838     + return MSG_ORDERED_TAG;
1839     + case VIRTIO_SCSI_S_HEAD:
1840     + return MSG_HEAD_TAG;
1841     + case VIRTIO_SCSI_S_ACA:
1842     + return MSG_ACA_TAG;
1843     + default:
1844     + break;
1845     + }
1846     + return MSG_SIMPLE_TAG;
1847     +}
1848     +
1849     static void tcm_vhost_submission_work(struct work_struct *work)
1850     {
1851     struct tcm_vhost_cmd *tv_cmd =
1852     @@ -846,9 +863,9 @@ static void tcm_vhost_submission_work(struct work_struct *work)
1853     rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
1854     tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
1855     tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
1856     - tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
1857     - 0, sg_ptr, tv_cmd->tvc_sgl_count,
1858     - sg_bidi_ptr, sg_no_bidi);
1859     + vhost_scsi_to_tcm_attr(tv_cmd->tvc_task_attr),
1860     + tv_cmd->tvc_data_direction, 0, sg_ptr,
1861     + tv_cmd->tvc_sgl_count, sg_bidi_ptr, sg_no_bidi);
1862     if (rc < 0) {
1863     transport_send_check_condition_and_sense(se_cmd,
1864     TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
1865     @@ -1150,6 +1167,7 @@ static int vhost_scsi_set_endpoint(
1866     struct vhost_scsi *vs,
1867     struct vhost_scsi_target *t)
1868     {
1869     + struct se_portal_group *se_tpg;
1870     struct tcm_vhost_tport *tv_tport;
1871     struct tcm_vhost_tpg *tv_tpg;
1872     struct tcm_vhost_tpg **vs_tpg;
1873     @@ -1197,6 +1215,21 @@ static int vhost_scsi_set_endpoint(
1874     ret = -EEXIST;
1875     goto out;
1876     }
1877     + /*
1878     + * In order to ensure individual vhost-scsi configfs
1879     + * groups cannot be removed while in use by vhost ioctl,
1880     + * go ahead and take an explicit se_tpg->tpg_group.cg_item
1881     + * dependency now.
1882     + */
1883     + se_tpg = &tv_tpg->se_tpg;
1884     + ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
1885     + &se_tpg->tpg_group.cg_item);
1886     + if (ret) {
1887     + pr_warn("configfs_depend_item() failed: %d\n", ret);
1888     + kfree(vs_tpg);
1889     + mutex_unlock(&tv_tpg->tv_tpg_mutex);
1890     + goto out;
1891     + }
1892     tv_tpg->tv_tpg_vhost_count++;
1893     tv_tpg->vhost_scsi = vs;
1894     vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
1895     @@ -1240,6 +1273,7 @@ static int vhost_scsi_clear_endpoint(
1896     struct vhost_scsi *vs,
1897     struct vhost_scsi_target *t)
1898     {
1899     + struct se_portal_group *se_tpg;
1900     struct tcm_vhost_tport *tv_tport;
1901     struct tcm_vhost_tpg *tv_tpg;
1902     struct vhost_virtqueue *vq;
1903     @@ -1288,6 +1322,13 @@ static int vhost_scsi_clear_endpoint(
1904     vs->vs_tpg[target] = NULL;
1905     match = true;
1906     mutex_unlock(&tv_tpg->tv_tpg_mutex);
1907     + /*
1908     + * Release se_tpg->tpg_group.cg_item configfs dependency now
1909     + * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1910     + */
1911     + se_tpg = &tv_tpg->se_tpg;
1912     + configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
1913     + &se_tpg->tpg_group.cg_item);
1914     }
1915     if (match) {
1916     for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
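
Two independent fixes land here: the new switch maps virtio-scsi task attributes onto TCM tags with a safe MSG_SIMPLE_TAG default, and configfs_depend_item()/configfs_undepend_item() pin the TPG's configfs group across set/clear endpoint so it cannot be removed while a vhost ioctl is using it. A refcount sketch of the pin/unpin pairing (the counter stands in for the configfs dependency):

    #include <errno.h>
    #include <stdio.h>

    static int tpg_pins;  /* stand-in for the configfs item dependency */

    static int set_endpoint(void)
    {
        tpg_pins++;    /* pin: group removal now fails until unpinned */
        return 0;
    }

    static void clear_endpoint(void)
    {
        tpg_pins--;    /* unpin: group shutdown may proceed */
    }

    static int remove_group(void)
    {
        return tpg_pins ? -EBUSY : 0;
    }

    int main(void)
    {
        set_endpoint();
        printf("remove while pinned: %d\n", remove_group());  /* -EBUSY */
        clear_endpoint();
        printf("remove after unpin:  %d\n", remove_group());  /* 0 */
        return 0;
    }
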
1917     diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
1918     index 725e87538c98..615c5079db7c 100644
1919     --- a/fs/nfs/direct.c
1920     +++ b/fs/nfs/direct.c
1921     @@ -123,6 +123,12 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
1922     */
1923     ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
1924     {
1925     + struct inode *inode = iocb->ki_filp->f_mapping->host;
1926     +
1927     + /* only swap files are expected to call nfs_direct_IO */
1928     + if (!IS_SWAPFILE(inode))
1929     + return 0;
1930     +
1931     #ifndef CONFIG_NFS_SWAP
1932     dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
1933     iocb->ki_filp->f_path.dentry->d_name.name,
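
With the added check, nfs_direct_IO() becomes a no-op returning 0 for anything that is not swap-over-NFS, instead of attempting a direct I/O path only the swap code is prepared for. A sketch of the early-out shape (IS_SWAPFILE modeled as a plain flag):

    #include <stdbool.h>
    #include <stdio.h>

    struct inode { bool is_swapfile; };

    static long direct_io(const struct inode *inode, long nbytes)
    {
        /* Only the swap path is expected here; others get a no-op 0. */
        if (!inode->is_swapfile)
            return 0;
        return nbytes;  /* pretend the transfer completed */
    }

    int main(void)
    {
        struct inode swap = { true }, plain = { false };

        printf("%ld %ld\n", direct_io(&plain, 4096), direct_io(&swap, 4096));
        return 0;
    }
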
1934     diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
1935     index 519833d0457e..5f8d5ffdad8f 100644
1936     --- a/fs/nfs/nfs4client.c
1937     +++ b/fs/nfs/nfs4client.c
1938     @@ -462,7 +462,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
1939     prev = pos;
1940    
1941     status = nfs_wait_client_init_complete(pos);
1942     - if (status == 0) {
1943     + if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
1944     nfs4_schedule_lease_recovery(pos);
1945     status = nfs4_wait_clnt_recover(pos);
1946     }
1947     diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
1948     index 3ba30825f387..66c8c2fe86b7 100644
1949     --- a/fs/pstore/inode.c
1950     +++ b/fs/pstore/inode.c
1951     @@ -178,6 +178,8 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry)
1952     if (p->psi->erase)
1953     p->psi->erase(p->type, p->id, p->count,
1954     dentry->d_inode->i_ctime, p->psi);
1955     + else
1956     + return -EPERM;
1957    
1958     return simple_unlink(dir, dentry);
1959     }
1960     @@ -334,9 +336,8 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
1961    
1962     mutex_lock(&root->d_inode->i_mutex);
1963    
1964     - rc = -ENOSPC;
1965     dentry = d_alloc_name(root, name);
1966     - if (IS_ERR(dentry))
1967     + if (!dentry)
1968     goto fail_lockedalloc;
1969    
1970     memcpy(private->data, data, size);
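
The second hunk fixes a NULL-vs-ERR_PTR mix-up: d_alloc_name() reports failure with NULL, so the old IS_ERR() test could never fire and allocation failures fell through. A user-space sketch of why the two error conventions must be matched to the API (the IS_ERR logic is modeled on the kernel's err.h):

    #include <stdio.h>

    #define MAX_ERRNO 4095UL

    /* Kernel-style check: "error" means a pointer in the top errno range. */
    static int is_err(const void *p)
    {
        return (unsigned long)p >= -MAX_ERRNO;
    }

    int main(void)
    {
        void *null_style = NULL;  /* how d_alloc_name() signals failure */

        /* IS_ERR() misses a NULL return; the !ptr test is the fix. */
        printf("is_err(NULL)=%d, !ptr=%d\n",
               is_err(null_style), null_style == NULL);
        return 0;
    }
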
1971     diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
1972     index 42d5911c7e29..d3d37142bd93 100644
1973     --- a/fs/pstore/ram.c
1974     +++ b/fs/pstore/ram.c
1975     @@ -92,6 +92,7 @@ struct ramoops_context {
1976     struct persistent_ram_ecc_info ecc_info;
1977     unsigned int max_dump_cnt;
1978     unsigned int dump_write_cnt;
1979     + /* the *_read_cnt fields need clearing in ramoops_pstore_open */
1980     unsigned int dump_read_cnt;
1981     unsigned int console_read_cnt;
1982     unsigned int ftrace_read_cnt;
1983     @@ -107,6 +108,7 @@ static int ramoops_pstore_open(struct pstore_info *psi)
1984    
1985     cxt->dump_read_cnt = 0;
1986     cxt->console_read_cnt = 0;
1987     + cxt->ftrace_read_cnt = 0;
1988     return 0;
1989     }
1990    
1991     @@ -123,13 +125,15 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], uint *c, uint max,
1992     return NULL;
1993    
1994     prz = przs[i];
1995     + if (!prz)
1996     + return NULL;
1997    
1998     - if (update) {
1999     - /* Update old/shadowed buffer. */
2000     + /* Update old/shadowed buffer. */
2001     + if (update)
2002     persistent_ram_save_old(prz);
2003     - if (!persistent_ram_old_size(prz))
2004     - return NULL;
2005     - }
2006     +
2007     + if (!persistent_ram_old_size(prz))
2008     + return NULL;
2009    
2010     *typep = type;
2011     *id = i;
2012     @@ -415,7 +419,6 @@ static int ramoops_probe(struct platform_device *pdev)
2013     if (!is_power_of_2(pdata->ftrace_size))
2014     pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
2015    
2016     - cxt->dump_read_cnt = 0;
2017     cxt->size = pdata->mem_size;
2018     cxt->phys_addr = pdata->mem_address;
2019     cxt->memtype = pdata->mem_type;
2020     diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
2021     index 6ff97553331b..bda61a759b68 100644
2022     --- a/fs/pstore/ram_core.c
2023     +++ b/fs/pstore/ram_core.c
2024     @@ -46,7 +46,7 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
2025     }
2026    
2027     /* increase and wrap the start pointer, returning the old value */
2028     -static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
2029     +static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
2030     {
2031     int old;
2032     int new;
2033     @@ -62,7 +62,7 @@ static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
2034     }
2035    
2036     /* increase the size counter until it hits the max size */
2037     -static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
2038     +static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
2039     {
2040     size_t old;
2041     size_t new;
2042     @@ -78,6 +78,53 @@ static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
2043     } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
2044     }
2045    
2046     +static DEFINE_RAW_SPINLOCK(buffer_lock);
2047     +
2048     +/* increase and wrap the start pointer, returning the old value */
2049     +static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
2050     +{
2051     + int old;
2052     + int new;
2053     + unsigned long flags;
2054     +
2055     + raw_spin_lock_irqsave(&buffer_lock, flags);
2056     +
2057     + old = atomic_read(&prz->buffer->start);
2058     + new = old + a;
2059     + while (unlikely(new > prz->buffer_size))
2060     + new -= prz->buffer_size;
2061     + atomic_set(&prz->buffer->start, new);
2062     +
2063     + raw_spin_unlock_irqrestore(&buffer_lock, flags);
2064     +
2065     + return old;
2066     +}
2067     +
2068     +/* increase the size counter until it hits the max size */
2069     +static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
2070     +{
2071     + size_t old;
2072     + size_t new;
2073     + unsigned long flags;
2074     +
2075     + raw_spin_lock_irqsave(&buffer_lock, flags);
2076     +
2077     + old = atomic_read(&prz->buffer->size);
2078     + if (old == prz->buffer_size)
2079     + goto exit;
2080     +
2081     + new = old + a;
2082     + if (new > prz->buffer_size)
2083     + new = prz->buffer_size;
2084     + atomic_set(&prz->buffer->size, new);
2085     +
2086     +exit:
2087     + raw_spin_unlock_irqrestore(&buffer_lock, flags);
2088     +}
2089     +
2090     +static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
2091     +static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
2092     +
2093     static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
2094     uint8_t *data, size_t len, uint8_t *ecc)
2095     {
2096     @@ -379,6 +426,9 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
2097     return NULL;
2098     }
2099    
2100     + buffer_start_add = buffer_start_add_locked;
2101     + buffer_size_add = buffer_size_add_locked;
2102     +
2103     if (memtype)
2104     va = ioremap(start, size);
2105     else
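
The core of this fix is strategy selection at init time: the start/size accounting defaults to the cmpxchg-based helpers, and persistent_ram_iomap() repoints the buffer_start_add/buffer_size_add function pointers at spinlocked versions, since ioremapped device memory may not support the atomic read-modify-write the lock-free helpers rely on. A compact sketch of that dispatch:

    #include <stdio.h>

    static int add_atomic(int v) { puts("lock-free path"); return v + 1; }
    static int add_locked(int v) { puts("spinlocked path"); return v + 1; }

    /* Default to the cmpxchg-style path; switched once during init. */
    static int (*buffer_add)(int) = add_atomic;

    static void init_iomem_backend(void)
    {
        /* ioremapped device memory may not support atomic RMW ops. */
        buffer_add = add_locked;
    }

    int main(void)
    {
        buffer_add(1);
        init_iomem_backend();
        buffer_add(2);
        return 0;
    }
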
2106     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2107     index c2f9d6ca7e5e..16730a9c8cac 100644
2108     --- a/kernel/workqueue.c
2109     +++ b/kernel/workqueue.c
2110     @@ -1934,17 +1934,13 @@ static void pool_mayday_timeout(unsigned long __pool)
2111     * spin_lock_irq(pool->lock) which may be released and regrabbed
2112     * multiple times. Does GFP_KERNEL allocations. Called only from
2113     * manager.
2114     - *
2115     - * RETURNS:
2116     - * %false if no action was taken and pool->lock stayed locked, %true
2117     - * otherwise.
2118     */
2119     -static bool maybe_create_worker(struct worker_pool *pool)
2120     +static void maybe_create_worker(struct worker_pool *pool)
2121     __releases(&pool->lock)
2122     __acquires(&pool->lock)
2123     {
2124     if (!need_to_create_worker(pool))
2125     - return false;
2126     + return;
2127     restart:
2128     spin_unlock_irq(&pool->lock);
2129    
2130     @@ -1961,7 +1957,7 @@ restart:
2131     start_worker(worker);
2132     if (WARN_ON_ONCE(need_to_create_worker(pool)))
2133     goto restart;
2134     - return true;
2135     + return;
2136     }
2137    
2138     if (!need_to_create_worker(pool))
2139     @@ -1978,7 +1974,7 @@ restart:
2140     spin_lock_irq(&pool->lock);
2141     if (need_to_create_worker(pool))
2142     goto restart;
2143     - return true;
2144     + return;
2145     }
2146    
2147     /**
2148     @@ -1991,15 +1987,9 @@ restart:
2149     * LOCKING:
2150     * spin_lock_irq(pool->lock) which may be released and regrabbed
2151     * multiple times. Called only from manager.
2152     - *
2153     - * RETURNS:
2154     - * %false if no action was taken and pool->lock stayed locked, %true
2155     - * otherwise.
2156     */
2157     -static bool maybe_destroy_workers(struct worker_pool *pool)
2158     +static void maybe_destroy_workers(struct worker_pool *pool)
2159     {
2160     - bool ret = false;
2161     -
2162     while (too_many_workers(pool)) {
2163     struct worker *worker;
2164     unsigned long expires;
2165     @@ -2013,10 +2003,7 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
2166     }
2167    
2168     destroy_worker(worker);
2169     - ret = true;
2170     }
2171     -
2172     - return ret;
2173     }
2174    
2175     /**
2176     @@ -2036,13 +2023,14 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
2177     * multiple times. Does GFP_KERNEL allocations.
2178     *
2179     * RETURNS:
2180     - * spin_lock_irq(pool->lock) which may be released and regrabbed
2181     - * multiple times. Does GFP_KERNEL allocations.
2182     + * %false if the pool doesn't need management and the caller can safely
2183     + * start processing works, %true if management function was performed and
2184     + * the conditions that the caller verified before calling the function may
2185     + * no longer be true.
2186     */
2187     static bool manage_workers(struct worker *worker)
2188     {
2189     struct worker_pool *pool = worker->pool;
2190     - bool ret = false;
2191    
2192     /*
2193     * Managership is governed by two mutexes - manager_arb and
2194     @@ -2066,7 +2054,7 @@ static bool manage_workers(struct worker *worker)
2195     * manager_mutex.
2196     */
2197     if (!mutex_trylock(&pool->manager_arb))
2198     - return ret;
2199     + return false;
2200    
2201     /*
2202     * With manager arbitration won, manager_mutex would be free in
2203     @@ -2076,7 +2064,6 @@ static bool manage_workers(struct worker *worker)
2204     spin_unlock_irq(&pool->lock);
2205     mutex_lock(&pool->manager_mutex);
2206     spin_lock_irq(&pool->lock);
2207     - ret = true;
2208     }
2209    
2210     pool->flags &= ~POOL_MANAGE_WORKERS;
2211     @@ -2085,12 +2072,12 @@ static bool manage_workers(struct worker *worker)
2212     * Destroy and then create so that may_start_working() is true
2213     * on return.
2214     */
2215     - ret |= maybe_destroy_workers(pool);
2216     - ret |= maybe_create_worker(pool);
2217     + maybe_destroy_workers(pool);
2218     + maybe_create_worker(pool);
2219    
2220     mutex_unlock(&pool->manager_mutex);
2221     mutex_unlock(&pool->manager_arb);
2222     - return ret;
2223     + return true;
2224     }
2225    
2226     /**
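
After this change the return value of manage_workers() means "management ran, so pool->lock may have been dropped and your earlier checks may be stale" rather than "workers were actually created or destroyed", which per the new RETURNS comment is the only distinction its caller needs. A toy sketch of that contract:

    #include <stdbool.h>
    #include <stdio.h>

    static bool managed_once;

    /* Returns true iff management ran, i.e. the caller's earlier
     * checks may no longer hold and must be re-verified. */
    static bool manage_workers(void)
    {
        if (managed_once)
            return false;   /* no action taken: caller's state intact */
        managed_once = true; /* ...create/destroy workers here... */
        return true;
    }

    int main(void)
    {
        while (manage_workers())
            ;               /* management ran: re-check from the top */
        puts("safe to process work");
        return 0;
    }
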
2227     diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
2228     index 62aebed7c6e2..dc200bf831ae 100644
2229     --- a/net/wireless/nl80211.c
2230     +++ b/net/wireless/nl80211.c
2231     @@ -2629,6 +2629,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
2232     if (!rdev->ops->get_key)
2233     return -EOPNOTSUPP;
2234    
2235     + if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
2236     + return -ENOENT;
2237     +
2238     msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2239     if (!msg)
2240     return -ENOMEM;
2241     @@ -2648,10 +2651,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
2242     nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
2243     goto nla_put_failure;
2244    
2245     - if (pairwise && mac_addr &&
2246     - !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
2247     - return -ENOENT;
2248     -
2249     err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
2250     get_key_callback);
2251    
2252     @@ -2822,7 +2821,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
2253     wdev_lock(dev->ieee80211_ptr);
2254     err = nl80211_key_allowed(dev->ieee80211_ptr);
2255    
2256     - if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr &&
2257     + if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
2258     !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
2259     err = -ENOENT;
2260    
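
The get_key change does two things: the -ENOENT condition is corrected to apply to group (not pairwise) keys, and it moves ahead of nlmsg_new(), whose allocation the old placement leaked on the error path; del_key gains the matching group-key condition. A sketch of validating before allocating:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int get_key(int pairwise, int have_addr, int ibss_rsn)
    {
        void *msg;

        /* Validate first: bailing out after malloc() would leak msg. */
        if (!pairwise && have_addr && !ibss_rsn)
            return -ENOENT;

        msg = malloc(128);
        if (!msg)
            return -ENOMEM;
        /* ... fill and send the reply ... */
        free(msg);
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", get_key(0, 1, 0), get_key(1, 1, 0));
        return 0;
    }
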
2261     diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c
2262     index dbc550716790..f60d81497f28 100644
2263     --- a/sound/core/seq/seq_dummy.c
2264     +++ b/sound/core/seq/seq_dummy.c
2265     @@ -82,36 +82,6 @@ struct snd_seq_dummy_port {
2266     static int my_client = -1;
2267    
2268     /*
2269     - * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events
2270     - * to subscribers.
2271     - * Note: this callback is called only after all subscribers are removed.
2272     - */
2273     -static int
2274     -dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info)
2275     -{
2276     - struct snd_seq_dummy_port *p;
2277     - int i;
2278     - struct snd_seq_event ev;
2279     -
2280     - p = private_data;
2281     - memset(&ev, 0, sizeof(ev));
2282     - if (p->duplex)
2283     - ev.source.port = p->connect;
2284     - else
2285     - ev.source.port = p->port;
2286     - ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
2287     - ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
2288     - for (i = 0; i < 16; i++) {
2289     - ev.data.control.channel = i;
2290     - ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF;
2291     - snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
2292     - ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS;
2293     - snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
2294     - }
2295     - return 0;
2296     -}
2297     -
2298     -/*
2299     * event input callback - just redirect events to subscribers
2300     */
2301     static int
2302     @@ -175,7 +145,6 @@ create_port(int idx, int type)
2303     | SNDRV_SEQ_PORT_TYPE_PORT;
2304     memset(&pcb, 0, sizeof(pcb));
2305     pcb.owner = THIS_MODULE;
2306     - pcb.unuse = dummy_unuse;
2307     pcb.event_input = dummy_input;
2308     pcb.private_free = dummy_free;
2309     pcb.private_data = rec;
2310     diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
2311     index 5e5af898f7f8..412d90f7b256 100644
2312     --- a/sound/soc/codecs/wm8960.c
2313     +++ b/sound/soc/codecs/wm8960.c
2314     @@ -555,7 +555,7 @@ static struct {
2315     { 22050, 2 },
2316     { 24000, 2 },
2317     { 16000, 3 },
2318     - { 11250, 4 },
2319     + { 11025, 4 },
2320     { 12000, 4 },
2321     { 8000, 5 },
2322     };
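
The fix swaps 11250, which is not a real audio sample rate, for 11025 (44100 / 4), so the sample-rate lookup actually matches. A sketch of the lookup over the corrected rows (only the entries visible in the hunk are reproduced):

    #include <stdio.h>

    /* Only the rows shown in the hunk above; 11025 = 44100 / 4. */
    static const struct { unsigned int rate; int val; } alc_rates[] = {
        { 22050, 2 }, { 24000, 2 }, { 16000, 3 },
        { 11025, 4 }, { 12000, 4 }, {  8000, 5 },
    };

    static int rate_to_val(unsigned int rate)
    {
        unsigned int i;

        for (i = 0; i < sizeof(alc_rates) / sizeof(alc_rates[0]); i++)
            if (alc_rates[i].rate == rate)
                return alc_rates[i].val;
        return -1;  /* unsupported rate */
    }

    int main(void)
    {
        /* The old 11250 entry matched no standard rate and never hit. */
        printf("11025 -> %d, 11250 -> %d\n",
               rate_to_val(11025), rate_to_val(11250));
        return 0;
    }
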