Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0104-4.9.5-all-fixes.patch

Revision 2956
Mon Jul 24 12:03:46 2017 UTC by niro
File size: 177665 bytes
-added patches-4.9
1 niro 2956 diff --git a/Documentation/devicetree/bindings/mfd/tps65086.txt b/Documentation/devicetree/bindings/mfd/tps65086.txt
2     index d3705612a846..9cfa886fe99f 100644
3     --- a/Documentation/devicetree/bindings/mfd/tps65086.txt
4     +++ b/Documentation/devicetree/bindings/mfd/tps65086.txt
5     @@ -23,7 +23,7 @@ Required properties:
6     defined below.
7    
8     Optional regulator properties:
9     - - ti,regulator-step-size-25mv : This is applicable for buck[1,2,6], set this
10     + - ti,regulator-step-size-25mv : This is applicable for buck[1-6], set this
11     if the regulator is factory set with a 25mv
12     step voltage mapping.
13     - ti,regulator-decay : This is applicable for buck[1-6], set this if
14     diff --git a/Makefile b/Makefile
15     index 9175706bfe7f..2a8af8af7b27 100644
16     --- a/Makefile
17     +++ b/Makefile
18     @@ -1,6 +1,6 @@
19     VERSION = 4
20     PATCHLEVEL = 9
21     -SUBLEVEL = 4
22     +SUBLEVEL = 5
23     EXTRAVERSION =
24     NAME = Roaring Lionus
25    
26     diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
27     index 2e49bd252fe7..45bec627bae3 100644
28     --- a/arch/arm64/mm/hugetlbpage.c
29     +++ b/arch/arm64/mm/hugetlbpage.c
30     @@ -51,20 +51,8 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
31     *pgsize = PAGE_SIZE;
32     if (!pte_cont(pte))
33     return 1;
34     - if (!pgd_present(*pgd)) {
35     - VM_BUG_ON(!pgd_present(*pgd));
36     - return 1;
37     - }
38     pud = pud_offset(pgd, addr);
39     - if (!pud_present(*pud)) {
40     - VM_BUG_ON(!pud_present(*pud));
41     - return 1;
42     - }
43     pmd = pmd_offset(pud, addr);
44     - if (!pmd_present(*pmd)) {
45     - VM_BUG_ON(!pmd_present(*pmd));
46     - return 1;
47     - }
48     if ((pte_t *)pmd == ptep) {
49     *pgsize = PMD_SIZE;
50     return CONT_PMDS;
51     @@ -212,7 +200,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
52     ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
53     /* save the 1st pte to return */
54     pte = ptep_get_and_clear(mm, addr, cpte);
55     - for (i = 1; i < ncontig; ++i) {
56     + for (i = 1, addr += pgsize; i < ncontig; ++i, addr += pgsize) {
57     /*
58     * If HW_AFDBM is enabled, then the HW could
59     * turn on the dirty bit for any of the page
60     @@ -250,8 +238,8 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
61     pfn = pte_pfn(*cpte);
62     ncontig = find_num_contig(vma->vm_mm, addr, cpte,
63     *cpte, &pgsize);
64     - for (i = 0; i < ncontig; ++i, ++cpte) {
65     - changed = ptep_set_access_flags(vma, addr, cpte,
66     + for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
67     + changed |= ptep_set_access_flags(vma, addr, cpte,
68     pfn_pte(pfn,
69     hugeprot),
70     dirty);
71     @@ -273,7 +261,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
72    
73     cpte = huge_pte_offset(mm, addr);
74     ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
75     - for (i = 0; i < ncontig; ++i, ++cpte)
76     + for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
77     ptep_set_wrprotect(mm, addr, cpte);
78     } else {
79     ptep_set_wrprotect(mm, addr, ptep);
80     @@ -291,7 +279,7 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
81     cpte = huge_pte_offset(vma->vm_mm, addr);
82     ncontig = find_num_contig(vma->vm_mm, addr, cpte,
83     *cpte, &pgsize);
84     - for (i = 0; i < ncontig; ++i, ++cpte)
85     + for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
86     ptep_clear_flush(vma, addr, cpte);
87     } else {
88     ptep_clear_flush(vma, addr, ptep);
89     diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
90     index e407af2b7333..2e6a823fa502 100644
91     --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
92     +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
93     @@ -70,7 +70,9 @@
94    
95     #define HPTE_V_SSIZE_SHIFT 62
96     #define HPTE_V_AVPN_SHIFT 7
97     +#define HPTE_V_COMMON_BITS ASM_CONST(0x000fffffffffffff)
98     #define HPTE_V_AVPN ASM_CONST(0x3fffffffffffff80)
99     +#define HPTE_V_AVPN_3_0 ASM_CONST(0x000fffffffffff80)
100     #define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
101     #define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & 0xffffffffffffff80UL))
102     #define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)
103     @@ -80,14 +82,16 @@
104     #define HPTE_V_VALID ASM_CONST(0x0000000000000001)
105    
106     /*
107     - * ISA 3.0 have a different HPTE format.
108     + * ISA 3.0 has a different HPTE format.
109     */
110     #define HPTE_R_3_0_SSIZE_SHIFT 58
111     +#define HPTE_R_3_0_SSIZE_MASK (3ull << HPTE_R_3_0_SSIZE_SHIFT)
112     #define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
113     #define HPTE_R_TS ASM_CONST(0x4000000000000000)
114     #define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000)
115     #define HPTE_R_RPN_SHIFT 12
116     #define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
117     +#define HPTE_R_RPN_3_0 ASM_CONST(0x01fffffffffff000)
118     #define HPTE_R_PP ASM_CONST(0x0000000000000003)
119     #define HPTE_R_PPP ASM_CONST(0x8000000000000003)
120     #define HPTE_R_N ASM_CONST(0x0000000000000004)
121     @@ -316,12 +320,43 @@ static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
122     */
123     v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
124     v <<= HPTE_V_AVPN_SHIFT;
125     - if (!cpu_has_feature(CPU_FTR_ARCH_300))
126     - v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
127     + v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
128     return v;
129     }
130    
131     /*
132     + * ISA v3.0 defines a new HPTE format, which differs from the old
133     + * format in having smaller AVPN and ARPN fields, and the B field
134     + * in the second dword instead of the first.
135     + */
136     +static inline unsigned long hpte_old_to_new_v(unsigned long v)
137     +{
138     + /* trim AVPN, drop B */
139     + return v & HPTE_V_COMMON_BITS;
140     +}
141     +
142     +static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
143     +{
144     + /* move B field from 1st to 2nd dword, trim ARPN */
145     + return (r & ~HPTE_R_3_0_SSIZE_MASK) |
146     + (((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
147     +}
148     +
149     +static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
150     +{
151     + /* insert B field */
152     + return (v & HPTE_V_COMMON_BITS) |
153     + ((r & HPTE_R_3_0_SSIZE_MASK) <<
154     + (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
155     +}
156     +
157     +static inline unsigned long hpte_new_to_old_r(unsigned long r)
158     +{
159     + /* clear out B field */
160     + return r & ~HPTE_R_3_0_SSIZE_MASK;
161     +}
162     +
163     +/*
164     * This function sets the AVPN and L fields of the HPTE appropriately
165     * using the base page size and actual page size.
166     */
167     @@ -341,12 +376,8 @@ static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
168     * aligned for the requested page size
169     */
170     static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
171     - int actual_psize, int ssize)
172     + int actual_psize)
173     {
174     -
175     - if (cpu_has_feature(CPU_FTR_ARCH_300))
176     - pa |= ((unsigned long) ssize) << HPTE_R_3_0_SSIZE_SHIFT;
177     -
178     /* A 4K page needs no special encoding */
179     if (actual_psize == MMU_PAGE_4K)
180     return pa & HPTE_R_RPN;
181     diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
182     index 6ca9a2ffaac7..35f5244782d9 100644
183     --- a/arch/powerpc/kernel/ibmebus.c
184     +++ b/arch/powerpc/kernel/ibmebus.c
185     @@ -180,6 +180,7 @@ static int ibmebus_create_device(struct device_node *dn)
186     static int ibmebus_create_devices(const struct of_device_id *matches)
187     {
188     struct device_node *root, *child;
189     + struct device *dev;
190     int ret = 0;
191    
192     root = of_find_node_by_path("/");
193     @@ -188,9 +189,12 @@ static int ibmebus_create_devices(const struct of_device_id *matches)
194     if (!of_match_node(matches, child))
195     continue;
196    
197     - if (bus_find_device(&ibmebus_bus_type, NULL, child,
198     - ibmebus_match_node))
199     + dev = bus_find_device(&ibmebus_bus_type, NULL, child,
200     + ibmebus_match_node);
201     + if (dev) {
202     + put_device(dev);
203     continue;
204     + }
205    
206     ret = ibmebus_create_device(child);
207     if (ret) {
208     @@ -262,6 +266,7 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
209     const char *buf, size_t count)
210     {
211     struct device_node *dn = NULL;
212     + struct device *dev;
213     char *path;
214     ssize_t rc = 0;
215    
216     @@ -269,8 +274,10 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
217     if (!path)
218     return -ENOMEM;
219    
220     - if (bus_find_device(&ibmebus_bus_type, NULL, path,
221     - ibmebus_match_path)) {
222     + dev = bus_find_device(&ibmebus_bus_type, NULL, path,
223     + ibmebus_match_path);
224     + if (dev) {
225     + put_device(dev);
226     printk(KERN_WARNING "%s: %s has already been probed\n",
227     __func__, path);
228     rc = -EEXIST;
229     @@ -307,6 +314,7 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus,
230     if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path,
231     ibmebus_match_path))) {
232     of_device_unregister(to_platform_device(dev));
233     + put_device(dev);
234    
235     kfree(path);
236     return count;
237     diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
238     index 83ddc0e171b0..ad9fd5245be2 100644
239     --- a/arch/powerpc/mm/hash_native_64.c
240     +++ b/arch/powerpc/mm/hash_native_64.c
241     @@ -221,13 +221,18 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
242     return -1;
243    
244     hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
245     - hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;
246     + hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
247    
248     if (!(vflags & HPTE_V_BOLTED)) {
249     DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
250     i, hpte_v, hpte_r);
251     }
252    
253     + if (cpu_has_feature(CPU_FTR_ARCH_300)) {
254     + hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
255     + hpte_v = hpte_old_to_new_v(hpte_v);
256     + }
257     +
258     hptep->r = cpu_to_be64(hpte_r);
259     /* Guarantee the second dword is visible before the valid bit */
260     eieio();
261     @@ -295,6 +300,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
262     vpn, want_v & HPTE_V_AVPN, slot, newpp);
263    
264     hpte_v = be64_to_cpu(hptep->v);
265     + if (cpu_has_feature(CPU_FTR_ARCH_300))
266     + hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
267     /*
268     * We need to invalidate the TLB always because hpte_remove doesn't do
269     * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
270     @@ -309,6 +316,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
271     native_lock_hpte(hptep);
272     /* recheck with locks held */
273     hpte_v = be64_to_cpu(hptep->v);
274     + if (cpu_has_feature(CPU_FTR_ARCH_300))
275     + hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
276     if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
277     !(hpte_v & HPTE_V_VALID))) {
278     ret = -1;
279     @@ -350,6 +359,8 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
280     for (i = 0; i < HPTES_PER_GROUP; i++) {
281     hptep = htab_address + slot;
282     hpte_v = be64_to_cpu(hptep->v);
283     + if (cpu_has_feature(CPU_FTR_ARCH_300))
284     + hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
285    
286     if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
287     /* HPTE matches */
288     @@ -409,6 +420,8 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
289     want_v = hpte_encode_avpn(vpn, bpsize, ssize);
290     native_lock_hpte(hptep);
291     hpte_v = be64_to_cpu(hptep->v);
292     + if (cpu_has_feature(CPU_FTR_ARCH_300))
293     + hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
294    
295     /*
296     * We need to invalidate the TLB always because hpte_remove doesn't do
297     @@ -467,6 +480,8 @@ static void native_hugepage_invalidate(unsigned long vsid,
298     want_v = hpte_encode_avpn(vpn, psize, ssize);
299     native_lock_hpte(hptep);
300     hpte_v = be64_to_cpu(hptep->v);
301     + if (cpu_has_feature(CPU_FTR_ARCH_300))
302     + hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
303    
304     /* Even if we miss, we need to invalidate the TLB */
305     if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
306     @@ -504,6 +519,10 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
307     /* Look at the 8 bit LP value */
308     unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
309    
310     + if (cpu_has_feature(CPU_FTR_ARCH_300)) {
311     + hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
312     + hpte_r = hpte_new_to_old_r(hpte_r);
313     + }
314     if (!(hpte_v & HPTE_V_LARGE)) {
315     size = MMU_PAGE_4K;
316     a_size = MMU_PAGE_4K;
317     @@ -512,11 +531,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
318     a_size = hpte_page_sizes[lp] >> 4;
319     }
320     /* This works for all page sizes, and for 256M and 1T segments */
321     - if (cpu_has_feature(CPU_FTR_ARCH_300))
322     - *ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT;
323     - else
324     - *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
325     -
326     + *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
327     shift = mmu_psize_defs[size].shift;
328    
329     avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
330     @@ -639,6 +654,9 @@ static void native_flush_hash_range(unsigned long number, int local)
331     want_v = hpte_encode_avpn(vpn, psize, ssize);
332     native_lock_hpte(hptep);
333     hpte_v = be64_to_cpu(hptep->v);
334     + if (cpu_has_feature(CPU_FTR_ARCH_300))
335     + hpte_v = hpte_new_to_old_v(hpte_v,
336     + be64_to_cpu(hptep->r));
337     if (!HPTE_V_COMPARE(hpte_v, want_v) ||
338     !(hpte_v & HPTE_V_VALID))
339     native_unlock_hpte(hptep);
340     diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
341     index 688b54517655..ebb7f46f0532 100644
342     --- a/arch/powerpc/mm/pgtable-radix.c
343     +++ b/arch/powerpc/mm/pgtable-radix.c
344     @@ -159,7 +159,7 @@ static void __init radix_init_pgtable(void)
345     * Allocate Partition table and process table for the
346     * host.
347     */
348     - BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 23), "Process table size too large.");
349     + BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 36), "Process table size too large.");
350     process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
351     /*
352     * Fill in the process table.
353     @@ -181,7 +181,7 @@ static void __init radix_init_partition_table(void)
354    
355     rts_field = radix__get_tree_size();
356    
357     - BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
358     + BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
359     partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
360     partition_tb->patb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) |
361     RADIX_PGD_INDEX_SIZE | PATB_HR);
362     diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
363     index d4b33dd2d9e7..dcdfee0cd4f2 100644
364     --- a/arch/powerpc/platforms/powernv/pci-ioda.c
365     +++ b/arch/powerpc/platforms/powernv/pci-ioda.c
366     @@ -145,7 +145,7 @@ static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
367     */
368     rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
369     OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
370     - if (rc != OPAL_SUCCESS)
371     + if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
372     pr_warn("%s: Error %lld unfreezing PHB#%d-PE#%d\n",
373     __func__, rc, phb->hose->global_number, pe_no);
374    
375     diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
376     index cb3c50328de8..cc2b281a3766 100644
377     --- a/arch/powerpc/platforms/ps3/htab.c
378     +++ b/arch/powerpc/platforms/ps3/htab.c
379     @@ -63,7 +63,7 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
380     vflags &= ~HPTE_V_SECONDARY;
381    
382     hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
383     - hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize, apsize, ssize) | rflags;
384     + hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize, apsize) | rflags;
385    
386     spin_lock_irqsave(&ps3_htab_lock, flags);
387    
388     diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
389     index aa35245d8d6d..f2c98f6c1c9c 100644
390     --- a/arch/powerpc/platforms/pseries/lpar.c
391     +++ b/arch/powerpc/platforms/pseries/lpar.c
392     @@ -145,7 +145,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
393     hpte_group, vpn, pa, rflags, vflags, psize);
394    
395     hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
396     - hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;
397     + hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
398    
399     if (!(vflags & HPTE_V_BOLTED))
400     pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
401     diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
402     index a39629206864..ed10b5bf9b93 100644
403     --- a/arch/x86/include/asm/cpufeatures.h
404     +++ b/arch/x86/include/asm/cpufeatures.h
405     @@ -311,4 +311,6 @@
406     #define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
407     #define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
408     #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
409     +#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
410     +
411     #endif /* _ASM_X86_CPUFEATURES_H */
412     diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
413     index 1e81a37c034e..1d3167269a67 100644
414     --- a/arch/x86/kernel/cpu/amd.c
415     +++ b/arch/x86/kernel/cpu/amd.c
416     @@ -20,6 +20,10 @@
417    
418     #include "cpu.h"
419    
420     +static const int amd_erratum_383[];
421     +static const int amd_erratum_400[];
422     +static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
423     +
424     /*
425     * nodes_per_socket: Stores the number of nodes per socket.
426     * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
427     @@ -305,20 +309,32 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
428    
429     /* get information required for multi-node processors */
430     if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
431     - u32 eax, ebx, ecx, edx;
432    
433     - cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
434     - node_id = ecx & 7;
435     + node_id = cpuid_ecx(0x8000001e) & 7;
436    
437     - /* get compute unit information */
438     - smp_num_siblings = ((ebx >> 8) & 3) + 1;
439     - c->x86_max_cores /= smp_num_siblings;
440     - c->cpu_core_id = ebx & 0xff;
441     + /*
442     + * We may have multiple LLCs if L3 caches exist, so check if we
443     + * have an L3 cache by looking at the L3 cache CPUID leaf.
444     + */
445     + if (cpuid_edx(0x80000006)) {
446     + if (c->x86 == 0x17) {
447     + /*
448     + * LLC is at the core complex level.
449     + * Core complex id is ApicId[3].
450     + */
451     + per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
452     + } else {
453     + /* LLC is at the node level. */
454     + per_cpu(cpu_llc_id, cpu) = node_id;
455     + }
456     + }
457     } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
458     u64 value;
459    
460     rdmsrl(MSR_FAM10H_NODE_ID, value);
461     node_id = value & 7;
462     +
463     + per_cpu(cpu_llc_id, cpu) = node_id;
464     } else
465     return;
466    
467     @@ -329,9 +345,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
468     set_cpu_cap(c, X86_FEATURE_AMD_DCM);
469     cus_per_node = c->x86_max_cores / nodes_per_socket;
470    
471     - /* store NodeID, use llc_shared_map to store sibling info */
472     - per_cpu(cpu_llc_id, cpu) = node_id;
473     -
474     /* core id has to be in the [0 .. cores_per_node - 1] range */
475     c->cpu_core_id %= cus_per_node;
476     }
477     @@ -356,15 +369,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
478     /* use socket ID also for last level cache */
479     per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
480     amd_get_topology(c);
481     -
482     - /*
483     - * Fix percpu cpu_llc_id here as LLC topology is different
484     - * for Fam17h systems.
485     - */
486     - if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
487     - return;
488     -
489     - per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
490     #endif
491     }
492    
493     @@ -585,11 +589,16 @@ static void early_init_amd(struct cpuinfo_x86 *c)
494     /* F16h erratum 793, CVE-2013-6885 */
495     if (c->x86 == 0x16 && c->x86_model <= 0xf)
496     msr_set_bit(MSR_AMD64_LS_CFG, 15);
497     -}
498    
499     -static const int amd_erratum_383[];
500     -static const int amd_erratum_400[];
501     -static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
502     + /*
503     + * Check whether the machine is affected by erratum 400. This is
504     + * used to select the proper idle routine and to enable the check
505     + * whether the machine is affected in arch_post_acpi_init(), which
506     + * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
507     + */
508     + if (cpu_has_amd_erratum(c, amd_erratum_400))
509     + set_cpu_bug(c, X86_BUG_AMD_E400);
510     +}
511    
512     static void init_amd_k8(struct cpuinfo_x86 *c)
513     {
514     @@ -770,9 +779,6 @@ static void init_amd(struct cpuinfo_x86 *c)
515     if (c->x86 > 0x11)
516     set_cpu_cap(c, X86_FEATURE_ARAT);
517    
518     - if (cpu_has_amd_erratum(c, amd_erratum_400))
519     - set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
520     -
521     rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
522    
523     /* 3DNow or LM implies PREFETCHW */
524     diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
525     index dd62708c6a67..023c7bfa24df 100644
526     --- a/arch/x86/kernel/cpu/common.c
527     +++ b/arch/x86/kernel/cpu/common.c
528     @@ -1275,7 +1275,7 @@ static __init int setup_disablecpuid(char *arg)
529     {
530     int bit;
531    
532     - if (get_option(&arg, &bit) && bit < NCAPINTS*32)
533     + if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
534     setup_clear_cpu_cap(bit);
535     else
536     return 0;
537     diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
538     index 0888a879120f..8e10e72bf6ee 100644
539     --- a/arch/x86/kernel/process.c
540     +++ b/arch/x86/kernel/process.c
541     @@ -448,8 +448,7 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
542     if (x86_idle || boot_option_idle_override == IDLE_POLL)
543     return;
544    
545     - if (cpu_has_bug(c, X86_BUG_AMD_APIC_C1E)) {
546     - /* E400: APIC timer interrupt does not wake up CPU from C1e */
547     + if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
548     pr_info("using AMD E400 aware idle routine\n");
549     x86_idle = amd_e400_idle;
550     } else if (prefer_mwait_c1_over_halt(c)) {
551     diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
552     index a3ce9d260d68..9f676adcdfc2 100644
553     --- a/arch/x86/kvm/emulate.c
554     +++ b/arch/x86/kvm/emulate.c
555     @@ -171,6 +171,7 @@
556     #define NearBranch ((u64)1 << 52) /* Near branches */
557     #define No16 ((u64)1 << 53) /* No 16 bit operand */
558     #define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
559     +#define Aligned16 ((u64)1 << 55) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
560    
561     #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
562    
563     @@ -446,6 +447,26 @@ FOP_END;
564     FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
565     FOP_END;
566    
567     +/*
568     + * XXX: inoutclob user must know where the argument is being expanded.
569     + * Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
570     + */
571     +#define asm_safe(insn, inoutclob...) \
572     +({ \
573     + int _fault = 0; \
574     + \
575     + asm volatile("1:" insn "\n" \
576     + "2:\n" \
577     + ".pushsection .fixup, \"ax\"\n" \
578     + "3: movl $1, %[_fault]\n" \
579     + " jmp 2b\n" \
580     + ".popsection\n" \
581     + _ASM_EXTABLE(1b, 3b) \
582     + : [_fault] "+qm"(_fault) inoutclob ); \
583     + \
584     + _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
585     +})
586     +
587     static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
588     enum x86_intercept intercept,
589     enum x86_intercept_stage stage)
590     @@ -632,21 +653,24 @@ static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
591     * depending on whether they're AVX encoded or not.
592     *
593     * Also included is CMPXCHG16B which is not a vector instruction, yet it is
594     - * subject to the same check.
595     + * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
596     + * 512 bytes of data must be aligned to a 16 byte boundary.
597     */
598     -static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
599     +static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
600     {
601     if (likely(size < 16))
602     - return false;
603     + return 1;
604    
605     if (ctxt->d & Aligned)
606     - return true;
607     + return size;
608     else if (ctxt->d & Unaligned)
609     - return false;
610     + return 1;
611     else if (ctxt->d & Avx)
612     - return false;
613     + return 1;
614     + else if (ctxt->d & Aligned16)
615     + return 16;
616     else
617     - return true;
618     + return size;
619     }
620    
621     static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
622     @@ -704,7 +728,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
623     }
624     break;
625     }
626     - if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
627     + if (la & (insn_alignment(ctxt, size) - 1))
628     return emulate_gp(ctxt, 0);
629     return X86EMUL_CONTINUE;
630     bad:
631     @@ -791,6 +815,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
632     return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
633     }
634    
635     +static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
636     + struct segmented_address addr,
637     + void *data,
638     + unsigned int size)
639     +{
640     + int rc;
641     + ulong linear;
642     +
643     + rc = linearize(ctxt, addr, size, true, &linear);
644     + if (rc != X86EMUL_CONTINUE)
645     + return rc;
646     + return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
647     +}
648     +
649     /*
650     * Prefetch the remaining bytes of the instruction without crossing page
651     * boundary if they are not in fetch_cache yet.
652     @@ -1544,7 +1582,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
653     &ctxt->exception);
654     }
655    
656     -/* Does not support long mode */
657     static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
658     u16 selector, int seg, u8 cpl,
659     enum x86_transfer_type transfer,
660     @@ -1581,20 +1618,34 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
661    
662     rpl = selector & 3;
663    
664     - /* NULL selector is not valid for TR, CS and SS (except for long mode) */
665     - if ((seg == VCPU_SREG_CS
666     - || (seg == VCPU_SREG_SS
667     - && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
668     - || seg == VCPU_SREG_TR)
669     - && null_selector)
670     - goto exception;
671     -
672     /* TR should be in GDT only */
673     if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
674     goto exception;
675    
676     - if (null_selector) /* for NULL selector skip all following checks */
677     + /* NULL selector is not valid for TR, CS and (except for long mode) SS */
678     + if (null_selector) {
679     + if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
680     + goto exception;
681     +
682     + if (seg == VCPU_SREG_SS) {
683     + if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
684     + goto exception;
685     +
686     + /*
687     + * ctxt->ops->set_segment expects the CPL to be in
688     + * SS.DPL, so fake an expand-up 32-bit data segment.
689     + */
690     + seg_desc.type = 3;
691     + seg_desc.p = 1;
692     + seg_desc.s = 1;
693     + seg_desc.dpl = cpl;
694     + seg_desc.d = 1;
695     + seg_desc.g = 1;
696     + }
697     +
698     + /* Skip all following checks */
699     goto load;
700     + }
701    
702     ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
703     if (ret != X86EMUL_CONTINUE)
704     @@ -1710,6 +1761,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
705     u16 selector, int seg)
706     {
707     u8 cpl = ctxt->ops->cpl(ctxt);
708     +
709     + /*
710     + * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
711     + * they can load it at CPL<3 (Intel's manual says only LSS can,
712     + * but it's wrong).
713     + *
714     + * However, the Intel manual says that putting IST=1/DPL=3 in
715     + * an interrupt gate will result in SS=3 (the AMD manual instead
716     + * says it doesn't), so allow SS=3 in __load_segment_descriptor
717     + * and only forbid it here.
718     + */
719     + if (seg == VCPU_SREG_SS && selector == 3 &&
720     + ctxt->mode == X86EMUL_MODE_PROT64)
721     + return emulate_exception(ctxt, GP_VECTOR, 0, true);
722     +
723     return __load_segment_descriptor(ctxt, selector, seg, cpl,
724     X86_TRANSFER_NONE, NULL);
725     }
726     @@ -3658,8 +3724,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
727     }
728     /* Disable writeback. */
729     ctxt->dst.type = OP_NONE;
730     - return segmented_write(ctxt, ctxt->dst.addr.mem,
731     - &desc_ptr, 2 + ctxt->op_bytes);
732     + return segmented_write_std(ctxt, ctxt->dst.addr.mem,
733     + &desc_ptr, 2 + ctxt->op_bytes);
734     }
735    
736     static int em_sgdt(struct x86_emulate_ctxt *ctxt)
737     @@ -3842,6 +3908,131 @@ static int em_movsxd(struct x86_emulate_ctxt *ctxt)
738     return X86EMUL_CONTINUE;
739     }
740    
741     +static int check_fxsr(struct x86_emulate_ctxt *ctxt)
742     +{
743     + u32 eax = 1, ebx, ecx = 0, edx;
744     +
745     + ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
746     + if (!(edx & FFL(FXSR)))
747     + return emulate_ud(ctxt);
748     +
749     + if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
750     + return emulate_nm(ctxt);
751     +
752     + /*
753     + * Don't emulate a case that should never be hit, instead of working
754     + * around a lack of fxsave64/fxrstor64 on old compilers.
755     + */
756     + if (ctxt->mode >= X86EMUL_MODE_PROT64)
757     + return X86EMUL_UNHANDLEABLE;
758     +
759     + return X86EMUL_CONTINUE;
760     +}
761     +
762     +/*
763     + * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
764     + * 1) 16 bit mode
765     + * 2) 32 bit mode
766     + * - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs
767     + * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
768     + * save and restore
769     + * 3) 64-bit mode with REX.W prefix
770     + * - like (2), but XMM 8-15 are being saved and restored
771     + * 4) 64-bit mode without REX.W prefix
772     + * - like (3), but FIP and FDP are 64 bit
773     + *
774     + * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
775     + * desired result. (4) is not emulated.
776     + *
777     + * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
778     + * and FPU DS) should match.
779     + */
780     +static int em_fxsave(struct x86_emulate_ctxt *ctxt)
781     +{
782     + struct fxregs_state fx_state;
783     + size_t size;
784     + int rc;
785     +
786     + rc = check_fxsr(ctxt);
787     + if (rc != X86EMUL_CONTINUE)
788     + return rc;
789     +
790     + ctxt->ops->get_fpu(ctxt);
791     +
792     + rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
793     +
794     + ctxt->ops->put_fpu(ctxt);
795     +
796     + if (rc != X86EMUL_CONTINUE)
797     + return rc;
798     +
799     + if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
800     + size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
801     + else
802     + size = offsetof(struct fxregs_state, xmm_space[0]);
803     +
804     + return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
805     +}
806     +
807     +static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
808     + struct fxregs_state *new)
809     +{
810     + int rc = X86EMUL_CONTINUE;
811     + struct fxregs_state old;
812     +
813     + rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
814     + if (rc != X86EMUL_CONTINUE)
815     + return rc;
816     +
817     + /*
818     + * 64 bit host will restore XMM 8-15, which is not correct on non-64
819     + * bit guests. Load the current values in order to preserve 64 bit
820     + * XMMs after fxrstor.
821     + */
822     +#ifdef CONFIG_X86_64
823     + /* XXX: accessing XMM 8-15 very awkwardly */
824     + memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
825     +#endif
826     +
827     + /*
828     + * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
829     + * does save and restore MXCSR.
830     + */
831     + if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
832     + memcpy(new->xmm_space, old.xmm_space, 8 * 16);
833     +
834     + return rc;
835     +}
836     +
837     +static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
838     +{
839     + struct fxregs_state fx_state;
840     + int rc;
841     +
842     + rc = check_fxsr(ctxt);
843     + if (rc != X86EMUL_CONTINUE)
844     + return rc;
845     +
846     + rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
847     + if (rc != X86EMUL_CONTINUE)
848     + return rc;
849     +
850     + if (fx_state.mxcsr >> 16)
851     + return emulate_gp(ctxt, 0);
852     +
853     + ctxt->ops->get_fpu(ctxt);
854     +
855     + if (ctxt->mode < X86EMUL_MODE_PROT64)
856     + rc = fxrstor_fixup(ctxt, &fx_state);
857     +
858     + if (rc == X86EMUL_CONTINUE)
859     + rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
860     +
861     + ctxt->ops->put_fpu(ctxt);
862     +
863     + return rc;
864     +}
865     +
866     static bool valid_cr(int nr)
867     {
868     switch (nr) {
869     @@ -4194,7 +4385,9 @@ static const struct gprefix pfx_0f_ae_7 = {
870     };
871    
872     static const struct group_dual group15 = { {
873     - N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
874     + I(ModRM | Aligned16, em_fxsave),
875     + I(ModRM | Aligned16, em_fxrstor),
876     + N, N, N, N, N, GP(0, &pfx_0f_ae_7),
877     }, {
878     N, N, N, N, N, N, N, N,
879     } };
880     @@ -5066,21 +5259,13 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
881    
882     static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
883     {
884     - bool fault = false;
885     + int rc;
886    
887     ctxt->ops->get_fpu(ctxt);
888     - asm volatile("1: fwait \n\t"
889     - "2: \n\t"
890     - ".pushsection .fixup,\"ax\" \n\t"
891     - "3: \n\t"
892     - "movb $1, %[fault] \n\t"
893     - "jmp 2b \n\t"
894     - ".popsection \n\t"
895     - _ASM_EXTABLE(1b, 3b)
896     - : [fault]"+qm"(fault));
897     + rc = asm_safe("fwait");
898     ctxt->ops->put_fpu(ctxt);
899    
900     - if (unlikely(fault))
901     + if (unlikely(rc != X86EMUL_CONTINUE))
902     return emulate_exception(ctxt, MF_VECTOR, 0, false);
903    
904     return X86EMUL_CONTINUE;
905     diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
906     index 6f69340f9fa3..3f05c044720b 100644
907     --- a/arch/x86/kvm/lapic.c
908     +++ b/arch/x86/kvm/lapic.c
909     @@ -2360,3 +2360,9 @@ void kvm_lapic_init(void)
910     jump_label_rate_limit(&apic_hw_disabled, HZ);
911     jump_label_rate_limit(&apic_sw_disabled, HZ);
912     }
913     +
914     +void kvm_lapic_exit(void)
915     +{
916     + static_key_deferred_flush(&apic_hw_disabled);
917     + static_key_deferred_flush(&apic_sw_disabled);
918     +}
919     diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
920     index f60d01c29d51..4dfe4d6cb338 100644
921     --- a/arch/x86/kvm/lapic.h
922     +++ b/arch/x86/kvm/lapic.h
923     @@ -108,6 +108,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
924    
925     int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
926     void kvm_lapic_init(void);
927     +void kvm_lapic_exit(void);
928    
929     #define VEC_POS(v) ((v) & (32 - 1))
930     #define REG_POS(v) (((v) >> 5) << 4)
931     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
932     index f3648c978d2f..487b957e7802 100644
933     --- a/arch/x86/kvm/x86.c
934     +++ b/arch/x86/kvm/x86.c
935     @@ -3308,6 +3308,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
936    
937     switch (cap->cap) {
938     case KVM_CAP_HYPERV_SYNIC:
939     + if (!irqchip_in_kernel(vcpu->kvm))
940     + return -EINVAL;
941     return kvm_hv_activate_synic(vcpu);
942     default:
943     return -EINVAL;
944     @@ -5963,6 +5965,7 @@ int kvm_arch_init(void *opaque)
945    
946     void kvm_arch_exit(void)
947     {
948     + kvm_lapic_exit();
949     perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
950    
951     if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
952     diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
953     index 936a488d6cf6..274dfc481849 100644
954     --- a/arch/x86/platform/efi/efi.c
955     +++ b/arch/x86/platform/efi/efi.c
956     @@ -210,6 +210,70 @@ int __init efi_memblock_x86_reserve_range(void)
957     return 0;
958     }
959    
960     +#define OVERFLOW_ADDR_SHIFT (64 - EFI_PAGE_SHIFT)
961     +#define OVERFLOW_ADDR_MASK (U64_MAX << OVERFLOW_ADDR_SHIFT)
962     +#define U64_HIGH_BIT (~(U64_MAX >> 1))
963     +
964     +static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
965     +{
966     + u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
967     + u64 end_hi = 0;
968     + char buf[64];
969     +
970     + if (md->num_pages == 0) {
971     + end = 0;
972     + } else if (md->num_pages > EFI_PAGES_MAX ||
973     + EFI_PAGES_MAX - md->num_pages <
974     + (md->phys_addr >> EFI_PAGE_SHIFT)) {
975     + end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
976     + >> OVERFLOW_ADDR_SHIFT;
977     +
978     + if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
979     + end_hi += 1;
980     + } else {
981     + return true;
982     + }
983     +
984     + pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");
985     +
986     + if (end_hi) {
987     + pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
988     + i, efi_md_typeattr_format(buf, sizeof(buf), md),
989     + md->phys_addr, end_hi, end);
990     + } else {
991     + pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
992     + i, efi_md_typeattr_format(buf, sizeof(buf), md),
993     + md->phys_addr, end);
994     + }
995     + return false;
996     +}
997     +
998     +static void __init efi_clean_memmap(void)
999     +{
1000     + efi_memory_desc_t *out = efi.memmap.map;
1001     + const efi_memory_desc_t *in = out;
1002     + const efi_memory_desc_t *end = efi.memmap.map_end;
1003     + int i, n_removal;
1004     +
1005     + for (i = n_removal = 0; in < end; i++) {
1006     + if (efi_memmap_entry_valid(in, i)) {
1007     + if (out != in)
1008     + memcpy(out, in, efi.memmap.desc_size);
1009     + out = (void *)out + efi.memmap.desc_size;
1010     + } else {
1011     + n_removal++;
1012     + }
1013     + in = (void *)in + efi.memmap.desc_size;
1014     + }
1015     +
1016     + if (n_removal > 0) {
1017     + u64 size = efi.memmap.nr_map - n_removal;
1018     +
1019     + pr_warn("Removing %d invalid memory map entries.\n", n_removal);
1020     + efi_memmap_install(efi.memmap.phys_map, size);
1021     + }
1022     +}
1023     +
1024     void __init efi_print_memmap(void)
1025     {
1026     efi_memory_desc_t *md;
1027     @@ -472,6 +536,8 @@ void __init efi_init(void)
1028     }
1029     }
1030    
1031     + efi_clean_memmap();
1032     +
1033     if (efi_enabled(EFI_DBG))
1034     efi_print_memmap();
1035     }
1036     diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
1037     index 10aca63a50d7..30031d5293c4 100644
1038     --- a/arch/x86/platform/efi/quirks.c
1039     +++ b/arch/x86/platform/efi/quirks.c
1040     @@ -214,7 +214,7 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
1041    
1042     new_size = efi.memmap.desc_size * num_entries;
1043    
1044     - new_phys = memblock_alloc(new_size, 0);
1045     + new_phys = efi_memmap_alloc(num_entries);
1046     if (!new_phys) {
1047     pr_err("Could not allocate boot services memmap\n");
1048     return;
1049     @@ -355,7 +355,7 @@ void __init efi_free_boot_services(void)
1050     }
1051    
1052     new_size = efi.memmap.desc_size * num_entries;
1053     - new_phys = memblock_alloc(new_size, 0);
1054     + new_phys = efi_memmap_alloc(num_entries);
1055     if (!new_phys) {
1056     pr_err("Failed to allocate new EFI memmap\n");
1057     return;
1058     diff --git a/block/blk-mq.c b/block/blk-mq.c
1059     index ad459e4e8071..81caceb96c3c 100644
1060     --- a/block/blk-mq.c
1061     +++ b/block/blk-mq.c
1062     @@ -895,7 +895,7 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1063     return WORK_CPU_UNBOUND;
1064    
1065     if (--hctx->next_cpu_batch <= 0) {
1066     - int cpu = hctx->next_cpu, next_cpu;
1067     + int next_cpu;
1068    
1069     next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
1070     if (next_cpu >= nr_cpu_ids)
1071     @@ -903,8 +903,6 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1072    
1073     hctx->next_cpu = next_cpu;
1074     hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1075     -
1076     - return cpu;
1077     }
1078    
1079     return hctx->next_cpu;
1080     diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
1081     index 5e24d880306c..3ab6807773ee 100644
1082     --- a/block/cfq-iosched.c
1083     +++ b/block/cfq-iosched.c
1084     @@ -1596,7 +1596,7 @@ static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
1085     {
1086     struct cfq_group_data *cgd;
1087    
1088     - cgd = kzalloc(sizeof(*cgd), GFP_KERNEL);
1089     + cgd = kzalloc(sizeof(*cgd), gfp);
1090     if (!cgd)
1091     return NULL;
1092     return &cgd->cpd;
1093     diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
1094     index 0d099a24f776..e53bef6cf53c 100644
1095     --- a/drivers/acpi/apei/ghes.c
1096     +++ b/drivers/acpi/apei/ghes.c
1097     @@ -852,6 +852,8 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
1098     if (ghes_read_estatus(ghes, 1)) {
1099     ghes_clear_estatus(ghes);
1100     continue;
1101     + } else {
1102     + ret = NMI_HANDLED;
1103     }
1104    
1105     sev = ghes_severity(ghes->estatus->error_severity);
1106     @@ -863,12 +865,11 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
1107    
1108     __process_error(ghes);
1109     ghes_clear_estatus(ghes);
1110     -
1111     - ret = NMI_HANDLED;
1112     }
1113    
1114     #ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
1115     - irq_work_queue(&ghes_proc_irq_work);
1116     + if (ret == NMI_HANDLED)
1117     + irq_work_queue(&ghes_proc_irq_work);
1118     #endif
1119     atomic_dec(&ghes_in_nmi);
1120     return ret;
1121     diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
1122     index d0d0504b7c89..e0ea8f56d2bf 100644
1123     --- a/drivers/acpi/cppc_acpi.c
1124     +++ b/drivers/acpi/cppc_acpi.c
1125     @@ -784,8 +784,10 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
1126    
1127     /* Add per logical CPU nodes for reading its feedback counters. */
1128     cpu_dev = get_cpu_device(pr->id);
1129     - if (!cpu_dev)
1130     + if (!cpu_dev) {
1131     + ret = -EINVAL;
1132     goto out_free;
1133     + }
1134    
1135     ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
1136     "acpi_cppc");
1137     diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
1138     index 5545a679abd8..3c3b8f601469 100644
1139     --- a/drivers/block/virtio_blk.c
1140     +++ b/drivers/block/virtio_blk.c
1141     @@ -56,6 +56,7 @@ struct virtblk_req {
1142     struct virtio_blk_outhdr out_hdr;
1143     struct virtio_scsi_inhdr in_hdr;
1144     u8 status;
1145     + u8 sense[SCSI_SENSE_BUFFERSIZE];
1146     struct scatterlist sg[];
1147     };
1148    
1149     @@ -102,7 +103,8 @@ static int __virtblk_add_req(struct virtqueue *vq,
1150     }
1151    
1152     if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
1153     - sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
1154     + memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
1155     + sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
1156     sgs[num_out + num_in++] = &sense;
1157     sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
1158     sgs[num_out + num_in++] = &inhdr;
1159     diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
1160     index 5497f7fc44d0..d2ef51ca9cf4 100644
1161     --- a/drivers/block/zram/zram_drv.c
1162     +++ b/drivers/block/zram/zram_drv.c
1163     @@ -25,6 +25,7 @@
1164     #include <linux/genhd.h>
1165     #include <linux/highmem.h>
1166     #include <linux/slab.h>
1167     +#include <linux/backing-dev.h>
1168     #include <linux/string.h>
1169     #include <linux/vmalloc.h>
1170     #include <linux/err.h>
1171     @@ -111,6 +112,14 @@ static inline bool is_partial_io(struct bio_vec *bvec)
1172     return bvec->bv_len != PAGE_SIZE;
1173     }
1174    
1175     +static void zram_revalidate_disk(struct zram *zram)
1176     +{
1177     + revalidate_disk(zram->disk);
1178     + /* revalidate_disk reset the BDI_CAP_STABLE_WRITES so set again */
1179     + zram->disk->queue->backing_dev_info.capabilities |=
1180     + BDI_CAP_STABLE_WRITES;
1181     +}
1182     +
1183     /*
1184     * Check if request is within bounds and aligned on zram logical blocks.
1185     */
1186     @@ -1094,15 +1103,9 @@ static ssize_t disksize_store(struct device *dev,
1187     zram->comp = comp;
1188     zram->disksize = disksize;
1189     set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
1190     + zram_revalidate_disk(zram);
1191     up_write(&zram->init_lock);
1192    
1193     - /*
1194     - * Revalidate disk out of the init_lock to avoid lockdep splat.
1195     - * It's okay because disk's capacity is protected by init_lock
1196     - * so that revalidate_disk always sees up-to-date capacity.
1197     - */
1198     - revalidate_disk(zram->disk);
1199     -
1200     return len;
1201    
1202     out_destroy_comp:
1203     @@ -1148,7 +1151,7 @@ static ssize_t reset_store(struct device *dev,
1204     /* Make sure all the pending I/O are finished */
1205     fsync_bdev(bdev);
1206     zram_reset_device(zram);
1207     - revalidate_disk(zram->disk);
1208     + zram_revalidate_disk(zram);
1209     bdput(bdev);
1210    
1211     mutex_lock(&bdev->bd_mutex);
1212     diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
1213     index 9efdf1de4035..493e7b9fc813 100644
1214     --- a/drivers/bus/vexpress-config.c
1215     +++ b/drivers/bus/vexpress-config.c
1216     @@ -171,6 +171,7 @@ static int vexpress_config_populate(struct device_node *node)
1217     {
1218     struct device_node *bridge;
1219     struct device *parent;
1220     + int ret;
1221    
1222     bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
1223     if (!bridge)
1224     @@ -182,7 +183,11 @@ static int vexpress_config_populate(struct device_node *node)
1225     if (WARN_ON(!parent))
1226     return -ENODEV;
1227    
1228     - return of_platform_populate(node, NULL, NULL, parent);
1229     + ret = of_platform_populate(node, NULL, NULL, parent);
1230     +
1231     + put_device(parent);
1232     +
1233     + return ret;
1234     }
1235    
1236     static int __init vexpress_config_init(void)
1237     diff --git a/drivers/char/mem.c b/drivers/char/mem.c
1238     index 5bb1985ec484..6d9cc2d39d22 100644
1239     --- a/drivers/char/mem.c
1240     +++ b/drivers/char/mem.c
1241     @@ -381,9 +381,6 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
1242     char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
1243     int err = 0;
1244    
1245     - if (!pfn_valid(PFN_DOWN(p)))
1246     - return -EIO;
1247     -
1248     read = 0;
1249     if (p < (unsigned long) high_memory) {
1250     low_count = count;
1251     @@ -412,6 +409,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
1252     * by the kernel or data corruption may occur
1253     */
1254     kbuf = xlate_dev_kmem_ptr((void *)p);
1255     + if (!virt_addr_valid(kbuf))
1256     + return -ENXIO;
1257    
1258     if (copy_to_user(buf, kbuf, sz))
1259     return -EFAULT;
1260     @@ -482,6 +481,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
1261     * corruption may occur.
1262     */
1263     ptr = xlate_dev_kmem_ptr((void *)p);
1264     + if (!virt_addr_valid(ptr))
1265     + return -ENXIO;
1266    
1267     copied = copy_from_user(ptr, buf, sz);
1268     if (copied) {
1269     @@ -512,9 +513,6 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
1270     char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
1271     int err = 0;
1272    
1273     - if (!pfn_valid(PFN_DOWN(p)))
1274     - return -EIO;
1275     -
1276     if (p < (unsigned long) high_memory) {
1277     unsigned long to_write = min_t(unsigned long, count,
1278     (unsigned long)high_memory - p);
1279     diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
1280     index d3ffde806629..a84724eabfb8 100644
1281     --- a/drivers/cpufreq/powernv-cpufreq.c
1282     +++ b/drivers/cpufreq/powernv-cpufreq.c
1283     @@ -647,8 +647,14 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
1284     if (unlikely(rebooting) && new_index != get_nominal_index())
1285     return 0;
1286    
1287     - if (!throttled)
1288     + if (!throttled) {
1289     + /* we don't want to be preempted while
1290     + * checking if the CPU frequency has been throttled
1291     + */
1292     + preempt_disable();
1293     powernv_cpufreq_throttle_check(NULL);
1294     + preempt_enable();
1295     + }
1296    
1297     cur_msec = jiffies_to_msecs(get_jiffies_64());
1298    
1299     diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
1300     index 7ca27d4b1c54..6b16ce390dce 100644
1301     --- a/drivers/dma/omap-dma.c
1302     +++ b/drivers/dma/omap-dma.c
1303     @@ -1339,6 +1339,7 @@ static int omap_dma_probe(struct platform_device *pdev)
1304     struct omap_dmadev *od;
1305     struct resource *res;
1306     int rc, i, irq;
1307     + u32 lch_count;
1308    
1309     od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
1310     if (!od)
1311     @@ -1381,20 +1382,31 @@ static int omap_dma_probe(struct platform_device *pdev)
1312     spin_lock_init(&od->lock);
1313     spin_lock_init(&od->irq_lock);
1314    
1315     - if (!pdev->dev.of_node) {
1316     - od->dma_requests = od->plat->dma_attr->lch_count;
1317     - if (unlikely(!od->dma_requests))
1318     - od->dma_requests = OMAP_SDMA_REQUESTS;
1319     - } else if (of_property_read_u32(pdev->dev.of_node, "dma-requests",
1320     - &od->dma_requests)) {
1321     + /* Number of DMA requests */
1322     + od->dma_requests = OMAP_SDMA_REQUESTS;
1323     + if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
1324     + "dma-requests",
1325     + &od->dma_requests)) {
1326     dev_info(&pdev->dev,
1327     "Missing dma-requests property, using %u.\n",
1328     OMAP_SDMA_REQUESTS);
1329     - od->dma_requests = OMAP_SDMA_REQUESTS;
1330     }
1331    
1332     - od->lch_map = devm_kcalloc(&pdev->dev, od->dma_requests,
1333     - sizeof(*od->lch_map), GFP_KERNEL);
1334     + /* Number of available logical channels */
1335     + if (!pdev->dev.of_node) {
1336     + lch_count = od->plat->dma_attr->lch_count;
1337     + if (unlikely(!lch_count))
1338     + lch_count = OMAP_SDMA_CHANNELS;
1339     + } else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
1340     + &lch_count)) {
1341     + dev_info(&pdev->dev,
1342     + "Missing dma-channels property, using %u.\n",
1343     + OMAP_SDMA_CHANNELS);
1344     + lch_count = OMAP_SDMA_CHANNELS;
1345     + }
1346     +
1347     + od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
1348     + GFP_KERNEL);
1349     if (!od->lch_map)
1350     return -ENOMEM;
1351    
1352     diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
1353     index 78298460d168..7c1e3a7b14e0 100644
1354     --- a/drivers/extcon/extcon.c
1355     +++ b/drivers/extcon/extcon.c
1356     @@ -453,7 +453,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
1357     dev_err(&edev->dev, "out of memory in extcon_set_state\n");
1358     kobject_uevent(&edev->dev.kobj, KOBJ_CHANGE);
1359    
1360     - return 0;
1361     + return -ENOMEM;
1362     }
1363    
1364     length = name_show(&edev->dev, NULL, prop_buf);
1365     diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
1366     index 520a40e5e0e4..6c7d60c239b5 100644
1367     --- a/drivers/firmware/efi/fake_mem.c
1368     +++ b/drivers/firmware/efi/fake_mem.c
1369     @@ -71,8 +71,7 @@ void __init efi_fake_memmap(void)
1370     }
1371    
1372     /* allocate memory for new EFI memmap */
1373     - new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map,
1374     - PAGE_SIZE);
1375     + new_memmap_phy = efi_memmap_alloc(new_nr_map);
1376     if (!new_memmap_phy)
1377     return;
1378    
1379     diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
1380     index ee49cd23ee63..fac67992bede 100644
1381     --- a/drivers/firmware/efi/libstub/efistub.h
1382     +++ b/drivers/firmware/efi/libstub/efistub.h
1383     @@ -30,14 +30,6 @@ efi_status_t efi_file_close(void *handle);
1384    
1385     unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
1386    
1387     -efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
1388     - unsigned long orig_fdt_size,
1389     - void *fdt, int new_fdt_size, char *cmdline_ptr,
1390     - u64 initrd_addr, u64 initrd_size,
1391     - efi_memory_desc_t *memory_map,
1392     - unsigned long map_size, unsigned long desc_size,
1393     - u32 desc_ver);
1394     -
1395     efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
1396     void *handle,
1397     unsigned long *new_fdt_addr,
1398     diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
1399     index a6a93116a8f0..921dfa047202 100644
1400     --- a/drivers/firmware/efi/libstub/fdt.c
1401     +++ b/drivers/firmware/efi/libstub/fdt.c
1402     @@ -16,13 +16,10 @@
1403    
1404     #include "efistub.h"
1405    
1406     -efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
1407     - unsigned long orig_fdt_size,
1408     - void *fdt, int new_fdt_size, char *cmdline_ptr,
1409     - u64 initrd_addr, u64 initrd_size,
1410     - efi_memory_desc_t *memory_map,
1411     - unsigned long map_size, unsigned long desc_size,
1412     - u32 desc_ver)
1413     +static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
1414     + unsigned long orig_fdt_size,
1415     + void *fdt, int new_fdt_size, char *cmdline_ptr,
1416     + u64 initrd_addr, u64 initrd_size)
1417     {
1418     int node, num_rsv;
1419     int status;
1420     @@ -101,25 +98,23 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
1421     if (status)
1422     goto fdt_set_fail;
1423    
1424     - fdt_val64 = cpu_to_fdt64((u64)(unsigned long)memory_map);
1425     + fdt_val64 = U64_MAX; /* placeholder */
1426     status = fdt_setprop(fdt, node, "linux,uefi-mmap-start",
1427     &fdt_val64, sizeof(fdt_val64));
1428     if (status)
1429     goto fdt_set_fail;
1430    
1431     - fdt_val32 = cpu_to_fdt32(map_size);
1432     + fdt_val32 = U32_MAX; /* placeholder */
1433     status = fdt_setprop(fdt, node, "linux,uefi-mmap-size",
1434     &fdt_val32, sizeof(fdt_val32));
1435     if (status)
1436     goto fdt_set_fail;
1437    
1438     - fdt_val32 = cpu_to_fdt32(desc_size);
1439     status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size",
1440     &fdt_val32, sizeof(fdt_val32));
1441     if (status)
1442     goto fdt_set_fail;
1443    
1444     - fdt_val32 = cpu_to_fdt32(desc_ver);
1445     status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver",
1446     &fdt_val32, sizeof(fdt_val32));
1447     if (status)
1448     @@ -148,6 +143,43 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
1449     return EFI_LOAD_ERROR;
1450     }
1451    
1452     +static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
1453     +{
1454     + int node = fdt_path_offset(fdt, "/chosen");
1455     + u64 fdt_val64;
1456     + u32 fdt_val32;
1457     + int err;
1458     +
1459     + if (node < 0)
1460     + return EFI_LOAD_ERROR;
1461     +
1462     + fdt_val64 = cpu_to_fdt64((unsigned long)*map->map);
1463     + err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start",
1464     + &fdt_val64, sizeof(fdt_val64));
1465     + if (err)
1466     + return EFI_LOAD_ERROR;
1467     +
1468     + fdt_val32 = cpu_to_fdt32(*map->map_size);
1469     + err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-size",
1470     + &fdt_val32, sizeof(fdt_val32));
1471     + if (err)
1472     + return EFI_LOAD_ERROR;
1473     +
1474     + fdt_val32 = cpu_to_fdt32(*map->desc_size);
1475     + err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-size",
1476     + &fdt_val32, sizeof(fdt_val32));
1477     + if (err)
1478     + return EFI_LOAD_ERROR;
1479     +
1480     + fdt_val32 = cpu_to_fdt32(*map->desc_ver);
1481     + err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-ver",
1482     + &fdt_val32, sizeof(fdt_val32));
1483     + if (err)
1484     + return EFI_LOAD_ERROR;
1485     +
1486     + return EFI_SUCCESS;
1487     +}
1488     +
1489     #ifndef EFI_FDT_ALIGN
1490     #define EFI_FDT_ALIGN EFI_PAGE_SIZE
1491     #endif
1492     @@ -243,20 +275,10 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
1493     goto fail;
1494     }
1495    
1496     - /*
1497     - * Now that we have done our final memory allocation (and free)
1498     - * we can get the memory map key needed for
1499     - * exit_boot_services().
1500     - */
1501     - status = efi_get_memory_map(sys_table, &map);
1502     - if (status != EFI_SUCCESS)
1503     - goto fail_free_new_fdt;
1504     -
1505     status = update_fdt(sys_table,
1506     (void *)fdt_addr, fdt_size,
1507     (void *)*new_fdt_addr, new_fdt_size,
1508     - cmdline_ptr, initrd_addr, initrd_size,
1509     - memory_map, map_size, desc_size, desc_ver);
1510     + cmdline_ptr, initrd_addr, initrd_size);
1511    
1512     /* Succeeding the first time is the expected case. */
1513     if (status == EFI_SUCCESS)
1514     @@ -266,20 +288,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
1515     /*
1516     * We need to allocate more space for the new
1517     * device tree, so free existing buffer that is
1518     - * too small. Also free memory map, as we will need
1519     - * to get new one that reflects the free/alloc we do
1520     - * on the device tree buffer.
1521     + * too small.
1522     */
1523     efi_free(sys_table, new_fdt_size, *new_fdt_addr);
1524     - sys_table->boottime->free_pool(memory_map);
1525     new_fdt_size += EFI_PAGE_SIZE;
1526     } else {
1527     pr_efi_err(sys_table, "Unable to construct new device tree.\n");
1528     - goto fail_free_mmap;
1529     + goto fail_free_new_fdt;
1530     }
1531     }
1532    
1533     - sys_table->boottime->free_pool(memory_map);
1534     priv.runtime_map = runtime_map;
1535     priv.runtime_entry_count = &runtime_entry_count;
1536     status = efi_exit_boot_services(sys_table, handle, &map, &priv,
1537     @@ -288,6 +306,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
1538     if (status == EFI_SUCCESS) {
1539     efi_set_virtual_address_map_t *svam;
1540    
1541     + status = update_fdt_memmap((void *)*new_fdt_addr, &map);
1542     + if (status != EFI_SUCCESS) {
1543     + /*
1544     + * The kernel won't get far without the memory map, but
1545     + * may still be able to print something meaningful so
1546     + * return success here.
1547     + */
1548     + return EFI_SUCCESS;
1549     + }
1550     +
1551     /* Install the new virtual address map */
1552     svam = sys_table->runtime->set_virtual_address_map;
1553     status = svam(runtime_entry_count * desc_size, desc_size,
1554     @@ -319,9 +347,6 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
1555    
1556     pr_efi_err(sys_table, "Exit boot services failed.\n");
1557    
1558     -fail_free_mmap:
1559     - sys_table->boottime->free_pool(memory_map);
1560     -
1561     fail_free_new_fdt:
1562     efi_free(sys_table, new_fdt_size, *new_fdt_addr);
1563    
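The libstub rework above writes placeholder values for the memory-map properties before ExitBootServices() and then rewrites them with fdt_setprop_inplace() once the final map is known, because the blob can no longer be grown at that point. A minimal sketch of that reserve-then-patch-in-place idea; the toy blob and its set()/set_inplace() helpers are illustrative, not libfdt, but set_inplace() keeps the same restriction that the entry must already exist:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/*
 * Toy fixed-capacity property blob.  set() may append new entries;
 * set_inplace() may only overwrite an entry that already exists, which is
 * the restriction fdt_setprop_inplace() has and the reason the stub writes
 * U32_MAX/U64_MAX placeholders before ExitBootServices().
 */
struct entry { char name[32]; uint64_t val; };
struct blob { struct entry e[8]; int n; };

static int set(struct blob *b, const char *name, uint64_t val)
{
	for (int i = 0; i < b->n; i++)
		if (!strcmp(b->e[i].name, name)) { b->e[i].val = val; return 0; }
	if (b->n == (int)(sizeof(b->e) / sizeof(b->e[0])))
		return -1;			/* no room to grow */
	snprintf(b->e[b->n].name, sizeof(b->e[b->n].name), "%s", name);
	b->e[b->n++].val = val;
	return 0;
}

static int set_inplace(struct blob *b, const char *name, uint64_t val)
{
	for (int i = 0; i < b->n; i++)
		if (!strcmp(b->e[i].name, name)) { b->e[i].val = val; return 0; }
	return -1;				/* must already exist */
}

int main(void)
{
	struct blob b = { .n = 0 };

	/* Before "exit boot services": reserve the slot with a placeholder. */
	set(&b, "uefi-mmap-start", UINT64_MAX);

	/* After: only in-place updates are allowed; this one succeeds... */
	if (set_inplace(&b, "uefi-mmap-start", 0x80000000ull) == 0)
		printf("uefi-mmap-start patched to %#llx\n",
		       (unsigned long long)b.e[0].val);

	/* ...and patching a property that was never reserved fails. */
	if (set_inplace(&b, "uefi-mmap-size", 4096) < 0)
		printf("cannot add new properties in place\n");
	return 0;
}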
1564     diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
1565     index f03ddecd232b..78686443cb37 100644
1566     --- a/drivers/firmware/efi/memmap.c
1567     +++ b/drivers/firmware/efi/memmap.c
1568     @@ -9,6 +9,44 @@
1569     #include <linux/efi.h>
1570     #include <linux/io.h>
1571     #include <asm/early_ioremap.h>
1572     +#include <linux/memblock.h>
1573     +#include <linux/slab.h>
1574     +
1575     +static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
1576     +{
1577     + return memblock_alloc(size, 0);
1578     +}
1579     +
1580     +static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
1581     +{
1582     + unsigned int order = get_order(size);
1583     + struct page *p = alloc_pages(GFP_KERNEL, order);
1584     +
1585     + if (!p)
1586     + return 0;
1587     +
1588     + return PFN_PHYS(page_to_pfn(p));
1589     +}
1590     +
1591     +/**
1592     + * efi_memmap_alloc - Allocate memory for the EFI memory map
1593     + * @num_entries: Number of entries in the allocated map.
1594     + *
1595     + * Depending on whether mm_init() has already been invoked or not,
1596     + * either memblock or "normal" page allocation is used.
1597     + *
1598     + * Returns the physical address of the allocated memory map on
1599     + * success, zero on failure.
1600     + */
1601     +phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
1602     +{
1603     + unsigned long size = num_entries * efi.memmap.desc_size;
1604     +
1605     + if (slab_is_available())
1606     + return __efi_memmap_alloc_late(size);
1607     +
1608     + return __efi_memmap_alloc_early(size);
1609     +}
1610    
1611     /**
1612     * __efi_memmap_init - Common code for mapping the EFI memory map
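efi_memmap_alloc() above picks its allocator by boot phase: memblock before the page and slab allocators are up, alloc_pages() afterwards, keyed on slab_is_available(). A minimal userspace analogue of that phase-dependent allocator selection; the static early_pool, the late_allocator_ready flag and the memmap_alloc() helper are invented for illustration, not the kernel allocators themselves:

#include <stdio.h>
#include <stdlib.h>

/* Crude stand-in for memblock: a bump allocator over a static pool. */
static unsigned char early_pool[4096];
static size_t early_used;

static void *alloc_early(size_t size)
{
	if (early_used + size > sizeof(early_pool))
		return NULL;
	void *p = early_pool + early_used;
	early_used += size;
	return p;
}

/* Stand-in for slab_is_available(): set once the "late" allocator is up. */
static int late_allocator_ready;

/* Phase-aware allocation, mirroring the shape of efi_memmap_alloc(). */
static void *memmap_alloc(size_t num_entries, size_t desc_size)
{
	size_t size = num_entries * desc_size;

	return late_allocator_ready ? malloc(size) : alloc_early(size);
}

int main(void)
{
	void *early = memmap_alloc(8, 48);	/* served from the static pool */

	late_allocator_ready = 1;
	void *late = memmap_alloc(8, 48);	/* served by malloc() */

	printf("early=%p late=%p\n", early, late);
	free(late);
	return 0;
}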
1613     diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
1614     index 90621fb93941..92159313361b 100644
1615     --- a/drivers/gpio/gpiolib.c
1616     +++ b/drivers/gpio/gpiolib.c
1617     @@ -1317,12 +1317,12 @@ void gpiochip_remove(struct gpio_chip *chip)
1618    
1619     /* FIXME: should the legacy sysfs handling be moved to gpio_device? */
1620     gpiochip_sysfs_unregister(gdev);
1621     + gpiochip_free_hogs(chip);
1622     /* Numb the device, cancelling all outstanding operations */
1623     gdev->chip = NULL;
1624     gpiochip_irqchip_remove(chip);
1625     acpi_gpiochip_remove(chip);
1626     gpiochip_remove_pin_ranges(chip);
1627     - gpiochip_free_hogs(chip);
1628     of_gpiochip_remove(chip);
1629     /*
1630     * We accept no more calls into the driver from this point, so
1631     diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
1632     index 6d2ea76f4eb6..b447a01ab21a 100644
1633     --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
1634     +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
1635     @@ -56,7 +56,6 @@
1636     #define BIOS_SCRATCH_4 0x5cd
1637    
1638     MODULE_FIRMWARE("radeon/tahiti_smc.bin");
1639     -MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
1640     MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
1641     MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
1642     MODULE_FIRMWARE("radeon/verde_smc.bin");
1643     @@ -3486,19 +3485,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
1644     (adev->pdev->device == 0x6817) ||
1645     (adev->pdev->device == 0x6806))
1646     max_mclk = 120000;
1647     - } else if (adev->asic_type == CHIP_VERDE) {
1648     - if ((adev->pdev->revision == 0x81) ||
1649     - (adev->pdev->revision == 0x83) ||
1650     - (adev->pdev->revision == 0x87) ||
1651     - (adev->pdev->device == 0x6820) ||
1652     - (adev->pdev->device == 0x6821) ||
1653     - (adev->pdev->device == 0x6822) ||
1654     - (adev->pdev->device == 0x6823) ||
1655     - (adev->pdev->device == 0x682A) ||
1656     - (adev->pdev->device == 0x682B)) {
1657     - max_sclk = 75000;
1658     - max_mclk = 80000;
1659     - }
1660     } else if (adev->asic_type == CHIP_OLAND) {
1661     if ((adev->pdev->revision == 0xC7) ||
1662     (adev->pdev->revision == 0x80) ||
1663     @@ -7685,49 +7671,49 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
1664     chip_name = "tahiti";
1665     break;
1666     case CHIP_PITCAIRN:
1667     - if ((adev->pdev->revision == 0x81) ||
1668     - (adev->pdev->device == 0x6810) ||
1669     - (adev->pdev->device == 0x6811) ||
1670     - (adev->pdev->device == 0x6816) ||
1671     - (adev->pdev->device == 0x6817) ||
1672     - (adev->pdev->device == 0x6806))
1673     + if ((adev->pdev->revision == 0x81) &&
1674     + ((adev->pdev->device == 0x6810) ||
1675     + (adev->pdev->device == 0x6811)))
1676     chip_name = "pitcairn_k";
1677     else
1678     chip_name = "pitcairn";
1679     break;
1680     case CHIP_VERDE:
1681     - if ((adev->pdev->revision == 0x81) ||
1682     - (adev->pdev->revision == 0x83) ||
1683     - (adev->pdev->revision == 0x87) ||
1684     - (adev->pdev->device == 0x6820) ||
1685     - (adev->pdev->device == 0x6821) ||
1686     - (adev->pdev->device == 0x6822) ||
1687     - (adev->pdev->device == 0x6823) ||
1688     - (adev->pdev->device == 0x682A) ||
1689     - (adev->pdev->device == 0x682B))
1690     + if (((adev->pdev->device == 0x6820) &&
1691     + ((adev->pdev->revision == 0x81) ||
1692     + (adev->pdev->revision == 0x83))) ||
1693     + ((adev->pdev->device == 0x6821) &&
1694     + ((adev->pdev->revision == 0x83) ||
1695     + (adev->pdev->revision == 0x87))) ||
1696     + ((adev->pdev->revision == 0x87) &&
1697     + ((adev->pdev->device == 0x6823) ||
1698     + (adev->pdev->device == 0x682b))))
1699     chip_name = "verde_k";
1700     else
1701     chip_name = "verde";
1702     break;
1703     case CHIP_OLAND:
1704     - if ((adev->pdev->revision == 0xC7) ||
1705     - (adev->pdev->revision == 0x80) ||
1706     - (adev->pdev->revision == 0x81) ||
1707     - (adev->pdev->revision == 0x83) ||
1708     - (adev->pdev->revision == 0x87) ||
1709     - (adev->pdev->device == 0x6604) ||
1710     - (adev->pdev->device == 0x6605))
1711     + if (((adev->pdev->revision == 0x81) &&
1712     + ((adev->pdev->device == 0x6600) ||
1713     + (adev->pdev->device == 0x6604) ||
1714     + (adev->pdev->device == 0x6605) ||
1715     + (adev->pdev->device == 0x6610))) ||
1716     + ((adev->pdev->revision == 0x83) &&
1717     + (adev->pdev->device == 0x6610)))
1718     chip_name = "oland_k";
1719     else
1720     chip_name = "oland";
1721     break;
1722     case CHIP_HAINAN:
1723     - if ((adev->pdev->revision == 0x81) ||
1724     - (adev->pdev->revision == 0x83) ||
1725     - (adev->pdev->revision == 0xC3) ||
1726     - (adev->pdev->device == 0x6664) ||
1727     - (adev->pdev->device == 0x6665) ||
1728     - (adev->pdev->device == 0x6667))
1729     + if (((adev->pdev->revision == 0x81) &&
1730     + (adev->pdev->device == 0x6660)) ||
1731     + ((adev->pdev->revision == 0x83) &&
1732     + ((adev->pdev->device == 0x6660) ||
1733     + (adev->pdev->device == 0x6663) ||
1734     + (adev->pdev->device == 0x6665) ||
1735     + (adev->pdev->device == 0x6667))) ||
1736     + ((adev->pdev->revision == 0xc3) &&
1737     + (adev->pdev->device == 0x6665)))
1738     chip_name = "hainan_k";
1739     else
1740     chip_name = "hainan";
1741     diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
1742     index 21f992605541..338766c64c99 100644
1743     --- a/drivers/gpu/drm/drm_atomic_helper.c
1744     +++ b/drivers/gpu/drm/drm_atomic_helper.c
1745     @@ -1253,8 +1253,10 @@ int drm_atomic_helper_commit(struct drm_device *dev,
1746    
1747     if (!nonblock) {
1748     ret = drm_atomic_helper_wait_for_fences(dev, state, true);
1749     - if (ret)
1750     + if (ret) {
1751     + drm_atomic_helper_cleanup_planes(dev, state);
1752     return ret;
1753     + }
1754     }
1755    
1756     /*
1757     diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
1758     index b969a64a1514..48a6167f5e7b 100644
1759     --- a/drivers/gpu/drm/drm_irq.c
1760     +++ b/drivers/gpu/drm/drm_irq.c
1761     @@ -952,8 +952,10 @@ static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
1762     u32 vblank_count;
1763     unsigned int seq;
1764    
1765     - if (WARN_ON(pipe >= dev->num_crtcs))
1766     + if (WARN_ON(pipe >= dev->num_crtcs)) {
1767     + *vblanktime = (struct timeval) { 0 };
1768     return 0;
1769     + }
1770    
1771     do {
1772     seq = read_seqbegin(&vblank->seqlock);
1773     diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
1774     index 11d44a1e0ab3..ee07bb4a57b7 100644
1775     --- a/drivers/gpu/drm/drm_mm.c
1776     +++ b/drivers/gpu/drm/drm_mm.c
1777     @@ -839,6 +839,7 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
1778    
1779     /* Clever trick to avoid a special case in the free hole tracking. */
1780     INIT_LIST_HEAD(&mm->head_node.node_list);
1781     + mm->head_node.allocated = 0;
1782     mm->head_node.hole_follows = 1;
1783     mm->head_node.scanned_block = 0;
1784     mm->head_node.scanned_prev_free = 0;
1785     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1786     index c9e83f39ec0a..869b29fe9ec4 100644
1787     --- a/drivers/gpu/drm/i915/intel_display.c
1788     +++ b/drivers/gpu/drm/i915/intel_display.c
1789     @@ -16749,7 +16749,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
1790    
1791     for_each_intel_crtc(dev, crtc) {
1792     struct intel_crtc_state *crtc_state = crtc->config;
1793     - int pixclk = 0;
1794    
1795     __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
1796     memset(crtc_state, 0, sizeof(*crtc_state));
1797     @@ -16761,23 +16760,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
1798     crtc->base.enabled = crtc_state->base.enable;
1799     crtc->active = crtc_state->base.active;
1800    
1801     - if (crtc_state->base.active) {
1802     + if (crtc_state->base.active)
1803     dev_priv->active_crtcs |= 1 << crtc->pipe;
1804    
1805     - if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
1806     - pixclk = ilk_pipe_pixel_rate(crtc_state);
1807     - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1808     - pixclk = crtc_state->base.adjusted_mode.crtc_clock;
1809     - else
1810     - WARN_ON(dev_priv->display.modeset_calc_cdclk);
1811     -
1812     - /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
1813     - if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
1814     - pixclk = DIV_ROUND_UP(pixclk * 100, 95);
1815     - }
1816     -
1817     - dev_priv->min_pixclk[crtc->pipe] = pixclk;
1818     -
1819     readout_plane_state(crtc);
1820    
1821     DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
1822     @@ -16851,6 +16836,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
1823     }
1824    
1825     for_each_intel_crtc(dev, crtc) {
1826     + int pixclk = 0;
1827     +
1828     crtc->base.hwmode = crtc->config->base.adjusted_mode;
1829    
1830     memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
1831     @@ -16878,10 +16865,23 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
1832     */
1833     crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
1834    
1835     + if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
1836     + pixclk = ilk_pipe_pixel_rate(crtc->config);
1837     + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1838     + pixclk = crtc->config->base.adjusted_mode.crtc_clock;
1839     + else
1840     + WARN_ON(dev_priv->display.modeset_calc_cdclk);
1841     +
1842     + /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
1843     + if (IS_BROADWELL(dev_priv) && crtc->config->ips_enabled)
1844     + pixclk = DIV_ROUND_UP(pixclk * 100, 95);
1845     +
1846     drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
1847     update_scanline_offset(crtc);
1848     }
1849    
1850     + dev_priv->min_pixclk[crtc->pipe] = pixclk;
1851     +
1852     intel_pipe_config_sanity_check(dev_priv, crtc->config);
1853     }
1854     }
1855     diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
1856     index 985cb31f4b44..e559a45ff1f7 100644
1857     --- a/drivers/gpu/drm/i915/intel_pm.c
1858     +++ b/drivers/gpu/drm/i915/intel_pm.c
1859     @@ -2955,24 +2955,10 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
1860     return 0;
1861     }
1862    
1863     -static int
1864     -intel_do_sagv_disable(struct drm_i915_private *dev_priv)
1865     -{
1866     - int ret;
1867     - uint32_t temp = GEN9_SAGV_DISABLE;
1868     -
1869     - ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
1870     - &temp);
1871     - if (ret)
1872     - return ret;
1873     - else
1874     - return temp & GEN9_SAGV_IS_DISABLED;
1875     -}
1876     -
1877     int
1878     intel_disable_sagv(struct drm_i915_private *dev_priv)
1879     {
1880     - int ret, result;
1881     + int ret;
1882    
1883     if (!intel_has_sagv(dev_priv))
1884     return 0;
1885     @@ -2984,25 +2970,23 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
1886     mutex_lock(&dev_priv->rps.hw_lock);
1887    
1888     /* bspec says to keep retrying for at least 1 ms */
1889     - ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
1890     + ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
1891     + GEN9_SAGV_DISABLE,
1892     + GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
1893     + 1);
1894     mutex_unlock(&dev_priv->rps.hw_lock);
1895    
1896     - if (ret == -ETIMEDOUT) {
1897     - DRM_ERROR("Request to disable SAGV timed out\n");
1898     - return -ETIMEDOUT;
1899     - }
1900     -
1901     /*
1902     * Some skl systems, pre-release machines in particular,
1903     * don't actually have an SAGV.
1904     */
1905     - if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
1906     + if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
1907     DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
1908     dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
1909     return 0;
1910     - } else if (result < 0) {
1911     - DRM_ERROR("Failed to disable the SAGV\n");
1912     - return result;
1913     + } else if (ret < 0) {
1914     + DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
1915     + return ret;
1916     }
1917    
1918     dev_priv->sagv_status = I915_SAGV_DISABLED;
1919     @@ -8015,14 +7999,14 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
1920     * worst case) _and_ PCODE was busy for some reason even after a
1921     * (queued) request and @timeout_base_ms delay. As a workaround retry
1922     * the poll with preemption disabled to maximize the number of
1923     - * requests. Increase the timeout from @timeout_base_ms to 50ms to
1924     + * requests. Increase the timeout from @timeout_base_ms to 10ms to
1925     * account for interrupts that could reduce the number of these
1926     * requests.
1927     */
1928     DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
1929     WARN_ON_ONCE(timeout_base_ms > 3);
1930     preempt_disable();
1931     - ret = wait_for_atomic(COND, 50);
1932     + ret = wait_for_atomic(COND, 10);
1933     preempt_enable();
1934    
1935     out:
1936     diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
1937     index 113db3c4a633..27cb42467b20 100644
1938     --- a/drivers/gpu/drm/panel/panel-simple.c
1939     +++ b/drivers/gpu/drm/panel/panel-simple.c
1940     @@ -120,7 +120,7 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
1941    
1942     mode->type |= DRM_MODE_TYPE_DRIVER;
1943    
1944     - if (panel->desc->num_modes == 1)
1945     + if (panel->desc->num_timings == 1)
1946     mode->type |= DRM_MODE_TYPE_PREFERRED;
1947    
1948     drm_mode_probed_add(connector, mode);
1949     diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
1950     index 125c7e82c3d1..877af4a5ef68 100644
1951     --- a/drivers/gpu/drm/radeon/si.c
1952     +++ b/drivers/gpu/drm/radeon/si.c
1953     @@ -50,7 +50,6 @@ MODULE_FIRMWARE("radeon/tahiti_ce.bin");
1954     MODULE_FIRMWARE("radeon/tahiti_mc.bin");
1955     MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
1956     MODULE_FIRMWARE("radeon/tahiti_smc.bin");
1957     -MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
1958    
1959     MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
1960     MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
1961     @@ -1657,9 +1656,6 @@ static int si_init_microcode(struct radeon_device *rdev)
1962     switch (rdev->family) {
1963     case CHIP_TAHITI:
1964     chip_name = "TAHITI";
1965     - /* XXX: figure out which Tahitis need the new ucode */
1966     - if (0)
1967     - new_smc = true;
1968     new_chip_name = "tahiti";
1969     pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1970     me_req_size = SI_PM4_UCODE_SIZE * 4;
1971     @@ -1671,12 +1667,9 @@ static int si_init_microcode(struct radeon_device *rdev)
1972     break;
1973     case CHIP_PITCAIRN:
1974     chip_name = "PITCAIRN";
1975     - if ((rdev->pdev->revision == 0x81) ||
1976     - (rdev->pdev->device == 0x6810) ||
1977     - (rdev->pdev->device == 0x6811) ||
1978     - (rdev->pdev->device == 0x6816) ||
1979     - (rdev->pdev->device == 0x6817) ||
1980     - (rdev->pdev->device == 0x6806))
1981     + if ((rdev->pdev->revision == 0x81) &&
1982     + ((rdev->pdev->device == 0x6810) ||
1983     + (rdev->pdev->device == 0x6811)))
1984     new_smc = true;
1985     new_chip_name = "pitcairn";
1986     pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1987     @@ -1689,15 +1682,15 @@ static int si_init_microcode(struct radeon_device *rdev)
1988     break;
1989     case CHIP_VERDE:
1990     chip_name = "VERDE";
1991     - if ((rdev->pdev->revision == 0x81) ||
1992     - (rdev->pdev->revision == 0x83) ||
1993     - (rdev->pdev->revision == 0x87) ||
1994     - (rdev->pdev->device == 0x6820) ||
1995     - (rdev->pdev->device == 0x6821) ||
1996     - (rdev->pdev->device == 0x6822) ||
1997     - (rdev->pdev->device == 0x6823) ||
1998     - (rdev->pdev->device == 0x682A) ||
1999     - (rdev->pdev->device == 0x682B))
2000     + if (((rdev->pdev->device == 0x6820) &&
2001     + ((rdev->pdev->revision == 0x81) ||
2002     + (rdev->pdev->revision == 0x83))) ||
2003     + ((rdev->pdev->device == 0x6821) &&
2004     + ((rdev->pdev->revision == 0x83) ||
2005     + (rdev->pdev->revision == 0x87))) ||
2006     + ((rdev->pdev->revision == 0x87) &&
2007     + ((rdev->pdev->device == 0x6823) ||
2008     + (rdev->pdev->device == 0x682b))))
2009     new_smc = true;
2010     new_chip_name = "verde";
2011     pfp_req_size = SI_PFP_UCODE_SIZE * 4;
2012     @@ -1710,13 +1703,13 @@ static int si_init_microcode(struct radeon_device *rdev)
2013     break;
2014     case CHIP_OLAND:
2015     chip_name = "OLAND";
2016     - if ((rdev->pdev->revision == 0xC7) ||
2017     - (rdev->pdev->revision == 0x80) ||
2018     - (rdev->pdev->revision == 0x81) ||
2019     - (rdev->pdev->revision == 0x83) ||
2020     - (rdev->pdev->revision == 0x87) ||
2021     - (rdev->pdev->device == 0x6604) ||
2022     - (rdev->pdev->device == 0x6605))
2023     + if (((rdev->pdev->revision == 0x81) &&
2024     + ((rdev->pdev->device == 0x6600) ||
2025     + (rdev->pdev->device == 0x6604) ||
2026     + (rdev->pdev->device == 0x6605) ||
2027     + (rdev->pdev->device == 0x6610))) ||
2028     + ((rdev->pdev->revision == 0x83) &&
2029     + (rdev->pdev->device == 0x6610)))
2030     new_smc = true;
2031     new_chip_name = "oland";
2032     pfp_req_size = SI_PFP_UCODE_SIZE * 4;
2033     @@ -1728,12 +1721,15 @@ static int si_init_microcode(struct radeon_device *rdev)
2034     break;
2035     case CHIP_HAINAN:
2036     chip_name = "HAINAN";
2037     - if ((rdev->pdev->revision == 0x81) ||
2038     - (rdev->pdev->revision == 0x83) ||
2039     - (rdev->pdev->revision == 0xC3) ||
2040     - (rdev->pdev->device == 0x6664) ||
2041     - (rdev->pdev->device == 0x6665) ||
2042     - (rdev->pdev->device == 0x6667))
2043     + if (((rdev->pdev->revision == 0x81) &&
2044     + (rdev->pdev->device == 0x6660)) ||
2045     + ((rdev->pdev->revision == 0x83) &&
2046     + ((rdev->pdev->device == 0x6660) ||
2047     + (rdev->pdev->device == 0x6663) ||
2048     + (rdev->pdev->device == 0x6665) ||
2049     + (rdev->pdev->device == 0x6667))) ||
2050     + ((rdev->pdev->revision == 0xc3) &&
2051     + (rdev->pdev->device == 0x6665)))
2052     new_smc = true;
2053     new_chip_name = "hainan";
2054     pfp_req_size = SI_PFP_UCODE_SIZE * 4;
2055     diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
2056     index 8b5e697f2549..13ba73fd9b68 100644
2057     --- a/drivers/gpu/drm/radeon/si_dpm.c
2058     +++ b/drivers/gpu/drm/radeon/si_dpm.c
2059     @@ -3008,19 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2060     (rdev->pdev->device == 0x6817) ||
2061     (rdev->pdev->device == 0x6806))
2062     max_mclk = 120000;
2063     - } else if (rdev->family == CHIP_VERDE) {
2064     - if ((rdev->pdev->revision == 0x81) ||
2065     - (rdev->pdev->revision == 0x83) ||
2066     - (rdev->pdev->revision == 0x87) ||
2067     - (rdev->pdev->device == 0x6820) ||
2068     - (rdev->pdev->device == 0x6821) ||
2069     - (rdev->pdev->device == 0x6822) ||
2070     - (rdev->pdev->device == 0x6823) ||
2071     - (rdev->pdev->device == 0x682A) ||
2072     - (rdev->pdev->device == 0x682B)) {
2073     - max_sclk = 75000;
2074     - max_mclk = 80000;
2075     - }
2076     } else if (rdev->family == CHIP_OLAND) {
2077     if ((rdev->pdev->revision == 0xC7) ||
2078     (rdev->pdev->revision == 0x80) ||
2079     diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
2080     index 3dc0d8ff95ec..2db89bed52e8 100644
2081     --- a/drivers/gpu/drm/savage/savage_state.c
2082     +++ b/drivers/gpu/drm/savage/savage_state.c
2083     @@ -1004,6 +1004,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
2084     kvb_addr = memdup_user(cmdbuf->vb_addr, cmdbuf->vb_size);
2085     if (IS_ERR(kvb_addr)) {
2086     ret = PTR_ERR(kvb_addr);
2087     + kvb_addr = NULL;
2088     goto done;
2089     }
2090     cmdbuf->vb_addr = kvb_addr;
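The savage fix above resets the local pointer to NULL when memdup_user() returns an error pointer, so the shared cleanup path can free it unconditionally without handing an ERR_PTR to kfree(). A small stand-alone sketch of that pattern; the userspace ERR_PTR/IS_ERR/PTR_ERR copies and the memdup()/run() helpers are illustrative only:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace copies of the kernel's ERR_PTR helpers, for illustration. */
#define MAX_ERRNO 4095
static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p) { return (unsigned long)p >= (unsigned long)-MAX_ERRNO; }

/* Stand-in for memdup_user(): duplicate a buffer or return an error pointer. */
static void *memdup(const void *src, size_t len)
{
	if (!src)
		return ERR_PTR(-EFAULT);
	void *p = malloc(len);
	if (!p)
		return ERR_PTR(-ENOMEM);
	return memcpy(p, src, len);
}

static int run(const void *user_buf, size_t len)
{
	int ret = 0;
	void *copy = memdup(user_buf, len);

	if (IS_ERR(copy)) {
		ret = (int)PTR_ERR(copy);
		copy = NULL;	/* so the common exit path never frees an ERR_PTR */
		goto done;
	}
	/* ... use the copy ... */
done:
	free(copy);		/* free(NULL) is a no-op, like kfree(NULL) */
	return ret;
}

int main(void)
{
	printf("ok path: %d\n", run("abc", 4));
	printf("err path: %d\n", run(NULL, 4));
	return 0;
}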
2091     diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
2092     index 059f409556d5..2fde44c3a1b3 100644
2093     --- a/drivers/gpu/drm/tegra/dpaux.c
2094     +++ b/drivers/gpu/drm/tegra/dpaux.c
2095     @@ -539,9 +539,9 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
2096     dpaux->desc.owner = THIS_MODULE;
2097    
2098     dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux);
2099     - if (!dpaux->pinctrl) {
2100     + if (IS_ERR(dpaux->pinctrl)) {
2101     dev_err(&pdev->dev, "failed to register pincontrol\n");
2102     - return -ENODEV;
2103     + return PTR_ERR(dpaux->pinctrl);
2104     }
2105     #endif
2106     /* enable and clear all interrupts */
2107     diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
2108     index 47a095f392f8..303f23c96220 100644
2109     --- a/drivers/gpu/drm/vc4/vc4_gem.c
2110     +++ b/drivers/gpu/drm/vc4/vc4_gem.c
2111     @@ -544,14 +544,15 @@ vc4_cl_lookup_bos(struct drm_device *dev,
2112    
2113     handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
2114     if (!handles) {
2115     + ret = -ENOMEM;
2116     DRM_ERROR("Failed to allocate incoming GEM handles\n");
2117     goto fail;
2118     }
2119    
2120     - ret = copy_from_user(handles,
2121     - (void __user *)(uintptr_t)args->bo_handles,
2122     - exec->bo_count * sizeof(uint32_t));
2123     - if (ret) {
2124     + if (copy_from_user(handles,
2125     + (void __user *)(uintptr_t)args->bo_handles,
2126     + exec->bo_count * sizeof(uint32_t))) {
2127     + ret = -EFAULT;
2128     DRM_ERROR("Failed to copy in GEM handles\n");
2129     goto fail;
2130     }
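The vc4 hunk above stops storing copy_from_user()'s return value (the number of bytes left uncopied) in ret and instead reports -EFAULT on any shortfall, with -ENOMEM set explicitly on allocation failure. A minimal sketch of normalizing that convention; fake_copy_from_user() and copy_in() below are illustrative stand-ins for the real uaccess helpers:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/*
 * Stand-in for copy_from_user(): returns the number of bytes that could
 * NOT be copied (0 on success), never a negative errno.
 */
static unsigned long fake_copy_from_user(void *to, const void *from,
					 unsigned long n)
{
	if (!from)
		return n;	/* nothing copied */
	memcpy(to, from, n);
	return 0;
}

/* Normalize the "bytes left" convention into 0 / -EFAULT for callers. */
static int copy_in(void *to, const void *from, unsigned long n)
{
	return fake_copy_from_user(to, from, n) ? -EFAULT : 0;
}

int main(void)
{
	char buf[8];

	printf("good copy -> %d\n", copy_in(buf, "handles", 8));
	printf("bad copy  -> %d\n", copy_in(buf, NULL, 8));
	return 0;
}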
2131     diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
2132     index c2268cdf38e8..e34d82e79b98 100644
2133     --- a/drivers/i2c/busses/i2c-piix4.c
2134     +++ b/drivers/i2c/busses/i2c-piix4.c
2135     @@ -585,10 +585,29 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
2136     u8 command, int size, union i2c_smbus_data *data)
2137     {
2138     struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap);
2139     + unsigned short piix4_smba = adapdata->smba;
2140     + int retries = MAX_TIMEOUT;
2141     + int smbslvcnt;
2142     u8 smba_en_lo;
2143     u8 port;
2144     int retval;
2145    
2146     + /* Request the SMBUS semaphore, avoid conflicts with the IMC */
2147     + smbslvcnt = inb_p(SMBSLVCNT);
2148     + do {
2149     + outb_p(smbslvcnt | 0x10, SMBSLVCNT);
2150     +
2151     + /* Check the semaphore status */
2152     + smbslvcnt = inb_p(SMBSLVCNT);
2153     + if (smbslvcnt & 0x10)
2154     + break;
2155     +
2156     + usleep_range(1000, 2000);
2157     + } while (--retries);
2158     + /* SMBus is still owned by the IMC, we give up */
2159     + if (!retries)
2160     + return -EBUSY;
2161     +
2162     mutex_lock(&piix4_mutex_sb800);
2163    
2164     outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
2165     @@ -606,6 +625,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
2166    
2167     mutex_unlock(&piix4_mutex_sb800);
2168    
2169     + /* Release the semaphore */
2170     + outb_p(smbslvcnt | 0x20, SMBSLVCNT);
2171     +
2172     return retval;
2173     }
2174    
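The piix4 change above claims an SMBus ownership bit shared with the IMC by polling it in a bounded retry loop, sleeping between attempts, giving up with -EBUSY, and releasing the bit once the transfer is done. A minimal userspace sketch of that claim/retry/release pattern; the 0x10 request bit and 0x20 release bit mirror the hunk, while the simulated smbslvcnt register and the helpers around it are invented for illustration:

#define _DEFAULT_SOURCE
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define OWN_BIT   0x10		/* "host owns the bus" */
#define CLEAR_BIT 0x20		/* write to release ownership */
#define MAX_TRIES 500

/* Simulated semaphore register; the real one lives in the chipset. */
static unsigned int smbslvcnt;

static unsigned int reg_read(void) { return smbslvcnt; }

static void reg_write(unsigned int v)
{
	if (v & CLEAR_BIT)
		smbslvcnt &= ~OWN_BIT;		/* release */
	else
		smbslvcnt |= (v & OWN_BIT);	/* request */
}

/* Claim the semaphore with a bounded retry loop, as the SB800 path does. */
static int claim_bus(void)
{
	int retries = MAX_TRIES;

	do {
		reg_write(reg_read() | OWN_BIT);	/* request ownership */
		if (reg_read() & OWN_BIT)		/* did we get it? */
			return 0;
		usleep(1000);
	} while (--retries);

	return -EBUSY;					/* still owned elsewhere */
}

static void release_bus(void)
{
	reg_write(reg_read() | CLEAR_BIT);
}

int main(void)
{
	int ret = claim_bus();

	if (ret) {
		fprintf(stderr, "bus busy (%d)\n", ret);
		return 1;
	}
	puts("transfer would run here");
	release_bus();
	return 0;
}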
2175     diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
2176     index b432b64e307a..7484aac1e14d 100644
2177     --- a/drivers/i2c/i2c-core.c
2178     +++ b/drivers/i2c/i2c-core.c
2179     @@ -1657,7 +1657,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
2180    
2181     if (i2c_check_addr_validity(addr, info.flags)) {
2182     dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
2183     - info.addr, node->full_name);
2184     + addr, node->full_name);
2185     return ERR_PTR(-EINVAL);
2186     }
2187    
2188     diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
2189     index 66f323fd3982..6f638bbc922d 100644
2190     --- a/drivers/i2c/i2c-dev.c
2191     +++ b/drivers/i2c/i2c-dev.c
2192     @@ -331,7 +331,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
2193     unsigned long arg)
2194     {
2195     struct i2c_smbus_ioctl_data data_arg;
2196     - union i2c_smbus_data temp;
2197     + union i2c_smbus_data temp = {};
2198     int datasize, res;
2199    
2200     if (copy_from_user(&data_arg,
2201     diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
2202     index 8bc3d36d2837..9c4ac26c014e 100644
2203     --- a/drivers/i2c/muxes/i2c-mux-pca954x.c
2204     +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
2205     @@ -151,6 +151,9 @@ static int pca954x_reg_write(struct i2c_adapter *adap,
2206     buf[0] = val;
2207     msg.buf = buf;
2208     ret = __i2c_transfer(adap, &msg, 1);
2209     +
2210     + if (ret >= 0 && ret != 1)
2211     + ret = -EREMOTEIO;
2212     } else {
2213     union i2c_smbus_data data;
2214     ret = adap->algo->smbus_xfer(adap, client->addr,
2215     @@ -179,7 +182,7 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
2216     /* Only select the channel if its different from the last channel */
2217     if (data->last_chan != regval) {
2218     ret = pca954x_reg_write(muxc->parent, client, regval);
2219     - data->last_chan = ret ? 0 : regval;
2220     + data->last_chan = ret < 0 ? 0 : regval;
2221     }
2222    
2223     return ret;
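The pca954x fix above relies on __i2c_transfer() returning the number of messages transferred on success (or a negative errno), and folds a short transfer into -EREMOTEIO so callers only have to test for ret < 0. A minimal sketch of that normalization; normalize_xfer() is an illustrative helper, and the EREMOTEIO fallback define only exists so the sketch builds on libcs that lack it:

#include <errno.h>
#include <stdio.h>

#ifndef EREMOTEIO
#define EREMOTEIO EIO		/* fallback for non-Linux libcs */
#endif

/*
 * Fold the "number of messages transferred or negative errno" convention
 * into "0 or negative errno", treating a short transfer as a remote I/O
 * error, which is the normalization the mux driver now applies.
 */
static int normalize_xfer(int ret, int expected_msgs)
{
	if (ret < 0)
		return ret;			/* already an errno */
	if (ret != expected_msgs)
		return -EREMOTEIO;		/* partial transfer */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       normalize_xfer(1, 1),		/* all messages sent: 0 */
	       normalize_xfer(0, 1),		/* short transfer: -EREMOTEIO */
	       normalize_xfer(-ENXIO, 1));	/* propagate errno */
	return 0;
}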
2224     diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
2225     index 93e3d270a98a..b99dc9e0ffb2 100644
2226     --- a/drivers/infiniband/hw/cxgb4/device.c
2227     +++ b/drivers/infiniband/hw/cxgb4/device.c
2228     @@ -828,8 +828,10 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
2229     }
2230     rdev->status_page = (struct t4_dev_status_page *)
2231     __get_free_page(GFP_KERNEL);
2232     - if (!rdev->status_page)
2233     + if (!rdev->status_page) {
2234     + err = -ENOMEM;
2235     goto destroy_ocqp_pool;
2236     + }
2237     rdev->status_page->qp_start = rdev->lldi.vr->qp.start;
2238     rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
2239     rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
2240     diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
2241     index 83af17ad0f1f..bbe15243b8e7 100644
2242     --- a/drivers/input/joystick/xpad.c
2243     +++ b/drivers/input/joystick/xpad.c
2244     @@ -1376,6 +1376,12 @@ static int xpad_init_input(struct usb_xpad *xpad)
2245     input_dev->name = xpad->name;
2246     input_dev->phys = xpad->phys;
2247     usb_to_input_id(xpad->udev, &input_dev->id);
2248     +
2249     + if (xpad->xtype == XTYPE_XBOX360W) {
2250     + /* x360w controllers and the receiver have different ids */
2251     + input_dev->id.product = 0x02a1;
2252     + }
2253     +
2254     input_dev->dev.parent = &xpad->intf->dev;
2255    
2256     input_set_drvdata(input_dev, xpad);
2257     diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
2258     index 073246c7d163..0cdd95801a25 100644
2259     --- a/drivers/input/serio/i8042-x86ia64io.h
2260     +++ b/drivers/input/serio/i8042-x86ia64io.h
2261     @@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
2262     DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
2263     },
2264     },
2265     + {
2266     + .matches = {
2267     + DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
2268     + DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
2269     + },
2270     + },
2271     { }
2272     };
2273    
2274     diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
2275     index 02aec284deca..3e6003d32e56 100644
2276     --- a/drivers/input/touchscreen/elants_i2c.c
2277     +++ b/drivers/input/touchscreen/elants_i2c.c
2278     @@ -914,9 +914,9 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
2279    
2280     case QUEUE_HEADER_NORMAL:
2281     report_count = ts->buf[FW_HDR_COUNT];
2282     - if (report_count > 3) {
2283     + if (report_count == 0 || report_count > 3) {
2284     dev_err(&client->dev,
2285     - "too large report count: %*ph\n",
2286     + "bad report count: %*ph\n",
2287     HEADER_SIZE, ts->buf);
2288     break;
2289     }
2290     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2291     index 92bd13ddc39d..0c9ef8729ca7 100644
2292     --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
2293     +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2294     @@ -1158,7 +1158,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
2295     {
2296     int err = 0;
2297    
2298     - mlx5_drain_health_wq(dev);
2299     + if (cleanup)
2300     + mlx5_drain_health_wq(dev);
2301    
2302     mutex_lock(&dev->intf_state_mutex);
2303     if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
2304     @@ -1320,9 +1321,10 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
2305    
2306     mlx5_enter_error_state(dev);
2307     mlx5_unload_one(dev, priv, false);
2308     - /* In case of kernel call save the pci state */
2309     + /* In case of kernel call save the pci state and drain the health wq */
2310     if (state) {
2311     pci_save_state(pdev);
2312     + mlx5_drain_health_wq(dev);
2313     mlx5_pci_disable_device(dev);
2314     }
2315    
2316     diff --git a/drivers/net/wireless/intersil/orinoco/mic.c b/drivers/net/wireless/intersil/orinoco/mic.c
2317     index bc7397d709d3..08bc7822f820 100644
2318     --- a/drivers/net/wireless/intersil/orinoco/mic.c
2319     +++ b/drivers/net/wireless/intersil/orinoco/mic.c
2320     @@ -16,7 +16,7 @@
2321     /********************************************************************/
2322     int orinoco_mic_init(struct orinoco_private *priv)
2323     {
2324     - priv->tx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
2325     + priv->tx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
2326     CRYPTO_ALG_ASYNC);
2327     if (IS_ERR(priv->tx_tfm_mic)) {
2328     printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
2329     @@ -25,7 +25,7 @@ int orinoco_mic_init(struct orinoco_private *priv)
2330     return -ENOMEM;
2331     }
2332    
2333     - priv->rx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
2334     + priv->rx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
2335     CRYPTO_ALG_ASYNC);
2336     if (IS_ERR(priv->rx_tfm_mic)) {
2337     printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
2338     @@ -40,17 +40,16 @@ int orinoco_mic_init(struct orinoco_private *priv)
2339     void orinoco_mic_free(struct orinoco_private *priv)
2340     {
2341     if (priv->tx_tfm_mic)
2342     - crypto_free_ahash(priv->tx_tfm_mic);
2343     + crypto_free_shash(priv->tx_tfm_mic);
2344     if (priv->rx_tfm_mic)
2345     - crypto_free_ahash(priv->rx_tfm_mic);
2346     + crypto_free_shash(priv->rx_tfm_mic);
2347     }
2348    
2349     -int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
2350     +int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
2351     u8 *da, u8 *sa, u8 priority,
2352     u8 *data, size_t data_len, u8 *mic)
2353     {
2354     - AHASH_REQUEST_ON_STACK(req, tfm_michael);
2355     - struct scatterlist sg[2];
2356     + SHASH_DESC_ON_STACK(desc, tfm_michael);
2357     u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
2358     int err;
2359    
2360     @@ -67,18 +66,27 @@ int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
2361     hdr[ETH_ALEN * 2 + 2] = 0;
2362     hdr[ETH_ALEN * 2 + 3] = 0;
2363    
2364     - /* Use scatter gather to MIC header and data in one go */
2365     - sg_init_table(sg, 2);
2366     - sg_set_buf(&sg[0], hdr, sizeof(hdr));
2367     - sg_set_buf(&sg[1], data, data_len);
2368     + desc->tfm = tfm_michael;
2369     + desc->flags = 0;
2370    
2371     - if (crypto_ahash_setkey(tfm_michael, key, MIC_KEYLEN))
2372     - return -1;
2373     + err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN);
2374     + if (err)
2375     + return err;
2376     +
2377     + err = crypto_shash_init(desc);
2378     + if (err)
2379     + return err;
2380     +
2381     + err = crypto_shash_update(desc, hdr, sizeof(hdr));
2382     + if (err)
2383     + return err;
2384     +
2385     + err = crypto_shash_update(desc, data, data_len);
2386     + if (err)
2387     + return err;
2388     +
2389     + err = crypto_shash_final(desc, mic);
2390     + shash_desc_zero(desc);
2391    
2392     - ahash_request_set_tfm(req, tfm_michael);
2393     - ahash_request_set_callback(req, 0, NULL, NULL);
2394     - ahash_request_set_crypt(req, sg, mic, data_len + sizeof(hdr));
2395     - err = crypto_ahash_digest(req);
2396     - ahash_request_zero(req);
2397     return err;
2398     }
2399     diff --git a/drivers/net/wireless/intersil/orinoco/mic.h b/drivers/net/wireless/intersil/orinoco/mic.h
2400     index ce731d05cc98..e8724e889219 100644
2401     --- a/drivers/net/wireless/intersil/orinoco/mic.h
2402     +++ b/drivers/net/wireless/intersil/orinoco/mic.h
2403     @@ -6,6 +6,7 @@
2404     #define _ORINOCO_MIC_H_
2405    
2406     #include <linux/types.h>
2407     +#include <crypto/hash.h>
2408    
2409     #define MICHAEL_MIC_LEN 8
2410    
2411     @@ -15,7 +16,7 @@ struct crypto_ahash;
2412    
2413     int orinoco_mic_init(struct orinoco_private *priv);
2414     void orinoco_mic_free(struct orinoco_private *priv);
2415     -int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
2416     +int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
2417     u8 *da, u8 *sa, u8 priority,
2418     u8 *data, size_t data_len, u8 *mic);
2419    
2420     diff --git a/drivers/net/wireless/intersil/orinoco/orinoco.h b/drivers/net/wireless/intersil/orinoco/orinoco.h
2421     index 2f0c84b1c440..5fa1c3e3713f 100644
2422     --- a/drivers/net/wireless/intersil/orinoco/orinoco.h
2423     +++ b/drivers/net/wireless/intersil/orinoco/orinoco.h
2424     @@ -152,8 +152,8 @@ struct orinoco_private {
2425     u8 *wpa_ie;
2426     int wpa_ie_len;
2427    
2428     - struct crypto_ahash *rx_tfm_mic;
2429     - struct crypto_ahash *tx_tfm_mic;
2430     + struct crypto_shash *rx_tfm_mic;
2431     + struct crypto_shash *tx_tfm_mic;
2432    
2433     unsigned int wpa_enabled:1;
2434     unsigned int tkip_cm_active:1;
2435     diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2436     index 79e679d12f3b..da10b484bd25 100644
2437     --- a/drivers/nvme/host/core.c
2438     +++ b/drivers/nvme/host/core.c
2439     @@ -1122,12 +1122,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
2440     if (ret)
2441     return ret;
2442    
2443     - /* Checking for ctrl->tagset is a trick to avoid sleeping on module
2444     - * load, since we only need the quirk on reset_controller. Notice
2445     - * that the HGST device needs this delay only in firmware activation
2446     - * procedure; unfortunately we have no (easy) way to verify this.
2447     - */
2448     - if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset)
2449     + if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
2450     msleep(NVME_QUIRK_DELAY_AMOUNT);
2451    
2452     return nvme_wait_ready(ctrl, cap, false);
2453     diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
2454     index 79c4e14a5a75..5ef7e875b50e 100644
2455     --- a/drivers/pinctrl/freescale/pinctrl-imx.c
2456     +++ b/drivers/pinctrl/freescale/pinctrl-imx.c
2457     @@ -778,10 +778,10 @@ int imx_pinctrl_probe(struct platform_device *pdev,
2458     imx_pinctrl_desc->name = dev_name(&pdev->dev);
2459     imx_pinctrl_desc->pins = info->pins;
2460     imx_pinctrl_desc->npins = info->npins;
2461     - imx_pinctrl_desc->pctlops = &imx_pctrl_ops,
2462     - imx_pinctrl_desc->pmxops = &imx_pmx_ops,
2463     - imx_pinctrl_desc->confops = &imx_pinconf_ops,
2464     - imx_pinctrl_desc->owner = THIS_MODULE,
2465     + imx_pinctrl_desc->pctlops = &imx_pctrl_ops;
2466     + imx_pinctrl_desc->pmxops = &imx_pmx_ops;
2467     + imx_pinctrl_desc->confops = &imx_pinconf_ops;
2468     + imx_pinctrl_desc->owner = THIS_MODULE;
2469    
2470     ret = imx_pinctrl_probe_dt(pdev, info);
2471     if (ret) {
2472     diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
2473     index 57122eda155a..9443c9d408c6 100644
2474     --- a/drivers/pinctrl/meson/pinctrl-meson.c
2475     +++ b/drivers/pinctrl/meson/pinctrl-meson.c
2476     @@ -212,7 +212,7 @@ static int meson_pmx_request_gpio(struct pinctrl_dev *pcdev,
2477     {
2478     struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
2479    
2480     - meson_pmx_disable_other_groups(pc, range->pin_base + offset, -1);
2481     + meson_pmx_disable_other_groups(pc, offset, -1);
2482    
2483     return 0;
2484     }
2485     diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
2486     index f3a8897d4e8f..cf80ce1dd7ce 100644
2487     --- a/drivers/pinctrl/sh-pfc/core.c
2488     +++ b/drivers/pinctrl/sh-pfc/core.c
2489     @@ -389,6 +389,21 @@ int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type)
2490     return 0;
2491     }
2492    
2493     +const struct sh_pfc_bias_info *
2494     +sh_pfc_pin_to_bias_info(const struct sh_pfc_bias_info *info,
2495     + unsigned int num, unsigned int pin)
2496     +{
2497     + unsigned int i;
2498     +
2499     + for (i = 0; i < num; i++)
2500     + if (info[i].pin == pin)
2501     + return &info[i];
2502     +
2503     + WARN_ONCE(1, "Pin %u is not in bias info list\n", pin);
2504     +
2505     + return NULL;
2506     +}
2507     +
2508     static int sh_pfc_init_ranges(struct sh_pfc *pfc)
2509     {
2510     struct sh_pfc_pin_range *range;
2511     diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
2512     index 0bbdea5849f4..6d598dd63720 100644
2513     --- a/drivers/pinctrl/sh-pfc/core.h
2514     +++ b/drivers/pinctrl/sh-pfc/core.h
2515     @@ -33,4 +33,8 @@ void sh_pfc_write_reg(struct sh_pfc *pfc, u32 reg, unsigned int width,
2516     int sh_pfc_get_pin_index(struct sh_pfc *pfc, unsigned int pin);
2517     int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type);
2518    
2519     +const struct sh_pfc_bias_info *
2520     +sh_pfc_pin_to_bias_info(const struct sh_pfc_bias_info *info,
2521     + unsigned int num, unsigned int pin);
2522     +
2523     #endif /* __SH_PFC_CORE_H__ */
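The sh-pfc change above replaces a sparse array indexed by pin number with a compact table plus sh_pfc_pin_to_bias_info(), which does a linear search and returns NULL (with a one-time warning) for pins that have no bias register. A minimal stand-alone sketch of that lookup; the table contents and the pin_to_bias_info() helper below are illustrative, not the r8a7795 data:

#include <stdio.h>

struct bias_info { unsigned int pin; unsigned int reg; unsigned int bit; };

/* Compact table keyed by pin number (values are made up for the sketch). */
static const struct bias_info bias_info[] = {
	{ 11, 0x00, 31 },
	{ 10, 0x00, 30 },
	{  9, 0x00, 29 },
};

#define NUM_BIAS (unsigned int)(sizeof(bias_info) / sizeof(bias_info[0]))

/* Linear search returning the entry or NULL, warning once on a miss. */
static const struct bias_info *
pin_to_bias_info(const struct bias_info *info, unsigned int num,
		 unsigned int pin)
{
	static int warned;

	for (unsigned int i = 0; i < num; i++)
		if (info[i].pin == pin)
			return &info[i];

	if (!warned++)
		fprintf(stderr, "Pin %u is not in bias info list\n", pin);
	return NULL;
}

int main(void)
{
	const struct bias_info *bi = pin_to_bias_info(bias_info, NUM_BIAS, 10);

	if (bi)
		printf("pin %u -> reg %#x bit %u\n", bi->pin, bi->reg, bi->bit);
	pin_to_bias_info(bias_info, NUM_BIAS, 99);	/* miss: warns, returns NULL */
	return 0;
}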
2524     diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
2525     index 2e8cc2adbed7..84cee66b1e08 100644
2526     --- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
2527     +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
2528     @@ -5188,184 +5188,183 @@ static int r8a7795_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc
2529     #define PU5 0x14
2530     #define PU6 0x18
2531    
2532     -static const struct {
2533     - u16 reg : 11;
2534     - u16 bit : 5;
2535     -} pullups[] = {
2536     - [RCAR_GP_PIN(2, 11)] = { PU0, 31 }, /* AVB_PHY_INT */
2537     - [RCAR_GP_PIN(2, 10)] = { PU0, 30 }, /* AVB_MAGIC */
2538     - [RCAR_GP_PIN(2, 9)] = { PU0, 29 }, /* AVB_MDC */
2539     -
2540     - [RCAR_GP_PIN(1, 19)] = { PU1, 31 }, /* A19 */
2541     - [RCAR_GP_PIN(1, 18)] = { PU1, 30 }, /* A18 */
2542     - [RCAR_GP_PIN(1, 17)] = { PU1, 29 }, /* A17 */
2543     - [RCAR_GP_PIN(1, 16)] = { PU1, 28 }, /* A16 */
2544     - [RCAR_GP_PIN(1, 15)] = { PU1, 27 }, /* A15 */
2545     - [RCAR_GP_PIN(1, 14)] = { PU1, 26 }, /* A14 */
2546     - [RCAR_GP_PIN(1, 13)] = { PU1, 25 }, /* A13 */
2547     - [RCAR_GP_PIN(1, 12)] = { PU1, 24 }, /* A12 */
2548     - [RCAR_GP_PIN(1, 11)] = { PU1, 23 }, /* A11 */
2549     - [RCAR_GP_PIN(1, 10)] = { PU1, 22 }, /* A10 */
2550     - [RCAR_GP_PIN(1, 9)] = { PU1, 21 }, /* A9 */
2551     - [RCAR_GP_PIN(1, 8)] = { PU1, 20 }, /* A8 */
2552     - [RCAR_GP_PIN(1, 7)] = { PU1, 19 }, /* A7 */
2553     - [RCAR_GP_PIN(1, 6)] = { PU1, 18 }, /* A6 */
2554     - [RCAR_GP_PIN(1, 5)] = { PU1, 17 }, /* A5 */
2555     - [RCAR_GP_PIN(1, 4)] = { PU1, 16 }, /* A4 */
2556     - [RCAR_GP_PIN(1, 3)] = { PU1, 15 }, /* A3 */
2557     - [RCAR_GP_PIN(1, 2)] = { PU1, 14 }, /* A2 */
2558     - [RCAR_GP_PIN(1, 1)] = { PU1, 13 }, /* A1 */
2559     - [RCAR_GP_PIN(1, 0)] = { PU1, 12 }, /* A0 */
2560     - [RCAR_GP_PIN(2, 8)] = { PU1, 11 }, /* PWM2_A */
2561     - [RCAR_GP_PIN(2, 7)] = { PU1, 10 }, /* PWM1_A */
2562     - [RCAR_GP_PIN(2, 6)] = { PU1, 9 }, /* PWM0 */
2563     - [RCAR_GP_PIN(2, 5)] = { PU1, 8 }, /* IRQ5 */
2564     - [RCAR_GP_PIN(2, 4)] = { PU1, 7 }, /* IRQ4 */
2565     - [RCAR_GP_PIN(2, 3)] = { PU1, 6 }, /* IRQ3 */
2566     - [RCAR_GP_PIN(2, 2)] = { PU1, 5 }, /* IRQ2 */
2567     - [RCAR_GP_PIN(2, 1)] = { PU1, 4 }, /* IRQ1 */
2568     - [RCAR_GP_PIN(2, 0)] = { PU1, 3 }, /* IRQ0 */
2569     - [RCAR_GP_PIN(2, 14)] = { PU1, 2 }, /* AVB_AVTP_CAPTURE_A */
2570     - [RCAR_GP_PIN(2, 13)] = { PU1, 1 }, /* AVB_AVTP_MATCH_A */
2571     - [RCAR_GP_PIN(2, 12)] = { PU1, 0 }, /* AVB_LINK */
2572     -
2573     - [RCAR_GP_PIN(7, 3)] = { PU2, 29 }, /* HDMI1_CEC */
2574     - [RCAR_GP_PIN(7, 2)] = { PU2, 28 }, /* HDMI0_CEC */
2575     - [RCAR_GP_PIN(7, 1)] = { PU2, 27 }, /* AVS2 */
2576     - [RCAR_GP_PIN(7, 0)] = { PU2, 26 }, /* AVS1 */
2577     - [RCAR_GP_PIN(0, 15)] = { PU2, 25 }, /* D15 */
2578     - [RCAR_GP_PIN(0, 14)] = { PU2, 24 }, /* D14 */
2579     - [RCAR_GP_PIN(0, 13)] = { PU2, 23 }, /* D13 */
2580     - [RCAR_GP_PIN(0, 12)] = { PU2, 22 }, /* D12 */
2581     - [RCAR_GP_PIN(0, 11)] = { PU2, 21 }, /* D11 */
2582     - [RCAR_GP_PIN(0, 10)] = { PU2, 20 }, /* D10 */
2583     - [RCAR_GP_PIN(0, 9)] = { PU2, 19 }, /* D9 */
2584     - [RCAR_GP_PIN(0, 8)] = { PU2, 18 }, /* D8 */
2585     - [RCAR_GP_PIN(0, 7)] = { PU2, 17 }, /* D7 */
2586     - [RCAR_GP_PIN(0, 6)] = { PU2, 16 }, /* D6 */
2587     - [RCAR_GP_PIN(0, 5)] = { PU2, 15 }, /* D5 */
2588     - [RCAR_GP_PIN(0, 4)] = { PU2, 14 }, /* D4 */
2589     - [RCAR_GP_PIN(0, 3)] = { PU2, 13 }, /* D3 */
2590     - [RCAR_GP_PIN(0, 2)] = { PU2, 12 }, /* D2 */
2591     - [RCAR_GP_PIN(0, 1)] = { PU2, 11 }, /* D1 */
2592     - [RCAR_GP_PIN(0, 0)] = { PU2, 10 }, /* D0 */
2593     - [RCAR_GP_PIN(1, 27)] = { PU2, 8 }, /* EX_WAIT0_A */
2594     - [RCAR_GP_PIN(1, 26)] = { PU2, 7 }, /* WE1_N */
2595     - [RCAR_GP_PIN(1, 25)] = { PU2, 6 }, /* WE0_N */
2596     - [RCAR_GP_PIN(1, 24)] = { PU2, 5 }, /* RD_WR_N */
2597     - [RCAR_GP_PIN(1, 23)] = { PU2, 4 }, /* RD_N */
2598     - [RCAR_GP_PIN(1, 22)] = { PU2, 3 }, /* BS_N */
2599     - [RCAR_GP_PIN(1, 21)] = { PU2, 2 }, /* CS1_N_A26 */
2600     - [RCAR_GP_PIN(1, 20)] = { PU2, 1 }, /* CS0_N */
2601     -
2602     - [RCAR_GP_PIN(4, 9)] = { PU3, 31 }, /* SD3_DAT0 */
2603     - [RCAR_GP_PIN(4, 8)] = { PU3, 30 }, /* SD3_CMD */
2604     - [RCAR_GP_PIN(4, 7)] = { PU3, 29 }, /* SD3_CLK */
2605     - [RCAR_GP_PIN(4, 6)] = { PU3, 28 }, /* SD2_DS */
2606     - [RCAR_GP_PIN(4, 5)] = { PU3, 27 }, /* SD2_DAT3 */
2607     - [RCAR_GP_PIN(4, 4)] = { PU3, 26 }, /* SD2_DAT2 */
2608     - [RCAR_GP_PIN(4, 3)] = { PU3, 25 }, /* SD2_DAT1 */
2609     - [RCAR_GP_PIN(4, 2)] = { PU3, 24 }, /* SD2_DAT0 */
2610     - [RCAR_GP_PIN(4, 1)] = { PU3, 23 }, /* SD2_CMD */
2611     - [RCAR_GP_PIN(4, 0)] = { PU3, 22 }, /* SD2_CLK */
2612     - [RCAR_GP_PIN(3, 11)] = { PU3, 21 }, /* SD1_DAT3 */
2613     - [RCAR_GP_PIN(3, 10)] = { PU3, 20 }, /* SD1_DAT2 */
2614     - [RCAR_GP_PIN(3, 9)] = { PU3, 19 }, /* SD1_DAT1 */
2615     - [RCAR_GP_PIN(3, 8)] = { PU3, 18 }, /* SD1_DAT0 */
2616     - [RCAR_GP_PIN(3, 7)] = { PU3, 17 }, /* SD1_CMD */
2617     - [RCAR_GP_PIN(3, 6)] = { PU3, 16 }, /* SD1_CLK */
2618     - [RCAR_GP_PIN(3, 5)] = { PU3, 15 }, /* SD0_DAT3 */
2619     - [RCAR_GP_PIN(3, 4)] = { PU3, 14 }, /* SD0_DAT2 */
2620     - [RCAR_GP_PIN(3, 3)] = { PU3, 13 }, /* SD0_DAT1 */
2621     - [RCAR_GP_PIN(3, 2)] = { PU3, 12 }, /* SD0_DAT0 */
2622     - [RCAR_GP_PIN(3, 1)] = { PU3, 11 }, /* SD0_CMD */
2623     - [RCAR_GP_PIN(3, 0)] = { PU3, 10 }, /* SD0_CLK */
2624     -
2625     - [RCAR_GP_PIN(5, 19)] = { PU4, 31 }, /* MSIOF0_SS1 */
2626     - [RCAR_GP_PIN(5, 18)] = { PU4, 30 }, /* MSIOF0_SYNC */
2627     - [RCAR_GP_PIN(5, 17)] = { PU4, 29 }, /* MSIOF0_SCK */
2628     - [RCAR_GP_PIN(5, 16)] = { PU4, 28 }, /* HRTS0_N */
2629     - [RCAR_GP_PIN(5, 15)] = { PU4, 27 }, /* HCTS0_N */
2630     - [RCAR_GP_PIN(5, 14)] = { PU4, 26 }, /* HTX0 */
2631     - [RCAR_GP_PIN(5, 13)] = { PU4, 25 }, /* HRX0 */
2632     - [RCAR_GP_PIN(5, 12)] = { PU4, 24 }, /* HSCK0 */
2633     - [RCAR_GP_PIN(5, 11)] = { PU4, 23 }, /* RX2_A */
2634     - [RCAR_GP_PIN(5, 10)] = { PU4, 22 }, /* TX2_A */
2635     - [RCAR_GP_PIN(5, 9)] = { PU4, 21 }, /* SCK2 */
2636     - [RCAR_GP_PIN(5, 8)] = { PU4, 20 }, /* RTS1_N_TANS */
2637     - [RCAR_GP_PIN(5, 7)] = { PU4, 19 }, /* CTS1_N */
2638     - [RCAR_GP_PIN(5, 6)] = { PU4, 18 }, /* TX1_A */
2639     - [RCAR_GP_PIN(5, 5)] = { PU4, 17 }, /* RX1_A */
2640     - [RCAR_GP_PIN(5, 4)] = { PU4, 16 }, /* RTS0_N_TANS */
2641     - [RCAR_GP_PIN(5, 3)] = { PU4, 15 }, /* CTS0_N */
2642     - [RCAR_GP_PIN(5, 2)] = { PU4, 14 }, /* TX0 */
2643     - [RCAR_GP_PIN(5, 1)] = { PU4, 13 }, /* RX0 */
2644     - [RCAR_GP_PIN(5, 0)] = { PU4, 12 }, /* SCK0 */
2645     - [RCAR_GP_PIN(3, 15)] = { PU4, 11 }, /* SD1_WP */
2646     - [RCAR_GP_PIN(3, 14)] = { PU4, 10 }, /* SD1_CD */
2647     - [RCAR_GP_PIN(3, 13)] = { PU4, 9 }, /* SD0_WP */
2648     - [RCAR_GP_PIN(3, 12)] = { PU4, 8 }, /* SD0_CD */
2649     - [RCAR_GP_PIN(4, 17)] = { PU4, 7 }, /* SD3_DS */
2650     - [RCAR_GP_PIN(4, 16)] = { PU4, 6 }, /* SD3_DAT7 */
2651     - [RCAR_GP_PIN(4, 15)] = { PU4, 5 }, /* SD3_DAT6 */
2652     - [RCAR_GP_PIN(4, 14)] = { PU4, 4 }, /* SD3_DAT5 */
2653     - [RCAR_GP_PIN(4, 13)] = { PU4, 3 }, /* SD3_DAT4 */
2654     - [RCAR_GP_PIN(4, 12)] = { PU4, 2 }, /* SD3_DAT3 */
2655     - [RCAR_GP_PIN(4, 11)] = { PU4, 1 }, /* SD3_DAT2 */
2656     - [RCAR_GP_PIN(4, 10)] = { PU4, 0 }, /* SD3_DAT1 */
2657     -
2658     - [RCAR_GP_PIN(6, 24)] = { PU5, 31 }, /* USB0_PWEN */
2659     - [RCAR_GP_PIN(6, 23)] = { PU5, 30 }, /* AUDIO_CLKB_B */
2660     - [RCAR_GP_PIN(6, 22)] = { PU5, 29 }, /* AUDIO_CLKA_A */
2661     - [RCAR_GP_PIN(6, 21)] = { PU5, 28 }, /* SSI_SDATA9_A */
2662     - [RCAR_GP_PIN(6, 20)] = { PU5, 27 }, /* SSI_SDATA8 */
2663     - [RCAR_GP_PIN(6, 19)] = { PU5, 26 }, /* SSI_SDATA7 */
2664     - [RCAR_GP_PIN(6, 18)] = { PU5, 25 }, /* SSI_WS78 */
2665     - [RCAR_GP_PIN(6, 17)] = { PU5, 24 }, /* SSI_SCK78 */
2666     - [RCAR_GP_PIN(6, 16)] = { PU5, 23 }, /* SSI_SDATA6 */
2667     - [RCAR_GP_PIN(6, 15)] = { PU5, 22 }, /* SSI_WS6 */
2668     - [RCAR_GP_PIN(6, 14)] = { PU5, 21 }, /* SSI_SCK6 */
2669     - [RCAR_GP_PIN(6, 13)] = { PU5, 20 }, /* SSI_SDATA5 */
2670     - [RCAR_GP_PIN(6, 12)] = { PU5, 19 }, /* SSI_WS5 */
2671     - [RCAR_GP_PIN(6, 11)] = { PU5, 18 }, /* SSI_SCK5 */
2672     - [RCAR_GP_PIN(6, 10)] = { PU5, 17 }, /* SSI_SDATA4 */
2673     - [RCAR_GP_PIN(6, 9)] = { PU5, 16 }, /* SSI_WS4 */
2674     - [RCAR_GP_PIN(6, 8)] = { PU5, 15 }, /* SSI_SCK4 */
2675     - [RCAR_GP_PIN(6, 7)] = { PU5, 14 }, /* SSI_SDATA3 */
2676     - [RCAR_GP_PIN(6, 6)] = { PU5, 13 }, /* SSI_WS34 */
2677     - [RCAR_GP_PIN(6, 5)] = { PU5, 12 }, /* SSI_SCK34 */
2678     - [RCAR_GP_PIN(6, 4)] = { PU5, 11 }, /* SSI_SDATA2_A */
2679     - [RCAR_GP_PIN(6, 3)] = { PU5, 10 }, /* SSI_SDATA1_A */
2680     - [RCAR_GP_PIN(6, 2)] = { PU5, 9 }, /* SSI_SDATA0 */
2681     - [RCAR_GP_PIN(6, 1)] = { PU5, 8 }, /* SSI_WS01239 */
2682     - [RCAR_GP_PIN(6, 0)] = { PU5, 7 }, /* SSI_SCK01239 */
2683     - [RCAR_GP_PIN(5, 25)] = { PU5, 5 }, /* MLB_DAT */
2684     - [RCAR_GP_PIN(5, 24)] = { PU5, 4 }, /* MLB_SIG */
2685     - [RCAR_GP_PIN(5, 23)] = { PU5, 3 }, /* MLB_CLK */
2686     - [RCAR_GP_PIN(5, 22)] = { PU5, 2 }, /* MSIOF0_RXD */
2687     - [RCAR_GP_PIN(5, 21)] = { PU5, 1 }, /* MSIOF0_SS2 */
2688     - [RCAR_GP_PIN(5, 20)] = { PU5, 0 }, /* MSIOF0_TXD */
2689     -
2690     - [RCAR_GP_PIN(6, 31)] = { PU6, 6 }, /* USB31_OVC */
2691     - [RCAR_GP_PIN(6, 30)] = { PU6, 5 }, /* USB31_PWEN */
2692     - [RCAR_GP_PIN(6, 29)] = { PU6, 4 }, /* USB30_OVC */
2693     - [RCAR_GP_PIN(6, 28)] = { PU6, 3 }, /* USB30_PWEN */
2694     - [RCAR_GP_PIN(6, 27)] = { PU6, 2 }, /* USB1_OVC */
2695     - [RCAR_GP_PIN(6, 26)] = { PU6, 1 }, /* USB1_PWEN */
2696     - [RCAR_GP_PIN(6, 25)] = { PU6, 0 }, /* USB0_OVC */
2697     +static const struct sh_pfc_bias_info bias_info[] = {
2698     + { RCAR_GP_PIN(2, 11), PU0, 31 }, /* AVB_PHY_INT */
2699     + { RCAR_GP_PIN(2, 10), PU0, 30 }, /* AVB_MAGIC */
2700     + { RCAR_GP_PIN(2, 9), PU0, 29 }, /* AVB_MDC */
2701     +
2702     + { RCAR_GP_PIN(1, 19), PU1, 31 }, /* A19 */
2703     + { RCAR_GP_PIN(1, 18), PU1, 30 }, /* A18 */
2704     + { RCAR_GP_PIN(1, 17), PU1, 29 }, /* A17 */
2705     + { RCAR_GP_PIN(1, 16), PU1, 28 }, /* A16 */
2706     + { RCAR_GP_PIN(1, 15), PU1, 27 }, /* A15 */
2707     + { RCAR_GP_PIN(1, 14), PU1, 26 }, /* A14 */
2708     + { RCAR_GP_PIN(1, 13), PU1, 25 }, /* A13 */
2709     + { RCAR_GP_PIN(1, 12), PU1, 24 }, /* A12 */
2710     + { RCAR_GP_PIN(1, 11), PU1, 23 }, /* A11 */
2711     + { RCAR_GP_PIN(1, 10), PU1, 22 }, /* A10 */
2712     + { RCAR_GP_PIN(1, 9), PU1, 21 }, /* A9 */
2713     + { RCAR_GP_PIN(1, 8), PU1, 20 }, /* A8 */
2714     + { RCAR_GP_PIN(1, 7), PU1, 19 }, /* A7 */
2715     + { RCAR_GP_PIN(1, 6), PU1, 18 }, /* A6 */
2716     + { RCAR_GP_PIN(1, 5), PU1, 17 }, /* A5 */
2717     + { RCAR_GP_PIN(1, 4), PU1, 16 }, /* A4 */
2718     + { RCAR_GP_PIN(1, 3), PU1, 15 }, /* A3 */
2719     + { RCAR_GP_PIN(1, 2), PU1, 14 }, /* A2 */
2720     + { RCAR_GP_PIN(1, 1), PU1, 13 }, /* A1 */
2721     + { RCAR_GP_PIN(1, 0), PU1, 12 }, /* A0 */
2722     + { RCAR_GP_PIN(2, 8), PU1, 11 }, /* PWM2_A */
2723     + { RCAR_GP_PIN(2, 7), PU1, 10 }, /* PWM1_A */
2724     + { RCAR_GP_PIN(2, 6), PU1, 9 }, /* PWM0 */
2725     + { RCAR_GP_PIN(2, 5), PU1, 8 }, /* IRQ5 */
2726     + { RCAR_GP_PIN(2, 4), PU1, 7 }, /* IRQ4 */
2727     + { RCAR_GP_PIN(2, 3), PU1, 6 }, /* IRQ3 */
2728     + { RCAR_GP_PIN(2, 2), PU1, 5 }, /* IRQ2 */
2729     + { RCAR_GP_PIN(2, 1), PU1, 4 }, /* IRQ1 */
2730     + { RCAR_GP_PIN(2, 0), PU1, 3 }, /* IRQ0 */
2731     + { RCAR_GP_PIN(2, 14), PU1, 2 }, /* AVB_AVTP_CAPTURE_A */
2732     + { RCAR_GP_PIN(2, 13), PU1, 1 }, /* AVB_AVTP_MATCH_A */
2733     + { RCAR_GP_PIN(2, 12), PU1, 0 }, /* AVB_LINK */
2734     +
2735     + { RCAR_GP_PIN(7, 3), PU2, 29 }, /* HDMI1_CEC */
2736     + { RCAR_GP_PIN(7, 2), PU2, 28 }, /* HDMI0_CEC */
2737     + { RCAR_GP_PIN(7, 1), PU2, 27 }, /* AVS2 */
2738     + { RCAR_GP_PIN(7, 0), PU2, 26 }, /* AVS1 */
2739     + { RCAR_GP_PIN(0, 15), PU2, 25 }, /* D15 */
2740     + { RCAR_GP_PIN(0, 14), PU2, 24 }, /* D14 */
2741     + { RCAR_GP_PIN(0, 13), PU2, 23 }, /* D13 */
2742     + { RCAR_GP_PIN(0, 12), PU2, 22 }, /* D12 */
2743     + { RCAR_GP_PIN(0, 11), PU2, 21 }, /* D11 */
2744     + { RCAR_GP_PIN(0, 10), PU2, 20 }, /* D10 */
2745     + { RCAR_GP_PIN(0, 9), PU2, 19 }, /* D9 */
2746     + { RCAR_GP_PIN(0, 8), PU2, 18 }, /* D8 */
2747     + { RCAR_GP_PIN(0, 7), PU2, 17 }, /* D7 */
2748     + { RCAR_GP_PIN(0, 6), PU2, 16 }, /* D6 */
2749     + { RCAR_GP_PIN(0, 5), PU2, 15 }, /* D5 */
2750     + { RCAR_GP_PIN(0, 4), PU2, 14 }, /* D4 */
2751     + { RCAR_GP_PIN(0, 3), PU2, 13 }, /* D3 */
2752     + { RCAR_GP_PIN(0, 2), PU2, 12 }, /* D2 */
2753     + { RCAR_GP_PIN(0, 1), PU2, 11 }, /* D1 */
2754     + { RCAR_GP_PIN(0, 0), PU2, 10 }, /* D0 */
2755     + { RCAR_GP_PIN(1, 27), PU2, 8 }, /* EX_WAIT0_A */
2756     + { RCAR_GP_PIN(1, 26), PU2, 7 }, /* WE1_N */
2757     + { RCAR_GP_PIN(1, 25), PU2, 6 }, /* WE0_N */
2758     + { RCAR_GP_PIN(1, 24), PU2, 5 }, /* RD_WR_N */
2759     + { RCAR_GP_PIN(1, 23), PU2, 4 }, /* RD_N */
2760     + { RCAR_GP_PIN(1, 22), PU2, 3 }, /* BS_N */
2761     + { RCAR_GP_PIN(1, 21), PU2, 2 }, /* CS1_N_A26 */
2762     + { RCAR_GP_PIN(1, 20), PU2, 1 }, /* CS0_N */
2763     +
2764     + { RCAR_GP_PIN(4, 9), PU3, 31 }, /* SD3_DAT0 */
2765     + { RCAR_GP_PIN(4, 8), PU3, 30 }, /* SD3_CMD */
2766     + { RCAR_GP_PIN(4, 7), PU3, 29 }, /* SD3_CLK */
2767     + { RCAR_GP_PIN(4, 6), PU3, 28 }, /* SD2_DS */
2768     + { RCAR_GP_PIN(4, 5), PU3, 27 }, /* SD2_DAT3 */
2769     + { RCAR_GP_PIN(4, 4), PU3, 26 }, /* SD2_DAT2 */
2770     + { RCAR_GP_PIN(4, 3), PU3, 25 }, /* SD2_DAT1 */
2771     + { RCAR_GP_PIN(4, 2), PU3, 24 }, /* SD2_DAT0 */
2772     + { RCAR_GP_PIN(4, 1), PU3, 23 }, /* SD2_CMD */
2773     + { RCAR_GP_PIN(4, 0), PU3, 22 }, /* SD2_CLK */
2774     + { RCAR_GP_PIN(3, 11), PU3, 21 }, /* SD1_DAT3 */
2775     + { RCAR_GP_PIN(3, 10), PU3, 20 }, /* SD1_DAT2 */
2776     + { RCAR_GP_PIN(3, 9), PU3, 19 }, /* SD1_DAT1 */
2777     + { RCAR_GP_PIN(3, 8), PU3, 18 }, /* SD1_DAT0 */
2778     + { RCAR_GP_PIN(3, 7), PU3, 17 }, /* SD1_CMD */
2779     + { RCAR_GP_PIN(3, 6), PU3, 16 }, /* SD1_CLK */
2780     + { RCAR_GP_PIN(3, 5), PU3, 15 }, /* SD0_DAT3 */
2781     + { RCAR_GP_PIN(3, 4), PU3, 14 }, /* SD0_DAT2 */
2782     + { RCAR_GP_PIN(3, 3), PU3, 13 }, /* SD0_DAT1 */
2783     + { RCAR_GP_PIN(3, 2), PU3, 12 }, /* SD0_DAT0 */
2784     + { RCAR_GP_PIN(3, 1), PU3, 11 }, /* SD0_CMD */
2785     + { RCAR_GP_PIN(3, 0), PU3, 10 }, /* SD0_CLK */
2786     +
2787     + { RCAR_GP_PIN(5, 19), PU4, 31 }, /* MSIOF0_SS1 */
2788     + { RCAR_GP_PIN(5, 18), PU4, 30 }, /* MSIOF0_SYNC */
2789     + { RCAR_GP_PIN(5, 17), PU4, 29 }, /* MSIOF0_SCK */
2790     + { RCAR_GP_PIN(5, 16), PU4, 28 }, /* HRTS0_N */
2791     + { RCAR_GP_PIN(5, 15), PU4, 27 }, /* HCTS0_N */
2792     + { RCAR_GP_PIN(5, 14), PU4, 26 }, /* HTX0 */
2793     + { RCAR_GP_PIN(5, 13), PU4, 25 }, /* HRX0 */
2794     + { RCAR_GP_PIN(5, 12), PU4, 24 }, /* HSCK0 */
2795     + { RCAR_GP_PIN(5, 11), PU4, 23 }, /* RX2_A */
2796     + { RCAR_GP_PIN(5, 10), PU4, 22 }, /* TX2_A */
2797     + { RCAR_GP_PIN(5, 9), PU4, 21 }, /* SCK2 */
2798     + { RCAR_GP_PIN(5, 8), PU4, 20 }, /* RTS1_N_TANS */
2799     + { RCAR_GP_PIN(5, 7), PU4, 19 }, /* CTS1_N */
2800     + { RCAR_GP_PIN(5, 6), PU4, 18 }, /* TX1_A */
2801     + { RCAR_GP_PIN(5, 5), PU4, 17 }, /* RX1_A */
2802     + { RCAR_GP_PIN(5, 4), PU4, 16 }, /* RTS0_N_TANS */
2803     + { RCAR_GP_PIN(5, 3), PU4, 15 }, /* CTS0_N */
2804     + { RCAR_GP_PIN(5, 2), PU4, 14 }, /* TX0 */
2805     + { RCAR_GP_PIN(5, 1), PU4, 13 }, /* RX0 */
2806     + { RCAR_GP_PIN(5, 0), PU4, 12 }, /* SCK0 */
2807     + { RCAR_GP_PIN(3, 15), PU4, 11 }, /* SD1_WP */
2808     + { RCAR_GP_PIN(3, 14), PU4, 10 }, /* SD1_CD */
2809     + { RCAR_GP_PIN(3, 13), PU4, 9 }, /* SD0_WP */
2810     + { RCAR_GP_PIN(3, 12), PU4, 8 }, /* SD0_CD */
2811     + { RCAR_GP_PIN(4, 17), PU4, 7 }, /* SD3_DS */
2812     + { RCAR_GP_PIN(4, 16), PU4, 6 }, /* SD3_DAT7 */
2813     + { RCAR_GP_PIN(4, 15), PU4, 5 }, /* SD3_DAT6 */
2814     + { RCAR_GP_PIN(4, 14), PU4, 4 }, /* SD3_DAT5 */
2815     + { RCAR_GP_PIN(4, 13), PU4, 3 }, /* SD3_DAT4 */
2816     + { RCAR_GP_PIN(4, 12), PU4, 2 }, /* SD3_DAT3 */
2817     + { RCAR_GP_PIN(4, 11), PU4, 1 }, /* SD3_DAT2 */
2818     + { RCAR_GP_PIN(4, 10), PU4, 0 }, /* SD3_DAT1 */
2819     +
2820     + { RCAR_GP_PIN(6, 24), PU5, 31 }, /* USB0_PWEN */
2821     + { RCAR_GP_PIN(6, 23), PU5, 30 }, /* AUDIO_CLKB_B */
2822     + { RCAR_GP_PIN(6, 22), PU5, 29 }, /* AUDIO_CLKA_A */
2823     + { RCAR_GP_PIN(6, 21), PU5, 28 }, /* SSI_SDATA9_A */
2824     + { RCAR_GP_PIN(6, 20), PU5, 27 }, /* SSI_SDATA8 */
2825     + { RCAR_GP_PIN(6, 19), PU5, 26 }, /* SSI_SDATA7 */
2826     + { RCAR_GP_PIN(6, 18), PU5, 25 }, /* SSI_WS78 */
2827     + { RCAR_GP_PIN(6, 17), PU5, 24 }, /* SSI_SCK78 */
2828     + { RCAR_GP_PIN(6, 16), PU5, 23 }, /* SSI_SDATA6 */
2829     + { RCAR_GP_PIN(6, 15), PU5, 22 }, /* SSI_WS6 */
2830     + { RCAR_GP_PIN(6, 14), PU5, 21 }, /* SSI_SCK6 */
2831     + { RCAR_GP_PIN(6, 13), PU5, 20 }, /* SSI_SDATA5 */
2832     + { RCAR_GP_PIN(6, 12), PU5, 19 }, /* SSI_WS5 */
2833     + { RCAR_GP_PIN(6, 11), PU5, 18 }, /* SSI_SCK5 */
2834     + { RCAR_GP_PIN(6, 10), PU5, 17 }, /* SSI_SDATA4 */
2835     + { RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */
2836     + { RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */
2837     + { RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */
2838     + { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS34 */
2839     + { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK34 */
2840     + { RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */
2841     + { RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */
2842     + { RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */
2843     + { RCAR_GP_PIN(6, 1), PU5, 8 }, /* SSI_WS01239 */
2844     + { RCAR_GP_PIN(6, 0), PU5, 7 }, /* SSI_SCK01239 */
2845     + { RCAR_GP_PIN(5, 25), PU5, 5 }, /* MLB_DAT */
2846     + { RCAR_GP_PIN(5, 24), PU5, 4 }, /* MLB_SIG */
2847     + { RCAR_GP_PIN(5, 23), PU5, 3 }, /* MLB_CLK */
2848     + { RCAR_GP_PIN(5, 22), PU5, 2 }, /* MSIOF0_RXD */
2849     + { RCAR_GP_PIN(5, 21), PU5, 1 }, /* MSIOF0_SS2 */
2850     + { RCAR_GP_PIN(5, 20), PU5, 0 }, /* MSIOF0_TXD */
2851     +
2852     + { RCAR_GP_PIN(6, 31), PU6, 6 }, /* USB31_OVC */
2853     + { RCAR_GP_PIN(6, 30), PU6, 5 }, /* USB31_PWEN */
2854     + { RCAR_GP_PIN(6, 29), PU6, 4 }, /* USB30_OVC */
2855     + { RCAR_GP_PIN(6, 28), PU6, 3 }, /* USB30_PWEN */
2856     + { RCAR_GP_PIN(6, 27), PU6, 2 }, /* USB1_OVC */
2857     + { RCAR_GP_PIN(6, 26), PU6, 1 }, /* USB1_PWEN */
2858     + { RCAR_GP_PIN(6, 25), PU6, 0 }, /* USB0_OVC */
2859     };
2860    
2861     static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
2862     unsigned int pin)
2863     {
2864     + const struct sh_pfc_bias_info *info;
2865     u32 reg;
2866     u32 bit;
2867    
2868     - if (WARN_ON_ONCE(!pullups[pin].reg))
2869     + info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
2870     + if (!info)
2871     return PIN_CONFIG_BIAS_DISABLE;
2872    
2873     - reg = pullups[pin].reg;
2874     - bit = BIT(pullups[pin].bit);
2875     + reg = info->reg;
2876     + bit = BIT(info->bit);
2877    
2878     if (sh_pfc_read_reg(pfc, PUEN + reg, 32) & bit) {
2879     if (sh_pfc_read_reg(pfc, PUD + reg, 32) & bit)
2880     @@ -5379,15 +5378,17 @@ static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
2881     static void r8a7795_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
2882     unsigned int bias)
2883     {
2884     + const struct sh_pfc_bias_info *info;
2885     u32 enable, updown;
2886     u32 reg;
2887     u32 bit;
2888    
2889     - if (WARN_ON_ONCE(!pullups[pin].reg))
2890     + info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
2891     + if (!info)
2892     return;
2893    
2894     - reg = pullups[pin].reg;
2895     - bit = BIT(pullups[pin].bit);
2896     + reg = info->reg;
2897     + bit = BIT(info->bit);
2898    
2899     enable = sh_pfc_read_reg(pfc, PUEN + reg, 32) & ~bit;
2900     if (bias != PIN_CONFIG_BIAS_DISABLE)
2901     diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
2902     index c5772584594c..fcacfa73ef6e 100644
2903     --- a/drivers/pinctrl/sh-pfc/pinctrl.c
2904     +++ b/drivers/pinctrl/sh-pfc/pinctrl.c
2905     @@ -570,7 +570,8 @@ static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin,
2906    
2907     switch (param) {
2908     case PIN_CONFIG_BIAS_DISABLE:
2909     - return true;
2910     + return pin->configs &
2911     + (SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN);
2912    
2913     case PIN_CONFIG_BIAS_PULL_UP:
2914     return pin->configs & SH_PFC_PIN_CFG_PULL_UP;
2915     diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
2916     index 2345421103db..9556c172e3d2 100644
2917     --- a/drivers/pinctrl/sh-pfc/sh_pfc.h
2918     +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
2919     @@ -189,6 +189,12 @@ struct sh_pfc_window {
2920     unsigned long size;
2921     };
2922    
2923     +struct sh_pfc_bias_info {
2924     + u16 pin;
2925     + u16 reg : 11;
2926     + u16 bit : 5;
2927     +};
2928     +
2929     struct sh_pfc_pin_range;
2930    
2931     struct sh_pfc {
2932     diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
2933     index f5746b9f4e83..e9584330aeed 100644
2934     --- a/drivers/power/supply/bq24190_charger.c
2935     +++ b/drivers/power/supply/bq24190_charger.c
2936     @@ -1141,7 +1141,7 @@ static int bq24190_battery_set_property(struct power_supply *psy,
2937    
2938     dev_dbg(bdi->dev, "prop: %d\n", psp);
2939    
2940     - pm_runtime_put_sync(bdi->dev);
2941     + pm_runtime_get_sync(bdi->dev);
2942    
2943     switch (psp) {
2944     case POWER_SUPPLY_PROP_ONLINE:
2945     diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
2946     index 3b0dbc689d72..bccb3f595ff3 100644
2947     --- a/drivers/power/supply/bq27xxx_battery.c
2948     +++ b/drivers/power/supply/bq27xxx_battery.c
2949     @@ -164,6 +164,25 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
2950     [BQ27XXX_REG_DCAP] = 0x3c,
2951     [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
2952     },
2953     + [BQ27510] = {
2954     + [BQ27XXX_REG_CTRL] = 0x00,
2955     + [BQ27XXX_REG_TEMP] = 0x06,
2956     + [BQ27XXX_REG_INT_TEMP] = 0x28,
2957     + [BQ27XXX_REG_VOLT] = 0x08,
2958     + [BQ27XXX_REG_AI] = 0x14,
2959     + [BQ27XXX_REG_FLAGS] = 0x0a,
2960     + [BQ27XXX_REG_TTE] = 0x16,
2961     + [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
2962     + [BQ27XXX_REG_TTES] = 0x1a,
2963     + [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
2964     + [BQ27XXX_REG_NAC] = 0x0c,
2965     + [BQ27XXX_REG_FCC] = 0x12,
2966     + [BQ27XXX_REG_CYCT] = 0x1e,
2967     + [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
2968     + [BQ27XXX_REG_SOC] = 0x20,
2969     + [BQ27XXX_REG_DCAP] = 0x2e,
2970     + [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
2971     + },
2972     [BQ27530] = {
2973     [BQ27XXX_REG_CTRL] = 0x00,
2974     [BQ27XXX_REG_TEMP] = 0x06,
2975     @@ -302,6 +321,24 @@ static enum power_supply_property bq27500_battery_props[] = {
2976     POWER_SUPPLY_PROP_MANUFACTURER,
2977     };
2978    
2979     +static enum power_supply_property bq27510_battery_props[] = {
2980     + POWER_SUPPLY_PROP_STATUS,
2981     + POWER_SUPPLY_PROP_PRESENT,
2982     + POWER_SUPPLY_PROP_VOLTAGE_NOW,
2983     + POWER_SUPPLY_PROP_CURRENT_NOW,
2984     + POWER_SUPPLY_PROP_CAPACITY,
2985     + POWER_SUPPLY_PROP_CAPACITY_LEVEL,
2986     + POWER_SUPPLY_PROP_TEMP,
2987     + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
2988     + POWER_SUPPLY_PROP_TECHNOLOGY,
2989     + POWER_SUPPLY_PROP_CHARGE_FULL,
2990     + POWER_SUPPLY_PROP_CHARGE_NOW,
2991     + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
2992     + POWER_SUPPLY_PROP_CYCLE_COUNT,
2993     + POWER_SUPPLY_PROP_HEALTH,
2994     + POWER_SUPPLY_PROP_MANUFACTURER,
2995     +};
2996     +
2997     static enum power_supply_property bq27530_battery_props[] = {
2998     POWER_SUPPLY_PROP_STATUS,
2999     POWER_SUPPLY_PROP_PRESENT,
3000     @@ -385,6 +422,7 @@ static struct {
3001     BQ27XXX_PROP(BQ27000, bq27000_battery_props),
3002     BQ27XXX_PROP(BQ27010, bq27010_battery_props),
3003     BQ27XXX_PROP(BQ27500, bq27500_battery_props),
3004     + BQ27XXX_PROP(BQ27510, bq27510_battery_props),
3005     BQ27XXX_PROP(BQ27530, bq27530_battery_props),
3006     BQ27XXX_PROP(BQ27541, bq27541_battery_props),
3007     BQ27XXX_PROP(BQ27545, bq27545_battery_props),
3008     @@ -635,7 +673,8 @@ static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
3009     */
3010     static bool bq27xxx_battery_overtemp(struct bq27xxx_device_info *di, u16 flags)
3011     {
3012     - if (di->chip == BQ27500 || di->chip == BQ27541 || di->chip == BQ27545)
3013     + if (di->chip == BQ27500 || di->chip == BQ27510 ||
3014     + di->chip == BQ27541 || di->chip == BQ27545)
3015     return flags & (BQ27XXX_FLAG_OTC | BQ27XXX_FLAG_OTD);
3016     if (di->chip == BQ27530 || di->chip == BQ27421)
3017     return flags & BQ27XXX_FLAG_OT;
3018     diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
3019     index 85d4ea2a9c20..5c5c3a6f9923 100644
3020     --- a/drivers/power/supply/bq27xxx_battery_i2c.c
3021     +++ b/drivers/power/supply/bq27xxx_battery_i2c.c
3022     @@ -149,8 +149,8 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
3023     { "bq27200", BQ27000 },
3024     { "bq27210", BQ27010 },
3025     { "bq27500", BQ27500 },
3026     - { "bq27510", BQ27500 },
3027     - { "bq27520", BQ27500 },
3028     + { "bq27510", BQ27510 },
3029     + { "bq27520", BQ27510 },
3030     { "bq27530", BQ27530 },
3031     { "bq27531", BQ27530 },
3032     { "bq27541", BQ27541 },
3033     diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
3034     index 243b233ff31b..3c71f608b444 100644
3035     --- a/drivers/powercap/intel_rapl.c
3036     +++ b/drivers/powercap/intel_rapl.c
3037     @@ -442,6 +442,7 @@ static int contraint_to_pl(struct rapl_domain *rd, int cid)
3038     return i;
3039     }
3040     }
3041     + pr_err("Cannot find matching power limit for constraint %d\n", cid);
3042    
3043     return -EINVAL;
3044     }
3045     @@ -457,6 +458,10 @@ static int set_power_limit(struct powercap_zone *power_zone, int cid,
3046     get_online_cpus();
3047     rd = power_zone_to_rapl_domain(power_zone);
3048     id = contraint_to_pl(rd, cid);
3049     + if (id < 0) {
3050     + ret = id;
3051     + goto set_exit;
3052     + }
3053    
3054     rp = rd->rp;
3055    
3056     @@ -496,6 +501,11 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
3057     get_online_cpus();
3058     rd = power_zone_to_rapl_domain(power_zone);
3059     id = contraint_to_pl(rd, cid);
3060     + if (id < 0) {
3061     + ret = id;
3062     + goto get_exit;
3063     + }
3064     +
3065     switch (rd->rpl[id].prim_id) {
3066     case PL1_ENABLE:
3067     prim = POWER_LIMIT1;
3068     @@ -512,6 +522,7 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
3069     else
3070     *data = val;
3071    
3072     +get_exit:
3073     put_online_cpus();
3074    
3075     return ret;
3076     @@ -527,6 +538,10 @@ static int set_time_window(struct powercap_zone *power_zone, int cid,
3077     get_online_cpus();
3078     rd = power_zone_to_rapl_domain(power_zone);
3079     id = contraint_to_pl(rd, cid);
3080     + if (id < 0) {
3081     + ret = id;
3082     + goto set_time_exit;
3083     + }
3084    
3085     switch (rd->rpl[id].prim_id) {
3086     case PL1_ENABLE:
3087     @@ -538,6 +553,8 @@ static int set_time_window(struct powercap_zone *power_zone, int cid,
3088     default:
3089     ret = -EINVAL;
3090     }
3091     +
3092     +set_time_exit:
3093     put_online_cpus();
3094     return ret;
3095     }
3096     @@ -552,6 +569,10 @@ static int get_time_window(struct powercap_zone *power_zone, int cid, u64 *data)
3097     get_online_cpus();
3098     rd = power_zone_to_rapl_domain(power_zone);
3099     id = contraint_to_pl(rd, cid);
3100     + if (id < 0) {
3101     + ret = id;
3102     + goto get_time_exit;
3103     + }
3104    
3105     switch (rd->rpl[id].prim_id) {
3106     case PL1_ENABLE:
3107     @@ -566,6 +587,8 @@ static int get_time_window(struct powercap_zone *power_zone, int cid, u64 *data)
3108     }
3109     if (!ret)
3110     *data = val;
3111     +
3112     +get_time_exit:
3113     put_online_cpus();
3114    
3115     return ret;
3116     @@ -707,7 +730,7 @@ static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type,
3117     case ENERGY_UNIT:
3118     scale = ENERGY_UNIT_SCALE;
3119     /* per domain unit takes precedence */
3120     - if (rd && rd->domain_energy_unit)
3121     + if (rd->domain_energy_unit)
3122     units = rd->domain_energy_unit;
3123     else
3124     units = rp->energy_unit;
3125     diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
3126     index 54382ef902c6..e6a512ebeae2 100644
3127     --- a/drivers/regulator/axp20x-regulator.c
3128     +++ b/drivers/regulator/axp20x-regulator.c
3129     @@ -337,10 +337,18 @@ static const struct regulator_desc axp809_regulators[] = {
3130     AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
3131     AXP_DESC(AXP809, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
3132     AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
3133     - AXP_DESC_IO(AXP809, LDO_IO0, "ldo_io0", "ips", 700, 3300, 100,
3134     + /*
3135     + * Note the datasheet only guarantees reliable operation up to
3136     + * 3.3V, this needs to be enforced via dts provided constraints
3137     + */
3138     + AXP_DESC_IO(AXP809, LDO_IO0, "ldo_io0", "ips", 700, 3800, 100,
3139     AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
3140     AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
3141     - AXP_DESC_IO(AXP809, LDO_IO1, "ldo_io1", "ips", 700, 3300, 100,
3142     + /*
3143     + * Note the datasheet only guarantees reliable operation up to
3144     + * 3.3V, this needs to be enforced via dts provided constraints
3145     + */
3146     + AXP_DESC_IO(AXP809, LDO_IO1, "ldo_io1", "ips", 700, 3800, 100,
3147     AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
3148     AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
3149     AXP_DESC_FIXED(AXP809, RTC_LDO, "rtc_ldo", "ips", 1800),
3150     diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
3151     index bcf38fd5106a..379cdacc05d8 100644
3152     --- a/drivers/regulator/helpers.c
3153     +++ b/drivers/regulator/helpers.c
3154     @@ -454,13 +454,17 @@ EXPORT_SYMBOL_GPL(regulator_set_bypass_regmap);
3155     int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable)
3156     {
3157     unsigned int val;
3158     + unsigned int val_on = rdev->desc->bypass_val_on;
3159     int ret;
3160    
3161     ret = regmap_read(rdev->regmap, rdev->desc->bypass_reg, &val);
3162     if (ret != 0)
3163     return ret;
3164    
3165     - *enable = (val & rdev->desc->bypass_mask) == rdev->desc->bypass_val_on;
3166     + if (!val_on)
3167     + val_on = rdev->desc->bypass_mask;
3168     +
3169     + *enable = (val & rdev->desc->bypass_mask) == val_on;
3170    
3171     return 0;
3172     }
3173     diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c
3174     index 33f389d583ef..caf174ffa316 100644
3175     --- a/drivers/regulator/tps65086-regulator.c
3176     +++ b/drivers/regulator/tps65086-regulator.c
3177     @@ -71,18 +71,17 @@ struct tps65086_regulator {
3178     unsigned int decay_mask;
3179     };
3180    
3181     -static const struct regulator_linear_range tps65086_buck126_10mv_ranges[] = {
3182     +static const struct regulator_linear_range tps65086_10mv_ranges[] = {
3183     REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
3184     REGULATOR_LINEAR_RANGE(410000, 0x1, 0x7F, 10000),
3185     };
3186    
3187     static const struct regulator_linear_range tps65086_buck126_25mv_ranges[] = {
3188     - REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
3189     - REGULATOR_LINEAR_RANGE(1000000, 0x1, 0x18, 0),
3190     + REGULATOR_LINEAR_RANGE(1000000, 0x0, 0x18, 0),
3191     REGULATOR_LINEAR_RANGE(1025000, 0x19, 0x7F, 25000),
3192     };
3193    
3194     -static const struct regulator_linear_range tps65086_buck345_ranges[] = {
3195     +static const struct regulator_linear_range tps65086_buck345_25mv_ranges[] = {
3196     REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
3197     REGULATOR_LINEAR_RANGE(425000, 0x1, 0x7F, 25000),
3198     };
3199     @@ -125,27 +124,27 @@ static int tps65086_of_parse_cb(struct device_node *dev,
3200     static struct tps65086_regulator regulators[] = {
3201     TPS65086_REGULATOR("BUCK1", "buck1", BUCK1, 0x80, TPS65086_BUCK1CTRL,
3202     BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(0),
3203     - tps65086_buck126_10mv_ranges, TPS65086_BUCK1CTRL,
3204     + tps65086_10mv_ranges, TPS65086_BUCK1CTRL,
3205     BIT(0)),
3206     TPS65086_REGULATOR("BUCK2", "buck2", BUCK2, 0x80, TPS65086_BUCK2CTRL,
3207     BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(1),
3208     - tps65086_buck126_10mv_ranges, TPS65086_BUCK2CTRL,
3209     + tps65086_10mv_ranges, TPS65086_BUCK2CTRL,
3210     BIT(0)),
3211     TPS65086_REGULATOR("BUCK3", "buck3", BUCK3, 0x80, TPS65086_BUCK3VID,
3212     BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(2),
3213     - tps65086_buck345_ranges, TPS65086_BUCK3DECAY,
3214     + tps65086_10mv_ranges, TPS65086_BUCK3DECAY,
3215     BIT(0)),
3216     TPS65086_REGULATOR("BUCK4", "buck4", BUCK4, 0x80, TPS65086_BUCK4VID,
3217     BUCK_VID_MASK, TPS65086_BUCK4CTRL, BIT(0),
3218     - tps65086_buck345_ranges, TPS65086_BUCK4VID,
3219     + tps65086_10mv_ranges, TPS65086_BUCK4VID,
3220     BIT(0)),
3221     TPS65086_REGULATOR("BUCK5", "buck5", BUCK5, 0x80, TPS65086_BUCK5VID,
3222     BUCK_VID_MASK, TPS65086_BUCK5CTRL, BIT(0),
3223     - tps65086_buck345_ranges, TPS65086_BUCK5CTRL,
3224     + tps65086_10mv_ranges, TPS65086_BUCK5CTRL,
3225     BIT(0)),
3226     TPS65086_REGULATOR("BUCK6", "buck6", BUCK6, 0x80, TPS65086_BUCK6VID,
3227     BUCK_VID_MASK, TPS65086_BUCK6CTRL, BIT(0),
3228     - tps65086_buck126_10mv_ranges, TPS65086_BUCK6CTRL,
3229     + tps65086_10mv_ranges, TPS65086_BUCK6CTRL,
3230     BIT(0)),
3231     TPS65086_REGULATOR("LDOA1", "ldoa1", LDOA1, 0xF, TPS65086_LDOA1CTRL,
3232     VDOA1_VID_MASK, TPS65086_LDOA1CTRL, BIT(0),
3233     @@ -162,18 +161,6 @@ static struct tps65086_regulator regulators[] = {
3234     TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)),
3235     };
3236    
3237     -static inline bool has_25mv_mode(int id)
3238     -{
3239     - switch (id) {
3240     - case BUCK1:
3241     - case BUCK2:
3242     - case BUCK6:
3243     - return true;
3244     - default:
3245     - return false;
3246     - }
3247     -}
3248     -
3249     static int tps65086_of_parse_cb(struct device_node *dev,
3250     const struct regulator_desc *desc,
3251     struct regulator_config *config)
3252     @@ -181,12 +168,27 @@ static int tps65086_of_parse_cb(struct device_node *dev,
3253     int ret;
3254    
3255     /* Check for 25mV step mode */
3256     - if (has_25mv_mode(desc->id) &&
3257     - of_property_read_bool(config->of_node, "ti,regulator-step-size-25mv")) {
3258     - regulators[desc->id].desc.linear_ranges =
3259     + if (of_property_read_bool(config->of_node, "ti,regulator-step-size-25mv")) {
3260     + switch (desc->id) {
3261     + case BUCK1:
3262     + case BUCK2:
3263     + case BUCK6:
3264     + regulators[desc->id].desc.linear_ranges =
3265     tps65086_buck126_25mv_ranges;
3266     - regulators[desc->id].desc.n_linear_ranges =
3267     + regulators[desc->id].desc.n_linear_ranges =
3268     ARRAY_SIZE(tps65086_buck126_25mv_ranges);
3269     + break;
3270     + case BUCK3:
3271     + case BUCK4:
3272     + case BUCK5:
3273     + regulators[desc->id].desc.linear_ranges =
3274     + tps65086_buck345_25mv_ranges;
3275     + regulators[desc->id].desc.n_linear_ranges =
3276     + ARRAY_SIZE(tps65086_buck345_25mv_ranges);
3277     + break;
3278     + default:
3279     + dev_warn(config->dev, "25mV step mode only valid for BUCK regulators\n");
3280     + }
3281     }
3282    
3283     /* Check for decay mode */
3284     diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
3285     index f396bfef5d42..5fcbefcb8636 100644
3286     --- a/drivers/remoteproc/Kconfig
3287     +++ b/drivers/remoteproc/Kconfig
3288     @@ -91,17 +91,12 @@ config QCOM_Q6V5_PIL
3289     Say y here to support the Qualcomm Peripherial Image Loader for the
3290     Hexagon V5 based remote processors.
3291    
3292     -config QCOM_WCNSS_IRIS
3293     - tristate
3294     - depends on OF && ARCH_QCOM
3295     -
3296     config QCOM_WCNSS_PIL
3297     tristate "Qualcomm WCNSS Peripheral Image Loader"
3298     depends on OF && ARCH_QCOM
3299     depends on QCOM_SMEM
3300     select QCOM_MDT_LOADER
3301     select QCOM_SCM
3302     - select QCOM_WCNSS_IRIS
3303     select REMOTEPROC
3304     help
3305     Say y here to support the Peripheral Image Loader for the Qualcomm
3306     diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
3307     index 6dfb62ed643f..034b6f3563a7 100644
3308     --- a/drivers/remoteproc/Makefile
3309     +++ b/drivers/remoteproc/Makefile
3310     @@ -13,6 +13,7 @@ obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
3311     obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
3312     obj-$(CONFIG_QCOM_MDT_LOADER) += qcom_mdt_loader.o
3313     obj-$(CONFIG_QCOM_Q6V5_PIL) += qcom_q6v5_pil.o
3314     -obj-$(CONFIG_QCOM_WCNSS_IRIS) += qcom_wcnss_iris.o
3315     -obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss.o
3316     +obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o
3317     +qcom_wcnss_pil-y += qcom_wcnss.o
3318     +qcom_wcnss_pil-y += qcom_wcnss_iris.o
3319     obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
3320     diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
3321     index f5cedeaafba1..323b629474a6 100644
3322     --- a/drivers/remoteproc/qcom_wcnss.c
3323     +++ b/drivers/remoteproc/qcom_wcnss.c
3324     @@ -143,7 +143,6 @@ void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss,
3325    
3326     mutex_unlock(&wcnss->iris_lock);
3327     }
3328     -EXPORT_SYMBOL_GPL(qcom_wcnss_assign_iris);
3329    
3330     static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
3331     {
3332     @@ -619,6 +618,28 @@ static struct platform_driver wcnss_driver = {
3333     },
3334     };
3335    
3336     -module_platform_driver(wcnss_driver);
3337     +static int __init wcnss_init(void)
3338     +{
3339     + int ret;
3340     +
3341     + ret = platform_driver_register(&wcnss_driver);
3342     + if (ret)
3343     + return ret;
3344     +
3345     + ret = platform_driver_register(&qcom_iris_driver);
3346     + if (ret)
3347     + platform_driver_unregister(&wcnss_driver);
3348     +
3349     + return ret;
3350     +}
3351     +module_init(wcnss_init);
3352     +
3353     +static void __exit wcnss_exit(void)
3354     +{
3355     + platform_driver_unregister(&qcom_iris_driver);
3356     + platform_driver_unregister(&wcnss_driver);
3357     +}
3358     +module_exit(wcnss_exit);
3359     +
3360     MODULE_DESCRIPTION("Qualcomm Peripherial Image Loader for Wireless Subsystem");
3361     MODULE_LICENSE("GPL v2");
3362     diff --git a/drivers/remoteproc/qcom_wcnss.h b/drivers/remoteproc/qcom_wcnss.h
3363     index 9dc4a9fe41e1..25fb7f62a457 100644
3364     --- a/drivers/remoteproc/qcom_wcnss.h
3365     +++ b/drivers/remoteproc/qcom_wcnss.h
3366     @@ -4,6 +4,8 @@
3367     struct qcom_iris;
3368     struct qcom_wcnss;
3369    
3370     +extern struct platform_driver qcom_iris_driver;
3371     +
3372     struct wcnss_vreg_info {
3373     const char * const name;
3374     int min_voltage;
3375     diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c
3376     index f0ca24a8dd0b..05d6e175411a 100644
3377     --- a/drivers/remoteproc/qcom_wcnss_iris.c
3378     +++ b/drivers/remoteproc/qcom_wcnss_iris.c
3379     @@ -94,14 +94,12 @@ int qcom_iris_enable(struct qcom_iris *iris)
3380    
3381     return ret;
3382     }
3383     -EXPORT_SYMBOL_GPL(qcom_iris_enable);
3384    
3385     void qcom_iris_disable(struct qcom_iris *iris)
3386     {
3387     clk_disable_unprepare(iris->xo_clk);
3388     regulator_bulk_disable(iris->num_vregs, iris->vregs);
3389     }
3390     -EXPORT_SYMBOL_GPL(qcom_iris_disable);
3391    
3392     static int qcom_iris_probe(struct platform_device *pdev)
3393     {
3394     @@ -174,7 +172,7 @@ static const struct of_device_id iris_of_match[] = {
3395     {}
3396     };
3397    
3398     -static struct platform_driver wcnss_driver = {
3399     +struct platform_driver qcom_iris_driver = {
3400     .probe = qcom_iris_probe,
3401     .remove = qcom_iris_remove,
3402     .driver = {
3403     @@ -182,7 +180,3 @@ static struct platform_driver wcnss_driver = {
3404     .of_match_table = iris_of_match,
3405     },
3406     };
3407     -
3408     -module_platform_driver(wcnss_driver);
3409     -MODULE_DESCRIPTION("Qualcomm Wireless Subsystem Iris driver");
3410     -MODULE_LICENSE("GPL v2");
3411     diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
3412     index ae8963fcc8c8..da4e152e9733 100644
3413     --- a/drivers/remoteproc/st_remoteproc.c
3414     +++ b/drivers/remoteproc/st_remoteproc.c
3415     @@ -245,8 +245,10 @@ static int st_rproc_probe(struct platform_device *pdev)
3416     goto free_rproc;
3417    
3418     enabled = st_rproc_state(pdev);
3419     - if (enabled < 0)
3420     + if (enabled < 0) {
3421     + ret = enabled;
3422     goto free_rproc;
3423     + }
3424    
3425     if (enabled) {
3426     atomic_inc(&rproc->power);
3427     diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
3428     index 642b739ad0da..608140f16d98 100644
3429     --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
3430     +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
3431     @@ -3702,7 +3702,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
3432     1, 1);
3433     if (rc) {
3434     pr_err("srp_transfer_data() failed: %d\n", rc);
3435     - return -EAGAIN;
3436     + return -EIO;
3437     }
3438     /*
3439     * We now tell TCM to add this WRITE CDB directly into the TCM storage
3440     diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
3441     index 240a361b674f..e8819aa20415 100644
3442     --- a/drivers/tty/serial/8250/8250_core.c
3443     +++ b/drivers/tty/serial/8250/8250_core.c
3444     @@ -675,7 +675,7 @@ static struct console univ8250_console = {
3445     .device = uart_console_device,
3446     .setup = univ8250_console_setup,
3447     .match = univ8250_console_match,
3448     - .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_CONSDEV,
3449     + .flags = CON_PRINTBUFFER | CON_ANYTIME,
3450     .index = -1,
3451     .data = &serial8250_reg,
3452     };
3453     diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
3454     index 1731b98d2471..080d5a59d0a7 100644
3455     --- a/drivers/tty/serial/8250/8250_port.c
3456     +++ b/drivers/tty/serial/8250/8250_port.c
3457     @@ -1411,7 +1411,7 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
3458     * Enable previously disabled RX interrupts.
3459     */
3460     if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
3461     - serial8250_clear_fifos(p);
3462     + serial8250_clear_and_reinit_fifos(p);
3463    
3464     p->ier |= UART_IER_RLSI | UART_IER_RDI;
3465     serial_port_out(&p->port, UART_IER, p->ier);
3466     diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
3467     index 168b10cad47b..fabbe76203bb 100644
3468     --- a/drivers/tty/serial/atmel_serial.c
3469     +++ b/drivers/tty/serial/atmel_serial.c
3470     @@ -481,6 +481,14 @@ static void atmel_stop_tx(struct uart_port *port)
3471     /* disable PDC transmit */
3472     atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
3473     }
3474     +
3475     + /*
3476     + * Disable the transmitter.
3477     + * This is mandatory when DMA is used, otherwise the DMA buffer
3478     + * is fully transmitted.
3479     + */
3480     + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
3481     +
3482     /* Disable interrupts */
3483     atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
3484    
3485     @@ -513,6 +521,9 @@ static void atmel_start_tx(struct uart_port *port)
3486    
3487     /* Enable interrupts */
3488     atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
3489     +
3490     + /* re-enable the transmitter */
3491     + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
3492     }
3493    
3494     /*
3495     @@ -798,6 +809,11 @@ static void atmel_complete_tx_dma(void *arg)
3496     */
3497     if (!uart_circ_empty(xmit))
3498     atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
3499     + else if ((port->rs485.flags & SER_RS485_ENABLED) &&
3500     + !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
3501     + /* DMA done, stop TX, start RX for RS485 */
3502     + atmel_start_rx(port);
3503     + }
3504    
3505     spin_unlock_irqrestore(&port->lock, flags);
3506     }
3507     @@ -900,12 +916,6 @@ static void atmel_tx_dma(struct uart_port *port)
3508     desc->callback = atmel_complete_tx_dma;
3509     desc->callback_param = atmel_port;
3510     atmel_port->cookie_tx = dmaengine_submit(desc);
3511     -
3512     - } else {
3513     - if (port->rs485.flags & SER_RS485_ENABLED) {
3514     - /* DMA done, stop TX, start RX for RS485 */
3515     - atmel_start_rx(port);
3516     - }
3517     }
3518    
3519     if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
3520     diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
3521     index 52bbd27e93ae..701c085bb19b 100644
3522     --- a/drivers/tty/sysrq.c
3523     +++ b/drivers/tty/sysrq.c
3524     @@ -946,8 +946,8 @@ static const struct input_device_id sysrq_ids[] = {
3525     {
3526     .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
3527     INPUT_DEVICE_ID_MATCH_KEYBIT,
3528     - .evbit = { BIT_MASK(EV_KEY) },
3529     - .keybit = { BIT_MASK(KEY_LEFTALT) },
3530     + .evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) },
3531     + .keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) },
3532     },
3533     { },
3534     };
3535     diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
3536     index 771a6da9caea..521c1816a26a 100644
3537     --- a/drivers/usb/host/xhci-ring.c
3538     +++ b/drivers/usb/host/xhci-ring.c
3539     @@ -917,17 +917,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
3540     spin_lock_irqsave(&xhci->lock, flags);
3541    
3542     ep->stop_cmds_pending--;
3543     - if (xhci->xhc_state & XHCI_STATE_REMOVING) {
3544     - spin_unlock_irqrestore(&xhci->lock, flags);
3545     - return;
3546     - }
3547     - if (xhci->xhc_state & XHCI_STATE_DYING) {
3548     - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
3549     - "Stop EP timer ran, but another timer marked "
3550     - "xHCI as DYING, exiting.");
3551     - spin_unlock_irqrestore(&xhci->lock, flags);
3552     - return;
3553     - }
3554     if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
3555     xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
3556     "Stop EP timer ran, but no command pending, "
3557     diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
3558     index ad0624386950..34e23c7d7797 100644
3559     --- a/drivers/usb/host/xhci.c
3560     +++ b/drivers/usb/host/xhci.c
3561     @@ -1529,19 +1529,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
3562     xhci_urb_free_priv(urb_priv);
3563     return ret;
3564     }
3565     - if ((xhci->xhc_state & XHCI_STATE_DYING) ||
3566     - (xhci->xhc_state & XHCI_STATE_HALTED)) {
3567     - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
3568     - "Ep 0x%x: URB %p to be canceled on "
3569     - "non-responsive xHCI host.",
3570     - urb->ep->desc.bEndpointAddress, urb);
3571     - /* Let the stop endpoint command watchdog timer (which set this
3572     - * state) finish cleaning up the endpoint TD lists. We must
3573     - * have caught it in the middle of dropping a lock and giving
3574     - * back an URB.
3575     - */
3576     - goto done;
3577     - }
3578    
3579     ep_index = xhci_get_endpoint_index(&urb->ep->desc);
3580     ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
3581     diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
3582     index 9b22d946c089..534a3f6fa89c 100644
3583     --- a/drivers/usb/musb/musb_debugfs.c
3584     +++ b/drivers/usb/musb/musb_debugfs.c
3585     @@ -114,6 +114,7 @@ static int musb_regdump_show(struct seq_file *s, void *unused)
3586     unsigned i;
3587    
3588     seq_printf(s, "MUSB (M)HDRC Register Dump\n");
3589     + pm_runtime_get_sync(musb->controller);
3590    
3591     for (i = 0; i < ARRAY_SIZE(musb_regmap); i++) {
3592     switch (musb_regmap[i].size) {
3593     @@ -132,6 +133,8 @@ static int musb_regdump_show(struct seq_file *s, void *unused)
3594     }
3595     }
3596    
3597     + pm_runtime_mark_last_busy(musb->controller);
3598     + pm_runtime_put_autosuspend(musb->controller);
3599     return 0;
3600     }
3601    
3602     @@ -145,7 +148,10 @@ static int musb_test_mode_show(struct seq_file *s, void *unused)
3603     struct musb *musb = s->private;
3604     unsigned test;
3605    
3606     + pm_runtime_get_sync(musb->controller);
3607     test = musb_readb(musb->mregs, MUSB_TESTMODE);
3608     + pm_runtime_mark_last_busy(musb->controller);
3609     + pm_runtime_put_autosuspend(musb->controller);
3610    
3611     if (test & MUSB_TEST_FORCE_HOST)
3612     seq_printf(s, "force host\n");
3613     @@ -194,11 +200,12 @@ static ssize_t musb_test_mode_write(struct file *file,
3614     u8 test;
3615     char buf[18];
3616    
3617     + pm_runtime_get_sync(musb->controller);
3618     test = musb_readb(musb->mregs, MUSB_TESTMODE);
3619     if (test) {
3620     dev_err(musb->controller, "Error: test mode is already set. "
3621     "Please do USB Bus Reset to start a new test.\n");
3622     - return count;
3623     + goto ret;
3624     }
3625    
3626     memset(buf, 0x00, sizeof(buf));
3627     @@ -234,6 +241,9 @@ static ssize_t musb_test_mode_write(struct file *file,
3628    
3629     musb_writeb(musb->mregs, MUSB_TESTMODE, test);
3630    
3631     +ret:
3632     + pm_runtime_mark_last_busy(musb->controller);
3633     + pm_runtime_put_autosuspend(musb->controller);
3634     return count;
3635     }
3636    
3637     @@ -254,8 +264,13 @@ static int musb_softconnect_show(struct seq_file *s, void *unused)
3638     switch (musb->xceiv->otg->state) {
3639     case OTG_STATE_A_HOST:
3640     case OTG_STATE_A_WAIT_BCON:
3641     + pm_runtime_get_sync(musb->controller);
3642     +
3643     reg = musb_readb(musb->mregs, MUSB_DEVCTL);
3644     connect = reg & MUSB_DEVCTL_SESSION ? 1 : 0;
3645     +
3646     + pm_runtime_mark_last_busy(musb->controller);
3647     + pm_runtime_put_autosuspend(musb->controller);
3648     break;
3649     default:
3650     connect = -1;
3651     @@ -284,6 +299,7 @@ static ssize_t musb_softconnect_write(struct file *file,
3652     if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
3653     return -EFAULT;
3654    
3655     + pm_runtime_get_sync(musb->controller);
3656     if (!strncmp(buf, "0", 1)) {
3657     switch (musb->xceiv->otg->state) {
3658     case OTG_STATE_A_HOST:
3659     @@ -314,6 +330,8 @@ static ssize_t musb_softconnect_write(struct file *file,
3660     }
3661     }
3662    
3663     + pm_runtime_mark_last_busy(musb->controller);
3664     + pm_runtime_put_autosuspend(musb->controller);
3665     return count;
3666     }
3667    
3668     diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
3669     index f139488d0816..e98590aab633 100644
3670     --- a/drivers/usb/serial/ch341.c
3671     +++ b/drivers/usb/serial/ch341.c
3672     @@ -99,6 +99,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request,
3673     r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
3674     USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
3675     value, index, NULL, 0, DEFAULT_TIMEOUT);
3676     + if (r < 0)
3677     + dev_err(&dev->dev, "failed to send control message: %d\n", r);
3678    
3679     return r;
3680     }
3681     @@ -116,7 +118,20 @@ static int ch341_control_in(struct usb_device *dev,
3682     r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
3683     USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
3684     value, index, buf, bufsize, DEFAULT_TIMEOUT);
3685     - return r;
3686     + if (r < bufsize) {
3687     + if (r >= 0) {
3688     + dev_err(&dev->dev,
3689     + "short control message received (%d < %u)\n",
3690     + r, bufsize);
3691     + r = -EIO;
3692     + }
3693     +
3694     + dev_err(&dev->dev, "failed to receive control message: %d\n",
3695     + r);
3696     + return r;
3697     + }
3698     +
3699     + return 0;
3700     }
3701    
3702     static int ch341_set_baudrate(struct usb_device *dev,
3703     @@ -158,9 +173,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control)
3704    
3705     static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
3706     {
3707     + const unsigned int size = 2;
3708     char *buffer;
3709     int r;
3710     - const unsigned size = 8;
3711     unsigned long flags;
3712    
3713     buffer = kmalloc(size, GFP_KERNEL);
3714     @@ -171,14 +186,9 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
3715     if (r < 0)
3716     goto out;
3717    
3718     - /* setup the private status if available */
3719     - if (r == 2) {
3720     - r = 0;
3721     - spin_lock_irqsave(&priv->lock, flags);
3722     - priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
3723     - spin_unlock_irqrestore(&priv->lock, flags);
3724     - } else
3725     - r = -EPROTO;
3726     + spin_lock_irqsave(&priv->lock, flags);
3727     + priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
3728     + spin_unlock_irqrestore(&priv->lock, flags);
3729    
3730     out: kfree(buffer);
3731     return r;
3732     @@ -188,9 +198,9 @@ out: kfree(buffer);
3733    
3734     static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
3735     {
3736     + const unsigned int size = 2;
3737     char *buffer;
3738     int r;
3739     - const unsigned size = 8;
3740    
3741     buffer = kmalloc(size, GFP_KERNEL);
3742     if (!buffer)
3743     @@ -253,7 +263,6 @@ static int ch341_port_probe(struct usb_serial_port *port)
3744    
3745     spin_lock_init(&priv->lock);
3746     priv->baud_rate = DEFAULT_BAUD_RATE;
3747     - priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
3748    
3749     r = ch341_configure(port->serial->dev, priv);
3750     if (r < 0)
3751     @@ -315,7 +324,7 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
3752    
3753     r = ch341_configure(serial->dev, priv);
3754     if (r)
3755     - goto out;
3756     + return r;
3757    
3758     if (tty)
3759     ch341_set_termios(tty, port, NULL);
3760     @@ -325,12 +334,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
3761     if (r) {
3762     dev_err(&port->dev, "%s - failed to submit interrupt urb: %d\n",
3763     __func__, r);
3764     - goto out;
3765     + return r;
3766     }
3767    
3768     r = usb_serial_generic_open(tty, port);
3769     + if (r)
3770     + goto err_kill_interrupt_urb;
3771    
3772     -out: return r;
3773     + return 0;
3774     +
3775     +err_kill_interrupt_urb:
3776     + usb_kill_urb(port->interrupt_in_urb);
3777     +
3778     + return r;
3779     }
3780    
3781     /* Old_termios contains the original termios settings and
3782     @@ -345,26 +361,25 @@ static void ch341_set_termios(struct tty_struct *tty,
3783    
3784     baud_rate = tty_get_baud_rate(tty);
3785    
3786     - priv->baud_rate = baud_rate;
3787     -
3788     if (baud_rate) {
3789     - spin_lock_irqsave(&priv->lock, flags);
3790     - priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
3791     - spin_unlock_irqrestore(&priv->lock, flags);
3792     + priv->baud_rate = baud_rate;
3793     ch341_set_baudrate(port->serial->dev, priv);
3794     - } else {
3795     - spin_lock_irqsave(&priv->lock, flags);
3796     - priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
3797     - spin_unlock_irqrestore(&priv->lock, flags);
3798     }
3799    
3800     - ch341_set_handshake(port->serial->dev, priv->line_control);
3801     -
3802     /* Unimplemented:
3803     * (cflag & CSIZE) : data bits [5, 8]
3804     * (cflag & PARENB) : parity {NONE, EVEN, ODD}
3805     * (cflag & CSTOPB) : stop bits [1, 2]
3806     */
3807     +
3808     + spin_lock_irqsave(&priv->lock, flags);
3809     + if (C_BAUD(tty) == B0)
3810     + priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
3811     + else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
3812     + priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
3813     + spin_unlock_irqrestore(&priv->lock, flags);
3814     +
3815     + ch341_set_handshake(port->serial->dev, priv->line_control);
3816     }
3817    
3818     static void ch341_break_ctl(struct tty_struct *tty, int break_state)
3819     @@ -539,14 +554,23 @@ static int ch341_tiocmget(struct tty_struct *tty)
3820    
3821     static int ch341_reset_resume(struct usb_serial *serial)
3822     {
3823     - struct ch341_private *priv;
3824     -
3825     - priv = usb_get_serial_port_data(serial->port[0]);
3826     + struct usb_serial_port *port = serial->port[0];
3827     + struct ch341_private *priv = usb_get_serial_port_data(port);
3828     + int ret;
3829    
3830     /* reconfigure ch341 serial port after bus-reset */
3831     ch341_configure(serial->dev, priv);
3832    
3833     - return 0;
3834     + if (tty_port_initialized(&port->port)) {
3835     + ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
3836     + if (ret) {
3837     + dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
3838     + ret);
3839     + return ret;
3840     + }
3841     + }
3842     +
3843     + return usb_serial_generic_resume(serial);
3844     }
3845    
3846     static struct usb_serial_driver ch341_device = {
3847     diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
3848     index 0ee190fc1bf8..6cb45757818f 100644
3849     --- a/drivers/usb/serial/kl5kusb105.c
3850     +++ b/drivers/usb/serial/kl5kusb105.c
3851     @@ -192,10 +192,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
3852     status_buf, KLSI_STATUSBUF_LEN,
3853     10000
3854     );
3855     - if (rc < 0)
3856     - dev_err(&port->dev, "Reading line status failed (error = %d)\n",
3857     - rc);
3858     - else {
3859     + if (rc != KLSI_STATUSBUF_LEN) {
3860     + dev_err(&port->dev, "reading line status failed: %d\n", rc);
3861     + if (rc >= 0)
3862     + rc = -EIO;
3863     + } else {
3864     status = get_unaligned_le16(status_buf);
3865    
3866     dev_info(&port->serial->dev->dev, "read status %x %x\n",
3867     diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
3868     index 79451f7ef1b7..062c205f0046 100644
3869     --- a/drivers/usb/wusbcore/crypto.c
3870     +++ b/drivers/usb/wusbcore/crypto.c
3871     @@ -216,7 +216,6 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
3872     struct scatterlist sg[4], sg_dst;
3873     void *dst_buf;
3874     size_t dst_size;
3875     - const u8 bzero[16] = { 0 };
3876     u8 iv[crypto_skcipher_ivsize(tfm_cbc)];
3877     size_t zero_padding;
3878    
3879     @@ -261,7 +260,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
3880     sg_set_buf(&sg[1], &scratch->b1, sizeof(scratch->b1));
3881     sg_set_buf(&sg[2], b, blen);
3882     /* 0 if well behaved :) */
3883     - sg_set_buf(&sg[3], bzero, zero_padding);
3884     + sg_set_page(&sg[3], ZERO_PAGE(0), zero_padding, 0);
3885     sg_init_one(&sg_dst, dst_buf, dst_size);
3886    
3887     skcipher_request_set_tfm(req, tfm_cbc);
3888     diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
3889     index 6b5ee896af63..7cc51223db1c 100644
3890     --- a/drivers/vme/bridges/vme_ca91cx42.c
3891     +++ b/drivers/vme/bridges/vme_ca91cx42.c
3892     @@ -464,7 +464,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
3893     vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
3894     pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
3895    
3896     - *pci_base = (dma_addr_t)vme_base + pci_offset;
3897     + *pci_base = (dma_addr_t)*vme_base + pci_offset;
3898     *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
3899    
3900     *enabled = 0;
3901     diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
3902     index 63d197724519..ff0b0be92d61 100644
3903     --- a/fs/btrfs/async-thread.c
3904     +++ b/fs/btrfs/async-thread.c
3905     @@ -273,6 +273,8 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
3906     unsigned long flags;
3907    
3908     while (1) {
3909     + void *wtag;
3910     +
3911     spin_lock_irqsave(lock, flags);
3912     if (list_empty(list))
3913     break;
3914     @@ -299,11 +301,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
3915     spin_unlock_irqrestore(lock, flags);
3916    
3917     /*
3918     - * we don't want to call the ordered free functions
3919     - * with the lock held though
3920     + * We don't want to call the ordered free functions with the
3921     + * lock held though. Save the work as tag for the trace event,
3922     + * because the callback could free the structure.
3923     */
3924     + wtag = work;
3925     work->ordered_free(work);
3926     - trace_btrfs_all_work_done(work);
3927     + trace_btrfs_all_work_done(wq->fs_info, wtag);
3928     }
3929     spin_unlock_irqrestore(lock, flags);
3930     }
3931     @@ -311,6 +315,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
3932     static void normal_work_helper(struct btrfs_work *work)
3933     {
3934     struct __btrfs_workqueue *wq;
3935     + void *wtag;
3936     int need_order = 0;
3937    
3938     /*
3939     @@ -324,6 +329,8 @@ static void normal_work_helper(struct btrfs_work *work)
3940     if (work->ordered_func)
3941     need_order = 1;
3942     wq = work->wq;
3943     + /* Safe for tracepoints in case work gets freed by the callback */
3944     + wtag = work;
3945    
3946     trace_btrfs_work_sched(work);
3947     thresh_exec_hook(wq);
3948     @@ -333,7 +340,7 @@ static void normal_work_helper(struct btrfs_work *work)
3949     run_ordered_work(wq);
3950     }
3951     if (!need_order)
3952     - trace_btrfs_all_work_done(work);
3953     + trace_btrfs_all_work_done(wq->fs_info, wtag);
3954     }
3955    
3956     void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
3957     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3958     index 4607af38c72e..5909ae8c6731 100644
3959     --- a/fs/btrfs/extent-tree.c
3960     +++ b/fs/btrfs/extent-tree.c
3961     @@ -2537,11 +2537,11 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
3962     if (ref && ref->seq &&
3963     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
3964     spin_unlock(&locked_ref->lock);
3965     - btrfs_delayed_ref_unlock(locked_ref);
3966     spin_lock(&delayed_refs->lock);
3967     locked_ref->processing = 0;
3968     delayed_refs->num_heads_ready++;
3969     spin_unlock(&delayed_refs->lock);
3970     + btrfs_delayed_ref_unlock(locked_ref);
3971     locked_ref = NULL;
3972     cond_resched();
3973     count++;
3974     @@ -2587,7 +2587,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
3975     */
3976     if (must_insert_reserved)
3977     locked_ref->must_insert_reserved = 1;
3978     + spin_lock(&delayed_refs->lock);
3979     locked_ref->processing = 0;
3980     + delayed_refs->num_heads_ready++;
3981     + spin_unlock(&delayed_refs->lock);
3982     btrfs_debug(fs_info,
3983     "run_delayed_extent_op returned %d",
3984     ret);
3985     diff --git a/fs/dcache.c b/fs/dcache.c
3986     index 5c7cc953ac81..4485a48f4091 100644
3987     --- a/fs/dcache.c
3988     +++ b/fs/dcache.c
3989     @@ -1330,8 +1330,11 @@ int d_set_mounted(struct dentry *dentry)
3990     }
3991     spin_lock(&dentry->d_lock);
3992     if (!d_unlinked(dentry)) {
3993     - dentry->d_flags |= DCACHE_MOUNTED;
3994     - ret = 0;
3995     + ret = -EBUSY;
3996     + if (!d_mountpoint(dentry)) {
3997     + dentry->d_flags |= DCACHE_MOUNTED;
3998     + ret = 0;
3999     + }
4000     }
4001     spin_unlock(&dentry->d_lock);
4002     out:
4003     diff --git a/fs/namespace.c b/fs/namespace.c
4004     index e6c234b1a645..7cea503ae06d 100644
4005     --- a/fs/namespace.c
4006     +++ b/fs/namespace.c
4007     @@ -746,26 +746,50 @@ static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
4008     return NULL;
4009     }
4010    
4011     -static struct mountpoint *new_mountpoint(struct dentry *dentry)
4012     +static struct mountpoint *get_mountpoint(struct dentry *dentry)
4013     {
4014     - struct hlist_head *chain = mp_hash(dentry);
4015     - struct mountpoint *mp;
4016     + struct mountpoint *mp, *new = NULL;
4017     int ret;
4018    
4019     - mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
4020     - if (!mp)
4021     + if (d_mountpoint(dentry)) {
4022     +mountpoint:
4023     + read_seqlock_excl(&mount_lock);
4024     + mp = lookup_mountpoint(dentry);
4025     + read_sequnlock_excl(&mount_lock);
4026     + if (mp)
4027     + goto done;
4028     + }
4029     +
4030     + if (!new)
4031     + new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
4032     + if (!new)
4033     return ERR_PTR(-ENOMEM);
4034    
4035     +
4036     + /* Exactly one processes may set d_mounted */
4037     ret = d_set_mounted(dentry);
4038     - if (ret) {
4039     - kfree(mp);
4040     - return ERR_PTR(ret);
4041     - }
4042    
4043     - mp->m_dentry = dentry;
4044     - mp->m_count = 1;
4045     - hlist_add_head(&mp->m_hash, chain);
4046     - INIT_HLIST_HEAD(&mp->m_list);
4047     + /* Someone else set d_mounted? */
4048     + if (ret == -EBUSY)
4049     + goto mountpoint;
4050     +
4051     + /* The dentry is not available as a mountpoint? */
4052     + mp = ERR_PTR(ret);
4053     + if (ret)
4054     + goto done;
4055     +
4056     + /* Add the new mountpoint to the hash table */
4057     + read_seqlock_excl(&mount_lock);
4058     + new->m_dentry = dentry;
4059     + new->m_count = 1;
4060     + hlist_add_head(&new->m_hash, mp_hash(dentry));
4061     + INIT_HLIST_HEAD(&new->m_list);
4062     + read_sequnlock_excl(&mount_lock);
4063     +
4064     + mp = new;
4065     + new = NULL;
4066     +done:
4067     + kfree(new);
4068     return mp;
4069     }
4070    
4071     @@ -1568,11 +1592,11 @@ void __detach_mounts(struct dentry *dentry)
4072     struct mount *mnt;
4073    
4074     namespace_lock();
4075     + lock_mount_hash();
4076     mp = lookup_mountpoint(dentry);
4077     if (IS_ERR_OR_NULL(mp))
4078     goto out_unlock;
4079    
4080     - lock_mount_hash();
4081     event++;
4082     while (!hlist_empty(&mp->m_list)) {
4083     mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
4084     @@ -1582,9 +1606,9 @@ void __detach_mounts(struct dentry *dentry)
4085     }
4086     else umount_tree(mnt, UMOUNT_CONNECTED);
4087     }
4088     - unlock_mount_hash();
4089     put_mountpoint(mp);
4090     out_unlock:
4091     + unlock_mount_hash();
4092     namespace_unlock();
4093     }
4094    
4095     @@ -2013,9 +2037,7 @@ static struct mountpoint *lock_mount(struct path *path)
4096     namespace_lock();
4097     mnt = lookup_mnt(path);
4098     if (likely(!mnt)) {
4099     - struct mountpoint *mp = lookup_mountpoint(dentry);
4100     - if (!mp)
4101     - mp = new_mountpoint(dentry);
4102     + struct mountpoint *mp = get_mountpoint(dentry);
4103     if (IS_ERR(mp)) {
4104     namespace_unlock();
4105     inode_unlock(dentry->d_inode);
4106     @@ -2034,7 +2056,11 @@ static struct mountpoint *lock_mount(struct path *path)
4107     static void unlock_mount(struct mountpoint *where)
4108     {
4109     struct dentry *dentry = where->m_dentry;
4110     +
4111     + read_seqlock_excl(&mount_lock);
4112     put_mountpoint(where);
4113     + read_sequnlock_excl(&mount_lock);
4114     +
4115     namespace_unlock();
4116     inode_unlock(dentry->d_inode);
4117     }
4118     @@ -3110,9 +3136,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
4119     touch_mnt_namespace(current->nsproxy->mnt_ns);
4120     /* A moved mount should not expire automatically */
4121     list_del_init(&new_mnt->mnt_expire);
4122     + put_mountpoint(root_mp);
4123     unlock_mount_hash();
4124     chroot_fs_refs(&root, &new);
4125     - put_mountpoint(root_mp);
4126     error = 0;
4127     out4:
4128     unlock_mount(old_mp);
4129     diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
4130     index 5f1af4cd1a33..53e02b8bd9bd 100644
4131     --- a/fs/nfs/dir.c
4132     +++ b/fs/nfs/dir.c
4133     @@ -477,7 +477,7 @@ void nfs_force_use_readdirplus(struct inode *dir)
4134     {
4135     if (!list_empty(&NFS_I(dir)->open_files)) {
4136     nfs_advise_use_readdirplus(dir);
4137     - nfs_zap_mapping(dir, dir->i_mapping);
4138     + invalidate_mapping_pages(dir->i_mapping, 0, -1);
4139     }
4140     }
4141    
4142     @@ -886,17 +886,6 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc)
4143     goto out;
4144     }
4145    
4146     -static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
4147     -{
4148     - struct nfs_inode *nfsi = NFS_I(dir);
4149     -
4150     - if (nfs_attribute_cache_expired(dir))
4151     - return true;
4152     - if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
4153     - return true;
4154     - return false;
4155     -}
4156     -
4157     /* The file offset position represents the dirent entry number. A
4158     last cookie cache takes care of the common case of reading the
4159     whole directory.
4160     @@ -928,7 +917,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
4161     desc->decode = NFS_PROTO(inode)->decode_dirent;
4162     desc->plus = nfs_use_readdirplus(inode, ctx) ? 1 : 0;
4163    
4164     - if (ctx->pos == 0 || nfs_dir_mapping_need_revalidate(inode))
4165     + if (ctx->pos == 0 || nfs_attribute_cache_expired(inode))
4166     res = nfs_revalidate_mapping(inode, file->f_mapping);
4167     if (res < 0)
4168     goto out;
4169     diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c
4170     index 4946ef40ba87..85ef38f9765f 100644
4171     --- a/fs/nfs/filelayout/filelayoutdev.c
4172     +++ b/fs/nfs/filelayout/filelayoutdev.c
4173     @@ -283,7 +283,8 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
4174     s->nfs_client->cl_rpcclient->cl_auth->au_flavor);
4175    
4176     out_test_devid:
4177     - if (filelayout_test_devid_unavailable(devid))
4178     + if (ret->ds_clp == NULL ||
4179     + filelayout_test_devid_unavailable(devid))
4180     ret = NULL;
4181     out:
4182     return ret;
4183     diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
4184     index 31b107e196fd..415d7e69bc5e 100644
4185     --- a/fs/nfs/pnfs.c
4186     +++ b/fs/nfs/pnfs.c
4187     @@ -1257,13 +1257,11 @@ bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
4188     * i_lock */
4189     spin_lock(&ino->i_lock);
4190     lo = nfsi->layout;
4191     - if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
4192     + if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
4193     + rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
4194     sleep = true;
4195     + }
4196     spin_unlock(&ino->i_lock);
4197     -
4198     - if (sleep)
4199     - rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
4200     -
4201     return sleep;
4202     }
4203    
4204     diff --git a/fs/nfs/super.c b/fs/nfs/super.c
4205     index 001796bcd6c8..ddce94ce8142 100644
4206     --- a/fs/nfs/super.c
4207     +++ b/fs/nfs/super.c
4208     @@ -2904,7 +2904,7 @@ module_param(max_session_slots, ushort, 0644);
4209     MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
4210     "requests the client will negotiate");
4211     module_param(max_session_cb_slots, ushort, 0644);
4212     -MODULE_PARM_DESC(max_session_slots, "Maximum number of parallel NFSv4.1 "
4213     +MODULE_PARM_DESC(max_session_cb_slots, "Maximum number of parallel NFSv4.1 "
4214     "callbacks the client will process for a given server");
4215     module_param(send_implementation_id, ushort, 0644);
4216     MODULE_PARM_DESC(send_implementation_id,
4217     diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
4218     index 83d576f6a287..77d1632e905d 100644
4219     --- a/fs/ocfs2/dlmglue.c
4220     +++ b/fs/ocfs2/dlmglue.c
4221     @@ -3303,6 +3303,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
4222     mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
4223     lockres->l_level, new_level);
4224    
4225     + /*
4226     + * On DLM_LKF_VALBLK, fsdlm behaves differently from o2cb. It always
4227     + * expects DLM_LKF_VALBLK to be set if the LKB has an LVB, so that
4228     + * we can recover correctly from node failure. Otherwise, we may get
4229     + * an invalid LVB in the LKB without DLM_SBF_VALNOTVALID being set.
4230     + */
4231     + if (!ocfs2_is_o2cb_active() &&
4232     + lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
4233     + lvb = 1;
4234     +
4235     if (lvb)
4236     dlm_flags |= DLM_LKF_VALBLK;
4237    
4238     diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
4239     index 52c07346bea3..820359096c7a 100644
4240     --- a/fs/ocfs2/stackglue.c
4241     +++ b/fs/ocfs2/stackglue.c
4242     @@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
4243     */
4244     static struct ocfs2_stack_plugin *active_stack;
4245    
4246     +inline int ocfs2_is_o2cb_active(void)
4247     +{
4248     + return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
4249     +}
4250     +EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
4251     +
4252     static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
4253     {
4254     struct ocfs2_stack_plugin *p;
4255     diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
4256     index f2dce10fae54..e3036e1790e8 100644
4257     --- a/fs/ocfs2/stackglue.h
4258     +++ b/fs/ocfs2/stackglue.h
4259     @@ -298,6 +298,9 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
4260     int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
4261     void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
4262    
4263     +/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
4264     +int ocfs2_is_o2cb_active(void);
4265     +
4266     extern struct kset *ocfs2_kset;
4267    
4268     #endif /* STACKGLUE_H */
4269     diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
4270     index 55313d994895..d4e37acd4821 100644
4271     --- a/fs/proc/proc_sysctl.c
4272     +++ b/fs/proc/proc_sysctl.c
4273     @@ -709,7 +709,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
4274     ctl_dir = container_of(head, struct ctl_dir, header);
4275    
4276     if (!dir_emit_dots(file, ctx))
4277     - return 0;
4278     + goto out;
4279    
4280     pos = 2;
4281    
4282     @@ -719,6 +719,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
4283     break;
4284     }
4285     }
4286     +out:
4287     sysctl_head_finish(head);
4288     return 0;
4289     }
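
The proc_sys_readdir() change is a classic single-exit cleanup fix: the early "return 0" on dir_emit_dots() failure skipped the sysctl_head_finish() call that balances the reference taken earlier in the function, so the early exit is now routed through the shared "out:" label. A small self-contained sketch of the idiom, in userspace C with invented resource names (malloc/free standing in for the get/put pair):

    #include <stdio.h>
    #include <stdlib.h>

    /* Single-exit cleanup: every early exit goes through "out" so the
     * resource acquired at the top is released on all paths. */
    static int emit_entries(int emit_dots_failed)
    {
        char *head = malloc(64);        /* "grab" the resource */
        int ret = 0;

        if (!head)
            return -1;

        if (emit_dots_failed)
            goto out;                   /* not "return 0": cleanup must still run */

        snprintf(head, 64, "emitting directory entries");
        puts(head);
    out:
        free(head);                     /* the put/finish call runs on all paths */
        return ret;
    }

    int main(void)
    {
        emit_entries(1);
        emit_entries(0);
        return 0;
    }
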
4290     diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
4291     index 2693ba84ec25..06763f5cc701 100644
4292     --- a/fs/xfs/xfs_aops.c
4293     +++ b/fs/xfs/xfs_aops.c
4294     @@ -1158,19 +1158,22 @@ xfs_vm_releasepage(
4295     * block_invalidatepage() can send pages that are still marked dirty
4296     * but otherwise have invalidated buffers.
4297     *
4298     - * We've historically freed buffers on the latter. Instead, quietly
4299     - * filter out all dirty pages to avoid spurious buffer state warnings.
4300     - * This can likely be removed once shrink_active_list() is fixed.
4301     + * We want to release the latter to avoid unnecessary buildup of the
4302     + * LRU and to skip the former, warning if we've left any lingering
4303     + * delalloc/unwritten buffers on clean pages. That is: skip pages with
4304     + * delalloc or unwritten buffers, warn if such a page is not dirty,
4305     + * and otherwise try to release the buffers.
4306     */
4307     - if (PageDirty(page))
4308     - return 0;
4309     -
4310     xfs_count_page_state(page, &delalloc, &unwritten);
4311    
4312     - if (WARN_ON_ONCE(delalloc))
4313     + if (delalloc) {
4314     + WARN_ON_ONCE(!PageDirty(page));
4315     return 0;
4316     - if (WARN_ON_ONCE(unwritten))
4317     + }
4318     + if (unwritten) {
4319     + WARN_ON_ONCE(!PageDirty(page));
4320     return 0;
4321     + }
4322    
4323     return try_to_free_buffers(page);
4324     }
4325     diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
4326     index c47c358ba052..f6a816129856 100644
4327     --- a/include/linux/blkdev.h
4328     +++ b/include/linux/blkdev.h
4329     @@ -1057,7 +1057,7 @@ static inline int blk_pre_runtime_suspend(struct request_queue *q)
4330     static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
4331     static inline void blk_pre_runtime_resume(struct request_queue *q) {}
4332     static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
4333     -extern inline void blk_set_runtime_active(struct request_queue *q) {}
4334     +static inline void blk_set_runtime_active(struct request_queue *q) {}
4335     #endif
4336    
4337     /*
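
The blkdev.h one-liner above is essentially a linkage fix: a stub with a body in a header must not be "extern inline", because depending on which inline semantics are in effect it either emits an external definition in every file including the header (multiple-definition link errors) or emits no definition at all (an unresolved symbol if the compiler declines to inline). "static inline" gives each translation unit its own discardable copy. A generic illustration of the corrected shape, with invented names rather than the kernel header:

    #ifndef EXAMPLE_FEATURE_H
    #define EXAMPLE_FEATURE_H

    struct ctx;     /* opaque to users of the header */

    #ifdef HAVE_FEATURE_X
    void feature_x_activate(struct ctx *c);                 /* real definition lives in a .c file */
    #else
    static inline void feature_x_activate(struct ctx *c) {} /* no-op fallback, one private copy per TU */
    #endif

    #endif /* EXAMPLE_FEATURE_H */
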
4338     diff --git a/include/linux/efi.h b/include/linux/efi.h
4339     index 2d089487d2da..cba7177cbec7 100644
4340     --- a/include/linux/efi.h
4341     +++ b/include/linux/efi.h
4342     @@ -103,6 +103,7 @@ typedef struct {
4343    
4344     #define EFI_PAGE_SHIFT 12
4345     #define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT)
4346     +#define EFI_PAGES_MAX (U64_MAX >> EFI_PAGE_SHIFT)
4347    
4348     typedef struct {
4349     u32 type;
4350     @@ -930,6 +931,7 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
4351     #endif
4352     extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
4353    
4354     +extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries);
4355     extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
4356     extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
4357     extern void __init efi_memmap_unmap(void);
4358     diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
4359     index 089f70f83e97..23da3af459fe 100644
4360     --- a/include/linux/jump_label_ratelimit.h
4361     +++ b/include/linux/jump_label_ratelimit.h
4362     @@ -14,6 +14,7 @@ struct static_key_deferred {
4363    
4364     #ifdef HAVE_JUMP_LABEL
4365     extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
4366     +extern void static_key_deferred_flush(struct static_key_deferred *key);
4367     extern void
4368     jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
4369    
4370     @@ -26,6 +27,10 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
4371     STATIC_KEY_CHECK_USE();
4372     static_key_slow_dec(&key->key);
4373     }
4374     +static inline void static_key_deferred_flush(struct static_key_deferred *key)
4375     +{
4376     + STATIC_KEY_CHECK_USE();
4377     +}
4378     static inline void
4379     jump_label_rate_limit(struct static_key_deferred *key,
4380     unsigned long rl)
4381     diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
4382     index 61d20c17f3b7..254698856b8f 100644
4383     --- a/include/linux/memcontrol.h
4384     +++ b/include/linux/memcontrol.h
4385     @@ -120,7 +120,7 @@ struct mem_cgroup_reclaim_iter {
4386     */
4387     struct mem_cgroup_per_node {
4388     struct lruvec lruvec;
4389     - unsigned long lru_size[NR_LRU_LISTS];
4390     + unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
4391    
4392     struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
4393    
4394     @@ -432,7 +432,7 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
4395     int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
4396    
4397     void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
4398     - int nr_pages);
4399     + int zid, int nr_pages);
4400    
4401     unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
4402     int nid, unsigned int lru_mask);
4403     @@ -441,9 +441,23 @@ static inline
4404     unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
4405     {
4406     struct mem_cgroup_per_node *mz;
4407     + unsigned long nr_pages = 0;
4408     + int zid;
4409    
4410     mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
4411     - return mz->lru_size[lru];
4412     + for (zid = 0; zid < MAX_NR_ZONES; zid++)
4413     + nr_pages += mz->lru_zone_size[zid][lru];
4414     + return nr_pages;
4415     +}
4416     +
4417     +static inline
4418     +unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
4419     + enum lru_list lru, int zone_idx)
4420     +{
4421     + struct mem_cgroup_per_node *mz;
4422     +
4423     + mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
4424     + return mz->lru_zone_size[zone_idx][lru];
4425     }
4426    
4427     void mem_cgroup_handle_over_high(void);
4428     @@ -671,6 +685,12 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
4429     {
4430     return 0;
4431     }
4432     +static inline
4433     +unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
4434     + enum lru_list lru, int zone_idx)
4435     +{
4436     + return 0;
4437     +}
4438    
4439     static inline unsigned long
4440     mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
4441     diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
4442     index 71613e8a720f..41d376e7116d 100644
4443     --- a/include/linux/mm_inline.h
4444     +++ b/include/linux/mm_inline.h
4445     @@ -39,7 +39,7 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
4446     {
4447     __update_lru_size(lruvec, lru, zid, nr_pages);
4448     #ifdef CONFIG_MEMCG
4449     - mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
4450     + mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
4451     #endif
4452     }
4453    
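
The memcontrol.h and mm_inline.h changes above replace the single per-node lru_size[] counters with per-zone lru_zone_size[zone][lru] counters: the zone id is threaded through update_lru_size(), and the node-wide figure is obtained by summing over zones. A compilable toy version of that bookkeeping layout, with array sizes and names invented for the sketch:

    #include <stdio.h>

    #define MAX_ZONES 4
    #define NR_LISTS  5

    static unsigned long lru_zone_size[MAX_ZONES][NR_LISTS];

    static void update_lru_size(int zid, int lru, long nr_pages)
    {
        lru_zone_size[zid][lru] += nr_pages;        /* per-zone update */
    }

    static unsigned long lru_size(int lru)
    {
        unsigned long nr = 0;

        for (int zid = 0; zid < MAX_ZONES; zid++)   /* node total = sum over zones */
            nr += lru_zone_size[zid][lru];
        return nr;
    }

    int main(void)
    {
        update_lru_size(0, 2, 128);
        update_lru_size(3, 2, 64);
        printf("list 2: %lu pages across all zones\n", lru_size(2));
        return 0;
    }
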
4454     diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
4455     index e30deb046156..bed9557b69e7 100644
4456     --- a/include/linux/power/bq27xxx_battery.h
4457     +++ b/include/linux/power/bq27xxx_battery.h
4458     @@ -4,7 +4,8 @@
4459     enum bq27xxx_chip {
4460     BQ27000 = 1, /* bq27000, bq27200 */
4461     BQ27010, /* bq27010, bq27210 */
4462     - BQ27500, /* bq27500, bq27510, bq27520 */
4463     + BQ27500, /* bq27500 */
4464     + BQ27510, /* bq27510, bq27520 */
4465     BQ27530, /* bq27530, bq27531 */
4466     BQ27541, /* bq27541, bq27542, bq27546, bq27742 */
4467     BQ27545, /* bq27545 */
4468     diff --git a/include/linux/swap.h b/include/linux/swap.h
4469     index a56523cefb9b..55ff5593c193 100644
4470     --- a/include/linux/swap.h
4471     +++ b/include/linux/swap.h
4472     @@ -150,8 +150,9 @@ enum {
4473     SWP_FILE = (1 << 7), /* set after swap_activate success */
4474     SWP_AREA_DISCARD = (1 << 8), /* single-time swap area discards */
4475     SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */
4476     + SWP_STABLE_WRITES = (1 << 10), /* don't overwrite PG_writeback pages */
4477     /* add others here before... */
4478     - SWP_SCANNING = (1 << 10), /* refcount in scan_swap_map */
4479     + SWP_SCANNING = (1 << 11), /* refcount in scan_swap_map */
4480     };
4481    
4482     #define SWAP_CLUSTER_MAX 32UL
4483     diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
4484     index 530c57bdefa0..915c4357945c 100644
4485     --- a/include/sound/hdmi-codec.h
4486     +++ b/include/sound/hdmi-codec.h
4487     @@ -36,10 +36,10 @@ struct hdmi_codec_daifmt {
4488     HDMI_AC97,
4489     HDMI_SPDIF,
4490     } fmt;
4491     - int bit_clk_inv:1;
4492     - int frame_clk_inv:1;
4493     - int bit_clk_master:1;
4494     - int frame_clk_master:1;
4495     + unsigned int bit_clk_inv:1;
4496     + unsigned int frame_clk_inv:1;
4497     + unsigned int bit_clk_master:1;
4498     + unsigned int frame_clk_master:1;
4499     };
4500    
4501     /*
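
The hdmi-codec change is a signedness fix: with gcc's treatment of plain int bitfields as signed, a 1-bit signed field can only hold 0 and -1, so assigning 1 to "int bit_clk_inv:1" reads back as -1 and breaks comparisons against 1; unsigned single-bit fields hold 0 and 1 as expected. A small standalone demonstration (struct names invented):

    #include <stdio.h>

    struct fmt_signed   { int          inv:1; };    /* 1-bit signed: values are 0 and -1 */
    struct fmt_unsigned { unsigned int inv:1; };    /* 1-bit unsigned: values are 0 and 1 */

    int main(void)
    {
        struct fmt_signed   s = { 0 };
        struct fmt_unsigned u = { 0 };

        s.inv = 1;
        u.inv = 1;

        printf("signed bitfield reads back as %d\n", s.inv);       /* -1 with gcc/clang */
        printf("unsigned bitfield reads back as %d\n", (int)u.inv); /* 1 */
        printf("s.inv == 1 is %s\n", s.inv == 1 ? "true" : "false"); /* false */
        return 0;
    }
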
4502     diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
4503     index e030d6f6c19a..6d7fe1169956 100644
4504     --- a/include/trace/events/btrfs.h
4505     +++ b/include/trace/events/btrfs.h
4506     @@ -1162,22 +1162,26 @@ DECLARE_EVENT_CLASS(btrfs__work,
4507     __entry->func, __entry->ordered_func, __entry->ordered_free)
4508     );
4509    
4510     -/* For situiations that the work is freed */
4511     +/*
4512     + * For situations when the work is freed, we pass fs_info and a tag that
4513     + * matches the address of the work structure so it can be paired with the
4514     + * scheduling event.
4515     + */
4516     DECLARE_EVENT_CLASS(btrfs__work__done,
4517    
4518     - TP_PROTO(struct btrfs_work *work),
4519     + TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
4520    
4521     - TP_ARGS(work),
4522     + TP_ARGS(fs_info, wtag),
4523    
4524     TP_STRUCT__entry_btrfs(
4525     - __field( void *, work )
4526     + __field( void *, wtag )
4527     ),
4528    
4529     - TP_fast_assign_btrfs(btrfs_work_owner(work),
4530     - __entry->work = work;
4531     + TP_fast_assign_btrfs(fs_info,
4532     + __entry->wtag = wtag;
4533     ),
4534    
4535     - TP_printk_btrfs("work->%p", __entry->work)
4536     + TP_printk_btrfs("work->%p", __entry->wtag)
4537     );
4538    
4539     DEFINE_EVENT(btrfs__work, btrfs_work_queued,
4540     @@ -1196,9 +1200,9 @@ DEFINE_EVENT(btrfs__work, btrfs_work_sched,
4541    
4542     DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
4543    
4544     - TP_PROTO(struct btrfs_work *work),
4545     + TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
4546    
4547     - TP_ARGS(work)
4548     + TP_ARGS(fs_info, wtag)
4549     );
4550    
4551     DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
4552     diff --git a/kernel/jump_label.c b/kernel/jump_label.c
4553     index 93ad6c1fb9b6..a9b8cf500591 100644
4554     --- a/kernel/jump_label.c
4555     +++ b/kernel/jump_label.c
4556     @@ -182,6 +182,13 @@ void static_key_slow_dec_deferred(struct static_key_deferred *key)
4557     }
4558     EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
4559    
4560     +void static_key_deferred_flush(struct static_key_deferred *key)
4561     +{
4562     + STATIC_KEY_CHECK_USE();
4563     + flush_delayed_work(&key->work);
4564     +}
4565     +EXPORT_SYMBOL_GPL(static_key_deferred_flush);
4566     +
4567     void jump_label_rate_limit(struct static_key_deferred *key,
4568     unsigned long rl)
4569     {
4570     diff --git a/kernel/memremap.c b/kernel/memremap.c
4571     index b501e390bb34..9ecedc28b928 100644
4572     --- a/kernel/memremap.c
4573     +++ b/kernel/memremap.c
4574     @@ -246,7 +246,9 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
4575     /* pages are dead and unused, undo the arch mapping */
4576     align_start = res->start & ~(SECTION_SIZE - 1);
4577     align_size = ALIGN(resource_size(res), SECTION_SIZE);
4578     + mem_hotplug_begin();
4579     arch_remove_memory(align_start, align_size);
4580     + mem_hotplug_done();
4581     untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
4582     pgmap_radix_release(res);
4583     dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
4584     @@ -358,7 +360,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
4585     if (error)
4586     goto err_pfn_remap;
4587    
4588     + mem_hotplug_begin();
4589     error = arch_add_memory(nid, align_start, align_size, true);
4590     + mem_hotplug_done();
4591     if (error)
4592     goto err_add_memory;
4593    
4594     diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
4595     index df9e8e9e0be7..eef2ce968636 100644
4596     --- a/kernel/pid_namespace.c
4597     +++ b/kernel/pid_namespace.c
4598     @@ -151,8 +151,12 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns
4599    
4600     static void delayed_free_pidns(struct rcu_head *p)
4601     {
4602     - kmem_cache_free(pid_ns_cachep,
4603     - container_of(p, struct pid_namespace, rcu));
4604     + struct pid_namespace *ns = container_of(p, struct pid_namespace, rcu);
4605     +
4606     + dec_pid_namespaces(ns->ucounts);
4607     + put_user_ns(ns->user_ns);
4608     +
4609     + kmem_cache_free(pid_ns_cachep, ns);
4610     }
4611    
4612     static void destroy_pid_namespace(struct pid_namespace *ns)
4613     @@ -162,8 +166,6 @@ static void destroy_pid_namespace(struct pid_namespace *ns)
4614     ns_free_inum(&ns->ns);
4615     for (i = 0; i < PIDMAP_ENTRIES; i++)
4616     kfree(ns->pidmap[i].page);
4617     - dec_pid_namespaces(ns->ucounts);
4618     - put_user_ns(ns->user_ns);
4619     call_rcu(&ns->rcu, delayed_free_pidns);
4620     }
4621    
4622     diff --git a/lib/iov_iter.c b/lib/iov_iter.c
4623     index f2bd21b93dfc..efb0b4d267a1 100644
4624     --- a/lib/iov_iter.c
4625     +++ b/lib/iov_iter.c
4626     @@ -678,43 +678,50 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
4627     }
4628     EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
4629    
4630     +static inline void pipe_truncate(struct iov_iter *i)
4631     +{
4632     + struct pipe_inode_info *pipe = i->pipe;
4633     + if (pipe->nrbufs) {
4634     + size_t off = i->iov_offset;
4635     + int idx = i->idx;
4636     + int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
4637     + if (off) {
4638     + pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
4639     + idx = next_idx(idx, pipe);
4640     + nrbufs++;
4641     + }
4642     + while (pipe->nrbufs > nrbufs) {
4643     + pipe_buf_release(pipe, &pipe->bufs[idx]);
4644     + idx = next_idx(idx, pipe);
4645     + pipe->nrbufs--;
4646     + }
4647     + }
4648     +}
4649     +
4650     static void pipe_advance(struct iov_iter *i, size_t size)
4651     {
4652     struct pipe_inode_info *pipe = i->pipe;
4653     - struct pipe_buffer *buf;
4654     - int idx = i->idx;
4655     - size_t off = i->iov_offset, orig_sz;
4656     -
4657     if (unlikely(i->count < size))
4658     size = i->count;
4659     - orig_sz = size;
4660     -
4661     if (size) {
4662     + struct pipe_buffer *buf;
4663     + size_t off = i->iov_offset, left = size;
4664     + int idx = i->idx;
4665     if (off) /* make it relative to the beginning of buffer */
4666     - size += off - pipe->bufs[idx].offset;
4667     + left += off - pipe->bufs[idx].offset;
4668     while (1) {
4669     buf = &pipe->bufs[idx];
4670     - if (size <= buf->len)
4671     + if (left <= buf->len)
4672     break;
4673     - size -= buf->len;
4674     + left -= buf->len;
4675     idx = next_idx(idx, pipe);
4676     }
4677     - buf->len = size;
4678     i->idx = idx;
4679     - off = i->iov_offset = buf->offset + size;
4680     - }
4681     - if (off)
4682     - idx = next_idx(idx, pipe);
4683     - if (pipe->nrbufs) {
4684     - int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
4685     - /* [curbuf,unused) is in use. Free [idx,unused) */
4686     - while (idx != unused) {
4687     - pipe_buf_release(pipe, &pipe->bufs[idx]);
4688     - idx = next_idx(idx, pipe);
4689     - pipe->nrbufs--;
4690     - }
4691     + i->iov_offset = buf->offset + left;
4692     }
4693     - i->count -= orig_sz;
4694     + i->count -= size;
4695     + /* ... and discard everything past that point */
4696     + pipe_truncate(i);
4697     }
4698    
4699     void iov_iter_advance(struct iov_iter *i, size_t size)
4700     @@ -774,6 +781,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
4701     size_t count)
4702     {
4703     BUG_ON(direction != ITER_PIPE);
4704     + WARN_ON(pipe->nrbufs == pipe->buffers);
4705     i->type = direction;
4706     i->pipe = pipe;
4707     i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
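
The iov_iter rework above factors the "drop everything past the current position" step into pipe_truncate(), which trims the partially consumed buffer and then walks the pipe's circular buffer array releasing every remaining slot. The toy below shows only the release loop over a power-of-two ring, with the slot structure and sizes invented (it is not the pipe_buffer machinery):

    #include <stdio.h>

    #define NBUFS 8     /* ring size, a power of two like pipe->buffers */

    struct slot { int in_use; };

    /* Keep the first 'keep' occupied slots starting at 'first' and release
     * everything after them, walking the ring with a power-of-two mask. */
    static void ring_truncate(struct slot *ring, int first, int *nbufs, int keep)
    {
        int idx = (first + keep) & (NBUFS - 1);

        while (*nbufs > keep) {
            ring[idx].in_use = 0;           /* "release the buffer" */
            idx = (idx + 1) & (NBUFS - 1);
            (*nbufs)--;
        }
    }

    int main(void)
    {
        struct slot ring[NBUFS] = { { 0 } };
        int nbufs = 5;

        for (int i = 0; i < nbufs; i++)
            ring[i].in_use = 1;

        ring_truncate(ring, 0, &nbufs, 2);          /* keep two slots, drop three */
        printf("slots still in use: %d\n", nbufs);  /* prints 2 */
        return 0;
    }
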
4708     diff --git a/mm/filemap.c b/mm/filemap.c
4709     index 9a50acecc473..779801092ef1 100644
4710     --- a/mm/filemap.c
4711     +++ b/mm/filemap.c
4712     @@ -144,7 +144,7 @@ static int page_cache_tree_insert(struct address_space *mapping,
4713     workingset_node_pages_dec(node);
4714     /* Wakeup waiters for exceptional entry lock */
4715     dax_wake_mapping_entry_waiter(mapping, page->index,
4716     - false);
4717     + true);
4718     }
4719     }
4720     radix_tree_replace_slot(slot, page);
4721     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
4722     index d4a6e4001512..8ca40b70beae 100644
4723     --- a/mm/huge_memory.c
4724     +++ b/mm/huge_memory.c
4725     @@ -872,15 +872,17 @@ void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd)
4726     {
4727     pmd_t entry;
4728     unsigned long haddr;
4729     + bool write = fe->flags & FAULT_FLAG_WRITE;
4730    
4731     fe->ptl = pmd_lock(fe->vma->vm_mm, fe->pmd);
4732     if (unlikely(!pmd_same(*fe->pmd, orig_pmd)))
4733     goto unlock;
4734    
4735     entry = pmd_mkyoung(orig_pmd);
4736     + if (write)
4737     + entry = pmd_mkdirty(entry);
4738     haddr = fe->address & HPAGE_PMD_MASK;
4739     - if (pmdp_set_access_flags(fe->vma, haddr, fe->pmd, entry,
4740     - fe->flags & FAULT_FLAG_WRITE))
4741     + if (pmdp_set_access_flags(fe->vma, haddr, fe->pmd, entry, write))
4742     update_mmu_cache_pmd(fe->vma, fe->address, fe->pmd);
4743    
4744     unlock:
4745     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
4746     index 23aec01836aa..b6adedbafaf5 100644
4747     --- a/mm/hugetlb.c
4748     +++ b/mm/hugetlb.c
4749     @@ -1773,23 +1773,32 @@ static int gather_surplus_pages(struct hstate *h, int delta)
4750     }
4751    
4752     /*
4753     - * When releasing a hugetlb pool reservation, any surplus pages that were
4754     - * allocated to satisfy the reservation must be explicitly freed if they were
4755     - * never used.
4756     - * Called with hugetlb_lock held.
4757     + * This routine has two main purposes:
4758     + * 1) Decrement the reservation count (resv_huge_pages) by the value passed
4759     + * in unused_resv_pages. This corresponds to the prior adjustments made
4760     + * to the associated reservation map.
4761     + * 2) Free any unused surplus pages that may have been allocated to satisfy
4762     + * the reservation. As many as unused_resv_pages may be freed.
4763     + *
4764     + * Called with hugetlb_lock held. However, the lock could be dropped (and
4765     + * reacquired) during calls to cond_resched_lock. Whenever dropping the lock,
4766     + * we must make sure nobody else can claim pages we are in the process of
4767     + * freeing. Do this by ensuring resv_huge_pages is always greater than the
4768     + * number of huge pages we plan to free when dropping the lock.
4769     */
4770     static void return_unused_surplus_pages(struct hstate *h,
4771     unsigned long unused_resv_pages)
4772     {
4773     unsigned long nr_pages;
4774    
4775     - /* Uncommit the reservation */
4776     - h->resv_huge_pages -= unused_resv_pages;
4777     -
4778     /* Cannot return gigantic pages currently */
4779     if (hstate_is_gigantic(h))
4780     - return;
4781     + goto out;
4782    
4783     + /*
4784     + * Part (or even all) of the reservation could have been backed
4785     + * by pre-allocated pages. Only free surplus pages.
4786     + */
4787     nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
4788    
4789     /*
4790     @@ -1799,12 +1808,22 @@ static void return_unused_surplus_pages(struct hstate *h,
4791     * when the nodes with surplus pages have no free pages.
4792     * free_pool_huge_page() will balance the the freed pages across the
4793     * on-line nodes with memory and will handle the hstate accounting.
4794     + *
4795     + * Note that we decrement resv_huge_pages as we free the pages. If
4796     + * we drop the lock, resv_huge_pages will still be sufficiently large
4797     + * to cover subsequent pages we may free.
4798     */
4799     while (nr_pages--) {
4800     + h->resv_huge_pages--;
4801     + unused_resv_pages--;
4802     if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
4803     - break;
4804     + goto out;
4805     cond_resched_lock(&hugetlb_lock);
4806     }
4807     +
4808     +out:
4809     + /* Fully uncommit the reservation */
4810     + h->resv_huge_pages -= unused_resv_pages;
4811     }
4812    
4813    
4814     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4815     index 0f870ba43942..d536a9daa511 100644
4816     --- a/mm/memcontrol.c
4817     +++ b/mm/memcontrol.c
4818     @@ -625,8 +625,8 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
4819     unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
4820     int nid, unsigned int lru_mask)
4821     {
4822     + struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
4823     unsigned long nr = 0;
4824     - struct mem_cgroup_per_node *mz;
4825     enum lru_list lru;
4826    
4827     VM_BUG_ON((unsigned)nid >= nr_node_ids);
4828     @@ -634,8 +634,7 @@ unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
4829     for_each_lru(lru) {
4830     if (!(BIT(lru) & lru_mask))
4831     continue;
4832     - mz = mem_cgroup_nodeinfo(memcg, nid);
4833     - nr += mz->lru_size[lru];
4834     + nr += mem_cgroup_get_lru_size(lruvec, lru);
4835     }
4836     return nr;
4837     }
4838     @@ -1002,6 +1001,7 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgd
4839     * mem_cgroup_update_lru_size - account for adding or removing an lru page
4840     * @lruvec: mem_cgroup per zone lru vector
4841     * @lru: index of lru list the page is sitting on
4842     + * @zid: zone id of the accounted pages
4843     * @nr_pages: positive when adding or negative when removing
4844     *
4845     * This function must be called under lru_lock, just before a page is added
4846     @@ -1009,27 +1009,25 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgd
4847     * so as to allow it to check that lru_size 0 is consistent with list_empty).
4848     */
4849     void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
4850     - int nr_pages)
4851     + int zid, int nr_pages)
4852     {
4853     struct mem_cgroup_per_node *mz;
4854     unsigned long *lru_size;
4855     long size;
4856     - bool empty;
4857    
4858     if (mem_cgroup_disabled())
4859     return;
4860    
4861     mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
4862     - lru_size = mz->lru_size + lru;
4863     - empty = list_empty(lruvec->lists + lru);
4864     + lru_size = &mz->lru_zone_size[zid][lru];
4865    
4866     if (nr_pages < 0)
4867     *lru_size += nr_pages;
4868    
4869     size = *lru_size;
4870     - if (WARN_ONCE(size < 0 || empty != !size,
4871     - "%s(%p, %d, %d): lru_size %ld but %sempty\n",
4872     - __func__, lruvec, lru, nr_pages, size, empty ? "" : "not ")) {
4873     + if (WARN_ONCE(size < 0,
4874     + "%s(%p, %d, %d): lru_size %ld\n",
4875     + __func__, lruvec, lru, nr_pages, size)) {
4876     VM_BUG_ON(1);
4877     *lru_size = 0;
4878     }
4879     diff --git a/mm/slab.c b/mm/slab.c
4880     index 0b0550ca85b4..bd878f051a3b 100644
4881     --- a/mm/slab.c
4882     +++ b/mm/slab.c
4883     @@ -2475,7 +2475,6 @@ union freelist_init_state {
4884     unsigned int pos;
4885     unsigned int *list;
4886     unsigned int count;
4887     - unsigned int rand;
4888     };
4889     struct rnd_state rnd_state;
4890     };
4891     @@ -2501,8 +2500,7 @@ static bool freelist_state_initialize(union freelist_init_state *state,
4892     } else {
4893     state->list = cachep->random_seq;
4894     state->count = count;
4895     - state->pos = 0;
4896     - state->rand = rand;
4897     + state->pos = rand % count;
4898     ret = true;
4899     }
4900     return ret;
4901     @@ -2511,7 +2509,9 @@ static bool freelist_state_initialize(union freelist_init_state *state,
4902     /* Get the next entry on the list and randomize it using a random shift */
4903     static freelist_idx_t next_random_slot(union freelist_init_state *state)
4904     {
4905     - return (state->list[state->pos++] + state->rand) % state->count;
4906     + if (state->pos >= state->count)
4907     + state->pos = 0;
4908     + return state->list[state->pos++];
4909     }
4910    
4911     /* Swap two freelist entries */
4912     diff --git a/mm/swapfile.c b/mm/swapfile.c
4913     index f30438970cd1..d76b2a18f044 100644
4914     --- a/mm/swapfile.c
4915     +++ b/mm/swapfile.c
4916     @@ -943,11 +943,25 @@ bool reuse_swap_page(struct page *page, int *total_mapcount)
4917     count = page_trans_huge_mapcount(page, total_mapcount);
4918     if (count <= 1 && PageSwapCache(page)) {
4919     count += page_swapcount(page);
4920     - if (count == 1 && !PageWriteback(page)) {
4921     + if (count != 1)
4922     + goto out;
4923     + if (!PageWriteback(page)) {
4924     delete_from_swap_cache(page);
4925     SetPageDirty(page);
4926     + } else {
4927     + swp_entry_t entry;
4928     + struct swap_info_struct *p;
4929     +
4930     + entry.val = page_private(page);
4931     + p = swap_info_get(entry);
4932     + if (p->flags & SWP_STABLE_WRITES) {
4933     + spin_unlock(&p->lock);
4934     + return false;
4935     + }
4936     + spin_unlock(&p->lock);
4937     }
4938     }
4939     +out:
4940     return count <= 1;
4941     }
4942    
4943     @@ -2449,6 +2463,10 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
4944     error = -ENOMEM;
4945     goto bad_swap;
4946     }
4947     +
4948     + if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
4949     + p->flags |= SWP_STABLE_WRITES;
4950     +
4951     if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
4952     int cpu;
4953    
4954     diff --git a/mm/vmscan.c b/mm/vmscan.c
4955     index c4abf08861d2..fa30010a5277 100644
4956     --- a/mm/vmscan.c
4957     +++ b/mm/vmscan.c
4958     @@ -242,6 +242,16 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru)
4959     return node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
4960     }
4961    
4962     +unsigned long lruvec_zone_lru_size(struct lruvec *lruvec, enum lru_list lru,
4963     + int zone_idx)
4964     +{
4965     + if (!mem_cgroup_disabled())
4966     + return mem_cgroup_get_zone_lru_size(lruvec, lru, zone_idx);
4967     +
4968     + return zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zone_idx],
4969     + NR_ZONE_LRU_BASE + lru);
4970     +}
4971     +
4972     /*
4973     * Add a shrinker callback to be called from the vm.
4974     */
4975     @@ -1382,8 +1392,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
4976     * be complete before mem_cgroup_update_lru_size due to a santity check.
4977     */
4978     static __always_inline void update_lru_sizes(struct lruvec *lruvec,
4979     - enum lru_list lru, unsigned long *nr_zone_taken,
4980     - unsigned long nr_taken)
4981     + enum lru_list lru, unsigned long *nr_zone_taken)
4982     {
4983     int zid;
4984    
4985     @@ -1392,11 +1401,11 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
4986     continue;
4987    
4988     __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
4989     - }
4990     -
4991     #ifdef CONFIG_MEMCG
4992     - mem_cgroup_update_lru_size(lruvec, lru, -nr_taken);
4993     + mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
4994     #endif
4995     + }
4996     +
4997     }
4998    
4999     /*
5000     @@ -1501,7 +1510,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
5001     *nr_scanned = scan;
5002     trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan,
5003     nr_taken, mode, is_file_lru(lru));
5004     - update_lru_sizes(lruvec, lru, nr_zone_taken, nr_taken);
5005     + update_lru_sizes(lruvec, lru, nr_zone_taken);
5006     return nr_taken;
5007     }
5008    
5009     @@ -2047,10 +2056,8 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
5010     if (!managed_zone(zone))
5011     continue;
5012    
5013     - inactive_zone = zone_page_state(zone,
5014     - NR_ZONE_LRU_BASE + (file * LRU_FILE));
5015     - active_zone = zone_page_state(zone,
5016     - NR_ZONE_LRU_BASE + (file * LRU_FILE) + LRU_ACTIVE);
5017     + inactive_zone = lruvec_zone_lru_size(lruvec, file * LRU_FILE, zid);
5018     + active_zone = lruvec_zone_lru_size(lruvec, (file * LRU_FILE) + LRU_ACTIVE, zid);
5019    
5020     inactive -= min(inactive, inactive_zone);
5021     active -= min(active, active_zone);
5022     diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
5023     index 2fe9345c1407..7fbdbae58e65 100644
5024     --- a/net/bridge/br_netfilter_hooks.c
5025     +++ b/net/bridge/br_netfilter_hooks.c
5026     @@ -399,7 +399,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
5027     br_nf_hook_thresh(NF_BR_PRE_ROUTING,
5028     net, sk, skb, skb->dev,
5029     NULL,
5030     - br_nf_pre_routing_finish);
5031     + br_nf_pre_routing_finish_bridge);
5032     return 0;
5033     }
5034     ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
5035     diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
5036     index 02b45a8e8b35..91cbbf1c3f82 100644
5037     --- a/net/iucv/af_iucv.c
5038     +++ b/net/iucv/af_iucv.c
5039     @@ -1036,7 +1036,8 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
5040     {
5041     struct sock *sk = sock->sk;
5042     struct iucv_sock *iucv = iucv_sk(sk);
5043     - size_t headroom, linear;
5044     + size_t headroom = 0;
5045     + size_t linear;
5046     struct sk_buff *skb;
5047     struct iucv_message txmsg = {0};
5048     struct cmsghdr *cmsg;
5049     @@ -1114,18 +1115,20 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
5050     * this is fine for SOCK_SEQPACKET (unless we want to support
5051     * segmented records using the MSG_EOR flag), but
5052     * for SOCK_STREAM we might want to improve it in future */
5053     - headroom = (iucv->transport == AF_IUCV_TRANS_HIPER)
5054     - ? sizeof(struct af_iucv_trans_hdr) + ETH_HLEN : 0;
5055     - if (headroom + len < PAGE_SIZE) {
5056     + if (iucv->transport == AF_IUCV_TRANS_HIPER) {
5057     + headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
5058     linear = len;
5059     } else {
5060     - /* In nonlinear "classic" iucv skb,
5061     - * reserve space for iucv_array
5062     - */
5063     - if (iucv->transport != AF_IUCV_TRANS_HIPER)
5064     - headroom += sizeof(struct iucv_array) *
5065     - (MAX_SKB_FRAGS + 1);
5066     - linear = PAGE_SIZE - headroom;
5067     + if (len < PAGE_SIZE) {
5068     + linear = len;
5069     + } else {
5070     + /* In nonlinear "classic" iucv skb,
5071     + * reserve space for iucv_array
5072     + */
5073     + headroom = sizeof(struct iucv_array) *
5074     + (MAX_SKB_FRAGS + 1);
5075     + linear = PAGE_SIZE - headroom;
5076     + }
5077     }
5078     skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
5079     noblock, &err, 0);
5080     diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
5081     index a2dd6edaae37..1b3c18c2c1ec 100644
5082     --- a/net/wireless/nl80211.c
5083     +++ b/net/wireless/nl80211.c
5084     @@ -14402,13 +14402,17 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
5085    
5086     list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
5087     bool schedule_destroy_work = false;
5088     - bool schedule_scan_stop = false;
5089     struct cfg80211_sched_scan_request *sched_scan_req =
5090     rcu_dereference(rdev->sched_scan_req);
5091    
5092     if (sched_scan_req && notify->portid &&
5093     - sched_scan_req->owner_nlportid == notify->portid)
5094     - schedule_scan_stop = true;
5095     + sched_scan_req->owner_nlportid == notify->portid) {
5096     + sched_scan_req->owner_nlportid = 0;
5097     +
5098     + if (rdev->ops->sched_scan_stop &&
5099     + rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
5100     + schedule_work(&rdev->sched_scan_stop_wk);
5101     + }
5102    
5103     list_for_each_entry_rcu(wdev, &rdev->wiphy.wdev_list, list) {
5104     cfg80211_mlme_unregister_socket(wdev, notify->portid);
5105     @@ -14439,12 +14443,6 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
5106     spin_unlock(&rdev->destroy_list_lock);
5107     schedule_work(&rdev->destroy_work);
5108     }
5109     - } else if (schedule_scan_stop) {
5110     - sched_scan_req->owner_nlportid = 0;
5111     -
5112     - if (rdev->ops->sched_scan_stop &&
5113     - rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
5114     - schedule_work(&rdev->sched_scan_stop_wk);
5115     }
5116     }
5117    
5118     diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
5119     index f770dba2a6f6..a899ef81c705 100644
5120     --- a/tools/testing/selftests/Makefile
5121     +++ b/tools/testing/selftests/Makefile
5122     @@ -87,7 +87,7 @@ ifdef INSTALL_PATH
5123     done;
5124    
5125     @# Ask all targets to emit their test scripts
5126     - echo "#!/bin/bash" > $(ALL_SCRIPT)
5127     + echo "#!/bin/sh" > $(ALL_SCRIPT)
5128     echo "cd \$$(dirname \$$0)" >> $(ALL_SCRIPT)
5129     echo "ROOT=\$$PWD" >> $(ALL_SCRIPT)
5130    
5131     diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests
5132     index c09a682df56a..16058bbea7a8 100755
5133     --- a/tools/testing/selftests/net/run_netsocktests
5134     +++ b/tools/testing/selftests/net/run_netsocktests
5135     @@ -1,4 +1,4 @@
5136     -#!/bin/bash
5137     +#!/bin/sh
5138    
5139     echo "--------------------"
5140     echo "running socket test"
5141     diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c
5142     index 52abac4bb6a2..6d2fcd6fcb25 100644
5143     --- a/virt/lib/irqbypass.c
5144     +++ b/virt/lib/irqbypass.c
5145     @@ -195,7 +195,7 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
5146     mutex_lock(&lock);
5147    
5148     list_for_each_entry(tmp, &consumers, node) {
5149     - if (tmp->token == consumer->token) {
5150     + if (tmp->token == consumer->token || tmp == consumer) {
5151     mutex_unlock(&lock);
5152     module_put(THIS_MODULE);
5153     return -EBUSY;
5154     @@ -245,7 +245,7 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
5155     mutex_lock(&lock);
5156    
5157     list_for_each_entry(tmp, &consumers, node) {
5158     - if (tmp->token != consumer->token)
5159     + if (tmp != consumer)
5160     continue;
5161    
5162     list_for_each_entry(producer, &producers, node) {