Magellan Linux

Annotation of /trunk/kernel26-alx/patches-3.10/0130-3.10.31-all-fixes.patch

Parent Directory | Revision Log


Revision 2672 - (hide annotations) (download)
Tue Jul 21 16:46:35 2015 UTC (8 years, 9 months ago) by niro
File size: 26241 byte(s)
-3.10.84-alx-r1
1 niro 2672 diff --git a/Makefile b/Makefile
2     index 18016a55dbd3..56b93edbbe4e 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 3
7     PATCHLEVEL = 10
8     -SUBLEVEL = 30
9     +SUBLEVEL = 31
10     EXTRAVERSION =
11     NAME = TOSSUG Baby Fish
12    
13     diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
14     index 3300cbd18a89..0c13554965b8 100644
15     --- a/arch/arm64/include/asm/cacheflush.h
16     +++ b/arch/arm64/include/asm/cacheflush.h
17     @@ -116,6 +116,7 @@ extern void flush_dcache_page(struct page *);
18     static inline void __flush_icache_all(void)
19     {
20     asm("ic ialluis");
21     + dsb();
22     }
23    
24     #define flush_dcache_mmap_lock(mapping) \
25     diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
26     index 6a389dc1bd49..0ea7a22bcdf2 100644
27     --- a/arch/arm64/kernel/vdso.c
28     +++ b/arch/arm64/kernel/vdso.c
29     @@ -235,6 +235,8 @@ void update_vsyscall(struct timekeeper *tk)
30     vdso_data->use_syscall = use_syscall;
31     vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
32     vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
33     + vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
34     + vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
35    
36     if (!use_syscall) {
37     vdso_data->cs_cycle_last = tk->clock->cycle_last;
38     @@ -242,8 +244,6 @@ void update_vsyscall(struct timekeeper *tk)
39     vdso_data->xtime_clock_nsec = tk->xtime_nsec;
40     vdso_data->cs_mult = tk->mult;
41     vdso_data->cs_shift = tk->shift;
42     - vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
43     - vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
44     }
45    
46     smp_wmb();
47     diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
48     index d8064af42e62..6d20b7d162d8 100644
49     --- a/arch/arm64/kernel/vdso/Makefile
50     +++ b/arch/arm64/kernel/vdso/Makefile
51     @@ -48,7 +48,7 @@ $(obj-vdso): %.o: %.S
52    
53     # Actual build commands
54     quiet_cmd_vdsold = VDSOL $@
55     - cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
56     + cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
57     quiet_cmd_vdsoas = VDSOA $@
58     cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
59    
60     diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
61     index f0a6d10b5211..fe652ffd34c2 100644
62     --- a/arch/arm64/kernel/vdso/gettimeofday.S
63     +++ b/arch/arm64/kernel/vdso/gettimeofday.S
64     @@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime)
65     bl __do_get_tspec
66     seqcnt_check w9, 1b
67    
68     + mov x30, x2
69     +
70     cmp w0, #CLOCK_MONOTONIC
71     b.ne 6f
72    
73     @@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime)
74     ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
75     b.ne 8f
76    
77     + /* xtime_coarse_nsec is already right-shifted */
78     + mov x12, #0
79     +
80     /* Get coarse timespec. */
81     adr vdso_data, _vdso_data
82     3: seqcnt_acquire
83     @@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime)
84     lsr x11, x11, x12
85     stp x10, x11, [x1, #TSPEC_TV_SEC]
86     mov x0, xzr
87     - ret x2
88     + ret
89     7:
90     mov x30, x2
91     8: /* Syscall fallback. */
92     diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
93     index 80a369eab637..ba7477efad5c 100644
94     --- a/arch/arm64/mm/mmu.c
95     +++ b/arch/arm64/mm/mmu.c
96     @@ -203,10 +203,18 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
97     do {
98     next = pmd_addr_end(addr, end);
99     /* try section mapping first */
100     - if (((addr | next | phys) & ~SECTION_MASK) == 0)
101     + if (((addr | next | phys) & ~SECTION_MASK) == 0) {
102     + pmd_t old_pmd =*pmd;
103     set_pmd(pmd, __pmd(phys | prot_sect_kernel));
104     - else
105     + /*
106     + * Check for previous table entries created during
107     + * boot (__create_page_tables) and flush them.
108     + */
109     + if (!pmd_none(old_pmd))
110     + flush_tlb_all();
111     + } else {
112     alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
113     + }
114     phys += next - addr;
115     } while (pmd++, addr = next, addr != end);
116     }
117     diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
118     index 94e20dd2729f..2a245b55bb71 100644
119     --- a/arch/s390/crypto/aes_s390.c
120     +++ b/arch/s390/crypto/aes_s390.c
121     @@ -25,6 +25,7 @@
122     #include <linux/err.h>
123     #include <linux/module.h>
124     #include <linux/init.h>
125     +#include <linux/spinlock.h>
126     #include "crypt_s390.h"
127    
128     #define AES_KEYLEN_128 1
129     @@ -32,6 +33,7 @@
130     #define AES_KEYLEN_256 4
131    
132     static u8 *ctrblk;
133     +static DEFINE_SPINLOCK(ctrblk_lock);
134     static char keylen_flag;
135    
136     struct s390_aes_ctx {
137     @@ -756,43 +758,67 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
138     return aes_set_key(tfm, in_key, key_len);
139     }
140    
141     +static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
142     +{
143     + unsigned int i, n;
144     +
145     + /* only use complete blocks, max. PAGE_SIZE */
146     + n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
147     + for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
148     + memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
149     + AES_BLOCK_SIZE);
150     + crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
151     + }
152     + return n;
153     +}
154     +
155     static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
156     struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
157     {
158     int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
159     - unsigned int i, n, nbytes;
160     - u8 buf[AES_BLOCK_SIZE];
161     - u8 *out, *in;
162     + unsigned int n, nbytes;
163     + u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
164     + u8 *out, *in, *ctrptr = ctrbuf;
165    
166     if (!walk->nbytes)
167     return ret;
168    
169     - memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
170     + if (spin_trylock(&ctrblk_lock))
171     + ctrptr = ctrblk;
172     +
173     + memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
174     while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
175     out = walk->dst.virt.addr;
176     in = walk->src.virt.addr;
177     while (nbytes >= AES_BLOCK_SIZE) {
178     - /* only use complete blocks, max. PAGE_SIZE */
179     - n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
180     - nbytes & ~(AES_BLOCK_SIZE - 1);
181     - for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
182     - memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
183     - AES_BLOCK_SIZE);
184     - crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
185     - }
186     - ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
187     - if (ret < 0 || ret != n)
188     + if (ctrptr == ctrblk)
189     + n = __ctrblk_init(ctrptr, nbytes);
190     + else
191     + n = AES_BLOCK_SIZE;
192     + ret = crypt_s390_kmctr(func, sctx->key, out, in,
193     + n, ctrptr);
194     + if (ret < 0 || ret != n) {
195     + if (ctrptr == ctrblk)
196     + spin_unlock(&ctrblk_lock);
197     return -EIO;
198     + }
199     if (n > AES_BLOCK_SIZE)
200     - memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
201     + memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
202     AES_BLOCK_SIZE);
203     - crypto_inc(ctrblk, AES_BLOCK_SIZE);
204     + crypto_inc(ctrptr, AES_BLOCK_SIZE);
205     out += n;
206     in += n;
207     nbytes -= n;
208     }
209     ret = blkcipher_walk_done(desc, walk, nbytes);
210     }
211     + if (ctrptr == ctrblk) {
212     + if (nbytes)
213     + memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
214     + else
215     + memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
216     + spin_unlock(&ctrblk_lock);
217     + }
218     /*
219     * final block may be < AES_BLOCK_SIZE, copy only nbytes
220     */
221     @@ -800,14 +826,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
222     out = walk->dst.virt.addr;
223     in = walk->src.virt.addr;
224     ret = crypt_s390_kmctr(func, sctx->key, buf, in,
225     - AES_BLOCK_SIZE, ctrblk);
226     + AES_BLOCK_SIZE, ctrbuf);
227     if (ret < 0 || ret != AES_BLOCK_SIZE)
228     return -EIO;
229     memcpy(out, buf, nbytes);
230     - crypto_inc(ctrblk, AES_BLOCK_SIZE);
231     + crypto_inc(ctrbuf, AES_BLOCK_SIZE);
232     ret = blkcipher_walk_done(desc, walk, 0);
233     + memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
234     }
235     - memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
236     +
237     return ret;
238     }
239    
240     diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
241     index bcca01c9989d..2d96e68febb2 100644
242     --- a/arch/s390/crypto/des_s390.c
243     +++ b/arch/s390/crypto/des_s390.c
244     @@ -25,6 +25,7 @@
245     #define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
246    
247     static u8 *ctrblk;
248     +static DEFINE_SPINLOCK(ctrblk_lock);
249    
250     struct s390_des_ctx {
251     u8 iv[DES_BLOCK_SIZE];
252     @@ -105,29 +106,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
253     }
254    
255     static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
256     - u8 *iv, struct blkcipher_walk *walk)
257     + struct blkcipher_walk *walk)
258     {
259     + struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
260     int ret = blkcipher_walk_virt(desc, walk);
261     unsigned int nbytes = walk->nbytes;
262     + struct {
263     + u8 iv[DES_BLOCK_SIZE];
264     + u8 key[DES3_KEY_SIZE];
265     + } param;
266    
267     if (!nbytes)
268     goto out;
269    
270     - memcpy(iv, walk->iv, DES_BLOCK_SIZE);
271     + memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
272     + memcpy(param.key, ctx->key, DES3_KEY_SIZE);
273     do {
274     /* only use complete blocks */
275     unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
276     u8 *out = walk->dst.virt.addr;
277     u8 *in = walk->src.virt.addr;
278    
279     - ret = crypt_s390_kmc(func, iv, out, in, n);
280     + ret = crypt_s390_kmc(func, &param, out, in, n);
281     if (ret < 0 || ret != n)
282     return -EIO;
283    
284     nbytes &= DES_BLOCK_SIZE - 1;
285     ret = blkcipher_walk_done(desc, walk, nbytes);
286     } while ((nbytes = walk->nbytes));
287     - memcpy(walk->iv, iv, DES_BLOCK_SIZE);
288     + memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
289    
290     out:
291     return ret;
292     @@ -179,22 +186,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
293     struct scatterlist *dst, struct scatterlist *src,
294     unsigned int nbytes)
295     {
296     - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
297     struct blkcipher_walk walk;
298    
299     blkcipher_walk_init(&walk, dst, src, nbytes);
300     - return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
301     + return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
302     }
303    
304     static int cbc_des_decrypt(struct blkcipher_desc *desc,
305     struct scatterlist *dst, struct scatterlist *src,
306     unsigned int nbytes)
307     {
308     - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
309     struct blkcipher_walk walk;
310    
311     blkcipher_walk_init(&walk, dst, src, nbytes);
312     - return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
313     + return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
314     }
315    
316     static struct crypto_alg cbc_des_alg = {
317     @@ -327,22 +332,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
318     struct scatterlist *dst, struct scatterlist *src,
319     unsigned int nbytes)
320     {
321     - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
322     struct blkcipher_walk walk;
323    
324     blkcipher_walk_init(&walk, dst, src, nbytes);
325     - return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
326     + return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
327     }
328    
329     static int cbc_des3_decrypt(struct blkcipher_desc *desc,
330     struct scatterlist *dst, struct scatterlist *src,
331     unsigned int nbytes)
332     {
333     - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
334     struct blkcipher_walk walk;
335    
336     blkcipher_walk_init(&walk, dst, src, nbytes);
337     - return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
338     + return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
339     }
340    
341     static struct crypto_alg cbc_des3_alg = {
342     @@ -366,54 +369,80 @@ static struct crypto_alg cbc_des3_alg = {
343     }
344     };
345    
346     +static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
347     +{
348     + unsigned int i, n;
349     +
350     + /* align to block size, max. PAGE_SIZE */
351     + n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
352     + for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
353     + memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
354     + crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
355     + }
356     + return n;
357     +}
358     +
359     static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
360     - struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
361     + struct s390_des_ctx *ctx,
362     + struct blkcipher_walk *walk)
363     {
364     int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
365     - unsigned int i, n, nbytes;
366     - u8 buf[DES_BLOCK_SIZE];
367     - u8 *out, *in;
368     + unsigned int n, nbytes;
369     + u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
370     + u8 *out, *in, *ctrptr = ctrbuf;
371     +
372     + if (!walk->nbytes)
373     + return ret;
374    
375     - memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
376     + if (spin_trylock(&ctrblk_lock))
377     + ctrptr = ctrblk;
378     +
379     + memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
380     while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
381     out = walk->dst.virt.addr;
382     in = walk->src.virt.addr;
383     while (nbytes >= DES_BLOCK_SIZE) {
384     - /* align to block size, max. PAGE_SIZE */
385     - n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
386     - nbytes & ~(DES_BLOCK_SIZE - 1);
387     - for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
388     - memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
389     - DES_BLOCK_SIZE);
390     - crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
391     - }
392     - ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
393     - if (ret < 0 || ret != n)
394     + if (ctrptr == ctrblk)
395     + n = __ctrblk_init(ctrptr, nbytes);
396     + else
397     + n = DES_BLOCK_SIZE;
398     + ret = crypt_s390_kmctr(func, ctx->key, out, in,
399     + n, ctrptr);
400     + if (ret < 0 || ret != n) {
401     + if (ctrptr == ctrblk)
402     + spin_unlock(&ctrblk_lock);
403     return -EIO;
404     + }
405     if (n > DES_BLOCK_SIZE)
406     - memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
407     + memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
408     DES_BLOCK_SIZE);
409     - crypto_inc(ctrblk, DES_BLOCK_SIZE);
410     + crypto_inc(ctrptr, DES_BLOCK_SIZE);
411     out += n;
412     in += n;
413     nbytes -= n;
414     }
415     ret = blkcipher_walk_done(desc, walk, nbytes);
416     }
417     -
418     + if (ctrptr == ctrblk) {
419     + if (nbytes)
420     + memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
421     + else
422     + memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
423     + spin_unlock(&ctrblk_lock);
424     + }
425     /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
426     if (nbytes) {
427     out = walk->dst.virt.addr;
428     in = walk->src.virt.addr;
429     ret = crypt_s390_kmctr(func, ctx->key, buf, in,
430     - DES_BLOCK_SIZE, ctrblk);
431     + DES_BLOCK_SIZE, ctrbuf);
432     if (ret < 0 || ret != DES_BLOCK_SIZE)
433     return -EIO;
434     memcpy(out, buf, nbytes);
435     - crypto_inc(ctrblk, DES_BLOCK_SIZE);
436     + crypto_inc(ctrbuf, DES_BLOCK_SIZE);
437     ret = blkcipher_walk_done(desc, walk, 0);
438     + memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
439     }
440     - memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
441     return ret;
442     }
443    
444     diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
445     index 32d37d9a7787..f187806dfc18 100644
446     --- a/arch/x86/kernel/cpu/intel.c
447     +++ b/arch/x86/kernel/cpu/intel.c
448     @@ -628,7 +628,7 @@ static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
449     tlb_flushall_shift = 5;
450     break;
451     case 0x63a: /* Ivybridge */
452     - tlb_flushall_shift = 1;
453     + tlb_flushall_shift = 2;
454     break;
455     default:
456     tlb_flushall_shift = 6;
457     diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
458     index 82442085cbe6..573b4601d5b9 100644
459     --- a/drivers/infiniband/hw/qib/qib_user_sdma.c
460     +++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
461     @@ -284,8 +284,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
462     int j;
463     int ret;
464    
465     - ret = get_user_pages(current, current->mm, addr,
466     - npages, 0, 1, pages, NULL);
467     + ret = get_user_pages_fast(addr, npages, 0, pages);
468    
469     if (ret != npages) {
470     int i;
471     @@ -830,10 +829,7 @@ int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
472     while (dim) {
473     const int mxp = 8;
474    
475     - down_write(&current->mm->mmap_sem);
476     ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
477     - up_write(&current->mm->mmap_sem);
478     -
479     if (ret <= 0)
480     goto done_unlock;
481     else {
482     diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
483     index bb328a366122..a51ee009ed83 100644
484     --- a/drivers/irqchip/irq-armada-370-xp.c
485     +++ b/drivers/irqchip/irq-armada-370-xp.c
486     @@ -229,7 +229,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
487     ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
488     & IPI_DOORBELL_MASK;
489    
490     - writel(~IPI_DOORBELL_MASK, per_cpu_int_base +
491     + writel(~ipimask, per_cpu_int_base +
492     ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
493    
494     /* Handle all pending doorbells */
495     diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
496     index 46f05ad529f9..2e93ba5598c4 100644
497     --- a/drivers/media/usb/dvb-usb-v2/af9035.c
498     +++ b/drivers/media/usb/dvb-usb-v2/af9035.c
499     @@ -1517,6 +1517,8 @@ static const struct usb_device_id af9035_id_table[] = {
500     &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
501     { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
502     &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
503     + { DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900,
504     + &af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) },
505     { }
506     };
507     MODULE_DEVICE_TABLE(usb, af9035_id_table);
508     diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
509     index 90f583e5d6a6..a8f65d88c9e7 100644
510     --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
511     +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
512     @@ -68,7 +68,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
513     #else
514     static inline
515     struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
516     - struct mxl111sf_state *mxl_state
517     + struct mxl111sf_state *mxl_state,
518     struct mxl111sf_tuner_config *cfg)
519     {
520     printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
521     diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
522     index 5327f35d9b5c..bb7ee9cb00b1 100644
523     --- a/drivers/pinctrl/core.c
524     +++ b/drivers/pinctrl/core.c
525     @@ -807,7 +807,9 @@ static struct pinctrl *create_pinctrl(struct device *dev)
526     kref_init(&p->users);
527    
528     /* Add the pinctrl handle to the global list */
529     + mutex_lock(&pinctrl_list_mutex);
530     list_add_tail(&p->node, &pinctrl_list);
531     + mutex_unlock(&pinctrl_list_mutex);
532    
533     return p;
534     }
535     diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
536     index 70d986e04afb..8b54b5da00c0 100644
537     --- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
538     +++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
539     @@ -276,7 +276,20 @@ static int wmt_pctl_dt_node_to_map_pull(struct wmt_pinctrl_data *data,
540     if (!configs)
541     return -ENOMEM;
542    
543     - configs[0] = pull;
544     + switch (pull) {
545     + case 0:
546     + configs[0] = PIN_CONFIG_BIAS_DISABLE;
547     + break;
548     + case 1:
549     + configs[0] = PIN_CONFIG_BIAS_PULL_DOWN;
550     + break;
551     + case 2:
552     + configs[0] = PIN_CONFIG_BIAS_PULL_UP;
553     + break;
554     + default:
555     + configs[0] = PIN_CONFIG_BIAS_DISABLE;
556     + dev_err(data->dev, "invalid pull state %d - disabling\n", pull);
557     + }
558    
559     map->type = PIN_MAP_TYPE_CONFIGS_PIN;
560     map->data.configs.group_or_pin = data->groups[group];
561     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
562     index 0bcee78cde16..25e6a8e1014e 100644
563     --- a/fs/btrfs/inode.c
564     +++ b/fs/btrfs/inode.c
565     @@ -2655,7 +2655,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
566     EXTENT_DEFRAG, 1, cached_state);
567     if (ret) {
568     u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
569     - if (last_snapshot >= BTRFS_I(inode)->generation)
570     + if (0 && last_snapshot >= BTRFS_I(inode)->generation)
571     /* the inode is shared */
572     new = record_old_file_extents(inode, ordered_extent);
573    
574     diff --git a/fs/buffer.c b/fs/buffer.c
575     index d2a4d1bb2d57..75964d734444 100644
576     --- a/fs/buffer.c
577     +++ b/fs/buffer.c
578     @@ -620,14 +620,16 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
579     static void __set_page_dirty(struct page *page,
580     struct address_space *mapping, int warn)
581     {
582     - spin_lock_irq(&mapping->tree_lock);
583     + unsigned long flags;
584     +
585     + spin_lock_irqsave(&mapping->tree_lock, flags);
586     if (page->mapping) { /* Race with truncate? */
587     WARN_ON_ONCE(warn && !PageUptodate(page));
588     account_page_dirtied(page, mapping);
589     radix_tree_tag_set(&mapping->page_tree,
590     page_index(page), PAGECACHE_TAG_DIRTY);
591     }
592     - spin_unlock_irq(&mapping->tree_lock);
593     + spin_unlock_irqrestore(&mapping->tree_lock, flags);
594     __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
595     }
596    
597     diff --git a/lib/Makefile b/lib/Makefile
598     index c55a037a354e..9efe480b975e 100644
599     --- a/lib/Makefile
600     +++ b/lib/Makefile
601     @@ -45,6 +45,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
602     lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
603     lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
604    
605     +GCOV_PROFILE_hweight.o := n
606     CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
607     obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
608    
609     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
610     index 40ad2c6e0ca9..aa3b9a63394b 100644
611     --- a/mm/hugetlb.c
612     +++ b/mm/hugetlb.c
613     @@ -21,6 +21,7 @@
614     #include <linux/rmap.h>
615     #include <linux/swap.h>
616     #include <linux/swapops.h>
617     +#include <linux/page-isolation.h>
618    
619     #include <asm/page.h>
620     #include <asm/pgtable.h>
621     @@ -517,9 +518,15 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
622     {
623     struct page *page;
624    
625     - if (list_empty(&h->hugepage_freelists[nid]))
626     + list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
627     + if (!is_migrate_isolate_page(page))
628     + break;
629     + /*
630     + * if 'non-isolated free hugepage' not found on the list,
631     + * the allocation fails.
632     + */
633     + if (&h->hugepage_freelists[nid] == &page->lru)
634     return NULL;
635     - page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
636     list_move(&page->lru, &h->hugepage_activelist);
637     set_page_refcounted(page);
638     h->free_huge_pages--;
639     diff --git a/mm/memory-failure.c b/mm/memory-failure.c
640     index 3b4120e38d48..f2a591d87d00 100644
641     --- a/mm/memory-failure.c
642     +++ b/mm/memory-failure.c
643     @@ -1421,7 +1421,8 @@ static int __get_any_page(struct page *p, unsigned long pfn, int flags)
644    
645     /*
646     * Isolate the page, so that it doesn't get reallocated if it
647     - * was free.
648     + * was free. This flag should be kept set until the source page
649     + * is freed and PG_hwpoison on it is set.
650     */
651     set_migratetype_isolate(p, true);
652     /*
653     @@ -1444,7 +1445,6 @@ static int __get_any_page(struct page *p, unsigned long pfn, int flags)
654     /* Not a free page */
655     ret = 1;
656     }
657     - unset_migratetype_isolate(p, MIGRATE_MOVABLE);
658     unlock_memory_hotplug();
659     return ret;
660     }
661     @@ -1511,7 +1511,6 @@ static int soft_offline_huge_page(struct page *page, int flags)
662     atomic_long_inc(&num_poisoned_pages);
663     }
664     }
665     - /* keep elevated page count for bad page */
666     return ret;
667     }
668    
669     @@ -1576,7 +1575,7 @@ int soft_offline_page(struct page *page, int flags)
670     atomic_long_inc(&num_poisoned_pages);
671     }
672     }
673     - /* keep elevated page count for bad page */
674     + unset_migratetype_isolate(page, MIGRATE_MOVABLE);
675     return ret;
676     }
677    
678     @@ -1642,7 +1641,22 @@ static int __soft_offline_page(struct page *page, int flags)
679     if (ret > 0)
680     ret = -EIO;
681     } else {
682     + /*
683     + * After page migration succeeds, the source page can
684     + * be trapped in pagevec and actual freeing is delayed.
685     + * Freeing code works differently based on PG_hwpoison,
686     + * so there's a race. We need to make sure that the
687     + * source page should be freed back to buddy before
688     + * setting PG_hwpoison.
689     + */
690     + if (!is_free_buddy_page(page))
691     + lru_add_drain_all();
692     + if (!is_free_buddy_page(page))
693     + drain_all_pages();
694     SetPageHWPoison(page);
695     + if (!is_free_buddy_page(page))
696     + pr_info("soft offline: %#lx: page leaked\n",
697     + pfn);
698     atomic_long_inc(&num_poisoned_pages);
699     }
700     } else {
701     diff --git a/mm/page-writeback.c b/mm/page-writeback.c
702     index 5a06d4cb9a3d..73cbc5dc150b 100644
703     --- a/mm/page-writeback.c
704     +++ b/mm/page-writeback.c
705     @@ -2026,11 +2026,12 @@ int __set_page_dirty_nobuffers(struct page *page)
706     if (!TestSetPageDirty(page)) {
707     struct address_space *mapping = page_mapping(page);
708     struct address_space *mapping2;
709     + unsigned long flags;
710    
711     if (!mapping)
712     return 1;
713    
714     - spin_lock_irq(&mapping->tree_lock);
715     + spin_lock_irqsave(&mapping->tree_lock, flags);
716     mapping2 = page_mapping(page);
717     if (mapping2) { /* Race with truncate? */
718     BUG_ON(mapping2 != mapping);
719     @@ -2039,7 +2040,7 @@ int __set_page_dirty_nobuffers(struct page *page)
720     radix_tree_tag_set(&mapping->page_tree,
721     page_index(page), PAGECACHE_TAG_DIRTY);
722     }
723     - spin_unlock_irq(&mapping->tree_lock);
724     + spin_unlock_irqrestore(&mapping->tree_lock, flags);
725     if (mapping->host) {
726     /* !PageAnon && !swapper_space */
727     __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
728     diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
729     index b4feecc3fe01..18caa16de27b 100644
730     --- a/security/selinux/ss/services.c
731     +++ b/security/selinux/ss/services.c
732     @@ -1231,6 +1231,10 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
733     struct context context;
734     int rc = 0;
735    
736     + /* An empty security context is never valid. */
737     + if (!scontext_len)
738     + return -EINVAL;
739     +
740     if (!ss_initialized) {
741     int i;
742    
743     diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
744     index a7b07f72c9dd..5a6527668c07 100644
745     --- a/sound/pci/hda/patch_analog.c
746     +++ b/sound/pci/hda/patch_analog.c
747     @@ -1680,6 +1680,7 @@ static int ad1983_parse_auto_config(struct hda_codec *codec)
748     return err;
749     spec = codec->spec;
750    
751     + spec->gen.mixer_nid = 0x0e;
752     spec->gen.beep_nid = 0x10;
753     set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
754     err = ad198x_parse_auto_config(codec);
755     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
756     index 4b06e0a64392..87a03aca1b2e 100644
757     --- a/sound/pci/hda/patch_realtek.c
758     +++ b/sound/pci/hda/patch_realtek.c
759     @@ -1765,6 +1765,7 @@ enum {
760     ALC889_FIXUP_IMAC91_VREF,
761     ALC889_FIXUP_MBA11_VREF,
762     ALC889_FIXUP_MBA21_VREF,
763     + ALC889_FIXUP_MP11_VREF,
764     ALC882_FIXUP_INV_DMIC,
765     ALC882_FIXUP_NO_PRIMARY_HP,
766     ALC887_FIXUP_ASUS_BASS,
767     @@ -2119,6 +2120,12 @@ static const struct hda_fixup alc882_fixups[] = {
768     .chained = true,
769     .chain_id = ALC889_FIXUP_MBP_VREF,
770     },
771     + [ALC889_FIXUP_MP11_VREF] = {
772     + .type = HDA_FIXUP_FUNC,
773     + .v.func = alc889_fixup_mba11_vref,
774     + .chained = true,
775     + .chain_id = ALC885_FIXUP_MACPRO_GPIO,
776     + },
777     [ALC882_FIXUP_INV_DMIC] = {
778     .type = HDA_FIXUP_FUNC,
779     .v.func = alc_fixup_inv_dmic_0x12,
780     @@ -2176,7 +2183,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
781     SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
782     SND_PCI_QUIRK(0x106b, 0x00a1, "Macbook", ALC889_FIXUP_MBP_VREF),
783     SND_PCI_QUIRK(0x106b, 0x00a4, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF),
784     - SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC885_FIXUP_MACPRO_GPIO),
785     + SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC889_FIXUP_MP11_VREF),
786     SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_FIXUP_MACPRO_GPIO),
787     SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_FIXUP_MACPRO_GPIO),
788     SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC889_FIXUP_MBP_VREF),
789     diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
790     index 225dfd737265..ba2664200d14 100644
791     --- a/sound/usb/Kconfig
792     +++ b/sound/usb/Kconfig
793     @@ -14,6 +14,7 @@ config SND_USB_AUDIO
794     select SND_HWDEP
795     select SND_RAWMIDI
796     select SND_PCM
797     + select BITREVERSE
798     help
799     Say Y here to include support for USB audio and USB MIDI
800     devices.