Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0175-4.9.76-all-fixes.patch

Parent Directory | Revision Log


Revision 3608 - (hide annotations) (download)
Fri Aug 14 07:34:29 2020 UTC (3 years, 9 months ago) by niro
File size: 24323 byte(s)
-added kernel-alx-legacy pkg
1 niro 3608 diff --git a/Makefile b/Makefile
2     index acbc1b032db2..2637f0ed0a07 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 75
9     +SUBLEVEL = 76
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
14     index 41faf17cd28d..0684fd2f42e8 100644
15     --- a/arch/arc/include/asm/uaccess.h
16     +++ b/arch/arc/include/asm/uaccess.h
17     @@ -673,6 +673,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
18     return 0;
19    
20     __asm__ __volatile__(
21     + " mov lp_count, %5 \n"
22     " lp 3f \n"
23     "1: ldb.ab %3, [%2, 1] \n"
24     " breq.d %3, 0, 3f \n"
25     @@ -689,8 +690,8 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
26     " .word 1b, 4b \n"
27     " .previous \n"
28     : "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
29     - : "g"(-EFAULT), "l"(count)
30     - : "memory");
31     + : "g"(-EFAULT), "r"(count)
32     + : "lp_count", "lp_start", "lp_end", "memory");
33    
34     return res;
35     }
36     diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
37     index 8be707e1b6c7..82dea145574e 100644
38     --- a/arch/parisc/include/asm/ldcw.h
39     +++ b/arch/parisc/include/asm/ldcw.h
40     @@ -11,6 +11,7 @@
41     for the semaphore. */
42    
43     #define __PA_LDCW_ALIGNMENT 16
44     +#define __PA_LDCW_ALIGN_ORDER 4
45     #define __ldcw_align(a) ({ \
46     unsigned long __ret = (unsigned long) &(a)->lock[0]; \
47     __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \
48     @@ -28,6 +29,7 @@
49     ldcd). */
50    
51     #define __PA_LDCW_ALIGNMENT 4
52     +#define __PA_LDCW_ALIGN_ORDER 2
53     #define __ldcw_align(a) (&(a)->slock)
54     #define __LDCW "ldcw,co"
55    
56     diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
57     index 4fcff2dcc9c3..e3d3e8e1d708 100644
58     --- a/arch/parisc/kernel/entry.S
59     +++ b/arch/parisc/kernel/entry.S
60     @@ -35,6 +35,7 @@
61     #include <asm/pgtable.h>
62     #include <asm/signal.h>
63     #include <asm/unistd.h>
64     +#include <asm/ldcw.h>
65     #include <asm/thread_info.h>
66    
67     #include <linux/linkage.h>
68     @@ -46,6 +47,14 @@
69     #endif
70    
71     .import pa_tlb_lock,data
72     + .macro load_pa_tlb_lock reg
73     +#if __PA_LDCW_ALIGNMENT > 4
74     + load32 PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
75     + depi 0,31,__PA_LDCW_ALIGN_ORDER, \reg
76     +#else
77     + load32 PA(pa_tlb_lock), \reg
78     +#endif
79     + .endm
80    
81     /* space_to_prot macro creates a prot id from a space id */
82    
83     @@ -457,7 +466,7 @@
84     .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
85     #ifdef CONFIG_SMP
86     cmpib,COND(=),n 0,\spc,2f
87     - load32 PA(pa_tlb_lock),\tmp
88     + load_pa_tlb_lock \tmp
89     1: LDCW 0(\tmp),\tmp1
90     cmpib,COND(=) 0,\tmp1,1b
91     nop
92     @@ -480,7 +489,7 @@
93     /* Release pa_tlb_lock lock. */
94     .macro tlb_unlock1 spc,tmp
95     #ifdef CONFIG_SMP
96     - load32 PA(pa_tlb_lock),\tmp
97     + load_pa_tlb_lock \tmp
98     tlb_unlock0 \spc,\tmp
99     #endif
100     .endm
101     diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
102     index adf7187f8951..2d40c4ff3f69 100644
103     --- a/arch/parisc/kernel/pacache.S
104     +++ b/arch/parisc/kernel/pacache.S
105     @@ -36,6 +36,7 @@
106     #include <asm/assembly.h>
107     #include <asm/pgtable.h>
108     #include <asm/cache.h>
109     +#include <asm/ldcw.h>
110     #include <linux/linkage.h>
111    
112     .text
113     @@ -333,8 +334,12 @@ ENDPROC_CFI(flush_data_cache_local)
114    
115     .macro tlb_lock la,flags,tmp
116     #ifdef CONFIG_SMP
117     - ldil L%pa_tlb_lock,%r1
118     - ldo R%pa_tlb_lock(%r1),\la
119     +#if __PA_LDCW_ALIGNMENT > 4
120     + load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
121     + depi 0,31,__PA_LDCW_ALIGN_ORDER, \la
122     +#else
123     + load32 pa_tlb_lock, \la
124     +#endif
125     rsm PSW_SM_I,\flags
126     1: LDCW 0(\la),\tmp
127     cmpib,<>,n 0,\tmp,3f
128     diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
129     index 7593787ed4c3..c3a532abac03 100644
130     --- a/arch/parisc/kernel/process.c
131     +++ b/arch/parisc/kernel/process.c
132     @@ -39,6 +39,7 @@
133     #include <linux/kernel.h>
134     #include <linux/mm.h>
135     #include <linux/fs.h>
136     +#include <linux/cpu.h>
137     #include <linux/module.h>
138     #include <linux/personality.h>
139     #include <linux/ptrace.h>
140     @@ -180,6 +181,44 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
141     return 1;
142     }
143    
144     +/*
145     + * Idle thread support
146     + *
147     + * Detect when running on QEMU with SeaBIOS PDC Firmware and let
148     + * QEMU idle the host too.
149     + */
150     +
151     +int running_on_qemu __read_mostly;
152     +
153     +void __cpuidle arch_cpu_idle_dead(void)
154     +{
155     + /* nop on real hardware, qemu will offline CPU. */
156     + asm volatile("or %%r31,%%r31,%%r31\n":::);
157     +}
158     +
159     +void __cpuidle arch_cpu_idle(void)
160     +{
161     + local_irq_enable();
162     +
163     + /* nop on real hardware, qemu will idle sleep. */
164     + asm volatile("or %%r10,%%r10,%%r10\n":::);
165     +}
166     +
167     +static int __init parisc_idle_init(void)
168     +{
169     + const char *marker;
170     +
171     + /* check QEMU/SeaBIOS marker in PAGE0 */
172     + marker = (char *) &PAGE0->pad0;
173     + running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
174     +
175     + if (!running_on_qemu)
176     + cpu_idle_poll_ctrl(1);
177     +
178     + return 0;
179     +}
180     +arch_initcall(parisc_idle_init);
181     +
182     /*
183     * Copy architecture-specific thread state
184     */
185     diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
186     index 0f9cd90c11af..f06a9a0063f1 100644
187     --- a/arch/s390/kernel/compat_linux.c
188     +++ b/arch/s390/kernel/compat_linux.c
189     @@ -263,6 +263,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis
190     return retval;
191     }
192    
193     + groups_sort(group_info);
194     retval = set_current_groups(group_info);
195     put_group_info(group_info);
196    
197     diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
198     index 636c4b341f36..6bb7e92c6d50 100644
199     --- a/arch/x86/entry/vsyscall/vsyscall_64.c
200     +++ b/arch/x86/entry/vsyscall/vsyscall_64.c
201     @@ -66,6 +66,11 @@ static int __init vsyscall_setup(char *str)
202     }
203     early_param("vsyscall", vsyscall_setup);
204    
205     +bool vsyscall_enabled(void)
206     +{
207     + return vsyscall_mode != NONE;
208     +}
209     +
210     static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
211     const char *message)
212     {
213     diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
214     index 6ba66ee79710..4865e10dbb55 100644
215     --- a/arch/x86/include/asm/vsyscall.h
216     +++ b/arch/x86/include/asm/vsyscall.h
217     @@ -12,12 +12,14 @@ extern void map_vsyscall(void);
218     * Returns true if handled.
219     */
220     extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
221     +extern bool vsyscall_enabled(void);
222     #else
223     static inline void map_vsyscall(void) {}
224     static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
225     {
226     return false;
227     }
228     +static inline bool vsyscall_enabled(void) { return false; }
229     #endif
230    
231     #endif /* _ASM_X86_VSYSCALL_H */
232     diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
233     index 017bda12caae..b74bb29db6b9 100644
234     --- a/arch/x86/kernel/cpu/microcode/amd.c
235     +++ b/arch/x86/kernel/cpu/microcode/amd.c
236     @@ -592,6 +592,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
237     #define F14H_MPB_MAX_SIZE 1824
238     #define F15H_MPB_MAX_SIZE 4096
239     #define F16H_MPB_MAX_SIZE 3458
240     +#define F17H_MPB_MAX_SIZE 3200
241    
242     switch (family) {
243     case 0x14:
244     @@ -603,6 +604,9 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
245     case 0x16:
246     max_size = F16H_MPB_MAX_SIZE;
247     break;
248     + case 0x17:
249     + max_size = F17H_MPB_MAX_SIZE;
250     + break;
251     default:
252     max_size = F1XH_MPB_MAX_SIZE;
253     break;
254     diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
255     index 1e779bca4f3e..f92bdb9f4e46 100644
256     --- a/arch/x86/mm/init.c
257     +++ b/arch/x86/mm/init.c
258     @@ -768,7 +768,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
259     .state = 0,
260     .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
261     };
262     -EXPORT_SYMBOL_GPL(cpu_tlbstate);
263     +EXPORT_PER_CPU_SYMBOL(cpu_tlbstate);
264    
265     void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
266     {
267     diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
268     index d8376b4ad9f0..8f8e5e03d083 100644
269     --- a/arch/x86/mm/kaiser.c
270     +++ b/arch/x86/mm/kaiser.c
271     @@ -19,6 +19,7 @@
272     #include <asm/pgalloc.h>
273     #include <asm/desc.h>
274     #include <asm/cmdline.h>
275     +#include <asm/vsyscall.h>
276    
277     int kaiser_enabled __read_mostly = 1;
278     EXPORT_SYMBOL(kaiser_enabled); /* for inlined TLB flush functions */
279     @@ -110,12 +111,13 @@ static inline unsigned long get_pa_from_mapping(unsigned long vaddr)
280     *
281     * Returns a pointer to a PTE on success, or NULL on failure.
282     */
283     -static pte_t *kaiser_pagetable_walk(unsigned long address)
284     +static pte_t *kaiser_pagetable_walk(unsigned long address, bool user)
285     {
286     pmd_t *pmd;
287     pud_t *pud;
288     pgd_t *pgd = native_get_shadow_pgd(pgd_offset_k(address));
289     gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
290     + unsigned long prot = _KERNPG_TABLE;
291    
292     if (pgd_none(*pgd)) {
293     WARN_ONCE(1, "All shadow pgds should have been populated");
294     @@ -123,6 +125,17 @@ static pte_t *kaiser_pagetable_walk(unsigned long address)
295     }
296     BUILD_BUG_ON(pgd_large(*pgd) != 0);
297    
298     + if (user) {
299     + /*
300     + * The vsyscall page is the only page that will have
301     + * _PAGE_USER set. Catch everything else.
302     + */
303     + BUG_ON(address != VSYSCALL_ADDR);
304     +
305     + set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
306     + prot = _PAGE_TABLE;
307     + }
308     +
309     pud = pud_offset(pgd, address);
310     /* The shadow page tables do not use large mappings: */
311     if (pud_large(*pud)) {
312     @@ -135,7 +148,7 @@ static pte_t *kaiser_pagetable_walk(unsigned long address)
313     return NULL;
314     spin_lock(&shadow_table_allocation_lock);
315     if (pud_none(*pud)) {
316     - set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
317     + set_pud(pud, __pud(prot | __pa(new_pmd_page)));
318     __inc_zone_page_state(virt_to_page((void *)
319     new_pmd_page), NR_KAISERTABLE);
320     } else
321     @@ -155,7 +168,7 @@ static pte_t *kaiser_pagetable_walk(unsigned long address)
322     return NULL;
323     spin_lock(&shadow_table_allocation_lock);
324     if (pmd_none(*pmd)) {
325     - set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
326     + set_pmd(pmd, __pmd(prot | __pa(new_pte_page)));
327     __inc_zone_page_state(virt_to_page((void *)
328     new_pte_page), NR_KAISERTABLE);
329     } else
330     @@ -191,7 +204,7 @@ static int kaiser_add_user_map(const void *__start_addr, unsigned long size,
331     ret = -EIO;
332     break;
333     }
334     - pte = kaiser_pagetable_walk(address);
335     + pte = kaiser_pagetable_walk(address, flags & _PAGE_USER);
336     if (!pte) {
337     ret = -ENOMEM;
338     break;
339     @@ -318,6 +331,19 @@ void __init kaiser_init(void)
340    
341     kaiser_init_all_pgds();
342    
343     + /*
344     + * Note that this sets _PAGE_USER and it needs to happen when the
345     + * pagetable hierarchy gets created, i.e., early. Otherwise
346     + * kaiser_pagetable_walk() will encounter initialized PTEs in the
347     + * hierarchy and not set the proper permissions, leading to the
348     + * pagefaults with page-protection violations when trying to read the
349     + * vsyscall page. For example.
350     + */
351     + if (vsyscall_enabled())
352     + kaiser_add_user_map_early((void *)VSYSCALL_ADDR,
353     + PAGE_SIZE,
354     + __PAGE_KERNEL_VSYSCALL);
355     +
356     for_each_possible_cpu(cpu) {
357     void *percpu_vaddr = __per_cpu_user_mapped_start +
358     per_cpu_offset(cpu);
359     diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
360     index e899ef51dc8e..cb1c3a3287b0 100644
361     --- a/crypto/chacha20poly1305.c
362     +++ b/crypto/chacha20poly1305.c
363     @@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
364     algt->mask));
365     if (IS_ERR(poly))
366     return PTR_ERR(poly);
367     + poly_hash = __crypto_hash_alg_common(poly);
368     +
369     + err = -EINVAL;
370     + if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
371     + goto out_put_poly;
372    
373     err = -ENOMEM;
374     inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
375     @@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
376    
377     ctx = aead_instance_ctx(inst);
378     ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
379     - poly_hash = __crypto_hash_alg_common(poly);
380     err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
381     aead_crypto_instance(inst));
382     if (err)
383     diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
384     index ee9cfb99fe25..f8ec3d4ba4a8 100644
385     --- a/crypto/pcrypt.c
386     +++ b/crypto/pcrypt.c
387     @@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
388     crypto_free_aead(ctx->child);
389     }
390    
391     +static void pcrypt_free(struct aead_instance *inst)
392     +{
393     + struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
394     +
395     + crypto_drop_aead(&ctx->spawn);
396     + kfree(inst);
397     +}
398     +
399     static int pcrypt_init_instance(struct crypto_instance *inst,
400     struct crypto_alg *alg)
401     {
402     @@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
403     inst->alg.encrypt = pcrypt_aead_encrypt;
404     inst->alg.decrypt = pcrypt_aead_decrypt;
405    
406     + inst->free = pcrypt_free;
407     +
408     err = aead_register_instance(tmpl, inst);
409     if (err)
410     goto out_drop_aead;
411     @@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
412     return -EINVAL;
413     }
414    
415     -static void pcrypt_free(struct crypto_instance *inst)
416     -{
417     - struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
418     -
419     - crypto_drop_aead(&ctx->spawn);
420     - kfree(inst);
421     -}
422     -
423     static int pcrypt_cpumask_change_notify(struct notifier_block *self,
424     unsigned long val, void *data)
425     {
426     @@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
427     static struct crypto_template pcrypt_tmpl = {
428     .name = "pcrypt",
429     .create = pcrypt_create,
430     - .free = pcrypt_free,
431     .module = THIS_MODULE,
432     };
433    
434     diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
435     index 7d506cb73e54..4d30da269060 100644
436     --- a/drivers/block/nbd.c
437     +++ b/drivers/block/nbd.c
438     @@ -272,6 +272,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
439     int result, flags;
440     struct nbd_request request;
441     unsigned long size = blk_rq_bytes(req);
442     + struct bio *bio;
443     u32 type;
444    
445     if (req->cmd_type == REQ_TYPE_DRV_PRIV)
446     @@ -305,16 +306,20 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
447     return -EIO;
448     }
449    
450     - if (type == NBD_CMD_WRITE) {
451     - struct req_iterator iter;
452     + if (type != NBD_CMD_WRITE)
453     + return 0;
454     +
455     + flags = 0;
456     + bio = req->bio;
457     + while (bio) {
458     + struct bio *next = bio->bi_next;
459     + struct bvec_iter iter;
460     struct bio_vec bvec;
461     - /*
462     - * we are really probing at internals to determine
463     - * whether to set MSG_MORE or not...
464     - */
465     - rq_for_each_segment(bvec, req, iter) {
466     - flags = 0;
467     - if (!rq_iter_last(bvec, iter))
468     +
469     + bio_for_each_segment(bvec, bio, iter) {
470     + bool is_last = !next && bio_iter_last(bvec, iter);
471     +
472     + if (is_last)
473     flags = MSG_MORE;
474     dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
475     cmd, bvec.bv_len);
476     @@ -325,7 +330,16 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
477     result);
478     return -EIO;
479     }
480     + /*
481     + * The completion might already have come in,
482     + * so break for the last one instead of letting
483     + * the iterator do it. This prevents use-after-free
484     + * of the bio.
485     + */
486     + if (is_last)
487     + break;
488     }
489     + bio = next;
490     }
491     return 0;
492     }
493     diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
494     index 795c9d9c96a6..2051d926e303 100644
495     --- a/drivers/bus/sunxi-rsb.c
496     +++ b/drivers/bus/sunxi-rsb.c
497     @@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = {
498     .match = sunxi_rsb_device_match,
499     .probe = sunxi_rsb_device_probe,
500     .remove = sunxi_rsb_device_remove,
501     + .uevent = of_device_uevent_modalias,
502     };
503    
504     static void sunxi_rsb_dev_release(struct device *dev)
505     diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
506     index c5aac25a5738..b365ad78ac27 100644
507     --- a/drivers/crypto/n2_core.c
508     +++ b/drivers/crypto/n2_core.c
509     @@ -1620,6 +1620,7 @@ static int queue_cache_init(void)
510     CWQ_ENTRY_SIZE, 0, NULL);
511     if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
512     kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
513     + queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
514     return -ENOMEM;
515     }
516     return 0;
517     @@ -1629,6 +1630,8 @@ static void queue_cache_destroy(void)
518     {
519     kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
520     kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
521     + queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
522     + queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
523     }
524    
525     static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
526     diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
527     index cd834da5934a..59603a5728f7 100644
528     --- a/drivers/input/mouse/elantech.c
529     +++ b/drivers/input/mouse/elantech.c
530     @@ -1609,7 +1609,7 @@ static int elantech_set_properties(struct elantech_data *etd)
531     case 5:
532     etd->hw_version = 3;
533     break;
534     - case 6 ... 14:
535     + case 6 ... 15:
536     etd->hw_version = 4;
537     break;
538     default:
539     diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
540     index d3d975ae24b7..7f294f785ce6 100644
541     --- a/drivers/iommu/arm-smmu-v3.c
542     +++ b/drivers/iommu/arm-smmu-v3.c
543     @@ -1547,13 +1547,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
544     domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
545     domain->geometry.aperture_end = (1UL << ias) - 1;
546     domain->geometry.force_aperture = true;
547     - smmu_domain->pgtbl_ops = pgtbl_ops;
548    
549     ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
550     - if (ret < 0)
551     + if (ret < 0) {
552     free_io_pgtable_ops(pgtbl_ops);
553     + return ret;
554     + }
555    
556     - return ret;
557     + smmu_domain->pgtbl_ops = pgtbl_ops;
558     + return 0;
559     }
560    
561     static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
562     @@ -1580,7 +1582,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
563    
564     static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
565     {
566     - int i;
567     + int i, j;
568     struct arm_smmu_master_data *master = fwspec->iommu_priv;
569     struct arm_smmu_device *smmu = master->smmu;
570    
571     @@ -1588,6 +1590,13 @@ static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
572     u32 sid = fwspec->ids[i];
573     __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
574    
575     + /* Bridged PCI devices may end up with duplicated IDs */
576     + for (j = 0; j < i; j++)
577     + if (fwspec->ids[j] == sid)
578     + break;
579     + if (j < i)
580     + continue;
581     +
582     arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
583     }
584    
585     diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
586     index b121bf4ed73a..3b8911cd3a19 100644
587     --- a/drivers/mtd/nand/pxa3xx_nand.c
588     +++ b/drivers/mtd/nand/pxa3xx_nand.c
589     @@ -950,6 +950,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
590    
591     switch (command) {
592     case NAND_CMD_READ0:
593     + case NAND_CMD_READOOB:
594     case NAND_CMD_PAGEPROG:
595     info->use_ecc = 1;
596     break;
597     diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
598     index 62469c60be23..75f942ae5176 100644
599     --- a/fs/nfsd/auth.c
600     +++ b/fs/nfsd/auth.c
601     @@ -59,6 +59,9 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
602     gi->gid[i] = exp->ex_anon_gid;
603     else
604     gi->gid[i] = rqgi->gid[i];
605     +
606     + /* Each thread allocates its own gi, no race */
607     + groups_sort(gi);
608     }
609     } else {
610     gi = get_group_info(rqgi);
611     diff --git a/include/linux/cred.h b/include/linux/cred.h
612     index f0e70a1bb3ac..cf1a5d0c4eb4 100644
613     --- a/include/linux/cred.h
614     +++ b/include/linux/cred.h
615     @@ -82,6 +82,7 @@ extern int set_current_groups(struct group_info *);
616     extern void set_groups(struct cred *, struct group_info *);
617     extern int groups_search(const struct group_info *, kgid_t);
618     extern bool may_setgroups(void);
619     +extern void groups_sort(struct group_info *);
620    
621     /*
622     * The security context of a task
623     diff --git a/include/linux/fscache.h b/include/linux/fscache.h
624     index 115bb81912cc..94a8aae8f9e2 100644
625     --- a/include/linux/fscache.h
626     +++ b/include/linux/fscache.h
627     @@ -764,7 +764,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie,
628     {
629     if (fscache_cookie_valid(cookie) && PageFsCache(page))
630     return __fscache_maybe_release_page(cookie, page, gfp);
631     - return false;
632     + return true;
633     }
634    
635     /**
636     diff --git a/kernel/acct.c b/kernel/acct.c
637     index 74963d192c5d..37f1dc696fbd 100644
638     --- a/kernel/acct.c
639     +++ b/kernel/acct.c
640     @@ -99,7 +99,7 @@ static int check_free_space(struct bsd_acct_struct *acct)
641     {
642     struct kstatfs sbuf;
643    
644     - if (time_is_before_jiffies(acct->needcheck))
645     + if (time_is_after_jiffies(acct->needcheck))
646     goto out;
647    
648     /* May block */
649     diff --git a/kernel/groups.c b/kernel/groups.c
650     index 2fcadd66a8fd..94bde5210e3d 100644
651     --- a/kernel/groups.c
652     +++ b/kernel/groups.c
653     @@ -77,7 +77,7 @@ static int groups_from_user(struct group_info *group_info,
654     }
655    
656     /* a simple Shell sort */
657     -static void groups_sort(struct group_info *group_info)
658     +void groups_sort(struct group_info *group_info)
659     {
660     int base, max, stride;
661     int gidsetsize = group_info->ngroups;
662     @@ -103,6 +103,7 @@ static void groups_sort(struct group_info *group_info)
663     stride /= 3;
664     }
665     }
666     +EXPORT_SYMBOL(groups_sort);
667    
668     /* a simple bsearch */
669     int groups_search(const struct group_info *group_info, kgid_t grp)
670     @@ -134,7 +135,6 @@ int groups_search(const struct group_info *group_info, kgid_t grp)
671     void set_groups(struct cred *new, struct group_info *group_info)
672     {
673     put_group_info(new->group_info);
674     - groups_sort(group_info);
675     get_group_info(group_info);
676     new->group_info = group_info;
677     }
678     @@ -218,6 +218,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
679     return retval;
680     }
681    
682     + groups_sort(group_info);
683     retval = set_current_groups(group_info);
684     put_group_info(group_info);
685    
686     diff --git a/kernel/signal.c b/kernel/signal.c
687     index e48668c3c972..7ebe236a5364 100644
688     --- a/kernel/signal.c
689     +++ b/kernel/signal.c
690     @@ -72,7 +72,7 @@ static int sig_task_ignored(struct task_struct *t, int sig, bool force)
691     handler = sig_handler(t, sig);
692    
693     if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
694     - handler == SIG_DFL && !force)
695     + handler == SIG_DFL && !(force && sig_kernel_only(sig)))
696     return 1;
697    
698     return sig_handler_ignored(handler, sig);
699     @@ -88,13 +88,15 @@ static int sig_ignored(struct task_struct *t, int sig, bool force)
700     if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
701     return 0;
702    
703     - if (!sig_task_ignored(t, sig, force))
704     - return 0;
705     -
706     /*
707     - * Tracers may want to know about even ignored signals.
708     + * Tracers may want to know about even ignored signal unless it
709     + * is SIGKILL which can't be reported anyway but can be ignored
710     + * by SIGNAL_UNKILLABLE task.
711     */
712     - return !t->ptrace;
713     + if (t->ptrace && sig != SIGKILL)
714     + return 0;
715     +
716     + return sig_task_ignored(t, sig, force);
717     }
718    
719     /*
720     @@ -917,9 +919,9 @@ static void complete_signal(int sig, struct task_struct *p, int group)
721     * then start taking the whole group down immediately.
722     */
723     if (sig_fatal(p, sig) &&
724     - !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
725     + !(signal->flags & SIGNAL_GROUP_EXIT) &&
726     !sigismember(&t->real_blocked, sig) &&
727     - (sig == SIGKILL || !t->ptrace)) {
728     + (sig == SIGKILL || !p->ptrace)) {
729     /*
730     * This signal will be fatal to the whole group.
731     */
732     diff --git a/kernel/uid16.c b/kernel/uid16.c
733     index cc40793464e3..dcffcce9d75e 100644
734     --- a/kernel/uid16.c
735     +++ b/kernel/uid16.c
736     @@ -190,6 +190,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
737     return retval;
738     }
739    
740     + groups_sort(group_info);
741     retval = set_current_groups(group_info);
742     put_group_info(group_info);
743    
744     diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
745     index 25d9a9cf7b66..624c322af3ab 100644
746     --- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
747     +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
748     @@ -231,6 +231,7 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr,
749     goto out_free_groups;
750     creds->cr_group_info->gid[i] = kgid;
751     }
752     + groups_sort(creds->cr_group_info);
753    
754     return 0;
755     out_free_groups:
756     diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
757     index 153082598522..6a08bc451247 100644
758     --- a/net/sunrpc/auth_gss/svcauth_gss.c
759     +++ b/net/sunrpc/auth_gss/svcauth_gss.c
760     @@ -481,6 +481,7 @@ static int rsc_parse(struct cache_detail *cd,
761     goto out;
762     rsci.cred.cr_group_info->gid[i] = kgid;
763     }
764     + groups_sort(rsci.cred.cr_group_info);
765    
766     /* mech name */
767     len = qword_get(&mesg, buf, mlen);
768     diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
769     index 64af4f034de6..738a243c68a2 100644
770     --- a/net/sunrpc/svcauth_unix.c
771     +++ b/net/sunrpc/svcauth_unix.c
772     @@ -520,6 +520,7 @@ static int unix_gid_parse(struct cache_detail *cd,
773     ug.gi->gid[i] = kgid;
774     }
775    
776     + groups_sort(ug.gi);
777     ugp = unix_gid_lookup(cd, uid);
778     if (ugp) {
779     struct cache_head *ch;
780     @@ -819,6 +820,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
781     kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
782     cred->cr_group_info->gid[i] = kgid;
783     }
784     + groups_sort(cred->cr_group_info);
785     if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
786     *authp = rpc_autherr_badverf;
787     return SVC_DENIED;