Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.6/0102-4.6.3-all-fixes.patch

Parent Directory | Revision Log


Revision 2817 - (show annotations) (download)
Thu Aug 4 13:24:40 2016 UTC (7 years, 9 months ago) by niro
File size: 144534 byte(s)
-linux-4.6.3
1 diff --git a/Makefile b/Makefile
2 index 93068c2d0656..c62b531d5a85 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 6
8 -SUBLEVEL = 2
9 +SUBLEVEL = 3
10 EXTRAVERSION =
11 NAME = Charred Weasel
12
13 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
14 index ef9119f7462e..4d9375814b53 100644
15 --- a/arch/arm/kernel/ptrace.c
16 +++ b/arch/arm/kernel/ptrace.c
17 @@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
18 if (ret)
19 return ret;
20
21 - vfp_flush_hwstate(thread);
22 thread->vfpstate.hard = new_vfp;
23 + vfp_flush_hwstate(thread);
24
25 return 0;
26 }
27 diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
28 index 24ed037f09fd..83d48a599f69 100644
29 --- a/arch/arm64/include/asm/elf.h
30 +++ b/arch/arm64/include/asm/elf.h
31 @@ -160,14 +160,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
32 #define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
33 #endif
34
35 -#ifdef CONFIG_COMPAT
36 -
37 #ifdef __AARCH64EB__
38 #define COMPAT_ELF_PLATFORM ("v8b")
39 #else
40 #define COMPAT_ELF_PLATFORM ("v8l")
41 #endif
42
43 +#ifdef CONFIG_COMPAT
44 +
45 #define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)
46
47 /* AArch32 registers. */
48 diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
49 index f0c3fb7ec8cf..2d2d7cb04a5d 100644
50 --- a/arch/arm64/kernel/cpuinfo.c
51 +++ b/arch/arm64/kernel/cpuinfo.c
52 @@ -22,6 +22,8 @@
53
54 #include <linux/bitops.h>
55 #include <linux/bug.h>
56 +#include <linux/compat.h>
57 +#include <linux/elf.h>
58 #include <linux/init.h>
59 #include <linux/kernel.h>
60 #include <linux/personality.h>
61 @@ -104,6 +106,7 @@ static const char *const compat_hwcap2_str[] = {
62 static int c_show(struct seq_file *m, void *v)
63 {
64 int i, j;
65 + bool compat = personality(current->personality) == PER_LINUX32;
66
67 for_each_online_cpu(i) {
68 struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
69 @@ -115,6 +118,9 @@ static int c_show(struct seq_file *m, void *v)
70 * "processor". Give glibc what it expects.
71 */
72 seq_printf(m, "processor\t: %d\n", i);
73 + if (compat)
74 + seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
75 + MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
76
77 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
78 loops_per_jiffy / (500000UL/HZ),
79 @@ -127,7 +133,7 @@ static int c_show(struct seq_file *m, void *v)
80 * software which does already (at least for 32-bit).
81 */
82 seq_puts(m, "Features\t:");
83 - if (personality(current->personality) == PER_LINUX32) {
84 + if (compat) {
85 #ifdef CONFIG_COMPAT
86 for (j = 0; compat_hwcap_str[j]; j++)
87 if (compat_elf_hwcap & (1 << j))
88 diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
89 index fff7cd42b3a3..3129df9d3a73 100644
90 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
91 +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
92 @@ -190,12 +190,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
93 if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
94 continue;
95
96 - if (cpu_if->vgic_elrsr & (1 << i)) {
97 + if (cpu_if->vgic_elrsr & (1 << i))
98 cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
99 - continue;
100 - }
101 + else
102 + cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
103
104 - cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
105 __gic_v3_set_lr(0, i);
106 }
107
108 diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
109 index 3ae4a28c4aed..10b79e9e87d1 100644
110 --- a/arch/arm64/mm/fault.c
111 +++ b/arch/arm64/mm/fault.c
112 @@ -109,7 +109,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
113 * PTE_RDONLY is cleared by default in the asm below, so set it in
114 * back if necessary (read-only or clean PTE).
115 */
116 - if (!pte_write(entry) || !dirty)
117 + if (!pte_write(entry) || !pte_sw_dirty(entry))
118 pte_val(entry) |= PTE_RDONLY;
119
120 /*
121 diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
122 index d7c0acb35ec2..8d49614d600d 100644
123 --- a/arch/parisc/kernel/unaligned.c
124 +++ b/arch/parisc/kernel/unaligned.c
125 @@ -666,7 +666,7 @@ void handle_unaligned(struct pt_regs *regs)
126 break;
127 }
128
129 - if (modify && R1(regs->iir))
130 + if (ret == 0 && modify && R1(regs->iir))
131 regs->gr[R1(regs->iir)] = newbase;
132
133
134 @@ -677,6 +677,14 @@ void handle_unaligned(struct pt_regs *regs)
135
136 if (ret)
137 {
138 + /*
139 + * The unaligned handler failed.
140 + * If we were called by __get_user() or __put_user() jump
141 + * to it's exception fixup handler instead of crashing.
142 + */
143 + if (!user_mode(regs) && fixup_exception(regs))
144 + return;
145 +
146 printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
147 die_if_kernel("Unaligned data reference", regs, 28);
148
149 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
150 index f5f4c66bbbc9..166d8631747f 100644
151 --- a/arch/powerpc/include/asm/reg.h
152 +++ b/arch/powerpc/include/asm/reg.h
153 @@ -715,7 +715,7 @@
154 #define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
155 #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
156 #define SPRN_MMCR1 798
157 -#define SPRN_MMCR2 769
158 +#define SPRN_MMCR2 785
159 #define SPRN_MMCRA 0x312
160 #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
161 #define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
162 @@ -752,13 +752,13 @@
163 #define SPRN_PMC6 792
164 #define SPRN_PMC7 793
165 #define SPRN_PMC8 794
166 -#define SPRN_SIAR 780
167 -#define SPRN_SDAR 781
168 #define SPRN_SIER 784
169 #define SIER_SIPR 0x2000000 /* Sampled MSR_PR */
170 #define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
171 #define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
172 #define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
173 +#define SPRN_SIAR 796
174 +#define SPRN_SDAR 797
175 #define SPRN_TACR 888
176 #define SPRN_TCSCR 889
177 #define SPRN_CSIGR 890
178 diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
179 index da5192590c44..ccd2037c797f 100644
180 --- a/arch/powerpc/kernel/prom_init.c
181 +++ b/arch/powerpc/kernel/prom_init.c
182 @@ -656,6 +656,7 @@ unsigned char ibm_architecture_vec[] = {
183 W(0xffff0000), W(0x003e0000), /* POWER6 */
184 W(0xffff0000), W(0x003f0000), /* POWER7 */
185 W(0xffff0000), W(0x004b0000), /* POWER8E */
186 + W(0xffff0000), W(0x004c0000), /* POWER8NVL */
187 W(0xffff0000), W(0x004d0000), /* POWER8 */
188 W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
189 W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
190 diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
191 index 7635b1c6b5da..f4acba25fa5e 100644
192 --- a/arch/powerpc/mm/hash_utils_64.c
193 +++ b/arch/powerpc/mm/hash_utils_64.c
194 @@ -159,6 +159,19 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
195 },
196 };
197
198 +/*
199 + * 'R' and 'C' update notes:
200 + * - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
201 + * create writeable HPTEs without C set, because the hcall H_PROTECT
202 + * that we use in that case will not update C
203 + * - The above is however not a problem, because we also don't do that
204 + * fancy "no flush" variant of eviction and we use H_REMOVE which will
205 + * do the right thing and thus we don't have the race I described earlier
206 + *
207 + * - Under bare metal, we do have the race, so we need R and C set
208 + * - We make sure R is always set and never lost
209 + * - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
210 + */
211 unsigned long htab_convert_pte_flags(unsigned long pteflags)
212 {
213 unsigned long rflags = 0;
214 @@ -180,9 +193,14 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
215 rflags |= 0x1;
216 }
217 /*
218 - * Always add "C" bit for perf. Memory coherence is always enabled
219 + * We can't allow hardware to update hpte bits. Hence always
220 + * set 'R' bit and set 'C' if it is a write fault
221 + * Memory coherence is always enabled
222 */
223 - rflags |= HPTE_R_C | HPTE_R_M;
224 + rflags |= HPTE_R_R | HPTE_R_M;
225 +
226 + if (pteflags & _PAGE_DIRTY)
227 + rflags |= HPTE_R_C;
228 /*
229 * Add in WIG bits
230 */
231 diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
232 index ac3ffd97e059..405baaf96864 100644
233 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c
234 +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
235 @@ -615,29 +615,50 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
236 {
237 int config_addr;
238 int ret;
239 + /* Waiting 0.2s maximum before skipping configuration */
240 + int max_wait = 200;
241
242 /* Figure out the PE address */
243 config_addr = pe->config_addr;
244 if (pe->addr)
245 config_addr = pe->addr;
246
247 - /* Use new configure-pe function, if supported */
248 - if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
249 - ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
250 - config_addr, BUID_HI(pe->phb->buid),
251 - BUID_LO(pe->phb->buid));
252 - } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
253 - ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
254 - config_addr, BUID_HI(pe->phb->buid),
255 - BUID_LO(pe->phb->buid));
256 - } else {
257 - return -EFAULT;
258 - }
259 + while (max_wait > 0) {
260 + /* Use new configure-pe function, if supported */
261 + if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
262 + ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
263 + config_addr, BUID_HI(pe->phb->buid),
264 + BUID_LO(pe->phb->buid));
265 + } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
266 + ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
267 + config_addr, BUID_HI(pe->phb->buid),
268 + BUID_LO(pe->phb->buid));
269 + } else {
270 + return -EFAULT;
271 + }
272
273 - if (ret)
274 - pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
275 - __func__, pe->phb->global_number, pe->addr, ret);
276 + if (!ret)
277 + return ret;
278 +
279 + /*
280 + * If RTAS returns a delay value that's above 100ms, cut it
281 + * down to 100ms in case firmware made a mistake. For more
282 + * on how these delay values work see rtas_busy_delay_time
283 + */
284 + if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
285 + ret <= RTAS_EXTENDED_DELAY_MAX)
286 + ret = RTAS_EXTENDED_DELAY_MIN+2;
287 +
288 + max_wait -= rtas_busy_delay_time(ret);
289 +
290 + if (max_wait < 0)
291 + break;
292 +
293 + rtas_busy_delay(ret);
294 + }
295
296 + pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
297 + __func__, pe->phb->global_number, pe->addr, ret);
298 return ret;
299 }
300
301 diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
302 index f010c93a88b1..fda605dbc1b4 100644
303 --- a/arch/s390/net/bpf_jit.h
304 +++ b/arch/s390/net/bpf_jit.h
305 @@ -37,7 +37,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
306 * | | |
307 * +---------------+ |
308 * | 8 byte skbp | |
309 - * R15+170 -> +---------------+ |
310 + * R15+176 -> +---------------+ |
311 * | 8 byte hlen | |
312 * R15+168 -> +---------------+ |
313 * | 4 byte align | |
314 @@ -58,7 +58,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
315 #define STK_OFF (STK_SPACE - STK_160_UNUSED)
316 #define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
317 #define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
318 -#define STK_OFF_SKBP 170 /* Offset of SKB pointer on stack */
319 +#define STK_OFF_SKBP 176 /* Offset of SKB pointer on stack */
320
321 #define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */
322 #define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */
323 diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
324 index 3c0bfc1f2694..2662fcc67f8d 100644
325 --- a/arch/s390/net/bpf_jit_comp.c
326 +++ b/arch/s390/net/bpf_jit_comp.c
327 @@ -45,7 +45,7 @@ struct bpf_jit {
328 int labels[1]; /* Labels for local jumps */
329 };
330
331 -#define BPF_SIZE_MAX 0x7ffff /* Max size for program (20 bit signed displ) */
332 +#define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */
333
334 #define SEEN_SKB 1 /* skb access */
335 #define SEEN_MEM 2 /* use mem[] for temporary storage */
336 @@ -446,7 +446,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
337 emit_load_skb_data_hlen(jit);
338 if (jit->seen & SEEN_SKB_CHANGE)
339 /* stg %b1,ST_OFF_SKBP(%r0,%r15) */
340 - EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
341 + EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
342 STK_OFF_SKBP);
343 }
344
345 diff --git a/arch/sparc/include/asm/head_64.h b/arch/sparc/include/asm/head_64.h
346 index 10e9dabc4c41..f0700cfeedd7 100644
347 --- a/arch/sparc/include/asm/head_64.h
348 +++ b/arch/sparc/include/asm/head_64.h
349 @@ -15,6 +15,10 @@
350
351 #define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ)
352
353 +#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
354 +#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
355 +#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
356 +
357 #define __CHEETAH_ID 0x003e0014
358 #define __JALAPENO_ID 0x003e0016
359 #define __SERRANO_ID 0x003e0022
360 diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
361 index f089cfa249f3..5a189bf3c8ac 100644
362 --- a/arch/sparc/include/asm/pgtable_64.h
363 +++ b/arch/sparc/include/asm/pgtable_64.h
364 @@ -375,7 +375,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
365 #define pgprot_noncached pgprot_noncached
366
367 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
368 -static inline pte_t pte_mkhuge(pte_t pte)
369 +static inline unsigned long __pte_huge_mask(void)
370 {
371 unsigned long mask;
372
373 @@ -390,8 +390,19 @@ static inline pte_t pte_mkhuge(pte_t pte)
374 : "=r" (mask)
375 : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
376
377 - return __pte(pte_val(pte) | mask);
378 + return mask;
379 +}
380 +
381 +static inline pte_t pte_mkhuge(pte_t pte)
382 +{
383 + return __pte(pte_val(pte) | __pte_huge_mask());
384 +}
385 +
386 +static inline bool is_hugetlb_pte(pte_t pte)
387 +{
388 + return !!(pte_val(pte) & __pte_huge_mask());
389 }
390 +
391 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
392 static inline pmd_t pmd_mkhuge(pmd_t pmd)
393 {
394 @@ -403,6 +414,11 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
395 return __pmd(pte_val(pte));
396 }
397 #endif
398 +#else
399 +static inline bool is_hugetlb_pte(pte_t pte)
400 +{
401 + return false;
402 +}
403 #endif
404
405 static inline pte_t pte_mkdirty(pte_t pte)
406 @@ -858,6 +874,19 @@ static inline unsigned long pud_pfn(pud_t pud)
407 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
408 pte_t *ptep, pte_t orig, int fullmm);
409
410 +static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
411 + pte_t *ptep, pte_t orig, int fullmm)
412 +{
413 + /* It is more efficient to let flush_tlb_kernel_range()
414 + * handle init_mm tlb flushes.
415 + *
416 + * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
417 + * and SUN4V pte layout, so this inline test is fine.
418 + */
419 + if (likely(mm != &init_mm) && pte_accessible(mm, orig))
420 + tlb_batch_add(mm, vaddr, ptep, orig, fullmm);
421 +}
422 +
423 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
424 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
425 unsigned long addr,
426 @@ -874,15 +903,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
427 pte_t orig = *ptep;
428
429 *ptep = pte;
430 -
431 - /* It is more efficient to let flush_tlb_kernel_range()
432 - * handle init_mm tlb flushes.
433 - *
434 - * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
435 - * and SUN4V pte layout, so this inline test is fine.
436 - */
437 - if (likely(mm != &init_mm) && pte_accessible(mm, orig))
438 - tlb_batch_add(mm, addr, ptep, orig, fullmm);
439 + maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm);
440 }
441
442 #define set_pte_at(mm,addr,ptep,pte) \
443 diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
444 index dea1cfa2122b..a8e192e90700 100644
445 --- a/arch/sparc/include/asm/tlbflush_64.h
446 +++ b/arch/sparc/include/asm/tlbflush_64.h
447 @@ -8,6 +8,7 @@
448 #define TLB_BATCH_NR 192
449
450 struct tlb_batch {
451 + bool huge;
452 struct mm_struct *mm;
453 unsigned long tlb_nr;
454 unsigned long active;
455 @@ -16,7 +17,7 @@ struct tlb_batch {
456
457 void flush_tsb_kernel_range(unsigned long start, unsigned long end);
458 void flush_tsb_user(struct tlb_batch *tb);
459 -void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
460 +void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge);
461
462 /* TLB flush operations. */
463
464 diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h
465 index 71b5a67522ab..781b9f1dbdc2 100644
466 --- a/arch/sparc/include/asm/ttable.h
467 +++ b/arch/sparc/include/asm/ttable.h
468 @@ -589,8 +589,8 @@ user_rtt_fill_64bit: \
469 restored; \
470 nop; nop; nop; nop; nop; nop; \
471 nop; nop; nop; nop; nop; \
472 - ba,a,pt %xcc, user_rtt_fill_fixup; \
473 - ba,a,pt %xcc, user_rtt_fill_fixup; \
474 + ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
475 + ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
476 ba,a,pt %xcc, user_rtt_fill_fixup;
477
478
479 @@ -652,8 +652,8 @@ user_rtt_fill_32bit: \
480 restored; \
481 nop; nop; nop; nop; nop; \
482 nop; nop; nop; \
483 - ba,a,pt %xcc, user_rtt_fill_fixup; \
484 - ba,a,pt %xcc, user_rtt_fill_fixup; \
485 + ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
486 + ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
487 ba,a,pt %xcc, user_rtt_fill_fixup;
488
489
490 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
491 index 7cf9c6ea3f1f..fdb13327fded 100644
492 --- a/arch/sparc/kernel/Makefile
493 +++ b/arch/sparc/kernel/Makefile
494 @@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg
495 CFLAGS_REMOVE_pcr.o := -pg
496 endif
497
498 +obj-$(CONFIG_SPARC64) += urtt_fill.o
499 obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
500 obj-$(CONFIG_SPARC32) += etrap_32.o
501 obj-$(CONFIG_SPARC32) += rtrap_32.o
502 diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
503 index d08bdaffdbfc..216948ca4382 100644
504 --- a/arch/sparc/kernel/rtrap_64.S
505 +++ b/arch/sparc/kernel/rtrap_64.S
506 @@ -14,10 +14,6 @@
507 #include <asm/visasm.h>
508 #include <asm/processor.h>
509
510 -#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
511 -#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
512 -#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
513 -
514 #ifdef CONFIG_CONTEXT_TRACKING
515 # define SCHEDULE_USER schedule_user
516 #else
517 @@ -242,52 +238,17 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
518 wrpr %g1, %cwp
519 ba,a,pt %xcc, user_rtt_fill_64bit
520
521 -user_rtt_fill_fixup:
522 - rdpr %cwp, %g1
523 - add %g1, 1, %g1
524 - wrpr %g1, 0x0, %cwp
525 -
526 - rdpr %wstate, %g2
527 - sll %g2, 3, %g2
528 - wrpr %g2, 0x0, %wstate
529 -
530 - /* We know %canrestore and %otherwin are both zero. */
531 -
532 - sethi %hi(sparc64_kern_pri_context), %g2
533 - ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
534 - mov PRIMARY_CONTEXT, %g1
535 -
536 -661: stxa %g2, [%g1] ASI_DMMU
537 - .section .sun4v_1insn_patch, "ax"
538 - .word 661b
539 - stxa %g2, [%g1] ASI_MMU
540 - .previous
541 -
542 - sethi %hi(KERNBASE), %g1
543 - flush %g1
544 +user_rtt_fill_fixup_dax:
545 + ba,pt %xcc, user_rtt_fill_fixup_common
546 + mov 1, %g3
547
548 - or %g4, FAULT_CODE_WINFIXUP, %g4
549 - stb %g4, [%g6 + TI_FAULT_CODE]
550 - stx %g5, [%g6 + TI_FAULT_ADDR]
551 +user_rtt_fill_fixup_mna:
552 + ba,pt %xcc, user_rtt_fill_fixup_common
553 + mov 2, %g3
554
555 - mov %g6, %l1
556 - wrpr %g0, 0x0, %tl
557 -
558 -661: nop
559 - .section .sun4v_1insn_patch, "ax"
560 - .word 661b
561 - SET_GL(0)
562 - .previous
563 -
564 - wrpr %g0, RTRAP_PSTATE, %pstate
565 -
566 - mov %l1, %g6
567 - ldx [%g6 + TI_TASK], %g4
568 - LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
569 - call do_sparc64_fault
570 - add %sp, PTREGS_OFF, %o0
571 - ba,pt %xcc, rtrap
572 - nop
573 +user_rtt_fill_fixup:
574 + ba,pt %xcc, user_rtt_fill_fixup_common
575 + clr %g3
576
577 user_rtt_pre_restore:
578 add %g1, 1, %g1
579 diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
580 index 3c25241fa5cb..ebd0bfe25a72 100644
581 --- a/arch/sparc/kernel/signal32.c
582 +++ b/arch/sparc/kernel/signal32.c
583 @@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
584 return 0;
585 }
586
587 +/* Checks if the fp is valid. We always build signal frames which are
588 + * 16-byte aligned, therefore we can always enforce that the restore
589 + * frame has that property as well.
590 + */
591 +static bool invalid_frame_pointer(void __user *fp, int fplen)
592 +{
593 + if ((((unsigned long) fp) & 15) ||
594 + ((unsigned long)fp) > 0x100000000ULL - fplen)
595 + return true;
596 + return false;
597 +}
598 +
599 void do_sigreturn32(struct pt_regs *regs)
600 {
601 struct signal_frame32 __user *sf;
602 compat_uptr_t fpu_save;
603 compat_uptr_t rwin_save;
604 - unsigned int psr;
605 + unsigned int psr, ufp;
606 unsigned int pc, npc;
607 sigset_t set;
608 compat_sigset_t seta;
609 @@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs)
610 sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
611
612 /* 1. Make sure we are not getting garbage from the user */
613 - if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
614 - (((unsigned long) sf) & 3))
615 + if (invalid_frame_pointer(sf, sizeof(*sf)))
616 + goto segv;
617 +
618 + if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
619 + goto segv;
620 +
621 + if (ufp & 0x7)
622 goto segv;
623
624 - if (get_user(pc, &sf->info.si_regs.pc) ||
625 + if (__get_user(pc, &sf->info.si_regs.pc) ||
626 __get_user(npc, &sf->info.si_regs.npc))
627 goto segv;
628
629 @@ -227,7 +244,7 @@ segv:
630 asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
631 {
632 struct rt_signal_frame32 __user *sf;
633 - unsigned int psr, pc, npc;
634 + unsigned int psr, pc, npc, ufp;
635 compat_uptr_t fpu_save;
636 compat_uptr_t rwin_save;
637 sigset_t set;
638 @@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
639 sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
640
641 /* 1. Make sure we are not getting garbage from the user */
642 - if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
643 - (((unsigned long) sf) & 3))
644 + if (invalid_frame_pointer(sf, sizeof(*sf)))
645 goto segv;
646
647 - if (get_user(pc, &sf->regs.pc) ||
648 + if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
649 + goto segv;
650 +
651 + if (ufp & 0x7)
652 + goto segv;
653 +
654 + if (__get_user(pc, &sf->regs.pc) ||
655 __get_user(npc, &sf->regs.npc))
656 goto segv;
657
658 @@ -307,14 +329,6 @@ segv:
659 force_sig(SIGSEGV, current);
660 }
661
662 -/* Checks if the fp is valid */
663 -static int invalid_frame_pointer(void __user *fp, int fplen)
664 -{
665 - if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
666 - return 1;
667 - return 0;
668 -}
669 -
670 static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
671 {
672 unsigned long sp;
673 diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
674 index 52aa5e4ce5e7..c3c12efe0bc0 100644
675 --- a/arch/sparc/kernel/signal_32.c
676 +++ b/arch/sparc/kernel/signal_32.c
677 @@ -60,10 +60,22 @@ struct rt_signal_frame {
678 #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7)))
679 #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))
680
681 +/* Checks if the fp is valid. We always build signal frames which are
682 + * 16-byte aligned, therefore we can always enforce that the restore
683 + * frame has that property as well.
684 + */
685 +static inline bool invalid_frame_pointer(void __user *fp, int fplen)
686 +{
687 + if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
688 + return true;
689 +
690 + return false;
691 +}
692 +
693 asmlinkage void do_sigreturn(struct pt_regs *regs)
694 {
695 + unsigned long up_psr, pc, npc, ufp;
696 struct signal_frame __user *sf;
697 - unsigned long up_psr, pc, npc;
698 sigset_t set;
699 __siginfo_fpu_t __user *fpu_save;
700 __siginfo_rwin_t __user *rwin_save;
701 @@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
702 sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
703
704 /* 1. Make sure we are not getting garbage from the user */
705 - if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
706 + if (!invalid_frame_pointer(sf, sizeof(*sf)))
707 + goto segv_and_exit;
708 +
709 + if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
710 goto segv_and_exit;
711
712 - if (((unsigned long) sf) & 3)
713 + if (ufp & 0x7)
714 goto segv_and_exit;
715
716 err = __get_user(pc, &sf->info.si_regs.pc);
717 @@ -127,7 +142,7 @@ segv_and_exit:
718 asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
719 {
720 struct rt_signal_frame __user *sf;
721 - unsigned int psr, pc, npc;
722 + unsigned int psr, pc, npc, ufp;
723 __siginfo_fpu_t __user *fpu_save;
724 __siginfo_rwin_t __user *rwin_save;
725 sigset_t set;
726 @@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
727
728 synchronize_user_stack();
729 sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
730 - if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
731 - (((unsigned long) sf) & 0x03))
732 + if (!invalid_frame_pointer(sf, sizeof(*sf)))
733 + goto segv;
734 +
735 + if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
736 + goto segv;
737 +
738 + if (ufp & 0x7)
739 goto segv;
740
741 err = __get_user(pc, &sf->regs.pc);
742 @@ -178,15 +198,6 @@ segv:
743 force_sig(SIGSEGV, current);
744 }
745
746 -/* Checks if the fp is valid */
747 -static inline int invalid_frame_pointer(void __user *fp, int fplen)
748 -{
749 - if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
750 - return 1;
751 -
752 - return 0;
753 -}
754 -
755 static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
756 {
757 unsigned long sp = regs->u_regs[UREG_FP];
758 diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
759 index 39aaec173f66..5ee930c48f4c 100644
760 --- a/arch/sparc/kernel/signal_64.c
761 +++ b/arch/sparc/kernel/signal_64.c
762 @@ -234,6 +234,17 @@ do_sigsegv:
763 goto out;
764 }
765
766 +/* Checks if the fp is valid. We always build rt signal frames which
767 + * are 16-byte aligned, therefore we can always enforce that the
768 + * restore frame has that property as well.
769 + */
770 +static bool invalid_frame_pointer(void __user *fp)
771 +{
772 + if (((unsigned long) fp) & 15)
773 + return true;
774 + return false;
775 +}
776 +
777 struct rt_signal_frame {
778 struct sparc_stackf ss;
779 siginfo_t info;
780 @@ -246,8 +257,8 @@ struct rt_signal_frame {
781
782 void do_rt_sigreturn(struct pt_regs *regs)
783 {
784 + unsigned long tpc, tnpc, tstate, ufp;
785 struct rt_signal_frame __user *sf;
786 - unsigned long tpc, tnpc, tstate;
787 __siginfo_fpu_t __user *fpu_save;
788 __siginfo_rwin_t __user *rwin_save;
789 sigset_t set;
790 @@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs)
791 (regs->u_regs [UREG_FP] + STACK_BIAS);
792
793 /* 1. Make sure we are not getting garbage from the user */
794 - if (((unsigned long) sf) & 3)
795 + if (invalid_frame_pointer(sf))
796 + goto segv;
797 +
798 + if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
799 goto segv;
800
801 - err = get_user(tpc, &sf->regs.tpc);
802 + if ((ufp + STACK_BIAS) & 0x7)
803 + goto segv;
804 +
805 + err = __get_user(tpc, &sf->regs.tpc);
806 err |= __get_user(tnpc, &sf->regs.tnpc);
807 if (test_thread_flag(TIF_32BIT)) {
808 tpc &= 0xffffffff;
809 @@ -308,14 +325,6 @@ segv:
810 force_sig(SIGSEGV, current);
811 }
812
813 -/* Checks if the fp is valid */
814 -static int invalid_frame_pointer(void __user *fp)
815 -{
816 - if (((unsigned long) fp) & 15)
817 - return 1;
818 - return 0;
819 -}
820 -
821 static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
822 {
823 unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
824 diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c
825 index 0f6eebe71e6c..e5fe8cef9a69 100644
826 --- a/arch/sparc/kernel/sigutil_32.c
827 +++ b/arch/sparc/kernel/sigutil_32.c
828 @@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
829 int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
830 {
831 int err;
832 +
833 + if (((unsigned long) fpu) & 3)
834 + return -EFAULT;
835 +
836 #ifdef CONFIG_SMP
837 if (test_tsk_thread_flag(current, TIF_USEDFPU))
838 regs->psr &= ~PSR_EF;
839 @@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
840 struct thread_info *t = current_thread_info();
841 int i, wsaved, err;
842
843 - __get_user(wsaved, &rp->wsaved);
844 + if (((unsigned long) rp) & 3)
845 + return -EFAULT;
846 +
847 + get_user(wsaved, &rp->wsaved);
848 if (wsaved > NSWINS)
849 return -EFAULT;
850
851 diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
852 index 387834a9c56a..36aadcbeac69 100644
853 --- a/arch/sparc/kernel/sigutil_64.c
854 +++ b/arch/sparc/kernel/sigutil_64.c
855 @@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
856 unsigned long fprs;
857 int err;
858
859 - err = __get_user(fprs, &fpu->si_fprs);
860 + if (((unsigned long) fpu) & 7)
861 + return -EFAULT;
862 +
863 + err = get_user(fprs, &fpu->si_fprs);
864 fprs_write(0);
865 regs->tstate &= ~TSTATE_PEF;
866 if (fprs & FPRS_DL)
867 @@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
868 struct thread_info *t = current_thread_info();
869 int i, wsaved, err;
870
871 - __get_user(wsaved, &rp->wsaved);
872 + if (((unsigned long) rp) & 7)
873 + return -EFAULT;
874 +
875 + get_user(wsaved, &rp->wsaved);
876 if (wsaved > NSWINS)
877 return -EFAULT;
878
879 diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
880 new file mode 100644
881 index 000000000000..5604a2b051d4
882 --- /dev/null
883 +++ b/arch/sparc/kernel/urtt_fill.S
884 @@ -0,0 +1,98 @@
885 +#include <asm/thread_info.h>
886 +#include <asm/trap_block.h>
887 +#include <asm/spitfire.h>
888 +#include <asm/ptrace.h>
889 +#include <asm/head.h>
890 +
891 + .text
892 + .align 8
893 + .globl user_rtt_fill_fixup_common
894 +user_rtt_fill_fixup_common:
895 + rdpr %cwp, %g1
896 + add %g1, 1, %g1
897 + wrpr %g1, 0x0, %cwp
898 +
899 + rdpr %wstate, %g2
900 + sll %g2, 3, %g2
901 + wrpr %g2, 0x0, %wstate
902 +
903 + /* We know %canrestore and %otherwin are both zero. */
904 +
905 + sethi %hi(sparc64_kern_pri_context), %g2
906 + ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
907 + mov PRIMARY_CONTEXT, %g1
908 +
909 +661: stxa %g2, [%g1] ASI_DMMU
910 + .section .sun4v_1insn_patch, "ax"
911 + .word 661b
912 + stxa %g2, [%g1] ASI_MMU
913 + .previous
914 +
915 + sethi %hi(KERNBASE), %g1
916 + flush %g1
917 +
918 + mov %g4, %l4
919 + mov %g5, %l5
920 + brnz,pn %g3, 1f
921 + mov %g3, %l3
922 +
923 + or %g4, FAULT_CODE_WINFIXUP, %g4
924 + stb %g4, [%g6 + TI_FAULT_CODE]
925 + stx %g5, [%g6 + TI_FAULT_ADDR]
926 +1:
927 + mov %g6, %l1
928 + wrpr %g0, 0x0, %tl
929 +
930 +661: nop
931 + .section .sun4v_1insn_patch, "ax"
932 + .word 661b
933 + SET_GL(0)
934 + .previous
935 +
936 + wrpr %g0, RTRAP_PSTATE, %pstate
937 +
938 + mov %l1, %g6
939 + ldx [%g6 + TI_TASK], %g4
940 + LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
941 +
942 + brnz,pn %l3, 1f
943 + nop
944 +
945 + call do_sparc64_fault
946 + add %sp, PTREGS_OFF, %o0
947 + ba,pt %xcc, rtrap
948 + nop
949 +
950 +1: cmp %g3, 2
951 + bne,pn %xcc, 2f
952 + nop
953 +
954 + sethi %hi(tlb_type), %g1
955 + lduw [%g1 + %lo(tlb_type)], %g1
956 + cmp %g1, 3
957 + bne,pt %icc, 1f
958 + add %sp, PTREGS_OFF, %o0
959 + mov %l4, %o2
960 + call sun4v_do_mna
961 + mov %l5, %o1
962 + ba,a,pt %xcc, rtrap
963 +1: mov %l4, %o1
964 + mov %l5, %o2
965 + call mem_address_unaligned
966 + nop
967 + ba,a,pt %xcc, rtrap
968 +
969 +2: sethi %hi(tlb_type), %g1
970 + mov %l4, %o1
971 + lduw [%g1 + %lo(tlb_type)], %g1
972 + mov %l5, %o2
973 + cmp %g1, 3
974 + bne,pt %icc, 1f
975 + add %sp, PTREGS_OFF, %o0
976 + call sun4v_data_access_exception
977 + nop
978 + ba,a,pt %xcc, rtrap
979 +
980 +1: call spitfire_data_access_exception
981 + nop
982 + ba,a,pt %xcc, rtrap
983 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
984 index 4977800e9770..ba52e6466a82 100644
985 --- a/arch/sparc/mm/hugetlbpage.c
986 +++ b/arch/sparc/mm/hugetlbpage.c
987 @@ -176,17 +176,31 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
988 pte_t *ptep, pte_t entry)
989 {
990 int i;
991 + pte_t orig[2];
992 + unsigned long nptes;
993
994 if (!pte_present(*ptep) && pte_present(entry))
995 mm->context.huge_pte_count++;
996
997 addr &= HPAGE_MASK;
998 - for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
999 - set_pte_at(mm, addr, ptep, entry);
1000 +
1001 + nptes = 1 << HUGETLB_PAGE_ORDER;
1002 + orig[0] = *ptep;
1003 + orig[1] = *(ptep + nptes / 2);
1004 + for (i = 0; i < nptes; i++) {
1005 + *ptep = entry;
1006 ptep++;
1007 addr += PAGE_SIZE;
1008 pte_val(entry) += PAGE_SIZE;
1009 }
1010 +
1011 + /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
1012 + addr -= REAL_HPAGE_SIZE;
1013 + ptep -= nptes / 2;
1014 + maybe_tlb_batch_add(mm, addr, ptep, orig[1], 0);
1015 + addr -= REAL_HPAGE_SIZE;
1016 + ptep -= nptes / 2;
1017 + maybe_tlb_batch_add(mm, addr, ptep, orig[0], 0);
1018 }
1019
1020 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
1021 @@ -194,19 +208,28 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
1022 {
1023 pte_t entry;
1024 int i;
1025 + unsigned long nptes;
1026
1027 entry = *ptep;
1028 if (pte_present(entry))
1029 mm->context.huge_pte_count--;
1030
1031 addr &= HPAGE_MASK;
1032 -
1033 - for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
1034 - pte_clear(mm, addr, ptep);
1035 + nptes = 1 << HUGETLB_PAGE_ORDER;
1036 + for (i = 0; i < nptes; i++) {
1037 + *ptep = __pte(0UL);
1038 addr += PAGE_SIZE;
1039 ptep++;
1040 }
1041
1042 + /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
1043 + addr -= REAL_HPAGE_SIZE;
1044 + ptep -= nptes / 2;
1045 + maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
1046 + addr -= REAL_HPAGE_SIZE;
1047 + ptep -= nptes / 2;
1048 + maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
1049 +
1050 return entry;
1051 }
1052
1053 diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
1054 index 09e838801e39..14bb0d5ed3c6 100644
1055 --- a/arch/sparc/mm/init_64.c
1056 +++ b/arch/sparc/mm/init_64.c
1057 @@ -324,18 +324,6 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
1058 tsb_insert(tsb, tag, tte);
1059 }
1060
1061 -#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
1062 -static inline bool is_hugetlb_pte(pte_t pte)
1063 -{
1064 - if ((tlb_type == hypervisor &&
1065 - (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
1066 - (tlb_type != hypervisor &&
1067 - (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
1068 - return true;
1069 - return false;
1070 -}
1071 -#endif
1072 -
1073 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
1074 {
1075 struct mm_struct *mm;
1076 @@ -2836,9 +2824,10 @@ void hugetlb_setup(struct pt_regs *regs)
1077 * the Data-TLB for huge pages.
1078 */
1079 if (tlb_type == cheetah_plus) {
1080 + bool need_context_reload = false;
1081 unsigned long ctx;
1082
1083 - spin_lock(&ctx_alloc_lock);
1084 + spin_lock_irq(&ctx_alloc_lock);
1085 ctx = mm->context.sparc64_ctx_val;
1086 ctx &= ~CTX_PGSZ_MASK;
1087 ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
1088 @@ -2857,9 +2846,12 @@ void hugetlb_setup(struct pt_regs *regs)
1089 * also executing in this address space.
1090 */
1091 mm->context.sparc64_ctx_val = ctx;
1092 - on_each_cpu(context_reload, mm, 0);
1093 + need_context_reload = true;
1094 }
1095 - spin_unlock(&ctx_alloc_lock);
1096 + spin_unlock_irq(&ctx_alloc_lock);
1097 +
1098 + if (need_context_reload)
1099 + on_each_cpu(context_reload, mm, 0);
1100 }
1101 }
1102 #endif
1103 diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
1104 index 9df2190c097e..f81cd9736700 100644
1105 --- a/arch/sparc/mm/tlb.c
1106 +++ b/arch/sparc/mm/tlb.c
1107 @@ -67,7 +67,7 @@ void arch_leave_lazy_mmu_mode(void)
1108 }
1109
1110 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
1111 - bool exec)
1112 + bool exec, bool huge)
1113 {
1114 struct tlb_batch *tb = &get_cpu_var(tlb_batch);
1115 unsigned long nr;
1116 @@ -84,13 +84,21 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
1117 }
1118
1119 if (!tb->active) {
1120 - flush_tsb_user_page(mm, vaddr);
1121 + flush_tsb_user_page(mm, vaddr, huge);
1122 global_flush_tlb_page(mm, vaddr);
1123 goto out;
1124 }
1125
1126 - if (nr == 0)
1127 + if (nr == 0) {
1128 tb->mm = mm;
1129 + tb->huge = huge;
1130 + }
1131 +
1132 + if (tb->huge != huge) {
1133 + flush_tlb_pending();
1134 + tb->huge = huge;
1135 + nr = 0;
1136 + }
1137
1138 tb->vaddrs[nr] = vaddr;
1139 tb->tlb_nr = ++nr;
1140 @@ -104,6 +112,8 @@ out:
1141 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
1142 pte_t *ptep, pte_t orig, int fullmm)
1143 {
1144 + bool huge = is_hugetlb_pte(orig);
1145 +
1146 if (tlb_type != hypervisor &&
1147 pte_dirty(orig)) {
1148 unsigned long paddr, pfn = pte_pfn(orig);
1149 @@ -129,7 +139,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
1150
1151 no_cache_flush:
1152 if (!fullmm)
1153 - tlb_batch_add_one(mm, vaddr, pte_exec(orig));
1154 + tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge);
1155 }
1156
1157 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1158 @@ -145,7 +155,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
1159 if (pte_val(*pte) & _PAGE_VALID) {
1160 bool exec = pte_exec(*pte);
1161
1162 - tlb_batch_add_one(mm, vaddr, exec);
1163 + tlb_batch_add_one(mm, vaddr, exec, false);
1164 }
1165 pte++;
1166 vaddr += PAGE_SIZE;
1167 @@ -185,8 +195,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1168 pte_t orig_pte = __pte(pmd_val(orig));
1169 bool exec = pte_exec(orig_pte);
1170
1171 - tlb_batch_add_one(mm, addr, exec);
1172 - tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
1173 + tlb_batch_add_one(mm, addr, exec, true);
1174 + tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
1175 + true);
1176 } else {
1177 tlb_batch_pmd_scan(mm, addr, orig);
1178 }
1179 diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
1180 index a06576683c38..a0604a493a36 100644
1181 --- a/arch/sparc/mm/tsb.c
1182 +++ b/arch/sparc/mm/tsb.c
1183 @@ -76,14 +76,15 @@ void flush_tsb_user(struct tlb_batch *tb)
1184
1185 spin_lock_irqsave(&mm->context.lock, flags);
1186
1187 - base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
1188 - nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
1189 - if (tlb_type == cheetah_plus || tlb_type == hypervisor)
1190 - base = __pa(base);
1191 - __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
1192 -
1193 + if (!tb->huge) {
1194 + base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
1195 + nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
1196 + if (tlb_type == cheetah_plus || tlb_type == hypervisor)
1197 + base = __pa(base);
1198 + __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
1199 + }
1200 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
1201 - if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
1202 + if (tb->huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
1203 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
1204 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
1205 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
1206 @@ -94,20 +95,21 @@ void flush_tsb_user(struct tlb_batch *tb)
1207 spin_unlock_irqrestore(&mm->context.lock, flags);
1208 }
1209
1210 -void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
1211 +void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge)
1212 {
1213 unsigned long nentries, base, flags;
1214
1215 spin_lock_irqsave(&mm->context.lock, flags);
1216
1217 - base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
1218 - nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
1219 - if (tlb_type == cheetah_plus || tlb_type == hypervisor)
1220 - base = __pa(base);
1221 - __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
1222 -
1223 + if (!huge) {
1224 + base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
1225 + nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
1226 + if (tlb_type == cheetah_plus || tlb_type == hypervisor)
1227 + base = __pa(base);
1228 + __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
1229 + }
1230 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
1231 - if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
1232 + if (huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
1233 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
1234 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
1235 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
1236 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
1237 index 06cbe25861f1..87bd6b6bf5bd 100644
1238 --- a/arch/x86/kernel/traps.c
1239 +++ b/arch/x86/kernel/traps.c
1240 @@ -95,6 +95,12 @@ static inline void cond_local_irq_disable(struct pt_regs *regs)
1241 local_irq_disable();
1242 }
1243
1244 +/*
1245 + * In IST context, we explicitly disable preemption. This serves two
1246 + * purposes: it makes it much less likely that we would accidentally
1247 + * schedule in IST context and it will force a warning if we somehow
1248 + * manage to schedule by accident.
1249 + */
1250 void ist_enter(struct pt_regs *regs)
1251 {
1252 if (user_mode(regs)) {
1253 @@ -109,13 +115,7 @@ void ist_enter(struct pt_regs *regs)
1254 rcu_nmi_enter();
1255 }
1256
1257 - /*
1258 - * We are atomic because we're on the IST stack; or we're on
1259 - * x86_32, in which case we still shouldn't schedule; or we're
1260 - * on x86_64 and entered from user mode, in which case we're
1261 - * still atomic unless ist_begin_non_atomic is called.
1262 - */
1263 - preempt_count_add(HARDIRQ_OFFSET);
1264 + preempt_disable();
1265
1266 /* This code is a bit fragile. Test it. */
1267 RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
1268 @@ -123,7 +123,7 @@ void ist_enter(struct pt_regs *regs)
1269
1270 void ist_exit(struct pt_regs *regs)
1271 {
1272 - preempt_count_sub(HARDIRQ_OFFSET);
1273 + preempt_enable_no_resched();
1274
1275 if (!user_mode(regs))
1276 rcu_nmi_exit();
1277 @@ -154,7 +154,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
1278 BUG_ON((unsigned long)(current_top_of_stack() -
1279 current_stack_pointer()) >= THREAD_SIZE);
1280
1281 - preempt_count_sub(HARDIRQ_OFFSET);
1282 + preempt_enable_no_resched();
1283 }
1284
1285 /**
1286 @@ -164,7 +164,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
1287 */
1288 void ist_end_non_atomic(void)
1289 {
1290 - preempt_count_add(HARDIRQ_OFFSET);
1291 + preempt_disable();
1292 }
1293
1294 static nokprobe_inline int
1295 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1296 index 9b7798c7b210..6b9701babaa1 100644
1297 --- a/arch/x86/kvm/x86.c
1298 +++ b/arch/x86/kvm/x86.c
1299 @@ -3032,6 +3032,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
1300 if (dbgregs->flags)
1301 return -EINVAL;
1302
1303 + if (dbgregs->dr6 & ~0xffffffffull)
1304 + return -EINVAL;
1305 + if (dbgregs->dr7 & ~0xffffffffull)
1306 + return -EINVAL;
1307 +
1308 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
1309 kvm_update_dr0123(vcpu);
1310 vcpu->arch.dr6 = dbgregs->dr6;
1311 diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
1312 index 91a7e047a765..477cbf39e0fb 100644
1313 --- a/crypto/asymmetric_keys/Kconfig
1314 +++ b/crypto/asymmetric_keys/Kconfig
1315 @@ -13,6 +13,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
1316 tristate "Asymmetric public-key crypto algorithm subtype"
1317 select MPILIB
1318 select CRYPTO_HASH_INFO
1319 + select CRYPTO_AKCIPHER
1320 help
1321 This option provides support for asymmetric public key type handling.
1322 If signature generation and/or verification are to be used,
1323 diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
1324 index 52c7395cb8d8..0d0d4529ee36 100644
1325 --- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
1326 +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
1327 @@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
1328 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1329 struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
1330 unsigned int unit;
1331 + u32 unit_size;
1332 int ret;
1333
1334 if (!ctx->u.aes.key_len)
1335 @@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
1336 if (!req->info)
1337 return -EINVAL;
1338
1339 - for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
1340 - if (!(req->nbytes & (unit_size_map[unit].size - 1)))
1341 - break;
1342 + unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
1343 + if (req->nbytes <= unit_size_map[0].size) {
1344 + for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
1345 + if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
1346 + unit_size = unit_size_map[unit].value;
1347 + break;
1348 + }
1349 + }
1350 + }
1351
1352 - if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
1353 + if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
1354 (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
1355 /* Use the fallback to process the request for any
1356 * unsupported unit sizes or key sizes
1357 @@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
1358 rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
1359 rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
1360 : CCP_AES_ACTION_DECRYPT;
1361 - rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
1362 + rctx->cmd.u.xts.unit_size = unit_size;
1363 rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
1364 rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
1365 rctx->cmd.u.xts.iv = &rctx->iv_sg;
1366 diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
1367 index 2fd38d598f3d..3c5e83263fe0 100644
1368 --- a/drivers/gpio/gpio-bcm-kona.c
1369 +++ b/drivers/gpio/gpio-bcm-kona.c
1370 @@ -546,11 +546,11 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio)
1371 /* disable interrupts and clear status */
1372 for (i = 0; i < kona_gpio->num_bank; i++) {
1373 /* Unlock the entire bank first */
1374 - bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE);
1375 + bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE);
1376 writel(0xffffffff, reg_base + GPIO_INT_MASK(i));
1377 writel(0xffffffff, reg_base + GPIO_INT_STATUS(i));
1378 /* Now re-lock the bank */
1379 - bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE);
1380 + bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE);
1381 }
1382 }
1383
1384 diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
1385 index 66d3d247d76d..e72794e463aa 100644
1386 --- a/drivers/gpio/gpio-zynq.c
1387 +++ b/drivers/gpio/gpio-zynq.c
1388 @@ -709,11 +709,17 @@ static int zynq_gpio_probe(struct platform_device *pdev)
1389 dev_err(&pdev->dev, "input clock not found.\n");
1390 return PTR_ERR(gpio->clk);
1391 }
1392 + ret = clk_prepare_enable(gpio->clk);
1393 + if (ret) {
1394 + dev_err(&pdev->dev, "Unable to enable clock.\n");
1395 + return ret;
1396 + }
1397
1398 + pm_runtime_set_active(&pdev->dev);
1399 pm_runtime_enable(&pdev->dev);
1400 ret = pm_runtime_get_sync(&pdev->dev);
1401 if (ret < 0)
1402 - return ret;
1403 + goto err_pm_dis;
1404
1405 /* report a bug if gpio chip registration fails */
1406 ret = gpiochip_add_data(chip, gpio);
1407 @@ -745,6 +751,9 @@ err_rm_gpiochip:
1408 gpiochip_remove(chip);
1409 err_pm_put:
1410 pm_runtime_put(&pdev->dev);
1411 +err_pm_dis:
1412 + pm_runtime_disable(&pdev->dev);
1413 + clk_disable_unprepare(gpio->clk);
1414
1415 return ret;
1416 }
1417 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
1418 index b747c76fd2b1..cf3e71243d6d 100644
1419 --- a/drivers/gpio/gpiolib.c
1420 +++ b/drivers/gpio/gpiolib.c
1421 @@ -438,7 +438,6 @@ static void gpiodevice_release(struct device *dev)
1422 {
1423 struct gpio_device *gdev = dev_get_drvdata(dev);
1424
1425 - cdev_del(&gdev->chrdev);
1426 list_del(&gdev->list);
1427 ida_simple_remove(&gpio_ida, gdev->id);
1428 kfree(gdev->label);
1429 @@ -471,7 +470,6 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
1430
1431 /* From this point, the .release() function cleans up gpio_device */
1432 gdev->dev.release = gpiodevice_release;
1433 - get_device(&gdev->dev);
1434 pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
1435 __func__, gdev->base, gdev->base + gdev->ngpio - 1,
1436 dev_name(&gdev->dev), gdev->chip->label ? : "generic");
1437 @@ -742,6 +740,8 @@ void gpiochip_remove(struct gpio_chip *chip)
1438 * be removed, else it will be dangling until the last user is
1439 * gone.
1440 */
1441 + cdev_del(&gdev->chrdev);
1442 + device_del(&gdev->dev);
1443 put_device(&gdev->dev);
1444 }
1445 EXPORT_SYMBOL_GPL(gpiochip_remove);
1446 @@ -841,7 +841,7 @@ struct gpio_chip *gpiochip_find(void *data,
1447
1448 spin_lock_irqsave(&gpio_lock, flags);
1449 list_for_each_entry(gdev, &gpio_devices, list)
1450 - if (match(gdev->chip, data))
1451 + if (gdev->chip && match(gdev->chip, data))
1452 break;
1453
1454 /* No match? */
1455 @@ -1339,10 +1339,13 @@ done:
1456 /*
1457 * This descriptor validation needs to be inserted verbatim into each
1458 * function taking a descriptor, so we need to use a preprocessor
1459 - * macro to avoid endless duplication.
1460 + * macro to avoid endless duplication. If the desc is NULL it is an
1461 + * optional GPIO and calls should just bail out.
1462 */
1463 #define VALIDATE_DESC(desc) do { \
1464 - if (!desc || !desc->gdev) { \
1465 + if (!desc) \
1466 + return 0; \
1467 + if (!desc->gdev) { \
1468 pr_warn("%s: invalid GPIO\n", __func__); \
1469 return -EINVAL; \
1470 } \
1471 @@ -1353,7 +1356,9 @@ done:
1472 } } while (0)
1473
1474 #define VALIDATE_DESC_VOID(desc) do { \
1475 - if (!desc || !desc->gdev) { \
1476 + if (!desc) \
1477 + return; \
1478 + if (!desc->gdev) { \
1479 pr_warn("%s: invalid GPIO\n", __func__); \
1480 return; \
1481 } \
1482 @@ -2001,7 +2006,14 @@ int gpiod_to_irq(const struct gpio_desc *desc)
1483 struct gpio_chip *chip;
1484 int offset;
1485
1486 - VALIDATE_DESC(desc);
1487 + /*
1488 + * Cannot VALIDATE_DESC() here as gpiod_to_irq() consumer semantics
1489 + * requires this function to not return zero on an invalid descriptor
1490 + * but rather a negative error number.
1491 + */
1492 + if (!desc || !desc->gdev || !desc->gdev->chip)
1493 + return -EINVAL;
1494 +
1495 chip = desc->gdev->chip;
1496 offset = gpio_chip_hwgpio(desc);
1497 return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;
1498 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
1499 index e08f962288d9..f30de8053545 100644
1500 --- a/drivers/gpu/drm/drm_crtc.c
1501 +++ b/drivers/gpu/drm/drm_crtc.c
1502 @@ -3434,6 +3434,24 @@ int drm_mode_addfb2(struct drm_device *dev,
1503 return 0;
1504 }
1505
1506 +struct drm_mode_rmfb_work {
1507 + struct work_struct work;
1508 + struct list_head fbs;
1509 +};
1510 +
1511 +static void drm_mode_rmfb_work_fn(struct work_struct *w)
1512 +{
1513 + struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);
1514 +
1515 + while (!list_empty(&arg->fbs)) {
1516 + struct drm_framebuffer *fb =
1517 + list_first_entry(&arg->fbs, typeof(*fb), filp_head);
1518 +
1519 + list_del_init(&fb->filp_head);
1520 + drm_framebuffer_remove(fb);
1521 + }
1522 +}
1523 +
1524 /**
1525 * drm_mode_rmfb - remove an FB from the configuration
1526 * @dev: drm device for the ioctl
1527 @@ -3474,7 +3492,25 @@ int drm_mode_rmfb(struct drm_device *dev,
1528 mutex_unlock(&dev->mode_config.fb_lock);
1529 mutex_unlock(&file_priv->fbs_lock);
1530
1531 - drm_framebuffer_unreference(fb);
1532 + /*
1533 + * we now own the reference that was stored in the fbs list
1534 + *
1535 + * drm_framebuffer_remove may fail with -EINTR on pending signals,
1536 + * so run this in a separate stack as there's no way to correctly
1537 + * handle this after the fb is already removed from the lookup table.
1538 + */
1539 + if (atomic_read(&fb->refcount.refcount) > 1) {
1540 + struct drm_mode_rmfb_work arg;
1541 +
1542 + INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
1543 + INIT_LIST_HEAD(&arg.fbs);
1544 + list_add_tail(&fb->filp_head, &arg.fbs);
1545 +
1546 + schedule_work(&arg.work);
1547 + flush_work(&arg.work);
1548 + destroy_work_on_stack(&arg.work);
1549 + } else
1550 + drm_framebuffer_unreference(fb);
1551
1552 return 0;
1553
1554 @@ -3627,7 +3663,6 @@ out_err1:
1555 return ret;
1556 }
1557
1558 -
1559 /**
1560 * drm_fb_release - remove and free the FBs on this file
1561 * @priv: drm file for the ioctl
1562 @@ -3642,6 +3677,9 @@ out_err1:
1563 void drm_fb_release(struct drm_file *priv)
1564 {
1565 struct drm_framebuffer *fb, *tfb;
1566 + struct drm_mode_rmfb_work arg;
1567 +
1568 + INIT_LIST_HEAD(&arg.fbs);
1569
1570 /*
1571 * When the file gets released that means no one else can access the fb
1572 @@ -3654,10 +3692,22 @@ void drm_fb_release(struct drm_file *priv)
1573 * at it any more.
1574 */
1575 list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
1576 - list_del_init(&fb->filp_head);
1577 + if (atomic_read(&fb->refcount.refcount) > 1) {
1578 + list_move_tail(&fb->filp_head, &arg.fbs);
1579 + } else {
1580 + list_del_init(&fb->filp_head);
1581
1582 - /* This drops the fpriv->fbs reference. */
1583 - drm_framebuffer_unreference(fb);
1584 + /* This drops the fpriv->fbs reference. */
1585 + drm_framebuffer_unreference(fb);
1586 + }
1587 + }
1588 +
1589 + if (!list_empty(&arg.fbs)) {
1590 + INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
1591 +
1592 + schedule_work(&arg.work);
1593 + flush_work(&arg.work);
1594 + destroy_work_on_stack(&arg.work);
1595 }
1596 }
1597
1598 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
1599 index 1c212205d0e7..d1a46ef5ab3f 100644
1600 --- a/drivers/gpu/drm/i915/i915_irq.c
1601 +++ b/drivers/gpu/drm/i915/i915_irq.c
1602 @@ -1829,7 +1829,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1603 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1604 disable_rpm_wakeref_asserts(dev_priv);
1605
1606 - do {
1607 + for (;;) {
1608 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1609 iir = I915_READ(VLV_IIR);
1610
1611 @@ -1857,7 +1857,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1612
1613 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1614 POSTING_READ(GEN8_MASTER_IRQ);
1615 - } while (0);
1616 + }
1617
1618 enable_rpm_wakeref_asserts(dev_priv);
1619
1620 diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
1621 index 8fc93c5f6abc..d02c4240b7df 100644
1622 --- a/drivers/net/ethernet/atheros/alx/alx.h
1623 +++ b/drivers/net/ethernet/atheros/alx/alx.h
1624 @@ -96,6 +96,10 @@ struct alx_priv {
1625 unsigned int rx_ringsz;
1626 unsigned int rxbuf_size;
1627
1628 + struct page *rx_page;
1629 + unsigned int rx_page_offset;
1630 + unsigned int rx_frag_size;
1631 +
1632 struct napi_struct napi;
1633 struct alx_tx_queue txq;
1634 struct alx_rx_queue rxq;
1635 diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
1636 index 55b118e876fd..8611811fbf88 100644
1637 --- a/drivers/net/ethernet/atheros/alx/main.c
1638 +++ b/drivers/net/ethernet/atheros/alx/main.c
1639 @@ -70,6 +70,35 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry)
1640 }
1641 }
1642
1643 +static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp)
1644 +{
1645 + struct sk_buff *skb;
1646 + struct page *page;
1647 +
1648 + if (alx->rx_frag_size > PAGE_SIZE)
1649 + return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
1650 +
1651 + page = alx->rx_page;
1652 + if (!page) {
1653 + alx->rx_page = page = alloc_page(gfp);
1654 + if (unlikely(!page))
1655 + return NULL;
1656 + alx->rx_page_offset = 0;
1657 + }
1658 +
1659 + skb = build_skb(page_address(page) + alx->rx_page_offset,
1660 + alx->rx_frag_size);
1661 + if (likely(skb)) {
1662 + alx->rx_page_offset += alx->rx_frag_size;
1663 + if (alx->rx_page_offset >= PAGE_SIZE)
1664 + alx->rx_page = NULL;
1665 + else
1666 + get_page(page);
1667 + }
1668 + return skb;
1669 +}
1670 +
1671 +
1672 static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
1673 {
1674 struct alx_rx_queue *rxq = &alx->rxq;
1675 @@ -86,7 +115,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
1676 while (!cur_buf->skb && next != rxq->read_idx) {
1677 struct alx_rfd *rfd = &rxq->rfd[cur];
1678
1679 - skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
1680 + skb = alx_alloc_skb(alx, gfp);
1681 if (!skb)
1682 break;
1683 dma = dma_map_single(&alx->hw.pdev->dev,
1684 @@ -124,6 +153,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
1685 alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
1686 }
1687
1688 +
1689 return count;
1690 }
1691
1692 @@ -592,6 +622,11 @@ static void alx_free_rings(struct alx_priv *alx)
1693 kfree(alx->txq.bufs);
1694 kfree(alx->rxq.bufs);
1695
1696 + if (alx->rx_page) {
1697 + put_page(alx->rx_page);
1698 + alx->rx_page = NULL;
1699 + }
1700 +
1701 dma_free_coherent(&alx->hw.pdev->dev,
1702 alx->descmem.size,
1703 alx->descmem.virt,
1704 @@ -646,6 +681,7 @@ static int alx_request_irq(struct alx_priv *alx)
1705 alx->dev->name, alx);
1706 if (!err)
1707 goto out;
1708 +
1709 /* fall back to legacy interrupt */
1710 pci_disable_msi(alx->hw.pdev);
1711 }
1712 @@ -689,6 +725,7 @@ static int alx_init_sw(struct alx_priv *alx)
1713 struct pci_dev *pdev = alx->hw.pdev;
1714 struct alx_hw *hw = &alx->hw;
1715 int err;
1716 + unsigned int head_size;
1717
1718 err = alx_identify_hw(alx);
1719 if (err) {
1720 @@ -704,7 +741,12 @@ static int alx_init_sw(struct alx_priv *alx)
1721
1722 hw->smb_timer = 400;
1723 hw->mtu = alx->dev->mtu;
1724 +
1725 alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
1726 + head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
1727 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1728 + alx->rx_frag_size = roundup_pow_of_two(head_size);
1729 +
1730 alx->tx_ringsz = 256;
1731 alx->rx_ringsz = 512;
1732 hw->imt = 200;
1733 @@ -806,6 +848,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
1734 {
1735 struct alx_priv *alx = netdev_priv(netdev);
1736 int max_frame = ALX_MAX_FRAME_LEN(mtu);
1737 + unsigned int head_size;
1738
1739 if ((max_frame < ALX_MIN_FRAME_SIZE) ||
1740 (max_frame > ALX_MAX_FRAME_SIZE))
1741 @@ -817,6 +860,9 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
1742 netdev->mtu = mtu;
1743 alx->hw.mtu = mtu;
1744 alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
1745 + head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
1746 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1747 + alx->rx_frag_size = roundup_pow_of_two(head_size);
1748 netdev_update_features(netdev);
1749 if (netif_running(netdev))
1750 alx_reinit(alx);
1751 diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
1752 index 085f9125cf42..06f031715b57 100644
1753 --- a/drivers/net/ethernet/ezchip/nps_enet.c
1754 +++ b/drivers/net/ethernet/ezchip/nps_enet.c
1755 @@ -205,8 +205,10 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
1756 * re-adding ourselves to the poll list.
1757 */
1758
1759 - if (priv->tx_skb && !tx_ctrl_ct)
1760 + if (priv->tx_skb && !tx_ctrl_ct) {
1761 + nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
1762 napi_reschedule(napi);
1763 + }
1764 }
1765
1766 return work_done;
1767 diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
1768 index 01fccec632ec..466939f8f0cf 100644
1769 --- a/drivers/net/ethernet/marvell/mvneta_bm.c
1770 +++ b/drivers/net/ethernet/marvell/mvneta_bm.c
1771 @@ -189,6 +189,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
1772 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1773 hwbm_pool->construct = mvneta_bm_construct;
1774 hwbm_pool->priv = new_pool;
1775 + spin_lock_init(&hwbm_pool->lock);
1776
1777 /* Create new pool */
1778 err = mvneta_bm_pool_create(priv, new_pool);
1779 diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
1780 index 0e758bcb26b0..1ca796316173 100644
1781 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
1782 +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
1783 @@ -2727,7 +2727,7 @@ static int ofdpa_port_obj_fib4_add(struct rocker_port *rocker_port,
1784
1785 return ofdpa_port_fib_ipv4(ofdpa_port, trans,
1786 htonl(fib4->dst), fib4->dst_len,
1787 - &fib4->fi, fib4->tb_id, 0);
1788 + fib4->fi, fib4->tb_id, 0);
1789 }
1790
1791 static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port,
1792 @@ -2737,7 +2737,7 @@ static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port,
1793
1794 return ofdpa_port_fib_ipv4(ofdpa_port, NULL,
1795 htonl(fib4->dst), fib4->dst_len,
1796 - &fib4->fi, fib4->tb_id,
1797 + fib4->fi, fib4->tb_id,
1798 OFDPA_OP_FLAG_REMOVE);
1799 }
1800
1801 diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
1802 index 1681084cc96f..1f309127457d 100644
1803 --- a/drivers/net/ethernet/sfc/ef10.c
1804 +++ b/drivers/net/ethernet/sfc/ef10.c
1805 @@ -619,6 +619,17 @@ fail:
1806 return rc;
1807 }
1808
1809 +static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
1810 +{
1811 + struct efx_channel *channel;
1812 + struct efx_tx_queue *tx_queue;
1813 +
1814 + /* All our existing PIO buffers went away */
1815 + efx_for_each_channel(channel, efx)
1816 + efx_for_each_channel_tx_queue(tx_queue, channel)
1817 + tx_queue->piobuf = NULL;
1818 +}
1819 +
1820 #else /* !EFX_USE_PIO */
1821
1822 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
1823 @@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx)
1824 {
1825 }
1826
1827 +static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
1828 +{
1829 +}
1830 +
1831 #endif /* EFX_USE_PIO */
1832
1833 static void efx_ef10_remove(struct efx_nic *efx)
1834 @@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
1835 nic_data->must_realloc_vis = true;
1836 nic_data->must_restore_filters = true;
1837 nic_data->must_restore_piobufs = true;
1838 + efx_ef10_forget_old_piobufs(efx);
1839 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
1840
1841 /* Driver-created vswitches and vports must be re-created */
1842 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
1843 index 06704ca6f9ca..8683a21690b5 100644
1844 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
1845 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
1846 @@ -209,7 +209,7 @@ int stmmac_mdio_register(struct net_device *ndev)
1847 return -ENOMEM;
1848
1849 if (mdio_bus_data->irqs)
1850 - memcpy(new_bus->irq, mdio_bus_data, sizeof(new_bus->irq));
1851 + memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq));
1852
1853 #ifdef CONFIG_OF
1854 if (priv->device->of_node)
1855 diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
1856 index 7b0a644122eb..9fcb4898fb68 100644
1857 --- a/drivers/net/geneve.c
1858 +++ b/drivers/net/geneve.c
1859 @@ -336,15 +336,15 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1860
1861 /* Need Geneve and inner Ethernet header to be present */
1862 if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
1863 - goto error;
1864 + goto drop;
1865
1866 /* Return packets with reserved bits set */
1867 geneveh = geneve_hdr(skb);
1868 if (unlikely(geneveh->ver != GENEVE_VER))
1869 - goto error;
1870 + goto drop;
1871
1872 if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
1873 - goto error;
1874 + goto drop;
1875
1876 gs = rcu_dereference_sk_user_data(sk);
1877 if (!gs)
1878 @@ -367,10 +367,6 @@ drop:
1879 /* Consume bad packet */
1880 kfree_skb(skb);
1881 return 0;
1882 -
1883 -error:
1884 - /* Let the UDP layer deal with the skb */
1885 - return 1;
1886 }
1887
1888 static struct socket *geneve_create_sock(struct net *net, bool ipv6,
1889 diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
1890 index 92eaab95ae2b..9e803bbcc0b6 100644
1891 --- a/drivers/net/macsec.c
1892 +++ b/drivers/net/macsec.c
1893 @@ -1645,7 +1645,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
1894 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
1895 rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
1896
1897 - nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN);
1898 + nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
1899 rx_sa->sc = rx_sc;
1900 rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
1901
1902 @@ -1784,7 +1784,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1903 return -ENOMEM;
1904 }
1905
1906 - nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN);
1907 + nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
1908
1909 spin_lock_bh(&tx_sa->lock);
1910 tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
1911 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
1912 index a0f64cba86ba..2ace126533cd 100644
1913 --- a/drivers/net/team/team.c
1914 +++ b/drivers/net/team/team.c
1915 @@ -990,7 +990,7 @@ static void team_port_disable(struct team *team,
1916 #define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
1917 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
1918
1919 -static void __team_compute_features(struct team *team)
1920 +static void ___team_compute_features(struct team *team)
1921 {
1922 struct team_port *port;
1923 u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
1924 @@ -1021,15 +1021,20 @@ static void __team_compute_features(struct team *team)
1925 team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1926 if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
1927 team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
1928 +}
1929
1930 +static void __team_compute_features(struct team *team)
1931 +{
1932 + ___team_compute_features(team);
1933 netdev_change_features(team->dev);
1934 }
1935
1936 static void team_compute_features(struct team *team)
1937 {
1938 mutex_lock(&team->lock);
1939 - __team_compute_features(team);
1940 + ___team_compute_features(team);
1941 mutex_unlock(&team->lock);
1942 + netdev_change_features(team->dev);
1943 }
1944
1945 static int team_port_enter(struct team *team, struct team_port *port)
1946 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1947 index 2c9e45f50edb..dda490542624 100644
1948 --- a/drivers/net/tun.c
1949 +++ b/drivers/net/tun.c
1950 @@ -568,11 +568,13 @@ static void tun_detach_all(struct net_device *dev)
1951 for (i = 0; i < n; i++) {
1952 tfile = rtnl_dereference(tun->tfiles[i]);
1953 BUG_ON(!tfile);
1954 + tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
1955 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1956 RCU_INIT_POINTER(tfile->tun, NULL);
1957 --tun->numqueues;
1958 }
1959 list_for_each_entry(tfile, &tun->disabled, next) {
1960 + tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
1961 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1962 RCU_INIT_POINTER(tfile->tun, NULL);
1963 }
1964 @@ -628,6 +630,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
1965 goto out;
1966 }
1967 tfile->queue_index = tun->numqueues;
1968 + tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
1969 rcu_assign_pointer(tfile->tun, tun);
1970 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
1971 tun->numqueues++;
1972 @@ -1425,9 +1428,6 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1973 if (!iov_iter_count(to))
1974 return 0;
1975
1976 - if (tun->dev->reg_state != NETREG_REGISTERED)
1977 - return -EIO;
1978 -
1979 /* Read frames from queue */
1980 skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
1981 &peeked, &off, &err);
1982 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
1983 index 8ac261ab7d7d..7e29b55015d0 100644
1984 --- a/drivers/net/vxlan.c
1985 +++ b/drivers/net/vxlan.c
1986 @@ -1262,7 +1262,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
1987
1988 /* Need Vxlan and inner Ethernet header to be present */
1989 if (!pskb_may_pull(skb, VXLAN_HLEN))
1990 - return 1;
1991 + goto drop;
1992
1993 unparsed = *vxlan_hdr(skb);
1994 /* VNI flag always required to be set */
1995 @@ -1271,7 +1271,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
1996 ntohl(vxlan_hdr(skb)->vx_flags),
1997 ntohl(vxlan_hdr(skb)->vx_vni));
1998 /* Return non vxlan pkt */
1999 - return 1;
2000 + goto drop;
2001 }
2002 unparsed.vx_flags &= ~VXLAN_HF_VNI;
2003 unparsed.vx_vni &= ~VXLAN_VNI_MASK;
2004 @@ -2959,6 +2959,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
2005 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
2006 conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
2007
2008 + if (tb[IFLA_MTU])
2009 + conf.mtu = nla_get_u32(tb[IFLA_MTU]);
2010 +
2011 err = vxlan_dev_configure(src_net, dev, &conf);
2012 switch (err) {
2013 case -ENODEV:
2014 diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
2015 index f70090897fdf..0e537fdc1d08 100644
2016 --- a/drivers/perf/arm_pmu.c
2017 +++ b/drivers/perf/arm_pmu.c
2018 @@ -987,9 +987,6 @@ int arm_pmu_device_probe(struct platform_device *pdev,
2019
2020 armpmu_init(pmu);
2021
2022 - if (!__oprofile_cpu_pmu)
2023 - __oprofile_cpu_pmu = pmu;
2024 -
2025 pmu->plat_device = pdev;
2026
2027 if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
2028 @@ -1025,6 +1022,9 @@ int arm_pmu_device_probe(struct platform_device *pdev,
2029 if (ret)
2030 goto out_destroy;
2031
2032 + if (!__oprofile_cpu_pmu)
2033 + __oprofile_cpu_pmu = pmu;
2034 +
2035 pr_info("enabled with %s PMU driver, %d counters available\n",
2036 pmu->name, pmu->num_events);
2037
2038 diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
2039 index 6ab8c3ccdeea..fba2dd99ee95 100644
2040 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
2041 +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
2042 @@ -1256,9 +1256,10 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
2043 const struct mtk_desc_pin *pin;
2044
2045 chained_irq_enter(chip, desc);
2046 - for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) {
2047 + for (eint_num = 0;
2048 + eint_num < pctl->devdata->ap_num;
2049 + eint_num += 32, reg += 4) {
2050 status = readl(reg);
2051 - reg += 4;
2052 while (status) {
2053 offset = __ffs(status);
2054 index = eint_num + offset;
2055 diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
2056 index 3408578b08d6..ff41c310c900 100644
2057 --- a/drivers/scsi/scsi_devinfo.c
2058 +++ b/drivers/scsi/scsi_devinfo.c
2059 @@ -230,6 +230,7 @@ static struct {
2060 {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
2061 {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
2062 {"Promise", "", NULL, BLIST_SPARSELUN},
2063 + {"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES},
2064 {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
2065 {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
2066 {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
2067 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
2068 index 8106515d1df8..f704d02645f1 100644
2069 --- a/drivers/scsi/scsi_lib.c
2070 +++ b/drivers/scsi/scsi_lib.c
2071 @@ -911,9 +911,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
2072 }
2073
2074 /*
2075 - * If we finished all bytes in the request we are done now.
2076 + * special case: failed zero length commands always need to
2077 + * drop down into the retry code. Otherwise, if we finished
2078 + * all bytes in the request we are done now.
2079 */
2080 - if (!scsi_end_request(req, error, good_bytes, 0))
2081 + if (!(blk_rq_bytes(req) == 0 && error) &&
2082 + !scsi_end_request(req, error, good_bytes, 0))
2083 return;
2084
2085 /*
2086 diff --git a/fs/dcache.c b/fs/dcache.c
2087 index d5ecc6e477da..44008e3fafc4 100644
2088 --- a/fs/dcache.c
2089 +++ b/fs/dcache.c
2090 @@ -1619,7 +1619,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
2091 struct dentry *dentry = __d_alloc(parent->d_sb, name);
2092 if (!dentry)
2093 return NULL;
2094 -
2095 + dentry->d_flags |= DCACHE_RCUACCESS;
2096 spin_lock(&parent->d_lock);
2097 /*
2098 * don't need child lock because it is not subject
2099 @@ -2338,7 +2338,6 @@ static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2100 {
2101 BUG_ON(!d_unhashed(entry));
2102 hlist_bl_lock(b);
2103 - entry->d_flags |= DCACHE_RCUACCESS;
2104 hlist_bl_add_head_rcu(&entry->d_hash, b);
2105 hlist_bl_unlock(b);
2106 }
2107 @@ -2637,6 +2636,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
2108 /* ... and switch them in the tree */
2109 if (IS_ROOT(dentry)) {
2110 /* splicing a tree */
2111 + dentry->d_flags |= DCACHE_RCUACCESS;
2112 dentry->d_parent = target->d_parent;
2113 target->d_parent = target;
2114 list_del_init(&target->d_child);
2115 diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
2116 index 866bb18efefe..e818f5ac7a26 100644
2117 --- a/fs/ecryptfs/kthread.c
2118 +++ b/fs/ecryptfs/kthread.c
2119 @@ -25,6 +25,7 @@
2120 #include <linux/slab.h>
2121 #include <linux/wait.h>
2122 #include <linux/mount.h>
2123 +#include <linux/file.h>
2124 #include "ecryptfs_kernel.h"
2125
2126 struct ecryptfs_open_req {
2127 @@ -147,7 +148,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
2128 flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
2129 (*lower_file) = dentry_open(&req.path, flags, cred);
2130 if (!IS_ERR(*lower_file))
2131 - goto out;
2132 + goto have_file;
2133 if ((flags & O_ACCMODE) == O_RDONLY) {
2134 rc = PTR_ERR((*lower_file));
2135 goto out;
2136 @@ -165,8 +166,16 @@ int ecryptfs_privileged_open(struct file **lower_file,
2137 mutex_unlock(&ecryptfs_kthread_ctl.mux);
2138 wake_up(&ecryptfs_kthread_ctl.wait);
2139 wait_for_completion(&req.done);
2140 - if (IS_ERR(*lower_file))
2141 + if (IS_ERR(*lower_file)) {
2142 rc = PTR_ERR(*lower_file);
2143 + goto out;
2144 + }
2145 +have_file:
2146 + if ((*lower_file)->f_op->mmap == NULL) {
2147 + fput(*lower_file);
2148 + *lower_file = NULL;
2149 + rc = -EMEDIUMTYPE;
2150 + }
2151 out:
2152 return rc;
2153 }
2154 diff --git a/fs/proc/root.c b/fs/proc/root.c
2155 index 361ab4ee42fc..ec649c92d270 100644
2156 --- a/fs/proc/root.c
2157 +++ b/fs/proc/root.c
2158 @@ -121,6 +121,13 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
2159 if (IS_ERR(sb))
2160 return ERR_CAST(sb);
2161
2162 + /*
2163 + * procfs isn't actually a stacking filesystem; however, there is
2164 + * too much magic going on inside it to permit stacking things on
2165 + * top of it
2166 + */
2167 + sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
2168 +
2169 if (!proc_parse_options(options, ns)) {
2170 deactivate_locked_super(sb);
2171 return ERR_PTR(-EINVAL);
2172 diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
2173 index d5d798b35c1f..e98425058f20 100644
2174 --- a/include/linux/irqchip/arm-gic-v3.h
2175 +++ b/include/linux/irqchip/arm-gic-v3.h
2176 @@ -301,7 +301,7 @@
2177 #define ICC_SGI1R_AFFINITY_1_SHIFT 16
2178 #define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
2179 #define ICC_SGI1R_SGI_ID_SHIFT 24
2180 -#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT)
2181 +#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
2182 #define ICC_SGI1R_AFFINITY_2_SHIFT 32
2183 #define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
2184 #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
2185 diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
2186 index 80a305b85323..4dd9306c9d56 100644
2187 --- a/include/linux/netfilter/x_tables.h
2188 +++ b/include/linux/netfilter/x_tables.h
2189 @@ -242,11 +242,18 @@ void xt_unregister_match(struct xt_match *target);
2190 int xt_register_matches(struct xt_match *match, unsigned int n);
2191 void xt_unregister_matches(struct xt_match *match, unsigned int n);
2192
2193 +int xt_check_entry_offsets(const void *base, const char *elems,
2194 + unsigned int target_offset,
2195 + unsigned int next_offset);
2196 +
2197 int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
2198 bool inv_proto);
2199 int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
2200 bool inv_proto);
2201
2202 +void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
2203 + struct xt_counters_info *info, bool compat);
2204 +
2205 struct xt_table *xt_register_table(struct net *net,
2206 const struct xt_table *table,
2207 struct xt_table_info *bootstrap,
2208 @@ -480,7 +487,7 @@ void xt_compat_init_offsets(u_int8_t af, unsigned int number);
2209 int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
2210
2211 int xt_compat_match_offset(const struct xt_match *match);
2212 -int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
2213 +void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
2214 unsigned int *size);
2215 int xt_compat_match_to_user(const struct xt_entry_match *m,
2216 void __user **dstptr, unsigned int *size);
2217 @@ -490,6 +497,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
2218 unsigned int *size);
2219 int xt_compat_target_to_user(const struct xt_entry_target *t,
2220 void __user **dstptr, unsigned int *size);
2221 +int xt_compat_check_entry_offsets(const void *base, const char *elems,
2222 + unsigned int target_offset,
2223 + unsigned int next_offset);
2224
2225 #endif /* CONFIG_COMPAT */
2226 #endif /* _X_TABLES_H */
2227 diff --git a/include/net/switchdev.h b/include/net/switchdev.h
2228 index 51d77b2ce2b2..985619a59323 100644
2229 --- a/include/net/switchdev.h
2230 +++ b/include/net/switchdev.h
2231 @@ -97,7 +97,7 @@ struct switchdev_obj_ipv4_fib {
2232 struct switchdev_obj obj;
2233 u32 dst;
2234 int dst_len;
2235 - struct fib_info fi;
2236 + struct fib_info *fi;
2237 u8 tos;
2238 u8 type;
2239 u32 nlflags;
2240 diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
2241 index d5e38c73377c..e4f048ee7043 100644
2242 --- a/include/uapi/linux/libc-compat.h
2243 +++ b/include/uapi/linux/libc-compat.h
2244 @@ -52,7 +52,7 @@
2245 #if defined(__GLIBC__)
2246
2247 /* Coordinate with glibc net/if.h header. */
2248 -#if defined(_NET_IF_H)
2249 +#if defined(_NET_IF_H) && defined(__USE_MISC)
2250
2251 /* GLIBC headers included first so don't define anything
2252 * that would already be defined. */
2253 diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
2254 index 8f94ca1860cf..b2aefa2d123a 100644
2255 --- a/kernel/bpf/inode.c
2256 +++ b/kernel/bpf/inode.c
2257 @@ -378,7 +378,7 @@ static int bpf_fill_super(struct super_block *sb, void *data, int silent)
2258 static struct dentry *bpf_mount(struct file_system_type *type, int flags,
2259 const char *dev_name, void *data)
2260 {
2261 - return mount_ns(type, flags, current->nsproxy->mnt_ns, bpf_fill_super);
2262 + return mount_nodev(type, flags, data, bpf_fill_super);
2263 }
2264
2265 static struct file_system_type bpf_fs_type = {
2266 @@ -386,7 +386,6 @@ static struct file_system_type bpf_fs_type = {
2267 .name = "bpf",
2268 .mount = bpf_mount,
2269 .kill_sb = kill_litter_super,
2270 - .fs_flags = FS_USERNS_MOUNT,
2271 };
2272
2273 MODULE_ALIAS_FS("bpf");
2274 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2275 index d1f7149f8704..11546a6ed5df 100644
2276 --- a/kernel/sched/core.c
2277 +++ b/kernel/sched/core.c
2278 @@ -3047,7 +3047,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
2279 static inline void schedule_debug(struct task_struct *prev)
2280 {
2281 #ifdef CONFIG_SCHED_STACK_END_CHECK
2282 - BUG_ON(task_stack_end_corrupted(prev));
2283 + if (task_stack_end_corrupted(prev))
2284 + panic("corrupted stack end detected inside scheduler\n");
2285 #endif
2286
2287 if (unlikely(in_atomic_preempt_off())) {
2288 diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
2289 index 3e4ffb3ace5f..d0289414cdeb 100644
2290 --- a/kernel/trace/bpf_trace.c
2291 +++ b/kernel/trace/bpf_trace.c
2292 @@ -194,7 +194,7 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
2293 if (unlikely(index >= array->map.max_entries))
2294 return -E2BIG;
2295
2296 - file = (struct file *)array->ptrs[index];
2297 + file = READ_ONCE(array->ptrs[index]);
2298 if (unlikely(!file))
2299 return -ENOENT;
2300
2301 @@ -238,7 +238,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
2302 if (unlikely(index >= array->map.max_entries))
2303 return -E2BIG;
2304
2305 - file = (struct file *)array->ptrs[index];
2306 + file = READ_ONCE(array->ptrs[index]);
2307 if (unlikely(!file))
2308 return -ENOENT;
2309
2310 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2311 index fe787f5c41bd..a2e79b83920f 100644
2312 --- a/mm/memcontrol.c
2313 +++ b/mm/memcontrol.c
2314 @@ -2877,6 +2877,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
2315 * ordering is imposed by list_lru_node->lock taken by
2316 * memcg_drain_all_list_lrus().
2317 */
2318 + rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
2319 css_for_each_descendant_pre(css, &memcg->css) {
2320 child = mem_cgroup_from_css(css);
2321 BUG_ON(child->kmemcg_id != kmemcg_id);
2322 @@ -2884,6 +2885,8 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
2323 if (!memcg->use_hierarchy)
2324 break;
2325 }
2326 + rcu_read_unlock();
2327 +
2328 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2329
2330 memcg_free_cache_id(kmemcg_id);
2331 diff --git a/mm/swap_state.c b/mm/swap_state.c
2332 index 366ce3518703..1155a6831a3d 100644
2333 --- a/mm/swap_state.c
2334 +++ b/mm/swap_state.c
2335 @@ -252,7 +252,10 @@ static inline void free_swap_cache(struct page *page)
2336 void free_page_and_swap_cache(struct page *page)
2337 {
2338 free_swap_cache(page);
2339 - put_page(page);
2340 + if (is_huge_zero_page(page))
2341 + put_huge_zero_page();
2342 + else
2343 + put_page(page);
2344 }
2345
2346 /*
2347 diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
2348 index dcea4f4c62b3..c18080ad4085 100644
2349 --- a/net/bridge/br_fdb.c
2350 +++ b/net/bridge/br_fdb.c
2351 @@ -279,6 +279,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
2352 * change from under us.
2353 */
2354 list_for_each_entry(v, &vg->vlan_list, vlist) {
2355 + if (!br_vlan_should_use(v))
2356 + continue;
2357 f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
2358 if (f && f->is_local && !f->dst)
2359 fdb_delete_local(br, NULL, f);
2360 diff --git a/net/core/hwbm.c b/net/core/hwbm.c
2361 index 941c28486896..2cab489ae62e 100644
2362 --- a/net/core/hwbm.c
2363 +++ b/net/core/hwbm.c
2364 @@ -55,18 +55,21 @@ int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp)
2365 spin_lock_irqsave(&bm_pool->lock, flags);
2366 if (bm_pool->buf_num == bm_pool->size) {
2367 pr_warn("pool already filled\n");
2368 + spin_unlock_irqrestore(&bm_pool->lock, flags);
2369 return bm_pool->buf_num;
2370 }
2371
2372 if (buf_num + bm_pool->buf_num > bm_pool->size) {
2373 pr_warn("cannot allocate %d buffers for pool\n",
2374 buf_num);
2375 + spin_unlock_irqrestore(&bm_pool->lock, flags);
2376 return 0;
2377 }
2378
2379 if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) {
2380 pr_warn("Adding %d buffers to the %d current buffers will overflow\n",
2381 buf_num, bm_pool->buf_num);
2382 + spin_unlock_irqrestore(&bm_pool->lock, flags);
2383 return 0;
2384 }
2385
2386 diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
2387 index 9e481992dbae..7ad0e567cf10 100644
2388 --- a/net/ipv4/af_inet.c
2389 +++ b/net/ipv4/af_inet.c
2390 @@ -1660,6 +1660,14 @@ static __net_init int inet_init_net(struct net *net)
2391 */
2392 net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
2393 net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
2394 +
2395 + /* Default values for sysctl-controlled parameters.
2396 + * We set them here, in case sysctl is not compiled.
2397 + */
2398 + net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
2399 + net->ipv4.sysctl_ip_dynaddr = 0;
2400 + net->ipv4.sysctl_ip_early_demux = 1;
2401 +
2402 return 0;
2403 }
2404
2405 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
2406 index 4133b0f513af..85d60c69bfe3 100644
2407 --- a/net/ipv4/netfilter/arp_tables.c
2408 +++ b/net/ipv4/netfilter/arp_tables.c
2409 @@ -367,6 +367,18 @@ static inline bool unconditional(const struct arpt_entry *e)
2410 memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
2411 }
2412
2413 +static bool find_jump_target(const struct xt_table_info *t,
2414 + const struct arpt_entry *target)
2415 +{
2416 + struct arpt_entry *iter;
2417 +
2418 + xt_entry_foreach(iter, t->entries, t->size) {
2419 + if (iter == target)
2420 + return true;
2421 + }
2422 + return false;
2423 +}
2424 +
2425 /* Figures out from what hook each rule can be called: returns 0 if
2426 * there are loops. Puts hook bitmask in comefrom.
2427 */
2428 @@ -439,6 +451,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
2429 size = e->next_offset;
2430 e = (struct arpt_entry *)
2431 (entry0 + pos + size);
2432 + if (pos + size >= newinfo->size)
2433 + return 0;
2434 e->counters.pcnt = pos;
2435 pos += size;
2436 } else {
2437 @@ -458,9 +472,15 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
2438 /* This a jump; chase it. */
2439 duprintf("Jump rule %u -> %u\n",
2440 pos, newpos);
2441 + e = (struct arpt_entry *)
2442 + (entry0 + newpos);
2443 + if (!find_jump_target(newinfo, e))
2444 + return 0;
2445 } else {
2446 /* ... this is a fallthru */
2447 newpos = pos + e->next_offset;
2448 + if (newpos >= newinfo->size)
2449 + return 0;
2450 }
2451 e = (struct arpt_entry *)
2452 (entry0 + newpos);
2453 @@ -474,23 +494,6 @@ next:
2454 return 1;
2455 }
2456
2457 -static inline int check_entry(const struct arpt_entry *e)
2458 -{
2459 - const struct xt_entry_target *t;
2460 -
2461 - if (!arp_checkentry(&e->arp))
2462 - return -EINVAL;
2463 -
2464 - if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
2465 - return -EINVAL;
2466 -
2467 - t = arpt_get_target_c(e);
2468 - if (e->target_offset + t->u.target_size > e->next_offset)
2469 - return -EINVAL;
2470 -
2471 - return 0;
2472 -}
2473 -
2474 static inline int check_target(struct arpt_entry *e, const char *name)
2475 {
2476 struct xt_entry_target *t = arpt_get_target(e);
2477 @@ -586,7 +589,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
2478 return -EINVAL;
2479 }
2480
2481 - err = check_entry(e);
2482 + if (!arp_checkentry(&e->arp))
2483 + return -EINVAL;
2484 +
2485 + err = xt_check_entry_offsets(e, e->elems, e->target_offset,
2486 + e->next_offset);
2487 if (err)
2488 return err;
2489
2490 @@ -691,10 +698,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
2491 }
2492 }
2493
2494 - if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
2495 - duprintf("Looping hook\n");
2496 + if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
2497 return -ELOOP;
2498 - }
2499
2500 /* Finally, each sanity check must pass */
2501 i = 0;
2502 @@ -1126,55 +1131,17 @@ static int do_add_counters(struct net *net, const void __user *user,
2503 unsigned int i;
2504 struct xt_counters_info tmp;
2505 struct xt_counters *paddc;
2506 - unsigned int num_counters;
2507 - const char *name;
2508 - int size;
2509 - void *ptmp;
2510 struct xt_table *t;
2511 const struct xt_table_info *private;
2512 int ret = 0;
2513 struct arpt_entry *iter;
2514 unsigned int addend;
2515 -#ifdef CONFIG_COMPAT
2516 - struct compat_xt_counters_info compat_tmp;
2517
2518 - if (compat) {
2519 - ptmp = &compat_tmp;
2520 - size = sizeof(struct compat_xt_counters_info);
2521 - } else
2522 -#endif
2523 - {
2524 - ptmp = &tmp;
2525 - size = sizeof(struct xt_counters_info);
2526 - }
2527 + paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
2528 + if (IS_ERR(paddc))
2529 + return PTR_ERR(paddc);
2530
2531 - if (copy_from_user(ptmp, user, size) != 0)
2532 - return -EFAULT;
2533 -
2534 -#ifdef CONFIG_COMPAT
2535 - if (compat) {
2536 - num_counters = compat_tmp.num_counters;
2537 - name = compat_tmp.name;
2538 - } else
2539 -#endif
2540 - {
2541 - num_counters = tmp.num_counters;
2542 - name = tmp.name;
2543 - }
2544 -
2545 - if (len != size + num_counters * sizeof(struct xt_counters))
2546 - return -EINVAL;
2547 -
2548 - paddc = vmalloc(len - size);
2549 - if (!paddc)
2550 - return -ENOMEM;
2551 -
2552 - if (copy_from_user(paddc, user + size, len - size) != 0) {
2553 - ret = -EFAULT;
2554 - goto free;
2555 - }
2556 -
2557 - t = xt_find_table_lock(net, NFPROTO_ARP, name);
2558 + t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
2559 if (IS_ERR_OR_NULL(t)) {
2560 ret = t ? PTR_ERR(t) : -ENOENT;
2561 goto free;
2562 @@ -1182,7 +1149,7 @@ static int do_add_counters(struct net *net, const void __user *user,
2563
2564 local_bh_disable();
2565 private = t->private;
2566 - if (private->number != num_counters) {
2567 + if (private->number != tmp.num_counters) {
2568 ret = -EINVAL;
2569 goto unlock_up_free;
2570 }
2571 @@ -1209,6 +1176,18 @@ static int do_add_counters(struct net *net, const void __user *user,
2572 }
2573
2574 #ifdef CONFIG_COMPAT
2575 +struct compat_arpt_replace {
2576 + char name[XT_TABLE_MAXNAMELEN];
2577 + u32 valid_hooks;
2578 + u32 num_entries;
2579 + u32 size;
2580 + u32 hook_entry[NF_ARP_NUMHOOKS];
2581 + u32 underflow[NF_ARP_NUMHOOKS];
2582 + u32 num_counters;
2583 + compat_uptr_t counters;
2584 + struct compat_arpt_entry entries[0];
2585 +};
2586 +
2587 static inline void compat_release_entry(struct compat_arpt_entry *e)
2588 {
2589 struct xt_entry_target *t;
2590 @@ -1217,20 +1196,17 @@ static inline void compat_release_entry(struct compat_arpt_entry *e)
2591 module_put(t->u.kernel.target->me);
2592 }
2593
2594 -static inline int
2595 +static int
2596 check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
2597 struct xt_table_info *newinfo,
2598 unsigned int *size,
2599 const unsigned char *base,
2600 - const unsigned char *limit,
2601 - const unsigned int *hook_entries,
2602 - const unsigned int *underflows,
2603 - const char *name)
2604 + const unsigned char *limit)
2605 {
2606 struct xt_entry_target *t;
2607 struct xt_target *target;
2608 unsigned int entry_offset;
2609 - int ret, off, h;
2610 + int ret, off;
2611
2612 duprintf("check_compat_entry_size_and_hooks %p\n", e);
2613 if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
2614 @@ -1247,8 +1223,11 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
2615 return -EINVAL;
2616 }
2617
2618 - /* For purposes of check_entry casting the compat entry is fine */
2619 - ret = check_entry((struct arpt_entry *)e);
2620 + if (!arp_checkentry(&e->arp))
2621 + return -EINVAL;
2622 +
2623 + ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
2624 + e->next_offset);
2625 if (ret)
2626 return ret;
2627
2628 @@ -1272,17 +1251,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
2629 if (ret)
2630 goto release_target;
2631
2632 - /* Check hooks & underflows */
2633 - for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
2634 - if ((unsigned char *)e - base == hook_entries[h])
2635 - newinfo->hook_entry[h] = hook_entries[h];
2636 - if ((unsigned char *)e - base == underflows[h])
2637 - newinfo->underflow[h] = underflows[h];
2638 - }
2639 -
2640 - /* Clear counters and comefrom */
2641 - memset(&e->counters, 0, sizeof(e->counters));
2642 - e->comefrom = 0;
2643 return 0;
2644
2645 release_target:
2646 @@ -1291,18 +1259,17 @@ out:
2647 return ret;
2648 }
2649
2650 -static int
2651 +static void
2652 compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
2653 - unsigned int *size, const char *name,
2654 + unsigned int *size,
2655 struct xt_table_info *newinfo, unsigned char *base)
2656 {
2657 struct xt_entry_target *t;
2658 struct xt_target *target;
2659 struct arpt_entry *de;
2660 unsigned int origsize;
2661 - int ret, h;
2662 + int h;
2663
2664 - ret = 0;
2665 origsize = *size;
2666 de = (struct arpt_entry *)*dstptr;
2667 memcpy(de, e, sizeof(struct arpt_entry));
2668 @@ -1323,148 +1290,82 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
2669 if ((unsigned char *)de - base < newinfo->underflow[h])
2670 newinfo->underflow[h] -= origsize - *size;
2671 }
2672 - return ret;
2673 }
2674
2675 -static int translate_compat_table(const char *name,
2676 - unsigned int valid_hooks,
2677 - struct xt_table_info **pinfo,
2678 +static int translate_compat_table(struct xt_table_info **pinfo,
2679 void **pentry0,
2680 - unsigned int total_size,
2681 - unsigned int number,
2682 - unsigned int *hook_entries,
2683 - unsigned int *underflows)
2684 + const struct compat_arpt_replace *compatr)
2685 {
2686 unsigned int i, j;
2687 struct xt_table_info *newinfo, *info;
2688 void *pos, *entry0, *entry1;
2689 struct compat_arpt_entry *iter0;
2690 - struct arpt_entry *iter1;
2691 + struct arpt_replace repl;
2692 unsigned int size;
2693 int ret = 0;
2694
2695 info = *pinfo;
2696 entry0 = *pentry0;
2697 - size = total_size;
2698 - info->number = number;
2699 -
2700 - /* Init all hooks to impossible value. */
2701 - for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
2702 - info->hook_entry[i] = 0xFFFFFFFF;
2703 - info->underflow[i] = 0xFFFFFFFF;
2704 - }
2705 + size = compatr->size;
2706 + info->number = compatr->num_entries;
2707
2708 duprintf("translate_compat_table: size %u\n", info->size);
2709 j = 0;
2710 xt_compat_lock(NFPROTO_ARP);
2711 - xt_compat_init_offsets(NFPROTO_ARP, number);
2712 + xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
2713 /* Walk through entries, checking offsets. */
2714 - xt_entry_foreach(iter0, entry0, total_size) {
2715 + xt_entry_foreach(iter0, entry0, compatr->size) {
2716 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
2717 entry0,
2718 - entry0 + total_size,
2719 - hook_entries,
2720 - underflows,
2721 - name);
2722 + entry0 + compatr->size);
2723 if (ret != 0)
2724 goto out_unlock;
2725 ++j;
2726 }
2727
2728 ret = -EINVAL;
2729 - if (j != number) {
2730 + if (j != compatr->num_entries) {
2731 duprintf("translate_compat_table: %u not %u entries\n",
2732 - j, number);
2733 + j, compatr->num_entries);
2734 goto out_unlock;
2735 }
2736
2737 - /* Check hooks all assigned */
2738 - for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
2739 - /* Only hooks which are valid */
2740 - if (!(valid_hooks & (1 << i)))
2741 - continue;
2742 - if (info->hook_entry[i] == 0xFFFFFFFF) {
2743 - duprintf("Invalid hook entry %u %u\n",
2744 - i, hook_entries[i]);
2745 - goto out_unlock;
2746 - }
2747 - if (info->underflow[i] == 0xFFFFFFFF) {
2748 - duprintf("Invalid underflow %u %u\n",
2749 - i, underflows[i]);
2750 - goto out_unlock;
2751 - }
2752 - }
2753 -
2754 ret = -ENOMEM;
2755 newinfo = xt_alloc_table_info(size);
2756 if (!newinfo)
2757 goto out_unlock;
2758
2759 - newinfo->number = number;
2760 + newinfo->number = compatr->num_entries;
2761 for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
2762 newinfo->hook_entry[i] = info->hook_entry[i];
2763 newinfo->underflow[i] = info->underflow[i];
2764 }
2765 entry1 = newinfo->entries;
2766 pos = entry1;
2767 - size = total_size;
2768 - xt_entry_foreach(iter0, entry0, total_size) {
2769 - ret = compat_copy_entry_from_user(iter0, &pos, &size,
2770 - name, newinfo, entry1);
2771 - if (ret != 0)
2772 - break;
2773 - }
2774 + size = compatr->size;
2775 + xt_entry_foreach(iter0, entry0, compatr->size)
2776 + compat_copy_entry_from_user(iter0, &pos, &size,
2777 + newinfo, entry1);
2778 +
2779 + /* all module references in entry0 are now gone */
2780 +
2781 xt_compat_flush_offsets(NFPROTO_ARP);
2782 xt_compat_unlock(NFPROTO_ARP);
2783 - if (ret)
2784 - goto free_newinfo;
2785
2786 - ret = -ELOOP;
2787 - if (!mark_source_chains(newinfo, valid_hooks, entry1))
2788 - goto free_newinfo;
2789 + memcpy(&repl, compatr, sizeof(*compatr));
2790
2791 - i = 0;
2792 - xt_entry_foreach(iter1, entry1, newinfo->size) {
2793 - iter1->counters.pcnt = xt_percpu_counter_alloc();
2794 - if (IS_ERR_VALUE(iter1->counters.pcnt)) {
2795 - ret = -ENOMEM;
2796 - break;
2797 - }
2798 -
2799 - ret = check_target(iter1, name);
2800 - if (ret != 0) {
2801 - xt_percpu_counter_free(iter1->counters.pcnt);
2802 - break;
2803 - }
2804 - ++i;
2805 - if (strcmp(arpt_get_target(iter1)->u.user.name,
2806 - XT_ERROR_TARGET) == 0)
2807 - ++newinfo->stacksize;
2808 - }
2809 - if (ret) {
2810 - /*
2811 - * The first i matches need cleanup_entry (calls ->destroy)
2812 - * because they had called ->check already. The other j-i
2813 - * entries need only release.
2814 - */
2815 - int skip = i;
2816 - j -= i;
2817 - xt_entry_foreach(iter0, entry0, newinfo->size) {
2818 - if (skip-- > 0)
2819 - continue;
2820 - if (j-- == 0)
2821 - break;
2822 - compat_release_entry(iter0);
2823 - }
2824 - xt_entry_foreach(iter1, entry1, newinfo->size) {
2825 - if (i-- == 0)
2826 - break;
2827 - cleanup_entry(iter1);
2828 - }
2829 - xt_free_table_info(newinfo);
2830 - return ret;
2831 + for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
2832 + repl.hook_entry[i] = newinfo->hook_entry[i];
2833 + repl.underflow[i] = newinfo->underflow[i];
2834 }
2835
2836 + repl.num_counters = 0;
2837 + repl.counters = NULL;
2838 + repl.size = newinfo->size;
2839 + ret = translate_table(newinfo, entry1, &repl);
2840 + if (ret)
2841 + goto free_newinfo;
2842 +
2843 *pinfo = newinfo;
2844 *pentry0 = entry1;
2845 xt_free_table_info(info);
2846 @@ -1472,31 +1373,18 @@ static int translate_compat_table(const char *name,
2847
2848 free_newinfo:
2849 xt_free_table_info(newinfo);
2850 -out:
2851 - xt_entry_foreach(iter0, entry0, total_size) {
2852 + return ret;
2853 +out_unlock:
2854 + xt_compat_flush_offsets(NFPROTO_ARP);
2855 + xt_compat_unlock(NFPROTO_ARP);
2856 + xt_entry_foreach(iter0, entry0, compatr->size) {
2857 if (j-- == 0)
2858 break;
2859 compat_release_entry(iter0);
2860 }
2861 return ret;
2862 -out_unlock:
2863 - xt_compat_flush_offsets(NFPROTO_ARP);
2864 - xt_compat_unlock(NFPROTO_ARP);
2865 - goto out;
2866 }
2867
2868 -struct compat_arpt_replace {
2869 - char name[XT_TABLE_MAXNAMELEN];
2870 - u32 valid_hooks;
2871 - u32 num_entries;
2872 - u32 size;
2873 - u32 hook_entry[NF_ARP_NUMHOOKS];
2874 - u32 underflow[NF_ARP_NUMHOOKS];
2875 - u32 num_counters;
2876 - compat_uptr_t counters;
2877 - struct compat_arpt_entry entries[0];
2878 -};
2879 -
2880 static int compat_do_replace(struct net *net, void __user *user,
2881 unsigned int len)
2882 {
2883 @@ -1529,10 +1417,7 @@ static int compat_do_replace(struct net *net, void __user *user,
2884 goto free_newinfo;
2885 }
2886
2887 - ret = translate_compat_table(tmp.name, tmp.valid_hooks,
2888 - &newinfo, &loc_cpu_entry, tmp.size,
2889 - tmp.num_entries, tmp.hook_entry,
2890 - tmp.underflow);
2891 + ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
2892 if (ret != 0)
2893 goto free_newinfo;
2894
2895 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
2896 index 631c100a1338..0984ea3fcf14 100644
2897 --- a/net/ipv4/netfilter/ip_tables.c
2898 +++ b/net/ipv4/netfilter/ip_tables.c
2899 @@ -443,6 +443,18 @@ ipt_do_table(struct sk_buff *skb,
2900 #endif
2901 }
2902
2903 +static bool find_jump_target(const struct xt_table_info *t,
2904 + const struct ipt_entry *target)
2905 +{
2906 + struct ipt_entry *iter;
2907 +
2908 + xt_entry_foreach(iter, t->entries, t->size) {
2909 + if (iter == target)
2910 + return true;
2911 + }
2912 + return false;
2913 +}
2914 +
2915 /* Figures out from what hook each rule can be called: returns 0 if
2916 there are loops. Puts hook bitmask in comefrom. */
2917 static int
2918 @@ -520,6 +532,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
2919 size = e->next_offset;
2920 e = (struct ipt_entry *)
2921 (entry0 + pos + size);
2922 + if (pos + size >= newinfo->size)
2923 + return 0;
2924 e->counters.pcnt = pos;
2925 pos += size;
2926 } else {
2927 @@ -538,9 +552,15 @@ mark_source_chains(const struct xt_table_info *newinfo,
2928 /* This a jump; chase it. */
2929 duprintf("Jump rule %u -> %u\n",
2930 pos, newpos);
2931 + e = (struct ipt_entry *)
2932 + (entry0 + newpos);
2933 + if (!find_jump_target(newinfo, e))
2934 + return 0;
2935 } else {
2936 /* ... this is a fallthru */
2937 newpos = pos + e->next_offset;
2938 + if (newpos >= newinfo->size)
2939 + return 0;
2940 }
2941 e = (struct ipt_entry *)
2942 (entry0 + newpos);
2943 @@ -568,25 +588,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
2944 }
2945
2946 static int
2947 -check_entry(const struct ipt_entry *e)
2948 -{
2949 - const struct xt_entry_target *t;
2950 -
2951 - if (!ip_checkentry(&e->ip))
2952 - return -EINVAL;
2953 -
2954 - if (e->target_offset + sizeof(struct xt_entry_target) >
2955 - e->next_offset)
2956 - return -EINVAL;
2957 -
2958 - t = ipt_get_target_c(e);
2959 - if (e->target_offset + t->u.target_size > e->next_offset)
2960 - return -EINVAL;
2961 -
2962 - return 0;
2963 -}
2964 -
2965 -static int
2966 check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
2967 {
2968 const struct ipt_ip *ip = par->entryinfo;
2969 @@ -750,7 +751,11 @@ check_entry_size_and_hooks(struct ipt_entry *e,
2970 return -EINVAL;
2971 }
2972
2973 - err = check_entry(e);
2974 + if (!ip_checkentry(&e->ip))
2975 + return -EINVAL;
2976 +
2977 + err = xt_check_entry_offsets(e, e->elems, e->target_offset,
2978 + e->next_offset);
2979 if (err)
2980 return err;
2981
2982 @@ -1309,55 +1314,17 @@ do_add_counters(struct net *net, const void __user *user,
2983 unsigned int i;
2984 struct xt_counters_info tmp;
2985 struct xt_counters *paddc;
2986 - unsigned int num_counters;
2987 - const char *name;
2988 - int size;
2989 - void *ptmp;
2990 struct xt_table *t;
2991 const struct xt_table_info *private;
2992 int ret = 0;
2993 struct ipt_entry *iter;
2994 unsigned int addend;
2995 -#ifdef CONFIG_COMPAT
2996 - struct compat_xt_counters_info compat_tmp;
2997
2998 - if (compat) {
2999 - ptmp = &compat_tmp;
3000 - size = sizeof(struct compat_xt_counters_info);
3001 - } else
3002 -#endif
3003 - {
3004 - ptmp = &tmp;
3005 - size = sizeof(struct xt_counters_info);
3006 - }
3007 -
3008 - if (copy_from_user(ptmp, user, size) != 0)
3009 - return -EFAULT;
3010 -
3011 -#ifdef CONFIG_COMPAT
3012 - if (compat) {
3013 - num_counters = compat_tmp.num_counters;
3014 - name = compat_tmp.name;
3015 - } else
3016 -#endif
3017 - {
3018 - num_counters = tmp.num_counters;
3019 - name = tmp.name;
3020 - }
3021 + paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
3022 + if (IS_ERR(paddc))
3023 + return PTR_ERR(paddc);
3024
3025 - if (len != size + num_counters * sizeof(struct xt_counters))
3026 - return -EINVAL;
3027 -
3028 - paddc = vmalloc(len - size);
3029 - if (!paddc)
3030 - return -ENOMEM;
3031 -
3032 - if (copy_from_user(paddc, user + size, len - size) != 0) {
3033 - ret = -EFAULT;
3034 - goto free;
3035 - }
3036 -
3037 - t = xt_find_table_lock(net, AF_INET, name);
3038 + t = xt_find_table_lock(net, AF_INET, tmp.name);
3039 if (IS_ERR_OR_NULL(t)) {
3040 ret = t ? PTR_ERR(t) : -ENOENT;
3041 goto free;
3042 @@ -1365,7 +1332,7 @@ do_add_counters(struct net *net, const void __user *user,
3043
3044 local_bh_disable();
3045 private = t->private;
3046 - if (private->number != num_counters) {
3047 + if (private->number != tmp.num_counters) {
3048 ret = -EINVAL;
3049 goto unlock_up_free;
3050 }
3051 @@ -1444,7 +1411,6 @@ compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
3052
3053 static int
3054 compat_find_calc_match(struct xt_entry_match *m,
3055 - const char *name,
3056 const struct ipt_ip *ip,
3057 int *size)
3058 {
3059 @@ -1479,17 +1445,14 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
3060 struct xt_table_info *newinfo,
3061 unsigned int *size,
3062 const unsigned char *base,
3063 - const unsigned char *limit,
3064 - const unsigned int *hook_entries,
3065 - const unsigned int *underflows,
3066 - const char *name)
3067 + const unsigned char *limit)
3068 {
3069 struct xt_entry_match *ematch;
3070 struct xt_entry_target *t;
3071 struct xt_target *target;
3072 unsigned int entry_offset;
3073 unsigned int j;
3074 - int ret, off, h;
3075 + int ret, off;
3076
3077 duprintf("check_compat_entry_size_and_hooks %p\n", e);
3078 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
3079 @@ -1506,8 +1469,11 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
3080 return -EINVAL;
3081 }
3082
3083 - /* For purposes of check_entry casting the compat entry is fine */
3084 - ret = check_entry((struct ipt_entry *)e);
3085 + if (!ip_checkentry(&e->ip))
3086 + return -EINVAL;
3087 +
3088 + ret = xt_compat_check_entry_offsets(e, e->elems,
3089 + e->target_offset, e->next_offset);
3090 if (ret)
3091 return ret;
3092
3093 @@ -1515,7 +1481,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
3094 entry_offset = (void *)e - (void *)base;
3095 j = 0;
3096 xt_ematch_foreach(ematch, e) {
3097 - ret = compat_find_calc_match(ematch, name, &e->ip, &off);
3098 + ret = compat_find_calc_match(ematch, &e->ip, &off);
3099 if (ret != 0)
3100 goto release_matches;
3101 ++j;
3102 @@ -1538,17 +1504,6 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
3103 if (ret)
3104 goto out;
3105
3106 - /* Check hooks & underflows */
3107 - for (h = 0; h < NF_INET_NUMHOOKS; h++) {
3108 - if ((unsigned char *)e - base == hook_entries[h])
3109 - newinfo->hook_entry[h] = hook_entries[h];
3110 - if ((unsigned char *)e - base == underflows[h])
3111 - newinfo->underflow[h] = underflows[h];
3112 - }
3113 -
3114 - /* Clear counters and comefrom */
3115 - memset(&e->counters, 0, sizeof(e->counters));
3116 - e->comefrom = 0;
3117 return 0;
3118
3119 out:
3120 @@ -1562,19 +1517,18 @@ release_matches:
3121 return ret;
3122 }
3123
3124 -static int
3125 +static void
3126 compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
3127 - unsigned int *size, const char *name,
3128 + unsigned int *size,
3129 struct xt_table_info *newinfo, unsigned char *base)
3130 {
3131 struct xt_entry_target *t;
3132 struct xt_target *target;
3133 struct ipt_entry *de;
3134 unsigned int origsize;
3135 - int ret, h;
3136 + int h;
3137 struct xt_entry_match *ematch;
3138
3139 - ret = 0;
3140 origsize = *size;
3141 de = (struct ipt_entry *)*dstptr;
3142 memcpy(de, e, sizeof(struct ipt_entry));
3143 @@ -1583,201 +1537,105 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
3144 *dstptr += sizeof(struct ipt_entry);
3145 *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
3146
3147 - xt_ematch_foreach(ematch, e) {
3148 - ret = xt_compat_match_from_user(ematch, dstptr, size);
3149 - if (ret != 0)
3150 - return ret;
3151 - }
3152 + xt_ematch_foreach(ematch, e)
3153 + xt_compat_match_from_user(ematch, dstptr, size);
3154 +
3155 de->target_offset = e->target_offset - (origsize - *size);
3156 t = compat_ipt_get_target(e);
3157 target = t->u.kernel.target;
3158 xt_compat_target_from_user(t, dstptr, size);
3159
3160 de->next_offset = e->next_offset - (origsize - *size);
3161 +
3162 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
3163 if ((unsigned char *)de - base < newinfo->hook_entry[h])
3164 newinfo->hook_entry[h] -= origsize - *size;
3165 if ((unsigned char *)de - base < newinfo->underflow[h])
3166 newinfo->underflow[h] -= origsize - *size;
3167 }
3168 - return ret;
3169 -}
3170 -
3171 -static int
3172 -compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
3173 -{
3174 - struct xt_entry_match *ematch;
3175 - struct xt_mtchk_param mtpar;
3176 - unsigned int j;
3177 - int ret = 0;
3178 -
3179 - e->counters.pcnt = xt_percpu_counter_alloc();
3180 - if (IS_ERR_VALUE(e->counters.pcnt))
3181 - return -ENOMEM;
3182 -
3183 - j = 0;
3184 - mtpar.net = net;
3185 - mtpar.table = name;
3186 - mtpar.entryinfo = &e->ip;
3187 - mtpar.hook_mask = e->comefrom;
3188 - mtpar.family = NFPROTO_IPV4;
3189 - xt_ematch_foreach(ematch, e) {
3190 - ret = check_match(ematch, &mtpar);
3191 - if (ret != 0)
3192 - goto cleanup_matches;
3193 - ++j;
3194 - }
3195 -
3196 - ret = check_target(e, net, name);
3197 - if (ret)
3198 - goto cleanup_matches;
3199 - return 0;
3200 -
3201 - cleanup_matches:
3202 - xt_ematch_foreach(ematch, e) {
3203 - if (j-- == 0)
3204 - break;
3205 - cleanup_match(ematch, net);
3206 - }
3207 -
3208 - xt_percpu_counter_free(e->counters.pcnt);
3209 -
3210 - return ret;
3211 }
3212
3213 static int
3214 translate_compat_table(struct net *net,
3215 - const char *name,
3216 - unsigned int valid_hooks,
3217 struct xt_table_info **pinfo,
3218 void **pentry0,
3219 - unsigned int total_size,
3220 - unsigned int number,
3221 - unsigned int *hook_entries,
3222 - unsigned int *underflows)
3223 + const struct compat_ipt_replace *compatr)
3224 {
3225 unsigned int i, j;
3226 struct xt_table_info *newinfo, *info;
3227 void *pos, *entry0, *entry1;
3228 struct compat_ipt_entry *iter0;
3229 - struct ipt_entry *iter1;
3230 + struct ipt_replace repl;
3231 unsigned int size;
3232 int ret;
3233
3234 info = *pinfo;
3235 entry0 = *pentry0;
3236 - size = total_size;
3237 - info->number = number;
3238 -
3239 - /* Init all hooks to impossible value. */
3240 - for (i = 0; i < NF_INET_NUMHOOKS; i++) {
3241 - info->hook_entry[i] = 0xFFFFFFFF;
3242 - info->underflow[i] = 0xFFFFFFFF;
3243 - }
3244 + size = compatr->size;
3245 + info->number = compatr->num_entries;
3246
3247 duprintf("translate_compat_table: size %u\n", info->size);
3248 j = 0;
3249 xt_compat_lock(AF_INET);
3250 - xt_compat_init_offsets(AF_INET, number);
3251 + xt_compat_init_offsets(AF_INET, compatr->num_entries);
3252 /* Walk through entries, checking offsets. */
3253 - xt_entry_foreach(iter0, entry0, total_size) {
3254 + xt_entry_foreach(iter0, entry0, compatr->size) {
3255 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
3256 entry0,
3257 - entry0 + total_size,
3258 - hook_entries,
3259 - underflows,
3260 - name);
3261 + entry0 + compatr->size);
3262 if (ret != 0)
3263 goto out_unlock;
3264 ++j;
3265 }
3266
3267 ret = -EINVAL;
3268 - if (j != number) {
3269 + if (j != compatr->num_entries) {
3270 duprintf("translate_compat_table: %u not %u entries\n",
3271 - j, number);
3272 + j, compatr->num_entries);
3273 goto out_unlock;
3274 }
3275
3276 - /* Check hooks all assigned */
3277 - for (i = 0; i < NF_INET_NUMHOOKS; i++) {
3278 - /* Only hooks which are valid */
3279 - if (!(valid_hooks & (1 << i)))
3280 - continue;
3281 - if (info->hook_entry[i] == 0xFFFFFFFF) {
3282 - duprintf("Invalid hook entry %u %u\n",
3283 - i, hook_entries[i]);
3284 - goto out_unlock;
3285 - }
3286 - if (info->underflow[i] == 0xFFFFFFFF) {
3287 - duprintf("Invalid underflow %u %u\n",
3288 - i, underflows[i]);
3289 - goto out_unlock;
3290 - }
3291 - }
3292 -
3293 ret = -ENOMEM;
3294 newinfo = xt_alloc_table_info(size);
3295 if (!newinfo)
3296 goto out_unlock;
3297
3298 - newinfo->number = number;
3299 + newinfo->number = compatr->num_entries;
3300 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
3301 - newinfo->hook_entry[i] = info->hook_entry[i];
3302 - newinfo->underflow[i] = info->underflow[i];
3303 + newinfo->hook_entry[i] = compatr->hook_entry[i];
3304 + newinfo->underflow[i] = compatr->underflow[i];
3305 }
3306 entry1 = newinfo->entries;
3307 pos = entry1;
3308 - size = total_size;
3309 - xt_entry_foreach(iter0, entry0, total_size) {
3310 - ret = compat_copy_entry_from_user(iter0, &pos, &size,
3311 - name, newinfo, entry1);
3312 - if (ret != 0)
3313 - break;
3314 - }
3315 + size = compatr->size;
3316 + xt_entry_foreach(iter0, entry0, compatr->size)
3317 + compat_copy_entry_from_user(iter0, &pos, &size,
3318 + newinfo, entry1);
3319 +
3320 + /* all module references in entry0 are now gone.
3321 + * entry1/newinfo contains a 64bit ruleset that looks exactly as
3322 + * generated by 64bit userspace.
3323 + *
3324 + * Call standard translate_table() to validate all hook_entrys,
3325 + * underflows, check for loops, etc.
3326 + */
3327 xt_compat_flush_offsets(AF_INET);
3328 xt_compat_unlock(AF_INET);
3329 - if (ret)
3330 - goto free_newinfo;
3331
3332 - ret = -ELOOP;
3333 - if (!mark_source_chains(newinfo, valid_hooks, entry1))
3334 - goto free_newinfo;
3335 + memcpy(&repl, compatr, sizeof(*compatr));
3336
3337 - i = 0;
3338 - xt_entry_foreach(iter1, entry1, newinfo->size) {
3339 - ret = compat_check_entry(iter1, net, name);
3340 - if (ret != 0)
3341 - break;
3342 - ++i;
3343 - if (strcmp(ipt_get_target(iter1)->u.user.name,
3344 - XT_ERROR_TARGET) == 0)
3345 - ++newinfo->stacksize;
3346 - }
3347 - if (ret) {
3348 - /*
3349 - * The first i matches need cleanup_entry (calls ->destroy)
3350 - * because they had called ->check already. The other j-i
3351 - * entries need only release.
3352 - */
3353 - int skip = i;
3354 - j -= i;
3355 - xt_entry_foreach(iter0, entry0, newinfo->size) {
3356 - if (skip-- > 0)
3357 - continue;
3358 - if (j-- == 0)
3359 - break;
3360 - compat_release_entry(iter0);
3361 - }
3362 - xt_entry_foreach(iter1, entry1, newinfo->size) {
3363 - if (i-- == 0)
3364 - break;
3365 - cleanup_entry(iter1, net);
3366 - }
3367 - xt_free_table_info(newinfo);
3368 - return ret;
3369 + for (i = 0; i < NF_INET_NUMHOOKS; i++) {
3370 + repl.hook_entry[i] = newinfo->hook_entry[i];
3371 + repl.underflow[i] = newinfo->underflow[i];
3372 }
3373
3374 + repl.num_counters = 0;
3375 + repl.counters = NULL;
3376 + repl.size = newinfo->size;
3377 + ret = translate_table(net, newinfo, entry1, &repl);
3378 + if (ret)
3379 + goto free_newinfo;
3380 +
3381 *pinfo = newinfo;
3382 *pentry0 = entry1;
3383 xt_free_table_info(info);
3384 @@ -1785,17 +1643,16 @@ translate_compat_table(struct net *net,
3385
3386 free_newinfo:
3387 xt_free_table_info(newinfo);
3388 -out:
3389 - xt_entry_foreach(iter0, entry0, total_size) {
3390 + return ret;
3391 +out_unlock:
3392 + xt_compat_flush_offsets(AF_INET);
3393 + xt_compat_unlock(AF_INET);
3394 + xt_entry_foreach(iter0, entry0, compatr->size) {
3395 if (j-- == 0)
3396 break;
3397 compat_release_entry(iter0);
3398 }
3399 return ret;
3400 -out_unlock:
3401 - xt_compat_flush_offsets(AF_INET);
3402 - xt_compat_unlock(AF_INET);
3403 - goto out;
3404 }
3405
3406 static int
3407 @@ -1831,10 +1688,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
3408 goto free_newinfo;
3409 }
3410
3411 - ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
3412 - &newinfo, &loc_cpu_entry, tmp.size,
3413 - tmp.num_entries, tmp.hook_entry,
3414 - tmp.underflow);
3415 + ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
3416 if (ret != 0)
3417 goto free_newinfo;
3418
3419 diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
3420 index 1e1fe6086dd9..03112a3106ab 100644
3421 --- a/net/ipv4/sysctl_net_ipv4.c
3422 +++ b/net/ipv4/sysctl_net_ipv4.c
3423 @@ -988,10 +988,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
3424 if (!net->ipv4.sysctl_local_reserved_ports)
3425 goto err_ports;
3426
3427 - net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
3428 - net->ipv4.sysctl_ip_dynaddr = 0;
3429 - net->ipv4.sysctl_ip_early_demux = 1;
3430 -
3431 return 0;
3432
3433 err_ports:
3434 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
3435 index a2e7f55a1f61..e9853dff7b52 100644
3436 --- a/net/ipv4/udp.c
3437 +++ b/net/ipv4/udp.c
3438 @@ -1616,7 +1616,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
3439
3440 /* if we're overly short, let UDP handle it */
3441 encap_rcv = ACCESS_ONCE(up->encap_rcv);
3442 - if (skb->len > sizeof(struct udphdr) && encap_rcv) {
3443 + if (encap_rcv) {
3444 int ret;
3445
3446 /* Verify checksum before giving to encap */
3447 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
3448 index bc972e7152c7..da88de82b3b8 100644
3449 --- a/net/ipv6/ip6_output.c
3450 +++ b/net/ipv6/ip6_output.c
3451 @@ -1071,17 +1071,12 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
3452 const struct in6_addr *final_dst)
3453 {
3454 struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
3455 - int err;
3456
3457 dst = ip6_sk_dst_check(sk, dst, fl6);
3458 + if (!dst)
3459 + dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
3460
3461 - err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
3462 - if (err)
3463 - return ERR_PTR(err);
3464 - if (final_dst)
3465 - fl6->daddr = *final_dst;
3466 -
3467 - return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
3468 + return dst;
3469 }
3470 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
3471
3472 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
3473 index 86b67b70b626..9021b435506d 100644
3474 --- a/net/ipv6/netfilter/ip6_tables.c
3475 +++ b/net/ipv6/netfilter/ip6_tables.c
3476 @@ -455,6 +455,18 @@ ip6t_do_table(struct sk_buff *skb,
3477 #endif
3478 }
3479
3480 +static bool find_jump_target(const struct xt_table_info *t,
3481 + const struct ip6t_entry *target)
3482 +{
3483 + struct ip6t_entry *iter;
3484 +
3485 + xt_entry_foreach(iter, t->entries, t->size) {
3486 + if (iter == target)
3487 + return true;
3488 + }
3489 + return false;
3490 +}
3491 +
3492 /* Figures out from what hook each rule can be called: returns 0 if
3493 there are loops. Puts hook bitmask in comefrom. */
3494 static int
3495 @@ -532,6 +544,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
3496 size = e->next_offset;
3497 e = (struct ip6t_entry *)
3498 (entry0 + pos + size);
3499 + if (pos + size >= newinfo->size)
3500 + return 0;
3501 e->counters.pcnt = pos;
3502 pos += size;
3503 } else {
3504 @@ -550,9 +564,15 @@ mark_source_chains(const struct xt_table_info *newinfo,
3505 /* This a jump; chase it. */
3506 duprintf("Jump rule %u -> %u\n",
3507 pos, newpos);
3508 + e = (struct ip6t_entry *)
3509 + (entry0 + newpos);
3510 + if (!find_jump_target(newinfo, e))
3511 + return 0;
3512 } else {
3513 /* ... this is a fallthru */
3514 newpos = pos + e->next_offset;
3515 + if (newpos >= newinfo->size)
3516 + return 0;
3517 }
3518 e = (struct ip6t_entry *)
3519 (entry0 + newpos);
3520 @@ -579,25 +599,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
3521 module_put(par.match->me);
3522 }
3523
3524 -static int
3525 -check_entry(const struct ip6t_entry *e)
3526 -{
3527 - const struct xt_entry_target *t;
3528 -
3529 - if (!ip6_checkentry(&e->ipv6))
3530 - return -EINVAL;
3531 -
3532 - if (e->target_offset + sizeof(struct xt_entry_target) >
3533 - e->next_offset)
3534 - return -EINVAL;
3535 -
3536 - t = ip6t_get_target_c(e);
3537 - if (e->target_offset + t->u.target_size > e->next_offset)
3538 - return -EINVAL;
3539 -
3540 - return 0;
3541 -}
3542 -
3543 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
3544 {
3545 const struct ip6t_ip6 *ipv6 = par->entryinfo;
3546 @@ -762,7 +763,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
3547 return -EINVAL;
3548 }
3549
3550 - err = check_entry(e);
3551 + if (!ip6_checkentry(&e->ipv6))
3552 + return -EINVAL;
3553 +
3554 + err = xt_check_entry_offsets(e, e->elems, e->target_offset,
3555 + e->next_offset);
3556 if (err)
3557 return err;
3558
3559 @@ -1321,55 +1326,16 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
3560 unsigned int i;
3561 struct xt_counters_info tmp;
3562 struct xt_counters *paddc;
3563 - unsigned int num_counters;
3564 - char *name;
3565 - int size;
3566 - void *ptmp;
3567 struct xt_table *t;
3568 const struct xt_table_info *private;
3569 int ret = 0;
3570 struct ip6t_entry *iter;
3571 unsigned int addend;
3572 -#ifdef CONFIG_COMPAT
3573 - struct compat_xt_counters_info compat_tmp;
3574 -
3575 - if (compat) {
3576 - ptmp = &compat_tmp;
3577 - size = sizeof(struct compat_xt_counters_info);
3578 - } else
3579 -#endif
3580 - {
3581 - ptmp = &tmp;
3582 - size = sizeof(struct xt_counters_info);
3583 - }
3584 -
3585 - if (copy_from_user(ptmp, user, size) != 0)
3586 - return -EFAULT;
3587 -
3588 -#ifdef CONFIG_COMPAT
3589 - if (compat) {
3590 - num_counters = compat_tmp.num_counters;
3591 - name = compat_tmp.name;
3592 - } else
3593 -#endif
3594 - {
3595 - num_counters = tmp.num_counters;
3596 - name = tmp.name;
3597 - }
3598 -
3599 - if (len != size + num_counters * sizeof(struct xt_counters))
3600 - return -EINVAL;
3601 -
3602 - paddc = vmalloc(len - size);
3603 - if (!paddc)
3604 - return -ENOMEM;
3605
3606 - if (copy_from_user(paddc, user + size, len - size) != 0) {
3607 - ret = -EFAULT;
3608 - goto free;
3609 - }
3610 -
3611 - t = xt_find_table_lock(net, AF_INET6, name);
3612 + paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
3613 + if (IS_ERR(paddc))
3614 + return PTR_ERR(paddc);
3615 + t = xt_find_table_lock(net, AF_INET6, tmp.name);
3616 if (IS_ERR_OR_NULL(t)) {
3617 ret = t ? PTR_ERR(t) : -ENOENT;
3618 goto free;
3619 @@ -1377,7 +1343,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
3620
3621 local_bh_disable();
3622 private = t->private;
3623 - if (private->number != num_counters) {
3624 + if (private->number != tmp.num_counters) {
3625 ret = -EINVAL;
3626 goto unlock_up_free;
3627 }
3628 @@ -1456,7 +1422,6 @@ compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
3629
3630 static int
3631 compat_find_calc_match(struct xt_entry_match *m,
3632 - const char *name,
3633 const struct ip6t_ip6 *ipv6,
3634 int *size)
3635 {
3636 @@ -1491,17 +1456,14 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
3637 struct xt_table_info *newinfo,
3638 unsigned int *size,
3639 const unsigned char *base,
3640 - const unsigned char *limit,
3641 - const unsigned int *hook_entries,
3642 - const unsigned int *underflows,
3643 - const char *name)
3644 + const unsigned char *limit)
3645 {
3646 struct xt_entry_match *ematch;
3647 struct xt_entry_target *t;
3648 struct xt_target *target;
3649 unsigned int entry_offset;
3650 unsigned int j;
3651 - int ret, off, h;
3652 + int ret, off;
3653
3654 duprintf("check_compat_entry_size_and_hooks %p\n", e);
3655 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
3656 @@ -1518,8 +1480,11 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
3657 return -EINVAL;
3658 }
3659
3660 - /* For purposes of check_entry casting the compat entry is fine */
3661 - ret = check_entry((struct ip6t_entry *)e);
3662 + if (!ip6_checkentry(&e->ipv6))
3663 + return -EINVAL;
3664 +
3665 + ret = xt_compat_check_entry_offsets(e, e->elems,
3666 + e->target_offset, e->next_offset);
3667 if (ret)
3668 return ret;
3669
3670 @@ -1527,7 +1492,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
3671 entry_offset = (void *)e - (void *)base;
3672 j = 0;
3673 xt_ematch_foreach(ematch, e) {
3674 - ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
3675 + ret = compat_find_calc_match(ematch, &e->ipv6, &off);
3676 if (ret != 0)
3677 goto release_matches;
3678 ++j;
3679 @@ -1550,17 +1515,6 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
3680 if (ret)
3681 goto out;
3682
3683 - /* Check hooks & underflows */
3684 - for (h = 0; h < NF_INET_NUMHOOKS; h++) {
3685 - if ((unsigned char *)e - base == hook_entries[h])
3686 - newinfo->hook_entry[h] = hook_entries[h];
3687 - if ((unsigned char *)e - base == underflows[h])
3688 - newinfo->underflow[h] = underflows[h];
3689 - }
3690 -
3691 - /* Clear counters and comefrom */
3692 - memset(&e->counters, 0, sizeof(e->counters));
3693 - e->comefrom = 0;
3694 return 0;
3695
3696 out:
3697 @@ -1574,18 +1528,17 @@ release_matches:
3698 return ret;
3699 }
3700
3701 -static int
3702 +static void
3703 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
3704 - unsigned int *size, const char *name,
3705 + unsigned int *size,
3706 struct xt_table_info *newinfo, unsigned char *base)
3707 {
3708 struct xt_entry_target *t;
3709 struct ip6t_entry *de;
3710 unsigned int origsize;
3711 - int ret, h;
3712 + int h;
3713 struct xt_entry_match *ematch;
3714
3715 - ret = 0;
3716 origsize = *size;
3717 de = (struct ip6t_entry *)*dstptr;
3718 memcpy(de, e, sizeof(struct ip6t_entry));
3719 @@ -1594,11 +1547,9 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
3720 *dstptr += sizeof(struct ip6t_entry);
3721 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
3722
3723 - xt_ematch_foreach(ematch, e) {
3724 - ret = xt_compat_match_from_user(ematch, dstptr, size);
3725 - if (ret != 0)
3726 - return ret;
3727 - }
3728 + xt_ematch_foreach(ematch, e)
3729 + xt_compat_match_from_user(ematch, dstptr, size);
3730 +
3731 de->target_offset = e->target_offset - (origsize - *size);
3732 t = compat_ip6t_get_target(e);
3733 xt_compat_target_from_user(t, dstptr, size);
3734 @@ -1610,183 +1561,83 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
3735 if ((unsigned char *)de - base < newinfo->underflow[h])
3736 newinfo->underflow[h] -= origsize - *size;
3737 }
3738 - return ret;
3739 -}
3740 -
3741 -static int compat_check_entry(struct ip6t_entry *e, struct net *net,
3742 - const char *name)
3743 -{
3744 - unsigned int j;
3745 - int ret = 0;
3746 - struct xt_mtchk_param mtpar;
3747 - struct xt_entry_match *ematch;
3748 -
3749 - e->counters.pcnt = xt_percpu_counter_alloc();
3750 - if (IS_ERR_VALUE(e->counters.pcnt))
3751 - return -ENOMEM;
3752 - j = 0;
3753 - mtpar.net = net;
3754 - mtpar.table = name;
3755 - mtpar.entryinfo = &e->ipv6;
3756 - mtpar.hook_mask = e->comefrom;
3757 - mtpar.family = NFPROTO_IPV6;
3758 - xt_ematch_foreach(ematch, e) {
3759 - ret = check_match(ematch, &mtpar);
3760 - if (ret != 0)
3761 - goto cleanup_matches;
3762 - ++j;
3763 - }
3764 -
3765 - ret = check_target(e, net, name);
3766 - if (ret)
3767 - goto cleanup_matches;
3768 - return 0;
3769 -
3770 - cleanup_matches:
3771 - xt_ematch_foreach(ematch, e) {
3772 - if (j-- == 0)
3773 - break;
3774 - cleanup_match(ematch, net);
3775 - }
3776 -
3777 - xt_percpu_counter_free(e->counters.pcnt);
3778 -
3779 - return ret;
3780 }
3781
3782 static int
3783 translate_compat_table(struct net *net,
3784 - const char *name,
3785 - unsigned int valid_hooks,
3786 struct xt_table_info **pinfo,
3787 void **pentry0,
3788 - unsigned int total_size,
3789 - unsigned int number,
3790 - unsigned int *hook_entries,
3791 - unsigned int *underflows)
3792 + const struct compat_ip6t_replace *compatr)
3793 {
3794 unsigned int i, j;
3795 struct xt_table_info *newinfo, *info;
3796 void *pos, *entry0, *entry1;
3797 struct compat_ip6t_entry *iter0;
3798 - struct ip6t_entry *iter1;
3799 + struct ip6t_replace repl;
3800 unsigned int size;
3801 int ret = 0;
3802
3803 info = *pinfo;
3804 entry0 = *pentry0;
3805 - size = total_size;
3806 - info->number = number;
3807 -
3808 - /* Init all hooks to impossible value. */
3809 - for (i = 0; i < NF_INET_NUMHOOKS; i++) {
3810 - info->hook_entry[i] = 0xFFFFFFFF;
3811 - info->underflow[i] = 0xFFFFFFFF;
3812 - }
3813 + size = compatr->size;
3814 + info->number = compatr->num_entries;
3815
3816 duprintf("translate_compat_table: size %u\n", info->size);
3817 j = 0;
3818 xt_compat_lock(AF_INET6);
3819 - xt_compat_init_offsets(AF_INET6, number);
3820 + xt_compat_init_offsets(AF_INET6, compatr->num_entries);
3821 /* Walk through entries, checking offsets. */
3822 - xt_entry_foreach(iter0, entry0, total_size) {
3823 + xt_entry_foreach(iter0, entry0, compatr->size) {
3824 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
3825 entry0,
3826 - entry0 + total_size,
3827 - hook_entries,
3828 - underflows,
3829 - name);
3830 + entry0 + compatr->size);
3831 if (ret != 0)
3832 goto out_unlock;
3833 ++j;
3834 }
3835
3836 ret = -EINVAL;
3837 - if (j != number) {
3838 + if (j != compatr->num_entries) {
3839 duprintf("translate_compat_table: %u not %u entries\n",
3840 - j, number);
3841 + j, compatr->num_entries);
3842 goto out_unlock;
3843 }
3844
3845 - /* Check hooks all assigned */
3846 - for (i = 0; i < NF_INET_NUMHOOKS; i++) {
3847 - /* Only hooks which are valid */
3848 - if (!(valid_hooks & (1 << i)))
3849 - continue;
3850 - if (info->hook_entry[i] == 0xFFFFFFFF) {
3851 - duprintf("Invalid hook entry %u %u\n",
3852 - i, hook_entries[i]);
3853 - goto out_unlock;
3854 - }
3855 - if (info->underflow[i] == 0xFFFFFFFF) {
3856 - duprintf("Invalid underflow %u %u\n",
3857 - i, underflows[i]);
3858 - goto out_unlock;
3859 - }
3860 - }
3861 -
3862 ret = -ENOMEM;
3863 newinfo = xt_alloc_table_info(size);
3864 if (!newinfo)
3865 goto out_unlock;
3866
3867 - newinfo->number = number;
3868 + newinfo->number = compatr->num_entries;
3869 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
3870 - newinfo->hook_entry[i] = info->hook_entry[i];
3871 - newinfo->underflow[i] = info->underflow[i];
3872 + newinfo->hook_entry[i] = compatr->hook_entry[i];
3873 + newinfo->underflow[i] = compatr->underflow[i];
3874 }
3875 entry1 = newinfo->entries;
3876 pos = entry1;
3877 - size = total_size;
3878 - xt_entry_foreach(iter0, entry0, total_size) {
3879 - ret = compat_copy_entry_from_user(iter0, &pos, &size,
3880 - name, newinfo, entry1);
3881 - if (ret != 0)
3882 - break;
3883 - }
3884 + size = compatr->size;
3885 + xt_entry_foreach(iter0, entry0, compatr->size)
3886 + compat_copy_entry_from_user(iter0, &pos, &size,
3887 + newinfo, entry1);
3888 +
3889 + /* all module references in entry0 are now gone. */
3890 xt_compat_flush_offsets(AF_INET6);
3891 xt_compat_unlock(AF_INET6);
3892 - if (ret)
3893 - goto free_newinfo;
3894
3895 - ret = -ELOOP;
3896 - if (!mark_source_chains(newinfo, valid_hooks, entry1))
3897 - goto free_newinfo;
3898 + memcpy(&repl, compatr, sizeof(*compatr));
3899
3900 - i = 0;
3901 - xt_entry_foreach(iter1, entry1, newinfo->size) {
3902 - ret = compat_check_entry(iter1, net, name);
3903 - if (ret != 0)
3904 - break;
3905 - ++i;
3906 - if (strcmp(ip6t_get_target(iter1)->u.user.name,
3907 - XT_ERROR_TARGET) == 0)
3908 - ++newinfo->stacksize;
3909 - }
3910 - if (ret) {
3911 - /*
3912 - * The first i matches need cleanup_entry (calls ->destroy)
3913 - * because they had called ->check already. The other j-i
3914 - * entries need only release.
3915 - */
3916 - int skip = i;
3917 - j -= i;
3918 - xt_entry_foreach(iter0, entry0, newinfo->size) {
3919 - if (skip-- > 0)
3920 - continue;
3921 - if (j-- == 0)
3922 - break;
3923 - compat_release_entry(iter0);
3924 - }
3925 - xt_entry_foreach(iter1, entry1, newinfo->size) {
3926 - if (i-- == 0)
3927 - break;
3928 - cleanup_entry(iter1, net);
3929 - }
3930 - xt_free_table_info(newinfo);
3931 - return ret;
3932 + for (i = 0; i < NF_INET_NUMHOOKS; i++) {
3933 + repl.hook_entry[i] = newinfo->hook_entry[i];
3934 + repl.underflow[i] = newinfo->underflow[i];
3935 }
3936
3937 + repl.num_counters = 0;
3938 + repl.counters = NULL;
3939 + repl.size = newinfo->size;
3940 + ret = translate_table(net, newinfo, entry1, &repl);
3941 + if (ret)
3942 + goto free_newinfo;
3943 +
3944 *pinfo = newinfo;
3945 *pentry0 = entry1;
3946 xt_free_table_info(info);
3947 @@ -1794,17 +1645,16 @@ translate_compat_table(struct net *net,
3948
3949 free_newinfo:
3950 xt_free_table_info(newinfo);
3951 -out:
3952 - xt_entry_foreach(iter0, entry0, total_size) {
3953 + return ret;
3954 +out_unlock:
3955 + xt_compat_flush_offsets(AF_INET6);
3956 + xt_compat_unlock(AF_INET6);
3957 + xt_entry_foreach(iter0, entry0, compatr->size) {
3958 if (j-- == 0)
3959 break;
3960 compat_release_entry(iter0);
3961 }
3962 return ret;
3963 -out_unlock:
3964 - xt_compat_flush_offsets(AF_INET6);
3965 - xt_compat_unlock(AF_INET6);
3966 - goto out;
3967 }
3968
3969 static int
3970 @@ -1840,10 +1690,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
3971 goto free_newinfo;
3972 }
3973
3974 - ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
3975 - &newinfo, &loc_cpu_entry, tmp.size,
3976 - tmp.num_entries, tmp.hook_entry,
3977 - tmp.underflow);
3978 + ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
3979 if (ret != 0)
3980 goto free_newinfo;
3981
3982 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
3983 index f443c6b0ce16..f6d7516eeb8a 100644
3984 --- a/net/ipv6/tcp_ipv6.c
3985 +++ b/net/ipv6/tcp_ipv6.c
3986 @@ -1717,7 +1717,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
3987 destp = ntohs(inet->inet_dport);
3988 srcp = ntohs(inet->inet_sport);
3989
3990 - if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
3991 + if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
3992 + icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
3993 + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
3994 timer_active = 1;
3995 timer_expires = icsk->icsk_timeout;
3996 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
3997 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
3998 index 6bc5c664fa46..f96831d9d419 100644
3999 --- a/net/ipv6/udp.c
4000 +++ b/net/ipv6/udp.c
4001 @@ -653,7 +653,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
4002
4003 /* if we're overly short, let UDP handle it */
4004 encap_rcv = ACCESS_ONCE(up->encap_rcv);
4005 - if (skb->len > sizeof(struct udphdr) && encap_rcv) {
4006 + if (encap_rcv) {
4007 int ret;
4008
4009 /* Verify checksum before giving to encap */
4010 diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
4011 index 6edfa9980314..1e40dacaa137 100644
4012 --- a/net/l2tp/l2tp_core.c
4013 +++ b/net/l2tp/l2tp_core.c
4014 @@ -1581,7 +1581,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
4015 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
4016 tunnel->encap = encap;
4017 if (encap == L2TP_ENCAPTYPE_UDP) {
4018 - struct udp_tunnel_sock_cfg udp_cfg;
4019 + struct udp_tunnel_sock_cfg udp_cfg = { };
4020
4021 udp_cfg.sk_user_data = tunnel;
4022 udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
4023 diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
4024 index 582c9cfd6567..2675d580c490 100644
4025 --- a/net/netfilter/x_tables.c
4026 +++ b/net/netfilter/x_tables.c
4027 @@ -416,6 +416,47 @@ int xt_check_match(struct xt_mtchk_param *par,
4028 }
4029 EXPORT_SYMBOL_GPL(xt_check_match);
4030
4031 +/** xt_check_entry_match - check that matches end before start of target
4032 + *
4033 + * @match: beginning of xt_entry_match
4034 + * @target: beginning of this rules target (alleged end of matches)
4035 + * @alignment: alignment requirement of match structures
4036 + *
4037 + * Validates that all matches add up to the beginning of the target,
4038 + * and that each match covers at least the base structure size.
4039 + *
4040 + * Return: 0 on success, negative errno on failure.
4041 + */
4042 +static int xt_check_entry_match(const char *match, const char *target,
4043 + const size_t alignment)
4044 +{
4045 + const struct xt_entry_match *pos;
4046 + int length = target - match;
4047 +
4048 + if (length == 0) /* no matches */
4049 + return 0;
4050 +
4051 + pos = (struct xt_entry_match *)match;
4052 + do {
4053 + if ((unsigned long)pos % alignment)
4054 + return -EINVAL;
4055 +
4056 + if (length < (int)sizeof(struct xt_entry_match))
4057 + return -EINVAL;
4058 +
4059 + if (pos->u.match_size < sizeof(struct xt_entry_match))
4060 + return -EINVAL;
4061 +
4062 + if (pos->u.match_size > length)
4063 + return -EINVAL;
4064 +
4065 + length -= pos->u.match_size;
4066 + pos = ((void *)((char *)(pos) + (pos)->u.match_size));
4067 + } while (length > 0);
4068 +
4069 + return 0;
4070 +}
4071 +
4072 #ifdef CONFIG_COMPAT
4073 int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
4074 {
4075 @@ -485,13 +526,14 @@ int xt_compat_match_offset(const struct xt_match *match)
4076 }
4077 EXPORT_SYMBOL_GPL(xt_compat_match_offset);
4078
4079 -int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
4080 - unsigned int *size)
4081 +void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
4082 + unsigned int *size)
4083 {
4084 const struct xt_match *match = m->u.kernel.match;
4085 struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
4086 int pad, off = xt_compat_match_offset(match);
4087 u_int16_t msize = cm->u.user.match_size;
4088 + char name[sizeof(m->u.user.name)];
4089
4090 m = *dstptr;
4091 memcpy(m, cm, sizeof(*cm));
4092 @@ -505,10 +547,12 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
4093
4094 msize += off;
4095 m->u.user.match_size = msize;
4096 + strlcpy(name, match->name, sizeof(name));
4097 + module_put(match->me);
4098 + strncpy(m->u.user.name, name, sizeof(m->u.user.name));
4099
4100 *size += off;
4101 *dstptr += msize;
4102 - return 0;
4103 }
4104 EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
4105
4106 @@ -539,8 +583,125 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
4107 return 0;
4108 }
4109 EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
4110 +
4111 +/* non-compat version may have padding after verdict */
4112 +struct compat_xt_standard_target {
4113 + struct compat_xt_entry_target t;
4114 + compat_uint_t verdict;
4115 +};
4116 +
4117 +int xt_compat_check_entry_offsets(const void *base, const char *elems,
4118 + unsigned int target_offset,
4119 + unsigned int next_offset)
4120 +{
4121 + long size_of_base_struct = elems - (const char *)base;
4122 + const struct compat_xt_entry_target *t;
4123 + const char *e = base;
4124 +
4125 + if (target_offset < size_of_base_struct)
4126 + return -EINVAL;
4127 +
4128 + if (target_offset + sizeof(*t) > next_offset)
4129 + return -EINVAL;
4130 +
4131 + t = (void *)(e + target_offset);
4132 + if (t->u.target_size < sizeof(*t))
4133 + return -EINVAL;
4134 +
4135 + if (target_offset + t->u.target_size > next_offset)
4136 + return -EINVAL;
4137 +
4138 + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
4139 + COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
4140 + return -EINVAL;
4141 +
4142 + /* compat_xt_entry match has less strict aligment requirements,
4143 + * otherwise they are identical. In case of padding differences
4144 + * we need to add compat version of xt_check_entry_match.
4145 + */
4146 + BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
4147 +
4148 + return xt_check_entry_match(elems, base + target_offset,
4149 + __alignof__(struct compat_xt_entry_match));
4150 +}
4151 +EXPORT_SYMBOL(xt_compat_check_entry_offsets);
4152 #endif /* CONFIG_COMPAT */
4153
4154 +/**
4155 + * xt_check_entry_offsets - validate arp/ip/ip6t_entry
4156 + *
4157 + * @base: pointer to arp/ip/ip6t_entry
4158 + * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
4159 + * @target_offset: the arp/ip/ip6_t->target_offset
4160 + * @next_offset: the arp/ip/ip6_t->next_offset
4161 + *
4162 + * validates that target_offset and next_offset are sane and that all
4163 + * match sizes (if any) align with the target offset.
4164 + *
4165 + * This function does not validate the targets or matches themselves, it
4166 + * only tests that all the offsets and sizes are correct, that all
4167 + * match structures are aligned, and that the last structure ends where
4168 + * the target structure begins.
4169 + *
4170 + * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
4171 + *
4172 + * The arp/ip/ip6t_entry structure @base must have passed following tests:
4173 + * - it must point to a valid memory location
4174 + * - base to base + next_offset must be accessible, i.e. not exceed allocated
4175 + * length.
4176 + *
4177 + * A well-formed entry looks like this:
4178 + *
4179 + * ip(6)t_entry match [mtdata] match [mtdata] target [tgdata] ip(6)t_entry
4180 + * e->elems[]-----' | |
4181 + * matchsize | |
4182 + * matchsize | |
4183 + * | |
4184 + * target_offset---------------------------------' |
4185 + * next_offset---------------------------------------------------'
4186 + *
4187 + * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
4188 + * This is where matches (if any) and the target reside.
4189 + * target_offset: beginning of target.
4190 + * next_offset: start of the next rule; also: size of this rule.
4191 + * Since targets have a minimum size, target_offset + minlen <= next_offset.
4192 + *
4193 + * Every match stores its size, sum of sizes must not exceed target_offset.
4194 + *
4195 + * Return: 0 on success, negative errno on failure.
4196 + */
4197 +int xt_check_entry_offsets(const void *base,
4198 + const char *elems,
4199 + unsigned int target_offset,
4200 + unsigned int next_offset)
4201 +{
4202 + long size_of_base_struct = elems - (const char *)base;
4203 + const struct xt_entry_target *t;
4204 + const char *e = base;
4205 +
4206 + /* target start is within the ip/ip6/arpt_entry struct */
4207 + if (target_offset < size_of_base_struct)
4208 + return -EINVAL;
4209 +
4210 + if (target_offset + sizeof(*t) > next_offset)
4211 + return -EINVAL;
4212 +
4213 + t = (void *)(e + target_offset);
4214 + if (t->u.target_size < sizeof(*t))
4215 + return -EINVAL;
4216 +
4217 + if (target_offset + t->u.target_size > next_offset)
4218 + return -EINVAL;
4219 +
4220 + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
4221 + XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
4222 + return -EINVAL;
4223 +
4224 + return xt_check_entry_match(elems, base + target_offset,
4225 + __alignof__(struct xt_entry_match));
4226 +}
4227 +EXPORT_SYMBOL(xt_check_entry_offsets);
4228 +
4229 int xt_check_target(struct xt_tgchk_param *par,
4230 unsigned int size, u_int8_t proto, bool inv_proto)
4231 {
4232 @@ -591,6 +752,80 @@ int xt_check_target(struct xt_tgchk_param *par,
4233 }
4234 EXPORT_SYMBOL_GPL(xt_check_target);
4235
4236 +/**
4237 + * xt_copy_counters_from_user - copy counters and metadata from userspace
4238 + *
4239 + * @user: src pointer to userspace memory
4240 + * @len: alleged size of userspace memory
4241 + * @info: where to store the xt_counters_info metadata
4242 + * @compat: true if we setsockopt call is done by 32bit task on 64bit kernel
4243 + *
4244 + * Copies counter meta data from @user and stores it in @info.
4245 + *
4246 + * vmallocs memory to hold the counters, then copies the counter data
4247 + * from @user to the new memory and returns a pointer to it.
4248 + *
4249 + * If @compat is true, @info gets converted automatically to the 64bit
4250 + * representation.
4251 + *
4252 + * The metadata associated with the counters is stored in @info.
4253 + *
4254 + * Return: returns pointer that caller has to test via IS_ERR().
4255 + * If IS_ERR is false, caller has to vfree the pointer.
4256 + */
4257 +void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
4258 + struct xt_counters_info *info, bool compat)
4259 +{
4260 + void *mem;
4261 + u64 size;
4262 +
4263 +#ifdef CONFIG_COMPAT
4264 + if (compat) {
4265 + /* structures only differ in size due to alignment */
4266 + struct compat_xt_counters_info compat_tmp;
4267 +
4268 + if (len <= sizeof(compat_tmp))
4269 + return ERR_PTR(-EINVAL);
4270 +
4271 + len -= sizeof(compat_tmp);
4272 + if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
4273 + return ERR_PTR(-EFAULT);
4274 +
4275 + strlcpy(info->name, compat_tmp.name, sizeof(info->name));
4276 + info->num_counters = compat_tmp.num_counters;
4277 + user += sizeof(compat_tmp);
4278 + } else
4279 +#endif
4280 + {
4281 + if (len <= sizeof(*info))
4282 + return ERR_PTR(-EINVAL);
4283 +
4284 + len -= sizeof(*info);
4285 + if (copy_from_user(info, user, sizeof(*info)) != 0)
4286 + return ERR_PTR(-EFAULT);
4287 +
4288 + info->name[sizeof(info->name) - 1] = '\0';
4289 + user += sizeof(*info);
4290 + }
4291 +
4292 + size = sizeof(struct xt_counters);
4293 + size *= info->num_counters;
4294 +
4295 + if (size != (u64)len)
4296 + return ERR_PTR(-EINVAL);
4297 +
4298 + mem = vmalloc(len);
4299 + if (!mem)
4300 + return ERR_PTR(-ENOMEM);
4301 +
4302 + if (copy_from_user(mem, user, len) == 0)
4303 + return mem;
4304 +
4305 + vfree(mem);
4306 + return ERR_PTR(-EFAULT);
4307 +}
4308 +EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
4309 +
4310 #ifdef CONFIG_COMPAT
4311 int xt_compat_target_offset(const struct xt_target *target)
4312 {
4313 @@ -606,6 +841,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
4314 struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
4315 int pad, off = xt_compat_target_offset(target);
4316 u_int16_t tsize = ct->u.user.target_size;
4317 + char name[sizeof(t->u.user.name)];
4318
4319 t = *dstptr;
4320 memcpy(t, ct, sizeof(*ct));
4321 @@ -619,6 +855,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
4322
4323 tsize += off;
4324 t->u.user.target_size = tsize;
4325 + strlcpy(name, target->name, sizeof(name));
4326 + module_put(target->me);
4327 + strncpy(t->u.user.name, name, sizeof(t->u.user.name));
4328
4329 *size += off;
4330 *dstptr += tsize;
4331 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
4332 index 330ebd600f25..f48e3b3aedd5 100644
4333 --- a/net/netlink/af_netlink.c
4334 +++ b/net/netlink/af_netlink.c
4335 @@ -2059,6 +2059,7 @@ static int netlink_dump(struct sock *sk)
4336 struct netlink_callback *cb;
4337 struct sk_buff *skb = NULL;
4338 struct nlmsghdr *nlh;
4339 + struct module *module;
4340 int len, err = -ENOBUFS;
4341 int alloc_min_size;
4342 int alloc_size;
4343 @@ -2134,9 +2135,11 @@ static int netlink_dump(struct sock *sk)
4344 cb->done(cb);
4345
4346 nlk->cb_running = false;
4347 + module = cb->module;
4348 + skb = cb->skb;
4349 mutex_unlock(nlk->cb_mutex);
4350 - module_put(cb->module);
4351 - consume_skb(cb->skb);
4352 + module_put(module);
4353 + consume_skb(skb);
4354 return 0;
4355
4356 errout_skb:
4357 diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
4358 index b7e01d88bdc5..59658b2e9cdf 100644
4359 --- a/net/switchdev/switchdev.c
4360 +++ b/net/switchdev/switchdev.c
4361 @@ -1188,6 +1188,7 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
4362 .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
4363 .dst = dst,
4364 .dst_len = dst_len,
4365 + .fi = fi,
4366 .tos = tos,
4367 .type = type,
4368 .nlflags = nlflags,
4369 @@ -1196,8 +1197,6 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
4370 struct net_device *dev;
4371 int err = 0;
4372
4373 - memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));
4374 -
4375 /* Don't offload route if using custom ip rules or if
4376 * IPv4 FIB offloading has been disabled completely.
4377 */
4378 @@ -1242,6 +1241,7 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
4379 .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
4380 .dst = dst,
4381 .dst_len = dst_len,
4382 + .fi = fi,
4383 .tos = tos,
4384 .type = type,
4385 .nlflags = 0,
4386 @@ -1250,8 +1250,6 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
4387 struct net_device *dev;
4388 int err = 0;
4389
4390 - memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));
4391 -
4392 if (!(fi->fib_flags & RTNH_F_OFFLOAD))
4393 return 0;
4394
4395 diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
4396 index d7d050f44fc1..4dfc5c14f8c3 100644
4397 --- a/net/tipc/netlink_compat.c
4398 +++ b/net/tipc/netlink_compat.c
4399 @@ -802,7 +802,7 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
4400 goto out;
4401
4402 tipc_tlv_sprintf(msg->rep, "%-10u %s",
4403 - nla_get_u32(publ[TIPC_NLA_PUBL_REF]),
4404 + nla_get_u32(publ[TIPC_NLA_PUBL_KEY]),
4405 scope_str[nla_get_u32(publ[TIPC_NLA_PUBL_SCOPE])]);
4406 out:
4407 tipc_tlv_sprintf(msg->rep, "\n");
4408 diff --git a/net/tipc/socket.c b/net/tipc/socket.c
4409 index 3eeb50a27b89..5f80d3fa9c85 100644
4410 --- a/net/tipc/socket.c
4411 +++ b/net/tipc/socket.c
4412 @@ -2807,6 +2807,9 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
4413 if (err)
4414 return err;
4415
4416 + if (!attrs[TIPC_NLA_SOCK])
4417 + return -EINVAL;
4418 +
4419 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
4420 attrs[TIPC_NLA_SOCK],
4421 tipc_nl_sock_policy);
4422 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
4423 index b50ee5d622e1..c753211cb83f 100644
4424 --- a/net/wireless/wext-core.c
4425 +++ b/net/wireless/wext-core.c
4426 @@ -955,8 +955,29 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
4427 return private(dev, iwr, cmd, info, handler);
4428 }
4429 /* Old driver API : call driver ioctl handler */
4430 - if (dev->netdev_ops->ndo_do_ioctl)
4431 - return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
4432 + if (dev->netdev_ops->ndo_do_ioctl) {
4433 +#ifdef CONFIG_COMPAT
4434 + if (info->flags & IW_REQUEST_FLAG_COMPAT) {
4435 + int ret = 0;
4436 + struct iwreq iwr_lcl;
4437 + struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
4438 +
4439 + memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
4440 + iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
4441 + iwr_lcl.u.data.length = iwp_compat->length;
4442 + iwr_lcl.u.data.flags = iwp_compat->flags;
4443 +
4444 + ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
4445 +
4446 + iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
4447 + iwp_compat->length = iwr_lcl.u.data.length;
4448 + iwp_compat->flags = iwr_lcl.u.data.flags;
4449 +
4450 + return ret;
4451 + } else
4452 +#endif
4453 + return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
4454 + }
4455 return -EOPNOTSUPP;
4456 }
4457
4458 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4459 index 9a0d1445ca5c..94089fc71884 100644
4460 --- a/sound/pci/hda/hda_intel.c
4461 +++ b/sound/pci/hda/hda_intel.c
4462 @@ -365,8 +365,11 @@ enum {
4463
4464 #define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
4465 #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
4466 +#define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
4467 +#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
4468 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
4469 -#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
4470 +#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
4471 + IS_KBL(pci) || IS_KBL_LP(pci)
4472
4473 static char *driver_short_names[] = {
4474 [AZX_DRIVER_ICH] = "HDA Intel",
4475 @@ -2181,6 +2184,12 @@ static const struct pci_device_id azx_ids[] = {
4476 /* Sunrise Point-LP */
4477 { PCI_DEVICE(0x8086, 0x9d70),
4478 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
4479 + /* Kabylake */
4480 + { PCI_DEVICE(0x8086, 0xa171),
4481 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
4482 + /* Kabylake-LP */
4483 + { PCI_DEVICE(0x8086, 0x9d71),
4484 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
4485 /* Broxton-P(Apollolake) */
4486 { PCI_DEVICE(0x8086, 0x5a98),
4487 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
4488 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4489 index d53c25e7a1c1..0fe18ede3e85 100644
4490 --- a/sound/pci/hda/patch_realtek.c
4491 +++ b/sound/pci/hda/patch_realtek.c
4492 @@ -346,6 +346,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
4493 case 0x10ec0234:
4494 case 0x10ec0274:
4495 case 0x10ec0294:
4496 + case 0x10ec0700:
4497 + case 0x10ec0701:
4498 + case 0x10ec0703:
4499 alc_update_coef_idx(codec, 0x10, 1<<15, 0);
4500 break;
4501 case 0x10ec0662:
4502 @@ -2655,6 +2658,7 @@ enum {
4503 ALC269_TYPE_ALC256,
4504 ALC269_TYPE_ALC225,
4505 ALC269_TYPE_ALC294,
4506 + ALC269_TYPE_ALC700,
4507 };
4508
4509 /*
4510 @@ -2686,6 +2690,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
4511 case ALC269_TYPE_ALC256:
4512 case ALC269_TYPE_ALC225:
4513 case ALC269_TYPE_ALC294:
4514 + case ALC269_TYPE_ALC700:
4515 ssids = alc269_ssids;
4516 break;
4517 default:
4518 @@ -3618,13 +3623,20 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
4519 static void alc_headset_mode_unplugged(struct hda_codec *codec)
4520 {
4521 static struct coef_fw coef0255[] = {
4522 - WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
4523 WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */
4524 UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
4525 WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
4526 WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
4527 {}
4528 };
4529 + static struct coef_fw coef0255_1[] = {
4530 + WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
4531 + {}
4532 + };
4533 + static struct coef_fw coef0256[] = {
4534 + WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
4535 + {}
4536 + };
4537 static struct coef_fw coef0233[] = {
4538 WRITE_COEF(0x1b, 0x0c0b),
4539 WRITE_COEF(0x45, 0xc429),
4540 @@ -3677,7 +3689,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
4541
4542 switch (codec->core.vendor_id) {
4543 case 0x10ec0255:
4544 + alc_process_coef_fw(codec, coef0255_1);
4545 + alc_process_coef_fw(codec, coef0255);
4546 + break;
4547 case 0x10ec0256:
4548 + alc_process_coef_fw(codec, coef0256);
4549 alc_process_coef_fw(codec, coef0255);
4550 break;
4551 case 0x10ec0233:
4552 @@ -3896,6 +3912,12 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
4553 WRITE_COEFEX(0x57, 0x03, 0x8ea6),
4554 {}
4555 };
4556 + static struct coef_fw coef0256[] = {
4557 + WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
4558 + WRITE_COEF(0x1b, 0x0c6b),
4559 + WRITE_COEFEX(0x57, 0x03, 0x8ea6),
4560 + {}
4561 + };
4562 static struct coef_fw coef0233[] = {
4563 WRITE_COEF(0x45, 0xd429),
4564 WRITE_COEF(0x1b, 0x0c2b),
4565 @@ -3936,9 +3958,11 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
4566
4567 switch (codec->core.vendor_id) {
4568 case 0x10ec0255:
4569 - case 0x10ec0256:
4570 alc_process_coef_fw(codec, coef0255);
4571 break;
4572 + case 0x10ec0256:
4573 + alc_process_coef_fw(codec, coef0256);
4574 + break;
4575 case 0x10ec0233:
4576 case 0x10ec0283:
4577 alc_process_coef_fw(codec, coef0233);
4578 @@ -3978,6 +4002,12 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
4579 WRITE_COEFEX(0x57, 0x03, 0x8ea6),
4580 {}
4581 };
4582 + static struct coef_fw coef0256[] = {
4583 + WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
4584 + WRITE_COEF(0x1b, 0x0c6b),
4585 + WRITE_COEFEX(0x57, 0x03, 0x8ea6),
4586 + {}
4587 + };
4588 static struct coef_fw coef0233[] = {
4589 WRITE_COEF(0x45, 0xe429),
4590 WRITE_COEF(0x1b, 0x0c2b),
4591 @@ -4018,9 +4048,11 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
4592
4593 switch (codec->core.vendor_id) {
4594 case 0x10ec0255:
4595 - case 0x10ec0256:
4596 alc_process_coef_fw(codec, coef0255);
4597 break;
4598 + case 0x10ec0256:
4599 + alc_process_coef_fw(codec, coef0256);
4600 + break;
4601 case 0x10ec0233:
4602 case 0x10ec0283:
4603 alc_process_coef_fw(codec, coef0233);
4604 @@ -4266,7 +4298,7 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
4605 static void alc255_set_default_jack_type(struct hda_codec *codec)
4606 {
4607 /* Set to iphone type */
4608 - static struct coef_fw fw[] = {
4609 + static struct coef_fw alc255fw[] = {
4610 WRITE_COEF(0x1b, 0x880b),
4611 WRITE_COEF(0x45, 0xd089),
4612 WRITE_COEF(0x1b, 0x080b),
4613 @@ -4274,7 +4306,22 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
4614 WRITE_COEF(0x1b, 0x0c0b),
4615 {}
4616 };
4617 - alc_process_coef_fw(codec, fw);
4618 + static struct coef_fw alc256fw[] = {
4619 + WRITE_COEF(0x1b, 0x884b),
4620 + WRITE_COEF(0x45, 0xd089),
4621 + WRITE_COEF(0x1b, 0x084b),
4622 + WRITE_COEF(0x46, 0x0004),
4623 + WRITE_COEF(0x1b, 0x0c4b),
4624 + {}
4625 + };
4626 + switch (codec->core.vendor_id) {
4627 + case 0x10ec0255:
4628 + alc_process_coef_fw(codec, alc255fw);
4629 + break;
4630 + case 0x10ec0256:
4631 + alc_process_coef_fw(codec, alc256fw);
4632 + break;
4633 + }
4634 msleep(30);
4635 }
4636
4637 @@ -5587,6 +5634,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4638 SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
4639 SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
4640 SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
4641 + SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
4642 SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
4643 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
4644 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
4645 @@ -5775,6 +5823,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4646 {0x12, 0x90a60180},
4647 {0x14, 0x90170130},
4648 {0x21, 0x02211040}),
4649 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5565", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4650 + {0x12, 0x90a60180},
4651 + {0x14, 0x90170120},
4652 + {0x21, 0x02211030}),
4653 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4654 {0x12, 0x90a60160},
4655 {0x14, 0x90170120},
4656 @@ -6053,6 +6105,14 @@ static int patch_alc269(struct hda_codec *codec)
4657 case 0x10ec0294:
4658 spec->codec_variant = ALC269_TYPE_ALC294;
4659 break;
4660 + case 0x10ec0700:
4661 + case 0x10ec0701:
4662 + case 0x10ec0703:
4663 + spec->codec_variant = ALC269_TYPE_ALC700;
4664 + spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
4665 + alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
4666 + break;
4667 +
4668 }
4669
4670 if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
4671 @@ -7008,6 +7068,9 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
4672 HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662),
4673 HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662),
4674 HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680),
4675 + HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
4676 + HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
4677 + HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
4678 HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882),
4679 HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
4680 HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
4681 diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
4682 index 674bdf8ecf4f..501849ad0b60 100644
4683 --- a/virt/kvm/arm/hyp/vgic-v2-sr.c
4684 +++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
4685 @@ -93,12 +93,11 @@ static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
4686 if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
4687 continue;
4688
4689 - if (cpu_if->vgic_elrsr & (1UL << i)) {
4690 + if (cpu_if->vgic_elrsr & (1UL << i))
4691 cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
4692 - continue;
4693 - }
4694 + else
4695 + cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
4696
4697 - cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
4698 writel_relaxed(0, base + GICH_LR0 + (i * 4));
4699 }
4700 }
4701 diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
4702 index fe84e1a95dd5..8db197bb6c7a 100644
4703 --- a/virt/kvm/irqchip.c
4704 +++ b/virt/kvm/irqchip.c
4705 @@ -40,7 +40,7 @@ int kvm_irq_map_gsi(struct kvm *kvm,
4706
4707 irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
4708 lockdep_is_held(&kvm->irq_lock));
4709 - if (gsi < irq_rt->nr_rt_entries) {
4710 + if (irq_rt && gsi < irq_rt->nr_rt_entries) {
4711 hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
4712 entries[n] = *e;
4713 ++n;