Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0293-4.9.194-all-fixes.patch



Revision 3608
Fri Aug 14 07:34:29 2020 UTC by niro
File size: 65649 bytes
-added kernel-alx-legacy pkg
1 diff --git a/Makefile b/Makefile
2 index 48f79c6729ad..6e3c81c3bf40 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 193
9 +SUBLEVEL = 194
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
14 index 2fb0cd39a31c..cd6e3615e3d1 100644
15 --- a/arch/arc/kernel/traps.c
16 +++ b/arch/arc/kernel/traps.c
17 @@ -163,3 +163,4 @@ void abort(void)
18 {
19 __asm__ __volatile__("trap_s 5\n");
20 }
21 +EXPORT_SYMBOL(abort);
22 diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
23 index cf65ab8bb004..e5dcbda20129 100644
24 --- a/arch/arm/mach-omap2/omap4-common.c
25 +++ b/arch/arm/mach-omap2/omap4-common.c
26 @@ -131,6 +131,9 @@ static int __init omap4_sram_init(void)
27 struct device_node *np;
28 struct gen_pool *sram_pool;
29
30 + if (!soc_is_omap44xx() && !soc_is_omap54xx())
31 + return 0;
32 +
33 np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
34 if (!np)
35 pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
36 diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
37 index 1ab7096af8e2..f850fc3a91e8 100644
38 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
39 +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
40 @@ -387,7 +387,8 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = {
41 static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = {
42 .rev_offs = 0x0,
43 .sysc_offs = 0x4,
44 - .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET,
45 + .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
46 + SYSC_HAS_RESET_STATUS,
47 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
48 .sysc_fields = &omap_hwmod_sysc_type2,
49 };
50 diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
51 index 1565d6b67163..0fe4a7025e46 100644
52 --- a/arch/arm/mm/init.c
53 +++ b/arch/arm/mm/init.c
54 @@ -192,6 +192,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
55 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
56 int pfn_valid(unsigned long pfn)
57 {
58 + phys_addr_t addr = __pfn_to_phys(pfn);
59 +
60 + if (__phys_to_pfn(addr) != pfn)
61 + return 0;
62 +
63 return memblock_is_map_memory(__pfn_to_phys(pfn));
64 }
65 EXPORT_SYMBOL(pfn_valid);
66 @@ -698,7 +703,8 @@ static void update_sections_early(struct section_perm perms[], int n)
67 if (t->flags & PF_KTHREAD)
68 continue;
69 for_each_thread(t, s)
70 - set_section_perms(perms, n, true, s->mm);
71 + if (s->mm)
72 + set_section_perms(perms, n, true, s->mm);
73 }
74 read_unlock(&tasklist_lock);
75 set_section_perms(perms, n, true, current->active_mm);
76 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
77 index 6cd230434f32..92bcde046b6b 100644
78 --- a/arch/mips/Kconfig
79 +++ b/arch/mips/Kconfig
80 @@ -792,7 +792,6 @@ config SIBYTE_SWARM
81 select SYS_SUPPORTS_HIGHMEM
82 select SYS_SUPPORTS_LITTLE_ENDIAN
83 select ZONE_DMA32 if 64BIT
84 - select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
85
86 config SIBYTE_LITTLESUR
87 bool "Sibyte BCM91250C2-LittleSur"
88 @@ -815,7 +814,6 @@ config SIBYTE_SENTOSA
89 select SYS_HAS_CPU_SB1
90 select SYS_SUPPORTS_BIG_ENDIAN
91 select SYS_SUPPORTS_LITTLE_ENDIAN
92 - select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
93
94 config SIBYTE_BIGSUR
95 bool "Sibyte BCM91480B-BigSur"
96 @@ -829,7 +827,6 @@ config SIBYTE_BIGSUR
97 select SYS_SUPPORTS_HIGHMEM
98 select SYS_SUPPORTS_LITTLE_ENDIAN
99 select ZONE_DMA32 if 64BIT
100 - select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
101
102 config SNI_RM
103 bool "SNI RM200/300/400"
104 diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
105 index 060f23ff1817..258158c34df1 100644
106 --- a/arch/mips/include/asm/smp.h
107 +++ b/arch/mips/include/asm/smp.h
108 @@ -25,7 +25,17 @@ extern cpumask_t cpu_sibling_map[];
109 extern cpumask_t cpu_core_map[];
110 extern cpumask_t cpu_foreign_map[];
111
112 -#define raw_smp_processor_id() (current_thread_info()->cpu)
113 +static inline int raw_smp_processor_id(void)
114 +{
115 +#if defined(__VDSO__)
116 + extern int vdso_smp_processor_id(void)
117 + __compiletime_error("VDSO should not call smp_processor_id()");
118 + return vdso_smp_processor_id();
119 +#else
120 + return current_thread_info()->cpu;
121 +#endif
122 +}
123 +#define raw_smp_processor_id raw_smp_processor_id
124
125 /* Map from cpu id to sequential logical cpu number. This will only
126 not be idempotent when cpus failed to come on-line. */
127 diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile
128 index 3ef3fb658136..b3d6bf23a662 100644
129 --- a/arch/mips/sibyte/common/Makefile
130 +++ b/arch/mips/sibyte/common/Makefile
131 @@ -1,5 +1,4 @@
132 obj-y := cfe.o
133 -obj-$(CONFIG_SWIOTLB) += dma.o
134 obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o
135 obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o
136 obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o
137 diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
138 deleted file mode 100644
139 index eb47a94f3583..000000000000
140 --- a/arch/mips/sibyte/common/dma.c
141 +++ /dev/null
142 @@ -1,14 +0,0 @@
143 -// SPDX-License-Identifier: GPL-2.0+
144 -/*
145 - * DMA support for Broadcom SiByte platforms.
146 - *
147 - * Copyright (c) 2018 Maciej W. Rozycki
148 - */
149 -
150 -#include <linux/swiotlb.h>
151 -#include <asm/bootinfo.h>
152 -
153 -void __init plat_swiotlb_setup(void)
154 -{
155 - swiotlb_init(1);
156 -}
157 diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
158 index 0b845cc7fbdc..247ca2e9add9 100644
159 --- a/arch/mips/vdso/Makefile
160 +++ b/arch/mips/vdso/Makefile
161 @@ -6,7 +6,9 @@ ccflags-vdso := \
162 $(filter -I%,$(KBUILD_CFLAGS)) \
163 $(filter -E%,$(KBUILD_CFLAGS)) \
164 $(filter -mmicromips,$(KBUILD_CFLAGS)) \
165 - $(filter -march=%,$(KBUILD_CFLAGS))
166 + $(filter -march=%,$(KBUILD_CFLAGS)) \
167 + $(filter -m%-float,$(KBUILD_CFLAGS)) \
168 + -D__VDSO__
169 cflags-vdso := $(ccflags-vdso) \
170 $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
171 -O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
172 diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
173 index 44c33ee397a0..2525f23da4be 100644
174 --- a/arch/powerpc/mm/pgtable-radix.c
175 +++ b/arch/powerpc/mm/pgtable-radix.c
176 @@ -287,14 +287,6 @@ void __init radix__early_init_devtree(void)
177 mmu_psize_defs[MMU_PAGE_64K].shift = 16;
178 mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
179 found:
180 -#ifdef CONFIG_SPARSEMEM_VMEMMAP
181 - if (mmu_psize_defs[MMU_PAGE_2M].shift) {
182 - /*
183 - * map vmemmap using 2M if available
184 - */
185 - mmu_vmemmap_psize = MMU_PAGE_2M;
186 - }
187 -#endif /* CONFIG_SPARSEMEM_VMEMMAP */
188 return;
189 }
190
191 @@ -337,7 +329,13 @@ void __init radix__early_init_mmu(void)
192
193 #ifdef CONFIG_SPARSEMEM_VMEMMAP
194 /* vmemmap mapping */
195 - mmu_vmemmap_psize = mmu_virtual_psize;
196 + if (mmu_psize_defs[MMU_PAGE_2M].shift) {
197 + /*
198 + * map vmemmap using 2M if available
199 + */
200 + mmu_vmemmap_psize = MMU_PAGE_2M;
201 + } else
202 + mmu_vmemmap_psize = mmu_virtual_psize;
203 #endif
204 /*
205 * initialize page table size
206 diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
207 index be4db07f70d3..95126d25aed5 100644
208 --- a/arch/s390/kvm/interrupt.c
209 +++ b/arch/s390/kvm/interrupt.c
210 @@ -1652,6 +1652,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
211 case KVM_S390_MCHK:
212 irq->u.mchk.mcic = s390int->parm64;
213 break;
214 + case KVM_S390_INT_PFAULT_INIT:
215 + irq->u.ext.ext_params = s390int->parm;
216 + irq->u.ext.ext_params2 = s390int->parm64;
217 + break;
218 + case KVM_S390_RESTART:
219 + case KVM_S390_INT_CLOCK_COMP:
220 + case KVM_S390_INT_CPU_TIMER:
221 + break;
222 + default:
223 + return -EINVAL;
224 }
225 return 0;
226 }
227 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
228 index 07f571900676..ea20b60edde7 100644
229 --- a/arch/s390/kvm/kvm-s390.c
230 +++ b/arch/s390/kvm/kvm-s390.c
231 @@ -3105,7 +3105,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
232 }
233 case KVM_S390_INTERRUPT: {
234 struct kvm_s390_interrupt s390int;
235 - struct kvm_s390_irq s390irq;
236 + struct kvm_s390_irq s390irq = {};
237
238 r = -EFAULT;
239 if (copy_from_user(&s390int, argp, sizeof(s390int)))
240 diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
241 index 896344b6e036..9b15a1dc6628 100644
242 --- a/arch/s390/net/bpf_jit_comp.c
243 +++ b/arch/s390/net/bpf_jit_comp.c
244 @@ -881,7 +881,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
245 break;
246 case BPF_ALU64 | BPF_NEG: /* dst = -dst */
247 /* lcgr %dst,%dst */
248 - EMIT4(0xb9130000, dst_reg, dst_reg);
249 + EMIT4(0xb9030000, dst_reg, dst_reg);
250 break;
251 /*
252 * BPF_FROM_BE/LE
253 @@ -1062,8 +1062,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
254 /* llgf %w1,map.max_entries(%b2) */
255 EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
256 offsetof(struct bpf_array, map.max_entries));
257 - /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
258 - EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
259 + /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
260 + EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
261 REG_W1, 0, 0xa);
262
263 /*
264 @@ -1089,8 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
265 * goto out;
266 */
267
268 - /* sllg %r1,%b3,3: %r1 = index * 8 */
269 - EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
270 + /* llgfr %r1,%b3: %r1 = (u32) index */
271 + EMIT4(0xb9160000, REG_1, BPF_REG_3);
272 + /* sllg %r1,%r1,3: %r1 *= 8 */
273 + EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
274 /* lg %r1,prog(%b2,%r1) */
275 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
276 REG_1, offsetof(struct bpf_array, ptrs));
277 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
278 index 2996a1d0a410..940ed27a6212 100644
279 --- a/arch/x86/Makefile
280 +++ b/arch/x86/Makefile
281 @@ -38,6 +38,7 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -D__KERNEL__ \
282
283 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
284 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
285 +REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
286 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
287 export REALMODE_CFLAGS
288
289 diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
290 index fd4484ae3ffc..112e3c4636b4 100644
291 --- a/arch/x86/events/amd/ibs.c
292 +++ b/arch/x86/events/amd/ibs.c
293 @@ -671,10 +671,17 @@ fail:
294
295 throttle = perf_event_overflow(event, &data, &regs);
296 out:
297 - if (throttle)
298 + if (throttle) {
299 perf_ibs_stop(event, 0);
300 - else
301 - perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
302 + } else {
303 + period >>= 4;
304 +
305 + if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
306 + (*config & IBS_OP_CNT_CTL))
307 + period |= *config & IBS_OP_CUR_CNT_RAND;
308 +
309 + perf_ibs_enable_event(perf_ibs, hwc, period);
310 + }
311
312 perf_event_update_userpage(event);
313
314 diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
315 index e98e238d3775..55e362f9dbfa 100644
316 --- a/arch/x86/events/intel/core.c
317 +++ b/arch/x86/events/intel/core.c
318 @@ -3075,6 +3075,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left)
319 return left;
320 }
321
322 +static u64 nhm_limit_period(struct perf_event *event, u64 left)
323 +{
324 + return max(left, 32ULL);
325 +}
326 +
327 PMU_FORMAT_ATTR(event, "config:0-7" );
328 PMU_FORMAT_ATTR(umask, "config:8-15" );
329 PMU_FORMAT_ATTR(edge, "config:18" );
330 @@ -3734,6 +3739,7 @@ __init int intel_pmu_init(void)
331 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
332 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
333 x86_pmu.extra_regs = intel_nehalem_extra_regs;
334 + x86_pmu.limit_period = nhm_limit_period;
335
336 x86_pmu.cpu_events = nhm_events_attrs;
337
338 diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
339 index 0232b5a2a2d9..588d8fbd1e6d 100644
340 --- a/arch/x86/include/asm/bootparam_utils.h
341 +++ b/arch/x86/include/asm/bootparam_utils.h
342 @@ -71,6 +71,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
343 BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
344 BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
345 BOOT_PARAM_PRESERVE(hdr),
346 + BOOT_PARAM_PRESERVE(e820_map),
347 BOOT_PARAM_PRESERVE(eddbuf),
348 };
349
350 diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
351 index f353061bba1d..81d5ea71bbe9 100644
352 --- a/arch/x86/include/asm/perf_event.h
353 +++ b/arch/x86/include/asm/perf_event.h
354 @@ -200,16 +200,20 @@ struct x86_pmu_capability {
355 #define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
356 #define IBSCTL_LVT_OFFSET_MASK 0x0F
357
358 -/* ibs fetch bits/masks */
359 +/* IBS fetch bits/masks */
360 #define IBS_FETCH_RAND_EN (1ULL<<57)
361 #define IBS_FETCH_VAL (1ULL<<49)
362 #define IBS_FETCH_ENABLE (1ULL<<48)
363 #define IBS_FETCH_CNT 0xFFFF0000ULL
364 #define IBS_FETCH_MAX_CNT 0x0000FFFFULL
365
366 -/* ibs op bits/masks */
367 -/* lower 4 bits of the current count are ignored: */
368 -#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32)
369 +/*
370 + * IBS op bits/masks
371 + * The lower 7 bits of the current count are random bits
372 + * preloaded by hardware and ignored in software
373 + */
374 +#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
375 +#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
376 #define IBS_OP_CNT_CTL (1ULL<<19)
377 #define IBS_OP_VAL (1ULL<<18)
378 #define IBS_OP_ENABLE (1ULL<<17)
379 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
380 index 2177c7551ff7..9db8d8758ed3 100644
381 --- a/arch/x86/include/asm/uaccess.h
382 +++ b/arch/x86/include/asm/uaccess.h
383 @@ -438,8 +438,10 @@ do { \
384 ({ \
385 int __gu_err; \
386 __inttype(*(ptr)) __gu_val; \
387 + __typeof__(ptr) __gu_ptr = (ptr); \
388 + __typeof__(size) __gu_size = (size); \
389 __uaccess_begin_nospec(); \
390 - __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
391 + __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
392 __uaccess_end(); \
393 (x) = (__force __typeof__(*(ptr)))__gu_val; \
394 __builtin_expect(__gu_err, 0); \
395 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
396 index d34629d70421..09dd95cabfc2 100644
397 --- a/arch/x86/kernel/apic/io_apic.c
398 +++ b/arch/x86/kernel/apic/io_apic.c
399 @@ -2346,7 +2346,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
400 * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
401 * gsi_top if ioapic_dynirq_base hasn't been initialized yet.
402 */
403 - return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
404 + if (!ioapic_initialized)
405 + return gsi_top;
406 + /*
407 + * For DT enabled machines ioapic_dynirq_base is irrelevant and not
408 + * updated. So simply return @from if ioapic_dynirq_base == 0.
409 + */
410 + return ioapic_dynirq_base ? : from;
411 }
412
413 #ifdef CONFIG_X86_32
414 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
415 index 8feb4f7e2e59..7ab13ad53a59 100644
416 --- a/arch/x86/kvm/vmx.c
417 +++ b/arch/x86/kvm/vmx.c
418 @@ -7639,6 +7639,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
419 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
420 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
421 gva_t gva = 0;
422 + struct x86_exception e;
423
424 if (!nested_vmx_check_permission(vcpu) ||
425 !nested_vmx_check_vmcs12(vcpu))
426 @@ -7665,8 +7666,10 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
427 vmx_instruction_info, true, &gva))
428 return 1;
429 /* _system ok, as nested_vmx_check_permission verified cpl=0 */
430 - kvm_write_guest_virt_system(vcpu, gva, &field_value,
431 - (is_long_mode(vcpu) ? 8 : 4), NULL);
432 + if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
433 + (is_long_mode(vcpu) ? 8 : 4),
434 + NULL))
435 + kvm_inject_page_fault(vcpu, &e);
436 }
437
438 nested_vmx_succeed(vcpu);
439 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
440 index bbecbf2b1f5e..aabfc141d2f1 100644
441 --- a/arch/x86/kvm/x86.c
442 +++ b/arch/x86/kvm/x86.c
443 @@ -4620,6 +4620,13 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
444 /* kvm_write_guest_virt_system can pull in tons of pages. */
445 vcpu->arch.l1tf_flush_l1d = true;
446
447 + /*
448 + * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
449 + * is returned, but our callers are not ready for that and they blindly
450 + * call kvm_inject_page_fault. Ensure that they at least do not leak
451 + * uninitialized kernel stack memory into cr2 and error code.
452 + */
453 + memset(exception, 0, sizeof(*exception));
454 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
455 PFERR_WRITE_MASK, exception);
456 }
457 diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
458 index 31c60101a69a..7fa840170151 100644
459 --- a/drivers/atm/Kconfig
460 +++ b/drivers/atm/Kconfig
461 @@ -199,7 +199,7 @@ config ATM_NICSTAR_USE_SUNI
462 make the card work).
463
464 config ATM_NICSTAR_USE_IDT77105
465 - bool "Use IDT77015 PHY driver (25Mbps)"
466 + bool "Use IDT77105 PHY driver (25Mbps)"
467 depends on ATM_NICSTAR
468 help
469 Support for the PHYsical layer chip in ForeRunner LE25 cards. In
470 diff --git a/drivers/base/core.c b/drivers/base/core.c
471 index 901aec4bb01d..3dc483f00060 100644
472 --- a/drivers/base/core.c
473 +++ b/drivers/base/core.c
474 @@ -857,12 +857,63 @@ static inline struct kobject *get_glue_dir(struct device *dev)
475 */
476 static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
477 {
478 + unsigned int ref;
479 +
480 /* see if we live in a "glue" directory */
481 if (!live_in_glue_dir(glue_dir, dev))
482 return;
483
484 mutex_lock(&gdp_mutex);
485 - if (!kobject_has_children(glue_dir))
486 + /**
487 + * There is a race condition between removing glue directory
488 + * and adding a new device under the glue directory.
489 + *
490 + * CPU1: CPU2:
491 + *
492 + * device_add()
493 + * get_device_parent()
494 + * class_dir_create_and_add()
495 + * kobject_add_internal()
496 + * create_dir() // create glue_dir
497 + *
498 + * device_add()
499 + * get_device_parent()
500 + * kobject_get() // get glue_dir
501 + *
502 + * device_del()
503 + * cleanup_glue_dir()
504 + * kobject_del(glue_dir)
505 + *
506 + * kobject_add()
507 + * kobject_add_internal()
508 + * create_dir() // in glue_dir
509 + * sysfs_create_dir_ns()
510 + * kernfs_create_dir_ns(sd)
511 + *
512 + * sysfs_remove_dir() // glue_dir->sd=NULL
513 + * sysfs_put() // free glue_dir->sd
514 + *
515 + * // sd is freed
516 + * kernfs_new_node(sd)
517 + * kernfs_get(glue_dir)
518 + * kernfs_add_one()
519 + * kernfs_put()
520 + *
521 + * Before CPU1 remove last child device under glue dir, if CPU2 add
522 + * a new device under glue dir, the glue_dir kobject reference count
523 + * will be increase to 2 in kobject_get(k). And CPU2 has been called
524 + * kernfs_create_dir_ns(). Meanwhile, CPU1 call sysfs_remove_dir()
525 + * and sysfs_put(). This result in glue_dir->sd is freed.
526 + *
527 + * Then the CPU2 will see a stale "empty" but still potentially used
528 + * glue dir around in kernfs_new_node().
529 + *
530 + * In order to avoid this happening, we also should make sure that
531 + * kernfs_node for glue_dir is released in CPU1 only when refcount
532 + * for glue_dir kobj is 1.
533 + */
534 + ref = atomic_read(&glue_dir->kref.refcount);
535 + if (!kobject_has_children(glue_dir) && !--ref)
536 kobject_del(glue_dir);
537 kobject_put(glue_dir);
538 mutex_unlock(&gdp_mutex);
539 diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
540 index 6930abef42b3..ece4f706b38f 100644
541 --- a/drivers/block/floppy.c
542 +++ b/drivers/block/floppy.c
543 @@ -3784,7 +3784,7 @@ static int compat_getdrvprm(int drive,
544 v.native_format = UDP->native_format;
545 mutex_unlock(&floppy_mutex);
546
547 - if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
548 + if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
549 return -EFAULT;
550 return 0;
551 }
552 @@ -3820,7 +3820,7 @@ static int compat_getdrvstat(int drive, bool poll,
553 v.bufblocks = UDRS->bufblocks;
554 mutex_unlock(&floppy_mutex);
555
556 - if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
557 + if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
558 return -EFAULT;
559 return 0;
560 Eintr:
561 diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
562 index fe7d9ed1d436..b0a18bc1a27f 100644
563 --- a/drivers/clk/rockchip/clk-mmc-phase.c
564 +++ b/drivers/clk/rockchip/clk-mmc-phase.c
565 @@ -59,10 +59,8 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
566 u32 delay_num = 0;
567
568 /* See the comment for rockchip_mmc_set_phase below */
569 - if (!rate) {
570 - pr_err("%s: invalid clk rate\n", __func__);
571 + if (!rate)
572 return -EINVAL;
573 - }
574
575 raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
576
577 diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
578 index ea8595d2c3d8..30f8bbe757b7 100644
579 --- a/drivers/crypto/talitos.c
580 +++ b/drivers/crypto/talitos.c
581 @@ -943,11 +943,13 @@ static void talitos_sg_unmap(struct device *dev,
582
583 static void ipsec_esp_unmap(struct device *dev,
584 struct talitos_edesc *edesc,
585 - struct aead_request *areq)
586 + struct aead_request *areq, bool encrypt)
587 {
588 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
589 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
590 unsigned int ivsize = crypto_aead_ivsize(aead);
591 + unsigned int authsize = crypto_aead_authsize(aead);
592 + unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
593
594 if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)
595 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
596 @@ -956,7 +958,7 @@ static void ipsec_esp_unmap(struct device *dev,
597 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
598 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
599
600 - talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
601 + talitos_sg_unmap(dev, edesc, areq->src, areq->dst, cryptlen,
602 areq->assoclen);
603
604 if (edesc->dma_len)
605 @@ -967,7 +969,7 @@ static void ipsec_esp_unmap(struct device *dev,
606 unsigned int dst_nents = edesc->dst_nents ? : 1;
607
608 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
609 - areq->assoclen + areq->cryptlen - ivsize);
610 + areq->assoclen + cryptlen - ivsize);
611 }
612 }
613
614 @@ -988,7 +990,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
615
616 edesc = container_of(desc, struct talitos_edesc, desc);
617
618 - ipsec_esp_unmap(dev, edesc, areq);
619 + ipsec_esp_unmap(dev, edesc, areq, true);
620
621 /* copy the generated ICV to dst */
622 if (edesc->icv_ool) {
623 @@ -1020,7 +1022,7 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
624
625 edesc = container_of(desc, struct talitos_edesc, desc);
626
627 - ipsec_esp_unmap(dev, edesc, req);
628 + ipsec_esp_unmap(dev, edesc, req, false);
629
630 if (!err) {
631 char icvdata[SHA512_DIGEST_SIZE];
632 @@ -1066,7 +1068,7 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
633
634 edesc = container_of(desc, struct talitos_edesc, desc);
635
636 - ipsec_esp_unmap(dev, edesc, req);
637 + ipsec_esp_unmap(dev, edesc, req, false);
638
639 /* check ICV auth status */
640 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
641 @@ -1173,6 +1175,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
642 * fill in and submit ipsec_esp descriptor
643 */
644 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
645 + bool encrypt,
646 void (*callback)(struct device *dev,
647 struct talitos_desc *desc,
648 void *context, int error))
649 @@ -1182,7 +1185,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
650 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
651 struct device *dev = ctx->dev;
652 struct talitos_desc *desc = &edesc->desc;
653 - unsigned int cryptlen = areq->cryptlen;
654 + unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
655 unsigned int ivsize = crypto_aead_ivsize(aead);
656 int tbl_off = 0;
657 int sg_count, ret;
658 @@ -1324,7 +1327,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
659
660 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
661 if (ret != -EINPROGRESS) {
662 - ipsec_esp_unmap(dev, edesc, areq);
663 + ipsec_esp_unmap(dev, edesc, areq, encrypt);
664 kfree(edesc);
665 }
666 return ret;
667 @@ -1433,9 +1436,10 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
668 unsigned int authsize = crypto_aead_authsize(authenc);
669 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
670 unsigned int ivsize = crypto_aead_ivsize(authenc);
671 + unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
672
673 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
674 - iv, areq->assoclen, areq->cryptlen,
675 + iv, areq->assoclen, cryptlen,
676 authsize, ivsize, icv_stashing,
677 areq->base.flags, encrypt);
678 }
679 @@ -1454,7 +1458,7 @@ static int aead_encrypt(struct aead_request *req)
680 /* set encrypt */
681 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
682
683 - return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
684 + return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
685 }
686
687 static int aead_decrypt(struct aead_request *req)
688 @@ -1466,14 +1470,13 @@ static int aead_decrypt(struct aead_request *req)
689 struct talitos_edesc *edesc;
690 void *icvdata;
691
692 - req->cryptlen -= authsize;
693 -
694 /* allocate extended descriptor */
695 edesc = aead_edesc_alloc(req, req->iv, 1, false);
696 if (IS_ERR(edesc))
697 return PTR_ERR(edesc);
698
699 - if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
700 + if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
701 + (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
702 ((!edesc->src_nents && !edesc->dst_nents) ||
703 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
704
705 @@ -1485,7 +1488,8 @@ static int aead_decrypt(struct aead_request *req)
706 /* reset integrity check result bits */
707 edesc->desc.hdr_lo = 0;
708
709 - return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
710 + return ipsec_esp(edesc, req, false,
711 + ipsec_esp_decrypt_hwauth_done);
712 }
713
714 /* Have to check the ICV with software */
715 @@ -1501,7 +1505,7 @@ static int aead_decrypt(struct aead_request *req)
716 sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
717 req->assoclen + req->cryptlen - authsize);
718
719 - return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
720 + return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
721 }
722
723 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
724 @@ -1528,6 +1532,18 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
725 return 0;
726 }
727
728 +static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
729 + const u8 *key, unsigned int keylen)
730 +{
731 + if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
732 + keylen == AES_KEYSIZE_256)
733 + return ablkcipher_setkey(cipher, key, keylen);
734 +
735 + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
736 +
737 + return -EINVAL;
738 +}
739 +
740 static void common_nonsnoop_unmap(struct device *dev,
741 struct talitos_edesc *edesc,
742 struct ablkcipher_request *areq)
743 @@ -1656,6 +1672,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
744 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
745 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
746 struct talitos_edesc *edesc;
747 + unsigned int blocksize =
748 + crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
749 +
750 + if (!areq->nbytes)
751 + return 0;
752 +
753 + if (areq->nbytes % blocksize)
754 + return -EINVAL;
755
756 /* allocate extended descriptor */
757 edesc = ablkcipher_edesc_alloc(areq, true);
758 @@ -1673,6 +1697,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
759 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
760 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
761 struct talitos_edesc *edesc;
762 + unsigned int blocksize =
763 + crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
764 +
765 + if (!areq->nbytes)
766 + return 0;
767 +
768 + if (areq->nbytes % blocksize)
769 + return -EINVAL;
770
771 /* allocate extended descriptor */
772 edesc = ablkcipher_edesc_alloc(areq, false);
773 @@ -2621,6 +2653,7 @@ static struct talitos_alg_template driver_algs[] = {
774 .min_keysize = AES_MIN_KEY_SIZE,
775 .max_keysize = AES_MAX_KEY_SIZE,
776 .ivsize = AES_BLOCK_SIZE,
777 + .setkey = ablkcipher_aes_setkey,
778 }
779 },
780 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
781 @@ -2631,13 +2664,13 @@ static struct talitos_alg_template driver_algs[] = {
782 .alg.crypto = {
783 .cra_name = "ctr(aes)",
784 .cra_driver_name = "ctr-aes-talitos",
785 - .cra_blocksize = AES_BLOCK_SIZE,
786 + .cra_blocksize = 1,
787 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
788 CRYPTO_ALG_ASYNC,
789 .cra_ablkcipher = {
790 .min_keysize = AES_MIN_KEY_SIZE,
791 .max_keysize = AES_MAX_KEY_SIZE,
792 - .ivsize = AES_BLOCK_SIZE,
793 + .setkey = ablkcipher_aes_setkey,
794 }
795 },
796 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
797 diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
798 index 6b16ce390dce..9f901f16bddc 100644
799 --- a/drivers/dma/omap-dma.c
800 +++ b/drivers/dma/omap-dma.c
801 @@ -1429,8 +1429,10 @@ static int omap_dma_probe(struct platform_device *pdev)
802
803 rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
804 IRQF_SHARED, "omap-dma-engine", od);
805 - if (rc)
806 + if (rc) {
807 + omap_dma_free(od);
808 return rc;
809 + }
810 }
811
812 if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
813 diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
814 index 8c3c588834d2..a7e1f6e17e3d 100644
815 --- a/drivers/dma/ti-dma-crossbar.c
816 +++ b/drivers/dma/ti-dma-crossbar.c
817 @@ -395,8 +395,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
818
819 ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
820 nelm * 2);
821 - if (ret)
822 + if (ret) {
823 + kfree(rsv_events);
824 return ret;
825 + }
826
827 for (i = 0; i < nelm; i++) {
828 ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
829 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
830 index 3b0d77b2fdc5..6008a30a17d0 100644
831 --- a/drivers/gpio/gpiolib.c
832 +++ b/drivers/gpio/gpiolib.c
833 @@ -426,12 +426,23 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
834 struct linehandle_state *lh;
835 struct file *file;
836 int fd, i, count = 0, ret;
837 + u32 lflags;
838
839 if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
840 return -EFAULT;
841 if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
842 return -EINVAL;
843
844 + lflags = handlereq.flags;
845 +
846 + /*
847 + * Do not allow both INPUT & OUTPUT flags to be set as they are
848 + * contradictory.
849 + */
850 + if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
851 + (lflags & GPIOHANDLE_REQUEST_OUTPUT))
852 + return -EINVAL;
853 +
854 lh = kzalloc(sizeof(*lh), GFP_KERNEL);
855 if (!lh)
856 return -ENOMEM;
857 @@ -452,7 +463,6 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
858 /* Request each GPIO */
859 for (i = 0; i < handlereq.lines; i++) {
860 u32 offset = handlereq.lineoffsets[i];
861 - u32 lflags = handlereq.flags;
862 struct gpio_desc *desc;
863
864 if (offset >= gdev->ngpio) {
865 @@ -787,7 +797,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
866 }
867
868 /* This is just wrong: we don't look for events on output lines */
869 - if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
870 + if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
871 + (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
872 + (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) {
873 ret = -EINVAL;
874 goto out_free_label;
875 }
876 @@ -801,10 +813,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
877
878 if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
879 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
880 - if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
881 - set_bit(FLAG_OPEN_DRAIN, &desc->flags);
882 - if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
883 - set_bit(FLAG_OPEN_SOURCE, &desc->flags);
884
885 ret = gpiod_direction_input(desc);
886 if (ret)
887 diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
888 index 48dfc163233e..286587607931 100644
889 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
890 +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
891 @@ -423,12 +423,15 @@ static int mtk_drm_probe(struct platform_device *pdev)
892 comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
893 if (!comp) {
894 ret = -ENOMEM;
895 + of_node_put(node);
896 goto err_node;
897 }
898
899 ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
900 - if (ret)
901 + if (ret) {
902 + of_node_put(node);
903 goto err_node;
904 + }
905
906 private->ddp_comp[comp_id] = comp;
907 }
908 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
909 index c1233d0288a0..dd7880de7e4e 100644
910 --- a/drivers/iommu/amd_iommu.c
911 +++ b/drivers/iommu/amd_iommu.c
912 @@ -1321,18 +1321,21 @@ static void domain_flush_devices(struct protection_domain *domain)
913 * another level increases the size of the address space by 9 bits to a size up
914 * to 64 bits.
915 */
916 -static bool increase_address_space(struct protection_domain *domain,
917 +static void increase_address_space(struct protection_domain *domain,
918 gfp_t gfp)
919 {
920 + unsigned long flags;
921 u64 *pte;
922
923 - if (domain->mode == PAGE_MODE_6_LEVEL)
924 + spin_lock_irqsave(&domain->lock, flags);
925 +
926 + if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
927 /* address space already 64 bit large */
928 - return false;
929 + goto out;
930
931 pte = (void *)get_zeroed_page(gfp);
932 if (!pte)
933 - return false;
934 + goto out;
935
936 *pte = PM_LEVEL_PDE(domain->mode,
937 virt_to_phys(domain->pt_root));
938 @@ -1340,7 +1343,10 @@ static bool increase_address_space(struct protection_domain *domain,
939 domain->mode += 1;
940 domain->updated = true;
941
942 - return true;
943 +out:
944 + spin_unlock_irqrestore(&domain->lock, flags);
945 +
946 + return;
947 }
948
949 static u64 *alloc_pte(struct protection_domain *domain,
950 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
951 index 6a2df3297e77..691ad069444d 100644
952 --- a/drivers/isdn/capi/capi.c
953 +++ b/drivers/isdn/capi/capi.c
954 @@ -687,6 +687,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
955 if (!cdev->ap.applid)
956 return -ENODEV;
957
958 + if (count < CAPIMSG_BASELEN)
959 + return -EINVAL;
960 +
961 skb = alloc_skb(count, GFP_USER);
962 if (!skb)
963 return -ENOMEM;
964 @@ -697,7 +700,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
965 }
966 mlen = CAPIMSG_LEN(skb->data);
967 if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
968 - if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
969 + if (count < CAPI_DATA_B3_REQ_LEN ||
970 + (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
971 kfree_skb(skb);
972 return -EINVAL;
973 }
974 @@ -710,6 +714,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
975 CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
976
977 if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
978 + if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
979 + kfree_skb(skb);
980 + return -EINVAL;
981 + }
982 mutex_lock(&cdev->lock);
983 capincci_free(cdev, CAPIMSG_NCCI(skb->data));
984 mutex_unlock(&cdev->lock);
985 diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
986 index 4706628a3ed5..10bccce22858 100644
987 --- a/drivers/media/usb/dvb-usb/technisat-usb2.c
988 +++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
989 @@ -612,10 +612,9 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
990 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
991 {
992 struct technisat_usb2_state *state = d->priv;
993 - u8 *buf = state->buf;
994 - u8 *b;
995 - int ret;
996 struct ir_raw_event ev;
997 + u8 *buf = state->buf;
998 + int i, ret;
999
1000 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
1001 buf[1] = 0x08;
1002 @@ -651,26 +650,25 @@ unlock:
1003 return 0; /* no key pressed */
1004
1005 /* decoding */
1006 - b = buf+1;
1007
1008 #if 0
1009 deb_rc("RC: %d ", ret);
1010 - debug_dump(b, ret, deb_rc);
1011 + debug_dump(buf + 1, ret, deb_rc);
1012 #endif
1013
1014 ev.pulse = 0;
1015 - while (1) {
1016 - ev.pulse = !ev.pulse;
1017 - ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000;
1018 - ir_raw_event_store(d->rc_dev, &ev);
1019 -
1020 - b++;
1021 - if (*b == 0xff) {
1022 + for (i = 1; i < ARRAY_SIZE(state->buf); i++) {
1023 + if (buf[i] == 0xff) {
1024 ev.pulse = 0;
1025 ev.duration = 888888*2;
1026 ir_raw_event_store(d->rc_dev, &ev);
1027 break;
1028 }
1029 +
1030 + ev.pulse = !ev.pulse;
1031 + ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR *
1032 + FIRMWARE_CLOCK_TICK) / 1000;
1033 + ir_raw_event_store(d->rc_dev, &ev);
1034 }
1035
1036 ir_raw_event_handle(d->rc_dev);
1037 diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
1038 index ee88ae83230c..185c8079d0f9 100644
1039 --- a/drivers/media/usb/tm6000/tm6000-dvb.c
1040 +++ b/drivers/media/usb/tm6000/tm6000-dvb.c
1041 @@ -111,6 +111,7 @@ static void tm6000_urb_received(struct urb *urb)
1042 printk(KERN_ERR "tm6000: error %s\n", __func__);
1043 kfree(urb->transfer_buffer);
1044 usb_free_urb(urb);
1045 + dev->dvb->bulk_urb = NULL;
1046 }
1047 }
1048 }
1049 @@ -141,6 +142,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
1050 dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
1051 if (dvb->bulk_urb->transfer_buffer == NULL) {
1052 usb_free_urb(dvb->bulk_urb);
1053 + dvb->bulk_urb = NULL;
1054 printk(KERN_ERR "tm6000: couldn't allocate transfer buffer!\n");
1055 return -ENOMEM;
1056 }
1057 @@ -168,6 +170,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
1058
1059 kfree(dvb->bulk_urb->transfer_buffer);
1060 usb_free_urb(dvb->bulk_urb);
1061 + dvb->bulk_urb = NULL;
1062 return ret;
1063 }
1064
1065 diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
1066 index 5223a2182ee4..ca95ae00215e 100644
1067 --- a/drivers/mtd/nand/mtk_nand.c
1068 +++ b/drivers/mtd/nand/mtk_nand.c
1069 @@ -810,19 +810,21 @@ static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1070 return ret & NAND_STATUS_FAIL ? -EIO : 0;
1071 }
1072
1073 -static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
1074 +static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
1075 + u32 sectors)
1076 {
1077 struct nand_chip *chip = mtd_to_nand(mtd);
1078 struct mtk_nfc *nfc = nand_get_controller_data(chip);
1079 struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1080 struct mtk_ecc_stats stats;
1081 + u32 reg_size = mtk_nand->fdm.reg_size;
1082 int rc, i;
1083
1084 rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
1085 if (rc) {
1086 memset(buf, 0xff, sectors * chip->ecc.size);
1087 for (i = 0; i < sectors; i++)
1088 - memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
1089 + memset(oob_ptr(chip, start + i), 0xff, reg_size);
1090 return 0;
1091 }
1092
1093 @@ -842,7 +844,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1094 u32 spare = mtk_nand->spare_per_sector;
1095 u32 column, sectors, start, end, reg;
1096 dma_addr_t addr;
1097 - int bitflips;
1098 + int bitflips = 0;
1099 size_t len;
1100 u8 *buf;
1101 int rc;
1102 @@ -910,14 +912,11 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1103 if (rc < 0) {
1104 dev_err(nfc->dev, "subpage done timeout\n");
1105 bitflips = -EIO;
1106 - } else {
1107 - bitflips = 0;
1108 - if (!raw) {
1109 - rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
1110 - bitflips = rc < 0 ? -ETIMEDOUT :
1111 - mtk_nfc_update_ecc_stats(mtd, buf, sectors);
1112 - mtk_nfc_read_fdm(chip, start, sectors);
1113 - }
1114 + } else if (!raw) {
1115 + rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
1116 + bitflips = rc < 0 ? -ETIMEDOUT :
1117 + mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
1118 + mtk_nfc_read_fdm(chip, start, sectors);
1119 }
1120
1121 dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
1122 diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
1123 index 59dbecd19c93..49f692907a30 100644
1124 --- a/drivers/net/ethernet/marvell/sky2.c
1125 +++ b/drivers/net/ethernet/marvell/sky2.c
1126 @@ -4946,6 +4946,13 @@ static const struct dmi_system_id msi_blacklist[] = {
1127 DMI_MATCH(DMI_BOARD_NAME, "P6T"),
1128 },
1129 },
1130 + {
1131 + .ident = "ASUS P6X",
1132 + .matches = {
1133 + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1134 + DMI_MATCH(DMI_BOARD_NAME, "P6X"),
1135 + },
1136 + },
1137 {}
1138 };
1139
1140 diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
1141 index a769196628d9..708117fc6f73 100644
1142 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c
1143 +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
1144 @@ -958,7 +958,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
1145 &drv_version);
1146 if (rc) {
1147 DP_NOTICE(cdev, "Failed sending drv version command\n");
1148 - return rc;
1149 + goto err4;
1150 }
1151 }
1152
1153 @@ -966,6 +966,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
1154
1155 return 0;
1156
1157 +err4:
1158 + qed_ll2_dealloc_if(cdev);
1159 err3:
1160 qed_hw_stop(cdev);
1161 err2:
1162 diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
1163 index c2bd5378ffda..3527962f0bda 100644
1164 --- a/drivers/net/ethernet/seeq/sgiseeq.c
1165 +++ b/drivers/net/ethernet/seeq/sgiseeq.c
1166 @@ -792,15 +792,16 @@ static int sgiseeq_probe(struct platform_device *pdev)
1167 printk(KERN_ERR "Sgiseeq: Cannot register net device, "
1168 "aborting.\n");
1169 err = -ENODEV;
1170 - goto err_out_free_page;
1171 + goto err_out_free_attrs;
1172 }
1173
1174 printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
1175
1176 return 0;
1177
1178 -err_out_free_page:
1179 - free_page((unsigned long) sp->srings);
1180 +err_out_free_attrs:
1181 + dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
1182 + sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
1183 err_out_free_dev:
1184 free_netdev(dev);
1185
1186 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1187 index 36136a147867..17be1f6a813f 100644
1188 --- a/drivers/net/tun.c
1189 +++ b/drivers/net/tun.c
1190 @@ -627,7 +627,8 @@ static void tun_detach_all(struct net_device *dev)
1191 module_put(THIS_MODULE);
1192 }
1193
1194 -static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
1195 +static int tun_attach(struct tun_struct *tun, struct file *file,
1196 + bool skip_filter, bool publish_tun)
1197 {
1198 struct tun_file *tfile = file->private_data;
1199 struct net_device *dev = tun->dev;
1200 @@ -669,7 +670,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
1201
1202 tfile->queue_index = tun->numqueues;
1203 tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
1204 - rcu_assign_pointer(tfile->tun, tun);
1205 + if (publish_tun)
1206 + rcu_assign_pointer(tfile->tun, tun);
1207 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
1208 tun->numqueues++;
1209
1210 @@ -1751,7 +1753,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1211 if (err < 0)
1212 return err;
1213
1214 - err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
1215 + err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, true);
1216 if (err < 0)
1217 return err;
1218
1219 @@ -1839,13 +1841,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1220 NETIF_F_HW_VLAN_STAG_TX);
1221
1222 INIT_LIST_HEAD(&tun->disabled);
1223 - err = tun_attach(tun, file, false);
1224 + err = tun_attach(tun, file, false, false);
1225 if (err < 0)
1226 goto err_free_flow;
1227
1228 err = register_netdevice(tun->dev);
1229 if (err < 0)
1230 goto err_detach;
1231 + /* free_netdev() won't check refcnt, to aovid race
1232 + * with dev_put() we need publish tun after registration.
1233 + */
1234 + rcu_assign_pointer(tfile->tun, tun);
1235 }
1236
1237 netif_carrier_on(tun->dev);
1238 @@ -1989,7 +1995,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
1239 ret = security_tun_dev_attach_queue(tun->security);
1240 if (ret < 0)
1241 goto unlock;
1242 - ret = tun_attach(tun, file, false);
1243 + ret = tun_attach(tun, file, false, true);
1244 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
1245 tun = rtnl_dereference(tfile->tun);
1246 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
1247 diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
1248 index 99424c87b464..8f03cc52ddda 100644
1249 --- a/drivers/net/usb/cdc_ether.c
1250 +++ b/drivers/net/usb/cdc_ether.c
1251 @@ -212,9 +212,16 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
1252 goto bad_desc;
1253 }
1254 skip:
1255 - if ( rndis &&
1256 - header.usb_cdc_acm_descriptor &&
1257 - header.usb_cdc_acm_descriptor->bmCapabilities) {
1258 + /* Communcation class functions with bmCapabilities are not
1259 + * RNDIS. But some Wireless class RNDIS functions use
1260 + * bmCapabilities for their own purpose. The failsafe is
1261 + * therefore applied only to Communication class RNDIS
1262 + * functions. The rndis test is redundant, but a cheap
1263 + * optimization.
1264 + */
1265 + if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
1266 + header.usb_cdc_acm_descriptor &&
1267 + header.usb_cdc_acm_descriptor->bmCapabilities) {
1268 dev_dbg(&intf->dev,
1269 "ACM capabilities %02x, not really RNDIS?\n",
1270 header.usb_cdc_acm_descriptor->bmCapabilities);
1271 diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
1272 index 02e29562d254..15dc70c11857 100644
1273 --- a/drivers/net/usb/r8152.c
1274 +++ b/drivers/net/usb/r8152.c
1275 @@ -689,8 +689,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
1276 ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
1277 RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
1278 value, index, tmp, size, 500);
1279 + if (ret < 0)
1280 + memset(data, 0xff, size);
1281 + else
1282 + memcpy(data, tmp, size);
1283
1284 - memcpy(data, tmp, size);
1285 kfree(tmp);
1286
1287 return ret;
1288 diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
1289 index 0f977dc556ca..c67e08fa1aaf 100644
1290 --- a/drivers/net/wireless/marvell/mwifiex/ie.c
1291 +++ b/drivers/net/wireless/marvell/mwifiex/ie.c
1292 @@ -240,6 +240,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
1293 }
1294
1295 vs_ie = (struct ieee_types_header *)vendor_ie;
1296 + if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
1297 + IEEE_MAX_IE_SIZE)
1298 + return -EINVAL;
1299 memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
1300 vs_ie, vs_ie->len + 2);
1301 le16_add_cpu(&ie->ie_length, vs_ie->len + 2);
1302 diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
1303 index a7e9f544f219..f2ef1464e20c 100644
1304 --- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
1305 +++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
1306 @@ -287,6 +287,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
1307
1308 rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
1309 if (rate_ie) {
1310 + if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
1311 + return;
1312 memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
1313 rate_len = rate_ie->len;
1314 }
1315 @@ -294,8 +296,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
1316 rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
1317 params->beacon.tail,
1318 params->beacon.tail_len);
1319 - if (rate_ie)
1320 + if (rate_ie) {
1321 + if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
1322 + return;
1323 memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
1324 + }
1325
1326 return;
1327 }
1328 @@ -413,6 +418,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
1329 params->beacon.tail_len);
1330 if (vendor_ie) {
1331 wmm_ie = (struct ieee_types_header *)vendor_ie;
1332 + if (*(vendor_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
1333 + return;
1334 memcpy(&bss_cfg->wmm_info, wmm_ie + 1,
1335 sizeof(bss_cfg->wmm_info));
1336 priv->wmm_enabled = 1;
1337 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
1338 index 14ceeaaa7fe5..c31c564b8eab 100644
1339 --- a/drivers/net/xen-netfront.c
1340 +++ b/drivers/net/xen-netfront.c
1341 @@ -907,7 +907,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
1342 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1343 }
1344 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1345 - queue->rx.rsp_cons = ++cons;
1346 + queue->rx.rsp_cons = ++cons + skb_queue_len(list);
1347 kfree_skb(nskb);
1348 return ~0U;
1349 }
1350 diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
1351 index bb2f79933b17..9ca24e4d5d49 100644
1352 --- a/drivers/nvmem/core.c
1353 +++ b/drivers/nvmem/core.c
1354 @@ -401,10 +401,17 @@ static int nvmem_setup_compat(struct nvmem_device *nvmem,
1355 if (!config->base_dev)
1356 return -EINVAL;
1357
1358 - if (nvmem->read_only)
1359 - nvmem->eeprom = bin_attr_ro_root_nvmem;
1360 - else
1361 - nvmem->eeprom = bin_attr_rw_root_nvmem;
1362 + if (nvmem->read_only) {
1363 + if (config->root_only)
1364 + nvmem->eeprom = bin_attr_ro_root_nvmem;
1365 + else
1366 + nvmem->eeprom = bin_attr_ro_nvmem;
1367 + } else {
1368 + if (config->root_only)
1369 + nvmem->eeprom = bin_attr_rw_root_nvmem;
1370 + else
1371 + nvmem->eeprom = bin_attr_rw_nvmem;
1372 + }
1373 nvmem->eeprom.attr.name = "eeprom";
1374 nvmem->eeprom.size = nvmem->size;
1375 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1376 diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
1377 index ef688aadb032..578242239daa 100644
1378 --- a/drivers/tty/serial/atmel_serial.c
1379 +++ b/drivers/tty/serial/atmel_serial.c
1380 @@ -1279,7 +1279,6 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1381
1382 atmel_port->hd_start_rx = false;
1383 atmel_start_rx(port);
1384 - return;
1385 }
1386
1387 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
1388 diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
1389 index 747560feb63e..2e34239ac8a9 100644
1390 --- a/drivers/tty/serial/sprd_serial.c
1391 +++ b/drivers/tty/serial/sprd_serial.c
1392 @@ -240,7 +240,7 @@ static inline void sprd_rx(struct uart_port *port)
1393
1394 if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE |
1395 SPRD_LSR_FE | SPRD_LSR_OE))
1396 - if (handle_lsr_errors(port, &lsr, &flag))
1397 + if (handle_lsr_errors(port, &flag, &lsr))
1398 continue;
1399 if (uart_handle_sysrq_char(port, ch))
1400 continue;
1401 diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
1402 index eb12eea13770..94ec2dc27748 100644
1403 --- a/drivers/usb/core/config.c
1404 +++ b/drivers/usb/core/config.c
1405 @@ -920,7 +920,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
1406 struct usb_bos_descriptor *bos;
1407 struct usb_dev_cap_header *cap;
1408 struct usb_ssp_cap_descriptor *ssp_cap;
1409 - unsigned char *buffer;
1410 + unsigned char *buffer, *buffer0;
1411 int length, total_len, num, i, ssac;
1412 __u8 cap_type;
1413 int ret;
1414 @@ -965,10 +965,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
1415 ret = -ENOMSG;
1416 goto err;
1417 }
1418 +
1419 + buffer0 = buffer;
1420 total_len -= length;
1421 + buffer += length;
1422
1423 for (i = 0; i < num; i++) {
1424 - buffer += length;
1425 cap = (struct usb_dev_cap_header *)buffer;
1426
1427 if (total_len < sizeof(*cap) || total_len < cap->bLength) {
1428 @@ -982,8 +984,6 @@ int usb_get_bos_descriptor(struct usb_device *dev)
1429 break;
1430 }
1431
1432 - total_len -= length;
1433 -
1434 if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
1435 dev_warn(ddev, "descriptor type invalid, skip\n");
1436 continue;
1437 @@ -1018,7 +1018,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
1438 default:
1439 break;
1440 }
1441 +
1442 + total_len -= length;
1443 + buffer += length;
1444 }
1445 + dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0);
1446
1447 return 0;
1448
1449 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
1450 index 02bb7b52cb36..65e1eaa5df84 100644
1451 --- a/fs/btrfs/tree-log.c
1452 +++ b/fs/btrfs/tree-log.c
1453 @@ -4846,7 +4846,7 @@ again:
1454 err = btrfs_log_inode(trans, root, other_inode,
1455 LOG_OTHER_INODE,
1456 0, LLONG_MAX, ctx);
1457 - iput(other_inode);
1458 + btrfs_add_delayed_iput(other_inode);
1459 if (err)
1460 goto out_unlock;
1461 else
1462 @@ -5264,7 +5264,7 @@ process_leaf:
1463 }
1464
1465 if (btrfs_inode_in_log(di_inode, trans->transid)) {
1466 - iput(di_inode);
1467 + btrfs_add_delayed_iput(di_inode);
1468 break;
1469 }
1470
1471 @@ -5276,7 +5276,7 @@ process_leaf:
1472 if (!ret &&
1473 btrfs_must_commit_transaction(trans, di_inode))
1474 ret = 1;
1475 - iput(di_inode);
1476 + btrfs_add_delayed_iput(di_inode);
1477 if (ret)
1478 goto next_dir_inode;
1479 if (ctx->log_new_dentries) {
1480 @@ -5422,7 +5422,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
1481 if (!ret && ctx && ctx->log_new_dentries)
1482 ret = log_new_dir_dentries(trans, root,
1483 dir_inode, ctx);
1484 - iput(dir_inode);
1485 + btrfs_add_delayed_iput(dir_inode);
1486 if (ret)
1487 goto out;
1488 }
1489 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
1490 index f291ed0c155d..e43ba6db2bdd 100644
1491 --- a/fs/cifs/connect.c
1492 +++ b/fs/cifs/connect.c
1493 @@ -2447,6 +2447,7 @@ static int
1494 cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
1495 {
1496 int rc = 0;
1497 + int is_domain = 0;
1498 const char *delim, *payload;
1499 char *desc;
1500 ssize_t len;
1501 @@ -2494,6 +2495,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
1502 rc = PTR_ERR(key);
1503 goto out_err;
1504 }
1505 + is_domain = 1;
1506 }
1507
1508 down_read(&key->sem);
1509 @@ -2551,6 +2553,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
1510 goto out_key_put;
1511 }
1512
1513 + /*
1514 + * If we have a domain key then we must set the domainName in the
1515 + * for the request.
1516 + */
1517 + if (is_domain && ses->domainName) {
1518 + vol->domainname = kstrndup(ses->domainName,
1519 + strlen(ses->domainName),
1520 + GFP_KERNEL);
1521 + if (!vol->domainname) {
1522 + cifs_dbg(FYI, "Unable to allocate %zd bytes for "
1523 + "domain\n", len);
1524 + rc = -ENOMEM;
1525 + kfree(vol->username);
1526 + vol->username = NULL;
1527 + kzfree(vol->password);
1528 + vol->password = NULL;
1529 + goto out_key_put;
1530 + }
1531 + }
1532 +
1533 out_key_put:
1534 up_read(&key->sem);
1535 key_put(key);
1536 diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
1537 index 8a0c301b0c69..7138383382ff 100644
1538 --- a/fs/nfs/nfs4file.c
1539 +++ b/fs/nfs/nfs4file.c
1540 @@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp)
1541 if (IS_ERR(inode)) {
1542 err = PTR_ERR(inode);
1543 switch (err) {
1544 - case -EPERM:
1545 - case -EACCES:
1546 - case -EDQUOT:
1547 - case -ENOSPC:
1548 - case -EROFS:
1549 - goto out_put_ctx;
1550 default:
1551 + goto out_put_ctx;
1552 + case -ENOENT:
1553 + case -ESTALE:
1554 + case -EISDIR:
1555 + case -ENOTDIR:
1556 + case -ELOOP:
1557 goto out_drop;
1558 }
1559 }
1560 diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
1561 index fad4d5188aaf..b6e25126a0b0 100644
1562 --- a/fs/nfs/pagelist.c
1563 +++ b/fs/nfs/pagelist.c
1564 @@ -562,7 +562,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
1565 }
1566
1567 hdr->res.fattr = &hdr->fattr;
1568 - hdr->res.count = count;
1569 + hdr->res.count = 0;
1570 hdr->res.eof = 0;
1571 hdr->res.verf = &hdr->verf;
1572 nfs_fattr_init(&hdr->fattr);
1573 diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
1574 index b7bca8303989..06e72229be12 100644
1575 --- a/fs/nfs/proc.c
1576 +++ b/fs/nfs/proc.c
1577 @@ -588,7 +588,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
1578 /* Emulate the eof flag, which isn't normally needed in NFSv2
1579 * as it is guaranteed to always return the file attributes
1580 */
1581 - if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
1582 + if ((hdr->res.count == 0 && hdr->args.count > 0) ||
1583 + hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
1584 hdr->res.eof = 1;
1585 }
1586 return 0;
1587 @@ -609,8 +610,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
1588
1589 static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
1590 {
1591 - if (task->tk_status >= 0)
1592 + if (task->tk_status >= 0) {
1593 + hdr->res.count = hdr->args.count;
1594 nfs_writeback_update_inode(hdr);
1595 + }
1596 return 0;
1597 }
1598
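The nfs_read_done hunk widens the NFSv2 end-of-file emulation: a reply of zero bytes for a non-zero request now also flags EOF, in addition to reads that reach the reported file size. A small sketch of that condition with hypothetical parameter names:

#include <stdbool.h>
#include <stdio.h>

static bool emulate_eof(unsigned long offset, unsigned int requested,
                        unsigned int returned, unsigned long file_size)
{
        return (returned == 0 && requested > 0) ||
               offset + returned >= file_size;
}

int main(void)
{
        printf("%d\n", emulate_eof(4096, 4096, 0, 8192));    /* 1: empty reply, size not reached */
        printf("%d\n", emulate_eof(0, 4096, 4096, 8192));    /* 0: more to read                  */
        printf("%d\n", emulate_eof(4096, 4096, 4096, 8192)); /* 1: reached the file size         */
        return 0;
}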
1599 diff --git a/include/uapi/linux/isdn/capicmd.h b/include/uapi/linux/isdn/capicmd.h
1600 index b58635f722da..ae1e1fba2e13 100644
1601 --- a/include/uapi/linux/isdn/capicmd.h
1602 +++ b/include/uapi/linux/isdn/capicmd.h
1603 @@ -15,6 +15,7 @@
1604 #define CAPI_MSG_BASELEN 8
1605 #define CAPI_DATA_B3_REQ_LEN (CAPI_MSG_BASELEN+4+4+2+2+2)
1606 #define CAPI_DATA_B3_RESP_LEN (CAPI_MSG_BASELEN+4+2)
1607 +#define CAPI_DISCONNECT_B3_RESP_LEN (CAPI_MSG_BASELEN+4)
1608
1609 /*----- CAPI commands -----*/
1610 #define CAPI_ALERT 0x01
1611 diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
1612 index b86886beee4f..867fb0ed4aa6 100644
1613 --- a/kernel/irq/resend.c
1614 +++ b/kernel/irq/resend.c
1615 @@ -37,6 +37,8 @@ static void resend_irqs(unsigned long arg)
1616 irq = find_first_bit(irqs_resend, nr_irqs);
1617 clear_bit(irq, irqs_resend);
1618 desc = irq_to_desc(irq);
1619 + if (!desc)
1620 + continue;
1621 local_irq_disable();
1622 desc->handle_irq(desc);
1623 local_irq_enable();
1624 diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
1625 index 1aeeadca620c..f435435b447e 100644
1626 --- a/net/batman-adv/bat_v_ogm.c
1627 +++ b/net/batman-adv/bat_v_ogm.c
1628 @@ -618,17 +618,23 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
1629 * batadv_v_ogm_aggr_packet - checks if there is another OGM aggregated
1630 * @buff_pos: current position in the skb
1631 * @packet_len: total length of the skb
1632 - * @tvlv_len: tvlv length of the previously considered OGM
1633 + * @ogm2_packet: potential OGM2 in buffer
1634 *
1635 * Return: true if there is enough space for another OGM, false otherwise.
1636 */
1637 -static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
1638 - __be16 tvlv_len)
1639 +static bool
1640 +batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
1641 + const struct batadv_ogm2_packet *ogm2_packet)
1642 {
1643 int next_buff_pos = 0;
1644
1645 - next_buff_pos += buff_pos + BATADV_OGM2_HLEN;
1646 - next_buff_pos += ntohs(tvlv_len);
1647 + /* check if there is enough space for the header */
1648 + next_buff_pos += buff_pos + sizeof(*ogm2_packet);
1649 + if (next_buff_pos > packet_len)
1650 + return false;
1651 +
1652 + /* check if there is enough space for the optional TVLV */
1653 + next_buff_pos += ntohs(ogm2_packet->tvlv_len);
1654
1655 return (next_buff_pos <= packet_len) &&
1656 (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
1657 @@ -775,7 +781,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
1658 ogm_packet = (struct batadv_ogm2_packet *)skb->data;
1659
1660 while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
1661 - ogm_packet->tvlv_len)) {
1662 + ogm_packet)) {
1663 batadv_v_ogm_process(skb, ogm_offset, if_incoming);
1664
1665 ogm_offset += BATADV_OGM2_HLEN;
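The batadv_v_ogm_aggr_packet change splits the bounds check in two: first verify that the fixed OGM2 header fits in what is left of the packet, and only then read its tvlv_len field to check the variable part. A minimal userspace sketch of that two-step pattern; the pkt layout and names are hypothetical, not batman-adv's:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

struct pkt {                    /* hypothetical fixed header */
        uint8_t  type;
        uint8_t  flags;
        uint16_t opt_len;       /* network byte order, length of optional payload */
};

static bool next_record_fits(size_t pos, size_t buf_len, const uint8_t *buf)
{
        size_t next = pos + sizeof(struct pkt);
        struct pkt hdr;

        if (next > buf_len)             /* the header itself would overrun */
                return false;

        memcpy(&hdr, buf + pos, sizeof(hdr));
        next += ntohs(hdr.opt_len);     /* safe to read: header is in bounds */

        return next <= buf_len;
}

int main(void)
{
        uint8_t buf[8] = { 0, 0, 0x00, 0x04, 1, 2, 3, 4 };

        printf("%d\n", next_record_fits(0, sizeof(buf), buf));  /* 1: header + 4 payload bytes fit */
        printf("%d\n", next_record_fits(6, sizeof(buf), buf));  /* 0: header would overrun         */
        return 0;
}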
1666 diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
1667 index 6406010e155b..7007683973b4 100644
1668 --- a/net/bridge/br_mdb.c
1669 +++ b/net/bridge/br_mdb.c
1670 @@ -372,7 +372,7 @@ static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
1671 struct nlmsghdr *nlh;
1672 struct nlattr *nest;
1673
1674 - nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
1675 + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
1676 if (!nlh)
1677 return -EMSGSIZE;
1678
1679 diff --git a/net/core/dev.c b/net/core/dev.c
1680 index 08bcbce16e12..547b4daae5ca 100644
1681 --- a/net/core/dev.c
1682 +++ b/net/core/dev.c
1683 @@ -7353,6 +7353,8 @@ int register_netdevice(struct net_device *dev)
1684 ret = notifier_to_errno(ret);
1685 if (ret) {
1686 rollback_registered(dev);
1687 + rcu_barrier();
1688 +
1689 dev->reg_state = NETREG_UNREGISTERED;
1690 }
1691 /*
1692 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1693 index 4a71d78d0c6a..7164569c1ec8 100644
1694 --- a/net/core/skbuff.c
1695 +++ b/net/core/skbuff.c
1696 @@ -3094,6 +3094,25 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
1697 int pos;
1698 int dummy;
1699
1700 + if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
1701 + (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
1702 + /* gso_size is untrusted, and we have a frag_list with a linear
1703 + * non head_frag head.
1704 + *
1705 + * (we assume checking the first list_skb member suffices;
1706 +	 * i.e. if any of the list_skb members has a non head_frag
1707 +	 * head, then the first one does too).
1708 + *
1709 + * If head_skb's headlen does not fit requested gso_size, it
1710 + * means that the frag_list members do NOT terminate on exact
1711 + * gso_size boundaries. Hence we cannot perform skb_frag_t page
1712 + * sharing. Therefore we must fallback to copying the frag_list
1713 + * skbs; we do so by disabling SG.
1714 + */
1715 + if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
1716 + features &= ~NETIF_F_SG;
1717 + }
1718 +
1719 __skb_push(head_skb, doffset);
1720 proto = skb_network_protocol(head_skb, &dummy);
1721 if (unlikely(!proto))
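The comment in the skb_segment hunk carries the key reasoning: with an untrusted (dodgy) gso_size and frag_list members whose data lives in a linear, non page-fragment head, the segments only line up on gso_size boundaries if the head length equals mss; otherwise the code must fall back to copying by clearing the scatter-gather feature. A tiny sketch of that decision; the flag names are stand-ins and the GSO_BY_FRAGS special case is left out:

#include <stdbool.h>
#include <stdio.h>

#define F_SG  0x1u

static unsigned int segment_features(unsigned int features, bool gso_dodgy,
                                      bool list_head_linear,
                                      unsigned int mss, unsigned int head_len)
{
        if (gso_dodgy && list_head_linear && mss != head_len)
                features &= ~F_SG;      /* copy instead of sharing pages */
        return features;
}

int main(void)
{
        printf("0x%x\n", segment_features(F_SG, true, true, 1448, 1448)); /* keeps SG  */
        printf("0x%x\n", segment_features(F_SG, true, true, 1448, 512));  /* clears SG */
        return 0;
}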
1722 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
1723 index e2e58bc42ba4..84ff36a6d4e3 100644
1724 --- a/net/ipv4/tcp_input.c
1725 +++ b/net/ipv4/tcp_input.c
1726 @@ -247,7 +247,7 @@ static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
1727
1728 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
1729 {
1730 - tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
1731 + tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
1732 }
1733
1734 static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
1735 diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
1736 index 982868193dbb..e209ae19fe78 100644
1737 --- a/net/ipv6/ping.c
1738 +++ b/net/ipv6/ping.c
1739 @@ -239,7 +239,7 @@ static int __net_init ping_v6_proc_init_net(struct net *net)
1740 return ping_proc_register(net, &ping_v6_seq_afinfo);
1741 }
1742
1743 -static void __net_init ping_v6_proc_exit_net(struct net *net)
1744 +static void __net_exit ping_v6_proc_exit_net(struct net *net)
1745 {
1746 return ping_proc_unregister(net, &ping_v6_seq_afinfo);
1747 }
1748 diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
1749 index e3ed20060878..562b54524249 100644
1750 --- a/net/netfilter/nf_conntrack_ftp.c
1751 +++ b/net/netfilter/nf_conntrack_ftp.c
1752 @@ -323,7 +323,7 @@ static int find_pattern(const char *data, size_t dlen,
1753 i++;
1754 }
1755
1756 - pr_debug("Skipped up to `%c'!\n", skip);
1757 + pr_debug("Skipped up to 0x%hhx delimiter!\n", skip);
1758
1759 *numoff = i;
1760 *numlen = getnum(data + i, dlen - i, cmd, term, numoff);
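The nf_conntrack_ftp change prints the skipped delimiter as a hex byte instead of a raw character, since delimiters such as CR or LF are not printable. A two-line userspace illustration of the difference:

#include <stdio.h>

int main(void)
{
        unsigned char skip = '\r';

        printf("Skipped up to `%c'!\n", skip);             /* old: invisible, garbles the line */
        printf("Skipped up to 0x%hhx delimiter!\n", skip); /* new: prints 0xd                  */
        return 0;
}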
1761 diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
1762 index 9016c8baf2aa..88ce8edf1261 100644
1763 --- a/net/sched/sch_generic.c
1764 +++ b/net/sched/sch_generic.c
1765 @@ -699,7 +699,11 @@ static void qdisc_rcu_free(struct rcu_head *head)
1766
1767 void qdisc_destroy(struct Qdisc *qdisc)
1768 {
1769 - const struct Qdisc_ops *ops = qdisc->ops;
1770 + const struct Qdisc_ops *ops;
1771 +
1772 + if (!qdisc)
1773 + return;
1774 + ops = qdisc->ops;
1775
1776 if (qdisc->flags & TCQ_F_BUILTIN ||
1777 !atomic_dec_and_test(&qdisc->refcnt))
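The qdisc_destroy hunk adds an early return for a NULL qdisc, letting error paths call it unconditionally, much like free(NULL). A generic sketch of the same guard with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct thing {
        int refcnt;
};

static void thing_destroy(struct thing *t)
{
        if (!t)
                return;         /* the guard the hunk adds */
        if (--t->refcnt > 0)
                return;
        free(t);
        puts("destroyed");
}

int main(void)
{
        struct thing *t = calloc(1, sizeof(*t));

        if (!t)
                return 1;
        t->refcnt = 1;
        thing_destroy(NULL);    /* now a harmless no-op */
        thing_destroy(t);
        return 0;
}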
1778 diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
1779 index f4b2d69973c3..fe32239253a6 100644
1780 --- a/net/sched/sch_hhf.c
1781 +++ b/net/sched/sch_hhf.c
1782 @@ -543,7 +543,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
1783 new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
1784
1785 non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
1786 - if (non_hh_quantum > INT_MAX)
1787 + if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
1788 return -EINVAL;
1789
1790 sch_tree_lock(sch);
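The sch_hhf hunk rejects a zero non-hh quantum in addition to the existing overflow check; the product is computed in 64 bits first so the comparison against INT_MAX is reliable. A small sketch of that validation with hypothetical names:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static int check_quantum(uint32_t quantum, uint32_t weight)
{
        uint64_t q = (uint64_t)quantum * weight;   /* widen before checking */

        if (q == 0 || q > INT_MAX)
                return -1;      /* -EINVAL in the kernel hunk */
        return 0;
}

int main(void)
{
        printf("%d\n", check_quantum(1514, 2));          /*  0: sane value      */
        printf("%d\n", check_quantum(0, 2));             /* -1: zero quantum    */
        printf("%d\n", check_quantum(UINT32_MAX, 4096)); /* -1: exceeds INT_MAX */
        return 0;
}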
1791 diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
1792 index d6af93a24aa0..833283c8fe11 100644
1793 --- a/net/sctp/protocol.c
1794 +++ b/net/sctp/protocol.c
1795 @@ -1336,7 +1336,7 @@ static int __net_init sctp_ctrlsock_init(struct net *net)
1796 return status;
1797 }
1798
1799 -static void __net_init sctp_ctrlsock_exit(struct net *net)
1800 +static void __net_exit sctp_ctrlsock_exit(struct net *net)
1801 {
1802 /* Free the control endpoint. */
1803 inet_ctl_sock_destroy(net->sctp.ctl_sock);
1804 diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
1805 index b1ead1776e81..8b4cf78987e4 100644
1806 --- a/net/sctp/sm_sideeffect.c
1807 +++ b/net/sctp/sm_sideeffect.c
1808 @@ -509,7 +509,7 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
1809 if (net->sctp.pf_enable &&
1810 (transport->state == SCTP_ACTIVE) &&
1811 (transport->error_count < transport->pathmaxrxt) &&
1812 - (transport->error_count > asoc->pf_retrans)) {
1813 + (transport->error_count > transport->pf_retrans)) {
1814
1815 sctp_assoc_control_transport(asoc, transport,
1816 SCTP_TRANSPORT_PF,
1817 diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
1818 index 23f8899e0f8c..7ebcaff8c1c4 100644
1819 --- a/net/tipc/name_distr.c
1820 +++ b/net/tipc/name_distr.c
1821 @@ -224,7 +224,8 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
1822 publ->key);
1823 }
1824
1825 - kfree_rcu(p, rcu);
1826 + if (p)
1827 + kfree_rcu(p, rcu);
1828 }
1829
1830 /**
1831 diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
1832 index f60baeb338e5..b47445022d5c 100644
1833 --- a/security/keys/request_key_auth.c
1834 +++ b/security/keys/request_key_auth.c
1835 @@ -71,6 +71,9 @@ static void request_key_auth_describe(const struct key *key,
1836 {
1837 struct request_key_auth *rka = key->payload.data[0];
1838
1839 + if (!rka)
1840 + return;
1841 +
1842 seq_puts(m, "key:");
1843 seq_puts(m, key->description);
1844 if (key_is_positive(key))
1845 @@ -88,6 +91,9 @@ static long request_key_auth_read(const struct key *key,
1846 size_t datalen;
1847 long ret;
1848
1849 + if (!rka)
1850 + return -EKEYREVOKED;
1851 +
1852 datalen = rka->callout_len;
1853 ret = datalen;
1854
1855 diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
1856 index b4c5d96e54c1..7c2c8e74aa9a 100644
1857 --- a/tools/power/x86/turbostat/turbostat.c
1858 +++ b/tools/power/x86/turbostat/turbostat.c
1859 @@ -3593,7 +3593,7 @@ int initialize_counters(int cpu_id)
1860
1861 void allocate_output_buffer()
1862 {
1863 - output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
1864 + output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
1865 outp = output_buffer;
1866 if (outp == NULL)
1867 err(-1, "calloc output buffer");
1868 diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
1869 index 571c1ce37d15..5c1efb869df2 100644
1870 --- a/virt/kvm/coalesced_mmio.c
1871 +++ b/virt/kvm/coalesced_mmio.c
1872 @@ -39,7 +39,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
1873 return 1;
1874 }
1875
1876 -static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
1877 +static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
1878 {
1879 struct kvm_coalesced_mmio_ring *ring;
1880 unsigned avail;
1881 @@ -51,7 +51,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
1882 * there is always one unused entry in the buffer
1883 */
1884 ring = dev->kvm->coalesced_mmio_ring;
1885 - avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
1886 + avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
1887 if (avail == 0) {
1888 /* full */
1889 return 0;
1890 @@ -66,24 +66,27 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
1891 {
1892 struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
1893 struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
1894 + __u32 insert;
1895
1896 if (!coalesced_mmio_in_range(dev, addr, len))
1897 return -EOPNOTSUPP;
1898
1899 spin_lock(&dev->kvm->ring_lock);
1900
1901 - if (!coalesced_mmio_has_room(dev)) {
1902 + insert = READ_ONCE(ring->last);
1903 + if (!coalesced_mmio_has_room(dev, insert) ||
1904 + insert >= KVM_COALESCED_MMIO_MAX) {
1905 spin_unlock(&dev->kvm->ring_lock);
1906 return -EOPNOTSUPP;
1907 }
1908
1909 /* copy data in first free entry of the ring */
1910
1911 - ring->coalesced_mmio[ring->last].phys_addr = addr;
1912 - ring->coalesced_mmio[ring->last].len = len;
1913 - memcpy(ring->coalesced_mmio[ring->last].data, val, len);
1914 + ring->coalesced_mmio[insert].phys_addr = addr;
1915 + ring->coalesced_mmio[insert].len = len;
1916 + memcpy(ring->coalesced_mmio[insert].data, val, len);
1917 smp_wmb();
1918 - ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
1919 + ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
1920 spin_unlock(&dev->kvm->ring_lock);
1921 return 0;
1922 }
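The coalesced MMIO hunk reads ring->last once into a local snapshot, rejects out-of-range values, and uses the same snapshot for both the room check and the store, so the index cannot change between the two. The room check itself is the usual one-slot-spare ring formula, "(first - last - 1) % size"; below is a tiny userspace sketch of that arithmetic with a stand-in, power-of-two ring size:

#include <stdint.h>
#include <stdio.h>

#define RING_MAX 8u     /* power of two here so the unsigned wrap-around reduces exactly */

static unsigned int ring_room(uint32_t first, uint32_t last)
{
        /* one slot is always left unused to tell "full" apart from "empty" */
        return (first - last - 1) % RING_MAX;
}

int main(void)
{
        /* empty ring: first == last, seven usable slots remain */
        printf("%u\n", ring_room(0, 0));        /* 7 */
        /* full ring: the next insert would collide with the reader */
        printf("%u\n", ring_room(3, 2));        /* 0 */
        return 0;
}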