Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0208-4.9.109-all-fixes.patch

Parent Directory | Revision Log


Revision 3185 - (hide annotations) (download)
Wed Aug 8 14:17:36 2018 UTC (5 years, 9 months ago) by niro
File size: 48044 byte(s)
-linux-4.9.109
1 niro 3185 diff --git a/Makefile b/Makefile
2     index 1fa9daf219c4..1570cc85313d 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 108
9     +SUBLEVEL = 109
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
14     index 60a391b8c4a2..dd1958436591 100644
15     --- a/arch/x86/crypto/crc32c-intel_glue.c
16     +++ b/arch/x86/crypto/crc32c-intel_glue.c
17     @@ -58,16 +58,11 @@
18     asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
19     unsigned int crc_init);
20     static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
21     -#if defined(X86_FEATURE_EAGER_FPU)
22     #define set_pcl_breakeven_point() \
23     do { \
24     if (!use_eager_fpu()) \
25     crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \
26     } while (0)
27     -#else
28     -#define set_pcl_breakeven_point() \
29     - (crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU)
30     -#endif
31     #endif /* CONFIG_X86_64 */
32    
33     static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
34     diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
35     index c278f276c9b3..aea30afeddb8 100644
36     --- a/arch/x86/include/asm/cpufeatures.h
37     +++ b/arch/x86/include/asm/cpufeatures.h
38     @@ -104,7 +104,7 @@
39     #define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
40     #define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
41     #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
42     -#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
43     +/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
44     #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
45    
46     /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
47     diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
48     index 2737366ea583..8852e3afa1ad 100644
49     --- a/arch/x86/include/asm/fpu/internal.h
50     +++ b/arch/x86/include/asm/fpu/internal.h
51     @@ -62,7 +62,7 @@ extern u64 fpu__get_supported_xfeatures_mask(void);
52     */
53     static __always_inline __pure bool use_eager_fpu(void)
54     {
55     - return static_cpu_has(X86_FEATURE_EAGER_FPU);
56     + return true;
57     }
58    
59     static __always_inline __pure bool use_xsaveopt(void)
60     diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
61     index fc3c7e49c8e4..ae357d0afc91 100644
62     --- a/arch/x86/include/asm/kvm_emulate.h
63     +++ b/arch/x86/include/asm/kvm_emulate.h
64     @@ -105,11 +105,12 @@ struct x86_emulate_ops {
65     * @addr: [IN ] Linear address from which to read.
66     * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
67     * @bytes: [IN ] Number of bytes to read from memory.
68     + * @system:[IN ] Whether the access is forced to be at CPL0.
69     */
70     int (*read_std)(struct x86_emulate_ctxt *ctxt,
71     unsigned long addr, void *val,
72     unsigned int bytes,
73     - struct x86_exception *fault);
74     + struct x86_exception *fault, bool system);
75    
76     /*
77     * read_phys: Read bytes of standard (non-emulated/special) memory.
78     @@ -127,10 +128,11 @@ struct x86_emulate_ops {
79     * @addr: [IN ] Linear address to which to write.
80     * @val: [OUT] Value write to memory, zero-extended to 'u_long'.
81     * @bytes: [IN ] Number of bytes to write to memory.
82     + * @system:[IN ] Whether the access is forced to be at CPL0.
83     */
84     int (*write_std)(struct x86_emulate_ctxt *ctxt,
85     unsigned long addr, void *val, unsigned int bytes,
86     - struct x86_exception *fault);
87     + struct x86_exception *fault, bool system);
88     /*
89     * fetch: Read bytes of standard (non-emulated/special) memory.
90     * Used for instruction fetch.
91     diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
92     index 6f0ab305dd5e..9f3657891b87 100644
93     --- a/arch/x86/kernel/fpu/init.c
94     +++ b/arch/x86/kernel/fpu/init.c
95     @@ -15,10 +15,7 @@
96     */
97     static void fpu__init_cpu_ctx_switch(void)
98     {
99     - if (!boot_cpu_has(X86_FEATURE_EAGER_FPU))
100     - stts();
101     - else
102     - clts();
103     + clts();
104     }
105    
106     /*
107     @@ -233,42 +230,6 @@ static void __init fpu__init_system_xstate_size_legacy(void)
108     fpu_user_xstate_size = fpu_kernel_xstate_size;
109     }
110    
111     -/*
112     - * FPU context switching strategies:
113     - *
114     - * Against popular belief, we don't do lazy FPU saves, due to the
115     - * task migration complications it brings on SMP - we only do
116     - * lazy FPU restores.
117     - *
118     - * 'lazy' is the traditional strategy, which is based on setting
119     - * CR0::TS to 1 during context-switch (instead of doing a full
120     - * restore of the FPU state), which causes the first FPU instruction
121     - * after the context switch (whenever it is executed) to fault - at
122     - * which point we lazily restore the FPU state into FPU registers.
123     - *
124     - * Tasks are of course under no obligation to execute FPU instructions,
125     - * so it can easily happen that another context-switch occurs without
126     - * a single FPU instruction being executed. If we eventually switch
127     - * back to the original task (that still owns the FPU) then we have
128     - * not only saved the restores along the way, but we also have the
129     - * FPU ready to be used for the original task.
130     - *
131     - * 'lazy' is deprecated because it's almost never a performance win
132     - * and it's much more complicated than 'eager'.
133     - *
134     - * 'eager' switching is by default on all CPUs, there we switch the FPU
135     - * state during every context switch, regardless of whether the task
136     - * has used FPU instructions in that time slice or not. This is done
137     - * because modern FPU context saving instructions are able to optimize
138     - * state saving and restoration in hardware: they can detect both
139     - * unused and untouched FPU state and optimize accordingly.
140     - *
141     - * [ Note that even in 'lazy' mode we might optimize context switches
142     - * to use 'eager' restores, if we detect that a task is using the FPU
143     - * frequently. See the fpu->counter logic in fpu/internal.h for that. ]
144     - */
145     -static enum { ENABLE, DISABLE } eagerfpu = ENABLE;
146     -
147     /*
148     * Find supported xfeatures based on cpu features and command-line input.
149     * This must be called after fpu__init_parse_early_param() is called and
150     @@ -276,40 +237,10 @@ static enum { ENABLE, DISABLE } eagerfpu = ENABLE;
151     */
152     u64 __init fpu__get_supported_xfeatures_mask(void)
153     {
154     - /* Support all xfeatures known to us */
155     - if (eagerfpu != DISABLE)
156     - return XCNTXT_MASK;
157     -
158     - /* Warning of xfeatures being disabled for no eagerfpu mode */
159     - if (xfeatures_mask & XFEATURE_MASK_EAGER) {
160     - pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
161     - xfeatures_mask & XFEATURE_MASK_EAGER);
162     - }
163     -
164     - /* Return a mask that masks out all features requiring eagerfpu mode */
165     - return ~XFEATURE_MASK_EAGER;
166     -}
167     -
168     -/*
169     - * Disable features dependent on eagerfpu.
170     - */
171     -static void __init fpu__clear_eager_fpu_features(void)
172     -{
173     - setup_clear_cpu_cap(X86_FEATURE_MPX);
174     + return XCNTXT_MASK;
175     }
176    
177     -/*
178     - * Pick the FPU context switching strategy:
179     - *
180     - * When eagerfpu is AUTO or ENABLE, we ensure it is ENABLE if either of
181     - * the following is true:
182     - *
183     - * (1) the cpu has xsaveopt, as it has the optimization and doing eager
184     - * FPU switching has a relatively low cost compared to a plain xsave;
185     - * (2) the cpu has xsave features (e.g. MPX) that depend on eager FPU
186     - * switching. Should the kernel boot with noxsaveopt, we support MPX
187     - * with eager FPU switching at a higher cost.
188     - */
189     +/* Legacy code to initialize eager fpu mode. */
190     static void __init fpu__init_system_ctx_switch(void)
191     {
192     static bool on_boot_cpu __initdata = 1;
193     @@ -318,17 +249,6 @@ static void __init fpu__init_system_ctx_switch(void)
194     on_boot_cpu = 0;
195    
196     WARN_ON_FPU(current->thread.fpu.fpstate_active);
197     -
198     - if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
199     - eagerfpu = ENABLE;
200     -
201     - if (xfeatures_mask & XFEATURE_MASK_EAGER)
202     - eagerfpu = ENABLE;
203     -
204     - if (eagerfpu == ENABLE)
205     - setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
206     -
207     - printk(KERN_INFO "x86/fpu: Using '%s' FPU context switches.\n", eagerfpu == ENABLE ? "eager" : "lazy");
208     }
209    
210     /*
211     @@ -337,11 +257,6 @@ static void __init fpu__init_system_ctx_switch(void)
212     */
213     static void __init fpu__init_parse_early_param(void)
214     {
215     - if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) {
216     - eagerfpu = DISABLE;
217     - fpu__clear_eager_fpu_features();
218     - }
219     -
220     if (cmdline_find_option_bool(boot_command_line, "no387"))
221     setup_clear_cpu_cap(X86_FEATURE_FPU);
222    
223     diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
224     index c8d573822e60..510cfc06701a 100644
225     --- a/arch/x86/kvm/emulate.c
226     +++ b/arch/x86/kvm/emulate.c
227     @@ -802,6 +802,19 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
228     return assign_eip_near(ctxt, ctxt->_eip + rel);
229     }
230    
231     +static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
232     + void *data, unsigned size)
233     +{
234     + return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
235     +}
236     +
237     +static int linear_write_system(struct x86_emulate_ctxt *ctxt,
238     + ulong linear, void *data,
239     + unsigned int size)
240     +{
241     + return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
242     +}
243     +
244     static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
245     struct segmented_address addr,
246     void *data,
247     @@ -813,7 +826,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
248     rc = linearize(ctxt, addr, size, false, &linear);
249     if (rc != X86EMUL_CONTINUE)
250     return rc;
251     - return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
252     + return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
253     }
254    
255     static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
256     @@ -827,7 +840,7 @@ static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
257     rc = linearize(ctxt, addr, size, true, &linear);
258     if (rc != X86EMUL_CONTINUE)
259     return rc;
260     - return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
261     + return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
262     }
263    
264     /*
265     @@ -1500,8 +1513,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
266     return emulate_gp(ctxt, index << 3 | 0x2);
267    
268     addr = dt.address + index * 8;
269     - return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
270     - &ctxt->exception);
271     + return linear_read_system(ctxt, addr, desc, sizeof *desc);
272     }
273    
274     static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
275     @@ -1564,8 +1576,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
276     if (rc != X86EMUL_CONTINUE)
277     return rc;
278    
279     - return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
280     - &ctxt->exception);
281     + return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
282     }
283    
284     /* allowed just for 8 bytes segments */
285     @@ -1579,8 +1590,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
286     if (rc != X86EMUL_CONTINUE)
287     return rc;
288    
289     - return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
290     - &ctxt->exception);
291     + return linear_write_system(ctxt, addr, desc, sizeof *desc);
292     }
293    
294     static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
295     @@ -1741,8 +1751,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
296     return ret;
297     }
298     } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
299     - ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
300     - sizeof(base3), &ctxt->exception);
301     + ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
302     if (ret != X86EMUL_CONTINUE)
303     return ret;
304     if (is_noncanonical_address(get_desc_base(&seg_desc) |
305     @@ -2055,11 +2064,11 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
306     eip_addr = dt.address + (irq << 2);
307     cs_addr = dt.address + (irq << 2) + 2;
308    
309     - rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
310     + rc = linear_read_system(ctxt, cs_addr, &cs, 2);
311     if (rc != X86EMUL_CONTINUE)
312     return rc;
313    
314     - rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
315     + rc = linear_read_system(ctxt, eip_addr, &eip, 2);
316     if (rc != X86EMUL_CONTINUE)
317     return rc;
318    
319     @@ -2903,12 +2912,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
320     #ifdef CONFIG_X86_64
321     base |= ((u64)base3) << 32;
322     #endif
323     - r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
324     + r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
325     if (r != X86EMUL_CONTINUE)
326     return false;
327     if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
328     return false;
329     - r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
330     + r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
331     if (r != X86EMUL_CONTINUE)
332     return false;
333     if ((perm >> bit_idx) & mask)
334     @@ -3037,35 +3046,30 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
335     u16 tss_selector, u16 old_tss_sel,
336     ulong old_tss_base, struct desc_struct *new_desc)
337     {
338     - const struct x86_emulate_ops *ops = ctxt->ops;
339     struct tss_segment_16 tss_seg;
340     int ret;
341     u32 new_tss_base = get_desc_base(new_desc);
342    
343     - ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
344     - &ctxt->exception);
345     + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
346     if (ret != X86EMUL_CONTINUE)
347     return ret;
348    
349     save_state_to_tss16(ctxt, &tss_seg);
350    
351     - ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
352     - &ctxt->exception);
353     + ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
354     if (ret != X86EMUL_CONTINUE)
355     return ret;
356    
357     - ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
358     - &ctxt->exception);
359     + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
360     if (ret != X86EMUL_CONTINUE)
361     return ret;
362    
363     if (old_tss_sel != 0xffff) {
364     tss_seg.prev_task_link = old_tss_sel;
365    
366     - ret = ops->write_std(ctxt, new_tss_base,
367     - &tss_seg.prev_task_link,
368     - sizeof tss_seg.prev_task_link,
369     - &ctxt->exception);
370     + ret = linear_write_system(ctxt, new_tss_base,
371     + &tss_seg.prev_task_link,
372     + sizeof tss_seg.prev_task_link);
373     if (ret != X86EMUL_CONTINUE)
374     return ret;
375     }
376     @@ -3181,38 +3185,34 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
377     u16 tss_selector, u16 old_tss_sel,
378     ulong old_tss_base, struct desc_struct *new_desc)
379     {
380     - const struct x86_emulate_ops *ops = ctxt->ops;
381     struct tss_segment_32 tss_seg;
382     int ret;
383     u32 new_tss_base = get_desc_base(new_desc);
384     u32 eip_offset = offsetof(struct tss_segment_32, eip);
385     u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
386    
387     - ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
388     - &ctxt->exception);
389     + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
390     if (ret != X86EMUL_CONTINUE)
391     return ret;
392    
393     save_state_to_tss32(ctxt, &tss_seg);
394    
395     /* Only GP registers and segment selectors are saved */
396     - ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
397     - ldt_sel_offset - eip_offset, &ctxt->exception);
398     + ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
399     + ldt_sel_offset - eip_offset);
400     if (ret != X86EMUL_CONTINUE)
401     return ret;
402    
403     - ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
404     - &ctxt->exception);
405     + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
406     if (ret != X86EMUL_CONTINUE)
407     return ret;
408    
409     if (old_tss_sel != 0xffff) {
410     tss_seg.prev_task_link = old_tss_sel;
411    
412     - ret = ops->write_std(ctxt, new_tss_base,
413     - &tss_seg.prev_task_link,
414     - sizeof tss_seg.prev_task_link,
415     - &ctxt->exception);
416     + ret = linear_write_system(ctxt, new_tss_base,
417     + &tss_seg.prev_task_link,
418     + sizeof tss_seg.prev_task_link);
419     if (ret != X86EMUL_CONTINUE)
420     return ret;
421     }
422     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
423     index 4a66a620fc17..4e0292e0aafb 100644
424     --- a/arch/x86/kvm/vmx.c
425     +++ b/arch/x86/kvm/vmx.c
426     @@ -6928,8 +6928,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
427     vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
428     return 1;
429    
430     - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
431     - sizeof(vmptr), &e)) {
432     + if (kvm_read_guest_virt(vcpu, gva, &vmptr, sizeof(vmptr), &e)) {
433     kvm_inject_page_fault(vcpu, &e);
434     return 1;
435     }
436     @@ -7469,8 +7468,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
437     vmx_instruction_info, true, &gva))
438     return 1;
439     /* _system ok, as nested_vmx_check_permission verified cpl=0 */
440     - kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
441     - &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
442     + kvm_write_guest_virt_system(vcpu, gva, &field_value,
443     + (is_long_mode(vcpu) ? 8 : 4), NULL);
444     }
445    
446     nested_vmx_succeed(vcpu);
447     @@ -7505,8 +7504,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
448     if (get_vmx_mem_address(vcpu, exit_qualification,
449     vmx_instruction_info, false, &gva))
450     return 1;
451     - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
452     - &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
453     + if (kvm_read_guest_virt(vcpu, gva, &field_value,
454     + (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
455     kvm_inject_page_fault(vcpu, &e);
456     return 1;
457     }
458     @@ -7603,9 +7602,9 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
459     vmx_instruction_info, true, &vmcs_gva))
460     return 1;
461     /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
462     - if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
463     - (void *)&to_vmx(vcpu)->nested.current_vmptr,
464     - sizeof(u64), &e)) {
465     + if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
466     + (void *)&to_vmx(vcpu)->nested.current_vmptr,
467     + sizeof(u64), &e)) {
468     kvm_inject_page_fault(vcpu, &e);
469     return 1;
470     }
471     @@ -7659,8 +7658,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
472     if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
473     vmx_instruction_info, false, &gva))
474     return 1;
475     - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
476     - sizeof(operand), &e)) {
477     + if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
478     kvm_inject_page_fault(vcpu, &e);
479     return 1;
480     }
481     @@ -7723,8 +7721,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
482     if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
483     vmx_instruction_info, false, &gva))
484     return 1;
485     - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid,
486     - sizeof(u32), &e)) {
487     + if (kvm_read_guest_virt(vcpu, gva, &vpid, sizeof(u32), &e)) {
488     kvm_inject_page_fault(vcpu, &e);
489     return 1;
490     }
491     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
492     index 4aa265ae8cf7..5ca23af44c81 100644
493     --- a/arch/x86/kvm/x86.c
494     +++ b/arch/x86/kvm/x86.c
495     @@ -4395,11 +4395,10 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
496     return X86EMUL_CONTINUE;
497     }
498    
499     -int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
500     +int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
501     gva_t addr, void *val, unsigned int bytes,
502     struct x86_exception *exception)
503     {
504     - struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
505     u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
506    
507     return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
508     @@ -4407,12 +4406,17 @@ int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
509     }
510     EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
511    
512     -static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
513     - gva_t addr, void *val, unsigned int bytes,
514     - struct x86_exception *exception)
515     +static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
516     + gva_t addr, void *val, unsigned int bytes,
517     + struct x86_exception *exception, bool system)
518     {
519     struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
520     - return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
521     + u32 access = 0;
522     +
523     + if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
524     + access |= PFERR_USER_MASK;
525     +
526     + return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
527     }
528    
529     static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
530     @@ -4424,18 +4428,16 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
531     return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
532     }
533    
534     -int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
535     - gva_t addr, void *val,
536     - unsigned int bytes,
537     - struct x86_exception *exception)
538     +static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
539     + struct kvm_vcpu *vcpu, u32 access,
540     + struct x86_exception *exception)
541     {
542     - struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
543     void *data = val;
544     int r = X86EMUL_CONTINUE;
545    
546     while (bytes) {
547     gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
548     - PFERR_WRITE_MASK,
549     + access,
550     exception);
551     unsigned offset = addr & (PAGE_SIZE-1);
552     unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
553     @@ -4456,6 +4458,27 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
554     out:
555     return r;
556     }
557     +
558     +static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
559     + unsigned int bytes, struct x86_exception *exception,
560     + bool system)
561     +{
562     + struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
563     + u32 access = PFERR_WRITE_MASK;
564     +
565     + if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
566     + access |= PFERR_USER_MASK;
567     +
568     + return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
569     + access, exception);
570     +}
571     +
572     +int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
573     + unsigned int bytes, struct x86_exception *exception)
574     +{
575     + return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
576     + PFERR_WRITE_MASK, exception);
577     +}
578     EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
579    
580     static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
581     @@ -5180,8 +5203,8 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla
582     static const struct x86_emulate_ops emulate_ops = {
583     .read_gpr = emulator_read_gpr,
584     .write_gpr = emulator_write_gpr,
585     - .read_std = kvm_read_guest_virt_system,
586     - .write_std = kvm_write_guest_virt_system,
587     + .read_std = emulator_read_std,
588     + .write_std = emulator_write_std,
589     .read_phys = kvm_read_guest_phys_system,
590     .fetch = kvm_fetch_guest_virt,
591     .read_emulated = emulator_read_emulated,
592     diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
593     index e8ff3e4ce38a..2133a18f2d36 100644
594     --- a/arch/x86/kvm/x86.h
595     +++ b/arch/x86/kvm/x86.h
596     @@ -161,11 +161,11 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
597     void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
598     u64 get_kvmclock_ns(struct kvm *kvm);
599    
600     -int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
601     +int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
602     gva_t addr, void *val, unsigned int bytes,
603     struct x86_exception *exception);
604    
605     -int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
606     +int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
607     gva_t addr, void *val, unsigned int bytes,
608     struct x86_exception *exception);
609    
610     diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
611     index d8305ddf87d0..ff6ac4e824b5 100644
612     --- a/drivers/crypto/omap-sham.c
613     +++ b/drivers/crypto/omap-sham.c
614     @@ -1081,7 +1081,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
615    
616     if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
617     free_pages((unsigned long)sg_virt(ctx->sg),
618     - get_order(ctx->sg->length));
619     + get_order(ctx->sg->length + ctx->bufcnt));
620    
621     if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
622     kfree(ctx->sg);
623     diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
624     index 022c7ab7351a..b0cd5aff3822 100644
625     --- a/drivers/crypto/vmx/aes.c
626     +++ b/drivers/crypto/vmx/aes.c
627     @@ -53,8 +53,6 @@ static int p8_aes_init(struct crypto_tfm *tfm)
628     alg, PTR_ERR(fallback));
629     return PTR_ERR(fallback);
630     }
631     - printk(KERN_INFO "Using '%s' as fallback implementation.\n",
632     - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
633    
634     crypto_cipher_set_flags(fallback,
635     crypto_cipher_get_flags((struct
636     diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
637     index 94ad5c0adbcb..46131701c378 100644
638     --- a/drivers/crypto/vmx/aes_cbc.c
639     +++ b/drivers/crypto/vmx/aes_cbc.c
640     @@ -55,8 +55,6 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
641     alg, PTR_ERR(fallback));
642     return PTR_ERR(fallback);
643     }
644     - printk(KERN_INFO "Using '%s' as fallback implementation.\n",
645     - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
646    
647     crypto_blkcipher_set_flags(
648     fallback,
649     diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
650     index 7cf6d31c1123..6ef7548c5c87 100644
651     --- a/drivers/crypto/vmx/aes_ctr.c
652     +++ b/drivers/crypto/vmx/aes_ctr.c
653     @@ -53,8 +53,6 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
654     alg, PTR_ERR(fallback));
655     return PTR_ERR(fallback);
656     }
657     - printk(KERN_INFO "Using '%s' as fallback implementation.\n",
658     - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
659    
660     crypto_blkcipher_set_flags(
661     fallback,
662     diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
663     index 27a94a119009..1c4b5b889fba 100644
664     --- a/drivers/crypto/vmx/ghash.c
665     +++ b/drivers/crypto/vmx/ghash.c
666     @@ -64,8 +64,6 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
667     alg, PTR_ERR(fallback));
668     return PTR_ERR(fallback);
669     }
670     - printk(KERN_INFO "Using '%s' as fallback implementation.\n",
671     - crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));
672    
673     crypto_shash_set_flags(fallback,
674     crypto_shash_get_flags((struct crypto_shash
675     diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
676     index 56b24198741c..dd0076497463 100644
677     --- a/drivers/gpio/gpiolib.c
678     +++ b/drivers/gpio/gpiolib.c
679     @@ -3204,6 +3204,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
680     struct gpio_desc *desc = NULL;
681     int status;
682     enum gpio_lookup_flags lookupflags = 0;
683     + /* Maybe we have a device name, maybe not */
684     + const char *devname = dev ? dev_name(dev) : "?";
685    
686     dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
687    
688     @@ -3232,8 +3234,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
689     return desc;
690     }
691    
692     - /* If a connection label was passed use that, else use the device name as label */
693     - status = gpiod_request(desc, con_id ? con_id : dev_name(dev));
694     + /*
695     + * If a connection label was passed use that, else attempt to use
696     + * the device name as label
697     + */
698     + status = gpiod_request(desc, con_id ? con_id : devname);
699     if (status < 0)
700     return ERR_PTR(status);
701    
702     diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
703     index 3851d5715772..aeb8250ab079 100644
704     --- a/drivers/input/mouse/elan_i2c_core.c
705     +++ b/drivers/input/mouse/elan_i2c_core.c
706     @@ -1249,6 +1249,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
707     { "ELAN060B", 0 },
708     { "ELAN060C", 0 },
709     { "ELAN0611", 0 },
710     + { "ELAN0612", 0 },
711     { "ELAN1000", 0 },
712     { }
713     };
714     diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
715     index 5907fddcc966..c599b5a2373b 100644
716     --- a/drivers/input/touchscreen/goodix.c
717     +++ b/drivers/input/touchscreen/goodix.c
718     @@ -858,6 +858,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
719     #ifdef CONFIG_ACPI
720     static const struct acpi_device_id goodix_acpi_match[] = {
721     { "GDIX1001", 0 },
722     + { "GDIX1002", 0 },
723     { }
724     };
725     MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
726     diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
727     index 1e688bfec567..fe90b7e04427 100644
728     --- a/drivers/misc/vmw_balloon.c
729     +++ b/drivers/misc/vmw_balloon.c
730     @@ -576,15 +576,9 @@ static void vmballoon_pop(struct vmballoon *b)
731     }
732     }
733    
734     - if (b->batch_page) {
735     - vunmap(b->batch_page);
736     - b->batch_page = NULL;
737     - }
738     -
739     - if (b->page) {
740     - __free_page(b->page);
741     - b->page = NULL;
742     - }
743     + /* Clearing the batch_page unconditionally has no adverse effect */
744     + free_page((unsigned long)b->batch_page);
745     + b->batch_page = NULL;
746     }
747    
748     /*
749     @@ -991,16 +985,13 @@ static const struct vmballoon_ops vmballoon_batched_ops = {
750    
751     static bool vmballoon_init_batching(struct vmballoon *b)
752     {
753     - b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
754     - if (!b->page)
755     - return false;
756     + struct page *page;
757    
758     - b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
759     - if (!b->batch_page) {
760     - __free_page(b->page);
761     + page = alloc_page(GFP_KERNEL | __GFP_ZERO);
762     + if (!page)
763     return false;
764     - }
765    
766     + b->batch_page = page_address(page);
767     return true;
768     }
769    
770     diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
771     index 1a139d0f2232..f5fcc0850dac 100644
772     --- a/drivers/net/bonding/bond_main.c
773     +++ b/drivers/net/bonding/bond_main.c
774     @@ -384,20 +384,15 @@ static int bond_update_speed_duplex(struct slave *slave)
775     slave->duplex = DUPLEX_UNKNOWN;
776    
777     res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
778     - if (res < 0) {
779     - slave->link = BOND_LINK_DOWN;
780     + if (res < 0)
781     return 1;
782     - }
783     - if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1)) {
784     - slave->link = BOND_LINK_DOWN;
785     + if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
786     return 1;
787     - }
788     switch (ecmd.base.duplex) {
789     case DUPLEX_FULL:
790     case DUPLEX_HALF:
791     break;
792     default:
793     - slave->link = BOND_LINK_DOWN;
794     return 1;
795     }
796    
797     @@ -1536,7 +1531,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
798     new_slave->delay = 0;
799     new_slave->link_failure_count = 0;
800    
801     - bond_update_speed_duplex(new_slave);
802     + if (bond_update_speed_duplex(new_slave) &&
803     + bond_needs_speed_duplex(bond))
804     + new_slave->link = BOND_LINK_DOWN;
805    
806     new_slave->last_rx = jiffies -
807     (msecs_to_jiffies(bond->params.arp_interval) + 1);
808     @@ -2140,7 +2137,14 @@ static void bond_miimon_commit(struct bonding *bond)
809     continue;
810    
811     case BOND_LINK_UP:
812     - bond_update_speed_duplex(slave);
813     + if (bond_update_speed_duplex(slave) &&
814     + bond_needs_speed_duplex(bond)) {
815     + slave->link = BOND_LINK_DOWN;
816     + netdev_warn(bond->dev,
817     + "failed to get link speed/duplex for %s\n",
818     + slave->dev->name);
819     + continue;
820     + }
821     bond_set_slave_link_state(slave, BOND_LINK_UP,
822     BOND_SLAVE_NOTIFY_NOW);
823     slave->last_link_up = jiffies;
824     diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
825     index 642ee00e9143..a55d112583bd 100644
826     --- a/drivers/nvme/host/pci.c
827     +++ b/drivers/nvme/host/pci.c
828     @@ -1126,11 +1126,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
829     if (result < 0)
830     goto release_cq;
831    
832     + nvme_init_queue(nvmeq, qid);
833     result = queue_request_irq(nvmeq);
834     if (result < 0)
835     goto release_sq;
836    
837     - nvme_init_queue(nvmeq, qid);
838     return result;
839    
840     release_sq:
841     @@ -1248,6 +1248,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
842     return result;
843    
844     nvmeq->cq_vector = 0;
845     + nvme_init_queue(nvmeq, 0);
846     result = queue_request_irq(nvmeq);
847     if (result) {
848     nvmeq->cq_vector = -1;
849     @@ -1776,7 +1777,6 @@ static void nvme_reset_work(struct work_struct *work)
850     if (result)
851     goto out;
852    
853     - nvme_init_queue(dev->queues[0], 0);
854     result = nvme_alloc_admin_tags(dev);
855     if (result)
856     goto out;
857     diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
858     index f791d46fe50f..2caed285fd7b 100644
859     --- a/drivers/nvme/target/admin-cmd.c
860     +++ b/drivers/nvme/target/admin-cmd.c
861     @@ -166,11 +166,21 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
862     nvmet_req_complete(req, status);
863     }
864    
865     +static void copy_and_pad(char *dst, int dst_len, const char *src, int src_len)
866     +{
867     + int len = min(src_len, dst_len);
868     +
869     + memcpy(dst, src, len);
870     + if (dst_len > len)
871     + memset(dst + len, ' ', dst_len - len);
872     +}
873     +
874     static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
875     {
876     struct nvmet_ctrl *ctrl = req->sq->ctrl;
877     struct nvme_id_ctrl *id;
878     u16 status = 0;
879     + const char model[] = "Linux";
880    
881     id = kzalloc(sizeof(*id), GFP_KERNEL);
882     if (!id) {
883     @@ -182,14 +192,10 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
884     id->vid = 0;
885     id->ssvid = 0;
886    
887     - memset(id->sn, ' ', sizeof(id->sn));
888     - snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);
889     -
890     - memset(id->mn, ' ', sizeof(id->mn));
891     - strncpy((char *)id->mn, "Linux", sizeof(id->mn));
892     -
893     - memset(id->fr, ' ', sizeof(id->fr));
894     - strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
895     + bin2hex(id->sn, &ctrl->subsys->serial,
896     + min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
897     + copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1);
898     + copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE));
899    
900     id->rab = 6;
901    
902     diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
903     index 3a044922b048..64b40a12abcf 100644
904     --- a/drivers/nvme/target/core.c
905     +++ b/drivers/nvme/target/core.c
906     @@ -743,9 +743,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
907     memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
908     memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
909    
910     - /* generate a random serial number as our controllers are ephemeral: */
911     - get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));
912     -
913     kref_init(&ctrl->ref);
914     ctrl->subsys = subsys;
915    
916     @@ -904,6 +901,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
917     return NULL;
918    
919     subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */
920     + /* generate a random serial number as our controllers are ephemeral: */
921     + get_random_bytes(&subsys->serial, sizeof(subsys->serial));
922    
923     switch (type) {
924     case NVME_NQN_NVME:
925     diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
926     index 26b87dc843d2..0bc530cdf2b4 100644
927     --- a/drivers/nvme/target/nvmet.h
928     +++ b/drivers/nvme/target/nvmet.h
929     @@ -110,7 +110,6 @@ struct nvmet_ctrl {
930    
931     struct mutex lock;
932     u64 cap;
933     - u64 serial;
934     u32 cc;
935     u32 csts;
936    
937     @@ -151,6 +150,7 @@ struct nvmet_subsys {
938     u16 max_qid;
939    
940     u64 ver;
941     + u64 serial;
942     char *subsysnqn;
943    
944     struct config_group group;
945     diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
946     index 209a8f7ef02b..6f9974cb0e15 100644
947     --- a/drivers/staging/android/ion/ion.c
948     +++ b/drivers/staging/android/ion/ion.c
949     @@ -192,8 +192,11 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
950    
951     void ion_buffer_destroy(struct ion_buffer *buffer)
952     {
953     - if (WARN_ON(buffer->kmap_cnt > 0))
954     + if (buffer->kmap_cnt > 0) {
955     + pr_warn_once("%s: buffer still mapped in the kernel\n",
956     + __func__);
957     buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
958     + }
959     buffer->heap->ops->free(buffer);
960     vfree(buffer->pages);
961     kfree(buffer);
962     diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
963     index e8b34f16ba2c..a3adf21f9dce 100644
964     --- a/drivers/tty/serial/8250/8250_omap.c
965     +++ b/drivers/tty/serial/8250/8250_omap.c
966     @@ -1078,13 +1078,14 @@ static int omap8250_no_handle_irq(struct uart_port *port)
967     return 0;
968     }
969    
970     +static const u8 omap4_habit = UART_ERRATA_CLOCK_DISABLE;
971     static const u8 am3352_habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE;
972     static const u8 dra742_habit = UART_ERRATA_CLOCK_DISABLE;
973    
974     static const struct of_device_id omap8250_dt_ids[] = {
975     { .compatible = "ti,omap2-uart" },
976     { .compatible = "ti,omap3-uart" },
977     - { .compatible = "ti,omap4-uart" },
978     + { .compatible = "ti,omap4-uart", .data = &omap4_habit, },
979     { .compatible = "ti,am3352-uart", .data = &am3352_habit, },
980     { .compatible = "ti,am4372-uart", .data = &am3352_habit, },
981     { .compatible = "ti,dra742-uart", .data = &dra742_habit, },
982     @@ -1326,6 +1327,19 @@ static int omap8250_soft_reset(struct device *dev)
983     int sysc;
984     int syss;
985    
986     + /*
987     + * At least on omap4, unused uarts may not idle after reset without
988     + * a basic scr dma configuration even with no dma in use. The
989     + * module clkctrl status bits will be 1 instead of 3 blocking idle
990     + * for the whole clockdomain. The softreset below will clear scr,
991     + * and we restore it on resume so this is safe to do on all SoCs
992     + * needing omap8250_soft_reset() quirk. Do it in two writes as
993     + * recommended in the comment for omap8250_update_scr().
994     + */
995     + serial_out(up, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1);
996     + serial_out(up, UART_OMAP_SCR,
997     + OMAP_UART_SCR_DMAMODE_1 | OMAP_UART_SCR_DMAMODE_CTL);
998     +
999     sysc = serial_in(up, UART_OMAP_SYSC);
1000    
1001     /* softreset the UART */
1002     diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
1003     index 6b1863293fe1..41b0dd67fcce 100644
1004     --- a/drivers/tty/serial/amba-pl011.c
1005     +++ b/drivers/tty/serial/amba-pl011.c
1006     @@ -1726,10 +1726,26 @@ static int pl011_allocate_irq(struct uart_amba_port *uap)
1007     */
1008     static void pl011_enable_interrupts(struct uart_amba_port *uap)
1009     {
1010     + unsigned int i;
1011     +
1012     spin_lock_irq(&uap->port.lock);
1013    
1014     /* Clear out any spuriously appearing RX interrupts */
1015     pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
1016     +
1017     + /*
1018     + * RXIS is asserted only when the RX FIFO transitions from below
1019     + * to above the trigger threshold. If the RX FIFO is already
1020     + * full to the threshold this can't happen and RXIS will now be
1021     + * stuck off. Drain the RX FIFO explicitly to fix this:
1022     + */
1023     + for (i = 0; i < uap->fifosize * 2; ++i) {
1024     + if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
1025     + break;
1026     +
1027     + pl011_read(uap, REG_DR);
1028     + }
1029     +
1030     uap->im = UART011_RTIM;
1031     if (!pl011_dma_rx_running(uap))
1032     uap->im |= UART011_RXIM;
1033     diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
1034     index addb287cacea..5a341b1c65c3 100644
1035     --- a/drivers/tty/serial/atmel_serial.c
1036     +++ b/drivers/tty/serial/atmel_serial.c
1037     @@ -1803,7 +1803,6 @@ static int atmel_startup(struct uart_port *port)
1038     {
1039     struct platform_device *pdev = to_platform_device(port->dev);
1040     struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1041     - struct tty_struct *tty = port->state->port.tty;
1042     int retval;
1043    
1044     /*
1045     @@ -1818,8 +1817,8 @@ static int atmel_startup(struct uart_port *port)
1046     * Allocate the IRQ
1047     */
1048     retval = request_irq(port->irq, atmel_interrupt,
1049     - IRQF_SHARED | IRQF_COND_SUSPEND,
1050     - tty ? tty->name : "atmel_serial", port);
1051     + IRQF_SHARED | IRQF_COND_SUSPEND,
1052     + dev_name(&pdev->dev), port);
1053     if (retval) {
1054     dev_err(port->dev, "atmel_startup - Can't get irq\n");
1055     return retval;
1056     diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
1057     index f2ab6d8aab41..5609305b3676 100644
1058     --- a/drivers/tty/serial/samsung.c
1059     +++ b/drivers/tty/serial/samsung.c
1060     @@ -866,15 +866,12 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
1061     dma->rx_conf.direction = DMA_DEV_TO_MEM;
1062     dma->rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1063     dma->rx_conf.src_addr = p->port.mapbase + S3C2410_URXH;
1064     - dma->rx_conf.src_maxburst = 16;
1065     + dma->rx_conf.src_maxburst = 1;
1066    
1067     dma->tx_conf.direction = DMA_MEM_TO_DEV;
1068     dma->tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1069     dma->tx_conf.dst_addr = p->port.mapbase + S3C2410_UTXH;
1070     - if (dma_get_cache_alignment() >= 16)
1071     - dma->tx_conf.dst_maxburst = 16;
1072     - else
1073     - dma->tx_conf.dst_maxburst = 1;
1074     + dma->tx_conf.dst_maxburst = 1;
1075    
1076     dma_cap_zero(mask);
1077     dma_cap_set(DMA_SLAVE, mask);
1078     diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
1079     index 107f0d194ac5..da46f0fba5da 100644
1080     --- a/drivers/tty/serial/sh-sci.c
1081     +++ b/drivers/tty/serial/sh-sci.c
1082     @@ -2626,8 +2626,8 @@ static int sci_init_clocks(struct sci_port *sci_port, struct device *dev)
1083     dev_dbg(dev, "failed to get %s (%ld)\n", clk_names[i],
1084     PTR_ERR(clk));
1085     else
1086     - dev_dbg(dev, "clk %s is %pC rate %pCr\n", clk_names[i],
1087     - clk, clk);
1088     + dev_dbg(dev, "clk %s is %pC rate %lu\n", clk_names[i],
1089     + clk, clk_get_rate(clk));
1090     sci_port->clks[i] = IS_ERR(clk) ? NULL : clk;
1091     }
1092     return 0;
1093     diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
1094     index 2197a50ed2ab..b1ae944c83a9 100644
1095     --- a/drivers/usb/gadget/udc/renesas_usb3.c
1096     +++ b/drivers/usb/gadget/udc/renesas_usb3.c
1097     @@ -521,6 +521,13 @@ static void usb3_disconnect(struct renesas_usb3 *usb3)
1098     usb3_usb2_pullup(usb3, 0);
1099     usb3_clear_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON);
1100     usb3_reset_epc(usb3);
1101     + usb3_disable_irq_1(usb3, USB_INT_1_B2_RSUM | USB_INT_1_B3_PLLWKUP |
1102     + USB_INT_1_B3_LUPSUCS | USB_INT_1_B3_DISABLE |
1103     + USB_INT_1_SPEED | USB_INT_1_B3_WRMRST |
1104     + USB_INT_1_B3_HOTRST | USB_INT_1_B2_SPND |
1105     + USB_INT_1_B2_L1SPND | USB_INT_1_B2_USBRST);
1106     + usb3_clear_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON);
1107     + usb3_init_epc_registers(usb3);
1108    
1109     if (usb3->driver)
1110     usb3->driver->disconnect(&usb3->gadget);
1111     diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
1112     index a96dcc660d0f..8dd200f92020 100644
1113     --- a/drivers/usb/storage/uas.c
1114     +++ b/drivers/usb/storage/uas.c
1115     @@ -836,6 +836,12 @@ static int uas_slave_configure(struct scsi_device *sdev)
1116     if (devinfo->flags & US_FL_BROKEN_FUA)
1117     sdev->broken_fua = 1;
1118    
1119     + /* UAS also needs to support FL_ALWAYS_SYNC */
1120     + if (devinfo->flags & US_FL_ALWAYS_SYNC) {
1121     + sdev->skip_ms_page_3f = 1;
1122     + sdev->skip_ms_page_8 = 1;
1123     + sdev->wce_default_on = 1;
1124     + }
1125     scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
1126     return 0;
1127     }
1128     diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1129     index ca3a5d430ae1..fc5ed351defb 100644
1130     --- a/drivers/usb/storage/unusual_devs.h
1131     +++ b/drivers/usb/storage/unusual_devs.h
1132     @@ -2340,6 +2340,15 @@ UNUSUAL_DEV( 0x4146, 0xba01, 0x0100, 0x0100,
1133     "Micro Mini 1GB",
1134     USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
1135    
1136     +/* "G-DRIVE" external HDD hangs on write without these.
1137     + * Patch submitted by Alexander Kappner <agk@godking.net>
1138     + */
1139     +UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999,
1140     + "SimpleTech",
1141     + "External HDD",
1142     + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1143     + US_FL_ALWAYS_SYNC),
1144     +
1145     /*
1146     * Nick Bowler <nbowler@elliptictech.com>
1147     * SCSI stack spams (otherwise harmless) error messages.
1148     diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
1149     index 719ec68ae309..f15aa47c54a9 100644
1150     --- a/drivers/usb/storage/unusual_uas.h
1151     +++ b/drivers/usb/storage/unusual_uas.h
1152     @@ -183,3 +183,12 @@ UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
1153     "External HDD",
1154     USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1155     US_FL_NO_REPORT_OPCODES),
1156     +
1157     +/* "G-DRIVE" external HDD hangs on write without these.
1158     + * Patch submitted by Alexander Kappner <agk@godking.net>
1159     + */
1160     +UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999,
1161     + "SimpleTech",
1162     + "External HDD",
1163     + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1164     + US_FL_ALWAYS_SYNC),
1165     diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
1166     index c287ccc78fde..e8a008de8dbc 100644
1167     --- a/drivers/usb/usbip/vhci_sysfs.c
1168     +++ b/drivers/usb/usbip/vhci_sysfs.c
1169     @@ -24,6 +24,9 @@
1170     #include <linux/platform_device.h>
1171     #include <linux/slab.h>
1172    
1173     +/* Hardening for Spectre-v1 */
1174     +#include <linux/nospec.h>
1175     +
1176     #include "usbip_common.h"
1177     #include "vhci.h"
1178    
1179     @@ -181,16 +184,20 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci, __u32 rhport)
1180     return 0;
1181     }
1182    
1183     -static int valid_port(__u32 pdev_nr, __u32 rhport)
1184     +static int valid_port(__u32 *pdev_nr, __u32 *rhport)
1185     {
1186     - if (pdev_nr >= vhci_num_controllers) {
1187     - pr_err("pdev %u\n", pdev_nr);
1188     + if (*pdev_nr >= vhci_num_controllers) {
1189     + pr_err("pdev %u\n", *pdev_nr);
1190     return 0;
1191     }
1192     - if (rhport >= VHCI_HC_PORTS) {
1193     - pr_err("rhport %u\n", rhport);
1194     + *pdev_nr = array_index_nospec(*pdev_nr, vhci_num_controllers);
1195     +
1196     + if (*rhport >= VHCI_HC_PORTS) {
1197     + pr_err("rhport %u\n", *rhport);
1198     return 0;
1199     }
1200     + *rhport = array_index_nospec(*rhport, VHCI_HC_PORTS);
1201     +
1202     return 1;
1203     }
1204    
1205     @@ -207,7 +214,7 @@ static ssize_t store_detach(struct device *dev, struct device_attribute *attr,
1206     pdev_nr = port_to_pdev_nr(port);
1207     rhport = port_to_rhport(port);
1208    
1209     - if (!valid_port(pdev_nr, rhport))
1210     + if (!valid_port(&pdev_nr, &rhport))
1211     return -EINVAL;
1212    
1213     hcd = platform_get_drvdata(*(vhci_pdevs + pdev_nr));
1214     @@ -226,7 +233,8 @@ static ssize_t store_detach(struct device *dev, struct device_attribute *attr,
1215     }
1216     static DEVICE_ATTR(detach, S_IWUSR, NULL, store_detach);
1217    
1218     -static int valid_args(__u32 pdev_nr, __u32 rhport, enum usb_device_speed speed)
1219     +static int valid_args(__u32 *pdev_nr, __u32 *rhport,
1220     + enum usb_device_speed speed)
1221     {
1222     if (!valid_port(pdev_nr, rhport)) {
1223     return 0;
1224     @@ -288,7 +296,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
1225     sockfd, devid, speed);
1226    
1227     /* check received parameters */
1228     - if (!valid_args(pdev_nr, rhport, speed))
1229     + if (!valid_args(&pdev_nr, &rhport, speed))
1230     return -EINVAL;
1231    
1232     hcd = platform_get_drvdata(*(vhci_pdevs + pdev_nr));
1233     diff --git a/include/net/bonding.h b/include/net/bonding.h
1234     index 7734cc9c7d29..714428c54c68 100644
1235     --- a/include/net/bonding.h
1236     +++ b/include/net/bonding.h
1237     @@ -277,6 +277,11 @@ static inline bool bond_is_lb(const struct bonding *bond)
1238     BOND_MODE(bond) == BOND_MODE_ALB;
1239     }
1240    
1241     +static inline bool bond_needs_speed_duplex(const struct bonding *bond)
1242     +{
1243     + return BOND_MODE(bond) == BOND_MODE_8023AD || bond_is_lb(bond);
1244     +}
1245     +
1246     static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
1247     {
1248     return (BOND_MODE(bond) == BOND_MODE_TLB) &&
1249     diff --git a/net/key/af_key.c b/net/key/af_key.c
1250     index 15150b412930..3ba903ff2bb0 100644
1251     --- a/net/key/af_key.c
1252     +++ b/net/key/af_key.c
1253     @@ -437,6 +437,24 @@ static int verify_address_len(const void *p)
1254     return 0;
1255     }
1256    
1257     +static inline int sadb_key_len(const struct sadb_key *key)
1258     +{
1259     + int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8);
1260     +
1261     + return DIV_ROUND_UP(sizeof(struct sadb_key) + key_bytes,
1262     + sizeof(uint64_t));
1263     +}
1264     +
1265     +static int verify_key_len(const void *p)
1266     +{
1267     + const struct sadb_key *key = p;
1268     +
1269     + if (sadb_key_len(key) > key->sadb_key_len)
1270     + return -EINVAL;
1271     +
1272     + return 0;
1273     +}
1274     +
1275     static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx)
1276     {
1277     return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) +
1278     @@ -533,16 +551,25 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
1279     return -EINVAL;
1280     if (ext_hdrs[ext_type-1] != NULL)
1281     return -EINVAL;
1282     - if (ext_type == SADB_EXT_ADDRESS_SRC ||
1283     - ext_type == SADB_EXT_ADDRESS_DST ||
1284     - ext_type == SADB_EXT_ADDRESS_PROXY ||
1285     - ext_type == SADB_X_EXT_NAT_T_OA) {
1286     + switch (ext_type) {
1287     + case SADB_EXT_ADDRESS_SRC:
1288     + case SADB_EXT_ADDRESS_DST:
1289     + case SADB_EXT_ADDRESS_PROXY:
1290     + case SADB_X_EXT_NAT_T_OA:
1291     if (verify_address_len(p))
1292     return -EINVAL;
1293     - }
1294     - if (ext_type == SADB_X_EXT_SEC_CTX) {
1295     + break;
1296     + case SADB_X_EXT_SEC_CTX:
1297     if (verify_sec_ctx_len(p))
1298     return -EINVAL;
1299     + break;
1300     + case SADB_EXT_KEY_AUTH:
1301     + case SADB_EXT_KEY_ENCRYPT:
1302     + if (verify_key_len(p))
1303     + return -EINVAL;
1304     + break;
1305     + default:
1306     + break;
1307     }
1308     ext_hdrs[ext_type-1] = (void *) p;
1309     }
1310     @@ -1111,14 +1138,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1311     key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
1312     if (key != NULL &&
1313     sa->sadb_sa_auth != SADB_X_AALG_NULL &&
1314     - ((key->sadb_key_bits+7) / 8 == 0 ||
1315     - (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
1316     + key->sadb_key_bits == 0)
1317     return ERR_PTR(-EINVAL);
1318     key = ext_hdrs[SADB_EXT_KEY_ENCRYPT-1];
1319     if (key != NULL &&
1320     sa->sadb_sa_encrypt != SADB_EALG_NULL &&
1321     - ((key->sadb_key_bits+7) / 8 == 0 ||
1322     - (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
1323     + key->sadb_key_bits == 0)
1324     return ERR_PTR(-EINVAL);
1325    
1326     x = xfrm_state_alloc(net);
1327     diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
1328     index c278f276c9b3..aea30afeddb8 100644
1329     --- a/tools/arch/x86/include/asm/cpufeatures.h
1330     +++ b/tools/arch/x86/include/asm/cpufeatures.h
1331     @@ -104,7 +104,7 @@
1332     #define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
1333     #define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
1334     #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
1335     -#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
1336     +/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
1337     #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
1338    
1339     /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */