Magellan Linux

/trunk/kernel-alx/patches-4.14/0149-4.14.50-all-fixes.patch



Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 84844 bytes
-added up to patches-4.14.79
diff --git a/Makefile b/Makefile
index 480ae7ef755c..84374c5ba60e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
-SUBLEVEL = 49
+SUBLEVEL = 50
EXTRAVERSION =
NAME = Petit Gorille

diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index ee23a43386a2..8493303d8b2e 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -107,11 +107,12 @@ struct x86_emulate_ops {
* @addr: [IN ] Linear address from which to read.
* @val: [OUT] Value read from memory, zero-extended to 'u_long'.
* @bytes: [IN ] Number of bytes to read from memory.
+ * @system:[IN ] Whether the access is forced to be at CPL0.
*/
int (*read_std)(struct x86_emulate_ctxt *ctxt,
unsigned long addr, void *val,
unsigned int bytes,
- struct x86_exception *fault);
+ struct x86_exception *fault, bool system);

/*
* read_phys: Read bytes of standard (non-emulated/special) memory.
@@ -129,10 +130,11 @@ struct x86_emulate_ops {
* @addr: [IN ] Linear address to which to write.
* @val: [OUT] Value write to memory, zero-extended to 'u_long'.
* @bytes: [IN ] Number of bytes to write to memory.
+ * @system:[IN ] Whether the access is forced to be at CPL0.
*/
int (*write_std)(struct x86_emulate_ctxt *ctxt,
unsigned long addr, void *val, unsigned int bytes,
- struct x86_exception *fault);
+ struct x86_exception *fault, bool system);
/*
* fetch: Read bytes of standard (non-emulated/special) memory.
* Used for instruction fetch.
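The two declarations above gain a trailing `bool system`. How a backend is expected to honor it can be read off the arch/x86/kvm/x86.c hunk later in this patch; condensed here for orientation (kernel context assumed, not standalone):

/* Condensed from the x86.c hunk below: a non-system access made while
 * the guest runs at CPL 3 adds PFERR_USER_MASK, so the MMU walk applies
 * user permission checks; system accesses (descriptor tables, TSS, IDT)
 * are always walked with supervisor rights. */
static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
			     gva_t addr, void *val, unsigned int bytes,
			     struct x86_exception *exception, bool system)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	u32 access = 0;

	if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
		access |= PFERR_USER_MASK;

	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
					  exception);
}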
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index fab073b19528..5f758568fc44 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -811,6 +811,19 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
return assign_eip_near(ctxt, ctxt->_eip + rel);
}

+static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
+ void *data, unsigned size)
+{
+ return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
+}
+
+static int linear_write_system(struct x86_emulate_ctxt *ctxt,
+ ulong linear, void *data,
+ unsigned int size)
+{
+ return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
+}
+
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
@@ -822,7 +835,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
rc = linearize(ctxt, addr, size, false, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
- return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
+ return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
@@ -836,7 +849,7 @@ static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
- return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+ return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}

/*
@@ -1509,8 +1522,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
return emulate_gp(ctxt, index << 3 | 0x2);

addr = dt.address + index * 8;
- return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
- &ctxt->exception);
+ return linear_read_system(ctxt, addr, desc, sizeof *desc);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
@@ -1573,8 +1585,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
if (rc != X86EMUL_CONTINUE)
return rc;

- return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
- &ctxt->exception);
+ return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
}

/* allowed just for 8 bytes segments */
@@ -1588,8 +1599,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
if (rc != X86EMUL_CONTINUE)
return rc;

- return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
- &ctxt->exception);
+ return linear_write_system(ctxt, addr, desc, sizeof *desc);
}

static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
@@ -1750,8 +1760,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
return ret;
}
} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
- ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
- sizeof(base3), &ctxt->exception);
+ ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
if (ret != X86EMUL_CONTINUE)
return ret;
if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
@@ -2064,11 +2073,11 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
eip_addr = dt.address + (irq << 2);
cs_addr = dt.address + (irq << 2) + 2;

- rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
+ rc = linear_read_system(ctxt, cs_addr, &cs, 2);
if (rc != X86EMUL_CONTINUE)
return rc;

- rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
+ rc = linear_read_system(ctxt, eip_addr, &eip, 2);
if (rc != X86EMUL_CONTINUE)
return rc;

@@ -2912,12 +2921,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
#ifdef CONFIG_X86_64
base |= ((u64)base3) << 32;
#endif
- r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
+ r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
if (r != X86EMUL_CONTINUE)
return false;
if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
return false;
- r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
+ r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
if (r != X86EMUL_CONTINUE)
return false;
if ((perm >> bit_idx) & mask)
@@ -3046,35 +3055,30 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
- const struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_16 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);

- ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
- &ctxt->exception);
+ ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
if (ret != X86EMUL_CONTINUE)
return ret;

save_state_to_tss16(ctxt, &tss_seg);

- ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
- &ctxt->exception);
+ ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
if (ret != X86EMUL_CONTINUE)
return ret;

- ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
- &ctxt->exception);
+ ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
if (ret != X86EMUL_CONTINUE)
return ret;

if (old_tss_sel != 0xffff) {
tss_seg.prev_task_link = old_tss_sel;

- ret = ops->write_std(ctxt, new_tss_base,
- &tss_seg.prev_task_link,
- sizeof tss_seg.prev_task_link,
- &ctxt->exception);
+ ret = linear_write_system(ctxt, new_tss_base,
+ &tss_seg.prev_task_link,
+ sizeof tss_seg.prev_task_link);
if (ret != X86EMUL_CONTINUE)
return ret;
}
@@ -3190,38 +3194,34 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
- const struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_32 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);
u32 eip_offset = offsetof(struct tss_segment_32, eip);
u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

- ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
- &ctxt->exception);
+ ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
if (ret != X86EMUL_CONTINUE)
return ret;

save_state_to_tss32(ctxt, &tss_seg);

/* Only GP registers and segment selectors are saved */
- ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
- ldt_sel_offset - eip_offset, &ctxt->exception);
+ ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
+ ldt_sel_offset - eip_offset);
if (ret != X86EMUL_CONTINUE)
return ret;

- ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
- &ctxt->exception);
+ ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
if (ret != X86EMUL_CONTINUE)
return ret;

if (old_tss_sel != 0xffff) {
tss_seg.prev_task_link = old_tss_sel;

- ret = ops->write_std(ctxt, new_tss_base,
- &tss_seg.prev_task_link,
- sizeof tss_seg.prev_task_link,
- &ctxt->exception);
+ ret = linear_write_system(ctxt, new_tss_base,
+ &tss_seg.prev_task_link,
+ sizeof tss_seg.prev_task_link);
if (ret != X86EMUL_CONTINUE)
return ret;
}
@@ -4152,7 +4152,9 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
maxphyaddr = eax & 0xff;
else
maxphyaddr = 36;
- rsvd = rsvd_bits(maxphyaddr, 62);
+ rsvd = rsvd_bits(maxphyaddr, 63);
+ if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
+ rsvd &= ~CR3_PCID_INVD;
}

if (new_val & rsvd)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4c88572d2b81..b1556166a06d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7317,8 +7317,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
return 1;

- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
- sizeof(*vmpointer), &e)) {
+ if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
@@ -7399,6 +7398,12 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
return 1;
}

+ /* CPL=0 must be checked manually. */
+ if (vmx_get_cpl(vcpu)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
if (vmx->nested.vmxon) {
nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
return kvm_skip_emulated_instruction(vcpu);
@@ -7458,6 +7463,11 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
*/
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
{
+ if (vmx_get_cpl(vcpu)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 0;
+ }
+
if (!to_vmx(vcpu)->nested.vmxon) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 0;
@@ -7790,9 +7800,9 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, true, &gva))
return 1;
- /* _system ok, as hardware has verified cpl=0 */
- kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
- &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
+ /* _system ok, nested_vmx_check_permission has verified cpl=0 */
+ kvm_write_guest_virt_system(vcpu, gva, &field_value,
+ (is_long_mode(vcpu) ? 8 : 4), NULL);
}

nested_vmx_succeed(vcpu);
@@ -7828,8 +7838,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, false, &gva))
return 1;
- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
- &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
+ if (kvm_read_guest_virt(vcpu, gva, &field_value,
+ (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
@@ -7933,10 +7943,10 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, true, &vmcs_gva))
return 1;
- /* ok to use *_system, as hardware has verified cpl=0 */
- if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
- (void *)&to_vmx(vcpu)->nested.current_vmptr,
- sizeof(u64), &e)) {
+ /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
+ if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
+ (void *)&to_vmx(vcpu)->nested.current_vmptr,
+ sizeof(u64), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
@@ -7983,8 +7993,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
vmx_instruction_info, false, &gva))
return 1;
- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
- sizeof(operand), &e)) {
+ if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
@@ -8048,8 +8057,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
vmx_instruction_info, false, &gva))
return 1;
- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
- sizeof(operand), &e)) {
+ if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
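The vmx.c hunks above all rest on one invariant: kvm_write_guest_virt_system() bypasses CPL-based permission checks, so every VMX instruction handler must prove CPL=0 before using it. The gate this patch installs, condensed from the hunk above (kernel context, not standalone):

/* After this patch, both entry points into VMX emulation enforce ring 0:
 * handle_vmon() checks it explicitly, and every other handler passes
 * through this gate before any *_system guest access. */
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
{
	if (vmx_get_cpl(vcpu)) {		/* CPL != 0 */
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	if (!to_vmx(vcpu)->nested.vmxon) {	/* VMXON not executed */
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	return 1;
}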
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index adac01d0181a..b62328cd4cb0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -836,7 +836,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
}

if (is_long_mode(vcpu) &&
- (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 62)))
+ (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63)))
return 1;
else if (is_pae(vcpu) && is_paging(vcpu) &&
!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
@@ -4492,11 +4492,10 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
return X86EMUL_CONTINUE;
}

-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception)
{
- struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;

return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
@@ -4504,12 +4503,17 @@ int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
}
EXPORT_SYMBOL_GPL(kvm_read_guest_virt);

-static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
- gva_t addr, void *val, unsigned int bytes,
- struct x86_exception *exception)
+static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
+ gva_t addr, void *val, unsigned int bytes,
+ struct x86_exception *exception, bool system)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
- return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
+ u32 access = 0;
+
+ if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
+ access |= PFERR_USER_MASK;
+
+ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
}
static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
@@ -4521,18 +4525,16 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
}

-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
- gva_t addr, void *val,
- unsigned int bytes,
- struct x86_exception *exception)
+static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
+ struct kvm_vcpu *vcpu, u32 access,
+ struct x86_exception *exception)
{
- struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
void *data = val;
int r = X86EMUL_CONTINUE;

while (bytes) {
gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
- PFERR_WRITE_MASK,
+ access,
exception);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
@@ -4553,6 +4555,27 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
out:
return r;
}
+
+static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
+ unsigned int bytes, struct x86_exception *exception,
+ bool system)
+{
+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ u32 access = PFERR_WRITE_MASK;
+
+ if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
+ access |= PFERR_USER_MASK;
+
+ return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
+ access, exception);
+}
+
+int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
+ unsigned int bytes, struct x86_exception *exception)
+{
+ return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
+ PFERR_WRITE_MASK, exception);
+}
EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);

static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
@@ -5287,8 +5310,8 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla
static const struct x86_emulate_ops emulate_ops = {
.read_gpr = emulator_read_gpr,
.write_gpr = emulator_write_gpr,
- .read_std = kvm_read_guest_virt_system,
- .write_std = kvm_write_guest_virt_system,
+ .read_std = emulator_read_std,
+ .write_std = emulator_write_std,
.read_phys = kvm_read_guest_phys_system,
.fetch = kvm_fetch_guest_virt,
.read_emulated = emulator_read_emulated,
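Alongside the accessor rework, the check_cr_write() hunk in emulate.c and the kvm_set_cr3() hunk above extend the CR3 reserved-bit mask from bit 62 to bit 63, then exempt bit 63 (CR3_PCID_INVD, the "do not invalidate PCID" flag) when CR4.PCIDE is set. A small standalone model of that mask arithmetic; rsvd_bits() is re-implemented here to mirror the kernel helper, and the constants are illustrative:

#include <stdint.h>
#include <stdio.h>

#define CR3_PCID_INVD (1ULL << 63)	/* "no flush" bit, legal only with PCIDE */

/* Mirrors the kernel's rsvd_bits(s, e): a mask with bits s..e set. */
static uint64_t rsvd_bits(int s, int e)
{
	return ((2ULL << (e - s)) - 1) << s;
}

int main(void)
{
	int maxphyaddr = 36;	/* the hunk's fallback when CPUID is absent */
	int pcide = 1;		/* pretend CR4.PCIDE is set */
	uint64_t rsvd = rsvd_bits(maxphyaddr, 63);

	if (pcide)
		rsvd &= ~CR3_PCID_INVD;	/* bit 63 becomes a valid flag */

	printf("CR3 reserved mask: %#018llx\n", (unsigned long long)rsvd);
	return 0;
}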
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 6d112d8f799c..d4b59cf0dc51 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -213,11 +213,11 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);

-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception);

-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception);

diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index ff57fb51b338..77fce6f09f78 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -286,7 +286,11 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
if (!rep.nr_zones)
return -EINVAL;

- zones = kcalloc(rep.nr_zones, sizeof(struct blk_zone), GFP_KERNEL);
+ if (rep.nr_zones > INT_MAX / sizeof(struct blk_zone))
+ return -ERANGE;
+
+ zones = kvmalloc(rep.nr_zones * sizeof(struct blk_zone),
+ GFP_KERNEL | __GFP_ZERO);
if (!zones)
return -ENOMEM;

@@ -308,7 +312,7 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
}

out:
- kfree(zones);
+ kvfree(zones);

return ret;
}
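kcalloc() checks the n * size multiplication for overflow internally; the switch to kvmalloc() above loses that protection, so the hunk adds an explicit bound on the user-supplied rep.nr_zones first. A standalone model of the pattern (the struct size and the function name are stand-ins, not the kernel code):

#include <limits.h>
#include <stdlib.h>
#include <string.h>

struct blk_zone { char pad[64]; };	/* stand-in; the real struct differs */

/* Reject counts whose byte size could overflow, then allocate zeroed
 * memory -- the same shape as the patched blkdev_report_zones_ioctl(). */
struct blk_zone *alloc_zones(unsigned int nr_zones)
{
	struct blk_zone *zones;

	if (nr_zones > INT_MAX / sizeof(struct blk_zone))
		return NULL;			/* the kernel returns -ERANGE */

	zones = malloc(nr_zones * sizeof(struct blk_zone));
	if (zones)				/* kvmalloc + __GFP_ZERO */
		memset(zones, 0, nr_zones * sizeof(struct blk_zone));
	return zones;
}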
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 54f3b375a453..a8a2a271b63d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -735,15 +735,18 @@ struct aead_edesc {
* @src_nents: number of segments in input s/w scatterlist
* @dst_nents: number of segments in output s/w scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
+ * @iv_dir: DMA mapping direction for IV
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
+ * and IV
*/
struct ablkcipher_edesc {
int src_nents;
int dst_nents;
dma_addr_t iv_dma;
+ enum dma_data_direction iv_dir;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -753,7 +756,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents,
- dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
+ dma_addr_t iv_dma, int ivsize,
+ enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
int sec4_sg_bytes)
{
if (dst != src) {
@@ -765,7 +769,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}

if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+ dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
if (sec4_sg_bytes)
dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
DMA_TO_DEVICE);
@@ -776,7 +780,7 @@ static void aead_unmap(struct device *dev,
struct aead_request *req)
{
caam_unmap(dev, req->src, req->dst,
- edesc->src_nents, edesc->dst_nents, 0, 0,
+ edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

@@ -789,7 +793,7 @@ static void ablkcipher_unmap(struct device *dev,

caam_unmap(dev, req->src, req->dst,
edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize,
+ edesc->iv_dma, ivsize, edesc->iv_dir,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

@@ -878,6 +882,18 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
ivsize, 0);

+ /* In case initial IV was generated, copy it in GIVCIPHER request */
+ if (edesc->iv_dir == DMA_FROM_DEVICE) {
+ u8 *iv;
+ struct skcipher_givcrypt_request *greq;
+
+ greq = container_of(req, struct skcipher_givcrypt_request,
+ creq);
+ iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
+ edesc->sec4_sg_bytes;
+ memcpy(greq->giv, iv, ivsize);
+ }
+
kfree(edesc);

ablkcipher_request_complete(req, err);
@@ -888,10 +904,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
struct ablkcipher_request *req = context;
struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

-#ifdef DEBUG
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

@@ -909,14 +925,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
edesc->dst_nents > 1 ? 100 : req->nbytes, 1);

ablkcipher_unmap(jrdev, edesc, req);
-
- /*
- * The crypto API expects us to set the IV (req->info) to the last
- * ciphertext block.
- */
- scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
- ivsize, 0);
-
kfree(edesc);

ablkcipher_request_complete(req, err);
@@ -1057,15 +1065,14 @@ static void init_authenc_job(struct aead_request *req,
*/
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req,
- bool iv_contig)
+ struct ablkcipher_request *req)
{
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
u32 *desc = edesc->hw_desc;
- u32 out_options = 0, in_options;
- dma_addr_t dst_dma, src_dma;
- int len, sec4_sg_index = 0;
+ u32 out_options = 0;
+ dma_addr_t dst_dma;
+ int len;

#ifdef DEBUG
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
@@ -1081,30 +1088,18 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

- if (iv_contig) {
- src_dma = edesc->iv_dma;
- in_options = 0;
- } else {
- src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents + 1;
- in_options = LDST_SGF;
- }
- append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
+ LDST_SGF);

if (likely(req->src == req->dst)) {
- if (edesc->src_nents == 1 && iv_contig) {
- dst_dma = sg_dma_address(req->src);
- } else {
- dst_dma = edesc->sec4_sg_dma +
- sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
- }
+ dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
} else {
if (edesc->dst_nents == 1) {
dst_dma = sg_dma_address(req->dst);
} else {
- dst_dma = edesc->sec4_sg_dma +
- sec4_sg_index * sizeof(struct sec4_sg_entry);
+ dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
+ sizeof(struct sec4_sg_entry);
out_options = LDST_SGF;
}
}
@@ -1116,13 +1111,12 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
*/
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req,
- bool iv_contig)
+ struct ablkcipher_request *req)
{
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
u32 *desc = edesc->hw_desc;
- u32 out_options, in_options;
+ u32 in_options;
dma_addr_t dst_dma, src_dma;
int len, sec4_sg_index = 0;

@@ -1148,15 +1142,9 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
}
append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);

- if (iv_contig) {
- dst_dma = edesc->iv_dma;
- out_options = 0;
- } else {
- dst_dma = edesc->sec4_sg_dma +
- sec4_sg_index * sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
- }
- append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
+ dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
+ sizeof(struct sec4_sg_entry);
+ append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
}

/*
@@ -1245,7 +1233,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
GFP_DMA | flags);
if (!edesc) {
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}

@@ -1449,8 +1437,7 @@ static int aead_decrypt(struct aead_request *req)
* allocate and map the ablkcipher extended descriptor for ablkcipher
*/
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
- *req, int desc_bytes,
- bool *iv_contig_out)
+ *req, int desc_bytes)
{
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
@@ -1459,8 +1446,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
struct ablkcipher_edesc *edesc;
- dma_addr_t iv_dma = 0;
- bool in_contig;
+ dma_addr_t iv_dma;
+ u8 *iv;
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;

@@ -1504,33 +1491,20 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
}
}

- iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- if (mapped_src_nents == 1 &&
- iv_dma + ivsize == sg_dma_address(req->src)) {
- in_contig = true;
- sec4_sg_ents = 0;
- } else {
- in_contig = false;
- sec4_sg_ents = 1 + mapped_src_nents;
- }
+ sec4_sg_ents = 1 + mapped_src_nents;
dst_sg_idx = sec4_sg_ents;
sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);

- /* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ /*
+ * allocate space for base edesc and hw desc commands, link tables, IV
+ */
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}

@@ -1539,13 +1513,24 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
desc_bytes;
+ edesc->iv_dir = DMA_TO_DEVICE;

- if (!in_contig) {
- dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
- sg_to_sec4_sg_last(req->src, mapped_src_nents,
- edesc->sec4_sg + 1, 0);
+ /* Make sure IV is located in a DMAable area */
+ iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+ memcpy(iv, req->info, ivsize);
+
+ iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, DMA_NONE, 0, 0);
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
}

+ dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
+ sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
+
if (mapped_dst_nents > 1) {
sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
edesc->sec4_sg + dst_sg_idx, 0);
@@ -1556,7 +1541,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1569,7 +1554,6 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
sec4_sg_bytes, 1);
#endif

- *iv_contig_out = in_contig;
return edesc;
}

@@ -1579,19 +1563,16 @@ static int ablkcipher_encrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
- bool iv_contig;
u32 *desc;
int ret = 0;

/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &iv_contig);
+ edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);

/* Create and submit job descriptor*/
- init_ablkcipher_job(ctx->sh_desc_enc,
- ctx->sh_desc_enc_dma, edesc, req, iv_contig);
+ init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
@@ -1615,20 +1596,25 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
struct ablkcipher_edesc *edesc;
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
struct device *jrdev = ctx->jrdev;
- bool iv_contig;
u32 *desc;
int ret = 0;

/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &iv_contig);
+ edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);

+ /*
+ * The crypto API expects us to set the IV (req->info) to the last
+ * ciphertext block.
+ */
+ scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+ ivsize, 0);
+
/* Create and submit job descriptor*/
- init_ablkcipher_job(ctx->sh_desc_dec,
- ctx->sh_desc_dec_dma, edesc, req, iv_contig);
+ init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
desc = edesc->hw_desc;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
@@ -1653,8 +1639,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
*/
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
struct skcipher_givcrypt_request *greq,
- int desc_bytes,
- bool *iv_contig_out)
+ int desc_bytes)
{
struct ablkcipher_request *req = &greq->creq;
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
@@ -1664,8 +1649,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
struct ablkcipher_edesc *edesc;
- dma_addr_t iv_dma = 0;
- bool out_contig;
+ dma_addr_t iv_dma;
+ u8 *iv;
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;

@@ -1710,36 +1695,20 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
}
}

- /*
- * Check if iv can be contiguous with source and destination.
- * If so, include it. If not, create scatterlist.
- */
- iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
dst_sg_idx = sec4_sg_ents;
- if (mapped_dst_nents == 1 &&
- iv_dma + ivsize == sg_dma_address(req->dst)) {
- out_contig = true;
- } else {
- out_contig = false;
- sec4_sg_ents += 1 + mapped_dst_nents;
- }
+ sec4_sg_ents += 1 + mapped_dst_nents;

- /* allocate space for base edesc and hw desc commands, link tables */
+ /*
+ * allocate space for base edesc and hw desc commands, link tables, IV
+ */
sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}

@@ -1748,24 +1717,33 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
desc_bytes;
+ edesc->iv_dir = DMA_FROM_DEVICE;
+
+ /* Make sure IV is located in a DMAable area */
+ iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+ iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, DMA_NONE, 0, 0);
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }

if (mapped_src_nents > 1)
sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
0);

- if (!out_contig) {
- dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
- iv_dma, ivsize, 0);
- sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
- edesc->sec4_sg + dst_sg_idx + 1, 0);
- }
+ dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
+ sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
+ dst_sg_idx + 1, 0);

edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1778,7 +1756,6 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
sec4_sg_bytes, 1);
#endif

- *iv_contig_out = out_contig;
return edesc;
}

@@ -1789,19 +1766,17 @@ static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
- bool iv_contig = false;
u32 *desc;
int ret = 0;

/* allocate extended descriptor */
- edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &iv_contig);
+ edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);

/* Create and submit job descriptor*/
init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
- edesc, req, iv_contig);
+ edesc, req);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ablkcipher jobdesc@" __stringify(__LINE__) ": ",
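What the caamalg rework amounts to: the driver used to DMA-map req->info directly, but that buffer may live on the caller's stack and is not guaranteed to be DMA-able; the patch instead reserves ivsize extra bytes at the tail of the kzalloc'ed edesc, copies the IV there, and maps the copy. The resulting layout, condensed from the hunks above (kernel context, not standalone):

/* One GFP_DMA kzalloc per request, laid out as:
 *
 *   struct ablkcipher_edesc | hw_desc (desc_bytes) |
 *   sec4 S/G table (sec4_sg_bytes) | IV copy (ivsize)
 *
 * so the IV address computed below always sits inside a DMA-able
 * allocation, regardless of where req->info points. */
iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
memcpy(iv, req->info, ivsize);
iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);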
1011     diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
1012     index b648e31673f9..e7966e37a5aa 100644
1013     --- a/drivers/crypto/caam/caamalg_qi.c
1014     +++ b/drivers/crypto/caam/caamalg_qi.c
1015     @@ -401,7 +401,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1016     * @assoclen: associated data length, in CAAM endianness
1017     * @assoclen_dma: bus physical mapped address of req->assoclen
1018     * @drv_req: driver-specific request structure
1019     - * @sgt: the h/w link table
1020     + * @sgt: the h/w link table, followed by IV
1021     */
1022     struct aead_edesc {
1023     int src_nents;
1024     @@ -412,9 +412,6 @@ struct aead_edesc {
1025     unsigned int assoclen;
1026     dma_addr_t assoclen_dma;
1027     struct caam_drv_req drv_req;
1028     -#define CAAM_QI_MAX_AEAD_SG \
1029     - ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
1030     - sizeof(struct qm_sg_entry))
1031     struct qm_sg_entry sgt[0];
1032     };
1033    
1034     @@ -426,7 +423,7 @@ struct aead_edesc {
1035     * @qm_sg_bytes: length of dma mapped h/w link table
1036     * @qm_sg_dma: bus physical mapped address of h/w link table
1037     * @drv_req: driver-specific request structure
1038     - * @sgt: the h/w link table
1039     + * @sgt: the h/w link table, followed by IV
1040     */
1041     struct ablkcipher_edesc {
1042     int src_nents;
1043     @@ -435,9 +432,6 @@ struct ablkcipher_edesc {
1044     int qm_sg_bytes;
1045     dma_addr_t qm_sg_dma;
1046     struct caam_drv_req drv_req;
1047     -#define CAAM_QI_MAX_ABLKCIPHER_SG \
1048     - ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
1049     - sizeof(struct qm_sg_entry))
1050     struct qm_sg_entry sgt[0];
1051     };
1052    
1053     @@ -649,17 +643,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1054     }
1055     }
1056    
1057     - if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
1058     + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
1059     ivsize = crypto_aead_ivsize(aead);
1060     - iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
1061     - if (dma_mapping_error(qidev, iv_dma)) {
1062     - dev_err(qidev, "unable to map IV\n");
1063     - caam_unmap(qidev, req->src, req->dst, src_nents,
1064     - dst_nents, 0, 0, op_type, 0, 0);
1065     - qi_cache_free(edesc);
1066     - return ERR_PTR(-ENOMEM);
1067     - }
1068     - }
1069    
1070     /*
1071     * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
1072     @@ -667,16 +652,33 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1073     */
1074     qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
1075     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
1076     - if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
1077     - dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
1078     - qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
1079     - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1080     - iv_dma, ivsize, op_type, 0, 0);
1081     + sg_table = &edesc->sgt[0];
1082     + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
1083     + if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
1084     + CAAM_QI_MEMCACHE_SIZE)) {
1085     + dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1086     + qm_sg_ents, ivsize);
1087     + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1088     + 0, 0, 0, 0);
1089     qi_cache_free(edesc);
1090     return ERR_PTR(-ENOMEM);
1091     }
1092     - sg_table = &edesc->sgt[0];
1093     - qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
1094     +
1095     + if (ivsize) {
1096     + u8 *iv = (u8 *)(sg_table + qm_sg_ents);
1097     +
1098     + /* Make sure IV is located in a DMAable area */
1099     + memcpy(iv, req->iv, ivsize);
1100     +
1101     + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
1102     + if (dma_mapping_error(qidev, iv_dma)) {
1103     + dev_err(qidev, "unable to map IV\n");
1104     + caam_unmap(qidev, req->src, req->dst, src_nents,
1105     + dst_nents, 0, 0, 0, 0, 0);
1106     + qi_cache_free(edesc);
1107     + return ERR_PTR(-ENOMEM);
1108     + }
1109     + }
1110    
1111     edesc->src_nents = src_nents;
1112     edesc->dst_nents = dst_nents;
1113     @@ -813,15 +815,27 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
1114     #endif
1115    
1116     ablkcipher_unmap(qidev, edesc, req);
1117     - qi_cache_free(edesc);
1118     +
1119     + /* In case initial IV was generated, copy it in GIVCIPHER request */
1120     + if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
1121     + u8 *iv;
1122     + struct skcipher_givcrypt_request *greq;
1123     +
1124     + greq = container_of(req, struct skcipher_givcrypt_request,
1125     + creq);
1126     + iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
1127     + memcpy(greq->giv, iv, ivsize);
1128     + }
1129    
1130     /*
1131     * The crypto API expects us to set the IV (req->info) to the last
1132     * ciphertext block. This is used e.g. by the CTS mode.
1133     */
1134     - scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
1135     - ivsize, 0);
1136     + if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
1137     + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
1138     + ivsize, ivsize, 0);
1139    
1140     + qi_cache_free(edesc);
1141     ablkcipher_request_complete(req, status);
1142     }
1143    
1144     @@ -836,9 +850,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1145     int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1146     struct ablkcipher_edesc *edesc;
1147     dma_addr_t iv_dma;
1148     - bool in_contig;
1149     + u8 *iv;
1150     int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1151     - int dst_sg_idx, qm_sg_ents;
1152     + int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1153     struct qm_sg_entry *sg_table, *fd_sgt;
1154     struct caam_drv_ctx *drv_ctx;
1155     enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
1156     @@ -885,55 +899,53 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1157     }
1158     }
1159    
1160     - iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
1161     - if (dma_mapping_error(qidev, iv_dma)) {
1162     - dev_err(qidev, "unable to map IV\n");
1163     - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1164     - 0, 0, 0, 0);
1165     - return ERR_PTR(-ENOMEM);
1166     - }
1167     -
1168     - if (mapped_src_nents == 1 &&
1169     - iv_dma + ivsize == sg_dma_address(req->src)) {
1170     - in_contig = true;
1171     - qm_sg_ents = 0;
1172     - } else {
1173     - in_contig = false;
1174     - qm_sg_ents = 1 + mapped_src_nents;
1175     - }
1176     + qm_sg_ents = 1 + mapped_src_nents;
1177     dst_sg_idx = qm_sg_ents;
1178    
1179     qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
1180     - if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
1181     - dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
1182     - qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
1183     - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1184     - iv_dma, ivsize, op_type, 0, 0);
1185     + qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
1186     + if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
1187     + ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1188     + dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1189     + qm_sg_ents, ivsize);
1190     + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1191     + 0, 0, 0, 0);
1192     return ERR_PTR(-ENOMEM);
1193     }
1194    
1195     - /* allocate space for base edesc and link tables */
1196     + /* allocate space for base edesc, link tables and IV */
1197     edesc = qi_cache_alloc(GFP_DMA | flags);
1198     if (unlikely(!edesc)) {
1199     dev_err(qidev, "could not allocate extended descriptor\n");
1200     - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1201     - iv_dma, ivsize, op_type, 0, 0);
1202     + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1203     + 0, 0, 0, 0);
1204     + return ERR_PTR(-ENOMEM);
1205     + }
1206     +
1207     + /* Make sure IV is located in a DMAable area */
1208     + sg_table = &edesc->sgt[0];
1209     + iv = (u8 *)(sg_table + qm_sg_ents);
1210     + memcpy(iv, req->info, ivsize);
1211     +
1212     + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
1213     + if (dma_mapping_error(qidev, iv_dma)) {
1214     + dev_err(qidev, "unable to map IV\n");
1215     + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1216     + 0, 0, 0, 0);
1217     + qi_cache_free(edesc);
1218     return ERR_PTR(-ENOMEM);
1219     }
1220    
1221     edesc->src_nents = src_nents;
1222     edesc->dst_nents = dst_nents;
1223     edesc->iv_dma = iv_dma;
1224     - sg_table = &edesc->sgt[0];
1225     - edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
1226     + edesc->qm_sg_bytes = qm_sg_bytes;
1227     edesc->drv_req.app_ctx = req;
1228     edesc->drv_req.cbk = ablkcipher_done;
1229     edesc->drv_req.drv_ctx = drv_ctx;
1230    
1231     - if (!in_contig) {
1232     - dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1233     - sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
1234     - }
1235     + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1236     + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
1237    
1238     if (mapped_dst_nents > 1)
1239     sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1240     @@ -951,20 +963,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1241    
1242     fd_sgt = &edesc->drv_req.fd_sgt[0];
1243    
1244     - if (!in_contig)
1245     - dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
1246     - ivsize + req->nbytes, 0);
1247     - else
1248     - dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
1249     - 0);
1250     + dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
1251     + ivsize + req->nbytes, 0);
1252    
1253     if (req->src == req->dst) {
1254     - if (!in_contig)
1255     - dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
1256     - sizeof(*sg_table), req->nbytes, 0);
1257     - else
1258     - dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
1259     - req->nbytes, 0);
1260     + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
1261     + sizeof(*sg_table), req->nbytes, 0);
1262     } else if (mapped_dst_nents > 1) {
1263     dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1264     sizeof(*sg_table), req->nbytes, 0);
1265     @@ -988,10 +992,10 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1266     int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
1267     struct ablkcipher_edesc *edesc;
1268     dma_addr_t iv_dma;
1269     - bool out_contig;
1270     + u8 *iv;
1271     int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1272     struct qm_sg_entry *sg_table, *fd_sgt;
1273     - int dst_sg_idx, qm_sg_ents;
1274     + int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1275     struct caam_drv_ctx *drv_ctx;
1276    
1277     drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
1278     @@ -1039,46 +1043,45 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1279     mapped_dst_nents = src_nents;
1280     }
1281    
1282     - iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
1283     - if (dma_mapping_error(qidev, iv_dma)) {
1284     - dev_err(qidev, "unable to map IV\n");
1285     - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1286     - 0, 0, 0, 0);
1287     - return ERR_PTR(-ENOMEM);
1288     - }
1289     -
1290     qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
1291     dst_sg_idx = qm_sg_ents;
1292     - if (mapped_dst_nents == 1 &&
1293     - iv_dma + ivsize == sg_dma_address(req->dst)) {
1294     - out_contig = true;
1295     - } else {
1296     - out_contig = false;
1297     - qm_sg_ents += 1 + mapped_dst_nents;
1298     - }
1299    
1300     - if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
1301     - dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
1302     - qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
1303     - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1304     - iv_dma, ivsize, GIVENCRYPT, 0, 0);
1305     + qm_sg_ents += 1 + mapped_dst_nents;
1306     + qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
1307     + if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
1308     + ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1309     + dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1310     + qm_sg_ents, ivsize);
1311     + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1312     + 0, 0, 0, 0);
1313     return ERR_PTR(-ENOMEM);
1314     }
1315    
1316     - /* allocate space for base edesc and link tables */
1317     + /* allocate space for base edesc, link tables and IV */
1318     edesc = qi_cache_alloc(GFP_DMA | flags);
1319     if (!edesc) {
1320     dev_err(qidev, "could not allocate extended descriptor\n");
1321     - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1322     - iv_dma, ivsize, GIVENCRYPT, 0, 0);
1323     + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1324     + 0, 0, 0, 0);
1325     + return ERR_PTR(-ENOMEM);
1326     + }
1327     +
1328     + /* Make sure IV is located in a DMAable area */
1329     + sg_table = &edesc->sgt[0];
1330     + iv = (u8 *)(sg_table + qm_sg_ents);
1331     + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
1332     + if (dma_mapping_error(qidev, iv_dma)) {
1333     + dev_err(qidev, "unable to map IV\n");
1334     + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1335     + 0, 0, 0, 0);
1336     + qi_cache_free(edesc);
1337     return ERR_PTR(-ENOMEM);
1338     }
1339    
1340     edesc->src_nents = src_nents;
1341     edesc->dst_nents = dst_nents;
1342     edesc->iv_dma = iv_dma;
1343     - sg_table = &edesc->sgt[0];
1344     - edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
1345     + edesc->qm_sg_bytes = qm_sg_bytes;
1346     edesc->drv_req.app_ctx = req;
1347     edesc->drv_req.cbk = ablkcipher_done;
1348     edesc->drv_req.drv_ctx = drv_ctx;
1349     @@ -1086,11 +1089,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1350     if (mapped_src_nents > 1)
1351     sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
1352    
1353     - if (!out_contig) {
1354     - dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
1355     - sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1356     - dst_sg_idx + 1, 0);
1357     - }
1358     + dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
1359     + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
1360     + 0);
1361    
1362     edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
1363     DMA_TO_DEVICE);
1364     @@ -1111,13 +1112,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1365     dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
1366     req->nbytes, 0);
1367    
1368     - if (!out_contig)
1369     - dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1370     - sizeof(*sg_table), ivsize + req->nbytes,
1371     - 0);
1372     - else
1373     - dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
1374     - ivsize + req->nbytes, 0);
1375     + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1376     + sizeof(*sg_table), ivsize + req->nbytes, 0);
1377    
1378     return edesc;
1379     }
1380     @@ -1127,6 +1123,7 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
1381     struct ablkcipher_edesc *edesc;
1382     struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1383     struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1384     + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1385     int ret;
1386    
1387     if (unlikely(caam_congested))
1388     @@ -1137,6 +1134,14 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
1389     if (IS_ERR(edesc))
1390     return PTR_ERR(edesc);
1391    
1392     + /*
1393     + * The crypto API expects us to set the IV (req->info) to the last
1394     + * ciphertext block.
1395     + */
1396     + if (!encrypt)
1397     + scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
1398     + ivsize, ivsize, 0);
1399     +
1400     ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1401     if (!ret) {
1402     ret = -EINPROGRESS;
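
A note on the caamalg_qi rework above: the old code DMA-mapped creq->giv wherever the caller placed it, which is not guaranteed to be DMA-able memory; the fix instead reserves room for the IV directly behind the S/G table inside the qi_cache allocation, so one bounds check and one allocation cover both. A minimal userspace sketch of that single-allocation layout; the struct, the MEMCACHE_SIZE bound and all sizes here are illustrative stand-ins, not the driver's:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct qm_sg_entry { unsigned long long addr; unsigned int len, flags; };

struct edesc {
    int src_nents;
    int dst_nents;
    struct qm_sg_entry sgt[];   /* S/G table, then the IV, share the tail */
};

#define MEMCACHE_SIZE 512       /* stand-in for CAAM_QI_MEMCACHE_SIZE */

int main(void)
{
    int qm_sg_ents = 4, ivsize = 16;
    size_t qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);

    /* same bound the patch checks before allocating */
    if (offsetof(struct edesc, sgt) + qm_sg_bytes + ivsize > MEMCACHE_SIZE)
        return 1;

    struct edesc *edesc = calloc(1, offsetof(struct edesc, sgt) +
                                 qm_sg_bytes + ivsize);
    if (!edesc)
        return 1;

    /* the IV lives right after the S/G table, in the same allocation,
     * so mapping it for DMA is always safe */
    unsigned char *iv = (unsigned char *)(&edesc->sgt[0] + qm_sg_ents);
    memset(iv, 0xab, ivsize);
    printf("iv offset inside edesc: %zu\n",
           (size_t)(iv - (unsigned char *)edesc));

    free(edesc);
    return 0;
}
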
1403     diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
1404     index 7a897209f181..7ff4a25440ac 100644
1405     --- a/drivers/crypto/caam/caampkc.c
1406     +++ b/drivers/crypto/caam/caampkc.c
1407     @@ -66,7 +66,7 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
1408     struct caam_rsa_key *key = &ctx->key;
1409     struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
1410     size_t p_sz = key->p_sz;
1411     - size_t q_sz = key->p_sz;
1412     + size_t q_sz = key->q_sz;
1413    
1414     dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
1415     dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
1416     @@ -83,7 +83,7 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
1417     struct caam_rsa_key *key = &ctx->key;
1418     struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
1419     size_t p_sz = key->p_sz;
1420     - size_t q_sz = key->p_sz;
1421     + size_t q_sz = key->q_sz;
1422    
1423     dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
1424     dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
1425     @@ -166,18 +166,71 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
1426     akcipher_request_complete(req, err);
1427     }
1428    
1429     +static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
1430     + unsigned int nbytes,
1431     + unsigned int flags)
1432     +{
1433     + struct sg_mapping_iter miter;
1434     + int lzeros, ents;
1435     + unsigned int len;
1436     + unsigned int tbytes = nbytes;
1437     + const u8 *buff;
1438     +
1439     + ents = sg_nents_for_len(sgl, nbytes);
1440     + if (ents < 0)
1441     + return ents;
1442     +
1443     + sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);
1444     +
1445     + lzeros = 0;
1446     + len = 0;
1447     + while (nbytes > 0) {
1448     + while (len && !*buff) {
1449     + lzeros++;
1450     + len--;
1451     + buff++;
1452     + }
1453     +
1454     + if (len && *buff)
1455     + break;
1456     +
1457     + sg_miter_next(&miter);
1458     + buff = miter.addr;
1459     + len = miter.length;
1460     +
1461     + nbytes -= lzeros;
1462     + lzeros = 0;
1463     + }
1464     +
1465     + miter.consumed = lzeros;
1466     + sg_miter_stop(&miter);
1467     + nbytes -= lzeros;
1468     +
1469     + return tbytes - nbytes;
1470     +}
1471     +
1472     static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
1473     size_t desclen)
1474     {
1475     struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
1476     struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
1477     struct device *dev = ctx->dev;
1478     + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
1479     struct rsa_edesc *edesc;
1480     gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1481     GFP_KERNEL : GFP_ATOMIC;
1482     + int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
1483     int sgc;
1484     int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1485     int src_nents, dst_nents;
1486     + int lzeros;
1487     +
1488     + lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
1489     + if (lzeros < 0)
1490     + return ERR_PTR(lzeros);
1491     +
1492     + req->src_len -= lzeros;
1493     + req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
1494    
1495     src_nents = sg_nents_for_len(req->src, req->src_len);
1496     dst_nents = sg_nents_for_len(req->dst, req->dst_len);
1497     @@ -344,7 +397,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
1498     struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
1499     int sec4_sg_index = 0;
1500     size_t p_sz = key->p_sz;
1501     - size_t q_sz = key->p_sz;
1502     + size_t q_sz = key->q_sz;
1503    
1504     pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
1505     if (dma_mapping_error(dev, pdb->d_dma)) {
1506     @@ -419,7 +472,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
1507     struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
1508     int sec4_sg_index = 0;
1509     size_t p_sz = key->p_sz;
1510     - size_t q_sz = key->p_sz;
1511     + size_t q_sz = key->q_sz;
1512    
1513     pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
1514     if (dma_mapping_error(dev, pdb->p_dma)) {
1515     @@ -953,6 +1006,7 @@ static struct akcipher_alg caam_rsa = {
1516     .max_size = caam_rsa_max_size,
1517     .init = caam_rsa_init_tfm,
1518     .exit = caam_rsa_exit_tfm,
1519     + .reqsize = sizeof(struct caam_rsa_req_ctx),
1520     .base = {
1521     .cra_name = "rsa",
1522     .cra_driver_name = "rsa-caam",
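
caam_rsa_count_leading_zeros() above walks the input scatterlist with an sg_miter, and the request is then shortened and fast-forwarded past the zero bytes with scatterwalk_ffwd(). On a flat buffer the same idea collapses to a few lines; a hedged sketch with illustrative names:

#include <stddef.h>
#include <stdio.h>

static size_t count_leading_zeros(const unsigned char *buf, size_t len)
{
    size_t lz = 0;

    while (lz < len && buf[lz] == 0)
        lz++;
    return lz;
}

int main(void)
{
    const unsigned char msg[] = { 0x00, 0x00, 0x12, 0x34 };
    size_t lz = count_leading_zeros(msg, sizeof(msg));

    /* the driver then does the equivalent of: src_len -= lz; src += lz; */
    printf("stripped %zu leading zero byte(s), %zu significant\n",
           lz, sizeof(msg) - lz);
    return 0;
}
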
1523     diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
1524     index fd145c46eae1..82645bcf8b27 100644
1525     --- a/drivers/crypto/caam/caampkc.h
1526     +++ b/drivers/crypto/caam/caampkc.h
1527     @@ -95,6 +95,14 @@ struct caam_rsa_ctx {
1528     struct device *dev;
1529     };
1530    
1531     +/**
1532     + * caam_rsa_req_ctx - per request context.
1533     + * @src: input scatterlist (stripped of leading zeros)
1534     + */
1535     +struct caam_rsa_req_ctx {
1536     + struct scatterlist src[2];
1537     +};
1538     +
1539     /**
1540     * rsa_edesc - s/w-extended rsa descriptor
1541     * @src_nents : number of segments in input scatterlist
1542     diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
1543     index dc451e0a43c5..58fb3ed6e644 100644
1544     --- a/drivers/crypto/cavium/zip/common.h
1545     +++ b/drivers/crypto/cavium/zip/common.h
1546     @@ -46,8 +46,10 @@
1547     #ifndef __COMMON_H__
1548     #define __COMMON_H__
1549    
1550     +#include <linux/delay.h>
1551     #include <linux/init.h>
1552     #include <linux/interrupt.h>
1553     +#include <linux/io.h>
1554     #include <linux/kernel.h>
1555     #include <linux/module.h>
1556     #include <linux/pci.h>
1557     @@ -149,6 +151,25 @@ struct zip_operation {
1558     u32 sizeofzops;
1559     };
1560    
1561     +static inline int zip_poll_result(union zip_zres_s *result)
1562     +{
1563     + int retries = 1000;
1564     +
1565     + while (!result->s.compcode) {
1566     + if (!--retries) {
1567     + pr_err("ZIP ERR: request timed out\n");
1568     + return -ETIMEDOUT;
1569     + }
1570     + udelay(10);
1571     + /*
1572     + * Force re-reading of compcode which is updated
1573     + * by the ZIP coprocessor.
1574     + */
1575     + rmb();
1576     + }
1577     + return 0;
1578     +}
1579     +
1580     /* error messages */
1581     #define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \
1582     fmt "\n", __func__, __LINE__, ## args)
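
The new zip_poll_result() helper replaces an unbounded busy-wait (see the zip_deflate/zip_inflate hunks further down) with a capped retry loop: up to 1000 attempts, 10 microseconds apart, then -ETIMEDOUT. A self-contained userspace sketch of the same bounded-poll pattern, where usleep() and a volatile flag stand in for udelay() and the rmb()-forced re-read of device memory:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* stand-in for the completion word the coprocessor writes */
static volatile int compcode;

static int poll_result(volatile int *result)
{
    int retries = 1000;

    while (!*result) {
        if (!--retries)
            return -ETIMEDOUT;  /* give up instead of spinning forever */
        usleep(10);             /* udelay(10) in the kernel helper */
    }
    return 0;
}

int main(void)
{
    compcode = 1;               /* pretend the device already completed */
    printf("poll_result: %d\n", poll_result(&compcode));
    return 0;
}
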
1583     diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
1584     index 8df4d26cf9d4..b92b6e7e100f 100644
1585     --- a/drivers/crypto/cavium/zip/zip_crypto.c
1586     +++ b/drivers/crypto/cavium/zip/zip_crypto.c
1587     @@ -124,7 +124,7 @@ int zip_compress(const u8 *src, unsigned int slen,
1588     struct zip_kernel_ctx *zip_ctx)
1589     {
1590     struct zip_operation *zip_ops = NULL;
1591     - struct zip_state zip_state;
1592     + struct zip_state *zip_state;
1593     struct zip_device *zip = NULL;
1594     int ret;
1595    
1596     @@ -135,20 +135,23 @@ int zip_compress(const u8 *src, unsigned int slen,
1597     if (!zip)
1598     return -ENODEV;
1599    
1600     - memset(&zip_state, 0, sizeof(struct zip_state));
1601     + zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
1602     + if (!zip_state)
1603     + return -ENOMEM;
1604     +
1605     zip_ops = &zip_ctx->zip_comp;
1606    
1607     zip_ops->input_len = slen;
1608     zip_ops->output_len = *dlen;
1609     memcpy(zip_ops->input, src, slen);
1610    
1611     - ret = zip_deflate(zip_ops, &zip_state, zip);
1612     + ret = zip_deflate(zip_ops, zip_state, zip);
1613    
1614     if (!ret) {
1615     *dlen = zip_ops->output_len;
1616     memcpy(dst, zip_ops->output, *dlen);
1617     }
1618     -
1619     + kfree(zip_state);
1620     return ret;
1621     }
1622    
1623     @@ -157,7 +160,7 @@ int zip_decompress(const u8 *src, unsigned int slen,
1624     struct zip_kernel_ctx *zip_ctx)
1625     {
1626     struct zip_operation *zip_ops = NULL;
1627     - struct zip_state zip_state;
1628     + struct zip_state *zip_state;
1629     struct zip_device *zip = NULL;
1630     int ret;
1631    
1632     @@ -168,7 +171,10 @@ int zip_decompress(const u8 *src, unsigned int slen,
1633     if (!zip)
1634     return -ENODEV;
1635    
1636     - memset(&zip_state, 0, sizeof(struct zip_state));
1637     + zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
1638     + if (!zip_state)
1639     + return -ENOMEM;
1640     +
1641     zip_ops = &zip_ctx->zip_decomp;
1642     memcpy(zip_ops->input, src, slen);
1643    
1644     @@ -179,13 +185,13 @@ int zip_decompress(const u8 *src, unsigned int slen,
1645     zip_ops->input_len = slen;
1646     zip_ops->output_len = *dlen;
1647    
1648     - ret = zip_inflate(zip_ops, &zip_state, zip);
1649     + ret = zip_inflate(zip_ops, zip_state, zip);
1650    
1651     if (!ret) {
1652     *dlen = zip_ops->output_len;
1653     memcpy(dst, zip_ops->output, *dlen);
1654     }
1655     -
1656     + kfree(zip_state);
1657     return ret;
1658     }
1659    
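
Both paths above stop keeping struct zip_state on the kernel stack and kzalloc() it instead, since a structure of that size is a stack-overflow risk. The pattern in miniature, with a deliberately oversized illustrative state struct (calloc() stands in for the driver's kzalloc(..., GFP_ATOMIC)):

#include <stdio.h>
#include <stdlib.h>

struct big_state {
    unsigned char window[32 * 1024];    /* far too large for a kernel stack */
};

static int do_operation(struct big_state *s)
{
    (void)s;                            /* real work elided */
    return 0;
}

int main(void)
{
    struct big_state *s = calloc(1, sizeof(*s));
    int ret;

    if (!s)
        return 1;                       /* -ENOMEM in the driver */
    ret = do_operation(s);
    free(s);                            /* freed on every return path */
    printf("ret=%d\n", ret);
    return ret;
}
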
1660     diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c
1661     index 9a944b8c1e29..d7133f857d67 100644
1662     --- a/drivers/crypto/cavium/zip/zip_deflate.c
1663     +++ b/drivers/crypto/cavium/zip/zip_deflate.c
1664     @@ -129,8 +129,8 @@ int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
1665     /* Stats update for compression requests submitted */
1666     atomic64_inc(&zip_dev->stats.comp_req_submit);
1667    
1668     - while (!result_ptr->s.compcode)
1669     - continue;
1670     + /* Wait for completion or error */
1671     + zip_poll_result(result_ptr);
1672    
1673     /* Stats update for compression requests completed */
1674     atomic64_inc(&zip_dev->stats.comp_req_complete);
1675     diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c
1676     index 50cbdd83dbf2..7e0d73e2f89e 100644
1677     --- a/drivers/crypto/cavium/zip/zip_inflate.c
1678     +++ b/drivers/crypto/cavium/zip/zip_inflate.c
1679     @@ -143,8 +143,8 @@ int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
1680     /* Decompression requests submitted stats update */
1681     atomic64_inc(&zip_dev->stats.decomp_req_submit);
1682    
1683     - while (!result_ptr->s.compcode)
1684     - continue;
1685     + /* Wait for completion or error */
1686     + zip_poll_result(result_ptr);
1687    
1688     /* Decompression requests completed stats update */
1689     atomic64_inc(&zip_dev->stats.decomp_req_complete);
1690     diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
1691     index c40ac30ec002..c1f8da958c78 100644
1692     --- a/drivers/crypto/omap-sham.c
1693     +++ b/drivers/crypto/omap-sham.c
1694     @@ -1082,7 +1082,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
1695    
1696     if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
1697     free_pages((unsigned long)sg_virt(ctx->sg),
1698     - get_order(ctx->sg->length));
1699     + get_order(ctx->sg->length + ctx->bufcnt));
1700    
1701     if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
1702     kfree(ctx->sg);
1703     diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
1704     index 96072b9b55c4..d7316f7a3a69 100644
1705     --- a/drivers/crypto/vmx/aes.c
1706     +++ b/drivers/crypto/vmx/aes.c
1707     @@ -48,8 +48,6 @@ static int p8_aes_init(struct crypto_tfm *tfm)
1708     alg, PTR_ERR(fallback));
1709     return PTR_ERR(fallback);
1710     }
1711     - printk(KERN_INFO "Using '%s' as fallback implementation.\n",
1712     - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
1713    
1714     crypto_cipher_set_flags(fallback,
1715     crypto_cipher_get_flags((struct
1716     diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
1717     index 7394d35d5936..5285ece4f33a 100644
1718     --- a/drivers/crypto/vmx/aes_cbc.c
1719     +++ b/drivers/crypto/vmx/aes_cbc.c
1720     @@ -52,9 +52,6 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
1721     alg, PTR_ERR(fallback));
1722     return PTR_ERR(fallback);
1723     }
1724     - printk(KERN_INFO "Using '%s' as fallback implementation.\n",
1725     - crypto_skcipher_driver_name(fallback));
1726     -
1727    
1728     crypto_skcipher_set_flags(
1729     fallback,
1730     diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
1731     index 17d84217dd76..02ba5f2aa0e6 100644
1732     --- a/drivers/crypto/vmx/aes_ctr.c
1733     +++ b/drivers/crypto/vmx/aes_ctr.c
1734     @@ -48,8 +48,6 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
1735     alg, PTR_ERR(fallback));
1736     return PTR_ERR(fallback);
1737     }
1738     - printk(KERN_INFO "Using '%s' as fallback implementation.\n",
1739     - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
1740    
1741     crypto_blkcipher_set_flags(
1742     fallback,
1743     diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
1744     index 8cd6e62e4c90..8bd9aff0f55f 100644
1745     --- a/drivers/crypto/vmx/aes_xts.c
1746     +++ b/drivers/crypto/vmx/aes_xts.c
1747     @@ -53,8 +53,6 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm)
1748     alg, PTR_ERR(fallback));
1749     return PTR_ERR(fallback);
1750     }
1751     - printk(KERN_INFO "Using '%s' as fallback implementation.\n",
1752     - crypto_skcipher_driver_name(fallback));
1753    
1754     crypto_skcipher_set_flags(
1755     fallback,
1756     diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
1757     index 27a94a119009..1c4b5b889fba 100644
1758     --- a/drivers/crypto/vmx/ghash.c
1759     +++ b/drivers/crypto/vmx/ghash.c
1760     @@ -64,8 +64,6 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
1761     alg, PTR_ERR(fallback));
1762     return PTR_ERR(fallback);
1763     }
1764     - printk(KERN_INFO "Using '%s' as fallback implementation.\n",
1765     - crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));
1766    
1767     crypto_shash_set_flags(fallback,
1768     crypto_shash_get_flags((struct crypto_shash
1769     diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
1770     index 68ea6e712bf9..7e0bfd7347f6 100644
1771     --- a/drivers/gpio/gpiolib.c
1772     +++ b/drivers/gpio/gpiolib.c
1773     @@ -3313,6 +3313,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
1774     struct gpio_desc *desc = NULL;
1775     int status;
1776     enum gpio_lookup_flags lookupflags = 0;
1777     + /* Maybe we have a device name, maybe not */
1778     + const char *devname = dev ? dev_name(dev) : "?";
1779    
1780     dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
1781    
1782     @@ -3341,8 +3343,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
1783     return desc;
1784     }
1785    
1786     - /* If a connection label was passed use that, else use the device name as label */
1787     - status = gpiod_request(desc, con_id ? con_id : dev_name(dev));
1788     + /*
1789     + * If a connection label was passed use that, else attempt to use
1790     + * the device name as label
1791     + */
1792     + status = gpiod_request(desc, con_id ? con_id : devname);
1793     if (status < 0)
1794     return ERR_PTR(status);
1795    
1796     diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
1797     index d88d3e0f59fb..466cef930bf1 100644
1798     --- a/drivers/input/joystick/xpad.c
1799     +++ b/drivers/input/joystick/xpad.c
1800     @@ -126,6 +126,7 @@ static const struct xpad_device {
1801     u8 mapping;
1802     u8 xtype;
1803     } xpad_device[] = {
1804     + { 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 },
1805     { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
1806     { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
1807     { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
1808     @@ -411,6 +412,7 @@ static const signed short xpad_abs_triggers[] = {
1809    
1810     static const struct usb_device_id xpad_table[] = {
1811     { USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */
1812     + XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 Controller */
1813     XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */
1814     XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
1815     XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */
1816     diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
1817     index d6135900da64..c4926645c779 100644
1818     --- a/drivers/input/mouse/elan_i2c_core.c
1819     +++ b/drivers/input/mouse/elan_i2c_core.c
1820     @@ -1260,6 +1260,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
1821     { "ELAN060B", 0 },
1822     { "ELAN060C", 0 },
1823     { "ELAN0611", 0 },
1824     + { "ELAN0612", 0 },
1825     { "ELAN1000", 0 },
1826     { }
1827     };
1828     diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
1829     index 5dafafad6351..2bfa89ec552c 100644
1830     --- a/drivers/input/touchscreen/goodix.c
1831     +++ b/drivers/input/touchscreen/goodix.c
1832     @@ -888,6 +888,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
1833     #ifdef CONFIG_ACPI
1834     static const struct acpi_device_id goodix_acpi_match[] = {
1835     { "GDIX1001", 0 },
1836     + { "GDIX1002", 0 },
1837     { }
1838     };
1839     MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
1840     diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
1841     index 9047c0a529b2..efd733472a35 100644
1842     --- a/drivers/misc/vmw_balloon.c
1843     +++ b/drivers/misc/vmw_balloon.c
1844     @@ -576,15 +576,9 @@ static void vmballoon_pop(struct vmballoon *b)
1845     }
1846     }
1847    
1848     - if (b->batch_page) {
1849     - vunmap(b->batch_page);
1850     - b->batch_page = NULL;
1851     - }
1852     -
1853     - if (b->page) {
1854     - __free_page(b->page);
1855     - b->page = NULL;
1856     - }
1857     + /* Clearing the batch_page unconditionally has no adverse effect */
1858     + free_page((unsigned long)b->batch_page);
1859     + b->batch_page = NULL;
1860     }
1861    
1862     /*
1863     @@ -991,16 +985,13 @@ static const struct vmballoon_ops vmballoon_batched_ops = {
1864    
1865     static bool vmballoon_init_batching(struct vmballoon *b)
1866     {
1867     - b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
1868     - if (!b->page)
1869     - return false;
1870     + struct page *page;
1871    
1872     - b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
1873     - if (!b->batch_page) {
1874     - __free_page(b->page);
1875     + page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1876     + if (!page)
1877     return false;
1878     - }
1879    
1880     + b->batch_page = page_address(page);
1881     return true;
1882     }
1883    
1884     diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
1885     index e153e8b64bb8..d5553c47014f 100644
1886     --- a/drivers/nfc/pn533/usb.c
1887     +++ b/drivers/nfc/pn533/usb.c
1888     @@ -62,6 +62,9 @@ struct pn533_usb_phy {
1889     struct urb *out_urb;
1890     struct urb *in_urb;
1891    
1892     + struct urb *ack_urb;
1893     + u8 *ack_buffer;
1894     +
1895     struct pn533 *priv;
1896     };
1897    
1898     @@ -150,13 +153,16 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags)
1899     struct pn533_usb_phy *phy = dev->phy;
1900     static const u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
1901     /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */
1902     - int rc;
1903    
1904     - phy->out_urb->transfer_buffer = (u8 *)ack;
1905     - phy->out_urb->transfer_buffer_length = sizeof(ack);
1906     - rc = usb_submit_urb(phy->out_urb, flags);
1907     + if (!phy->ack_buffer) {
1908     + phy->ack_buffer = kmemdup(ack, sizeof(ack), flags);
1909     + if (!phy->ack_buffer)
1910     + return -ENOMEM;
1911     + }
1912    
1913     - return rc;
1914     + phy->ack_urb->transfer_buffer = phy->ack_buffer;
1915     + phy->ack_urb->transfer_buffer_length = sizeof(ack);
1916     + return usb_submit_urb(phy->ack_urb, flags);
1917     }
1918    
1919     static int pn533_usb_send_frame(struct pn533 *dev,
1920     @@ -375,26 +381,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
1921     /* Power on the reader (CCID cmd) */
1922     u8 cmd[10] = {PN533_ACR122_PC_TO_RDR_ICCPOWERON,
1923     0, 0, 0, 0, 0, 0, 3, 0, 0};
1924     + char *buffer;
1925     + int transferred;
1926     int rc;
1927     void *cntx;
1928     struct pn533_acr122_poweron_rdr_arg arg;
1929    
1930     dev_dbg(&phy->udev->dev, "%s\n", __func__);
1931    
1932     + buffer = kmemdup(cmd, sizeof(cmd), GFP_KERNEL);
1933     + if (!buffer)
1934     + return -ENOMEM;
1935     +
1936     init_completion(&arg.done);
1937     cntx = phy->in_urb->context; /* backup context */
1938    
1939     phy->in_urb->complete = pn533_acr122_poweron_rdr_resp;
1940     phy->in_urb->context = &arg;
1941    
1942     - phy->out_urb->transfer_buffer = cmd;
1943     - phy->out_urb->transfer_buffer_length = sizeof(cmd);
1944     -
1945     print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1,
1946     cmd, sizeof(cmd), false);
1947    
1948     - rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
1949     - if (rc) {
1950     + rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd),
1951     + &transferred, 0);
1952     + kfree(buffer);
1953     + if (rc || (transferred != sizeof(cmd))) {
1954     nfc_err(&phy->udev->dev,
1955     "Reader power on cmd error %d\n", rc);
1956     return rc;
1957     @@ -490,8 +501,9 @@ static int pn533_usb_probe(struct usb_interface *interface,
1958    
1959     phy->in_urb = usb_alloc_urb(0, GFP_KERNEL);
1960     phy->out_urb = usb_alloc_urb(0, GFP_KERNEL);
1961     + phy->ack_urb = usb_alloc_urb(0, GFP_KERNEL);
1962    
1963     - if (!phy->in_urb || !phy->out_urb)
1964     + if (!phy->in_urb || !phy->out_urb || !phy->ack_urb)
1965     goto error;
1966    
1967     usb_fill_bulk_urb(phy->in_urb, phy->udev,
1968     @@ -501,7 +513,9 @@ static int pn533_usb_probe(struct usb_interface *interface,
1969     usb_fill_bulk_urb(phy->out_urb, phy->udev,
1970     usb_sndbulkpipe(phy->udev, out_endpoint),
1971     NULL, 0, pn533_send_complete, phy);
1972     -
1973     + usb_fill_bulk_urb(phy->ack_urb, phy->udev,
1974     + usb_sndbulkpipe(phy->udev, out_endpoint),
1975     + NULL, 0, pn533_send_complete, phy);
1976    
1977     switch (id->driver_info) {
1978     case PN533_DEVICE_STD:
1979     @@ -554,6 +568,7 @@ static int pn533_usb_probe(struct usb_interface *interface,
1980     error:
1981     usb_free_urb(phy->in_urb);
1982     usb_free_urb(phy->out_urb);
1983     + usb_free_urb(phy->ack_urb);
1984     usb_put_dev(phy->udev);
1985     kfree(in_buf);
1986    
1987     @@ -573,10 +588,13 @@ static void pn533_usb_disconnect(struct usb_interface *interface)
1988    
1989     usb_kill_urb(phy->in_urb);
1990     usb_kill_urb(phy->out_urb);
1991     + usb_kill_urb(phy->ack_urb);
1992    
1993     kfree(phy->in_urb->transfer_buffer);
1994     usb_free_urb(phy->in_urb);
1995     usb_free_urb(phy->out_urb);
1996     + usb_free_urb(phy->ack_urb);
1997     + kfree(phy->ack_buffer);
1998    
1999     nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n");
2000     }
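
The thread running through the pn533 hunks: a URB's transfer_buffer may be DMA-mapped by the USB core, so it must be heap memory, never a stack array or const data. Hence the one-time kmemdup() of the static ack frame into a dedicated, reusable buffer, and the kmemdup() of the power-on command. A userspace sketch of the copy-once pattern; memdup() below is an illustrative stand-in for kmemdup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* heap copy of read-only data, so the copy can be handed to code
 * that may DMA-map it */
static void *memdup(const void *src, size_t len)
{
    void *p = malloc(len);

    if (p)
        memcpy(p, src, len);
    return p;
}

int main(void)
{
    static const unsigned char ack[6] = { 0x00, 0x00, 0xff, 0x00, 0xff, 0x00 };
    unsigned char *buf = memdup(ack, sizeof(ack));

    if (!buf)
        return 1;
    /* buf is what the driver would set as urb->transfer_buffer */
    printf("first bytes: %02x %02x %02x\n", buf[0], buf[1], buf[2]);
    free(buf);
    return 0;
}
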
2001     diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
2002     index 6c575244c0fb..af9b7005a2ba 100644
2003     --- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
2004     +++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
2005     @@ -178,6 +178,10 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
2006     struct device *dev = &qphy->phy->dev;
2007     u8 *val;
2008    
2009     + /* efuse register is optional */
2010     + if (!qphy->cell)
2011     + return;
2012     +
2013     /*
2014     * Read efuse register having TUNE2 parameter's high nibble.
2015     * If efuse register shows value as 0x0, or if we fail to find
2016     diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
2017     index 83dc3292e9ab..24cb666c9224 100644
2018     --- a/drivers/staging/android/ion/ion.c
2019     +++ b/drivers/staging/android/ion/ion.c
2020     @@ -131,8 +131,11 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
2021    
2022     void ion_buffer_destroy(struct ion_buffer *buffer)
2023     {
2024     - if (WARN_ON(buffer->kmap_cnt > 0))
2025     + if (buffer->kmap_cnt > 0) {
2026     + pr_warn_once("%s: buffer still mapped in the kernel\n",
2027     + __func__);
2028     buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
2029     + }
2030     buffer->heap->ops->free(buffer);
2031     kfree(buffer);
2032     }
2033     diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
2034     index 833771bca0a5..da04ba1ecf68 100644
2035     --- a/drivers/tty/serial/8250/8250_omap.c
2036     +++ b/drivers/tty/serial/8250/8250_omap.c
2037     @@ -1100,13 +1100,14 @@ static int omap8250_no_handle_irq(struct uart_port *port)
2038     return 0;
2039     }
2040    
2041     +static const u8 omap4_habit = UART_ERRATA_CLOCK_DISABLE;
2042     static const u8 am3352_habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE;
2043     static const u8 dra742_habit = UART_ERRATA_CLOCK_DISABLE;
2044    
2045     static const struct of_device_id omap8250_dt_ids[] = {
2046     { .compatible = "ti,omap2-uart" },
2047     { .compatible = "ti,omap3-uart" },
2048     - { .compatible = "ti,omap4-uart" },
2049     + { .compatible = "ti,omap4-uart", .data = &omap4_habit, },
2050     { .compatible = "ti,am3352-uart", .data = &am3352_habit, },
2051     { .compatible = "ti,am4372-uart", .data = &am3352_habit, },
2052     { .compatible = "ti,dra742-uart", .data = &dra742_habit, },
2053     @@ -1343,6 +1344,19 @@ static int omap8250_soft_reset(struct device *dev)
2054     int sysc;
2055     int syss;
2056    
2057     + /*
2058     + * At least on omap4, unused uarts may not idle after reset without
2059     + * a basic scr dma configuration even with no dma in use. The
2060     + * module clkctrl status bits will be 1 instead of 3 blocking idle
2061     + * for the whole clockdomain. The softreset below will clear scr,
2062     + * and we restore it on resume so this is safe to do on all SoCs
2063     + * needing omap8250_soft_reset() quirk. Do it in two writes as
2064     + * recommended in the comment for omap8250_update_scr().
2065     + */
2066     + serial_out(up, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1);
2067     + serial_out(up, UART_OMAP_SCR,
2068     + OMAP_UART_SCR_DMAMODE_1 | OMAP_UART_SCR_DMAMODE_CTL);
2069     +
2070     sysc = serial_in(up, UART_OMAP_SYSC);
2071    
2072     /* softreset the UART */
2073     diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
2074     index 111e6a950779..c9f701aca677 100644
2075     --- a/drivers/tty/serial/amba-pl011.c
2076     +++ b/drivers/tty/serial/amba-pl011.c
2077     @@ -1747,10 +1747,26 @@ static int pl011_allocate_irq(struct uart_amba_port *uap)
2078     */
2079     static void pl011_enable_interrupts(struct uart_amba_port *uap)
2080     {
2081     + unsigned int i;
2082     +
2083     spin_lock_irq(&uap->port.lock);
2084    
2085     /* Clear out any spuriously appearing RX interrupts */
2086     pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
2087     +
2088     + /*
2089     + * RXIS is asserted only when the RX FIFO transitions from below
2090     + * to above the trigger threshold. If the RX FIFO is already
2091     + * full to the threshold this can't happen and RXIS will now be
2092     + * stuck off. Drain the RX FIFO explicitly to fix this:
2093     + */
2094     + for (i = 0; i < uap->fifosize * 2; ++i) {
2095     + if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
2096     + break;
2097     +
2098     + pl011_read(uap, REG_DR);
2099     + }
2100     +
2101     uap->im = UART011_RTIM;
2102     if (!pl011_dma_rx_running(uap))
2103     uap->im |= UART011_RXIM;
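
The comment in the hunk explains the why; the how is a drain loop capped at twice the FIFO depth, so even a FIFO that never reports empty cannot wedge the CPU. The bounded-drain shape in a runnable sketch (the fifo_* helpers are made up for illustration):

#include <stdio.h>

#define FIFOSIZE 16

static int fifo_fill = 20;              /* pretend data is pending */
static int fifo_empty(void) { return fifo_fill == 0; }
static void fifo_read(void) { --fifo_fill; }

int main(void)
{
    unsigned int i;

    /* never loop more than 2 * fifosize times, mirroring the patch */
    for (i = 0; i < FIFOSIZE * 2; ++i) {
        if (fifo_empty())
            break;
        fifo_read();                    /* discard, like reading REG_DR */
    }
    printf("drained %u entries\n", i);
    return 0;
}
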
2104     diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
2105     index a0b24bc09783..2286e9d73115 100644
2106     --- a/drivers/tty/serial/atmel_serial.c
2107     +++ b/drivers/tty/serial/atmel_serial.c
2108     @@ -1786,7 +1786,6 @@ static int atmel_startup(struct uart_port *port)
2109     {
2110     struct platform_device *pdev = to_platform_device(port->dev);
2111     struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2112     - struct tty_struct *tty = port->state->port.tty;
2113     int retval;
2114    
2115     /*
2116     @@ -1801,8 +1800,8 @@ static int atmel_startup(struct uart_port *port)
2117     * Allocate the IRQ
2118     */
2119     retval = request_irq(port->irq, atmel_interrupt,
2120     - IRQF_SHARED | IRQF_COND_SUSPEND,
2121     - tty ? tty->name : "atmel_serial", port);
2122     + IRQF_SHARED | IRQF_COND_SUSPEND,
2123     + dev_name(&pdev->dev), port);
2124     if (retval) {
2125     dev_err(port->dev, "atmel_startup - Can't get irq\n");
2126     return retval;
2127     diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
2128     index bedd4bdec4ab..57baa84ccf86 100644
2129     --- a/drivers/tty/serial/samsung.c
2130     +++ b/drivers/tty/serial/samsung.c
2131     @@ -865,15 +865,12 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
2132     dma->rx_conf.direction = DMA_DEV_TO_MEM;
2133     dma->rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
2134     dma->rx_conf.src_addr = p->port.mapbase + S3C2410_URXH;
2135     - dma->rx_conf.src_maxburst = 16;
2136     + dma->rx_conf.src_maxburst = 1;
2137    
2138     dma->tx_conf.direction = DMA_MEM_TO_DEV;
2139     dma->tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
2140     dma->tx_conf.dst_addr = p->port.mapbase + S3C2410_UTXH;
2141     - if (dma_get_cache_alignment() >= 16)
2142     - dma->tx_conf.dst_maxburst = 16;
2143     - else
2144     - dma->tx_conf.dst_maxburst = 1;
2145     + dma->tx_conf.dst_maxburst = 1;
2146    
2147     dma->rx_chan = dma_request_chan(p->port.dev, "rx");
2148    
2149     diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
2150     index 8a58ee32ff61..c6daa315ee4e 100644
2151     --- a/drivers/tty/serial/sh-sci.c
2152     +++ b/drivers/tty/serial/sh-sci.c
2153     @@ -2669,8 +2669,8 @@ static int sci_init_clocks(struct sci_port *sci_port, struct device *dev)
2154     dev_dbg(dev, "failed to get %s (%ld)\n", clk_names[i],
2155     PTR_ERR(clk));
2156     else
2157     - dev_dbg(dev, "clk %s is %pC rate %pCr\n", clk_names[i],
2158     - clk, clk);
2159     + dev_dbg(dev, "clk %s is %pC rate %lu\n", clk_names[i],
2160     + clk, clk_get_rate(clk));
2161     sci_port->clks[i] = IS_ERR(clk) ? NULL : clk;
2162     }
2163     return 0;
2164     diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
2165     index ea0da35a44e2..e6d4fa5eeff1 100644
2166     --- a/drivers/usb/gadget/function/f_printer.c
2167     +++ b/drivers/usb/gadget/function/f_printer.c
2168     @@ -635,19 +635,19 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
2169     return -EAGAIN;
2170     }
2171    
2172     + list_add(&req->list, &dev->tx_reqs_active);
2173     +
2174     /* here, we unlock, and only unlock, to avoid deadlock. */
2175     spin_unlock(&dev->lock);
2176     value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC);
2177     spin_lock(&dev->lock);
2178     if (value) {
2179     + list_del(&req->list);
2180     list_add(&req->list, &dev->tx_reqs);
2181     spin_unlock_irqrestore(&dev->lock, flags);
2182     mutex_unlock(&dev->lock_printer_io);
2183     return -EAGAIN;
2184     }
2185     -
2186     - list_add(&req->list, &dev->tx_reqs_active);
2187     -
2188     }
2189    
2190     spin_unlock_irqrestore(&dev->lock, flags);
2191     diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
2192     index 6b3e8adb64e6..4cfa72cb0a91 100644
2193     --- a/drivers/usb/gadget/udc/renesas_usb3.c
2194     +++ b/drivers/usb/gadget/udc/renesas_usb3.c
2195     @@ -623,6 +623,13 @@ static void usb3_disconnect(struct renesas_usb3 *usb3)
2196     usb3_usb2_pullup(usb3, 0);
2197     usb3_clear_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON);
2198     usb3_reset_epc(usb3);
2199     + usb3_disable_irq_1(usb3, USB_INT_1_B2_RSUM | USB_INT_1_B3_PLLWKUP |
2200     + USB_INT_1_B3_LUPSUCS | USB_INT_1_B3_DISABLE |
2201     + USB_INT_1_SPEED | USB_INT_1_B3_WRMRST |
2202     + USB_INT_1_B3_HOTRST | USB_INT_1_B2_SPND |
2203     + USB_INT_1_B2_L1SPND | USB_INT_1_B2_USBRST);
2204     + usb3_clear_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON);
2205     + usb3_init_epc_registers(usb3);
2206    
2207     if (usb3->driver)
2208     usb3->driver->disconnect(&usb3->gadget);
2209     diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
2210     index 25a281f876b5..33a6d624c843 100644
2211     --- a/drivers/usb/storage/uas.c
2212     +++ b/drivers/usb/storage/uas.c
2213     @@ -836,6 +836,12 @@ static int uas_slave_configure(struct scsi_device *sdev)
2214     if (devinfo->flags & US_FL_BROKEN_FUA)
2215     sdev->broken_fua = 1;
2216    
2217     + /* UAS also needs to support FL_ALWAYS_SYNC */
2218     + if (devinfo->flags & US_FL_ALWAYS_SYNC) {
2219     + sdev->skip_ms_page_3f = 1;
2220     + sdev->skip_ms_page_8 = 1;
2221     + sdev->wce_default_on = 1;
2222     + }
2223     scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
2224     return 0;
2225     }
2226     diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
2227     index 52b3e6da0745..d100290628bd 100644
2228     --- a/drivers/usb/storage/unusual_devs.h
2229     +++ b/drivers/usb/storage/unusual_devs.h
2230     @@ -2340,6 +2340,15 @@ UNUSUAL_DEV( 0x4146, 0xba01, 0x0100, 0x0100,
2231     "Micro Mini 1GB",
2232     USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
2233    
2234     +/* "G-DRIVE" external HDD hangs on write without these.
2235     + * Patch submitted by Alexander Kappner <agk@godking.net>
2236     + */
2237     +UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999,
2238     + "SimpleTech",
2239     + "External HDD",
2240     + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2241     + US_FL_ALWAYS_SYNC),
2242     +
2243     /*
2244     * Nick Bowler <nbowler@elliptictech.com>
2245     * SCSI stack spams (otherwise harmless) error messages.
2246     diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
2247     index 719ec68ae309..f15aa47c54a9 100644
2248     --- a/drivers/usb/storage/unusual_uas.h
2249     +++ b/drivers/usb/storage/unusual_uas.h
2250     @@ -183,3 +183,12 @@ UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
2251     "External HDD",
2252     USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2253     US_FL_NO_REPORT_OPCODES),
2254     +
2255     +/* "G-DRIVE" external HDD hangs on write without these.
2256     + * Patch submitted by Alexander Kappner <agk@godking.net>
2257     + */
2258     +UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999,
2259     + "SimpleTech",
2260     + "External HDD",
2261     + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2262     + US_FL_ALWAYS_SYNC),
2263     diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
2264     index 84df63e3130d..4a22a9f06d96 100644
2265     --- a/drivers/usb/usbip/vhci_sysfs.c
2266     +++ b/drivers/usb/usbip/vhci_sysfs.c
2267     @@ -24,6 +24,9 @@
2268     #include <linux/platform_device.h>
2269     #include <linux/slab.h>
2270    
2271     +/* Hardening for Spectre-v1 */
2272     +#include <linux/nospec.h>
2273     +
2274     #include "usbip_common.h"
2275     #include "vhci.h"
2276    
2277     @@ -219,16 +222,20 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
2278     return 0;
2279     }
2280    
2281     -static int valid_port(__u32 pdev_nr, __u32 rhport)
2282     +static int valid_port(__u32 *pdev_nr, __u32 *rhport)
2283     {
2284     - if (pdev_nr >= vhci_num_controllers) {
2285     - pr_err("pdev %u\n", pdev_nr);
2286     + if (*pdev_nr >= vhci_num_controllers) {
2287     + pr_err("pdev %u\n", *pdev_nr);
2288     return 0;
2289     }
2290     - if (rhport >= VHCI_HC_PORTS) {
2291     - pr_err("rhport %u\n", rhport);
2292     + *pdev_nr = array_index_nospec(*pdev_nr, vhci_num_controllers);
2293     +
2294     + if (*rhport >= VHCI_HC_PORTS) {
2295     + pr_err("rhport %u\n", *rhport);
2296     return 0;
2297     }
2298     + *rhport = array_index_nospec(*rhport, VHCI_HC_PORTS);
2299     +
2300     return 1;
2301     }
2302    
2303     @@ -246,7 +253,7 @@ static ssize_t store_detach(struct device *dev, struct device_attribute *attr,
2304     pdev_nr = port_to_pdev_nr(port);
2305     rhport = port_to_rhport(port);
2306    
2307     - if (!valid_port(pdev_nr, rhport))
2308     + if (!valid_port(&pdev_nr, &rhport))
2309     return -EINVAL;
2310    
2311     hcd = platform_get_drvdata(vhcis[pdev_nr].pdev);
2312     @@ -272,7 +279,8 @@ static ssize_t store_detach(struct device *dev, struct device_attribute *attr,
2313     }
2314     static DEVICE_ATTR(detach, S_IWUSR, NULL, store_detach);
2315    
2316     -static int valid_args(__u32 pdev_nr, __u32 rhport, enum usb_device_speed speed)
2317     +static int valid_args(__u32 *pdev_nr, __u32 *rhport,
2318     + enum usb_device_speed speed)
2319     {
2320     if (!valid_port(pdev_nr, rhport)) {
2321     return 0;
2322     @@ -336,7 +344,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
2323     sockfd, devid, speed);
2324    
2325     /* check received parameters */
2326     - if (!valid_args(pdev_nr, rhport, speed))
2327     + if (!valid_args(&pdev_nr, &rhport, speed))
2328     return -EINVAL;
2329    
2330     hcd = platform_get_drvdata(vhcis[pdev_nr].pdev);
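
valid_port() now takes its arguments by pointer so it can both range-check and clamp them with array_index_nospec() before they index vhcis[] and the per-controller ports, closing a Spectre-v1 window. A simplified sketch of the check-then-clamp shape; note that the kernel's array_index_nospec() is branchless masking, while index_nospec() below is only an illustrative stand-in:

#include <stdio.h>

#define NUM_CONTROLLERS 4

/* illustrative stand-in: after the bounds check, clamp the value so a
 * mispredicted path cannot carry an out-of-range index into a
 * dependent array load */
static unsigned int index_nospec(unsigned int idx, unsigned int size)
{
    return idx < size ? idx : 0;
}

static int valid_port(unsigned int *pdev_nr)
{
    if (*pdev_nr >= NUM_CONTROLLERS)
        return 0;                       /* reject, as the driver does */
    *pdev_nr = index_nospec(*pdev_nr, NUM_CONTROLLERS);
    return 1;
}

int main(void)
{
    unsigned int nr = 2;

    printf("valid=%d nr=%u\n", valid_port(&nr), nr);
    return 0;
}
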
2331     diff --git a/net/key/af_key.c b/net/key/af_key.c
2332     index 2ad693232f74..3b209cbfe1df 100644
2333     --- a/net/key/af_key.c
2334     +++ b/net/key/af_key.c
2335     @@ -437,6 +437,24 @@ static int verify_address_len(const void *p)
2336     return 0;
2337     }
2338    
2339     +static inline int sadb_key_len(const struct sadb_key *key)
2340     +{
2341     + int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8);
2342     +
2343     + return DIV_ROUND_UP(sizeof(struct sadb_key) + key_bytes,
2344     + sizeof(uint64_t));
2345     +}
2346     +
2347     +static int verify_key_len(const void *p)
2348     +{
2349     + const struct sadb_key *key = p;
2350     +
2351     + if (sadb_key_len(key) > key->sadb_key_len)
2352     + return -EINVAL;
2353     +
2354     + return 0;
2355     +}
2356     +
2357     static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx)
2358     {
2359     return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) +
2360     @@ -533,16 +551,25 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
2361     return -EINVAL;
2362     if (ext_hdrs[ext_type-1] != NULL)
2363     return -EINVAL;
2364     - if (ext_type == SADB_EXT_ADDRESS_SRC ||
2365     - ext_type == SADB_EXT_ADDRESS_DST ||
2366     - ext_type == SADB_EXT_ADDRESS_PROXY ||
2367     - ext_type == SADB_X_EXT_NAT_T_OA) {
2368     + switch (ext_type) {
2369     + case SADB_EXT_ADDRESS_SRC:
2370     + case SADB_EXT_ADDRESS_DST:
2371     + case SADB_EXT_ADDRESS_PROXY:
2372     + case SADB_X_EXT_NAT_T_OA:
2373     if (verify_address_len(p))
2374     return -EINVAL;
2375     - }
2376     - if (ext_type == SADB_X_EXT_SEC_CTX) {
2377     + break;
2378     + case SADB_X_EXT_SEC_CTX:
2379     if (verify_sec_ctx_len(p))
2380     return -EINVAL;
2381     + break;
2382     + case SADB_EXT_KEY_AUTH:
2383     + case SADB_EXT_KEY_ENCRYPT:
2384     + if (verify_key_len(p))
2385     + return -EINVAL;
2386     + break;
2387     + default:
2388     + break;
2389     }
2390     ext_hdrs[ext_type-1] = (void *) p;
2391     }
2392     @@ -1104,14 +1131,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
2393     key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
2394     if (key != NULL &&
2395     sa->sadb_sa_auth != SADB_X_AALG_NULL &&
2396     - ((key->sadb_key_bits+7) / 8 == 0 ||
2397     - (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
2398     + key->sadb_key_bits == 0)
2399     return ERR_PTR(-EINVAL);
2400     key = ext_hdrs[SADB_EXT_KEY_ENCRYPT-1];
2401     if (key != NULL &&
2402     sa->sadb_sa_encrypt != SADB_EALG_NULL &&
2403     - ((key->sadb_key_bits+7) / 8 == 0 ||
2404     - (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
2405     + key->sadb_key_bits == 0)
2406     return ERR_PTR(-EINVAL);
2407    
2408     x = xfrm_state_alloc(net);
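
The af_key change centralizes the key-length sanity check: sadb_key_len() computes, in 64-bit words, how much room sadb_key_bits actually needs, and verify_key_len() rejects any KEY_AUTH/KEY_ENCRYPT extension whose declared length is smaller, so later code can no longer read past the extension. A self-contained sketch of the same check, using a reduced stand-in for struct sadb_key:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct sadb_key_hdr {                   /* reduced stand-in, not the real layout */
    uint16_t sadb_key_len;              /* declared total length, 64-bit words */
    uint16_t sadb_key_bits;             /* advertised key length in bits */
};

/* space the key payload actually needs, in 64-bit words */
static int sadb_key_words(const struct sadb_key_hdr *key)
{
    int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8);

    return DIV_ROUND_UP(sizeof(*key) + key_bytes, sizeof(uint64_t));
}

int main(void)
{
    /* claims 1 word of space but 512 bits (64 bytes) of key: reject */
    struct sadb_key_hdr bogus = { .sadb_key_len = 1, .sadb_key_bits = 512 };

    if (sadb_key_words(&bogus) > bogus.sadb_key_len)
        puts("rejected: declared length too small for advertised key bits");
    return 0;
}
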
2409     diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
2410     index bd0975d7dd6f..5e0d367a0988 100644
2411     --- a/net/netfilter/nft_ct.c
2412     +++ b/net/netfilter/nft_ct.c
2413     @@ -875,22 +875,26 @@ static int nft_ct_helper_obj_dump(struct sk_buff *skb,
2414     struct nft_object *obj, bool reset)
2415     {
2416     const struct nft_ct_helper_obj *priv = nft_obj_data(obj);
2417     - const struct nf_conntrack_helper *helper = priv->helper4;
2418     + const struct nf_conntrack_helper *helper;
2419     u16 family;
2420    
2421     + if (priv->helper4 && priv->helper6) {
2422     + family = NFPROTO_INET;
2423     + helper = priv->helper4;
2424     + } else if (priv->helper6) {
2425     + family = NFPROTO_IPV6;
2426     + helper = priv->helper6;
2427     + } else {
2428     + family = NFPROTO_IPV4;
2429     + helper = priv->helper4;
2430     + }
2431     +
2432     if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name))
2433     return -1;
2434    
2435     if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto))
2436     return -1;
2437    
2438     - if (priv->helper4 && priv->helper6)
2439     - family = NFPROTO_INET;
2440     - else if (priv->helper6)
2441     - family = NFPROTO_IPV6;
2442     - else
2443     - family = NFPROTO_IPV4;
2444     -
2445     if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family)))
2446     return -1;
2447