Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.17/0101-4.17.2-all-fixes.patch



Revision 3135
Mon Jun 18 09:23:45 2018 UTC by niro
File size: 100377 bytes
-linux-4.17.2
1 niro 3135 diff --git a/Documentation/ABI/stable/sysfs-bus-vmbus b/Documentation/ABI/stable/sysfs-bus-vmbus
2     index 0c9d9dcd2151..3eaffbb2d468 100644
3     --- a/Documentation/ABI/stable/sysfs-bus-vmbus
4     +++ b/Documentation/ABI/stable/sysfs-bus-vmbus
5     @@ -1,25 +1,25 @@
6     -What: /sys/bus/vmbus/devices/vmbus_*/id
7     +What: /sys/bus/vmbus/devices/<UUID>/id
8     Date: Jul 2009
9     KernelVersion: 2.6.31
10     Contact: K. Y. Srinivasan <kys@microsoft.com>
11     Description: The VMBus child_relid of the device's primary channel
12     Users: tools/hv/lsvmbus
13    
14     -What: /sys/bus/vmbus/devices/vmbus_*/class_id
15     +What: /sys/bus/vmbus/devices/<UUID>/class_id
16     Date: Jul 2009
17     KernelVersion: 2.6.31
18     Contact: K. Y. Srinivasan <kys@microsoft.com>
19     Description: The VMBus interface type GUID of the device
20     Users: tools/hv/lsvmbus
21    
22     -What: /sys/bus/vmbus/devices/vmbus_*/device_id
23     +What: /sys/bus/vmbus/devices/<UUID>/device_id
24     Date: Jul 2009
25     KernelVersion: 2.6.31
26     Contact: K. Y. Srinivasan <kys@microsoft.com>
27     Description: The VMBus interface instance GUID of the device
28     Users: tools/hv/lsvmbus
29    
30     -What: /sys/bus/vmbus/devices/vmbus_*/channel_vp_mapping
31     +What: /sys/bus/vmbus/devices/<UUID>/channel_vp_mapping
32     Date: Jul 2015
33     KernelVersion: 4.2.0
34     Contact: K. Y. Srinivasan <kys@microsoft.com>
35     @@ -28,112 +28,112 @@ Description: The mapping of which primary/sub channels are bound to which
36     Format: <channel's child_relid:the bound cpu's number>
37     Users: tools/hv/lsvmbus
38    
39     -What: /sys/bus/vmbus/devices/vmbus_*/device
40     +What: /sys/bus/vmbus/devices/<UUID>/device
41     Date: Dec. 2015
42     KernelVersion: 4.5
43     Contact: K. Y. Srinivasan <kys@microsoft.com>
44     Description: The 16 bit device ID of the device
45     Users: tools/hv/lsvmbus and user level RDMA libraries
46    
47     -What: /sys/bus/vmbus/devices/vmbus_*/vendor
48     +What: /sys/bus/vmbus/devices/<UUID>/vendor
49     Date: Dec. 2015
50     KernelVersion: 4.5
51     Contact: K. Y. Srinivasan <kys@microsoft.com>
52     Description: The 16 bit vendor ID of the device
53     Users: tools/hv/lsvmbus and user level RDMA libraries
54    
55     -What: /sys/bus/vmbus/devices/vmbus_*/channels/NN
56     +What: /sys/bus/vmbus/devices/<UUID>/channels/<N>
57     Date: September. 2017
58     KernelVersion: 4.14
59     Contact: Stephen Hemminger <sthemmin@microsoft.com>
60     Description: Directory for per-channel information
61     NN is the VMBUS relid associtated with the channel.
62    
63     -What: /sys/bus/vmbus/devices/vmbus_*/channels/NN/cpu
64     +What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/cpu
65     Date: September. 2017
66     KernelVersion: 4.14
67     Contact: Stephen Hemminger <sthemmin@microsoft.com>
68     Description: VCPU (sub)channel is affinitized to
69     Users: tools/hv/lsvmbus and other debugging tools
70    
71     -What: /sys/bus/vmbus/devices/vmbus_*/channels/NN/cpu
72     +What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/cpu
73     Date: September. 2017
74     KernelVersion: 4.14
75     Contact: Stephen Hemminger <sthemmin@microsoft.com>
76     Description: VCPU (sub)channel is affinitized to
77     Users: tools/hv/lsvmbus and other debugging tools
78    
79     -What: /sys/bus/vmbus/devices/vmbus_*/channels/NN/in_mask
80     +What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/in_mask
81     Date: September. 2017
82     KernelVersion: 4.14
83     Contact: Stephen Hemminger <sthemmin@microsoft.com>
84     Description: Host to guest channel interrupt mask
85     Users: Debugging tools
86    
87     -What: /sys/bus/vmbus/devices/vmbus_*/channels/NN/latency
88     +What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/latency
89     Date: September. 2017
90     KernelVersion: 4.14
91     Contact: Stephen Hemminger <sthemmin@microsoft.com>
92     Description: Channel signaling latency
93     Users: Debugging tools
94    
95     -What: /sys/bus/vmbus/devices/vmbus_*/channels/NN/out_mask
96     +What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/out_mask
97     Date: September. 2017
98     KernelVersion: 4.14
99     Contact: Stephen Hemminger <sthemmin@microsoft.com>
100     Description: Guest to host channel interrupt mask
101     Users: Debugging tools
102    
103     -What: /sys/bus/vmbus/devices/vmbus_*/channels/NN/pending
104     +What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/pending
105     Date: September. 2017
106     KernelVersion: 4.14
107     Contact: Stephen Hemminger <sthemmin@microsoft.com>
108     Description: Channel interrupt pending state
109     Users: Debugging tools
110    
111     -What: /sys/bus/vmbus/devices/vmbus_*/channels/NN/read_avail
112     +What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/read_avail
113     Date: September. 2017
114     KernelVersion: 4.14
115     Contact: Stephen Hemminger <sthemmin@microsoft.com>
116     Description: Bytes available to read
117     Users: Debugging tools
118    
119     -What: /sys/bus/vmbus/devices/vmbus_*/channels/NN/write_avail
120     +What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/write_avail
121     Date: September. 2017
122     KernelVersion: 4.14
123     Contact: Stephen Hemminger <sthemmin@microsoft.com>
124     Description: Bytes available to write
125     Users: Debugging tools
126    
127     -What: /sys/bus/vmbus/devices/vmbus_*/channels/NN/events
128     +What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/events
129     Date: September. 2017
130     KernelVersion: 4.14
131     Contact: Stephen Hemminger <sthemmin@microsoft.com>
132     Description: Number of times we have signaled the host
133     Users: Debugging tools
134    
135     -What: /sys/bus/vmbus/devices/vmbus_*/channels/NN/interrupts
136     +What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/interrupts
137     Date: September. 2017
138     KernelVersion: 4.14
139     Contact: Stephen Hemminger <sthemmin@microsoft.com>
140     Description: Number of times we have taken an interrupt (incoming)
141     Users: Debugging tools
142    
143     -What: /sys/bus/vmbus/devices/vmbus_*/channels/NN/subchannel_id
144     +What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/subchannel_id
145     Date: January. 2018
146     KernelVersion: 4.16
147     Contact: Stephen Hemminger <sthemmin@microsoft.com>
148     Description: Subchannel ID associated with VMBUS channel
149     Users: Debugging tools and userspace drivers
150    
151     -What: /sys/bus/vmbus/devices/vmbus_*/channels/NN/monitor_id
152     +What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/monitor_id
153     Date: January. 2018
154     KernelVersion: 4.16
155     Contact: Stephen Hemminger <sthemmin@microsoft.com>
156     Description: Monitor bit associated with channel
157     Users: Debugging tools and userspace drivers
158    
159     -What: /sys/bus/vmbus/devices/vmbus_*/channels/NN/ring
160     +What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/ring
161     Date: January. 2018
162     KernelVersion: 4.16
163     Contact: Stephen Hemminger <sthemmin@microsoft.com>
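Note: the hunk above only renames the documented path patterns (vmbus_* becomes a <UUID> placeholder, NN becomes <N>); the sysfs layout exported by the driver itself is unchanged. A minimal user-space sketch of consuming one of the documented per-channel attributes, where the device UUID and relid are hypothetical inputs (e.g. taken from tools/hv/lsvmbus output):

	#include <stdio.h>

	/* Read which CPU a VMBus (sub)channel is affinitized to, via the
	 * /sys/bus/vmbus/devices/<UUID>/channels/<N>/cpu attribute. */
	static int vmbus_channel_cpu(const char *uuid, unsigned int relid)
	{
		char path[256];
		FILE *f;
		int cpu = -1;

		snprintf(path, sizeof(path),
			 "/sys/bus/vmbus/devices/%s/channels/%u/cpu",
			 uuid, relid);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (fscanf(f, "%d", &cpu) != 1)
			cpu = -1;
		fclose(f);
		return cpu;
	}
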
164     diff --git a/Makefile b/Makefile
165     index e551c9af6a06..f43cd522b175 100644
166     --- a/Makefile
167     +++ b/Makefile
168     @@ -1,7 +1,7 @@
169     # SPDX-License-Identifier: GPL-2.0
170     VERSION = 4
171     PATCHLEVEL = 17
172     -SUBLEVEL = 1
173     +SUBLEVEL = 2
174     EXTRAVERSION =
175     NAME = Merciless Moray
176    
177     diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
178     index ecf613761e78..fe005df02ed3 100644
179     --- a/arch/arm64/configs/defconfig
180     +++ b/arch/arm64/configs/defconfig
181     @@ -320,6 +320,7 @@ CONFIG_PINCTRL_MAX77620=y
182     CONFIG_PINCTRL_MSM8916=y
183     CONFIG_PINCTRL_MSM8994=y
184     CONFIG_PINCTRL_MSM8996=y
185     +CONFIG_PINCTRL_MT7622=y
186     CONFIG_PINCTRL_QDF2XXX=y
187     CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
188     CONFIG_GPIO_DWAPB=y
189     diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
190     index b24b1c8b3979..0f82cd91cd3c 100644
191     --- a/arch/x86/include/asm/kvm_emulate.h
192     +++ b/arch/x86/include/asm/kvm_emulate.h
193     @@ -107,11 +107,12 @@ struct x86_emulate_ops {
194     * @addr: [IN ] Linear address from which to read.
195     * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
196     * @bytes: [IN ] Number of bytes to read from memory.
197     + * @system:[IN ] Whether the access is forced to be at CPL0.
198     */
199     int (*read_std)(struct x86_emulate_ctxt *ctxt,
200     unsigned long addr, void *val,
201     unsigned int bytes,
202     - struct x86_exception *fault);
203     + struct x86_exception *fault, bool system);
204    
205     /*
206     * read_phys: Read bytes of standard (non-emulated/special) memory.
207     @@ -129,10 +130,11 @@ struct x86_emulate_ops {
208     * @addr: [IN ] Linear address to which to write.
209     * @val: [OUT] Value write to memory, zero-extended to 'u_long'.
210     * @bytes: [IN ] Number of bytes to write to memory.
211     + * @system:[IN ] Whether the access is forced to be at CPL0.
212     */
213     int (*write_std)(struct x86_emulate_ctxt *ctxt,
214     unsigned long addr, void *val, unsigned int bytes,
215     - struct x86_exception *fault);
216     + struct x86_exception *fault, bool system);
217     /*
218     * fetch: Read bytes of standard (non-emulated/special) memory.
219     * Used for instruction fetch.
220     diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
221     index b3705ae52824..4c4f4263420c 100644
222     --- a/arch/x86/kvm/emulate.c
223     +++ b/arch/x86/kvm/emulate.c
224     @@ -812,6 +812,19 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
225     return assign_eip_near(ctxt, ctxt->_eip + rel);
226     }
227    
228     +static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
229     + void *data, unsigned size)
230     +{
231     + return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
232     +}
233     +
234     +static int linear_write_system(struct x86_emulate_ctxt *ctxt,
235     + ulong linear, void *data,
236     + unsigned int size)
237     +{
238     + return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
239     +}
240     +
241     static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
242     struct segmented_address addr,
243     void *data,
244     @@ -823,7 +836,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
245     rc = linearize(ctxt, addr, size, false, &linear);
246     if (rc != X86EMUL_CONTINUE)
247     return rc;
248     - return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
249     + return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
250     }
251    
252     static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
253     @@ -837,7 +850,7 @@ static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
254     rc = linearize(ctxt, addr, size, true, &linear);
255     if (rc != X86EMUL_CONTINUE)
256     return rc;
257     - return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
258     + return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
259     }
260    
261     /*
262     @@ -1496,8 +1509,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
263     return emulate_gp(ctxt, index << 3 | 0x2);
264    
265     addr = dt.address + index * 8;
266     - return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
267     - &ctxt->exception);
268     + return linear_read_system(ctxt, addr, desc, sizeof *desc);
269     }
270    
271     static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
272     @@ -1560,8 +1572,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
273     if (rc != X86EMUL_CONTINUE)
274     return rc;
275    
276     - return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
277     - &ctxt->exception);
278     + return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
279     }
280    
281     /* allowed just for 8 bytes segments */
282     @@ -1575,8 +1586,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
283     if (rc != X86EMUL_CONTINUE)
284     return rc;
285    
286     - return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
287     - &ctxt->exception);
288     + return linear_write_system(ctxt, addr, desc, sizeof *desc);
289     }
290    
291     static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
292     @@ -1737,8 +1747,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
293     return ret;
294     }
295     } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
296     - ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
297     - sizeof(base3), &ctxt->exception);
298     + ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
299     if (ret != X86EMUL_CONTINUE)
300     return ret;
301     if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
302     @@ -2051,11 +2060,11 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
303     eip_addr = dt.address + (irq << 2);
304     cs_addr = dt.address + (irq << 2) + 2;
305    
306     - rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
307     + rc = linear_read_system(ctxt, cs_addr, &cs, 2);
308     if (rc != X86EMUL_CONTINUE)
309     return rc;
310    
311     - rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
312     + rc = linear_read_system(ctxt, eip_addr, &eip, 2);
313     if (rc != X86EMUL_CONTINUE)
314     return rc;
315    
316     @@ -2919,12 +2928,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
317     #ifdef CONFIG_X86_64
318     base |= ((u64)base3) << 32;
319     #endif
320     - r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
321     + r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
322     if (r != X86EMUL_CONTINUE)
323     return false;
324     if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
325     return false;
326     - r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
327     + r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
328     if (r != X86EMUL_CONTINUE)
329     return false;
330     if ((perm >> bit_idx) & mask)
331     @@ -3053,35 +3062,30 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
332     u16 tss_selector, u16 old_tss_sel,
333     ulong old_tss_base, struct desc_struct *new_desc)
334     {
335     - const struct x86_emulate_ops *ops = ctxt->ops;
336     struct tss_segment_16 tss_seg;
337     int ret;
338     u32 new_tss_base = get_desc_base(new_desc);
339    
340     - ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
341     - &ctxt->exception);
342     + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
343     if (ret != X86EMUL_CONTINUE)
344     return ret;
345    
346     save_state_to_tss16(ctxt, &tss_seg);
347    
348     - ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
349     - &ctxt->exception);
350     + ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
351     if (ret != X86EMUL_CONTINUE)
352     return ret;
353    
354     - ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
355     - &ctxt->exception);
356     + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
357     if (ret != X86EMUL_CONTINUE)
358     return ret;
359    
360     if (old_tss_sel != 0xffff) {
361     tss_seg.prev_task_link = old_tss_sel;
362    
363     - ret = ops->write_std(ctxt, new_tss_base,
364     - &tss_seg.prev_task_link,
365     - sizeof tss_seg.prev_task_link,
366     - &ctxt->exception);
367     + ret = linear_write_system(ctxt, new_tss_base,
368     + &tss_seg.prev_task_link,
369     + sizeof tss_seg.prev_task_link);
370     if (ret != X86EMUL_CONTINUE)
371     return ret;
372     }
373     @@ -3197,38 +3201,34 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
374     u16 tss_selector, u16 old_tss_sel,
375     ulong old_tss_base, struct desc_struct *new_desc)
376     {
377     - const struct x86_emulate_ops *ops = ctxt->ops;
378     struct tss_segment_32 tss_seg;
379     int ret;
380     u32 new_tss_base = get_desc_base(new_desc);
381     u32 eip_offset = offsetof(struct tss_segment_32, eip);
382     u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
383    
384     - ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
385     - &ctxt->exception);
386     + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
387     if (ret != X86EMUL_CONTINUE)
388     return ret;
389    
390     save_state_to_tss32(ctxt, &tss_seg);
391    
392     /* Only GP registers and segment selectors are saved */
393     - ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
394     - ldt_sel_offset - eip_offset, &ctxt->exception);
395     + ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
396     + ldt_sel_offset - eip_offset);
397     if (ret != X86EMUL_CONTINUE)
398     return ret;
399    
400     - ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
401     - &ctxt->exception);
402     + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
403     if (ret != X86EMUL_CONTINUE)
404     return ret;
405    
406     if (old_tss_sel != 0xffff) {
407     tss_seg.prev_task_link = old_tss_sel;
408    
409     - ret = ops->write_std(ctxt, new_tss_base,
410     - &tss_seg.prev_task_link,
411     - sizeof tss_seg.prev_task_link,
412     - &ctxt->exception);
413     + ret = linear_write_system(ctxt, new_tss_base,
414     + &tss_seg.prev_task_link,
415     + sizeof tss_seg.prev_task_link);
416     if (ret != X86EMUL_CONTINUE)
417     return ret;
418     }
419     @@ -4189,7 +4189,9 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
420     maxphyaddr = eax & 0xff;
421     else
422     maxphyaddr = 36;
423     - rsvd = rsvd_bits(maxphyaddr, 62);
424     + rsvd = rsvd_bits(maxphyaddr, 63);
425     + if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
426     + rsvd &= ~CR3_PCID_INVD;
427     }
428    
429     if (new_val & rsvd)
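Note on the check_cr_write() hunk above: the emulator previously treated only bits maxphyaddr..62 of a CR3 write as reserved, silently accepting bit 63. Bit 63 is the PCID "no flush" bit (CR3_PCID_INVD) and is only legal when CR4.PCIDE is set, so the mask now extends to bit 63 and carves that bit back out when PCIDs are enabled. A compilable sketch of the resulting check, with the relevant constants inlined (rsvd_bits() mirrors KVM's helper):

	#include <stdint.h>

	#define CR3_PCID_INVD	(1ULL << 63)	/* "no flush" bit of a CR3 write */
	#define X86_CR4_PCIDE	(1ULL << 17)	/* PCID enable */

	/* Mask with bits s..e set, as in KVM's rsvd_bits(). */
	static inline uint64_t rsvd_bits(int s, int e)
	{
		return ((2ULL << (e - s)) - 1) << s;
	}

	/* Nonzero means the CR3 value sets a reserved bit and must #GP. */
	static int cr3_write_reserved(uint64_t new_cr3, int maxphyaddr,
				      uint64_t cr4)
	{
		uint64_t rsvd = rsvd_bits(maxphyaddr, 63);

		if (cr4 & X86_CR4_PCIDE)
			rsvd &= ~CR3_PCID_INVD;	/* bit 63 becomes legal */

		return (new_cr3 & rsvd) != 0;
	}
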
430     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
431     index 40aa29204baf..82f5e915e568 100644
432     --- a/arch/x86/kvm/vmx.c
433     +++ b/arch/x86/kvm/vmx.c
434     @@ -7588,8 +7588,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
435     vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
436     return 1;
437    
438     - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
439     - sizeof(*vmpointer), &e)) {
440     + if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
441     kvm_inject_page_fault(vcpu, &e);
442     return 1;
443     }
444     @@ -7670,6 +7669,12 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
445     return 1;
446     }
447    
448     + /* CPL=0 must be checked manually. */
449     + if (vmx_get_cpl(vcpu)) {
450     + kvm_queue_exception(vcpu, UD_VECTOR);
451     + return 1;
452     + }
453     +
454     if (vmx->nested.vmxon) {
455     nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
456     return kvm_skip_emulated_instruction(vcpu);
457     @@ -7729,6 +7734,11 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
458     */
459     static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
460     {
461     + if (vmx_get_cpl(vcpu)) {
462     + kvm_queue_exception(vcpu, UD_VECTOR);
463     + return 0;
464     + }
465     +
466     if (!to_vmx(vcpu)->nested.vmxon) {
467     kvm_queue_exception(vcpu, UD_VECTOR);
468     return 0;
469     @@ -8029,9 +8039,9 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
470     if (get_vmx_mem_address(vcpu, exit_qualification,
471     vmx_instruction_info, true, &gva))
472     return 1;
473     - /* _system ok, as hardware has verified cpl=0 */
474     - kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
475     - &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
476     + /* _system ok, nested_vmx_check_permission has verified cpl=0 */
477     + kvm_write_guest_virt_system(vcpu, gva, &field_value,
478     + (is_long_mode(vcpu) ? 8 : 4), NULL);
479     }
480    
481     nested_vmx_succeed(vcpu);
482     @@ -8069,8 +8079,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
483     if (get_vmx_mem_address(vcpu, exit_qualification,
484     vmx_instruction_info, false, &gva))
485     return 1;
486     - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
487     - &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
488     + if (kvm_read_guest_virt(vcpu, gva, &field_value,
489     + (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
490     kvm_inject_page_fault(vcpu, &e);
491     return 1;
492     }
493     @@ -8189,10 +8199,10 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
494     if (get_vmx_mem_address(vcpu, exit_qualification,
495     vmx_instruction_info, true, &vmcs_gva))
496     return 1;
497     - /* ok to use *_system, as hardware has verified cpl=0 */
498     - if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
499     - (void *)&to_vmx(vcpu)->nested.current_vmptr,
500     - sizeof(u64), &e)) {
501     + /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
502     + if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
503     + (void *)&to_vmx(vcpu)->nested.current_vmptr,
504     + sizeof(u64), &e)) {
505     kvm_inject_page_fault(vcpu, &e);
506     return 1;
507     }
508     @@ -8239,8 +8249,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
509     if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
510     vmx_instruction_info, false, &gva))
511     return 1;
512     - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
513     - sizeof(operand), &e)) {
514     + if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
515     kvm_inject_page_fault(vcpu, &e);
516     return 1;
517     }
518     @@ -8304,8 +8313,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
519     if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
520     vmx_instruction_info, false, &gva))
521     return 1;
522     - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
523     - sizeof(operand), &e)) {
524     + if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
525     kvm_inject_page_fault(vcpu, &e);
526     return 1;
527     }
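Note on the vmx.c hunks above: VMX instructions executed in a guest trap to the hypervisor unconditionally, even when the guest runs them at CPL > 0, so the CPL0 requirement has to be re-checked in software before the handlers touch guest memory with the *_system accessors (which bypass the user/supervisor permission check). Reassembled from the hunks, the common gate now reads:

	/* VMXON runs before nested.vmxon is set, so it repeats the CPL
	 * check inline; every other VMX instruction funnels through
	 * this helper. */
	static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
	{
		if (vmx_get_cpl(vcpu)) {		/* must be ring 0 */
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 0;
		}

		if (!to_vmx(vcpu)->nested.vmxon) {	/* must be in VMX operation */
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 0;
		}

		return 1;
	}
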
528     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
529     index 71e7cda6d014..fbc4d17e3ecc 100644
530     --- a/arch/x86/kvm/x86.c
531     +++ b/arch/x86/kvm/x86.c
532     @@ -856,7 +856,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
533     }
534    
535     if (is_long_mode(vcpu) &&
536     - (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 62)))
537     + (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63)))
538     return 1;
539     else if (is_pae(vcpu) && is_paging(vcpu) &&
540     !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
541     @@ -2894,7 +2894,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
542     r = KVM_CLOCK_TSC_STABLE;
543     break;
544     case KVM_CAP_X86_DISABLE_EXITS:
545     - r |= KVM_X86_DISABLE_EXITS_HTL | KVM_X86_DISABLE_EXITS_PAUSE;
546     + r |= KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE;
547     if(kvm_can_mwait_in_guest())
548     r |= KVM_X86_DISABLE_EXITS_MWAIT;
549     break;
550     @@ -4248,7 +4248,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
551     if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
552     kvm_can_mwait_in_guest())
553     kvm->arch.mwait_in_guest = true;
554     - if (cap->args[0] & KVM_X86_DISABLE_EXITS_HTL)
555     + if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
556     kvm->arch.hlt_in_guest = true;
557     if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
558     kvm->arch.pause_in_guest = true;
559     @@ -4787,11 +4787,10 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
560     return X86EMUL_CONTINUE;
561     }
562    
563     -int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
564     +int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
565     gva_t addr, void *val, unsigned int bytes,
566     struct x86_exception *exception)
567     {
568     - struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
569     u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
570    
571     return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
572     @@ -4799,12 +4798,17 @@ int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
573     }
574     EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
575    
576     -static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
577     - gva_t addr, void *val, unsigned int bytes,
578     - struct x86_exception *exception)
579     +static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
580     + gva_t addr, void *val, unsigned int bytes,
581     + struct x86_exception *exception, bool system)
582     {
583     struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
584     - return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
585     + u32 access = 0;
586     +
587     + if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
588     + access |= PFERR_USER_MASK;
589     +
590     + return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
591     }
592    
593     static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
594     @@ -4816,18 +4820,16 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
595     return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
596     }
597    
598     -int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
599     - gva_t addr, void *val,
600     - unsigned int bytes,
601     - struct x86_exception *exception)
602     +static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
603     + struct kvm_vcpu *vcpu, u32 access,
604     + struct x86_exception *exception)
605     {
606     - struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
607     void *data = val;
608     int r = X86EMUL_CONTINUE;
609    
610     while (bytes) {
611     gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
612     - PFERR_WRITE_MASK,
613     + access,
614     exception);
615     unsigned offset = addr & (PAGE_SIZE-1);
616     unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
617     @@ -4848,6 +4850,27 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
618     out:
619     return r;
620     }
621     +
622     +static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
623     + unsigned int bytes, struct x86_exception *exception,
624     + bool system)
625     +{
626     + struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
627     + u32 access = PFERR_WRITE_MASK;
628     +
629     + if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
630     + access |= PFERR_USER_MASK;
631     +
632     + return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
633     + access, exception);
634     +}
635     +
636     +int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
637     + unsigned int bytes, struct x86_exception *exception)
638     +{
639     + return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
640     + PFERR_WRITE_MASK, exception);
641     +}
642     EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
643    
644     int handle_ud(struct kvm_vcpu *vcpu)
645     @@ -4858,8 +4881,8 @@ int handle_ud(struct kvm_vcpu *vcpu)
646     struct x86_exception e;
647    
648     if (force_emulation_prefix &&
649     - kvm_read_guest_virt(&vcpu->arch.emulate_ctxt,
650     - kvm_get_linear_rip(vcpu), sig, sizeof(sig), &e) == 0 &&
651     + kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
652     + sig, sizeof(sig), &e) == 0 &&
653     memcmp(sig, "\xf\xbkvm", sizeof(sig)) == 0) {
654     kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
655     emul_type = 0;
656     @@ -5600,8 +5623,8 @@ static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
657     static const struct x86_emulate_ops emulate_ops = {
658     .read_gpr = emulator_read_gpr,
659     .write_gpr = emulator_write_gpr,
660     - .read_std = kvm_read_guest_virt_system,
661     - .write_std = kvm_write_guest_virt_system,
662     + .read_std = emulator_read_std,
663     + .write_std = emulator_write_std,
664     .read_phys = kvm_read_guest_phys_system,
665     .fetch = kvm_fetch_guest_virt,
666     .read_emulated = emulator_read_emulated,
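Note on the x86.c hunks above, which are the core of the fix: the emulator's generic read_std/write_std callbacks used to behave like privileged (CPL0) accesses unconditionally, letting unprivileged guest code reach supervisor-only memory through emulated instructions. With the new `system` flag, only explicitly-system accesses stay privileged; everything else picks up PFERR_USER_MASK when the guest is at CPL3, so the page walk honors U/S protection. A self-contained sketch of the access-mask derivation (PFERR_* bit positions as in arch/x86):

	#include <stdbool.h>
	#include <stdint.h>

	#define PFERR_WRITE_MASK	(1U << 1)	/* access was a write */
	#define PFERR_USER_MASK		(1U << 2)	/* access from user mode */

	/* How emulator_read_std()/emulator_write_std() now build the
	 * page-fault error-code bits used for the guest page walk. */
	static uint32_t emulator_access_bits(int cpl, bool is_write, bool system)
	{
		uint32_t access = is_write ? PFERR_WRITE_MASK : 0;

		if (!system && cpl == 3)
			access |= PFERR_USER_MASK;	/* honor U/S protection */

		return access;
	}
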
667     diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
668     index c9492f764902..331993c49dae 100644
669     --- a/arch/x86/kvm/x86.h
670     +++ b/arch/x86/kvm/x86.h
671     @@ -247,11 +247,11 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
672     void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
673     u64 get_kvmclock_ns(struct kvm *kvm);
674    
675     -int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
676     +int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
677     gva_t addr, void *val, unsigned int bytes,
678     struct x86_exception *exception);
679    
680     -int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
681     +int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
682     gva_t addr, void *val, unsigned int bytes,
683     struct x86_exception *exception);
684    
685     diff --git a/block/blk-zoned.c b/block/blk-zoned.c
686     index 08e84ef2bc05..3d08dc84db16 100644
687     --- a/block/blk-zoned.c
688     +++ b/block/blk-zoned.c
689     @@ -328,7 +328,11 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
690     if (!rep.nr_zones)
691     return -EINVAL;
692    
693     - zones = kcalloc(rep.nr_zones, sizeof(struct blk_zone), GFP_KERNEL);
694     + if (rep.nr_zones > INT_MAX / sizeof(struct blk_zone))
695     + return -ERANGE;
696     +
697     + zones = kvmalloc(rep.nr_zones * sizeof(struct blk_zone),
698     + GFP_KERNEL | __GFP_ZERO);
699     if (!zones)
700     return -ENOMEM;
701    
702     @@ -350,7 +354,7 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
703     }
704    
705     out:
706     - kfree(zones);
707     + kvfree(zones);
708    
709     return ret;
710     }
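Note on the blk-zoned.c hunk above: it bounds the user-controlled rep.nr_zones before the size multiplication can overflow, and it moves the potentially large zone array from kcalloc() to kvmalloc() so the allocation can fall back to vmalloc instead of failing on a high-order page request; kvfree() releases either kind of allocation. A sketch of the pattern, using a hypothetical helper name:

	#include <linux/blkdev.h>
	#include <linux/kernel.h>
	#include <linux/mm.h>

	/* Hypothetical wrapper showing the overflow-guarded kvmalloc()
	 * pattern; callers must release the array with kvfree(). */
	static struct blk_zone *alloc_zone_report(unsigned int nr_zones)
	{
		if (!nr_zones || nr_zones > INT_MAX / sizeof(struct blk_zone))
			return NULL;

		return kvmalloc(nr_zones * sizeof(struct blk_zone),
				GFP_KERNEL | __GFP_ZERO);
	}
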
711     diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
712     index 7207a535942d..d67667970f7e 100644
713     --- a/drivers/crypto/caam/caamalg.c
714     +++ b/drivers/crypto/caam/caamalg.c
715     @@ -769,15 +769,18 @@ struct aead_edesc {
716     * @src_nents: number of segments in input s/w scatterlist
717     * @dst_nents: number of segments in output s/w scatterlist
718     * @iv_dma: dma address of iv for checking continuity and link table
719     + * @iv_dir: DMA mapping direction for IV
720     * @sec4_sg_bytes: length of dma mapped sec4_sg space
721     * @sec4_sg_dma: bus physical mapped address of h/w link table
722     * @sec4_sg: pointer to h/w link table
723     * @hw_desc: the h/w job descriptor followed by any referenced link tables
724     + * and IV
725     */
726     struct ablkcipher_edesc {
727     int src_nents;
728     int dst_nents;
729     dma_addr_t iv_dma;
730     + enum dma_data_direction iv_dir;
731     int sec4_sg_bytes;
732     dma_addr_t sec4_sg_dma;
733     struct sec4_sg_entry *sec4_sg;
734     @@ -787,7 +790,8 @@ struct ablkcipher_edesc {
735     static void caam_unmap(struct device *dev, struct scatterlist *src,
736     struct scatterlist *dst, int src_nents,
737     int dst_nents,
738     - dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
739     + dma_addr_t iv_dma, int ivsize,
740     + enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
741     int sec4_sg_bytes)
742     {
743     if (dst != src) {
744     @@ -799,7 +803,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
745     }
746    
747     if (iv_dma)
748     - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
749     + dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
750     if (sec4_sg_bytes)
751     dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
752     DMA_TO_DEVICE);
753     @@ -810,7 +814,7 @@ static void aead_unmap(struct device *dev,
754     struct aead_request *req)
755     {
756     caam_unmap(dev, req->src, req->dst,
757     - edesc->src_nents, edesc->dst_nents, 0, 0,
758     + edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
759     edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
760     }
761    
762     @@ -823,7 +827,7 @@ static void ablkcipher_unmap(struct device *dev,
763    
764     caam_unmap(dev, req->src, req->dst,
765     edesc->src_nents, edesc->dst_nents,
766     - edesc->iv_dma, ivsize,
767     + edesc->iv_dma, ivsize, edesc->iv_dir,
768     edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
769     }
770    
771     @@ -912,6 +916,18 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
772     scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
773     ivsize, 0);
774    
775     + /* In case initial IV was generated, copy it in GIVCIPHER request */
776     + if (edesc->iv_dir == DMA_FROM_DEVICE) {
777     + u8 *iv;
778     + struct skcipher_givcrypt_request *greq;
779     +
780     + greq = container_of(req, struct skcipher_givcrypt_request,
781     + creq);
782     + iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
783     + edesc->sec4_sg_bytes;
784     + memcpy(greq->giv, iv, ivsize);
785     + }
786     +
787     kfree(edesc);
788    
789     ablkcipher_request_complete(req, err);
790     @@ -922,10 +938,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
791     {
792     struct ablkcipher_request *req = context;
793     struct ablkcipher_edesc *edesc;
794     +#ifdef DEBUG
795     struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
796     int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
797    
798     -#ifdef DEBUG
799     dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
800     #endif
801    
802     @@ -943,14 +959,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
803     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
804    
805     ablkcipher_unmap(jrdev, edesc, req);
806     -
807     - /*
808     - * The crypto API expects us to set the IV (req->info) to the last
809     - * ciphertext block.
810     - */
811     - scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
812     - ivsize, 0);
813     -
814     kfree(edesc);
815    
816     ablkcipher_request_complete(req, err);
817     @@ -1099,15 +1107,14 @@ static void init_authenc_job(struct aead_request *req,
818     */
819     static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
820     struct ablkcipher_edesc *edesc,
821     - struct ablkcipher_request *req,
822     - bool iv_contig)
823     + struct ablkcipher_request *req)
824     {
825     struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
826     int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
827     u32 *desc = edesc->hw_desc;
828     - u32 out_options = 0, in_options;
829     - dma_addr_t dst_dma, src_dma;
830     - int len, sec4_sg_index = 0;
831     + u32 out_options = 0;
832     + dma_addr_t dst_dma;
833     + int len;
834    
835     #ifdef DEBUG
836     print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
837     @@ -1123,30 +1130,18 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
838     len = desc_len(sh_desc);
839     init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
840    
841     - if (iv_contig) {
842     - src_dma = edesc->iv_dma;
843     - in_options = 0;
844     - } else {
845     - src_dma = edesc->sec4_sg_dma;
846     - sec4_sg_index += edesc->src_nents + 1;
847     - in_options = LDST_SGF;
848     - }
849     - append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
850     + append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
851     + LDST_SGF);
852    
853     if (likely(req->src == req->dst)) {
854     - if (edesc->src_nents == 1 && iv_contig) {
855     - dst_dma = sg_dma_address(req->src);
856     - } else {
857     - dst_dma = edesc->sec4_sg_dma +
858     - sizeof(struct sec4_sg_entry);
859     - out_options = LDST_SGF;
860     - }
861     + dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
862     + out_options = LDST_SGF;
863     } else {
864     if (edesc->dst_nents == 1) {
865     dst_dma = sg_dma_address(req->dst);
866     } else {
867     - dst_dma = edesc->sec4_sg_dma +
868     - sec4_sg_index * sizeof(struct sec4_sg_entry);
869     + dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
870     + sizeof(struct sec4_sg_entry);
871     out_options = LDST_SGF;
872     }
873     }
874     @@ -1158,13 +1153,12 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
875     */
876     static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
877     struct ablkcipher_edesc *edesc,
878     - struct ablkcipher_request *req,
879     - bool iv_contig)
880     + struct ablkcipher_request *req)
881     {
882     struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
883     int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
884     u32 *desc = edesc->hw_desc;
885     - u32 out_options, in_options;
886     + u32 in_options;
887     dma_addr_t dst_dma, src_dma;
888     int len, sec4_sg_index = 0;
889    
890     @@ -1190,15 +1184,9 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
891     }
892     append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
893    
894     - if (iv_contig) {
895     - dst_dma = edesc->iv_dma;
896     - out_options = 0;
897     - } else {
898     - dst_dma = edesc->sec4_sg_dma +
899     - sec4_sg_index * sizeof(struct sec4_sg_entry);
900     - out_options = LDST_SGF;
901     - }
902     - append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
903     + dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
904     + sizeof(struct sec4_sg_entry);
905     + append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
906     }
907    
908     /*
909     @@ -1287,7 +1275,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
910     GFP_DMA | flags);
911     if (!edesc) {
912     caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
913     - 0, 0, 0);
914     + 0, DMA_NONE, 0, 0);
915     return ERR_PTR(-ENOMEM);
916     }
917    
918     @@ -1491,8 +1479,7 @@ static int aead_decrypt(struct aead_request *req)
919     * allocate and map the ablkcipher extended descriptor for ablkcipher
920     */
921     static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
922     - *req, int desc_bytes,
923     - bool *iv_contig_out)
924     + *req, int desc_bytes)
925     {
926     struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
927     struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
928     @@ -1501,8 +1488,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
929     GFP_KERNEL : GFP_ATOMIC;
930     int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
931     struct ablkcipher_edesc *edesc;
932     - dma_addr_t iv_dma = 0;
933     - bool in_contig;
934     + dma_addr_t iv_dma;
935     + u8 *iv;
936     int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
937     int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
938    
939     @@ -1546,33 +1533,20 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
940     }
941     }
942    
943     - iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
944     - if (dma_mapping_error(jrdev, iv_dma)) {
945     - dev_err(jrdev, "unable to map IV\n");
946     - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
947     - 0, 0, 0);
948     - return ERR_PTR(-ENOMEM);
949     - }
950     -
951     - if (mapped_src_nents == 1 &&
952     - iv_dma + ivsize == sg_dma_address(req->src)) {
953     - in_contig = true;
954     - sec4_sg_ents = 0;
955     - } else {
956     - in_contig = false;
957     - sec4_sg_ents = 1 + mapped_src_nents;
958     - }
959     + sec4_sg_ents = 1 + mapped_src_nents;
960     dst_sg_idx = sec4_sg_ents;
961     sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
962     sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
963    
964     - /* allocate space for base edesc and hw desc commands, link tables */
965     - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
966     + /*
967     + * allocate space for base edesc and hw desc commands, link tables, IV
968     + */
969     + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
970     GFP_DMA | flags);
971     if (!edesc) {
972     dev_err(jrdev, "could not allocate extended descriptor\n");
973     - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
974     - iv_dma, ivsize, 0, 0);
975     + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
976     + 0, DMA_NONE, 0, 0);
977     return ERR_PTR(-ENOMEM);
978     }
979    
980     @@ -1581,13 +1555,24 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
981     edesc->sec4_sg_bytes = sec4_sg_bytes;
982     edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
983     desc_bytes;
984     + edesc->iv_dir = DMA_TO_DEVICE;
985    
986     - if (!in_contig) {
987     - dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
988     - sg_to_sec4_sg_last(req->src, mapped_src_nents,
989     - edesc->sec4_sg + 1, 0);
990     + /* Make sure IV is located in a DMAable area */
991     + iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
992     + memcpy(iv, req->info, ivsize);
993     +
994     + iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
995     + if (dma_mapping_error(jrdev, iv_dma)) {
996     + dev_err(jrdev, "unable to map IV\n");
997     + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
998     + 0, DMA_NONE, 0, 0);
999     + kfree(edesc);
1000     + return ERR_PTR(-ENOMEM);
1001     }
1002    
1003     + dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1004     + sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
1005     +
1006     if (mapped_dst_nents > 1) {
1007     sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
1008     edesc->sec4_sg + dst_sg_idx, 0);
1009     @@ -1598,7 +1583,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1010     if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1011     dev_err(jrdev, "unable to map S/G table\n");
1012     caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
1013     - iv_dma, ivsize, 0, 0);
1014     + iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
1015     kfree(edesc);
1016     return ERR_PTR(-ENOMEM);
1017     }
1018     @@ -1611,7 +1596,6 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1019     sec4_sg_bytes, 1);
1020     #endif
1021    
1022     - *iv_contig_out = in_contig;
1023     return edesc;
1024     }
1025    
1026     @@ -1621,19 +1605,16 @@ static int ablkcipher_encrypt(struct ablkcipher_request *req)
1027     struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1028     struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1029     struct device *jrdev = ctx->jrdev;
1030     - bool iv_contig;
1031     u32 *desc;
1032     int ret = 0;
1033    
1034     /* allocate extended descriptor */
1035     - edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1036     - CAAM_CMD_SZ, &iv_contig);
1037     + edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
1038     if (IS_ERR(edesc))
1039     return PTR_ERR(edesc);
1040    
1041     /* Create and submit job descriptor*/
1042     - init_ablkcipher_job(ctx->sh_desc_enc,
1043     - ctx->sh_desc_enc_dma, edesc, req, iv_contig);
1044     + init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
1045     #ifdef DEBUG
1046     print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
1047     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1048     @@ -1657,20 +1638,25 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
1049     struct ablkcipher_edesc *edesc;
1050     struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1051     struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1052     + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1053     struct device *jrdev = ctx->jrdev;
1054     - bool iv_contig;
1055     u32 *desc;
1056     int ret = 0;
1057    
1058     /* allocate extended descriptor */
1059     - edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1060     - CAAM_CMD_SZ, &iv_contig);
1061     + edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
1062     if (IS_ERR(edesc))
1063     return PTR_ERR(edesc);
1064    
1065     + /*
1066     + * The crypto API expects us to set the IV (req->info) to the last
1067     + * ciphertext block.
1068     + */
1069     + scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
1070     + ivsize, 0);
1071     +
1072     /* Create and submit job descriptor*/
1073     - init_ablkcipher_job(ctx->sh_desc_dec,
1074     - ctx->sh_desc_dec_dma, edesc, req, iv_contig);
1075     + init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
1076     desc = edesc->hw_desc;
1077     #ifdef DEBUG
1078     print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
1079     @@ -1695,8 +1681,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
1080     */
1081     static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1082     struct skcipher_givcrypt_request *greq,
1083     - int desc_bytes,
1084     - bool *iv_contig_out)
1085     + int desc_bytes)
1086     {
1087     struct ablkcipher_request *req = &greq->creq;
1088     struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1089     @@ -1706,8 +1691,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1090     GFP_KERNEL : GFP_ATOMIC;
1091     int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
1092     struct ablkcipher_edesc *edesc;
1093     - dma_addr_t iv_dma = 0;
1094     - bool out_contig;
1095     + dma_addr_t iv_dma;
1096     + u8 *iv;
1097     int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1098     int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
1099    
1100     @@ -1752,36 +1737,20 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1101     }
1102     }
1103    
1104     - /*
1105     - * Check if iv can be contiguous with source and destination.
1106     - * If so, include it. If not, create scatterlist.
1107     - */
1108     - iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
1109     - if (dma_mapping_error(jrdev, iv_dma)) {
1110     - dev_err(jrdev, "unable to map IV\n");
1111     - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1112     - 0, 0, 0);
1113     - return ERR_PTR(-ENOMEM);
1114     - }
1115     -
1116     sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
1117     dst_sg_idx = sec4_sg_ents;
1118     - if (mapped_dst_nents == 1 &&
1119     - iv_dma + ivsize == sg_dma_address(req->dst)) {
1120     - out_contig = true;
1121     - } else {
1122     - out_contig = false;
1123     - sec4_sg_ents += 1 + mapped_dst_nents;
1124     - }
1125     + sec4_sg_ents += 1 + mapped_dst_nents;
1126    
1127     - /* allocate space for base edesc and hw desc commands, link tables */
1128     + /*
1129     + * allocate space for base edesc and hw desc commands, link tables, IV
1130     + */
1131     sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
1132     - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
1133     + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
1134     GFP_DMA | flags);
1135     if (!edesc) {
1136     dev_err(jrdev, "could not allocate extended descriptor\n");
1137     - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
1138     - iv_dma, ivsize, 0, 0);
1139     + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1140     + 0, DMA_NONE, 0, 0);
1141     return ERR_PTR(-ENOMEM);
1142     }
1143    
1144     @@ -1790,24 +1759,33 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1145     edesc->sec4_sg_bytes = sec4_sg_bytes;
1146     edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1147     desc_bytes;
1148     + edesc->iv_dir = DMA_FROM_DEVICE;
1149     +
1150     + /* Make sure IV is located in a DMAable area */
1151     + iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
1152     + iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
1153     + if (dma_mapping_error(jrdev, iv_dma)) {
1154     + dev_err(jrdev, "unable to map IV\n");
1155     + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
1156     + 0, DMA_NONE, 0, 0);
1157     + kfree(edesc);
1158     + return ERR_PTR(-ENOMEM);
1159     + }
1160    
1161     if (mapped_src_nents > 1)
1162     sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
1163     0);
1164    
1165     - if (!out_contig) {
1166     - dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
1167     - iv_dma, ivsize, 0);
1168     - sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
1169     - edesc->sec4_sg + dst_sg_idx + 1, 0);
1170     - }
1171     + dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
1172     + sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
1173     + dst_sg_idx + 1, 0);
1174    
1175     edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1176     sec4_sg_bytes, DMA_TO_DEVICE);
1177     if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1178     dev_err(jrdev, "unable to map S/G table\n");
1179     caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
1180     - iv_dma, ivsize, 0, 0);
1181     + iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
1182     kfree(edesc);
1183     return ERR_PTR(-ENOMEM);
1184     }
1185     @@ -1820,7 +1798,6 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1186     sec4_sg_bytes, 1);
1187     #endif
1188    
1189     - *iv_contig_out = out_contig;
1190     return edesc;
1191     }
1192    
1193     @@ -1831,19 +1808,17 @@ static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
1194     struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1195     struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1196     struct device *jrdev = ctx->jrdev;
1197     - bool iv_contig = false;
1198     u32 *desc;
1199     int ret = 0;
1200    
1201     /* allocate extended descriptor */
1202     - edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
1203     - CAAM_CMD_SZ, &iv_contig);
1204     + edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
1205     if (IS_ERR(edesc))
1206     return PTR_ERR(edesc);
1207    
1208     /* Create and submit job descriptor*/
1209     init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
1210     - edesc, req, iv_contig);
1211     + edesc, req);
1212     #ifdef DEBUG
1213     print_hex_dump(KERN_ERR,
1214     "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
1215     diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
1216     index cacda0831390..6e61cc93c2b0 100644
1217     --- a/drivers/crypto/caam/caamalg_qi.c
1218     +++ b/drivers/crypto/caam/caamalg_qi.c
1219     @@ -728,7 +728,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
1220     * @assoclen: associated data length, in CAAM endianness
1221     * @assoclen_dma: bus physical mapped address of req->assoclen
1222     * @drv_req: driver-specific request structure
1223     - * @sgt: the h/w link table
1224     + * @sgt: the h/w link table, followed by IV
1225     */
1226     struct aead_edesc {
1227     int src_nents;
1228     @@ -739,9 +739,6 @@ struct aead_edesc {
1229     unsigned int assoclen;
1230     dma_addr_t assoclen_dma;
1231     struct caam_drv_req drv_req;
1232     -#define CAAM_QI_MAX_AEAD_SG \
1233     - ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
1234     - sizeof(struct qm_sg_entry))
1235     struct qm_sg_entry sgt[0];
1236     };
1237    
1238     @@ -753,7 +750,7 @@ struct aead_edesc {
1239     * @qm_sg_bytes: length of dma mapped h/w link table
1240     * @qm_sg_dma: bus physical mapped address of h/w link table
1241     * @drv_req: driver-specific request structure
1242     - * @sgt: the h/w link table
1243     + * @sgt: the h/w link table, followed by IV
1244     */
1245     struct ablkcipher_edesc {
1246     int src_nents;
1247     @@ -762,9 +759,6 @@ struct ablkcipher_edesc {
1248     int qm_sg_bytes;
1249     dma_addr_t qm_sg_dma;
1250     struct caam_drv_req drv_req;
1251     -#define CAAM_QI_MAX_ABLKCIPHER_SG \
1252     - ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
1253     - sizeof(struct qm_sg_entry))
1254     struct qm_sg_entry sgt[0];
1255     };
1256    
1257     @@ -986,17 +980,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1258     }
1259     }
1260    
1261     - if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
1262     + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
1263     ivsize = crypto_aead_ivsize(aead);
1264     - iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
1265     - if (dma_mapping_error(qidev, iv_dma)) {
1266     - dev_err(qidev, "unable to map IV\n");
1267     - caam_unmap(qidev, req->src, req->dst, src_nents,
1268     - dst_nents, 0, 0, op_type, 0, 0);
1269     - qi_cache_free(edesc);
1270     - return ERR_PTR(-ENOMEM);
1271     - }
1272     - }
1273    
1274     /*
1275     * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
1276     @@ -1004,16 +989,33 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1277     */
1278     qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
1279     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
1280     - if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
1281     - dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
1282     - qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
1283     - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1284     - iv_dma, ivsize, op_type, 0, 0);
1285     + sg_table = &edesc->sgt[0];
1286     + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
1287     + if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
1288     + CAAM_QI_MEMCACHE_SIZE)) {
1289     + dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1290     + qm_sg_ents, ivsize);
1291     + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1292     + 0, 0, 0, 0);
1293     qi_cache_free(edesc);
1294     return ERR_PTR(-ENOMEM);
1295     }
1296     - sg_table = &edesc->sgt[0];
1297     - qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
1298     +
1299     + if (ivsize) {
1300     + u8 *iv = (u8 *)(sg_table + qm_sg_ents);
1301     +
1302     + /* Make sure IV is located in a DMAable area */
1303     + memcpy(iv, req->iv, ivsize);
1304     +
1305     + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
1306     + if (dma_mapping_error(qidev, iv_dma)) {
1307     + dev_err(qidev, "unable to map IV\n");
1308     + caam_unmap(qidev, req->src, req->dst, src_nents,
1309     + dst_nents, 0, 0, 0, 0, 0);
1310     + qi_cache_free(edesc);
1311     + return ERR_PTR(-ENOMEM);
1312     + }
1313     + }
1314    
1315     edesc->src_nents = src_nents;
1316     edesc->dst_nents = dst_nents;
1317     @@ -1166,15 +1168,27 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
1318     #endif
1319    
1320     ablkcipher_unmap(qidev, edesc, req);
1321     - qi_cache_free(edesc);
1322     +
1323     + /* In case initial IV was generated, copy it in GIVCIPHER request */
1324     + if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
1325     + u8 *iv;
1326     + struct skcipher_givcrypt_request *greq;
1327     +
1328     + greq = container_of(req, struct skcipher_givcrypt_request,
1329     + creq);
1330     + iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
1331     + memcpy(greq->giv, iv, ivsize);
1332     + }
1333    
1334     /*
1335     * The crypto API expects us to set the IV (req->info) to the last
1336     * ciphertext block. This is used e.g. by the CTS mode.
1337     */
1338     - scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
1339     - ivsize, 0);
1340     + if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
1341     + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
1342     + ivsize, ivsize, 0);
1343    
1344     + qi_cache_free(edesc);
1345     ablkcipher_request_complete(req, status);
1346     }
1347    
1348     @@ -1189,9 +1203,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1349     int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1350     struct ablkcipher_edesc *edesc;
1351     dma_addr_t iv_dma;
1352     - bool in_contig;
1353     + u8 *iv;
1354     int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1355     - int dst_sg_idx, qm_sg_ents;
1356     + int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1357     struct qm_sg_entry *sg_table, *fd_sgt;
1358     struct caam_drv_ctx *drv_ctx;
1359     enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
1360     @@ -1238,55 +1252,53 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1361     }
1362     }
1363    
1364     - iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
1365     - if (dma_mapping_error(qidev, iv_dma)) {
1366     - dev_err(qidev, "unable to map IV\n");
1367     - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1368     - 0, 0, 0, 0);
1369     - return ERR_PTR(-ENOMEM);
1370     - }
1371     -
1372     - if (mapped_src_nents == 1 &&
1373     - iv_dma + ivsize == sg_dma_address(req->src)) {
1374     - in_contig = true;
1375     - qm_sg_ents = 0;
1376     - } else {
1377     - in_contig = false;
1378     - qm_sg_ents = 1 + mapped_src_nents;
1379     - }
1380     + qm_sg_ents = 1 + mapped_src_nents;
1381     dst_sg_idx = qm_sg_ents;
1382    
1383     qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
1384     - if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
1385     - dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
1386     - qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
1387     - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1388     - iv_dma, ivsize, op_type, 0, 0);
1389     + qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
1390     + if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
1391     + ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1392     + dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1393     + qm_sg_ents, ivsize);
1394     + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1395     + 0, 0, 0, 0);
1396     return ERR_PTR(-ENOMEM);
1397     }
1398    
1399     - /* allocate space for base edesc and link tables */
1400     + /* allocate space for base edesc, link tables and IV */
1401     edesc = qi_cache_alloc(GFP_DMA | flags);
1402     if (unlikely(!edesc)) {
1403     dev_err(qidev, "could not allocate extended descriptor\n");
1404     - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1405     - iv_dma, ivsize, op_type, 0, 0);
1406     + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1407     + 0, 0, 0, 0);
1408     + return ERR_PTR(-ENOMEM);
1409     + }
1410     +
1411     + /* Make sure IV is located in a DMAable area */
1412     + sg_table = &edesc->sgt[0];
1413     + iv = (u8 *)(sg_table + qm_sg_ents);
1414     + memcpy(iv, req->info, ivsize);
1415     +
1416     + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
1417     + if (dma_mapping_error(qidev, iv_dma)) {
1418     + dev_err(qidev, "unable to map IV\n");
1419     + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1420     + 0, 0, 0, 0);
1421     + qi_cache_free(edesc);
1422     return ERR_PTR(-ENOMEM);
1423     }
1424    
1425     edesc->src_nents = src_nents;
1426     edesc->dst_nents = dst_nents;
1427     edesc->iv_dma = iv_dma;
1428     - sg_table = &edesc->sgt[0];
1429     - edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
1430     + edesc->qm_sg_bytes = qm_sg_bytes;
1431     edesc->drv_req.app_ctx = req;
1432     edesc->drv_req.cbk = ablkcipher_done;
1433     edesc->drv_req.drv_ctx = drv_ctx;
1434    
1435     - if (!in_contig) {
1436     - dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1437     - sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
1438     - }
1439     + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1440     + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
1441    
1442     if (mapped_dst_nents > 1)
1443     sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1444     @@ -1304,20 +1316,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1445    
1446     fd_sgt = &edesc->drv_req.fd_sgt[0];
1447    
1448     - if (!in_contig)
1449     - dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
1450     - ivsize + req->nbytes, 0);
1451     - else
1452     - dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
1453     - 0);
1454     + dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
1455     + ivsize + req->nbytes, 0);
1456    
1457     if (req->src == req->dst) {
1458     - if (!in_contig)
1459     - dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
1460     - sizeof(*sg_table), req->nbytes, 0);
1461     - else
1462     - dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
1463     - req->nbytes, 0);
1464     + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
1465     + sizeof(*sg_table), req->nbytes, 0);
1466     } else if (mapped_dst_nents > 1) {
1467     dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1468     sizeof(*sg_table), req->nbytes, 0);
1469     @@ -1341,10 +1345,10 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1470     int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
1471     struct ablkcipher_edesc *edesc;
1472     dma_addr_t iv_dma;
1473     - bool out_contig;
1474     + u8 *iv;
1475     int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1476     struct qm_sg_entry *sg_table, *fd_sgt;
1477     - int dst_sg_idx, qm_sg_ents;
1478     + int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1479     struct caam_drv_ctx *drv_ctx;
1480    
1481     drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
1482     @@ -1392,46 +1396,45 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1483     mapped_dst_nents = src_nents;
1484     }
1485    
1486     - iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
1487     - if (dma_mapping_error(qidev, iv_dma)) {
1488     - dev_err(qidev, "unable to map IV\n");
1489     - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1490     - 0, 0, 0, 0);
1491     - return ERR_PTR(-ENOMEM);
1492     - }
1493     -
1494     qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
1495     dst_sg_idx = qm_sg_ents;
1496     - if (mapped_dst_nents == 1 &&
1497     - iv_dma + ivsize == sg_dma_address(req->dst)) {
1498     - out_contig = true;
1499     - } else {
1500     - out_contig = false;
1501     - qm_sg_ents += 1 + mapped_dst_nents;
1502     - }
1503    
1504     - if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
1505     - dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
1506     - qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
1507     - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1508     - iv_dma, ivsize, GIVENCRYPT, 0, 0);
1509     + qm_sg_ents += 1 + mapped_dst_nents;
1510     + qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
1511     + if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
1512     + ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1513     + dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
1514     + qm_sg_ents, ivsize);
1515     + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1516     + 0, 0, 0, 0);
1517     return ERR_PTR(-ENOMEM);
1518     }
1519    
1520     - /* allocate space for base edesc and link tables */
1521     + /* allocate space for base edesc, link tables and IV */
1522     edesc = qi_cache_alloc(GFP_DMA | flags);
1523     if (!edesc) {
1524     dev_err(qidev, "could not allocate extended descriptor\n");
1525     - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
1526     - iv_dma, ivsize, GIVENCRYPT, 0, 0);
1527     + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1528     + 0, 0, 0, 0);
1529     + return ERR_PTR(-ENOMEM);
1530     + }
1531     +
1532     + /* Make sure IV is located in a DMAable area */
1533     + sg_table = &edesc->sgt[0];
1534     + iv = (u8 *)(sg_table + qm_sg_ents);
1535     + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
1536     + if (dma_mapping_error(qidev, iv_dma)) {
1537     + dev_err(qidev, "unable to map IV\n");
1538     + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
1539     + 0, 0, 0, 0);
1540     + qi_cache_free(edesc);
1541     return ERR_PTR(-ENOMEM);
1542     }
1543    
1544     edesc->src_nents = src_nents;
1545     edesc->dst_nents = dst_nents;
1546     edesc->iv_dma = iv_dma;
1547     - sg_table = &edesc->sgt[0];
1548     - edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
1549     + edesc->qm_sg_bytes = qm_sg_bytes;
1550     edesc->drv_req.app_ctx = req;
1551     edesc->drv_req.cbk = ablkcipher_done;
1552     edesc->drv_req.drv_ctx = drv_ctx;
1553     @@ -1439,11 +1442,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1554     if (mapped_src_nents > 1)
1555     sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
1556    
1557     - if (!out_contig) {
1558     - dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
1559     - sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
1560     - dst_sg_idx + 1, 0);
1561     - }
1562     + dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
1563     + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
1564     + 0);
1565    
1566     edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
1567     DMA_TO_DEVICE);
1568     @@ -1464,13 +1465,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
1569     dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
1570     req->nbytes, 0);
1571    
1572     - if (!out_contig)
1573     - dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1574     - sizeof(*sg_table), ivsize + req->nbytes,
1575     - 0);
1576     - else
1577     - dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
1578     - ivsize + req->nbytes, 0);
1579     + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
1580     + sizeof(*sg_table), ivsize + req->nbytes, 0);
1581    
1582     return edesc;
1583     }
1584     @@ -1480,6 +1476,7 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
1585     struct ablkcipher_edesc *edesc;
1586     struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1587     struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1588     + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1589     int ret;
1590    
1591     if (unlikely(caam_congested))
1592     @@ -1490,6 +1487,14 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
1593     if (IS_ERR(edesc))
1594     return PTR_ERR(edesc);
1595    
1596     + /*
1597     + * The crypto API expects us to set the IV (req->info) to the last
1598     + * ciphertext block.
1599     + */
1600     + if (!encrypt)
1601     + scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
1602     + ivsize, ivsize, 0);
1603     +
1604     ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
1605     if (!ret) {
1606     ret = -EINPROGRESS;
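All of the caamalg_qi.c hunks above implement one idea: req->iv may live on a caller's stack or in vmalloc'ed memory, neither of which is safe to hand to dma_map_single(), so the driver now copies the IV into the qi_cache-backed edesc itself, directly after the S/G table, and rejects any request whose combined S/G-table-plus-IV footprint would overflow CAAM_QI_MEMCACHE_SIZE (which is also why the CAAM_QI_MAX_*_SG macros disappear). A minimal sketch of the layout arithmetic, with an illustrative struct and pool size rather than the driver's exact definitions:

    /*
     * Sketch: colocating the IV after the S/G table inside a single
     * DMA-able allocation. Struct layout and pool size are
     * illustrative, not the driver's exact definitions.
     */
    #include <linux/stddef.h>
    #include <linux/types.h>

    #define MEMCACHE_SIZE 768		/* illustrative pool object size */

    struct sg_entry { u64 addr; u32 len; u32 flags; };

    struct edesc {
    	int qm_sg_bytes;		/* ...other bookkeeping elided... */
    	struct sg_entry sgt[];		/* S/G table, then IV, back to back */
    };

    /* Reject the request before allocating or mapping anything. */
    static bool edesc_fits(int qm_sg_ents, int ivsize)
    {
    	size_t need = offsetof(struct edesc, sgt)
    		    + qm_sg_ents * sizeof(struct sg_entry)
    		    + ivsize;

    	return need <= MEMCACHE_SIZE;
    }

    /* The IV starts immediately after the last S/G entry. */
    static u8 *edesc_iv(struct edesc *e, int qm_sg_ents)
    {
    	return (u8 *)(e->sgt + qm_sg_ents);
    }

Note the ordering: the size check now runs before allocation and mapping, so the error paths call caam_unmap() with iv_dma/ivsize of 0 -- at that point there is nothing IV-related to undo.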
1607     diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
1608     index 7a897209f181..7ff4a25440ac 100644
1609     --- a/drivers/crypto/caam/caampkc.c
1610     +++ b/drivers/crypto/caam/caampkc.c
1611     @@ -66,7 +66,7 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
1612     struct caam_rsa_key *key = &ctx->key;
1613     struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
1614     size_t p_sz = key->p_sz;
1615     - size_t q_sz = key->p_sz;
1616     + size_t q_sz = key->q_sz;
1617    
1618     dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
1619     dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
1620     @@ -83,7 +83,7 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
1621     struct caam_rsa_key *key = &ctx->key;
1622     struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
1623     size_t p_sz = key->p_sz;
1624     - size_t q_sz = key->p_sz;
1625     + size_t q_sz = key->q_sz;
1626    
1627     dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
1628     dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
1629     @@ -166,18 +166,71 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
1630     akcipher_request_complete(req, err);
1631     }
1632    
1633     +static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
1634     + unsigned int nbytes,
1635     + unsigned int flags)
1636     +{
1637     + struct sg_mapping_iter miter;
1638     + int lzeros, ents;
1639     + unsigned int len;
1640     + unsigned int tbytes = nbytes;
1641     + const u8 *buff;
1642     +
1643     + ents = sg_nents_for_len(sgl, nbytes);
1644     + if (ents < 0)
1645     + return ents;
1646     +
1647     + sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);
1648     +
1649     + lzeros = 0;
1650     + len = 0;
1651     + while (nbytes > 0) {
1652     + while (len && !*buff) {
1653     + lzeros++;
1654     + len--;
1655     + buff++;
1656     + }
1657     +
1658     + if (len && *buff)
1659     + break;
1660     +
1661     + sg_miter_next(&miter);
1662     + buff = miter.addr;
1663     + len = miter.length;
1664     +
1665     + nbytes -= lzeros;
1666     + lzeros = 0;
1667     + }
1668     +
1669     + miter.consumed = lzeros;
1670     + sg_miter_stop(&miter);
1671     + nbytes -= lzeros;
1672     +
1673     + return tbytes - nbytes;
1674     +}
1675     +
1676     static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
1677     size_t desclen)
1678     {
1679     struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
1680     struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
1681     struct device *dev = ctx->dev;
1682     + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
1683     struct rsa_edesc *edesc;
1684     gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1685     GFP_KERNEL : GFP_ATOMIC;
1686     + int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
1687     int sgc;
1688     int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1689     int src_nents, dst_nents;
1690     + int lzeros;
1691     +
1692     + lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
1693     + if (lzeros < 0)
1694     + return ERR_PTR(lzeros);
1695     +
1696     + req->src_len -= lzeros;
1697     + req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
1698    
1699     src_nents = sg_nents_for_len(req->src, req->src_len);
1700     dst_nents = sg_nents_for_len(req->dst, req->dst_len);
1701     @@ -344,7 +397,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
1702     struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
1703     int sec4_sg_index = 0;
1704     size_t p_sz = key->p_sz;
1705     - size_t q_sz = key->p_sz;
1706     + size_t q_sz = key->q_sz;
1707    
1708     pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
1709     if (dma_mapping_error(dev, pdb->d_dma)) {
1710     @@ -419,7 +472,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
1711     struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
1712     int sec4_sg_index = 0;
1713     size_t p_sz = key->p_sz;
1714     - size_t q_sz = key->p_sz;
1715     + size_t q_sz = key->q_sz;
1716    
1717     pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
1718     if (dma_mapping_error(dev, pdb->p_dma)) {
1719     @@ -953,6 +1006,7 @@ static struct akcipher_alg caam_rsa = {
1720     .max_size = caam_rsa_max_size,
1721     .init = caam_rsa_init_tfm,
1722     .exit = caam_rsa_exit_tfm,
1723     + .reqsize = sizeof(struct caam_rsa_req_ctx),
1724     .base = {
1725     .cra_name = "rsa",
1726     .cra_driver_name = "rsa-caam",
1727     diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
1728     index fd145c46eae1..82645bcf8b27 100644
1729     --- a/drivers/crypto/caam/caampkc.h
1730     +++ b/drivers/crypto/caam/caampkc.h
1731     @@ -95,6 +95,14 @@ struct caam_rsa_ctx {
1732     struct device *dev;
1733     };
1734    
1735     +/**
1736     + * caam_rsa_req_ctx - per request context.
1737     + * @src: input scatterlist (stripped of leading zeros)
1738     + */
1739     +struct caam_rsa_req_ctx {
1740     + struct scatterlist src[2];
1741     +};
1742     +
1743     /**
1744     * rsa_edesc - s/w-extended rsa descriptor
1745     * @src_nents : number of segments in input scatterlist
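Two separate caampkc fixes land above: a copy-paste bug where q_sz was initialized from key->p_sz in four places, and a new caam_rsa_count_leading_zeros() that strips zero-padding from the input before descriptor construction, since the CAAM engine rejects inputs longer than the modulus. The sg_miter walk is a scatterlist-aware version of a plain leading-zero scan; the same logic on a flat buffer, as a hypothetical helper:

    /*
     * Sketch: counting leading zero bytes in a flat buffer.
     * caam_rsa_count_leading_zeros() performs the same scan across a
     * scatterlist using sg_miter_start()/sg_miter_next().
     */
    #include <linux/types.h>

    static size_t count_leading_zeros(const u8 *buf, size_t len)
    {
    	size_t lzeros = 0;

    	while (lzeros < len && !buf[lzeros])
    		lzeros++;

    	return lzeros;
    }

After counting, req->src is fast-forwarded past the zeros with scatterwalk_ffwd() into the new per-request caam_rsa_req_ctx::src table, which is why the patch also has to set .reqsize on the akcipher_alg.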
1746     diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
1747     index dc451e0a43c5..58fb3ed6e644 100644
1748     --- a/drivers/crypto/cavium/zip/common.h
1749     +++ b/drivers/crypto/cavium/zip/common.h
1750     @@ -46,8 +46,10 @@
1751     #ifndef __COMMON_H__
1752     #define __COMMON_H__
1753    
1754     +#include <linux/delay.h>
1755     #include <linux/init.h>
1756     #include <linux/interrupt.h>
1757     +#include <linux/io.h>
1758     #include <linux/kernel.h>
1759     #include <linux/module.h>
1760     #include <linux/pci.h>
1761     @@ -149,6 +151,25 @@ struct zip_operation {
1762     u32 sizeofzops;
1763     };
1764    
1765     +static inline int zip_poll_result(union zip_zres_s *result)
1766     +{
1767     + int retries = 1000;
1768     +
1769     + while (!result->s.compcode) {
1770     + if (!--retries) {
1771     + pr_err("ZIP ERR: request timed out");
1772     + return -ETIMEDOUT;
1773     + }
1774     + udelay(10);
1775     + /*
1776     + * Force re-reading of compcode which is updated
1777     + * by the ZIP coprocessor.
1778     + */
1779     + rmb();
1780     + }
1781     + return 0;
1782     +}
1783     +
1784     /* error messages */
1785     #define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \
1786     fmt "\n", __func__, __LINE__, ## args)
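zip_poll_result() above replaces the unbounded `while (!result_ptr->s.compcode) continue;` spins removed further down in zip_deflate.c and zip_inflate.c. The idiom is worth noting: a fixed retry budget, a short delay per iteration, and a read barrier so the compcode field written by the coprocessor is actually re-read instead of being hoisted out of the loop. A generic sketch of the same pattern (names hypothetical):

    /*
     * Sketch: bounded polling on a device-written completion word.
     * The rmb() matters: it is also a compiler barrier, so the load
     * cannot be hoisted out of the loop and spun on a stale value.
     */
    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <linux/types.h>
    #include <asm/barrier.h>

    static int poll_completion(u32 *compcode)
    {
    	int retries = 1000;		/* 1000 * 10us = ~10ms budget */

    	while (!*compcode) {
    		if (!--retries) {
    			pr_err("poll: request timed out\n");
    			return -ETIMEDOUT;
    		}
    		udelay(10);
    		rmb();			/* force re-read of device memory */
    	}

    	return 0;
    }

One caveat visible in the later hunks: the callers still ignore zip_poll_result()'s return value, so a timeout is logged but the request otherwise proceeds as if it had completed.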
1787     diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
1788     index 8df4d26cf9d4..b92b6e7e100f 100644
1789     --- a/drivers/crypto/cavium/zip/zip_crypto.c
1790     +++ b/drivers/crypto/cavium/zip/zip_crypto.c
1791     @@ -124,7 +124,7 @@ int zip_compress(const u8 *src, unsigned int slen,
1792     struct zip_kernel_ctx *zip_ctx)
1793     {
1794     struct zip_operation *zip_ops = NULL;
1795     - struct zip_state zip_state;
1796     + struct zip_state *zip_state;
1797     struct zip_device *zip = NULL;
1798     int ret;
1799    
1800     @@ -135,20 +135,23 @@ int zip_compress(const u8 *src, unsigned int slen,
1801     if (!zip)
1802     return -ENODEV;
1803    
1804     - memset(&zip_state, 0, sizeof(struct zip_state));
1805     + zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
1806     + if (!zip_state)
1807     + return -ENOMEM;
1808     +
1809     zip_ops = &zip_ctx->zip_comp;
1810    
1811     zip_ops->input_len = slen;
1812     zip_ops->output_len = *dlen;
1813     memcpy(zip_ops->input, src, slen);
1814    
1815     - ret = zip_deflate(zip_ops, &zip_state, zip);
1816     + ret = zip_deflate(zip_ops, zip_state, zip);
1817    
1818     if (!ret) {
1819     *dlen = zip_ops->output_len;
1820     memcpy(dst, zip_ops->output, *dlen);
1821     }
1822     -
1823     + kfree(zip_state);
1824     return ret;
1825     }
1826    
1827     @@ -157,7 +160,7 @@ int zip_decompress(const u8 *src, unsigned int slen,
1828     struct zip_kernel_ctx *zip_ctx)
1829     {
1830     struct zip_operation *zip_ops = NULL;
1831     - struct zip_state zip_state;
1832     + struct zip_state *zip_state;
1833     struct zip_device *zip = NULL;
1834     int ret;
1835    
1836     @@ -168,7 +171,10 @@ int zip_decompress(const u8 *src, unsigned int slen,
1837     if (!zip)
1838     return -ENODEV;
1839    
1840     - memset(&zip_state, 0, sizeof(struct zip_state));
1841     + zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
1842     + if (!zip_state)
1843     + return -ENOMEM;
1844     +
1845     zip_ops = &zip_ctx->zip_decomp;
1846     memcpy(zip_ops->input, src, slen);
1847    
1848     @@ -179,13 +185,13 @@ int zip_decompress(const u8 *src, unsigned int slen,
1849     zip_ops->input_len = slen;
1850     zip_ops->output_len = *dlen;
1851    
1852     - ret = zip_inflate(zip_ops, &zip_state, zip);
1853     + ret = zip_inflate(zip_ops, zip_state, zip);
1854    
1855     if (!ret) {
1856     *dlen = zip_ops->output_len;
1857     memcpy(dst, zip_ops->output, *dlen);
1858     }
1859     -
1860     + kfree(zip_state);
1861     return ret;
1862     }
1863    
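The zip_compress()/zip_decompress() changes above move struct zip_state, large enough to be risky on a kernel stack, from a stack variable to kzalloc(), using GFP_ATOMIC because the crypto API may call these paths in atomic context. The shape of the fix, with an illustrative struct size:

    /*
     * Sketch: moving large per-request state off the kernel stack.
     * GFP_ATOMIC because crypto callers may hold locks or run in
     * softirq context; the struct size is illustrative.
     */
    #include <linux/slab.h>
    #include <linux/types.h>

    struct big_state { u8 buffers[2048]; };

    static int do_request(void)
    {
    	struct big_state *s = kzalloc(sizeof(*s), GFP_ATOMIC);
    	int ret;

    	if (!s)
    		return -ENOMEM;

    	ret = 0;	/* ... submit to hardware using *s ... */

    	kfree(s);
    	return ret;
    }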
1864     diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c
1865     index 9a944b8c1e29..d7133f857d67 100644
1866     --- a/drivers/crypto/cavium/zip/zip_deflate.c
1867     +++ b/drivers/crypto/cavium/zip/zip_deflate.c
1868     @@ -129,8 +129,8 @@ int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
1869     /* Stats update for compression requests submitted */
1870     atomic64_inc(&zip_dev->stats.comp_req_submit);
1871    
1872     - while (!result_ptr->s.compcode)
1873     - continue;
1874     + /* Wait for completion or error */
1875     + zip_poll_result(result_ptr);
1876    
1877     /* Stats update for compression requests completed */
1878     atomic64_inc(&zip_dev->stats.comp_req_complete);
1879     diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c
1880     index 50cbdd83dbf2..7e0d73e2f89e 100644
1881     --- a/drivers/crypto/cavium/zip/zip_inflate.c
1882     +++ b/drivers/crypto/cavium/zip/zip_inflate.c
1883     @@ -143,8 +143,8 @@ int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
1884     /* Decompression requests submitted stats update */
1885     atomic64_inc(&zip_dev->stats.decomp_req_submit);
1886    
1887     - while (!result_ptr->s.compcode)
1888     - continue;
1889     + /* Wait for completion or error */
1890     + zip_poll_result(result_ptr);
1891    
1892     /* Decompression requests completed stats update */
1893     atomic64_inc(&zip_dev->stats.decomp_req_complete);
1894     diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
1895     index 08f8db489cf0..5ca184e42483 100644
1896     --- a/drivers/crypto/ccree/cc_debugfs.c
1897     +++ b/drivers/crypto/ccree/cc_debugfs.c
1898     @@ -26,7 +26,8 @@ struct cc_debugfs_ctx {
1899     static struct dentry *cc_debugfs_dir;
1900    
1901     static struct debugfs_reg32 debug_regs[] = {
1902     - CC_DEBUG_REG(HOST_SIGNATURE),
1903     + { .name = "SIGNATURE" }, /* Must be 0th */
1904     + { .name = "VERSION" }, /* Must be 1st */
1905     CC_DEBUG_REG(HOST_IRR),
1906     CC_DEBUG_REG(HOST_POWER_DOWN_EN),
1907     CC_DEBUG_REG(AXIM_MON_ERR),
1908     @@ -34,7 +35,6 @@ static struct debugfs_reg32 debug_regs[] = {
1909     CC_DEBUG_REG(HOST_IMR),
1910     CC_DEBUG_REG(AXIM_CFG),
1911     CC_DEBUG_REG(AXIM_CACHE_PARAMS),
1912     - CC_DEBUG_REG(HOST_VERSION),
1913     CC_DEBUG_REG(GPR_HOST),
1914     CC_DEBUG_REG(AXIM_MON_COMP),
1915     };
1916     @@ -58,6 +58,9 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
1917     struct debugfs_regset32 *regset;
1918     struct dentry *file;
1919    
1920     + debug_regs[0].offset = drvdata->sig_offset;
1921     + debug_regs[1].offset = drvdata->ver_offset;
1922     +
1923     ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1924     if (!ctx)
1925     return -ENOMEM;
1926     diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
1927     index 89ce013ae093..6f93ce7701ec 100644
1928     --- a/drivers/crypto/ccree/cc_driver.c
1929     +++ b/drivers/crypto/ccree/cc_driver.c
1930     @@ -207,9 +207,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
1931     if (hw_rev->rev >= CC_HW_REV_712) {
1932     new_drvdata->hash_len_sz = HASH_LEN_SIZE_712;
1933     new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP);
1934     + new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_712);
1935     + new_drvdata->ver_offset = CC_REG(HOST_VERSION_712);
1936     } else {
1937     new_drvdata->hash_len_sz = HASH_LEN_SIZE_630;
1938     new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8);
1939     + new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_630);
1940     + new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
1941     }
1942    
1943     platform_set_drvdata(plat_dev, new_drvdata);
1944     @@ -276,7 +280,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
1945     }
1946    
1947     /* Verify correct mapping */
1948     - signature_val = cc_ioread(new_drvdata, CC_REG(HOST_SIGNATURE));
1949     + signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
1950     if (signature_val != hw_rev->sig) {
1951     dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
1952     signature_val, hw_rev->sig);
1953     @@ -287,7 +291,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
1954    
1955     /* Display HW versions */
1956     dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
1957     - hw_rev->name, cc_ioread(new_drvdata, CC_REG(HOST_VERSION)),
1958     + hw_rev->name, cc_ioread(new_drvdata, new_drvdata->ver_offset),
1959     DRV_MODULE_VERSION);
1960    
1961     rc = init_cc_regs(new_drvdata, true);
1962     diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
1963     index 2048fdeb9579..95f82b2d1e70 100644
1964     --- a/drivers/crypto/ccree/cc_driver.h
1965     +++ b/drivers/crypto/ccree/cc_driver.h
1966     @@ -129,6 +129,8 @@ struct cc_drvdata {
1967     enum cc_hw_rev hw_rev;
1968     u32 hash_len_sz;
1969     u32 axim_mon_offset;
1970     + u32 sig_offset;
1971     + u32 ver_offset;
1972     };
1973    
1974     struct cc_crypto_alg {
1975     diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
1976     index f51001898ca1..616b2e1c41ba 100644
1977     --- a/drivers/crypto/ccree/cc_host_regs.h
1978     +++ b/drivers/crypto/ccree/cc_host_regs.h
1979     @@ -45,7 +45,8 @@
1980     #define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
1981     #define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
1982     #define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
1983     -#define CC_HOST_SIGNATURE_REG_OFFSET 0xA24UL
1984     +#define CC_HOST_SIGNATURE_712_REG_OFFSET 0xA24UL
1985     +#define CC_HOST_SIGNATURE_630_REG_OFFSET 0xAC8UL
1986     #define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
1987     #define CC_HOST_SIGNATURE_VALUE_BIT_SIZE 0x20UL
1988     #define CC_HOST_BOOT_REG_OFFSET 0xA28UL
1989     @@ -105,7 +106,8 @@
1990     #define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE 0x1UL
1991     #define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT 0x1EUL
1992     #define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE 0x1UL
1993     -#define CC_HOST_VERSION_REG_OFFSET 0xA40UL
1994     +#define CC_HOST_VERSION_712_REG_OFFSET 0xA40UL
1995     +#define CC_HOST_VERSION_630_REG_OFFSET 0xAD8UL
1996     #define CC_HOST_VERSION_VALUE_BIT_SHIFT 0x0UL
1997     #define CC_HOST_VERSION_VALUE_BIT_SIZE 0x20UL
1998     #define CC_HOST_KFDE0_VALID_REG_OFFSET 0xA60UL
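The ccree hunks above cope with the HOST_SIGNATURE/HOST_VERSION registers living at different offsets on 630 versus 712 hardware: the compile-time CC_DEBUG_REG() entries become placeholders patched at probe time from the drvdata. A condensed sketch of the probe-time selection, using the offsets added in the cc_host_regs.h hunk and a stand-in for cc_drvdata:

    /*
     * Sketch: choosing register offsets by hardware revision at probe
     * time instead of baking a single offset in at compile time.
     */
    #include <linux/types.h>

    struct drvdata { u32 sig_offset; u32 ver_offset; };

    static void select_offsets(struct drvdata *d, bool is_712_or_later)
    {
    	if (is_712_or_later) {
    		d->sig_offset = 0xA24;	/* CC_HOST_SIGNATURE_712_REG_OFFSET */
    		d->ver_offset = 0xA40;	/* CC_HOST_VERSION_712_REG_OFFSET */
    	} else {
    		d->sig_offset = 0xAC8;	/* CC_HOST_SIGNATURE_630_REG_OFFSET */
    		d->ver_offset = 0xAD8;	/* CC_HOST_VERSION_630_REG_OFFSET */
    	}
    }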
1999     diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
2000     index 8e0aa3f175c9..461b97e2f1fd 100644
2001     --- a/drivers/crypto/chelsio/chcr_ipsec.c
2002     +++ b/drivers/crypto/chelsio/chcr_ipsec.c
2003     @@ -346,18 +346,23 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb,
2004     struct net_device *dev,
2005     void *pos)
2006     {
2007     + struct cpl_tx_pkt_core *cpl;
2008     + struct sge_eth_txq *q;
2009     struct adapter *adap;
2010     struct port_info *pi;
2011     - struct sge_eth_txq *q;
2012     - struct cpl_tx_pkt_core *cpl;
2013     - u64 cntrl = 0;
2014     u32 ctrl0, qidx;
2015     + u64 cntrl = 0;
2016     + int left;
2017    
2018     pi = netdev_priv(dev);
2019     adap = pi->adapter;
2020     qidx = skb->queue_mapping;
2021     q = &adap->sge.ethtxq[qidx + pi->first_qset];
2022    
2023     + left = (void *)q->q.stat - pos;
2024     + if (!left)
2025     + pos = q->q.desc;
2026     +
2027     cpl = (struct cpl_tx_pkt_core *)pos;
2028    
2029     cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
2030     @@ -382,18 +387,17 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
2031     void *pos,
2032     struct ipsec_sa_entry *sa_entry)
2033     {
2034     - struct adapter *adap;
2035     - struct port_info *pi;
2036     - struct sge_eth_txq *q;
2037     - unsigned int len, qidx;
2038     struct _key_ctx *key_ctx;
2039     int left, eoq, key_len;
2040     + struct sge_eth_txq *q;
2041     + struct adapter *adap;
2042     + struct port_info *pi;
2043     + unsigned int qidx;
2044    
2045     pi = netdev_priv(dev);
2046     adap = pi->adapter;
2047     qidx = skb->queue_mapping;
2048     q = &adap->sge.ethtxq[qidx + pi->first_qset];
2049     - len = sa_entry->enckey_len + sizeof(struct cpl_tx_pkt_core);
2050     key_len = sa_entry->kctx_len;
2051    
2052     /* end of queue, reset pos to start of queue */
2053     @@ -411,19 +415,14 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
2054     pos += sizeof(struct _key_ctx);
2055     left -= sizeof(struct _key_ctx);
2056    
2057     - if (likely(len <= left)) {
2058     + if (likely(key_len <= left)) {
2059     memcpy(key_ctx->key, sa_entry->key, key_len);
2060     pos += key_len;
2061     } else {
2062     - if (key_len <= left) {
2063     - memcpy(pos, sa_entry->key, key_len);
2064     - pos += key_len;
2065     - } else {
2066     - memcpy(pos, sa_entry->key, left);
2067     - memcpy(q->q.desc, sa_entry->key + left,
2068     - key_len - left);
2069     - pos = (u8 *)q->q.desc + (key_len - left);
2070     - }
2071     + memcpy(pos, sa_entry->key, left);
2072     + memcpy(q->q.desc, sa_entry->key + left,
2073     + key_len - left);
2074     + pos = (u8 *)q->q.desc + (key_len - left);
2075     }
2076     /* Copy CPL TX PKT XT */
2077     pos = copy_cpltx_pktxt(skb, dev, pos);
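The copy_key_cpltx_pktxt() rework above drops a branch that compared the remaining ring space against a length that still included the CPL header, even though pos had already been advanced past the key context; only key_len matters at that point. What remains is the standard wrap-around copy into a descriptor ring; a hypothetical distillation:

    /*
     * Sketch: copying into a descriptor ring with wrap-around.
     * 'left' is the space remaining before the end of the ring;
     * the tail of the copy continues at 'base'.
     */
    #include <linux/string.h>
    #include <linux/types.h>

    static u8 *ring_copy(u8 *pos, u8 *base, size_t left,
    		     const u8 *src, size_t len)
    {
    	if (len <= left) {
    		memcpy(pos, src, len);
    		return pos + len;
    	}

    	memcpy(pos, src, left);
    	memcpy(base, src + left, len - left);
    	return base + (len - left);
    }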
2078     diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
2079     index ad02aa63b519..d1a1c74fb56a 100644
2080     --- a/drivers/crypto/omap-sham.c
2081     +++ b/drivers/crypto/omap-sham.c
2082     @@ -1087,7 +1087,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
2083    
2084     if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
2085     free_pages((unsigned long)sg_virt(ctx->sg),
2086     - get_order(ctx->sg->length));
2087     + get_order(ctx->sg->length + ctx->bufcnt));
2088    
2089     if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
2090     kfree(ctx->sg);
2091     diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
2092     index 96072b9b55c4..d7316f7a3a69 100644
2093     --- a/drivers/crypto/vmx/aes.c
2094     +++ b/drivers/crypto/vmx/aes.c
2095     @@ -48,8 +48,6 @@ static int p8_aes_init(struct crypto_tfm *tfm)
2096     alg, PTR_ERR(fallback));
2097     return PTR_ERR(fallback);
2098     }
2099     - printk(KERN_INFO "Using '%s' as fallback implementation.\n",
2100     - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
2101    
2102     crypto_cipher_set_flags(fallback,
2103     crypto_cipher_get_flags((struct
2104     diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
2105     index 7394d35d5936..5285ece4f33a 100644
2106     --- a/drivers/crypto/vmx/aes_cbc.c
2107     +++ b/drivers/crypto/vmx/aes_cbc.c
2108     @@ -52,9 +52,6 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
2109     alg, PTR_ERR(fallback));
2110     return PTR_ERR(fallback);
2111     }
2112     - printk(KERN_INFO "Using '%s' as fallback implementation.\n",
2113     - crypto_skcipher_driver_name(fallback));
2114     -
2115    
2116     crypto_skcipher_set_flags(
2117     fallback,
2118     diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
2119     index fc60d00a2e84..cd777c75291d 100644
2120     --- a/drivers/crypto/vmx/aes_ctr.c
2121     +++ b/drivers/crypto/vmx/aes_ctr.c
2122     @@ -50,8 +50,6 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
2123     alg, PTR_ERR(fallback));
2124     return PTR_ERR(fallback);
2125     }
2126     - printk(KERN_INFO "Using '%s' as fallback implementation.\n",
2127     - crypto_skcipher_driver_name(fallback));
2128    
2129     crypto_skcipher_set_flags(
2130     fallback,
2131     diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
2132     index 8cd6e62e4c90..8bd9aff0f55f 100644
2133     --- a/drivers/crypto/vmx/aes_xts.c
2134     +++ b/drivers/crypto/vmx/aes_xts.c
2135     @@ -53,8 +53,6 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm)
2136     alg, PTR_ERR(fallback));
2137     return PTR_ERR(fallback);
2138     }
2139     - printk(KERN_INFO "Using '%s' as fallback implementation.\n",
2140     - crypto_skcipher_driver_name(fallback));
2141    
2142     crypto_skcipher_set_flags(
2143     fallback,
2144     diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
2145     index 27a94a119009..1c4b5b889fba 100644
2146     --- a/drivers/crypto/vmx/ghash.c
2147     +++ b/drivers/crypto/vmx/ghash.c
2148     @@ -64,8 +64,6 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
2149     alg, PTR_ERR(fallback));
2150     return PTR_ERR(fallback);
2151     }
2152     - printk(KERN_INFO "Using '%s' as fallback implementation.\n",
2153     - crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));
2154    
2155     crypto_shash_set_flags(fallback,
2156     crypto_shash_get_flags((struct crypto_shash
2157     diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
2158     index 06e9650b3b30..a89b81b35932 100644
2159     --- a/drivers/input/joystick/xpad.c
2160     +++ b/drivers/input/joystick/xpad.c
2161     @@ -123,6 +123,7 @@ static const struct xpad_device {
2162     u8 mapping;
2163     u8 xtype;
2164     } xpad_device[] = {
2165     + { 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 },
2166     { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
2167     { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
2168     { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
2169     @@ -409,6 +410,7 @@ static const signed short xpad_abs_triggers[] = {
2170    
2171     static const struct usb_device_id xpad_table[] = {
2172     { USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */
2173     + XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 Controller */
2174     XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */
2175     XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
2176     XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */
2177     diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
2178     index 75e757520ef0..93967c8139e7 100644
2179     --- a/drivers/input/mouse/elan_i2c_core.c
2180     +++ b/drivers/input/mouse/elan_i2c_core.c
2181     @@ -1262,6 +1262,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
2182     { "ELAN060B", 0 },
2183     { "ELAN060C", 0 },
2184     { "ELAN0611", 0 },
2185     + { "ELAN0612", 0 },
2186     { "ELAN1000", 0 },
2187     { }
2188     };
2189     diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
2190     index 9736c83dd418..f2d9c2c41885 100644
2191     --- a/drivers/input/touchscreen/goodix.c
2192     +++ b/drivers/input/touchscreen/goodix.c
2193     @@ -933,6 +933,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
2194     #ifdef CONFIG_ACPI
2195     static const struct acpi_device_id goodix_acpi_match[] = {
2196     { "GDIX1001", 0 },
2197     + { "GDIX1002", 0 },
2198     { }
2199     };
2200     MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
2201     diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
2202     index 9047c0a529b2..efd733472a35 100644
2203     --- a/drivers/misc/vmw_balloon.c
2204     +++ b/drivers/misc/vmw_balloon.c
2205     @@ -576,15 +576,9 @@ static void vmballoon_pop(struct vmballoon *b)
2206     }
2207     }
2208    
2209     - if (b->batch_page) {
2210     - vunmap(b->batch_page);
2211     - b->batch_page = NULL;
2212     - }
2213     -
2214     - if (b->page) {
2215     - __free_page(b->page);
2216     - b->page = NULL;
2217     - }
2218     + /* Clearing the batch_page unconditionally has no adverse effect */
2219     + free_page((unsigned long)b->batch_page);
2220     + b->batch_page = NULL;
2221     }
2222    
2223     /*
2224     @@ -991,16 +985,13 @@ static const struct vmballoon_ops vmballoon_batched_ops = {
2225    
2226     static bool vmballoon_init_batching(struct vmballoon *b)
2227     {
2228     - b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
2229     - if (!b->page)
2230     - return false;
2231     + struct page *page;
2232    
2233     - b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
2234     - if (!b->batch_page) {
2235     - __free_page(b->page);
2236     + page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2237     + if (!page)
2238     return false;
2239     - }
2240    
2241     + b->batch_page = page_address(page);
2242     return true;
2243     }
2244    
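vmballoon_init_batching() above stops vmapping a single page just to obtain a kernel virtual address: a page from alloc_page(GFP_KERNEL) already has a linear-map address, so page_address() suffices, and teardown collapses to one free_page() call. As a sketch:

    /*
     * Sketch: a vmap() of exactly one page is redundant; a page from
     * alloc_page(GFP_KERNEL) already has a linear-map address.
     */
    #include <linux/gfp.h>
    #include <linux/mm.h>

    static void *alloc_batch_page(void)
    {
    	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

    	return page ? page_address(page) : NULL;
    }

    static void free_batch_page(void *batch_page)
    {
    	/* free_page() ignores a 0 address, so NULL is safe here */
    	free_page((unsigned long)batch_page);
    }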
2245     diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
2246     index e153e8b64bb8..d5553c47014f 100644
2247     --- a/drivers/nfc/pn533/usb.c
2248     +++ b/drivers/nfc/pn533/usb.c
2249     @@ -62,6 +62,9 @@ struct pn533_usb_phy {
2250     struct urb *out_urb;
2251     struct urb *in_urb;
2252    
2253     + struct urb *ack_urb;
2254     + u8 *ack_buffer;
2255     +
2256     struct pn533 *priv;
2257     };
2258    
2259     @@ -150,13 +153,16 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags)
2260     struct pn533_usb_phy *phy = dev->phy;
2261     static const u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
2262     /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */
2263     - int rc;
2264    
2265     - phy->out_urb->transfer_buffer = (u8 *)ack;
2266     - phy->out_urb->transfer_buffer_length = sizeof(ack);
2267     - rc = usb_submit_urb(phy->out_urb, flags);
2268     + if (!phy->ack_buffer) {
2269     + phy->ack_buffer = kmemdup(ack, sizeof(ack), flags);
2270     + if (!phy->ack_buffer)
2271     + return -ENOMEM;
2272     + }
2273    
2274     - return rc;
2275     + phy->ack_urb->transfer_buffer = phy->ack_buffer;
2276     + phy->ack_urb->transfer_buffer_length = sizeof(ack);
2277     + return usb_submit_urb(phy->ack_urb, flags);
2278     }
2279    
2280     static int pn533_usb_send_frame(struct pn533 *dev,
2281     @@ -375,26 +381,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
2282     /* Power on the reader (CCID cmd) */
2283     u8 cmd[10] = {PN533_ACR122_PC_TO_RDR_ICCPOWERON,
2284     0, 0, 0, 0, 0, 0, 3, 0, 0};
2285     + char *buffer;
2286     + int transferred;
2287     int rc;
2288     void *cntx;
2289     struct pn533_acr122_poweron_rdr_arg arg;
2290    
2291     dev_dbg(&phy->udev->dev, "%s\n", __func__);
2292    
2293     + buffer = kmemdup(cmd, sizeof(cmd), GFP_KERNEL);
2294     + if (!buffer)
2295     + return -ENOMEM;
2296     +
2297     init_completion(&arg.done);
2298     cntx = phy->in_urb->context; /* backup context */
2299    
2300     phy->in_urb->complete = pn533_acr122_poweron_rdr_resp;
2301     phy->in_urb->context = &arg;
2302    
2303     - phy->out_urb->transfer_buffer = cmd;
2304     - phy->out_urb->transfer_buffer_length = sizeof(cmd);
2305     -
2306     print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1,
2307     cmd, sizeof(cmd), false);
2308    
2309     - rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
2310     - if (rc) {
2311     + rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd),
2312     + &transferred, 0);
2313     + kfree(buffer);
2314     + if (rc || (transferred != sizeof(cmd))) {
2315     nfc_err(&phy->udev->dev,
2316     "Reader power on cmd error %d\n", rc);
2317     return rc;
2318     @@ -490,8 +501,9 @@ static int pn533_usb_probe(struct usb_interface *interface,
2319    
2320     phy->in_urb = usb_alloc_urb(0, GFP_KERNEL);
2321     phy->out_urb = usb_alloc_urb(0, GFP_KERNEL);
2322     + phy->ack_urb = usb_alloc_urb(0, GFP_KERNEL);
2323    
2324     - if (!phy->in_urb || !phy->out_urb)
2325     + if (!phy->in_urb || !phy->out_urb || !phy->ack_urb)
2326     goto error;
2327    
2328     usb_fill_bulk_urb(phy->in_urb, phy->udev,
2329     @@ -501,7 +513,9 @@ static int pn533_usb_probe(struct usb_interface *interface,
2330     usb_fill_bulk_urb(phy->out_urb, phy->udev,
2331     usb_sndbulkpipe(phy->udev, out_endpoint),
2332     NULL, 0, pn533_send_complete, phy);
2333     -
2334     + usb_fill_bulk_urb(phy->ack_urb, phy->udev,
2335     + usb_sndbulkpipe(phy->udev, out_endpoint),
2336     + NULL, 0, pn533_send_complete, phy);
2337    
2338     switch (id->driver_info) {
2339     case PN533_DEVICE_STD:
2340     @@ -554,6 +568,7 @@ static int pn533_usb_probe(struct usb_interface *interface,
2341     error:
2342     usb_free_urb(phy->in_urb);
2343     usb_free_urb(phy->out_urb);
2344     + usb_free_urb(phy->ack_urb);
2345     usb_put_dev(phy->udev);
2346     kfree(in_buf);
2347    
2348     @@ -573,10 +588,13 @@ static void pn533_usb_disconnect(struct usb_interface *interface)
2349    
2350     usb_kill_urb(phy->in_urb);
2351     usb_kill_urb(phy->out_urb);
2352     + usb_kill_urb(phy->ack_urb);
2353    
2354     kfree(phy->in_urb->transfer_buffer);
2355     usb_free_urb(phy->in_urb);
2356     usb_free_urb(phy->out_urb);
2357     + usb_free_urb(phy->ack_urb);
2358     + kfree(phy->ack_buffer);
2359    
2360     nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n");
2361     }
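Both pn533 fixes above address the same bug class: URB transfer buffers handed to usb_submit_urb() may be DMA-mapped by the host controller driver, so they must come from the heap, not from a static const array (the ACK frame) or the stack (the ACR122 power-on command). The ACK gets a kmemdup()'d buffer cached for the device's lifetime, plus a dedicated URB so it cannot race an in-flight out_urb. A minimal sketch of the heap-backed pattern (hypothetical helper; buffer ownership on completion is the caller's problem, and the driver itself keeps its copy around rather than freeing per call):

    /*
     * Sketch: heap-backed URB transfer buffer via kmemdup(). Stack or
     * rodata must never be handed to usb_submit_urb(), since the HCD
     * may DMA-map the buffer.
     */
    #include <linux/slab.h>
    #include <linux/usb.h>

    static int submit_fixed_frame(struct urb *urb, const u8 *frame,
    			      size_t len, gfp_t flags)
    {
    	u8 *buf = kmemdup(frame, len, flags);

    	if (!buf)
    		return -ENOMEM;

    	urb->transfer_buffer = buf;
    	urb->transfer_buffer_length = len;

    	return usb_submit_urb(urb, flags);
    }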
2362     diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
2363     index 94afeac1a19e..40fdef8b5b75 100644
2364     --- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
2365     +++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
2366     @@ -315,6 +315,10 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
2367     const struct qusb2_phy_cfg *cfg = qphy->cfg;
2368     u8 *val;
2369    
2370     + /* efuse register is optional */
2371     + if (!qphy->cell)
2372     + return;
2373     +
2374     /*
2375     * Read efuse register having TUNE2/1 parameter's high nibble.
2376     * If efuse register shows value as 0x0, or if we fail to find
2377     diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
2378     index e74db7902549..a68329411b29 100644
2379     --- a/drivers/staging/android/ion/ion.c
2380     +++ b/drivers/staging/android/ion/ion.c
2381     @@ -114,8 +114,11 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
2382    
2383     void ion_buffer_destroy(struct ion_buffer *buffer)
2384     {
2385     - if (WARN_ON(buffer->kmap_cnt > 0))
2386     + if (buffer->kmap_cnt > 0) {
2387     + pr_warn_once("%s: buffer still mapped in the kernel\n",
2388     + __func__);
2389     buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
2390     + }
2391     buffer->heap->ops->free(buffer);
2392     kfree(buffer);
2393     }
2394     diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
2395     index 624b501fd253..93de20e87abe 100644
2396     --- a/drivers/tty/serial/8250/8250_omap.c
2397     +++ b/drivers/tty/serial/8250/8250_omap.c
2398     @@ -1110,13 +1110,14 @@ static int omap8250_no_handle_irq(struct uart_port *port)
2399     return 0;
2400     }
2401    
2402     +static const u8 omap4_habit = UART_ERRATA_CLOCK_DISABLE;
2403     static const u8 am3352_habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE;
2404     static const u8 dra742_habit = UART_ERRATA_CLOCK_DISABLE;
2405    
2406     static const struct of_device_id omap8250_dt_ids[] = {
2407     { .compatible = "ti,omap2-uart" },
2408     { .compatible = "ti,omap3-uart" },
2409     - { .compatible = "ti,omap4-uart" },
2410     + { .compatible = "ti,omap4-uart", .data = &omap4_habit, },
2411     { .compatible = "ti,am3352-uart", .data = &am3352_habit, },
2412     { .compatible = "ti,am4372-uart", .data = &am3352_habit, },
2413     { .compatible = "ti,dra742-uart", .data = &dra742_habit, },
2414     @@ -1353,6 +1354,19 @@ static int omap8250_soft_reset(struct device *dev)
2415     int sysc;
2416     int syss;
2417    
2418     + /*
2419     + * At least on omap4, unused uarts may not idle after reset without
2420     + * a basic scr dma configuration even with no dma in use. The
2421     + * module clkctrl status bits will be 1 instead of 3 blocking idle
2422     + * for the whole clockdomain. The softreset below will clear scr,
2423     + * and we restore it on resume so this is safe to do on all SoCs
2424     + * needing omap8250_soft_reset() quirk. Do it in two writes as
2425     + * recommended in the comment for omap8250_update_scr().
2426     + */
2427     + serial_out(up, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1);
2428     + serial_out(up, UART_OMAP_SCR,
2429     + OMAP_UART_SCR_DMAMODE_1 | OMAP_UART_SCR_DMAMODE_CTL);
2430     +
2431     sysc = serial_in(up, UART_OMAP_SYSC);
2432    
2433     /* softreset the UART */
2434     diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
2435     index 4b40a5b449ee..ebd33c0232e6 100644
2436     --- a/drivers/tty/serial/amba-pl011.c
2437     +++ b/drivers/tty/serial/amba-pl011.c
2438     @@ -1727,10 +1727,26 @@ static int pl011_allocate_irq(struct uart_amba_port *uap)
2439     */
2440     static void pl011_enable_interrupts(struct uart_amba_port *uap)
2441     {
2442     + unsigned int i;
2443     +
2444     spin_lock_irq(&uap->port.lock);
2445    
2446     /* Clear out any spuriously appearing RX interrupts */
2447     pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
2448     +
2449     + /*
2450     + * RXIS is asserted only when the RX FIFO transitions from below
2451     + * to above the trigger threshold. If the RX FIFO is already
2452     + * full to the threshold this can't happen and RXIS will now be
2453     + * stuck off. Drain the RX FIFO explicitly to fix this:
2454     + */
2455     + for (i = 0; i < uap->fifosize * 2; ++i) {
2456     + if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
2457     + break;
2458     +
2459     + pl011_read(uap, REG_DR);
2460     + }
2461     +
2462     uap->im = UART011_RTIM;
2463     if (!pl011_dma_rx_running(uap))
2464     uap->im |= UART011_RXIM;
2465     diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
2466     index e287fe8f10fc..55b3eff148b1 100644
2467     --- a/drivers/tty/serial/atmel_serial.c
2468     +++ b/drivers/tty/serial/atmel_serial.c
2469     @@ -1757,7 +1757,6 @@ static int atmel_startup(struct uart_port *port)
2470     {
2471     struct platform_device *pdev = to_platform_device(port->dev);
2472     struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2473     - struct tty_struct *tty = port->state->port.tty;
2474     int retval;
2475    
2476     /*
2477     @@ -1772,8 +1771,8 @@ static int atmel_startup(struct uart_port *port)
2478     * Allocate the IRQ
2479     */
2480     retval = request_irq(port->irq, atmel_interrupt,
2481     - IRQF_SHARED | IRQF_COND_SUSPEND,
2482     - tty ? tty->name : "atmel_serial", port);
2483     + IRQF_SHARED | IRQF_COND_SUSPEND,
2484     + dev_name(&pdev->dev), port);
2485     if (retval) {
2486     dev_err(port->dev, "atmel_startup - Can't get irq\n");
2487     return retval;
2488     diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
2489     index 3f2f8c118ce0..64e96926f1ad 100644
2490     --- a/drivers/tty/serial/samsung.c
2491     +++ b/drivers/tty/serial/samsung.c
2492     @@ -862,15 +862,12 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
2493     dma->rx_conf.direction = DMA_DEV_TO_MEM;
2494     dma->rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
2495     dma->rx_conf.src_addr = p->port.mapbase + S3C2410_URXH;
2496     - dma->rx_conf.src_maxburst = 16;
2497     + dma->rx_conf.src_maxburst = 1;
2498    
2499     dma->tx_conf.direction = DMA_MEM_TO_DEV;
2500     dma->tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
2501     dma->tx_conf.dst_addr = p->port.mapbase + S3C2410_UTXH;
2502     - if (dma_get_cache_alignment() >= 16)
2503     - dma->tx_conf.dst_maxburst = 16;
2504     - else
2505     - dma->tx_conf.dst_maxburst = 1;
2506     + dma->tx_conf.dst_maxburst = 1;
2507    
2508     dma->rx_chan = dma_request_chan(p->port.dev, "rx");
2509    
2510     diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
2511     index fdbbff547106..a4f82ec665fe 100644
2512     --- a/drivers/tty/serial/sh-sci.c
2513     +++ b/drivers/tty/serial/sh-sci.c
2514     @@ -2704,8 +2704,8 @@ static int sci_init_clocks(struct sci_port *sci_port, struct device *dev)
2515     dev_dbg(dev, "failed to get %s (%ld)\n", clk_names[i],
2516     PTR_ERR(clk));
2517     else
2518     - dev_dbg(dev, "clk %s is %pC rate %pCr\n", clk_names[i],
2519     - clk, clk);
2520     + dev_dbg(dev, "clk %s is %pC rate %lu\n", clk_names[i],
2521     + clk, clk_get_rate(clk));
2522     sci_port->clks[i] = IS_ERR(clk) ? NULL : clk;
2523     }
2524     return 0;
2525     diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
2526     index 0c11d40a12bc..7b137003c2be 100644
2527     --- a/drivers/usb/core/message.c
2528     +++ b/drivers/usb/core/message.c
2529     @@ -940,7 +940,7 @@ int usb_set_isoch_delay(struct usb_device *dev)
2530     return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
2531     USB_REQ_SET_ISOCH_DELAY,
2532     USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
2533     - cpu_to_le16(dev->hub_delay), 0, NULL, 0,
2534     + dev->hub_delay, 0, NULL, 0,
2535     USB_CTRL_SET_TIMEOUT);
2536     }
2537    
2538     diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
2539     index d359efe06c76..9c7ed2539ff7 100644
2540     --- a/drivers/usb/gadget/function/f_printer.c
2541     +++ b/drivers/usb/gadget/function/f_printer.c
2542     @@ -631,19 +631,19 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
2543     return -EAGAIN;
2544     }
2545    
2546     + list_add(&req->list, &dev->tx_reqs_active);
2547     +
2548     /* here, we unlock, and only unlock, to avoid deadlock. */
2549     spin_unlock(&dev->lock);
2550     value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC);
2551     spin_lock(&dev->lock);
2552     if (value) {
2553     + list_del(&req->list);
2554     list_add(&req->list, &dev->tx_reqs);
2555     spin_unlock_irqrestore(&dev->lock, flags);
2556     mutex_unlock(&dev->lock_printer_io);
2557     return -EAGAIN;
2558     }
2559     -
2560     - list_add(&req->list, &dev->tx_reqs_active);
2561     -
2562     }
2563    
2564     spin_unlock_irqrestore(&dev->lock, flags);
2565     diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
2566     index 409cde4e6a51..5caf78bbbf7c 100644
2567     --- a/drivers/usb/gadget/udc/renesas_usb3.c
2568     +++ b/drivers/usb/gadget/udc/renesas_usb3.c
2569     @@ -333,6 +333,7 @@ struct renesas_usb3 {
2570     struct extcon_dev *extcon;
2571     struct work_struct extcon_work;
2572     struct phy *phy;
2573     + struct dentry *dentry;
2574    
2575     struct renesas_usb3_ep *usb3_ep;
2576     int num_usb3_eps;
2577     @@ -622,6 +623,13 @@ static void usb3_disconnect(struct renesas_usb3 *usb3)
2578     usb3_usb2_pullup(usb3, 0);
2579     usb3_clear_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON);
2580     usb3_reset_epc(usb3);
2581     + usb3_disable_irq_1(usb3, USB_INT_1_B2_RSUM | USB_INT_1_B3_PLLWKUP |
2582     + USB_INT_1_B3_LUPSUCS | USB_INT_1_B3_DISABLE |
2583     + USB_INT_1_SPEED | USB_INT_1_B3_WRMRST |
2584     + USB_INT_1_B3_HOTRST | USB_INT_1_B2_SPND |
2585     + USB_INT_1_B2_L1SPND | USB_INT_1_B2_USBRST);
2586     + usb3_clear_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON);
2587     + usb3_init_epc_registers(usb3);
2588    
2589     if (usb3->driver)
2590     usb3->driver->disconnect(&usb3->gadget);
2591     @@ -2393,8 +2401,12 @@ static void renesas_usb3_debugfs_init(struct renesas_usb3 *usb3,
2592    
2593     file = debugfs_create_file("b_device", 0644, root, usb3,
2594     &renesas_usb3_b_device_fops);
2595     - if (!file)
2596     + if (!file) {
2597     dev_info(dev, "%s: Can't create debugfs mode\n", __func__);
2598     + debugfs_remove_recursive(root);
2599     + } else {
2600     + usb3->dentry = root;
2601     + }
2602     }
2603    
2604     /*------- platform_driver ------------------------------------------------*/
2605     @@ -2402,14 +2414,13 @@ static int renesas_usb3_remove(struct platform_device *pdev)
2606     {
2607     struct renesas_usb3 *usb3 = platform_get_drvdata(pdev);
2608    
2609     + debugfs_remove_recursive(usb3->dentry);
2610     device_remove_file(&pdev->dev, &dev_attr_role);
2611    
2612     usb_del_gadget_udc(&usb3->gadget);
2613     renesas_usb3_dma_free_prd(usb3, &pdev->dev);
2614    
2615     __renesas_usb3_ep_free_request(usb3->ep0_req);
2616     - if (usb3->phy)
2617     - phy_put(usb3->phy);
2618     pm_runtime_disable(&pdev->dev);
2619    
2620     return 0;
2621     @@ -2628,6 +2639,17 @@ static int renesas_usb3_probe(struct platform_device *pdev)
2622     if (ret < 0)
2623     goto err_alloc_prd;
2624    
2625     + /*
2626     + * This is optional. So, if this driver cannot get a phy,
2627     + * this driver will not handle a phy anymore.
2628     + */
2629     + usb3->phy = devm_phy_optional_get(&pdev->dev, "usb");
2630     + if (IS_ERR(usb3->phy)) {
2631     + ret = PTR_ERR(usb3->phy);
2632     + goto err_add_udc;
2633     + }
2634     +
2635     + pm_runtime_enable(&pdev->dev);
2636     ret = usb_add_gadget_udc(&pdev->dev, &usb3->gadget);
2637     if (ret < 0)
2638     goto err_add_udc;
2639     @@ -2636,20 +2658,11 @@ static int renesas_usb3_probe(struct platform_device *pdev)
2640     if (ret < 0)
2641     goto err_dev_create;
2642    
2643     - /*
2644     - * This is an optional. So, if this driver cannot get a phy,
2645     - * this driver will not handle a phy anymore.
2646     - */
2647     - usb3->phy = devm_phy_get(&pdev->dev, "usb");
2648     - if (IS_ERR(usb3->phy))
2649     - usb3->phy = NULL;
2650     -
2651     usb3->workaround_for_vbus = priv->workaround_for_vbus;
2652    
2653     renesas_usb3_debugfs_init(usb3, &pdev->dev);
2654    
2655     dev_info(&pdev->dev, "probed%s\n", usb3->phy ? " with phy" : "");
2656     - pm_runtime_enable(usb3_to_dev(usb3));
2657    
2658     return 0;
2659    
2660     diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
2661     index 6034c39b67d1..9e9de5452860 100644
2662     --- a/drivers/usb/storage/uas.c
2663     +++ b/drivers/usb/storage/uas.c
2664     @@ -836,6 +836,12 @@ static int uas_slave_configure(struct scsi_device *sdev)
2665     if (devinfo->flags & US_FL_BROKEN_FUA)
2666     sdev->broken_fua = 1;
2667    
2668     + /* UAS also needs to support FL_ALWAYS_SYNC */
2669     + if (devinfo->flags & US_FL_ALWAYS_SYNC) {
2670     + sdev->skip_ms_page_3f = 1;
2671     + sdev->skip_ms_page_8 = 1;
2672     + sdev->wce_default_on = 1;
2673     + }
2674     scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
2675     return 0;
2676     }
2677     diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
2678     index 747d3a9596d9..22fcfccf453a 100644
2679     --- a/drivers/usb/storage/unusual_devs.h
2680     +++ b/drivers/usb/storage/unusual_devs.h
2681     @@ -2321,6 +2321,15 @@ UNUSUAL_DEV( 0x4146, 0xba01, 0x0100, 0x0100,
2682     "Micro Mini 1GB",
2683     USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
2684    
2685     +/* "G-DRIVE" external HDD hangs on write without these.
2686     + * Patch submitted by Alexander Kappner <agk@godking.net>
2687     + */
2688     +UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999,
2689     + "SimpleTech",
2690     + "External HDD",
2691     + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2692     + US_FL_ALWAYS_SYNC),
2693     +
2694     /*
2695     * Nick Bowler <nbowler@elliptictech.com>
2696     * SCSI stack spams (otherwise harmless) error messages.
2697     diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
2698     index 38434d88954a..d0bdebd87ce3 100644
2699     --- a/drivers/usb/storage/unusual_uas.h
2700     +++ b/drivers/usb/storage/unusual_uas.h
2701     @@ -107,3 +107,12 @@ UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
2702     "External HDD",
2703     USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2704     US_FL_NO_REPORT_OPCODES),
2705     +
2706     +/* "G-DRIVE" external HDD hangs on write without these.
2707     + * Patch submitted by Alexander Kappner <agk@godking.net>
2708     + */
2709     +UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999,
2710     + "SimpleTech",
2711     + "External HDD",
2712     + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2713     + US_FL_ALWAYS_SYNC),
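The entry is duplicated in unusual_uas.h because a UAS-capable bridge may bind to either driver: uas consults unusual_uas.h while usb-storage consults unusual_devs.h, and the device needs the quirk on both paths. When rebuilding the kernel is impractical, the same quirk can usually be applied at boot via the usb-storage.quirks= module parameter (for example usb-storage.quirks=4971:8024:y), assuming the running kernel's quirk parser maps the 'y' flag to US_FL_ALWAYS_SYNC.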
2714     diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/typec_wcove.c
2715     index 19cca7f1b2c5..58dc7ea7cf0d 100644
2716     --- a/drivers/usb/typec/typec_wcove.c
2717     +++ b/drivers/usb/typec/typec_wcove.c
2718     @@ -202,6 +202,10 @@ static int wcove_init(struct tcpc_dev *tcpc)
2719     struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
2720     int ret;
2721    
2722     + ret = regmap_write(wcove->regmap, USBC_CONTROL1, 0);
2723     + if (ret)
2724     + return ret;
2725     +
2726     /* Unmask everything */
2727     ret = regmap_write(wcove->regmap, USBC_IRQMASK1, 0);
2728     if (ret)
2729     @@ -285,8 +289,30 @@ static int wcove_get_cc(struct tcpc_dev *tcpc, enum typec_cc_status *cc1,
2730    
2731     static int wcove_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
2732     {
2733     - /* XXX: Relying on the HW FSM to configure things correctly for now */
2734     - return 0;
2735     + struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
2736     + unsigned int ctrl;
2737     +
2738     + switch (cc) {
2739     + case TYPEC_CC_RD:
2740     + ctrl = USBC_CONTROL1_MODE_SNK;
2741     + break;
2742     + case TYPEC_CC_RP_DEF:
2743     + ctrl = USBC_CONTROL1_CURSRC_UA_80 | USBC_CONTROL1_MODE_SRC;
2744     + break;
2745     + case TYPEC_CC_RP_1_5:
2746     + ctrl = USBC_CONTROL1_CURSRC_UA_180 | USBC_CONTROL1_MODE_SRC;
2747     + break;
2748     + case TYPEC_CC_RP_3_0:
2749     + ctrl = USBC_CONTROL1_CURSRC_UA_330 | USBC_CONTROL1_MODE_SRC;
2750     + break;
2751     + case TYPEC_CC_OPEN:
2752     + ctrl = 0;
2753     + break;
2754     + default:
2755     + return -EINVAL;
2756     + }
2757     +
2758     + return regmap_write(wcove->regmap, USBC_CONTROL1, ctrl);
2759     }
2760    
2761     static int wcove_set_polarity(struct tcpc_dev *tcpc, enum typec_cc_polarity pol)
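wcove_set_cc() follows the Type-C termination model: presenting Rd makes the port a sink, presenting Rp makes it a source, and the strength of the Rp pull-up advertises how much current the source offers; the 80/180/330 uA current-source encodings correspond to the spec's Default, 1.5 A and 3.0 A levels, and TYPEC_CC_OPEN removes the termination entirely. The new regmap_write() in wcove_init() parks the port the same way, with no termination, until the policy engine selects a role. A hypothetical helper (not part of the driver) mapping each advertisement to the current a sink may then draw, assuming a USB 2.x default of 500 mA:

    #include <linux/usb/tcpm.h>

    static int rp_advertised_ma(enum typec_cc_status cc)
    {
            switch (cc) {
            case TYPEC_CC_RP_DEF:
                    return 500;   /* default USB power, USB 2.x */
            case TYPEC_CC_RP_1_5:
                    return 1500;
            case TYPEC_CC_RP_3_0:
                    return 3000;
            default:
                    return 0;     /* Rd or open: nothing advertised */
            }
    }
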
2762     diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
2763     index 48808388ec33..be37aec250c2 100644
2764     --- a/drivers/usb/usbip/vhci_sysfs.c
2765     +++ b/drivers/usb/usbip/vhci_sysfs.c
2766     @@ -10,6 +10,9 @@
2767     #include <linux/platform_device.h>
2768     #include <linux/slab.h>
2769    
2770     +/* Hardening for Spectre-v1 */
2771     +#include <linux/nospec.h>
2772     +
2773     #include "usbip_common.h"
2774     #include "vhci.h"
2775    
2776     @@ -205,16 +208,20 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
2777     return 0;
2778     }
2779    
2780     -static int valid_port(__u32 pdev_nr, __u32 rhport)
2781     +static int valid_port(__u32 *pdev_nr, __u32 *rhport)
2782     {
2783     - if (pdev_nr >= vhci_num_controllers) {
2784     - pr_err("pdev %u\n", pdev_nr);
2785     + if (*pdev_nr >= vhci_num_controllers) {
2786     + pr_err("pdev %u\n", *pdev_nr);
2787     return 0;
2788     }
2789     - if (rhport >= VHCI_HC_PORTS) {
2790     - pr_err("rhport %u\n", rhport);
2791     + *pdev_nr = array_index_nospec(*pdev_nr, vhci_num_controllers);
2792     +
2793     + if (*rhport >= VHCI_HC_PORTS) {
2794     + pr_err("rhport %u\n", *rhport);
2795     return 0;
2796     }
2797     + *rhport = array_index_nospec(*rhport, VHCI_HC_PORTS);
2798     +
2799     return 1;
2800     }
2801    
2802     @@ -232,7 +239,7 @@ static ssize_t detach_store(struct device *dev, struct device_attribute *attr,
2803     pdev_nr = port_to_pdev_nr(port);
2804     rhport = port_to_rhport(port);
2805    
2806     - if (!valid_port(pdev_nr, rhport))
2807     + if (!valid_port(&pdev_nr, &rhport))
2808     return -EINVAL;
2809    
2810     hcd = platform_get_drvdata(vhcis[pdev_nr].pdev);
2811     @@ -258,7 +265,8 @@ static ssize_t detach_store(struct device *dev, struct device_attribute *attr,
2812     }
2813     static DEVICE_ATTR_WO(detach);
2814    
2815     -static int valid_args(__u32 pdev_nr, __u32 rhport, enum usb_device_speed speed)
2816     +static int valid_args(__u32 *pdev_nr, __u32 *rhport,
2817     + enum usb_device_speed speed)
2818     {
2819     if (!valid_port(pdev_nr, rhport)) {
2820     return 0;
2821     @@ -322,7 +330,7 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
2822     sockfd, devid, speed);
2823    
2824     /* check received parameters */
2825     - if (!valid_args(pdev_nr, rhport, speed))
2826     + if (!valid_args(&pdev_nr, &rhport, speed))
2827     return -EINVAL;
2828    
2829     hcd = platform_get_drvdata(vhcis[pdev_nr].pdev);
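valid_port() and valid_args() switch to pointer arguments because array_index_nospec() does more than check a bound: it returns a clamped copy of the index that stays in range even under branch misspeculation, and that sanitized value must be written back so the callers index vhcis[] and the port arrays with it. The general Spectre-v1 idiom, as a self-contained sketch (hypothetical foo_lookup(), not from the patch):

    #include <linux/errno.h>
    #include <linux/nospec.h>

    static int foo_lookup(const int *table, size_t nr, size_t idx)
    {
            if (idx >= nr)
                    return -EINVAL;

            /*
             * Clamp idx so that, if the branch above was mispredicted,
             * the speculative load below cannot touch table[idx] for
             * an out-of-range idx and leak it through the cache.
             */
            idx = array_index_nospec(idx, nr);

            return table[idx];
    }
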
2830     diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
2831     index b02c41e53d56..39e364c70caf 100644
2832     --- a/include/uapi/linux/kvm.h
2833     +++ b/include/uapi/linux/kvm.h
2834     @@ -677,10 +677,10 @@ struct kvm_ioeventfd {
2835     };
2836    
2837     #define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0)
2838     -#define KVM_X86_DISABLE_EXITS_HTL (1 << 1)
2839     +#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
2840     #define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
2841     #define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \
2842     - KVM_X86_DISABLE_EXITS_HTL | \
2843     + KVM_X86_DISABLE_EXITS_HLT | \
2844     KVM_X86_DISABLE_EXITS_PAUSE)
2845    
2846     /* for KVM_ENABLE_CAP */
2847     diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
2848     index b02c41e53d56..39e364c70caf 100644
2849     --- a/tools/include/uapi/linux/kvm.h
2850     +++ b/tools/include/uapi/linux/kvm.h
2851     @@ -677,10 +677,10 @@ struct kvm_ioeventfd {
2852     };
2853    
2854     #define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0)
2855     -#define KVM_X86_DISABLE_EXITS_HTL (1 << 1)
2856     +#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
2857     #define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
2858     #define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \
2859     - KVM_X86_DISABLE_EXITS_HTL | \
2860     + KVM_X86_DISABLE_EXITS_HLT | \
2861     KVM_X86_DISABLE_EXITS_PAUSE)
2862    
2863     /* for KVM_ENABLE_CAP */
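
The final two hunks fix the same typo in the kernel UAPI header and in the copy carried under tools/: the bit disables VM exits on the x86 HLT instruction, so the constant is KVM_X86_DISABLE_EXITS_HLT, not "HTL". Because this renames a UAPI macro, userspace built against the misspelled 4.17.0 header must follow suit. A hedged userspace sketch of how the bit is consumed, via KVM_ENABLE_CAP on a VM file descriptor (error handling omitted; assumes the fd came from KVM_CREATE_VM):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int disable_hlt_exits(int vm_fd)
    {
            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_X86_DISABLE_EXITS,
                    .args = { KVM_X86_DISABLE_EXITS_HLT },
            };

            /*
             * Once enabled, a guest HLT no longer exits to the host;
             * the vCPU halts in guest context instead.
             */
            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }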