Contents of /trunk/kernel-alx/patches-5.4/0206-5.4.107-all-fixes.patch
Revision 3635
Mon Oct 24 12:34:12 2022 UTC by niro
File size: 118318 byte(s)
-sync kernel patches
1 | diff --git a/Makefile b/Makefile |
2 | index a333b378f1f71..43159b21a83f4 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 5 |
8 | PATCHLEVEL = 4 |
9 | -SUBLEVEL = 106 |
10 | +SUBLEVEL = 107 |
11 | EXTRAVERSION = |
12 | NAME = Kleptomaniac Octopus |
13 | |
14 | diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h |
15 | index 97f21cc666579..7f7fdb16bb968 100644 |
16 | --- a/arch/arm64/include/asm/kvm_hyp.h |
17 | +++ b/arch/arm64/include/asm/kvm_hyp.h |
18 | @@ -71,6 +71,9 @@ void __sysreg32_restore_state(struct kvm_vcpu *vcpu); |
19 | |
20 | void __debug_switch_to_guest(struct kvm_vcpu *vcpu); |
21 | void __debug_switch_to_host(struct kvm_vcpu *vcpu); |
22 | +void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu); |
23 | +void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu); |
24 | + |
25 | |
26 | void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); |
27 | void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); |
28 | diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c |
29 | index 0fc9872a14671..aead8a5fbe919 100644 |
30 | --- a/arch/arm64/kvm/hyp/debug-sr.c |
31 | +++ b/arch/arm64/kvm/hyp/debug-sr.c |
32 | @@ -168,6 +168,21 @@ static void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu, |
33 | write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1); |
34 | } |
35 | |
36 | +void __hyp_text __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu) |
37 | +{ |
38 | + /* |
39 | + * Non-VHE: Disable and flush SPE data generation |
40 | + * VHE: The vcpu can run, but it can't hide. |
41 | + */ |
42 | + __debug_save_spe_nvhe(&vcpu->arch.host_debug_state.pmscr_el1); |
43 | + |
44 | +} |
45 | + |
46 | +void __hyp_text __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu) |
47 | +{ |
48 | + __debug_restore_spe_nvhe(vcpu->arch.host_debug_state.pmscr_el1); |
49 | +} |
50 | + |
51 | void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu) |
52 | { |
53 | struct kvm_cpu_context *host_ctxt; |
54 | @@ -175,13 +190,6 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu) |
55 | struct kvm_guest_debug_arch *host_dbg; |
56 | struct kvm_guest_debug_arch *guest_dbg; |
57 | |
58 | - /* |
59 | - * Non-VHE: Disable and flush SPE data generation |
60 | - * VHE: The vcpu can run, but it can't hide. |
61 | - */ |
62 | - if (!has_vhe()) |
63 | - __debug_save_spe_nvhe(&vcpu->arch.host_debug_state.pmscr_el1); |
64 | - |
65 | if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)) |
66 | return; |
67 | |
68 | @@ -201,8 +209,6 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu) |
69 | struct kvm_guest_debug_arch *host_dbg; |
70 | struct kvm_guest_debug_arch *guest_dbg; |
71 | |
72 | - if (!has_vhe()) |
73 | - __debug_restore_spe_nvhe(vcpu->arch.host_debug_state.pmscr_el1); |
74 | |
75 | if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)) |
76 | return; |
77 | diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c |
78 | index 84964983198e2..14607fac7ca38 100644 |
79 | --- a/arch/arm64/kvm/hyp/switch.c |
80 | +++ b/arch/arm64/kvm/hyp/switch.c |
81 | @@ -682,6 +682,15 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) |
82 | |
83 | __sysreg_save_state_nvhe(host_ctxt); |
84 | |
85 | + /* |
86 | + * We must flush and disable the SPE buffer for nVHE, as |
87 | + * the translation regime(EL1&0) is going to be loaded with |
88 | + * that of the guest. And we must do this before we change the |
89 | + * translation regime to EL2 (via MDCR_EL2_EPB == 0) and |
90 | + * before we load guest Stage1. |
91 | + */ |
92 | + __debug_save_host_buffers_nvhe(vcpu); |
93 | + |
94 | __activate_vm(kern_hyp_va(vcpu->kvm)); |
95 | __activate_traps(vcpu); |
96 | |
97 | @@ -720,11 +729,13 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) |
98 | if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) |
99 | __fpsimd_save_fpexc32(vcpu); |
100 | |
101 | + __debug_switch_to_host(vcpu); |
102 | + |
103 | /* |
104 | * This must come after restoring the host sysregs, since a non-VHE |
105 | * system may enable SPE here and make use of the TTBRs. |
106 | */ |
107 | - __debug_switch_to_host(vcpu); |
108 | + __debug_restore_host_buffers_nvhe(vcpu); |
109 | |
110 | if (pmu_switch_needed) |
111 | __pmu_switch_to_host(host_ctxt); |
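The two arm64/KVM files above carry the first fix in this release: saving and restoring the host's SPE (Statistical Profiling Extension) buffer moves out of __debug_switch_to_guest()/__debug_switch_to_host() into dedicated __debug_save_host_buffers_nvhe()/__debug_restore_host_buffers_nvhe() calls, placed so the buffer is drained before the EL1&0 translation regime is switched to the guest's and re-enabled only after the host sysregs are restored. As a rough guide to what "disable and flush SPE data generation" involves, here is a C sketch modelled on the existing __debug_save_spe_nvhe() helper that the new function calls; that helper is not shown in this patch, and the real one first checks that EL1 actually owns the profiling buffer, so treat this as an approximation:

	static void __hyp_text save_and_drain_spe_sketch(u64 *pmscr_el1)
	{
		/* Save the profiling control register, then stop data generation. */
		*pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
		write_sysreg_s(0, SYS_PMSCR_EL1);
		isb();

		/* Drain any records still buffered in the SPU out to memory... */
		psb_csync();
		dsb(nsh);	/* ...and wait for those writes to complete. */
	}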
112 | diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S |
113 | index 9afeb58c910eb..dd954d8db629b 100644 |
114 | --- a/arch/x86/crypto/aesni-intel_asm.S |
115 | +++ b/arch/x86/crypto/aesni-intel_asm.S |
116 | @@ -319,7 +319,7 @@ _initial_blocks_\@: |
117 | |
118 | # Main loop - Encrypt/Decrypt remaining blocks |
119 | |
120 | - cmp $0, %r13 |
121 | + test %r13, %r13 |
122 | je _zero_cipher_left_\@ |
123 | sub $64, %r13 |
124 | je _four_cipher_left_\@ |
125 | @@ -438,7 +438,7 @@ _multiple_of_16_bytes_\@: |
126 | |
127 | mov PBlockLen(%arg2), %r12 |
128 | |
129 | - cmp $0, %r12 |
130 | + test %r12, %r12 |
131 | je _partial_done\@ |
132 | |
133 | GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 |
134 | @@ -475,7 +475,7 @@ _T_8_\@: |
135 | add $8, %r10 |
136 | sub $8, %r11 |
137 | psrldq $8, %xmm0 |
138 | - cmp $0, %r11 |
139 | + test %r11, %r11 |
140 | je _return_T_done_\@ |
141 | _T_4_\@: |
142 | movd %xmm0, %eax |
143 | @@ -483,7 +483,7 @@ _T_4_\@: |
144 | add $4, %r10 |
145 | sub $4, %r11 |
146 | psrldq $4, %xmm0 |
147 | - cmp $0, %r11 |
148 | + test %r11, %r11 |
149 | je _return_T_done_\@ |
150 | _T_123_\@: |
151 | movd %xmm0, %eax |
152 | @@ -620,7 +620,7 @@ _get_AAD_blocks\@: |
153 | |
154 | /* read the last <16B of AAD */ |
155 | _get_AAD_rest\@: |
156 | - cmp $0, %r11 |
157 | + test %r11, %r11 |
158 | je _get_AAD_done\@ |
159 | |
160 | READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7 |
161 | @@ -641,7 +641,7 @@ _get_AAD_done\@: |
162 | .macro PARTIAL_BLOCK CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \ |
163 | AAD_HASH operation |
164 | mov PBlockLen(%arg2), %r13 |
165 | - cmp $0, %r13 |
166 | + test %r13, %r13 |
167 | je _partial_block_done_\@ # Leave Macro if no partial blocks |
168 | # Read in input data without over reading |
169 | cmp $16, \PLAIN_CYPH_LEN |
170 | @@ -693,7 +693,7 @@ _no_extra_mask_1_\@: |
171 | PSHUFB_XMM %xmm2, %xmm3 |
172 | pxor %xmm3, \AAD_HASH |
173 | |
174 | - cmp $0, %r10 |
175 | + test %r10, %r10 |
176 | jl _partial_incomplete_1_\@ |
177 | |
178 | # GHASH computation for the last <16 Byte block |
179 | @@ -728,7 +728,7 @@ _no_extra_mask_2_\@: |
180 | PSHUFB_XMM %xmm2, %xmm9 |
181 | pxor %xmm9, \AAD_HASH |
182 | |
183 | - cmp $0, %r10 |
184 | + test %r10, %r10 |
185 | jl _partial_incomplete_2_\@ |
186 | |
187 | # GHASH computation for the last <16 Byte block |
188 | @@ -748,7 +748,7 @@ _encode_done_\@: |
189 | PSHUFB_XMM %xmm2, %xmm9 |
190 | .endif |
191 | # output encrypted Bytes |
192 | - cmp $0, %r10 |
193 | + test %r10, %r10 |
194 | jl _partial_fill_\@ |
195 | mov %r13, %r12 |
196 | mov $16, %r13 |
197 | @@ -1946,7 +1946,7 @@ ENTRY(aesni_set_key) |
198 | ENDPROC(aesni_set_key) |
199 | |
200 | /* |
201 | - * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) |
202 | + * void aesni_enc(const void *ctx, u8 *dst, const u8 *src) |
203 | */ |
204 | ENTRY(aesni_enc) |
205 | FRAME_BEGIN |
206 | @@ -2137,7 +2137,7 @@ _aesni_enc4: |
207 | ENDPROC(_aesni_enc4) |
208 | |
209 | /* |
210 | - * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) |
211 | + * void aesni_dec (const void *ctx, u8 *dst, const u8 *src) |
212 | */ |
213 | ENTRY(aesni_dec) |
214 | FRAME_BEGIN |
215 | @@ -2726,25 +2726,18 @@ ENDPROC(aesni_ctr_enc) |
216 | pxor CTR, IV; |
217 | |
218 | /* |
219 | - * void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, |
220 | - * bool enc, u8 *iv) |
221 | + * void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, |
222 | + * const u8 *src, unsigned int len, le128 *iv) |
223 | */ |
224 | -ENTRY(aesni_xts_crypt8) |
225 | +ENTRY(aesni_xts_encrypt) |
226 | FRAME_BEGIN |
227 | - cmpb $0, %cl |
228 | - movl $0, %ecx |
229 | - movl $240, %r10d |
230 | - leaq _aesni_enc4, %r11 |
231 | - leaq _aesni_dec4, %rax |
232 | - cmovel %r10d, %ecx |
233 | - cmoveq %rax, %r11 |
234 | |
235 | movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK |
236 | movups (IVP), IV |
237 | |
238 | mov 480(KEYP), KLEN |
239 | - addq %rcx, KEYP |
240 | |
241 | +.Lxts_enc_loop4: |
242 | movdqa IV, STATE1 |
243 | movdqu 0x00(INP), INC |
244 | pxor INC, STATE1 |
245 | @@ -2768,71 +2761,103 @@ ENTRY(aesni_xts_crypt8) |
246 | pxor INC, STATE4 |
247 | movdqu IV, 0x30(OUTP) |
248 | |
249 | - CALL_NOSPEC %r11 |
250 | + call _aesni_enc4 |
251 | |
252 | movdqu 0x00(OUTP), INC |
253 | pxor INC, STATE1 |
254 | movdqu STATE1, 0x00(OUTP) |
255 | |
256 | - _aesni_gf128mul_x_ble() |
257 | - movdqa IV, STATE1 |
258 | - movdqu 0x40(INP), INC |
259 | - pxor INC, STATE1 |
260 | - movdqu IV, 0x40(OUTP) |
261 | - |
262 | movdqu 0x10(OUTP), INC |
263 | pxor INC, STATE2 |
264 | movdqu STATE2, 0x10(OUTP) |
265 | |
266 | - _aesni_gf128mul_x_ble() |
267 | - movdqa IV, STATE2 |
268 | - movdqu 0x50(INP), INC |
269 | - pxor INC, STATE2 |
270 | - movdqu IV, 0x50(OUTP) |
271 | - |
272 | movdqu 0x20(OUTP), INC |
273 | pxor INC, STATE3 |
274 | movdqu STATE3, 0x20(OUTP) |
275 | |
276 | - _aesni_gf128mul_x_ble() |
277 | - movdqa IV, STATE3 |
278 | - movdqu 0x60(INP), INC |
279 | - pxor INC, STATE3 |
280 | - movdqu IV, 0x60(OUTP) |
281 | - |
282 | movdqu 0x30(OUTP), INC |
283 | pxor INC, STATE4 |
284 | movdqu STATE4, 0x30(OUTP) |
285 | |
286 | _aesni_gf128mul_x_ble() |
287 | - movdqa IV, STATE4 |
288 | - movdqu 0x70(INP), INC |
289 | - pxor INC, STATE4 |
290 | - movdqu IV, 0x70(OUTP) |
291 | |
292 | - _aesni_gf128mul_x_ble() |
293 | + add $64, INP |
294 | + add $64, OUTP |
295 | + sub $64, LEN |
296 | + ja .Lxts_enc_loop4 |
297 | + |
298 | movups IV, (IVP) |
299 | |
300 | - CALL_NOSPEC %r11 |
301 | + FRAME_END |
302 | + ret |
303 | +ENDPROC(aesni_xts_encrypt) |
304 | + |
305 | +/* |
306 | + * void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, |
307 | + * const u8 *src, unsigned int len, le128 *iv) |
308 | + */ |
309 | +ENTRY(aesni_xts_decrypt) |
310 | + FRAME_BEGIN |
311 | + |
312 | + movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK |
313 | + movups (IVP), IV |
314 | + |
315 | + mov 480(KEYP), KLEN |
316 | + add $240, KEYP |
317 | |
318 | - movdqu 0x40(OUTP), INC |
319 | +.Lxts_dec_loop4: |
320 | + movdqa IV, STATE1 |
321 | + movdqu 0x00(INP), INC |
322 | pxor INC, STATE1 |
323 | - movdqu STATE1, 0x40(OUTP) |
324 | + movdqu IV, 0x00(OUTP) |
325 | |
326 | - movdqu 0x50(OUTP), INC |
327 | + _aesni_gf128mul_x_ble() |
328 | + movdqa IV, STATE2 |
329 | + movdqu 0x10(INP), INC |
330 | + pxor INC, STATE2 |
331 | + movdqu IV, 0x10(OUTP) |
332 | + |
333 | + _aesni_gf128mul_x_ble() |
334 | + movdqa IV, STATE3 |
335 | + movdqu 0x20(INP), INC |
336 | + pxor INC, STATE3 |
337 | + movdqu IV, 0x20(OUTP) |
338 | + |
339 | + _aesni_gf128mul_x_ble() |
340 | + movdqa IV, STATE4 |
341 | + movdqu 0x30(INP), INC |
342 | + pxor INC, STATE4 |
343 | + movdqu IV, 0x30(OUTP) |
344 | + |
345 | + call _aesni_dec4 |
346 | + |
347 | + movdqu 0x00(OUTP), INC |
348 | + pxor INC, STATE1 |
349 | + movdqu STATE1, 0x00(OUTP) |
350 | + |
351 | + movdqu 0x10(OUTP), INC |
352 | pxor INC, STATE2 |
353 | - movdqu STATE2, 0x50(OUTP) |
354 | + movdqu STATE2, 0x10(OUTP) |
355 | |
356 | - movdqu 0x60(OUTP), INC |
357 | + movdqu 0x20(OUTP), INC |
358 | pxor INC, STATE3 |
359 | - movdqu STATE3, 0x60(OUTP) |
360 | + movdqu STATE3, 0x20(OUTP) |
361 | |
362 | - movdqu 0x70(OUTP), INC |
363 | + movdqu 0x30(OUTP), INC |
364 | pxor INC, STATE4 |
365 | - movdqu STATE4, 0x70(OUTP) |
366 | + movdqu STATE4, 0x30(OUTP) |
367 | + |
368 | + _aesni_gf128mul_x_ble() |
369 | + |
370 | + add $64, INP |
371 | + add $64, OUTP |
372 | + sub $64, LEN |
373 | + ja .Lxts_dec_loop4 |
374 | + |
375 | + movups IV, (IVP) |
376 | |
377 | FRAME_END |
378 | ret |
379 | -ENDPROC(aesni_xts_crypt8) |
380 | +ENDPROC(aesni_xts_decrypt) |
381 | |
382 | #endif |
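Two independent changes land in aesni-intel_asm.S. First, every "cmp $0, %reg" used purely as a zero test becomes "test %reg, %reg", which sets the same flags for the following je/jl but has a shorter encoding, since no immediate byte is needed. Second, the eight-block aesni_xts_crypt8 helper is replaced by separate aesni_xts_encrypt/aesni_xts_decrypt routines that loop over four blocks at a time, advancing the tweak with _aesni_gf128mul_x_ble between iterations. That tweak update is multiplication by x in GF(2^128); a self-contained C model follows (the half-ordering is chosen here for illustration and is not the kernel's le128 layout):

	#include <stdint.h>

	/* A 128-bit XTS tweak as two 64-bit halves; lo holds bits 0..63. */
	struct tweak128 { uint64_t lo, hi; };

	/*
	 * Multiply by x modulo x^128 + x^7 + x^2 + x + 1: shift the whole
	 * value left one bit and fold the carried-out top bit back in as 0x87.
	 */
	static void gf128mul_x_model(struct tweak128 *t)
	{
		uint64_t carry = t->hi >> 63;

		t->hi = (t->hi << 1) | (t->lo >> 63);
		t->lo = (t->lo << 1) ^ (carry ? 0x87 : 0);
	}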
383 | diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S |
384 | index 91c039ab56999..4e4d34956170b 100644 |
385 | --- a/arch/x86/crypto/aesni-intel_avx-x86_64.S |
386 | +++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S |
387 | @@ -370,7 +370,7 @@ _initial_num_blocks_is_0\@: |
388 | |
389 | |
390 | _initial_blocks_encrypted\@: |
391 | - cmp $0, %r13 |
392 | + test %r13, %r13 |
393 | je _zero_cipher_left\@ |
394 | |
395 | sub $128, %r13 |
396 | @@ -529,7 +529,7 @@ _multiple_of_16_bytes\@: |
397 | vmovdqu HashKey(arg2), %xmm13 |
398 | |
399 | mov PBlockLen(arg2), %r12 |
400 | - cmp $0, %r12 |
401 | + test %r12, %r12 |
402 | je _partial_done\@ |
403 | |
404 | #GHASH computation for the last <16 Byte block |
405 | @@ -574,7 +574,7 @@ _T_8\@: |
406 | add $8, %r10 |
407 | sub $8, %r11 |
408 | vpsrldq $8, %xmm9, %xmm9 |
409 | - cmp $0, %r11 |
410 | + test %r11, %r11 |
411 | je _return_T_done\@ |
412 | _T_4\@: |
413 | vmovd %xmm9, %eax |
414 | @@ -582,7 +582,7 @@ _T_4\@: |
415 | add $4, %r10 |
416 | sub $4, %r11 |
417 | vpsrldq $4, %xmm9, %xmm9 |
418 | - cmp $0, %r11 |
419 | + test %r11, %r11 |
420 | je _return_T_done\@ |
421 | _T_123\@: |
422 | vmovd %xmm9, %eax |
423 | @@ -626,7 +626,7 @@ _get_AAD_blocks\@: |
424 | cmp $16, %r11 |
425 | jge _get_AAD_blocks\@ |
426 | vmovdqu \T8, \T7 |
427 | - cmp $0, %r11 |
428 | + test %r11, %r11 |
429 | je _get_AAD_done\@ |
430 | |
431 | vpxor \T7, \T7, \T7 |
432 | @@ -645,7 +645,7 @@ _get_AAD_rest8\@: |
433 | vpxor \T1, \T7, \T7 |
434 | jmp _get_AAD_rest8\@ |
435 | _get_AAD_rest4\@: |
436 | - cmp $0, %r11 |
437 | + test %r11, %r11 |
438 | jle _get_AAD_rest0\@ |
439 | mov (%r10), %eax |
440 | movq %rax, \T1 |
441 | @@ -750,7 +750,7 @@ _done_read_partial_block_\@: |
442 | .macro PARTIAL_BLOCK GHASH_MUL CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \ |
443 | AAD_HASH ENC_DEC |
444 | mov PBlockLen(arg2), %r13 |
445 | - cmp $0, %r13 |
446 | + test %r13, %r13 |
447 | je _partial_block_done_\@ # Leave Macro if no partial blocks |
448 | # Read in input data without over reading |
449 | cmp $16, \PLAIN_CYPH_LEN |
450 | @@ -802,7 +802,7 @@ _no_extra_mask_1_\@: |
451 | vpshufb %xmm2, %xmm3, %xmm3 |
452 | vpxor %xmm3, \AAD_HASH, \AAD_HASH |
453 | |
454 | - cmp $0, %r10 |
455 | + test %r10, %r10 |
456 | jl _partial_incomplete_1_\@ |
457 | |
458 | # GHASH computation for the last <16 Byte block |
459 | @@ -837,7 +837,7 @@ _no_extra_mask_2_\@: |
460 | vpshufb %xmm2, %xmm9, %xmm9 |
461 | vpxor %xmm9, \AAD_HASH, \AAD_HASH |
462 | |
463 | - cmp $0, %r10 |
464 | + test %r10, %r10 |
465 | jl _partial_incomplete_2_\@ |
466 | |
467 | # GHASH computation for the last <16 Byte block |
468 | @@ -857,7 +857,7 @@ _encode_done_\@: |
469 | vpshufb %xmm2, %xmm9, %xmm9 |
470 | .endif |
471 | # output encrypted Bytes |
472 | - cmp $0, %r10 |
473 | + test %r10, %r10 |
474 | jl _partial_fill_\@ |
475 | mov %r13, %r12 |
476 | mov $16, %r13 |
477 | diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c |
478 | index 88ad272aa2b46..18cfb76daa232 100644 |
479 | --- a/arch/x86/crypto/aesni-intel_glue.c |
480 | +++ b/arch/x86/crypto/aesni-intel_glue.c |
481 | @@ -83,10 +83,8 @@ struct gcm_context_data { |
482 | |
483 | asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, |
484 | unsigned int key_len); |
485 | -asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out, |
486 | - const u8 *in); |
487 | -asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out, |
488 | - const u8 *in); |
489 | +asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in); |
490 | +asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in); |
491 | asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out, |
492 | const u8 *in, unsigned int len); |
493 | asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out, |
494 | @@ -99,6 +97,12 @@ asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out, |
495 | #define AVX_GEN2_OPTSIZE 640 |
496 | #define AVX_GEN4_OPTSIZE 4096 |
497 | |
498 | +asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, |
499 | + const u8 *in, unsigned int len, u8 *iv); |
500 | + |
501 | +asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, |
502 | + const u8 *in, unsigned int len, u8 *iv); |
503 | + |
504 | #ifdef CONFIG_X86_64 |
505 | |
506 | static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out, |
507 | @@ -106,9 +110,6 @@ static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out, |
508 | asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out, |
509 | const u8 *in, unsigned int len, u8 *iv); |
510 | |
511 | -asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out, |
512 | - const u8 *in, bool enc, u8 *iv); |
513 | - |
514 | /* asmlinkage void aesni_gcm_enc() |
515 | * void *ctx, AES Key schedule. Starts on a 16 byte boundary. |
516 | * struct gcm_context_data. May be uninitialized. |
517 | @@ -550,29 +551,24 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key, |
518 | } |
519 | |
520 | |
521 | -static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in) |
522 | -{ |
523 | - aesni_enc(ctx, out, in); |
524 | -} |
525 | - |
526 | -static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
527 | +static void aesni_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) |
528 | { |
529 | - glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc)); |
530 | + glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_enc); |
531 | } |
532 | |
533 | -static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
534 | +static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) |
535 | { |
536 | - glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec)); |
537 | + glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec); |
538 | } |
539 | |
540 | -static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
541 | +static void aesni_xts_enc32(const void *ctx, u8 *dst, const u8 *src, le128 *iv) |
542 | { |
543 | - aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv); |
544 | + aesni_xts_encrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv); |
545 | } |
546 | |
547 | -static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
548 | +static void aesni_xts_dec32(const void *ctx, u8 *dst, const u8 *src, le128 *iv) |
549 | { |
550 | - aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv); |
551 | + aesni_xts_decrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv); |
552 | } |
553 | |
554 | static const struct common_glue_ctx aesni_enc_xts = { |
555 | @@ -580,11 +576,11 @@ static const struct common_glue_ctx aesni_enc_xts = { |
556 | .fpu_blocks_limit = 1, |
557 | |
558 | .funcs = { { |
559 | - .num_blocks = 8, |
560 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) } |
561 | + .num_blocks = 32, |
562 | + .fn_u = { .xts = aesni_xts_enc32 } |
563 | }, { |
564 | .num_blocks = 1, |
565 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) } |
566 | + .fn_u = { .xts = aesni_xts_enc } |
567 | } } |
568 | }; |
569 | |
570 | @@ -593,11 +589,11 @@ static const struct common_glue_ctx aesni_dec_xts = { |
571 | .fpu_blocks_limit = 1, |
572 | |
573 | .funcs = { { |
574 | - .num_blocks = 8, |
575 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) } |
576 | + .num_blocks = 32, |
577 | + .fn_u = { .xts = aesni_xts_dec32 } |
578 | }, { |
579 | .num_blocks = 1, |
580 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) } |
581 | + .fn_u = { .xts = aesni_xts_dec } |
582 | } } |
583 | }; |
584 | |
585 | @@ -606,8 +602,7 @@ static int xts_encrypt(struct skcipher_request *req) |
586 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
587 | struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
588 | |
589 | - return glue_xts_req_128bit(&aesni_enc_xts, req, |
590 | - XTS_TWEAK_CAST(aesni_xts_tweak), |
591 | + return glue_xts_req_128bit(&aesni_enc_xts, req, aesni_enc, |
592 | aes_ctx(ctx->raw_tweak_ctx), |
593 | aes_ctx(ctx->raw_crypt_ctx), |
594 | false); |
595 | @@ -618,8 +613,7 @@ static int xts_decrypt(struct skcipher_request *req) |
596 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
597 | struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
598 | |
599 | - return glue_xts_req_128bit(&aesni_dec_xts, req, |
600 | - XTS_TWEAK_CAST(aesni_xts_tweak), |
601 | + return glue_xts_req_128bit(&aesni_dec_xts, req, aesni_enc, |
602 | aes_ctx(ctx->raw_tweak_ctx), |
603 | aes_ctx(ctx->raw_crypt_ctx), |
604 | true); |
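On the glue side, the XTS tables now advertise num_blocks = 32 instead of 8, because the rewritten assembly loops internally and each call handles 32 * AES_BLOCK_SIZE bytes. Note also that xts_encrypt() and xts_decrypt() both pass aesni_enc directly as the tweak function, replacing the removed aesni_xts_tweak() wrapper: the XTS tweak is always produced by encryption, even on the decrypt path. For orientation, the dispatcher consumes such a table roughly like this (a simplified sketch of the walk in arch/x86/crypto/glue_helper.c, not a verbatim copy):

	/*
	 * Walk the function table from the widest batch down to single
	 * blocks, using each entry for as many whole batches as remain.
	 */
	for (i = 0; i < gctx->num_funcs; i++) {
		unsigned int func_bytes = bsize * gctx->funcs[i].num_blocks;

		while (nbytes >= func_bytes) {
			gctx->funcs[i].fn_u.ecb(ctx, dst, src);
			src += func_bytes;
			dst += func_bytes;
			nbytes -= func_bytes;
		}
	}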
605 | diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c |
606 | index a4f00128ea552..a8cc2c83fe1bb 100644 |
607 | --- a/arch/x86/crypto/camellia_aesni_avx2_glue.c |
608 | +++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c |
609 | @@ -19,20 +19,17 @@ |
610 | #define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32 |
611 | |
612 | /* 32-way AVX2/AES-NI parallel cipher functions */ |
613 | -asmlinkage void camellia_ecb_enc_32way(struct camellia_ctx *ctx, u8 *dst, |
614 | - const u8 *src); |
615 | -asmlinkage void camellia_ecb_dec_32way(struct camellia_ctx *ctx, u8 *dst, |
616 | - const u8 *src); |
617 | +asmlinkage void camellia_ecb_enc_32way(const void *ctx, u8 *dst, const u8 *src); |
618 | +asmlinkage void camellia_ecb_dec_32way(const void *ctx, u8 *dst, const u8 *src); |
619 | |
620 | -asmlinkage void camellia_cbc_dec_32way(struct camellia_ctx *ctx, u8 *dst, |
621 | - const u8 *src); |
622 | -asmlinkage void camellia_ctr_32way(struct camellia_ctx *ctx, u8 *dst, |
623 | - const u8 *src, le128 *iv); |
624 | +asmlinkage void camellia_cbc_dec_32way(const void *ctx, u8 *dst, const u8 *src); |
625 | +asmlinkage void camellia_ctr_32way(const void *ctx, u8 *dst, const u8 *src, |
626 | + le128 *iv); |
627 | |
628 | -asmlinkage void camellia_xts_enc_32way(struct camellia_ctx *ctx, u8 *dst, |
629 | - const u8 *src, le128 *iv); |
630 | -asmlinkage void camellia_xts_dec_32way(struct camellia_ctx *ctx, u8 *dst, |
631 | - const u8 *src, le128 *iv); |
632 | +asmlinkage void camellia_xts_enc_32way(const void *ctx, u8 *dst, const u8 *src, |
633 | + le128 *iv); |
634 | +asmlinkage void camellia_xts_dec_32way(const void *ctx, u8 *dst, const u8 *src, |
635 | + le128 *iv); |
636 | |
637 | static const struct common_glue_ctx camellia_enc = { |
638 | .num_funcs = 4, |
639 | @@ -40,16 +37,16 @@ static const struct common_glue_ctx camellia_enc = { |
640 | |
641 | .funcs = { { |
642 | .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, |
643 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_32way) } |
644 | + .fn_u = { .ecb = camellia_ecb_enc_32way } |
645 | }, { |
646 | .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, |
647 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_16way) } |
648 | + .fn_u = { .ecb = camellia_ecb_enc_16way } |
649 | }, { |
650 | .num_blocks = 2, |
651 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) } |
652 | + .fn_u = { .ecb = camellia_enc_blk_2way } |
653 | }, { |
654 | .num_blocks = 1, |
655 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) } |
656 | + .fn_u = { .ecb = camellia_enc_blk } |
657 | } } |
658 | }; |
659 | |
660 | @@ -59,16 +56,16 @@ static const struct common_glue_ctx camellia_ctr = { |
661 | |
662 | .funcs = { { |
663 | .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, |
664 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_32way) } |
665 | + .fn_u = { .ctr = camellia_ctr_32way } |
666 | }, { |
667 | .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, |
668 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_16way) } |
669 | + .fn_u = { .ctr = camellia_ctr_16way } |
670 | }, { |
671 | .num_blocks = 2, |
672 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) } |
673 | + .fn_u = { .ctr = camellia_crypt_ctr_2way } |
674 | }, { |
675 | .num_blocks = 1, |
676 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) } |
677 | + .fn_u = { .ctr = camellia_crypt_ctr } |
678 | } } |
679 | }; |
680 | |
681 | @@ -78,13 +75,13 @@ static const struct common_glue_ctx camellia_enc_xts = { |
682 | |
683 | .funcs = { { |
684 | .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, |
685 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_32way) } |
686 | + .fn_u = { .xts = camellia_xts_enc_32way } |
687 | }, { |
688 | .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, |
689 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_16way) } |
690 | + .fn_u = { .xts = camellia_xts_enc_16way } |
691 | }, { |
692 | .num_blocks = 1, |
693 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc) } |
694 | + .fn_u = { .xts = camellia_xts_enc } |
695 | } } |
696 | }; |
697 | |
698 | @@ -94,16 +91,16 @@ static const struct common_glue_ctx camellia_dec = { |
699 | |
700 | .funcs = { { |
701 | .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, |
702 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_32way) } |
703 | + .fn_u = { .ecb = camellia_ecb_dec_32way } |
704 | }, { |
705 | .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, |
706 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_16way) } |
707 | + .fn_u = { .ecb = camellia_ecb_dec_16way } |
708 | }, { |
709 | .num_blocks = 2, |
710 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) } |
711 | + .fn_u = { .ecb = camellia_dec_blk_2way } |
712 | }, { |
713 | .num_blocks = 1, |
714 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) } |
715 | + .fn_u = { .ecb = camellia_dec_blk } |
716 | } } |
717 | }; |
718 | |
719 | @@ -113,16 +110,16 @@ static const struct common_glue_ctx camellia_dec_cbc = { |
720 | |
721 | .funcs = { { |
722 | .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, |
723 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_32way) } |
724 | + .fn_u = { .cbc = camellia_cbc_dec_32way } |
725 | }, { |
726 | .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, |
727 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_16way) } |
728 | + .fn_u = { .cbc = camellia_cbc_dec_16way } |
729 | }, { |
730 | .num_blocks = 2, |
731 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) } |
732 | + .fn_u = { .cbc = camellia_decrypt_cbc_2way } |
733 | }, { |
734 | .num_blocks = 1, |
735 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) } |
736 | + .fn_u = { .cbc = camellia_dec_blk } |
737 | } } |
738 | }; |
739 | |
740 | @@ -132,13 +129,13 @@ static const struct common_glue_ctx camellia_dec_xts = { |
741 | |
742 | .funcs = { { |
743 | .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, |
744 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_32way) } |
745 | + .fn_u = { .xts = camellia_xts_dec_32way } |
746 | }, { |
747 | .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, |
748 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_16way) } |
749 | + .fn_u = { .xts = camellia_xts_dec_16way } |
750 | }, { |
751 | .num_blocks = 1, |
752 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec) } |
753 | + .fn_u = { .xts = camellia_xts_dec } |
754 | } } |
755 | }; |
756 | |
757 | @@ -161,8 +158,7 @@ static int ecb_decrypt(struct skcipher_request *req) |
758 | |
759 | static int cbc_encrypt(struct skcipher_request *req) |
760 | { |
761 | - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk), |
762 | - req); |
763 | + return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req); |
764 | } |
765 | |
766 | static int cbc_decrypt(struct skcipher_request *req) |
767 | @@ -180,8 +176,7 @@ static int xts_encrypt(struct skcipher_request *req) |
768 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
769 | struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
770 | |
771 | - return glue_xts_req_128bit(&camellia_enc_xts, req, |
772 | - XTS_TWEAK_CAST(camellia_enc_blk), |
773 | + return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk, |
774 | &ctx->tweak_ctx, &ctx->crypt_ctx, false); |
775 | } |
776 | |
777 | @@ -190,8 +185,7 @@ static int xts_decrypt(struct skcipher_request *req) |
778 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
779 | struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
780 | |
781 | - return glue_xts_req_128bit(&camellia_dec_xts, req, |
782 | - XTS_TWEAK_CAST(camellia_enc_blk), |
783 | + return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk, |
784 | &ctx->tweak_ctx, &ctx->crypt_ctx, true); |
785 | } |
786 | |
787 | diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c |
788 | index f28d282779b87..31a82a79f4ac9 100644 |
789 | --- a/arch/x86/crypto/camellia_aesni_avx_glue.c |
790 | +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c |
791 | @@ -18,41 +18,36 @@ |
792 | #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 |
793 | |
794 | /* 16-way parallel cipher functions (avx/aes-ni) */ |
795 | -asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst, |
796 | - const u8 *src); |
797 | +asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src); |
798 | EXPORT_SYMBOL_GPL(camellia_ecb_enc_16way); |
799 | |
800 | -asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst, |
801 | - const u8 *src); |
802 | +asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src); |
803 | EXPORT_SYMBOL_GPL(camellia_ecb_dec_16way); |
804 | |
805 | -asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst, |
806 | - const u8 *src); |
807 | +asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); |
808 | EXPORT_SYMBOL_GPL(camellia_cbc_dec_16way); |
809 | |
810 | -asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst, |
811 | - const u8 *src, le128 *iv); |
812 | +asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src, |
813 | + le128 *iv); |
814 | EXPORT_SYMBOL_GPL(camellia_ctr_16way); |
815 | |
816 | -asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst, |
817 | - const u8 *src, le128 *iv); |
818 | +asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src, |
819 | + le128 *iv); |
820 | EXPORT_SYMBOL_GPL(camellia_xts_enc_16way); |
821 | |
822 | -asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst, |
823 | - const u8 *src, le128 *iv); |
824 | +asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src, |
825 | + le128 *iv); |
826 | EXPORT_SYMBOL_GPL(camellia_xts_dec_16way); |
827 | |
828 | -void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
829 | +void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) |
830 | { |
831 | - glue_xts_crypt_128bit_one(ctx, dst, src, iv, |
832 | - GLUE_FUNC_CAST(camellia_enc_blk)); |
833 | + glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_enc_blk); |
834 | } |
835 | EXPORT_SYMBOL_GPL(camellia_xts_enc); |
836 | |
837 | -void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
838 | +void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) |
839 | { |
840 | - glue_xts_crypt_128bit_one(ctx, dst, src, iv, |
841 | - GLUE_FUNC_CAST(camellia_dec_blk)); |
842 | + glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_dec_blk); |
843 | } |
844 | EXPORT_SYMBOL_GPL(camellia_xts_dec); |
845 | |
846 | @@ -62,13 +57,13 @@ static const struct common_glue_ctx camellia_enc = { |
847 | |
848 | .funcs = { { |
849 | .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, |
850 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_16way) } |
851 | + .fn_u = { .ecb = camellia_ecb_enc_16way } |
852 | }, { |
853 | .num_blocks = 2, |
854 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) } |
855 | + .fn_u = { .ecb = camellia_enc_blk_2way } |
856 | }, { |
857 | .num_blocks = 1, |
858 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) } |
859 | + .fn_u = { .ecb = camellia_enc_blk } |
860 | } } |
861 | }; |
862 | |
863 | @@ -78,13 +73,13 @@ static const struct common_glue_ctx camellia_ctr = { |
864 | |
865 | .funcs = { { |
866 | .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, |
867 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_16way) } |
868 | + .fn_u = { .ctr = camellia_ctr_16way } |
869 | }, { |
870 | .num_blocks = 2, |
871 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) } |
872 | + .fn_u = { .ctr = camellia_crypt_ctr_2way } |
873 | }, { |
874 | .num_blocks = 1, |
875 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) } |
876 | + .fn_u = { .ctr = camellia_crypt_ctr } |
877 | } } |
878 | }; |
879 | |
880 | @@ -94,10 +89,10 @@ static const struct common_glue_ctx camellia_enc_xts = { |
881 | |
882 | .funcs = { { |
883 | .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, |
884 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_16way) } |
885 | + .fn_u = { .xts = camellia_xts_enc_16way } |
886 | }, { |
887 | .num_blocks = 1, |
888 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc) } |
889 | + .fn_u = { .xts = camellia_xts_enc } |
890 | } } |
891 | }; |
892 | |
893 | @@ -107,13 +102,13 @@ static const struct common_glue_ctx camellia_dec = { |
894 | |
895 | .funcs = { { |
896 | .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, |
897 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_16way) } |
898 | + .fn_u = { .ecb = camellia_ecb_dec_16way } |
899 | }, { |
900 | .num_blocks = 2, |
901 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) } |
902 | + .fn_u = { .ecb = camellia_dec_blk_2way } |
903 | }, { |
904 | .num_blocks = 1, |
905 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) } |
906 | + .fn_u = { .ecb = camellia_dec_blk } |
907 | } } |
908 | }; |
909 | |
910 | @@ -123,13 +118,13 @@ static const struct common_glue_ctx camellia_dec_cbc = { |
911 | |
912 | .funcs = { { |
913 | .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, |
914 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_16way) } |
915 | + .fn_u = { .cbc = camellia_cbc_dec_16way } |
916 | }, { |
917 | .num_blocks = 2, |
918 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) } |
919 | + .fn_u = { .cbc = camellia_decrypt_cbc_2way } |
920 | }, { |
921 | .num_blocks = 1, |
922 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) } |
923 | + .fn_u = { .cbc = camellia_dec_blk } |
924 | } } |
925 | }; |
926 | |
927 | @@ -139,10 +134,10 @@ static const struct common_glue_ctx camellia_dec_xts = { |
928 | |
929 | .funcs = { { |
930 | .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, |
931 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_16way) } |
932 | + .fn_u = { .xts = camellia_xts_dec_16way } |
933 | }, { |
934 | .num_blocks = 1, |
935 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec) } |
936 | + .fn_u = { .xts = camellia_xts_dec } |
937 | } } |
938 | }; |
939 | |
940 | @@ -165,8 +160,7 @@ static int ecb_decrypt(struct skcipher_request *req) |
941 | |
942 | static int cbc_encrypt(struct skcipher_request *req) |
943 | { |
944 | - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk), |
945 | - req); |
946 | + return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req); |
947 | } |
948 | |
949 | static int cbc_decrypt(struct skcipher_request *req) |
950 | @@ -206,8 +200,7 @@ static int xts_encrypt(struct skcipher_request *req) |
951 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
952 | struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
953 | |
954 | - return glue_xts_req_128bit(&camellia_enc_xts, req, |
955 | - XTS_TWEAK_CAST(camellia_enc_blk), |
956 | + return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk, |
957 | &ctx->tweak_ctx, &ctx->crypt_ctx, false); |
958 | } |
959 | |
960 | @@ -216,8 +209,7 @@ static int xts_decrypt(struct skcipher_request *req) |
961 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
962 | struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
963 | |
964 | - return glue_xts_req_128bit(&camellia_dec_xts, req, |
965 | - XTS_TWEAK_CAST(camellia_enc_blk), |
966 | + return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk, |
967 | &ctx->tweak_ctx, &ctx->crypt_ctx, true); |
968 | } |
969 | |
970 | diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c |
971 | index 7c62db56ffe1b..5f3ed5af68d70 100644 |
972 | --- a/arch/x86/crypto/camellia_glue.c |
973 | +++ b/arch/x86/crypto/camellia_glue.c |
974 | @@ -18,19 +18,17 @@ |
975 | #include <asm/crypto/glue_helper.h> |
976 | |
977 | /* regular block cipher functions */ |
978 | -asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst, |
979 | - const u8 *src, bool xor); |
980 | +asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src, |
981 | + bool xor); |
982 | EXPORT_SYMBOL_GPL(__camellia_enc_blk); |
983 | -asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst, |
984 | - const u8 *src); |
985 | +asmlinkage void camellia_dec_blk(const void *ctx, u8 *dst, const u8 *src); |
986 | EXPORT_SYMBOL_GPL(camellia_dec_blk); |
987 | |
988 | /* 2-way parallel cipher functions */ |
989 | -asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst, |
990 | - const u8 *src, bool xor); |
991 | +asmlinkage void __camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src, |
992 | + bool xor); |
993 | EXPORT_SYMBOL_GPL(__camellia_enc_blk_2way); |
994 | -asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst, |
995 | - const u8 *src); |
996 | +asmlinkage void camellia_dec_blk_2way(const void *ctx, u8 *dst, const u8 *src); |
997 | EXPORT_SYMBOL_GPL(camellia_dec_blk_2way); |
998 | |
999 | static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) |
1000 | @@ -1267,8 +1265,10 @@ static int camellia_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, |
1001 | return camellia_setkey(&tfm->base, key, key_len); |
1002 | } |
1003 | |
1004 | -void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src) |
1005 | +void camellia_decrypt_cbc_2way(const void *ctx, u8 *d, const u8 *s) |
1006 | { |
1007 | + u128 *dst = (u128 *)d; |
1008 | + const u128 *src = (const u128 *)s; |
1009 | u128 iv = *src; |
1010 | |
1011 | camellia_dec_blk_2way(ctx, (u8 *)dst, (u8 *)src); |
1012 | @@ -1277,9 +1277,11 @@ void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src) |
1013 | } |
1014 | EXPORT_SYMBOL_GPL(camellia_decrypt_cbc_2way); |
1015 | |
1016 | -void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
1017 | +void camellia_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) |
1018 | { |
1019 | be128 ctrblk; |
1020 | + u128 *dst = (u128 *)d; |
1021 | + const u128 *src = (const u128 *)s; |
1022 | |
1023 | if (dst != src) |
1024 | *dst = *src; |
1025 | @@ -1291,9 +1293,11 @@ void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
1026 | } |
1027 | EXPORT_SYMBOL_GPL(camellia_crypt_ctr); |
1028 | |
1029 | -void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
1030 | +void camellia_crypt_ctr_2way(const void *ctx, u8 *d, const u8 *s, le128 *iv) |
1031 | { |
1032 | be128 ctrblks[2]; |
1033 | + u128 *dst = (u128 *)d; |
1034 | + const u128 *src = (const u128 *)s; |
1035 | |
1036 | if (dst != src) { |
1037 | dst[0] = src[0]; |
1038 | @@ -1315,10 +1319,10 @@ static const struct common_glue_ctx camellia_enc = { |
1039 | |
1040 | .funcs = { { |
1041 | .num_blocks = 2, |
1042 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) } |
1043 | + .fn_u = { .ecb = camellia_enc_blk_2way } |
1044 | }, { |
1045 | .num_blocks = 1, |
1046 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) } |
1047 | + .fn_u = { .ecb = camellia_enc_blk } |
1048 | } } |
1049 | }; |
1050 | |
1051 | @@ -1328,10 +1332,10 @@ static const struct common_glue_ctx camellia_ctr = { |
1052 | |
1053 | .funcs = { { |
1054 | .num_blocks = 2, |
1055 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) } |
1056 | + .fn_u = { .ctr = camellia_crypt_ctr_2way } |
1057 | }, { |
1058 | .num_blocks = 1, |
1059 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) } |
1060 | + .fn_u = { .ctr = camellia_crypt_ctr } |
1061 | } } |
1062 | }; |
1063 | |
1064 | @@ -1341,10 +1345,10 @@ static const struct common_glue_ctx camellia_dec = { |
1065 | |
1066 | .funcs = { { |
1067 | .num_blocks = 2, |
1068 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) } |
1069 | + .fn_u = { .ecb = camellia_dec_blk_2way } |
1070 | }, { |
1071 | .num_blocks = 1, |
1072 | - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) } |
1073 | + .fn_u = { .ecb = camellia_dec_blk } |
1074 | } } |
1075 | }; |
1076 | |
1077 | @@ -1354,10 +1358,10 @@ static const struct common_glue_ctx camellia_dec_cbc = { |
1078 | |
1079 | .funcs = { { |
1080 | .num_blocks = 2, |
1081 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) } |
1082 | + .fn_u = { .cbc = camellia_decrypt_cbc_2way } |
1083 | }, { |
1084 | .num_blocks = 1, |
1085 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) } |
1086 | + .fn_u = { .cbc = camellia_dec_blk } |
1087 | } } |
1088 | }; |
1089 | |
1090 | @@ -1373,8 +1377,7 @@ static int ecb_decrypt(struct skcipher_request *req) |
1091 | |
1092 | static int cbc_encrypt(struct skcipher_request *req) |
1093 | { |
1094 | - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk), |
1095 | - req); |
1096 | + return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req); |
1097 | } |
1098 | |
1099 | static int cbc_decrypt(struct skcipher_request *req) |
1100 | diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c |
1101 | index a8a38fffb4a98..da5297475f9ec 100644 |
1102 | --- a/arch/x86/crypto/cast6_avx_glue.c |
1103 | +++ b/arch/x86/crypto/cast6_avx_glue.c |
1104 | @@ -20,20 +20,17 @@ |
1105 | |
1106 | #define CAST6_PARALLEL_BLOCKS 8 |
1107 | |
1108 | -asmlinkage void cast6_ecb_enc_8way(struct cast6_ctx *ctx, u8 *dst, |
1109 | - const u8 *src); |
1110 | -asmlinkage void cast6_ecb_dec_8way(struct cast6_ctx *ctx, u8 *dst, |
1111 | - const u8 *src); |
1112 | - |
1113 | -asmlinkage void cast6_cbc_dec_8way(struct cast6_ctx *ctx, u8 *dst, |
1114 | - const u8 *src); |
1115 | -asmlinkage void cast6_ctr_8way(struct cast6_ctx *ctx, u8 *dst, const u8 *src, |
1116 | +asmlinkage void cast6_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src); |
1117 | +asmlinkage void cast6_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src); |
1118 | + |
1119 | +asmlinkage void cast6_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src); |
1120 | +asmlinkage void cast6_ctr_8way(const void *ctx, u8 *dst, const u8 *src, |
1121 | le128 *iv); |
1122 | |
1123 | -asmlinkage void cast6_xts_enc_8way(struct cast6_ctx *ctx, u8 *dst, |
1124 | - const u8 *src, le128 *iv); |
1125 | -asmlinkage void cast6_xts_dec_8way(struct cast6_ctx *ctx, u8 *dst, |
1126 | - const u8 *src, le128 *iv); |
1127 | +asmlinkage void cast6_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src, |
1128 | + le128 *iv); |
1129 | +asmlinkage void cast6_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src, |
1130 | + le128 *iv); |
1131 | |
1132 | static int cast6_setkey_skcipher(struct crypto_skcipher *tfm, |
1133 | const u8 *key, unsigned int keylen) |
1134 | @@ -41,21 +38,21 @@ static int cast6_setkey_skcipher(struct crypto_skcipher *tfm, |
1135 | return cast6_setkey(&tfm->base, key, keylen); |
1136 | } |
1137 | |
1138 | -static void cast6_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
1139 | +static void cast6_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) |
1140 | { |
1141 | - glue_xts_crypt_128bit_one(ctx, dst, src, iv, |
1142 | - GLUE_FUNC_CAST(__cast6_encrypt)); |
1143 | + glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_encrypt); |
1144 | } |
1145 | |
1146 | -static void cast6_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
1147 | +static void cast6_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) |
1148 | { |
1149 | - glue_xts_crypt_128bit_one(ctx, dst, src, iv, |
1150 | - GLUE_FUNC_CAST(__cast6_decrypt)); |
1151 | + glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_decrypt); |
1152 | } |
1153 | |
1154 | -static void cast6_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
1155 | +static void cast6_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) |
1156 | { |
1157 | be128 ctrblk; |
1158 | + u128 *dst = (u128 *)d; |
1159 | + const u128 *src = (const u128 *)s; |
1160 | |
1161 | le128_to_be128(&ctrblk, iv); |
1162 | le128_inc(iv); |
1163 | @@ -70,10 +67,10 @@ static const struct common_glue_ctx cast6_enc = { |
1164 | |
1165 | .funcs = { { |
1166 | .num_blocks = CAST6_PARALLEL_BLOCKS, |
1167 | - .fn_u = { .ecb = GLUE_FUNC_CAST(cast6_ecb_enc_8way) } |
1168 | + .fn_u = { .ecb = cast6_ecb_enc_8way } |
1169 | }, { |
1170 | .num_blocks = 1, |
1171 | - .fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_encrypt) } |
1172 | + .fn_u = { .ecb = __cast6_encrypt } |
1173 | } } |
1174 | }; |
1175 | |
1176 | @@ -83,10 +80,10 @@ static const struct common_glue_ctx cast6_ctr = { |
1177 | |
1178 | .funcs = { { |
1179 | .num_blocks = CAST6_PARALLEL_BLOCKS, |
1180 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_ctr_8way) } |
1181 | + .fn_u = { .ctr = cast6_ctr_8way } |
1182 | }, { |
1183 | .num_blocks = 1, |
1184 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_crypt_ctr) } |
1185 | + .fn_u = { .ctr = cast6_crypt_ctr } |
1186 | } } |
1187 | }; |
1188 | |
1189 | @@ -96,10 +93,10 @@ static const struct common_glue_ctx cast6_enc_xts = { |
1190 | |
1191 | .funcs = { { |
1192 | .num_blocks = CAST6_PARALLEL_BLOCKS, |
1193 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_enc_8way) } |
1194 | + .fn_u = { .xts = cast6_xts_enc_8way } |
1195 | }, { |
1196 | .num_blocks = 1, |
1197 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_enc) } |
1198 | + .fn_u = { .xts = cast6_xts_enc } |
1199 | } } |
1200 | }; |
1201 | |
1202 | @@ -109,10 +106,10 @@ static const struct common_glue_ctx cast6_dec = { |
1203 | |
1204 | .funcs = { { |
1205 | .num_blocks = CAST6_PARALLEL_BLOCKS, |
1206 | - .fn_u = { .ecb = GLUE_FUNC_CAST(cast6_ecb_dec_8way) } |
1207 | + .fn_u = { .ecb = cast6_ecb_dec_8way } |
1208 | }, { |
1209 | .num_blocks = 1, |
1210 | - .fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_decrypt) } |
1211 | + .fn_u = { .ecb = __cast6_decrypt } |
1212 | } } |
1213 | }; |
1214 | |
1215 | @@ -122,10 +119,10 @@ static const struct common_glue_ctx cast6_dec_cbc = { |
1216 | |
1217 | .funcs = { { |
1218 | .num_blocks = CAST6_PARALLEL_BLOCKS, |
1219 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(cast6_cbc_dec_8way) } |
1220 | + .fn_u = { .cbc = cast6_cbc_dec_8way } |
1221 | }, { |
1222 | .num_blocks = 1, |
1223 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__cast6_decrypt) } |
1224 | + .fn_u = { .cbc = __cast6_decrypt } |
1225 | } } |
1226 | }; |
1227 | |
1228 | @@ -135,10 +132,10 @@ static const struct common_glue_ctx cast6_dec_xts = { |
1229 | |
1230 | .funcs = { { |
1231 | .num_blocks = CAST6_PARALLEL_BLOCKS, |
1232 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_dec_8way) } |
1233 | + .fn_u = { .xts = cast6_xts_dec_8way } |
1234 | }, { |
1235 | .num_blocks = 1, |
1236 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_dec) } |
1237 | + .fn_u = { .xts = cast6_xts_dec } |
1238 | } } |
1239 | }; |
1240 | |
1241 | @@ -154,8 +151,7 @@ static int ecb_decrypt(struct skcipher_request *req) |
1242 | |
1243 | static int cbc_encrypt(struct skcipher_request *req) |
1244 | { |
1245 | - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__cast6_encrypt), |
1246 | - req); |
1247 | + return glue_cbc_encrypt_req_128bit(__cast6_encrypt, req); |
1248 | } |
1249 | |
1250 | static int cbc_decrypt(struct skcipher_request *req) |
1251 | @@ -199,8 +195,7 @@ static int xts_encrypt(struct skcipher_request *req) |
1252 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
1253 | struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
1254 | |
1255 | - return glue_xts_req_128bit(&cast6_enc_xts, req, |
1256 | - XTS_TWEAK_CAST(__cast6_encrypt), |
1257 | + return glue_xts_req_128bit(&cast6_enc_xts, req, __cast6_encrypt, |
1258 | &ctx->tweak_ctx, &ctx->crypt_ctx, false); |
1259 | } |
1260 | |
1261 | @@ -209,8 +204,7 @@ static int xts_decrypt(struct skcipher_request *req) |
1262 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
1263 | struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
1264 | |
1265 | - return glue_xts_req_128bit(&cast6_dec_xts, req, |
1266 | - XTS_TWEAK_CAST(__cast6_encrypt), |
1267 | + return glue_xts_req_128bit(&cast6_dec_xts, req, __cast6_encrypt, |
1268 | &ctx->tweak_ctx, &ctx->crypt_ctx, true); |
1269 | } |
1270 | |
1271 | diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c |
1272 | index d15b99397480b..d3d91a0abf88f 100644 |
1273 | --- a/arch/x86/crypto/glue_helper.c |
1274 | +++ b/arch/x86/crypto/glue_helper.c |
1275 | @@ -134,7 +134,8 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx, |
1276 | src -= num_blocks - 1; |
1277 | dst -= num_blocks - 1; |
1278 | |
1279 | - gctx->funcs[i].fn_u.cbc(ctx, dst, src); |
1280 | + gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst, |
1281 | + (const u8 *)src); |
1282 | |
1283 | nbytes -= func_bytes; |
1284 | if (nbytes < bsize) |
1285 | @@ -188,7 +189,9 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx, |
1286 | |
1287 | /* Process multi-block batch */ |
1288 | do { |
1289 | - gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk); |
1290 | + gctx->funcs[i].fn_u.ctr(ctx, (u8 *)dst, |
1291 | + (const u8 *)src, |
1292 | + &ctrblk); |
1293 | src += num_blocks; |
1294 | dst += num_blocks; |
1295 | nbytes -= func_bytes; |
1296 | @@ -210,7 +213,8 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx, |
1297 | |
1298 | be128_to_le128(&ctrblk, (be128 *)walk.iv); |
1299 | memcpy(&tmp, walk.src.virt.addr, nbytes); |
1300 | - gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, &tmp, &tmp, |
1301 | + gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, (u8 *)&tmp, |
1302 | + (const u8 *)&tmp, |
1303 | &ctrblk); |
1304 | memcpy(walk.dst.virt.addr, &tmp, nbytes); |
1305 | le128_to_be128((be128 *)walk.iv, &ctrblk); |
1306 | @@ -240,7 +244,8 @@ static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx, |
1307 | |
1308 | if (nbytes >= func_bytes) { |
1309 | do { |
1310 | - gctx->funcs[i].fn_u.xts(ctx, dst, src, |
1311 | + gctx->funcs[i].fn_u.xts(ctx, (u8 *)dst, |
1312 | + (const u8 *)src, |
1313 | walk->iv); |
1314 | |
1315 | src += num_blocks; |
1316 | @@ -354,8 +359,8 @@ out: |
1317 | } |
1318 | EXPORT_SYMBOL_GPL(glue_xts_req_128bit); |
1319 | |
1320 | -void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv, |
1321 | - common_glue_func_t fn) |
1322 | +void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst, const u8 *src, |
1323 | + le128 *iv, common_glue_func_t fn) |
1324 | { |
1325 | le128 ivblk = *iv; |
1326 | |
1327 | @@ -363,13 +368,13 @@ void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv, |
1328 | gf128mul_x_ble(iv, &ivblk); |
1329 | |
1330 | /* CC <- T xor C */ |
1331 | - u128_xor(dst, src, (u128 *)&ivblk); |
1332 | + u128_xor((u128 *)dst, (const u128 *)src, (u128 *)&ivblk); |
1333 | |
1334 | /* PP <- D(Key2,CC) */ |
1335 | - fn(ctx, (u8 *)dst, (u8 *)dst); |
1336 | + fn(ctx, dst, dst); |
1337 | |
1338 | /* P <- T xor PP */ |
1339 | - u128_xor(dst, dst, (u128 *)&ivblk); |
1340 | + u128_xor((u128 *)dst, (u128 *)dst, (u128 *)&ivblk); |
1341 | } |
1342 | EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one); |
1343 | |
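The recurring pattern in glue_helper.c and in every cipher glue file touched by this patch is prototype regularization: the ecb/cbc/ctr/xts helpers all take (const void *ctx, u8 *dst, const u8 *src[, le128 *iv]), the GLUE_FUNC_CAST/GLUE_CBC_FUNC_CAST/GLUE_CTR_FUNC_CAST/GLUE_XTS_FUNC_CAST and XTS_TWEAK_CAST macros disappear, and any pointer narrowing happens inside the callee. Calling a function through a pointer of a mismatched type is undefined behaviour in C, and it defeats indirect-call checkers such as Control-Flow Integrity, which is the motivation for this upstream cleanup series. A minimal illustration of the pattern, using hypothetical names:

	#include <stdint.h>

	/* One common prototype shared by every single-block handler. */
	typedef void (*ecb_fn)(const void *ctx, uint8_t *dst, const uint8_t *src);

	struct toy_ctx { uint8_t key; };

	/*
	 * The callee narrows its own context pointer; the function pointer
	 * itself is never cast, so its type always matches the call site.
	 */
	static void toy_enc_blk(const void *ctx, uint8_t *dst, const uint8_t *src)
	{
		const struct toy_ctx *c = ctx;

		dst[0] = src[0] ^ c->key;	/* stand-in for a real cipher */
	}

	static const ecb_fn toy_table[] = { toy_enc_blk };	/* no cast needed */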
1344 | diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c |
1345 | index 13fd8d3d2da00..f973ace44ad35 100644 |
1346 | --- a/arch/x86/crypto/serpent_avx2_glue.c |
1347 | +++ b/arch/x86/crypto/serpent_avx2_glue.c |
1348 | @@ -19,18 +19,16 @@ |
1349 | #define SERPENT_AVX2_PARALLEL_BLOCKS 16 |
1350 | |
1351 | /* 16-way AVX2 parallel cipher functions */ |
1352 | -asmlinkage void serpent_ecb_enc_16way(struct serpent_ctx *ctx, u8 *dst, |
1353 | - const u8 *src); |
1354 | -asmlinkage void serpent_ecb_dec_16way(struct serpent_ctx *ctx, u8 *dst, |
1355 | - const u8 *src); |
1356 | -asmlinkage void serpent_cbc_dec_16way(void *ctx, u128 *dst, const u128 *src); |
1357 | +asmlinkage void serpent_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src); |
1358 | +asmlinkage void serpent_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src); |
1359 | +asmlinkage void serpent_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); |
1360 | |
1361 | -asmlinkage void serpent_ctr_16way(void *ctx, u128 *dst, const u128 *src, |
1362 | +asmlinkage void serpent_ctr_16way(const void *ctx, u8 *dst, const u8 *src, |
1363 | le128 *iv); |
1364 | -asmlinkage void serpent_xts_enc_16way(struct serpent_ctx *ctx, u8 *dst, |
1365 | - const u8 *src, le128 *iv); |
1366 | -asmlinkage void serpent_xts_dec_16way(struct serpent_ctx *ctx, u8 *dst, |
1367 | - const u8 *src, le128 *iv); |
1368 | +asmlinkage void serpent_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src, |
1369 | + le128 *iv); |
1370 | +asmlinkage void serpent_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src, |
1371 | + le128 *iv); |
1372 | |
1373 | static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, |
1374 | const u8 *key, unsigned int keylen) |
1375 | @@ -44,13 +42,13 @@ static const struct common_glue_ctx serpent_enc = { |
1376 | |
1377 | .funcs = { { |
1378 | .num_blocks = 16, |
1379 | - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_16way) } |
1380 | + .fn_u = { .ecb = serpent_ecb_enc_16way } |
1381 | }, { |
1382 | .num_blocks = 8, |
1383 | - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) } |
1384 | + .fn_u = { .ecb = serpent_ecb_enc_8way_avx } |
1385 | }, { |
1386 | .num_blocks = 1, |
1387 | - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) } |
1388 | + .fn_u = { .ecb = __serpent_encrypt } |
1389 | } } |
1390 | }; |
1391 | |
1392 | @@ -60,13 +58,13 @@ static const struct common_glue_ctx serpent_ctr = { |
1393 | |
1394 | .funcs = { { |
1395 | .num_blocks = 16, |
1396 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_16way) } |
1397 | + .fn_u = { .ctr = serpent_ctr_16way } |
1398 | }, { |
1399 | .num_blocks = 8, |
1400 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) } |
1401 | + .fn_u = { .ctr = serpent_ctr_8way_avx } |
1402 | }, { |
1403 | .num_blocks = 1, |
1404 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) } |
1405 | + .fn_u = { .ctr = __serpent_crypt_ctr } |
1406 | } } |
1407 | }; |
1408 | |
1409 | @@ -76,13 +74,13 @@ static const struct common_glue_ctx serpent_enc_xts = { |
1410 | |
1411 | .funcs = { { |
1412 | .num_blocks = 16, |
1413 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_16way) } |
1414 | + .fn_u = { .xts = serpent_xts_enc_16way } |
1415 | }, { |
1416 | .num_blocks = 8, |
1417 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) } |
1418 | + .fn_u = { .xts = serpent_xts_enc_8way_avx } |
1419 | }, { |
1420 | .num_blocks = 1, |
1421 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) } |
1422 | + .fn_u = { .xts = serpent_xts_enc } |
1423 | } } |
1424 | }; |
1425 | |
1426 | @@ -92,13 +90,13 @@ static const struct common_glue_ctx serpent_dec = { |
1427 | |
1428 | .funcs = { { |
1429 | .num_blocks = 16, |
1430 | - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_16way) } |
1431 | + .fn_u = { .ecb = serpent_ecb_dec_16way } |
1432 | }, { |
1433 | .num_blocks = 8, |
1434 | - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) } |
1435 | + .fn_u = { .ecb = serpent_ecb_dec_8way_avx } |
1436 | }, { |
1437 | .num_blocks = 1, |
1438 | - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) } |
1439 | + .fn_u = { .ecb = __serpent_decrypt } |
1440 | } } |
1441 | }; |
1442 | |
1443 | @@ -108,13 +106,13 @@ static const struct common_glue_ctx serpent_dec_cbc = { |
1444 | |
1445 | .funcs = { { |
1446 | .num_blocks = 16, |
1447 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_16way) } |
1448 | + .fn_u = { .cbc = serpent_cbc_dec_16way } |
1449 | }, { |
1450 | .num_blocks = 8, |
1451 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) } |
1452 | + .fn_u = { .cbc = serpent_cbc_dec_8way_avx } |
1453 | }, { |
1454 | .num_blocks = 1, |
1455 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) } |
1456 | + .fn_u = { .cbc = __serpent_decrypt } |
1457 | } } |
1458 | }; |
1459 | |
1460 | @@ -124,13 +122,13 @@ static const struct common_glue_ctx serpent_dec_xts = { |
1461 | |
1462 | .funcs = { { |
1463 | .num_blocks = 16, |
1464 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_16way) } |
1465 | + .fn_u = { .xts = serpent_xts_dec_16way } |
1466 | }, { |
1467 | .num_blocks = 8, |
1468 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) } |
1469 | + .fn_u = { .xts = serpent_xts_dec_8way_avx } |
1470 | }, { |
1471 | .num_blocks = 1, |
1472 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) } |
1473 | + .fn_u = { .xts = serpent_xts_dec } |
1474 | } } |
1475 | }; |
1476 | |
1477 | @@ -146,8 +144,7 @@ static int ecb_decrypt(struct skcipher_request *req) |
1478 | |
1479 | static int cbc_encrypt(struct skcipher_request *req) |
1480 | { |
1481 | - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt), |
1482 | - req); |
1483 | + return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req); |
1484 | } |
1485 | |
1486 | static int cbc_decrypt(struct skcipher_request *req) |
1487 | @@ -166,8 +163,8 @@ static int xts_encrypt(struct skcipher_request *req) |
1488 | struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
1489 | |
1490 | return glue_xts_req_128bit(&serpent_enc_xts, req, |
1491 | - XTS_TWEAK_CAST(__serpent_encrypt), |
1492 | - &ctx->tweak_ctx, &ctx->crypt_ctx, false); |
1493 | + __serpent_encrypt, &ctx->tweak_ctx, |
1494 | + &ctx->crypt_ctx, false); |
1495 | } |
1496 | |
1497 | static int xts_decrypt(struct skcipher_request *req) |
1498 | @@ -176,8 +173,8 @@ static int xts_decrypt(struct skcipher_request *req) |
1499 | struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
1500 | |
1501 | return glue_xts_req_128bit(&serpent_dec_xts, req, |
1502 | - XTS_TWEAK_CAST(__serpent_encrypt), |
1503 | - &ctx->tweak_ctx, &ctx->crypt_ctx, true); |
1504 | + __serpent_encrypt, &ctx->tweak_ctx, |
1505 | + &ctx->crypt_ctx, true); |
1506 | } |
1507 | |
1508 | static struct skcipher_alg serpent_algs[] = { |
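
A side note on the two hunks directly above: xts_decrypt still passes __serpent_encrypt as the tweak function, and that is deliberate — in XTS the per-sector tweak is always produced by encrypting the IV; only the data blocks use the decrypting primitive. Successive tweaks then come from doubling in GF(2^128), little-endian convention (IEEE P1619); the kernel helper for that step is gf128mul_x_ble. A minimal standalone sketch, with xts_next_tweak_stub as our illustrative name, not a kernel API:

#include <stdint.h>
#include <stdio.h>

/* t[0] = low 64 bits, t[1] = high 64 bits of the 128-bit tweak */
static void xts_next_tweak_stub(uint64_t t[2])
{
	uint64_t carry = t[1] >> 63;            /* bit 127 about to shift out */

	t[1] = (t[1] << 1) | (t[0] >> 63);
	t[0] = (t[0] << 1) ^ (carry * 0x87);    /* reduce mod x^128+x^7+x^2+x+1 */
}

int main(void)
{
	uint64_t t[2] = { 1, 0 };               /* E_K2(iv) would normally go here */
	int i;

	for (i = 0; i < 128; i++)               /* after 128 doublings: x^128 = 0x87 */
		xts_next_tweak_stub(t);
	printf("0x%llx\n", (unsigned long long)t[0]);   /* prints 0x87 */
	return 0;
}
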
1509 | diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c |
1510 | index 7d3dca38a5a2e..7806d1cbe8541 100644 |
1511 | --- a/arch/x86/crypto/serpent_avx_glue.c |
1512 | +++ b/arch/x86/crypto/serpent_avx_glue.c |
1513 | @@ -20,33 +20,35 @@ |
1514 | #include <asm/crypto/serpent-avx.h> |
1515 | |
1516 | /* 8-way parallel cipher functions */ |
1517 | -asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
1518 | +asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst, |
1519 | const u8 *src); |
1520 | EXPORT_SYMBOL_GPL(serpent_ecb_enc_8way_avx); |
1521 | |
1522 | -asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
1523 | +asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst, |
1524 | const u8 *src); |
1525 | EXPORT_SYMBOL_GPL(serpent_ecb_dec_8way_avx); |
1526 | |
1527 | -asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
1528 | +asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst, |
1529 | const u8 *src); |
1530 | EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx); |
1531 | |
1532 | -asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
1533 | - const u8 *src, le128 *iv); |
1534 | +asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src, |
1535 | + le128 *iv); |
1536 | EXPORT_SYMBOL_GPL(serpent_ctr_8way_avx); |
1537 | |
1538 | -asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
1539 | +asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst, |
1540 | const u8 *src, le128 *iv); |
1541 | EXPORT_SYMBOL_GPL(serpent_xts_enc_8way_avx); |
1542 | |
1543 | -asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
1544 | +asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst, |
1545 | const u8 *src, le128 *iv); |
1546 | EXPORT_SYMBOL_GPL(serpent_xts_dec_8way_avx); |
1547 | |
1548 | -void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
1549 | +void __serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) |
1550 | { |
1551 | be128 ctrblk; |
1552 | + u128 *dst = (u128 *)d; |
1553 | + const u128 *src = (const u128 *)s; |
1554 | |
1555 | le128_to_be128(&ctrblk, iv); |
1556 | le128_inc(iv); |
1557 | @@ -56,17 +58,15 @@ void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
1558 | } |
1559 | EXPORT_SYMBOL_GPL(__serpent_crypt_ctr); |
1560 | |
1561 | -void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
1562 | +void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) |
1563 | { |
1564 | - glue_xts_crypt_128bit_one(ctx, dst, src, iv, |
1565 | - GLUE_FUNC_CAST(__serpent_encrypt)); |
1566 | + glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_encrypt); |
1567 | } |
1568 | EXPORT_SYMBOL_GPL(serpent_xts_enc); |
1569 | |
1570 | -void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
1571 | +void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) |
1572 | { |
1573 | - glue_xts_crypt_128bit_one(ctx, dst, src, iv, |
1574 | - GLUE_FUNC_CAST(__serpent_decrypt)); |
1575 | + glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_decrypt); |
1576 | } |
1577 | EXPORT_SYMBOL_GPL(serpent_xts_dec); |
1578 | |
1579 | @@ -102,10 +102,10 @@ static const struct common_glue_ctx serpent_enc = { |
1580 | |
1581 | .funcs = { { |
1582 | .num_blocks = SERPENT_PARALLEL_BLOCKS, |
1583 | - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) } |
1584 | + .fn_u = { .ecb = serpent_ecb_enc_8way_avx } |
1585 | }, { |
1586 | .num_blocks = 1, |
1587 | - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) } |
1588 | + .fn_u = { .ecb = __serpent_encrypt } |
1589 | } } |
1590 | }; |
1591 | |
1592 | @@ -115,10 +115,10 @@ static const struct common_glue_ctx serpent_ctr = { |
1593 | |
1594 | .funcs = { { |
1595 | .num_blocks = SERPENT_PARALLEL_BLOCKS, |
1596 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) } |
1597 | + .fn_u = { .ctr = serpent_ctr_8way_avx } |
1598 | }, { |
1599 | .num_blocks = 1, |
1600 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) } |
1601 | + .fn_u = { .ctr = __serpent_crypt_ctr } |
1602 | } } |
1603 | }; |
1604 | |
1605 | @@ -128,10 +128,10 @@ static const struct common_glue_ctx serpent_enc_xts = { |
1606 | |
1607 | .funcs = { { |
1608 | .num_blocks = SERPENT_PARALLEL_BLOCKS, |
1609 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) } |
1610 | + .fn_u = { .xts = serpent_xts_enc_8way_avx } |
1611 | }, { |
1612 | .num_blocks = 1, |
1613 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) } |
1614 | + .fn_u = { .xts = serpent_xts_enc } |
1615 | } } |
1616 | }; |
1617 | |
1618 | @@ -141,10 +141,10 @@ static const struct common_glue_ctx serpent_dec = { |
1619 | |
1620 | .funcs = { { |
1621 | .num_blocks = SERPENT_PARALLEL_BLOCKS, |
1622 | - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) } |
1623 | + .fn_u = { .ecb = serpent_ecb_dec_8way_avx } |
1624 | }, { |
1625 | .num_blocks = 1, |
1626 | - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) } |
1627 | + .fn_u = { .ecb = __serpent_decrypt } |
1628 | } } |
1629 | }; |
1630 | |
1631 | @@ -154,10 +154,10 @@ static const struct common_glue_ctx serpent_dec_cbc = { |
1632 | |
1633 | .funcs = { { |
1634 | .num_blocks = SERPENT_PARALLEL_BLOCKS, |
1635 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) } |
1636 | + .fn_u = { .cbc = serpent_cbc_dec_8way_avx } |
1637 | }, { |
1638 | .num_blocks = 1, |
1639 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) } |
1640 | + .fn_u = { .cbc = __serpent_decrypt } |
1641 | } } |
1642 | }; |
1643 | |
1644 | @@ -167,10 +167,10 @@ static const struct common_glue_ctx serpent_dec_xts = { |
1645 | |
1646 | .funcs = { { |
1647 | .num_blocks = SERPENT_PARALLEL_BLOCKS, |
1648 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) } |
1649 | + .fn_u = { .xts = serpent_xts_dec_8way_avx } |
1650 | }, { |
1651 | .num_blocks = 1, |
1652 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) } |
1653 | + .fn_u = { .xts = serpent_xts_dec } |
1654 | } } |
1655 | }; |
1656 | |
1657 | @@ -186,8 +186,7 @@ static int ecb_decrypt(struct skcipher_request *req) |
1658 | |
1659 | static int cbc_encrypt(struct skcipher_request *req) |
1660 | { |
1661 | - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt), |
1662 | - req); |
1663 | + return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req); |
1664 | } |
1665 | |
1666 | static int cbc_decrypt(struct skcipher_request *req) |
1667 | @@ -206,8 +205,8 @@ static int xts_encrypt(struct skcipher_request *req) |
1668 | struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
1669 | |
1670 | return glue_xts_req_128bit(&serpent_enc_xts, req, |
1671 | - XTS_TWEAK_CAST(__serpent_encrypt), |
1672 | - &ctx->tweak_ctx, &ctx->crypt_ctx, false); |
1673 | + __serpent_encrypt, &ctx->tweak_ctx, |
1674 | + &ctx->crypt_ctx, false); |
1675 | } |
1676 | |
1677 | static int xts_decrypt(struct skcipher_request *req) |
1678 | @@ -216,8 +215,8 @@ static int xts_decrypt(struct skcipher_request *req) |
1679 | struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
1680 | |
1681 | return glue_xts_req_128bit(&serpent_dec_xts, req, |
1682 | - XTS_TWEAK_CAST(__serpent_encrypt), |
1683 | - &ctx->tweak_ctx, &ctx->crypt_ctx, true); |
1684 | + __serpent_encrypt, &ctx->tweak_ctx, |
1685 | + &ctx->crypt_ctx, true); |
1686 | } |
1687 | |
1688 | static struct skcipher_alg serpent_algs[] = { |
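
The __serpent_crypt_ctr hunk in this file is the scalar CTR fallback: build a big-endian counter block from the little-endian IV, encrypt it, XOR the keystream into the data, then bump the IV. A simplified standalone sketch follows; the le128/be128 byte-swapping is elided, and toy_encrypt_stub / crypt_ctr_stub are our illustrative names, not kernel APIs.

#include <stdint.h>
#include <string.h>

typedef struct { uint64_t a, b; } u128;     /* a = low qword in this stub */

static void toy_encrypt_stub(const void *ctx, uint8_t *dst, const uint8_t *src)
{
	(void)ctx;
	memcpy(dst, src, 16);
	dst[0] ^= 0x5a;                         /* placeholder "cipher" */
}

static void crypt_ctr_stub(const void *ctx, uint8_t *dst, const uint8_t *src,
			   u128 *iv)
{
	u128 ctrblk = *iv;     /* kernel: le128_to_be128(&ctrblk, iv); swap elided */
	u128 keystream;
	const uint8_t *ks = (const uint8_t *)&keystream;
	unsigned int i;

	if (++iv->a == 0)      /* kernel: le128_inc(iv) — 128-bit increment */
		iv->b++;

	toy_encrypt_stub(ctx, (uint8_t *)&keystream, (const uint8_t *)&ctrblk);
	for (i = 0; i < 16; i++)
		dst[i] = src[i] ^ ks[i];        /* XOR keystream into the data */
}

int main(void)
{
	u128 iv = { 0, 0 };
	uint8_t in[16] = { 0 }, out[16];

	crypt_ctr_stub(0, out, in, &iv);
	return iv.a == 1 ? 0 : 1;               /* IV advanced by one block */
}
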
1689 | diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c |
1690 | index 5fdf1931d0690..4fed8d26b91a4 100644 |
1691 | --- a/arch/x86/crypto/serpent_sse2_glue.c |
1692 | +++ b/arch/x86/crypto/serpent_sse2_glue.c |
1693 | @@ -31,9 +31,11 @@ static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, |
1694 | return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen); |
1695 | } |
1696 | |
1697 | -static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) |
1698 | +static void serpent_decrypt_cbc_xway(const void *ctx, u8 *d, const u8 *s) |
1699 | { |
1700 | u128 ivs[SERPENT_PARALLEL_BLOCKS - 1]; |
1701 | + u128 *dst = (u128 *)d; |
1702 | + const u128 *src = (const u128 *)s; |
1703 | unsigned int j; |
1704 | |
1705 | for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++) |
1706 | @@ -45,9 +47,11 @@ static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) |
1707 | u128_xor(dst + (j + 1), dst + (j + 1), ivs + j); |
1708 | } |
1709 | |
1710 | -static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
1711 | +static void serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) |
1712 | { |
1713 | be128 ctrblk; |
1714 | + u128 *dst = (u128 *)d; |
1715 | + const u128 *src = (const u128 *)s; |
1716 | |
1717 | le128_to_be128(&ctrblk, iv); |
1718 | le128_inc(iv); |
1719 | @@ -56,10 +60,12 @@ static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
1720 | u128_xor(dst, src, (u128 *)&ctrblk); |
1721 | } |
1722 | |
1723 | -static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src, |
1724 | +static void serpent_crypt_ctr_xway(const void *ctx, u8 *d, const u8 *s, |
1725 | le128 *iv) |
1726 | { |
1727 | be128 ctrblks[SERPENT_PARALLEL_BLOCKS]; |
1728 | + u128 *dst = (u128 *)d; |
1729 | + const u128 *src = (const u128 *)s; |
1730 | unsigned int i; |
1731 | |
1732 | for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) { |
1733 | @@ -79,10 +85,10 @@ static const struct common_glue_ctx serpent_enc = { |
1734 | |
1735 | .funcs = { { |
1736 | .num_blocks = SERPENT_PARALLEL_BLOCKS, |
1737 | - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) } |
1738 | + .fn_u = { .ecb = serpent_enc_blk_xway } |
1739 | }, { |
1740 | .num_blocks = 1, |
1741 | - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) } |
1742 | + .fn_u = { .ecb = __serpent_encrypt } |
1743 | } } |
1744 | }; |
1745 | |
1746 | @@ -92,10 +98,10 @@ static const struct common_glue_ctx serpent_ctr = { |
1747 | |
1748 | .funcs = { { |
1749 | .num_blocks = SERPENT_PARALLEL_BLOCKS, |
1750 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) } |
1751 | + .fn_u = { .ctr = serpent_crypt_ctr_xway } |
1752 | }, { |
1753 | .num_blocks = 1, |
1754 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) } |
1755 | + .fn_u = { .ctr = serpent_crypt_ctr } |
1756 | } } |
1757 | }; |
1758 | |
1759 | @@ -105,10 +111,10 @@ static const struct common_glue_ctx serpent_dec = { |
1760 | |
1761 | .funcs = { { |
1762 | .num_blocks = SERPENT_PARALLEL_BLOCKS, |
1763 | - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) } |
1764 | + .fn_u = { .ecb = serpent_dec_blk_xway } |
1765 | }, { |
1766 | .num_blocks = 1, |
1767 | - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) } |
1768 | + .fn_u = { .ecb = __serpent_decrypt } |
1769 | } } |
1770 | }; |
1771 | |
1772 | @@ -118,10 +124,10 @@ static const struct common_glue_ctx serpent_dec_cbc = { |
1773 | |
1774 | .funcs = { { |
1775 | .num_blocks = SERPENT_PARALLEL_BLOCKS, |
1776 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) } |
1777 | + .fn_u = { .cbc = serpent_decrypt_cbc_xway } |
1778 | }, { |
1779 | .num_blocks = 1, |
1780 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) } |
1781 | + .fn_u = { .cbc = __serpent_decrypt } |
1782 | } } |
1783 | }; |
1784 | |
1785 | @@ -137,7 +143,7 @@ static int ecb_decrypt(struct skcipher_request *req) |
1786 | |
1787 | static int cbc_encrypt(struct skcipher_request *req) |
1788 | { |
1789 | - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt), |
1790 | + return glue_cbc_encrypt_req_128bit(__serpent_encrypt, |
1791 | req); |
1792 | } |
1793 | |
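
serpent_decrypt_cbc_xway above relies on a small but important ordering trick for in-place parallel CBC decryption: the ciphertext blocks that will be needed as chaining values are copied into ivs[] before the wide decrypt runs, because dst may alias src. A standalone sketch under that assumption (4 blocks, toy cipher; all *_stub/toy_* names are ours):

#include <stdint.h>

#define NBLK 4
typedef struct { uint64_t a, b; } u128;

static void u128_xor_stub(u128 *r, const u128 *p, const u128 *q)
{
	r->a = p->a ^ q->a;
	r->b = p->b ^ q->b;
}

/* stand-in for the wide SIMD decrypt; works in place (dst may equal src) */
static void toy_dec_xway(const void *ctx, u128 *dst, const u128 *src)
{
	unsigned int j;

	(void)ctx;
	for (j = 0; j < NBLK; j++) {
		dst[j].a = src[j].a ^ 0x55;
		dst[j].b = src[j].b;
	}
}

static void cbc_dec_xway_stub(const void *ctx, u128 *dst, const u128 *src)
{
	u128 ivs[NBLK - 1];
	unsigned int j;

	for (j = 0; j < NBLK - 1; j++)
		ivs[j] = src[j];               /* saved before dst is written */

	toy_dec_xway(ctx, dst, src);           /* decrypt all blocks at once */

	for (j = 0; j < NBLK - 1; j++)         /* chain with saved ciphertext */
		u128_xor_stub(dst + (j + 1), dst + (j + 1), ivs + j);
	/* dst[0] is chained with the previous IV by the caller */
}

int main(void)
{
	u128 buf[NBLK] = { { 1, 0 }, { 2, 0 }, { 3, 0 }, { 4, 0 } };

	cbc_dec_xway_stub(0, buf, buf);        /* in place, as the glue allows */
	return 0;
}
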
1794 | diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c |
1795 | index d561c821788b7..3b36e97ec7abb 100644 |
1796 | --- a/arch/x86/crypto/twofish_avx_glue.c |
1797 | +++ b/arch/x86/crypto/twofish_avx_glue.c |
1798 | @@ -22,20 +22,17 @@ |
1799 | #define TWOFISH_PARALLEL_BLOCKS 8 |
1800 | |
1801 | /* 8-way parallel cipher functions */ |
1802 | -asmlinkage void twofish_ecb_enc_8way(struct twofish_ctx *ctx, u8 *dst, |
1803 | - const u8 *src); |
1804 | -asmlinkage void twofish_ecb_dec_8way(struct twofish_ctx *ctx, u8 *dst, |
1805 | - const u8 *src); |
1806 | +asmlinkage void twofish_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src); |
1807 | +asmlinkage void twofish_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src); |
1808 | |
1809 | -asmlinkage void twofish_cbc_dec_8way(struct twofish_ctx *ctx, u8 *dst, |
1810 | - const u8 *src); |
1811 | -asmlinkage void twofish_ctr_8way(struct twofish_ctx *ctx, u8 *dst, |
1812 | - const u8 *src, le128 *iv); |
1813 | +asmlinkage void twofish_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src); |
1814 | +asmlinkage void twofish_ctr_8way(const void *ctx, u8 *dst, const u8 *src, |
1815 | + le128 *iv); |
1816 | |
1817 | -asmlinkage void twofish_xts_enc_8way(struct twofish_ctx *ctx, u8 *dst, |
1818 | - const u8 *src, le128 *iv); |
1819 | -asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst, |
1820 | - const u8 *src, le128 *iv); |
1821 | +asmlinkage void twofish_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src, |
1822 | + le128 *iv); |
1823 | +asmlinkage void twofish_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src, |
1824 | + le128 *iv); |
1825 | |
1826 | static int twofish_setkey_skcipher(struct crypto_skcipher *tfm, |
1827 | const u8 *key, unsigned int keylen) |
1828 | @@ -43,22 +40,19 @@ static int twofish_setkey_skcipher(struct crypto_skcipher *tfm, |
1829 | return twofish_setkey(&tfm->base, key, keylen); |
1830 | } |
1831 | |
1832 | -static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, |
1833 | - const u8 *src) |
1834 | +static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src) |
1835 | { |
1836 | __twofish_enc_blk_3way(ctx, dst, src, false); |
1837 | } |
1838 | |
1839 | -static void twofish_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
1840 | +static void twofish_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) |
1841 | { |
1842 | - glue_xts_crypt_128bit_one(ctx, dst, src, iv, |
1843 | - GLUE_FUNC_CAST(twofish_enc_blk)); |
1844 | + glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_enc_blk); |
1845 | } |
1846 | |
1847 | -static void twofish_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
1848 | +static void twofish_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) |
1849 | { |
1850 | - glue_xts_crypt_128bit_one(ctx, dst, src, iv, |
1851 | - GLUE_FUNC_CAST(twofish_dec_blk)); |
1852 | + glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_dec_blk); |
1853 | } |
1854 | |
1855 | struct twofish_xts_ctx { |
1856 | @@ -93,13 +87,13 @@ static const struct common_glue_ctx twofish_enc = { |
1857 | |
1858 | .funcs = { { |
1859 | .num_blocks = TWOFISH_PARALLEL_BLOCKS, |
1860 | - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_enc_8way) } |
1861 | + .fn_u = { .ecb = twofish_ecb_enc_8way } |
1862 | }, { |
1863 | .num_blocks = 3, |
1864 | - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) } |
1865 | + .fn_u = { .ecb = twofish_enc_blk_3way } |
1866 | }, { |
1867 | .num_blocks = 1, |
1868 | - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) } |
1869 | + .fn_u = { .ecb = twofish_enc_blk } |
1870 | } } |
1871 | }; |
1872 | |
1873 | @@ -109,13 +103,13 @@ static const struct common_glue_ctx twofish_ctr = { |
1874 | |
1875 | .funcs = { { |
1876 | .num_blocks = TWOFISH_PARALLEL_BLOCKS, |
1877 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_ctr_8way) } |
1878 | + .fn_u = { .ctr = twofish_ctr_8way } |
1879 | }, { |
1880 | .num_blocks = 3, |
1881 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_3way) } |
1882 | + .fn_u = { .ctr = twofish_enc_blk_ctr_3way } |
1883 | }, { |
1884 | .num_blocks = 1, |
1885 | - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr) } |
1886 | + .fn_u = { .ctr = twofish_enc_blk_ctr } |
1887 | } } |
1888 | }; |
1889 | |
1890 | @@ -125,10 +119,10 @@ static const struct common_glue_ctx twofish_enc_xts = { |
1891 | |
1892 | .funcs = { { |
1893 | .num_blocks = TWOFISH_PARALLEL_BLOCKS, |
1894 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc_8way) } |
1895 | + .fn_u = { .xts = twofish_xts_enc_8way } |
1896 | }, { |
1897 | .num_blocks = 1, |
1898 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc) } |
1899 | + .fn_u = { .xts = twofish_xts_enc } |
1900 | } } |
1901 | }; |
1902 | |
1903 | @@ -138,13 +132,13 @@ static const struct common_glue_ctx twofish_dec = { |
1904 | |
1905 | .funcs = { { |
1906 | .num_blocks = TWOFISH_PARALLEL_BLOCKS, |
1907 | - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_dec_8way) } |
1908 | + .fn_u = { .ecb = twofish_ecb_dec_8way } |
1909 | }, { |
1910 | .num_blocks = 3, |
1911 | - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) } |
1912 | + .fn_u = { .ecb = twofish_dec_blk_3way } |
1913 | }, { |
1914 | .num_blocks = 1, |
1915 | - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) } |
1916 | + .fn_u = { .ecb = twofish_dec_blk } |
1917 | } } |
1918 | }; |
1919 | |
1920 | @@ -154,13 +148,13 @@ static const struct common_glue_ctx twofish_dec_cbc = { |
1921 | |
1922 | .funcs = { { |
1923 | .num_blocks = TWOFISH_PARALLEL_BLOCKS, |
1924 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_cbc_dec_8way) } |
1925 | + .fn_u = { .cbc = twofish_cbc_dec_8way } |
1926 | }, { |
1927 | .num_blocks = 3, |
1928 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) } |
1929 | + .fn_u = { .cbc = twofish_dec_blk_cbc_3way } |
1930 | }, { |
1931 | .num_blocks = 1, |
1932 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) } |
1933 | + .fn_u = { .cbc = twofish_dec_blk } |
1934 | } } |
1935 | }; |
1936 | |
1937 | @@ -170,10 +164,10 @@ static const struct common_glue_ctx twofish_dec_xts = { |
1938 | |
1939 | .funcs = { { |
1940 | .num_blocks = TWOFISH_PARALLEL_BLOCKS, |
1941 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec_8way) } |
1942 | + .fn_u = { .xts = twofish_xts_dec_8way } |
1943 | }, { |
1944 | .num_blocks = 1, |
1945 | - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec) } |
1946 | + .fn_u = { .xts = twofish_xts_dec } |
1947 | } } |
1948 | }; |
1949 | |
1950 | @@ -189,8 +183,7 @@ static int ecb_decrypt(struct skcipher_request *req) |
1951 | |
1952 | static int cbc_encrypt(struct skcipher_request *req) |
1953 | { |
1954 | - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk), |
1955 | - req); |
1956 | + return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req); |
1957 | } |
1958 | |
1959 | static int cbc_decrypt(struct skcipher_request *req) |
1960 | @@ -208,8 +201,7 @@ static int xts_encrypt(struct skcipher_request *req) |
1961 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
1962 | struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
1963 | |
1964 | - return glue_xts_req_128bit(&twofish_enc_xts, req, |
1965 | - XTS_TWEAK_CAST(twofish_enc_blk), |
1966 | + return glue_xts_req_128bit(&twofish_enc_xts, req, twofish_enc_blk, |
1967 | &ctx->tweak_ctx, &ctx->crypt_ctx, false); |
1968 | } |
1969 | |
1970 | @@ -218,8 +210,7 @@ static int xts_decrypt(struct skcipher_request *req) |
1971 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
1972 | struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
1973 | |
1974 | - return glue_xts_req_128bit(&twofish_dec_xts, req, |
1975 | - XTS_TWEAK_CAST(twofish_enc_blk), |
1976 | + return glue_xts_req_128bit(&twofish_dec_xts, req, twofish_enc_blk, |
1977 | &ctx->tweak_ctx, &ctx->crypt_ctx, true); |
1978 | } |
1979 | |
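
The common_glue_ctx tables above (8-way, 3-way, then single-block entries for twofish) encode a simple dispatch ladder: for the blocks remaining, use the widest helper that still fits, then fall through to narrower ones. The sketch below is a simplification of the walk glue_helper performs, not a copy of it; func_entry_stub, ecb_walk_stub and fn_n are our names.

#include <stddef.h>

/* mirrors common_glue_func_entry: widest helper first, 1-block entry last */
struct func_entry_stub {
	unsigned int num_blocks;
	void (*fn)(const void *ctx, unsigned char *dst,
		   const unsigned char *src);
};

static void fn_n(const void *ctx, unsigned char *dst, const unsigned char *src)
{
	(void)ctx; (void)dst; (void)src;       /* stand-in for 8/3/1-way code */
}

static void ecb_walk_stub(const struct func_entry_stub *funcs,
			  const void *ctx, unsigned char *dst,
			  const unsigned char *src, size_t nblocks)
{
	while (nblocks) {
		const struct func_entry_stub *f = funcs;

		while (f->num_blocks > nblocks) /* skip helpers that overshoot */
			f++;
		f->fn(ctx, dst, src);
		dst += f->num_blocks * 16;      /* 16-byte cipher blocks */
		src += f->num_blocks * 16;
		nblocks -= f->num_blocks;
	}
}

int main(void)
{
	const struct func_entry_stub funcs[] = {
		{ 8, fn_n }, { 3, fn_n }, { 1, fn_n },
	};
	unsigned char buf[13 * 16] = { 0 };

	/* 13 blocks -> one 8-way call, one 3-way call, two 1-way calls */
	ecb_walk_stub(funcs, 0, buf, buf, 13);
	return 0;
}
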
1980 | diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c |
1981 | index 1dc9e29f221e8..768af6075479c 100644 |
1982 | --- a/arch/x86/crypto/twofish_glue_3way.c |
1983 | +++ b/arch/x86/crypto/twofish_glue_3way.c |
1984 | @@ -25,21 +25,22 @@ static int twofish_setkey_skcipher(struct crypto_skcipher *tfm, |
1985 | return twofish_setkey(&tfm->base, key, keylen); |
1986 | } |
1987 | |
1988 | -static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, |
1989 | - const u8 *src) |
1990 | +static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src) |
1991 | { |
1992 | __twofish_enc_blk_3way(ctx, dst, src, false); |
1993 | } |
1994 | |
1995 | -static inline void twofish_enc_blk_xor_3way(struct twofish_ctx *ctx, u8 *dst, |
1996 | +static inline void twofish_enc_blk_xor_3way(const void *ctx, u8 *dst, |
1997 | const u8 *src) |
1998 | { |
1999 | __twofish_enc_blk_3way(ctx, dst, src, true); |
2000 | } |
2001 | |
2002 | -void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src) |
2003 | +void twofish_dec_blk_cbc_3way(const void *ctx, u8 *d, const u8 *s) |
2004 | { |
2005 | u128 ivs[2]; |
2006 | + u128 *dst = (u128 *)d; |
2007 | + const u128 *src = (const u128 *)s; |
2008 | |
2009 | ivs[0] = src[0]; |
2010 | ivs[1] = src[1]; |
2011 | @@ -51,9 +52,11 @@ void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src) |
2012 | } |
2013 | EXPORT_SYMBOL_GPL(twofish_dec_blk_cbc_3way); |
2014 | |
2015 | -void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
2016 | +void twofish_enc_blk_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) |
2017 | { |
2018 | be128 ctrblk; |
2019 | + u128 *dst = (u128 *)d; |
2020 | + const u128 *src = (const u128 *)s; |
2021 | |
2022 | if (dst != src) |
2023 | *dst = *src; |
2024 | @@ -66,10 +69,11 @@ void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) |
2025 | } |
2026 | EXPORT_SYMBOL_GPL(twofish_enc_blk_ctr); |
2027 | |
2028 | -void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src, |
2029 | - le128 *iv) |
2030 | +void twofish_enc_blk_ctr_3way(const void *ctx, u8 *d, const u8 *s, le128 *iv) |
2031 | { |
2032 | be128 ctrblks[3]; |
2033 | + u128 *dst = (u128 *)d; |
2034 | + const u128 *src = (const u128 *)s; |
2035 | |
2036 | if (dst != src) { |
2037 | dst[0] = src[0]; |
2038 | @@ -94,10 +98,10 @@ static const struct common_glue_ctx twofish_enc = { |
2039 | |
2040 | .funcs = { { |
2041 | .num_blocks = 3, |
2042 | - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) } |
2043 | + .fn_u = { .ecb = twofish_enc_blk_3way } |
2044 | }, { |
2045 | .num_blocks = 1, |
2046 | - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) } |
2047 | + .fn_u = { .ecb = twofish_enc_blk } |
2048 | } } |
2049 | }; |
2050 | |
2051 | @@ -107,10 +111,10 @@ static const struct common_glue_ctx twofish_ctr = { |
2052 | |
2053 | .funcs = { { |
2054 | .num_blocks = 3, |
2055 | - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr_3way) } |
2056 | + .fn_u = { .ctr = twofish_enc_blk_ctr_3way } |
2057 | }, { |
2058 | .num_blocks = 1, |
2059 | - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr) } |
2060 | + .fn_u = { .ctr = twofish_enc_blk_ctr } |
2061 | } } |
2062 | }; |
2063 | |
2064 | @@ -120,10 +124,10 @@ static const struct common_glue_ctx twofish_dec = { |
2065 | |
2066 | .funcs = { { |
2067 | .num_blocks = 3, |
2068 | - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) } |
2069 | + .fn_u = { .ecb = twofish_dec_blk_3way } |
2070 | }, { |
2071 | .num_blocks = 1, |
2072 | - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) } |
2073 | + .fn_u = { .ecb = twofish_dec_blk } |
2074 | } } |
2075 | }; |
2076 | |
2077 | @@ -133,10 +137,10 @@ static const struct common_glue_ctx twofish_dec_cbc = { |
2078 | |
2079 | .funcs = { { |
2080 | .num_blocks = 3, |
2081 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) } |
2082 | + .fn_u = { .cbc = twofish_dec_blk_cbc_3way } |
2083 | }, { |
2084 | .num_blocks = 1, |
2085 | - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) } |
2086 | + .fn_u = { .cbc = twofish_dec_blk } |
2087 | } } |
2088 | }; |
2089 | |
2090 | @@ -152,8 +156,7 @@ static int ecb_decrypt(struct skcipher_request *req) |
2091 | |
2092 | static int cbc_encrypt(struct skcipher_request *req) |
2093 | { |
2094 | - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk), |
2095 | - req); |
2096 | + return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req); |
2097 | } |
2098 | |
2099 | static int cbc_decrypt(struct skcipher_request *req) |
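
Worth flagging in the twofish_ctr hunk above: the old table stored a CTR-typed helper in the .ecb union slot, and the GLUE_*_CAST macros hid that mismatch; with the casts gone the union fields are properly typed and such slips become compile errors. A minimal illustration (prototypes mirror glue_helper.h; fn_u_stub and ctr_helper are our names):

#include <stdint.h>

typedef uint8_t u8;
typedef struct { uint64_t b, a; } le128;

typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src);
typedef void (*common_glue_ctr_func_t)(const void *ctx, u8 *dst,
				       const u8 *src, le128 *iv);

union fn_u_stub {
	common_glue_func_t ecb;
	common_glue_ctr_func_t ctr;
};

static void ctr_helper(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
	(void)ctx; (void)dst; (void)src; (void)iv;
}

int main(void)
{
	union fn_u_stub u;

	u.ctr = ctr_helper;       /* OK: pointer types match exactly */
	/* u.ecb = ctr_helper; */ /* now a compile error instead of silent UB */
	return u.ctr == ctr_helper ? 0 : 1;
}
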
2100 | diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/include/asm/crypto/camellia.h |
2101 | index a5d86fc0593f2..f1592619dd651 100644 |
2102 | --- a/arch/x86/include/asm/crypto/camellia.h |
2103 | +++ b/arch/x86/include/asm/crypto/camellia.h |
2104 | @@ -32,65 +32,60 @@ extern int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key, |
2105 | unsigned int keylen); |
2106 | |
2107 | /* regular block cipher functions */ |
2108 | -asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst, |
2109 | - const u8 *src, bool xor); |
2110 | -asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst, |
2111 | - const u8 *src); |
2112 | +asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src, |
2113 | + bool xor); |
2114 | +asmlinkage void camellia_dec_blk(const void *ctx, u8 *dst, const u8 *src); |
2115 | |
2116 | /* 2-way parallel cipher functions */ |
2117 | -asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst, |
2118 | - const u8 *src, bool xor); |
2119 | -asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst, |
2120 | - const u8 *src); |
2121 | +asmlinkage void __camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src, |
2122 | + bool xor); |
2123 | +asmlinkage void camellia_dec_blk_2way(const void *ctx, u8 *dst, const u8 *src); |
2124 | |
2125 | /* 16-way parallel cipher functions (avx/aes-ni) */ |
2126 | -asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst, |
2127 | - const u8 *src); |
2128 | -asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst, |
2129 | - const u8 *src); |
2130 | - |
2131 | -asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst, |
2132 | - const u8 *src); |
2133 | -asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst, |
2134 | - const u8 *src, le128 *iv); |
2135 | - |
2136 | -asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst, |
2137 | - const u8 *src, le128 *iv); |
2138 | -asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst, |
2139 | - const u8 *src, le128 *iv); |
2140 | - |
2141 | -static inline void camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst, |
2142 | - const u8 *src) |
2143 | +asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src); |
2144 | +asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src); |
2145 | + |
2146 | +asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); |
2147 | +asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src, |
2148 | + le128 *iv); |
2149 | + |
2150 | +asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src, |
2151 | + le128 *iv); |
2152 | +asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src, |
2153 | + le128 *iv); |
2154 | + |
2155 | +static inline void camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src) |
2156 | { |
2157 | __camellia_enc_blk(ctx, dst, src, false); |
2158 | } |
2159 | |
2160 | -static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst, |
2161 | - const u8 *src) |
2162 | +static inline void camellia_enc_blk_xor(const void *ctx, u8 *dst, const u8 *src) |
2163 | { |
2164 | __camellia_enc_blk(ctx, dst, src, true); |
2165 | } |
2166 | |
2167 | -static inline void camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst, |
2168 | +static inline void camellia_enc_blk_2way(const void *ctx, u8 *dst, |
2169 | const u8 *src) |
2170 | { |
2171 | __camellia_enc_blk_2way(ctx, dst, src, false); |
2172 | } |
2173 | |
2174 | -static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst, |
2175 | +static inline void camellia_enc_blk_xor_2way(const void *ctx, u8 *dst, |
2176 | const u8 *src) |
2177 | { |
2178 | __camellia_enc_blk_2way(ctx, dst, src, true); |
2179 | } |
2180 | |
2181 | /* glue helpers */ |
2182 | -extern void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src); |
2183 | -extern void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, |
2184 | +extern void camellia_decrypt_cbc_2way(const void *ctx, u8 *dst, const u8 *src); |
2185 | +extern void camellia_crypt_ctr(const void *ctx, u8 *dst, const u8 *src, |
2186 | le128 *iv); |
2187 | -extern void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src, |
2188 | +extern void camellia_crypt_ctr_2way(const void *ctx, u8 *dst, const u8 *src, |
2189 | le128 *iv); |
2190 | |
2191 | -extern void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv); |
2192 | -extern void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv); |
2193 | +extern void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src, |
2194 | + le128 *iv); |
2195 | +extern void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src, |
2196 | + le128 *iv); |
2197 | |
2198 | #endif /* ASM_X86_CAMELLIA_H */ |
2199 | diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h |
2200 | index 8d4a8e1226ee3..777c0f63418c8 100644 |
2201 | --- a/arch/x86/include/asm/crypto/glue_helper.h |
2202 | +++ b/arch/x86/include/asm/crypto/glue_helper.h |
2203 | @@ -11,18 +11,13 @@ |
2204 | #include <asm/fpu/api.h> |
2205 | #include <crypto/b128ops.h> |
2206 | |
2207 | -typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src); |
2208 | -typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src); |
2209 | -typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src, |
2210 | +typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src); |
2211 | +typedef void (*common_glue_cbc_func_t)(const void *ctx, u8 *dst, const u8 *src); |
2212 | +typedef void (*common_glue_ctr_func_t)(const void *ctx, u8 *dst, const u8 *src, |
2213 | le128 *iv); |
2214 | -typedef void (*common_glue_xts_func_t)(void *ctx, u128 *dst, const u128 *src, |
2215 | +typedef void (*common_glue_xts_func_t)(const void *ctx, u8 *dst, const u8 *src, |
2216 | le128 *iv); |
2217 | |
2218 | -#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn)) |
2219 | -#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn)) |
2220 | -#define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn)) |
2221 | -#define GLUE_XTS_FUNC_CAST(fn) ((common_glue_xts_func_t)(fn)) |
2222 | - |
2223 | struct common_glue_func_entry { |
2224 | unsigned int num_blocks; /* number of blocks that @fn will process */ |
2225 | union { |
2226 | @@ -116,7 +111,8 @@ extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx, |
2227 | common_glue_func_t tweak_fn, void *tweak_ctx, |
2228 | void *crypt_ctx, bool decrypt); |
2229 | |
2230 | -extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, |
2231 | - le128 *iv, common_glue_func_t fn); |
2232 | +extern void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst, |
2233 | + const u8 *src, le128 *iv, |
2234 | + common_glue_func_t fn); |
2235 | |
2236 | #endif /* _CRYPTO_GLUE_HELPER_H */ |
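
This header hunk is the center of the whole crypto portion of the patch: every helper now natively matches the common_glue_*_t typedefs, so the GLUE_*_CAST macros can go. The motivation, as I read the upstream series, is that calling a function through a pointer of an incompatible type is undefined behavior in C (C11 6.3.2.3p8) and is rejected at runtime by Control-Flow Integrity schemes. A standalone before/after sketch with invented toy names, not kernel code:

#include <stdint.h>

typedef uint8_t u8;
struct toy_ctx { uint32_t key; };

typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src);

/* old shape: typed ctx — assigning this to common_glue_func_t needed a cast */
static void enc_typed(struct toy_ctx *ctx, u8 *dst, const u8 *src)
{
	dst[0] = src[0] ^ (u8)ctx->key;
}

/* new shape: matches the typedef exactly; no cast, no UB, CFI-friendly */
static void enc_generic(const void *c, u8 *dst, const u8 *src)
{
	const struct toy_ctx *ctx = c;

	dst[0] = src[0] ^ (u8)ctx->key;
}

int main(void)
{
	struct toy_ctx ctx = { 0x5a };
	u8 in = 1, out;
	common_glue_func_t fn;

	/* fn = enc_typed; */                   /* constraint violation */
	fn = (common_glue_func_t)enc_typed;     /* compiles, but UB to call */
	fn = enc_generic;                       /* the post-patch way */
	fn(&ctx, &out, &in);
	return out == (1 ^ 0x5a) ? 0 : 1;
}
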
2237 | diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h |
2238 | index db7c9cc322342..251c2c89d7cfe 100644 |
2239 | --- a/arch/x86/include/asm/crypto/serpent-avx.h |
2240 | +++ b/arch/x86/include/asm/crypto/serpent-avx.h |
2241 | @@ -15,26 +15,26 @@ struct serpent_xts_ctx { |
2242 | struct serpent_ctx crypt_ctx; |
2243 | }; |
2244 | |
2245 | -asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
2246 | +asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst, |
2247 | const u8 *src); |
2248 | -asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
2249 | +asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst, |
2250 | const u8 *src); |
2251 | |
2252 | -asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
2253 | +asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst, |
2254 | const u8 *src); |
2255 | -asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
2256 | - const u8 *src, le128 *iv); |
2257 | +asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src, |
2258 | + le128 *iv); |
2259 | |
2260 | -asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
2261 | +asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst, |
2262 | const u8 *src, le128 *iv); |
2263 | -asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, |
2264 | +asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst, |
2265 | const u8 *src, le128 *iv); |
2266 | |
2267 | -extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, |
2268 | +extern void __serpent_crypt_ctr(const void *ctx, u8 *dst, const u8 *src, |
2269 | le128 *iv); |
2270 | |
2271 | -extern void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv); |
2272 | -extern void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv); |
2273 | +extern void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv); |
2274 | +extern void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv); |
2275 | |
2276 | extern int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key, |
2277 | unsigned int keylen); |
2278 | diff --git a/arch/x86/include/asm/crypto/serpent-sse2.h b/arch/x86/include/asm/crypto/serpent-sse2.h |
2279 | index 1a345e8a7496c..860ca248914b1 100644 |
2280 | --- a/arch/x86/include/asm/crypto/serpent-sse2.h |
2281 | +++ b/arch/x86/include/asm/crypto/serpent-sse2.h |
2282 | @@ -9,25 +9,23 @@ |
2283 | |
2284 | #define SERPENT_PARALLEL_BLOCKS 4 |
2285 | |
2286 | -asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst, |
2287 | +asmlinkage void __serpent_enc_blk_4way(const struct serpent_ctx *ctx, u8 *dst, |
2288 | const u8 *src, bool xor); |
2289 | -asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst, |
2290 | +asmlinkage void serpent_dec_blk_4way(const struct serpent_ctx *ctx, u8 *dst, |
2291 | const u8 *src); |
2292 | |
2293 | -static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst, |
2294 | - const u8 *src) |
2295 | +static inline void serpent_enc_blk_xway(const void *ctx, u8 *dst, const u8 *src) |
2296 | { |
2297 | __serpent_enc_blk_4way(ctx, dst, src, false); |
2298 | } |
2299 | |
2300 | -static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst, |
2301 | - const u8 *src) |
2302 | +static inline void serpent_enc_blk_xway_xor(const struct serpent_ctx *ctx, |
2303 | + u8 *dst, const u8 *src) |
2304 | { |
2305 | __serpent_enc_blk_4way(ctx, dst, src, true); |
2306 | } |
2307 | |
2308 | -static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, |
2309 | - const u8 *src) |
2310 | +static inline void serpent_dec_blk_xway(const void *ctx, u8 *dst, const u8 *src) |
2311 | { |
2312 | serpent_dec_blk_4way(ctx, dst, src); |
2313 | } |
2314 | @@ -36,25 +34,23 @@ static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, |
2315 | |
2316 | #define SERPENT_PARALLEL_BLOCKS 8 |
2317 | |
2318 | -asmlinkage void __serpent_enc_blk_8way(struct serpent_ctx *ctx, u8 *dst, |
2319 | +asmlinkage void __serpent_enc_blk_8way(const struct serpent_ctx *ctx, u8 *dst, |
2320 | const u8 *src, bool xor); |
2321 | -asmlinkage void serpent_dec_blk_8way(struct serpent_ctx *ctx, u8 *dst, |
2322 | +asmlinkage void serpent_dec_blk_8way(const struct serpent_ctx *ctx, u8 *dst, |
2323 | const u8 *src); |
2324 | |
2325 | -static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst, |
2326 | - const u8 *src) |
2327 | +static inline void serpent_enc_blk_xway(const void *ctx, u8 *dst, const u8 *src) |
2328 | { |
2329 | __serpent_enc_blk_8way(ctx, dst, src, false); |
2330 | } |
2331 | |
2332 | -static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst, |
2333 | - const u8 *src) |
2334 | +static inline void serpent_enc_blk_xway_xor(const struct serpent_ctx *ctx, |
2335 | + u8 *dst, const u8 *src) |
2336 | { |
2337 | __serpent_enc_blk_8way(ctx, dst, src, true); |
2338 | } |
2339 | |
2340 | -static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, |
2341 | - const u8 *src) |
2342 | +static inline void serpent_dec_blk_xway(const void *ctx, u8 *dst, const u8 *src) |
2343 | { |
2344 | serpent_dec_blk_8way(ctx, dst, src); |
2345 | } |
2346 | diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/include/asm/crypto/twofish.h |
2347 | index f618bf272b900..2c377a8042e17 100644 |
2348 | --- a/arch/x86/include/asm/crypto/twofish.h |
2349 | +++ b/arch/x86/include/asm/crypto/twofish.h |
2350 | @@ -7,22 +7,19 @@ |
2351 | #include <crypto/b128ops.h> |
2352 | |
2353 | /* regular block cipher functions from twofish_x86_64 module */ |
2354 | -asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst, |
2355 | - const u8 *src); |
2356 | -asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst, |
2357 | - const u8 *src); |
2358 | +asmlinkage void twofish_enc_blk(const void *ctx, u8 *dst, const u8 *src); |
2359 | +asmlinkage void twofish_dec_blk(const void *ctx, u8 *dst, const u8 *src); |
2360 | |
2361 | /* 3-way parallel cipher functions */ |
2362 | -asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, |
2363 | - const u8 *src, bool xor); |
2364 | -asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst, |
2365 | - const u8 *src); |
2366 | +asmlinkage void __twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src, |
2367 | + bool xor); |
2368 | +asmlinkage void twofish_dec_blk_3way(const void *ctx, u8 *dst, const u8 *src); |
2369 | |
2370 | /* helpers from twofish_x86_64-3way module */ |
2371 | -extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src); |
2372 | -extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, |
2373 | +extern void twofish_dec_blk_cbc_3way(const void *ctx, u8 *dst, const u8 *src); |
2374 | +extern void twofish_enc_blk_ctr(const void *ctx, u8 *dst, const u8 *src, |
2375 | le128 *iv); |
2376 | -extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src, |
2377 | +extern void twofish_enc_blk_ctr_3way(const void *ctx, u8 *dst, const u8 *src, |
2378 | le128 *iv); |
2379 | |
2380 | #endif /* ASM_X86_TWOFISH_H */ |
2381 | diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c |
2382 | index a8248f8e2777e..85328522c5ca1 100644 |
2383 | --- a/crypto/cast6_generic.c |
2384 | +++ b/crypto/cast6_generic.c |
2385 | @@ -154,7 +154,7 @@ int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) |
2386 | EXPORT_SYMBOL_GPL(cast6_setkey); |
2387 | |
2388 | /*forward quad round*/ |
2389 | -static inline void Q(u32 *block, u8 *Kr, u32 *Km) |
2390 | +static inline void Q(u32 *block, const u8 *Kr, const u32 *Km) |
2391 | { |
2392 | u32 I; |
2393 | block[2] ^= F1(block[3], Kr[0], Km[0]); |
2394 | @@ -164,7 +164,7 @@ static inline void Q(u32 *block, u8 *Kr, u32 *Km) |
2395 | } |
2396 | |
2397 | /*reverse quad round*/ |
2398 | -static inline void QBAR(u32 *block, u8 *Kr, u32 *Km) |
2399 | +static inline void QBAR(u32 *block, const u8 *Kr, const u32 *Km) |
2400 | { |
2401 | u32 I; |
2402 | block[3] ^= F1(block[0], Kr[3], Km[3]); |
2403 | @@ -173,13 +173,14 @@ static inline void QBAR(u32 *block, u8 *Kr, u32 *Km) |
2404 | block[2] ^= F1(block[3], Kr[0], Km[0]); |
2405 | } |
2406 | |
2407 | -void __cast6_encrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf) |
2408 | +void __cast6_encrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) |
2409 | { |
2410 | + const struct cast6_ctx *c = ctx; |
2411 | const __be32 *src = (const __be32 *)inbuf; |
2412 | __be32 *dst = (__be32 *)outbuf; |
2413 | u32 block[4]; |
2414 | - u32 *Km; |
2415 | - u8 *Kr; |
2416 | + const u32 *Km; |
2417 | + const u8 *Kr; |
2418 | |
2419 | block[0] = be32_to_cpu(src[0]); |
2420 | block[1] = be32_to_cpu(src[1]); |
2421 | @@ -211,13 +212,14 @@ static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) |
2422 | __cast6_encrypt(crypto_tfm_ctx(tfm), outbuf, inbuf); |
2423 | } |
2424 | |
2425 | -void __cast6_decrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf) |
2426 | +void __cast6_decrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) |
2427 | { |
2428 | + const struct cast6_ctx *c = ctx; |
2429 | const __be32 *src = (const __be32 *)inbuf; |
2430 | __be32 *dst = (__be32 *)outbuf; |
2431 | u32 block[4]; |
2432 | - u32 *Km; |
2433 | - u8 *Kr; |
2434 | + const u32 *Km; |
2435 | + const u8 *Kr; |
2436 | |
2437 | block[0] = be32_to_cpu(src[0]); |
2438 | block[1] = be32_to_cpu(src[1]); |
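
The cast6/serpent generic hunks above all use the same adapter idiom: accept const void *, narrow it once to the typed context, and keep the derived key-schedule pointers const so nothing can write through them. A minimal sketch with an invented toy_ctx:

#include <stdint.h>

struct toy_ctx { uint32_t expkey[4]; };

static void toy_encrypt(const void *c, uint8_t *dst, const uint8_t *src)
{
	const struct toy_ctx *ctx = c;         /* single, well-defined narrowing */
	const uint32_t *k = ctx->expkey;       /* const propagates from ctx */
	unsigned int i;

	for (i = 0; i < 4; i++)
		dst[i] = src[i] ^ (uint8_t)k[i & 3];
	/* k[0] = 0; would now be a compile error */
}

int main(void)
{
	struct toy_ctx ctx = { { 1, 2, 3, 4 } };
	uint8_t in[4] = { 0 }, out[4];

	toy_encrypt(&ctx, out, in);
	return out[0] == 1 ? 0 : 1;
}
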
2439 | diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c |
2440 | index 56fa665a4f010..492c1d0bfe068 100644 |
2441 | --- a/crypto/serpent_generic.c |
2442 | +++ b/crypto/serpent_generic.c |
2443 | @@ -449,8 +449,9 @@ int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) |
2444 | } |
2445 | EXPORT_SYMBOL_GPL(serpent_setkey); |
2446 | |
2447 | -void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src) |
2448 | +void __serpent_encrypt(const void *c, u8 *dst, const u8 *src) |
2449 | { |
2450 | + const struct serpent_ctx *ctx = c; |
2451 | const u32 *k = ctx->expkey; |
2452 | const __le32 *s = (const __le32 *)src; |
2453 | __le32 *d = (__le32 *)dst; |
2454 | @@ -514,8 +515,9 @@ static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) |
2455 | __serpent_encrypt(ctx, dst, src); |
2456 | } |
2457 | |
2458 | -void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src) |
2459 | +void __serpent_decrypt(const void *c, u8 *dst, const u8 *src) |
2460 | { |
2461 | + const struct serpent_ctx *ctx = c; |
2462 | const u32 *k = ctx->expkey; |
2463 | const __le32 *s = (const __le32 *)src; |
2464 | __le32 *d = (__le32 *)dst; |
2465 | diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c |
2466 | index 59aa5e64acb04..21a562c2b1f50 100644 |
2467 | --- a/drivers/gpu/drm/i915/gvt/display.c |
2468 | +++ b/drivers/gpu/drm/i915/gvt/display.c |
2469 | @@ -172,21 +172,176 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) |
2470 | int pipe; |
2471 | |
2472 | if (IS_BROXTON(dev_priv)) { |
2473 | + enum transcoder trans; |
2474 | + enum port port; |
2475 | + |
2476 | + /* Clear PIPE, DDI, PHY and HPD state before setting the new state */ |
2477 | vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA | |
2478 | BXT_DE_PORT_HP_DDIB | |
2479 | BXT_DE_PORT_HP_DDIC); |
2480 | |
2481 | + for_each_pipe(dev_priv, pipe) { |
2482 | + vgpu_vreg_t(vgpu, PIPECONF(pipe)) &= |
2483 | + ~(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE); |
2484 | + vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; |
2485 | + vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; |
2486 | + vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE; |
2487 | + vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE; |
2488 | + } |
2489 | + |
2490 | + for (trans = TRANSCODER_A; trans <= TRANSCODER_EDP; trans++) { |
2491 | + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(trans)) &= |
2492 | + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | |
2493 | + TRANS_DDI_PORT_MASK | TRANS_DDI_FUNC_ENABLE); |
2494 | + } |
2495 | + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= |
2496 | + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | |
2497 | + TRANS_DDI_PORT_MASK); |
2498 | + |
2499 | + for (port = PORT_A; port <= PORT_C; port++) { |
2500 | + vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) &= |
2501 | + ~BXT_PHY_LANE_ENABLED; |
2502 | + vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) |= |
2503 | + (BXT_PHY_CMNLANE_POWERDOWN_ACK | |
2504 | + BXT_PHY_LANE_POWERDOWN_ACK); |
2505 | + |
2506 | + vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port)) &= |
2507 | + ~(PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | |
2508 | + PORT_PLL_REF_SEL | PORT_PLL_LOCK | |
2509 | + PORT_PLL_ENABLE); |
2510 | + |
2511 | + vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) &= |
2512 | + ~(DDI_INIT_DISPLAY_DETECTED | |
2513 | + DDI_BUF_CTL_ENABLE); |
2514 | + vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE; |
2515 | + } |
2516 | + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= |
2517 | + ~(PORTA_HOTPLUG_ENABLE | PORTA_HOTPLUG_STATUS_MASK); |
2518 | + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= |
2519 | + ~(PORTB_HOTPLUG_ENABLE | PORTB_HOTPLUG_STATUS_MASK); |
2520 | + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= |
2521 | + ~(PORTC_HOTPLUG_ENABLE | PORTC_HOTPLUG_STATUS_MASK); |
2522 | + /* No hpd_invert is set in the vGPU VBT, so clear the invert mask */ |
2523 | + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~BXT_DDI_HPD_INVERT_MASK; |
2524 | + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~BXT_DE_PORT_HOTPLUG_MASK; |
2525 | + |
2526 | + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1)); |
2527 | + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= |
2528 | + ~PHY_POWER_GOOD; |
2529 | + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &= |
2530 | + ~PHY_POWER_GOOD; |
2531 | + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &= ~BIT(30); |
2532 | + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &= ~BIT(30); |
2533 | + |
2534 | + vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIB_DETECTED; |
2535 | + vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIC_DETECTED; |
2536 | + |
2537 | + /* |
2538 | + * Only one pipe is enabled in the current vGPU display, and PIPE_A |
2539 | + * is tied to TRANSCODER_A in hardware, so it is safe to enable |
2540 | + * PIPE_A/TRANSCODER_A here. Which PORT_x is used depends on the |
2541 | + * input of setup_virtual_dp_monitor. |
2542 | + */ |
2543 | + vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; |
2544 | + vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= I965_PIPECONF_ACTIVE; |
2545 | + |
2546 | + /* |
2547 | + * Golden M/N values are calculated based on: 24 bpp, 4 lanes, |
2548 | + * a 154000 kHz pixel clock (from the virtual EDID), a 1620 MHz |
2549 | + * DP link clock, and non-constant_n. |
2550 | + * TODO: calculate DP link symbol clk and stream clk m/n. |
2551 | + */ |
2552 | + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT; |
2553 | + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e; |
2554 | + vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000; |
2555 | + vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e; |
2556 | + vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000; |
2557 | + |
2558 | + /* Enable per-DDI/PORT vreg */ |
2559 | if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { |
2560 | + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(1); |
2561 | + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |= |
2562 | + PHY_POWER_GOOD; |
2563 | + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) |= |
2564 | + BIT(30); |
2565 | + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |= |
2566 | + BXT_PHY_LANE_ENABLED; |
2567 | + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &= |
2568 | + ~(BXT_PHY_CMNLANE_POWERDOWN_ACK | |
2569 | + BXT_PHY_LANE_POWERDOWN_ACK); |
2570 | + vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_A)) |= |
2571 | + (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | |
2572 | + PORT_PLL_REF_SEL | PORT_PLL_LOCK | |
2573 | + PORT_PLL_ENABLE); |
2574 | + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |= |
2575 | + (DDI_BUF_CTL_ENABLE | DDI_INIT_DISPLAY_DETECTED); |
2576 | + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) &= |
2577 | + ~DDI_BUF_IS_IDLE; |
2578 | + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |= |
2579 | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | |
2580 | + TRANS_DDI_FUNC_ENABLE); |
2581 | + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= |
2582 | + PORTA_HOTPLUG_ENABLE; |
2583 | vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= |
2584 | BXT_DE_PORT_HP_DDIA; |
2585 | } |
2586 | |
2587 | if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { |
2588 | + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; |
2589 | + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0); |
2590 | + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |= |
2591 | + PHY_POWER_GOOD; |
2592 | + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |= |
2593 | + BIT(30); |
2594 | + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |= |
2595 | + BXT_PHY_LANE_ENABLED; |
2596 | + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &= |
2597 | + ~(BXT_PHY_CMNLANE_POWERDOWN_ACK | |
2598 | + BXT_PHY_LANE_POWERDOWN_ACK); |
2599 | + vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_B)) |= |
2600 | + (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | |
2601 | + PORT_PLL_REF_SEL | PORT_PLL_LOCK | |
2602 | + PORT_PLL_ENABLE); |
2603 | + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |= |
2604 | + DDI_BUF_CTL_ENABLE; |
2605 | + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= |
2606 | + ~DDI_BUF_IS_IDLE; |
2607 | + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= |
2608 | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | |
2609 | + (PORT_B << TRANS_DDI_PORT_SHIFT) | |
2610 | + TRANS_DDI_FUNC_ENABLE); |
2611 | + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= |
2612 | + PORTB_HOTPLUG_ENABLE; |
2613 | vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= |
2614 | BXT_DE_PORT_HP_DDIB; |
2615 | } |
2616 | |
2617 | if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { |
2618 | + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; |
2619 | + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0); |
2620 | + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |= |
2621 | + PHY_POWER_GOOD; |
2622 | + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |= |
2623 | + BIT(30); |
2624 | + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |= |
2625 | + BXT_PHY_LANE_ENABLED; |
2626 | + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &= |
2627 | + ~(BXT_PHY_CMNLANE_POWERDOWN_ACK | |
2628 | + BXT_PHY_LANE_POWERDOWN_ACK); |
2629 | + vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_C)) |= |
2630 | + (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | |
2631 | + PORT_PLL_REF_SEL | PORT_PLL_LOCK | |
2632 | + PORT_PLL_ENABLE); |
2633 | + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |= |
2634 | + DDI_BUF_CTL_ENABLE; |
2635 | + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= |
2636 | + ~DDI_BUF_IS_IDLE; |
2637 | + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= |
2638 | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | |
2639 | + (PORT_B << TRANS_DDI_PORT_SHIFT) | |
2640 | + TRANS_DDI_FUNC_ENABLE); |
2641 | + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= |
2642 | + PORTC_HOTPLUG_ENABLE; |
2643 | vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= |
2644 | BXT_DE_PORT_HP_DDIC; |
2645 | } |
2646 | @@ -511,6 +666,63 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected) |
2647 | vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= |
2648 | PORTD_HOTPLUG_STATUS_MASK; |
2649 | intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG); |
2650 | + } else if (IS_BROXTON(dev_priv)) { |
2651 | + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { |
2652 | + if (connected) { |
2653 | + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= |
2654 | + BXT_DE_PORT_HP_DDIA; |
2655 | + } else { |
2656 | + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= |
2657 | + ~BXT_DE_PORT_HP_DDIA; |
2658 | + } |
2659 | + vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |= |
2660 | + BXT_DE_PORT_HP_DDIA; |
2661 | + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= |
2662 | + ~PORTA_HOTPLUG_STATUS_MASK; |
2663 | + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= |
2664 | + PORTA_HOTPLUG_LONG_DETECT; |
2665 | + intel_vgpu_trigger_virtual_event(vgpu, DP_A_HOTPLUG); |
2666 | + } |
2667 | + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { |
2668 | + if (connected) { |
2669 | + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= |
2670 | + BXT_DE_PORT_HP_DDIB; |
2671 | + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= |
2672 | + SFUSE_STRAP_DDIB_DETECTED; |
2673 | + } else { |
2674 | + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= |
2675 | + ~BXT_DE_PORT_HP_DDIB; |
2676 | + vgpu_vreg_t(vgpu, SFUSE_STRAP) &= |
2677 | + ~SFUSE_STRAP_DDIB_DETECTED; |
2678 | + } |
2679 | + vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |= |
2680 | + BXT_DE_PORT_HP_DDIB; |
2681 | + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= |
2682 | + ~PORTB_HOTPLUG_STATUS_MASK; |
2683 | + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= |
2684 | + PORTB_HOTPLUG_LONG_DETECT; |
2685 | + intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG); |
2686 | + } |
2687 | + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { |
2688 | + if (connected) { |
2689 | + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= |
2690 | + BXT_DE_PORT_HP_DDIC; |
2691 | + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= |
2692 | + SFUSE_STRAP_DDIC_DETECTED; |
2693 | + } else { |
2694 | + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= |
2695 | + ~BXT_DE_PORT_HP_DDIC; |
2696 | + vgpu_vreg_t(vgpu, SFUSE_STRAP) &= |
2697 | + ~SFUSE_STRAP_DDIC_DETECTED; |
2698 | + } |
2699 | + vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |= |
2700 | + BXT_DE_PORT_HP_DDIC; |
2701 | + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= |
2702 | + ~PORTC_HOTPLUG_STATUS_MASK; |
2703 | + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= |
2704 | + PORTC_HOTPLUG_LONG_DETECT; |
2705 | + intel_vgpu_trigger_virtual_event(vgpu, DP_C_HOTPLUG); |
2706 | + } |
2707 | } |
2708 | } |
2709 | |
2710 | diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c |
2711 | index 689b07bc91c47..245c20d36f1b2 100644 |
2712 | --- a/drivers/gpu/drm/i915/gvt/handlers.c |
2713 | +++ b/drivers/gpu/drm/i915/gvt/handlers.c |
2714 | @@ -1632,6 +1632,34 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu, |
2715 | return 0; |
2716 | } |
2717 | |
2718 | +/* |
2719 | + * FixMe: |
2720 | + * If the guest fills a non-privileged batch buffer on ApolloLake/Broxton |
2721 | + * the way Mesa i965 does since 717e7539124d ("i965: Use a WC map and |
2722 | + * memcpy for the batch instead of pwrite."), the buffer filled by the |
2723 | + * VM vCPU is never flushed and the host GPU hangs executing those |
2724 | + * MI_BATCH_BUFFER commands. Work around this temporarily by setting |
2725 | + * the SNOOP bit for PAT3, used by the PPGTT PML4 PTE: PAT(0) PCD(1) |
2726 | + * PWT(1). Performance is still expected to be low; needs improvement. |
2727 | + */ |
2728 | +static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset, |
2729 | + void *p_data, unsigned int bytes) |
2730 | +{ |
2731 | + u64 pat = |
2732 | + GEN8_PPAT(0, CHV_PPAT_SNOOP) | |
2733 | + GEN8_PPAT(1, 0) | |
2734 | + GEN8_PPAT(2, 0) | |
2735 | + GEN8_PPAT(3, CHV_PPAT_SNOOP) | |
2736 | + GEN8_PPAT(4, CHV_PPAT_SNOOP) | |
2737 | + GEN8_PPAT(5, CHV_PPAT_SNOOP) | |
2738 | + GEN8_PPAT(6, CHV_PPAT_SNOOP) | |
2739 | + GEN8_PPAT(7, CHV_PPAT_SNOOP); |
2740 | + |
2741 | + vgpu_vreg(vgpu, offset) = lower_32_bits(pat); |
2742 | + |
2743 | + return 0; |
2744 | +} |
2745 | + |
2746 | static int mmio_read_from_hw(struct intel_vgpu *vgpu, |
2747 | unsigned int offset, void *p_data, unsigned int bytes) |
2748 | { |
2749 | @@ -2778,7 +2806,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) |
2750 | |
2751 | MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write); |
2752 | |
2753 | - MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS); |
2754 | + MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT); |
2755 | MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS); |
2756 | |
2757 | MMIO_D(GAMTARBMODE, D_BDW_PLUS); |
2758 | @@ -3104,7 +3132,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) |
2759 | NULL, NULL); |
2760 | |
2761 | MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL); |
2762 | - MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS); |
2763 | + MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT); |
2764 | |
2765 | return 0; |
2766 | } |
2767 | @@ -3278,9 +3306,17 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) |
2768 | MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT); |
2769 | MMIO_D(GEN6_GFXPAUSE, D_BXT); |
2770 | MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL); |
2771 | + MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL); |
2772 | + MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL); |
2773 | + MMIO_F(HSW_CS_GPR(0), 0x40, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); |
2774 | + MMIO_F(_MMIO(0x12600), 0x40, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); |
2775 | + MMIO_F(BCS_GPR(0), 0x40, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); |
2776 | + MMIO_F(_MMIO(0x1a600), 0x40, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); |
2777 | |
2778 | MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL); |
2779 | |
2780 | + MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write); |
2781 | + |
2782 | return 0; |
2783 | } |
2784 | |
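bxt_ppat_low_write() above ignores the guest's value and writes a fixed, mostly snooped PAT. The GEN8_PPAT(i, x) helper simply places one attribute byte per PAT index into a 64-bit value, of which this handler keeps only the low dword. A standalone sketch of the packing; the bit position of CHV_PPAT_SNOOP is an assumption here, the authoritative values live in the i915 register headers:

#include <stdint.h>
#include <stdio.h>

/* Each PAT index gets one byte in the 64-bit register. */
#define GEN8_PPAT(i, x) ((uint64_t)(x) << ((i) * 8))
#define CHV_PPAT_SNOOP  (1u << 6)   /* assumed bit position */

int main(void)
{
	/* Same layout the handler writes: everything snooped except 1/2. */
	uint64_t pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
		       GEN8_PPAT(1, 0) |
		       GEN8_PPAT(2, 0) |
		       GEN8_PPAT(3, CHV_PPAT_SNOOP) |
		       GEN8_PPAT(4, CHV_PPAT_SNOOP) |
		       GEN8_PPAT(5, CHV_PPAT_SNOOP) |
		       GEN8_PPAT(6, CHV_PPAT_SNOOP) |
		       GEN8_PPAT(7, CHV_PPAT_SNOOP);

	/* The handler stores only the low dword into the _LO register. */
	printf("PAT_LO = %#x, PAT_HI = %#x\n",
	       (uint32_t)pat, (uint32_t)(pat >> 32));
	return 0;
}

PAT index 3 is the one a PML4 PTE with PCD=1/PWT=1 selects, which is why the FixMe comment singles it out; most of the other indexes are forced to snoop as well, trading performance for coherency.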
2785 | diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c |
2786 | index a55178884d67a..e0e7adc545a5b 100644 |
2787 | --- a/drivers/gpu/drm/i915/gvt/mmio.c |
2788 | +++ b/drivers/gpu/drm/i915/gvt/mmio.c |
2789 | @@ -271,6 +271,11 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) |
2790 | vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |= |
2791 | BXT_PHY_CMNLANE_POWERDOWN_ACK | |
2792 | BXT_PHY_LANE_POWERDOWN_ACK; |
2793 | + vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |= |
2794 | + SKL_FUSE_DOWNLOAD_STATUS | |
2795 | + SKL_FUSE_PG_DIST_STATUS(SKL_PG0) | |
2796 | + SKL_FUSE_PG_DIST_STATUS(SKL_PG1) | |
2797 | + SKL_FUSE_PG_DIST_STATUS(SKL_PG2); |
2798 | } |
2799 | } else { |
2800 | #define GVT_GEN8_MMIO_RESET_OFFSET (0x44200) |
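The reset path above now also reports the fuse download and the PG0-PG2 power-gate distribution as complete in SKL_FUSE_STATUS. Guest display initialization polls those bits before enabling its power wells, so a vGPU that never sets them makes the guest time out. A toy model of that wait, with assumed bit positions standing in for the real SKL_FUSE_* macros:

#include <stdint.h>
#include <stdio.h>

/* Assumed bit layout; the real SKL_FUSE_* macros live in i915_reg.h. */
#define FUSE_DOWNLOAD_DONE (1u << 31)
#define FUSE_PG_DIST(pg)   (1u << (27 - (pg)))

/* What a guest driver effectively does while enabling power well 1. */
static int wait_fuse_ready(const volatile uint32_t *fuse_status)
{
	for (int tries = 0; tries < 1000; tries++)
		if (*fuse_status & FUSE_DOWNLOAD_DONE)
			return 0;
	return -1;  /* the timeout guests hit before this fix */
}

int main(void)
{
	/* The reset path now pre-seeds all of these bits. */
	uint32_t fuse = FUSE_DOWNLOAD_DONE | FUSE_PG_DIST(0) |
			FUSE_PG_DIST(1) | FUSE_PG_DIST(2);

	printf("fuse ready: %s\n", wait_fuse_ready(&fuse) ? "no" : "yes");
	return 0;
}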
2801 | diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c |
2802 | index 32e57635709ad..4deb7fec5eb52 100644 |
2803 | --- a/drivers/gpu/drm/i915/gvt/vgpu.c |
2804 | +++ b/drivers/gpu/drm/i915/gvt/vgpu.c |
2805 | @@ -432,8 +432,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, |
2806 | if (ret) |
2807 | goto out_clean_sched_policy; |
2808 | |
2809 | - /*TODO: add more platforms support */ |
2810 | - if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) |
2811 | + if (IS_BROADWELL(gvt->dev_priv) || IS_BROXTON(gvt->dev_priv)) |
2812 | + ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B); |
2813 | + else |
2814 | ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D); |
2815 | if (ret) |
2816 | goto out_clean_sched_policy; |
2817 | diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c |
2818 | index 0b1223f360d97..f35757b63ea78 100644 |
2819 | --- a/drivers/net/dsa/b53/b53_common.c |
2820 | +++ b/drivers/net/dsa/b53/b53_common.c |
2821 | @@ -514,6 +514,19 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) |
2822 | } |
2823 | EXPORT_SYMBOL(b53_imp_vlan_setup); |
2824 | |
2825 | +static void b53_port_set_learning(struct b53_device *dev, int port, |
2826 | + bool learning) |
2827 | +{ |
2828 | + u16 reg; |
2829 | + |
2830 | + b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, ®); |
2831 | + if (learning) |
2832 | + reg &= ~BIT(port); |
2833 | + else |
2834 | + reg |= BIT(port); |
2835 | + b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg); |
2836 | +} |
2837 | + |
2838 | int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) |
2839 | { |
2840 | struct b53_device *dev = ds->priv; |
2841 | @@ -527,6 +540,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) |
2842 | cpu_port = ds->ports[port].cpu_dp->index; |
2843 | |
2844 | b53_br_egress_floods(ds, port, true, true); |
2845 | + b53_port_set_learning(dev, port, false); |
2846 | |
2847 | if (dev->ops->irq_enable) |
2848 | ret = dev->ops->irq_enable(dev, port); |
2849 | @@ -645,6 +659,7 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port) |
2850 | b53_brcm_hdr_setup(dev->ds, port); |
2851 | |
2852 | b53_br_egress_floods(dev->ds, port, true, true); |
2853 | + b53_port_set_learning(dev, port, false); |
2854 | } |
2855 | |
2856 | static void b53_enable_mib(struct b53_device *dev) |
2857 | @@ -1704,6 +1719,8 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br) |
2858 | b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); |
2859 | dev->ports[port].vlan_ctl_mask = pvlan; |
2860 | |
2861 | + b53_port_set_learning(dev, port, true); |
2862 | + |
2863 | return 0; |
2864 | } |
2865 | EXPORT_SYMBOL(b53_br_join); |
2866 | @@ -1751,6 +1768,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) |
2867 | vl->untag |= BIT(port) | BIT(cpu_port); |
2868 | b53_set_vlan_entry(dev, pvid, vl); |
2869 | } |
2870 | + b53_port_set_learning(dev, port, false); |
2871 | } |
2872 | EXPORT_SYMBOL(b53_br_leave); |
2873 | |
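b53_port_set_learning() added above is a textbook read-modify-write on a register where a set bit means learning is disabled, so the boolean is inverted on the way in. Ports start with learning off and only get it back while they are members of a bridge; otherwise the switch could populate its address table from forwarded traffic and start switching packets between ports behind the software bridge's back. A self-contained sketch of the helper against a fake register (the real driver goes through b53_read16()/b53_write16()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t dis_learning_reg;  /* stands in for B53_DIS_LEARNING */

/* A set bit disables learning, so the boolean polarity is inverted. */
static void port_set_learning(int port, bool learning)
{
	uint16_t reg = dis_learning_reg;   /* read */

	if (learning)
		reg &= ~(1u << port);      /* modify: clear bit = learn */
	else
		reg |= (1u << port);
	dis_learning_reg = reg;            /* write back */
}

int main(void)
{
	port_set_learning(3, false);  /* b53_enable_port(): standalone */
	port_set_learning(3, true);   /* b53_br_join(): bridged */
	port_set_learning(3, false);  /* b53_br_leave(): standalone again */
	printf("DIS_LEARNING = %#x\n", dis_learning_reg);
	return 0;
}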
2874 | diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h |
2875 | index c90985c294a2e..b2c539a421545 100644 |
2876 | --- a/drivers/net/dsa/b53/b53_regs.h |
2877 | +++ b/drivers/net/dsa/b53/b53_regs.h |
2878 | @@ -115,6 +115,7 @@ |
2879 | #define B53_UC_FLOOD_MASK 0x32 |
2880 | #define B53_MC_FLOOD_MASK 0x34 |
2881 | #define B53_IPMC_FLOOD_MASK 0x36 |
2882 | +#define B53_DIS_LEARNING 0x3c |
2883 | |
2884 | /* |
2885 | * Override Ports 0-7 State on devices with xMII interfaces (8 bit) |
2886 | diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c |
2887 | index 6dd29bad1609f..ca425c15953b1 100644 |
2888 | --- a/drivers/net/dsa/bcm_sf2.c |
2889 | +++ b/drivers/net/dsa/bcm_sf2.c |
2890 | @@ -172,11 +172,6 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, |
2891 | reg &= ~P_TXQ_PSM_VDD(port); |
2892 | core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); |
2893 | |
2894 | - /* Enable learning */ |
2895 | - reg = core_readl(priv, CORE_DIS_LEARN); |
2896 | - reg &= ~BIT(port); |
2897 | - core_writel(priv, reg, CORE_DIS_LEARN); |
2898 | - |
2899 | /* Enable Broadcom tags for that port if requested */ |
2900 | if (priv->brcm_tag_mask & BIT(port)) |
2901 | b53_brcm_hdr_setup(ds, port); |
2902 | diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c |
2903 | index 08ca9441270d2..a352c1704042d 100644 |
2904 | --- a/fs/btrfs/block-group.c |
2905 | +++ b/fs/btrfs/block-group.c |
2906 | @@ -2048,8 +2048,17 @@ static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags) |
2907 | return flags; |
2908 | } |
2909 | |
2910 | -int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache) |
2911 | - |
2912 | +/* |
2913 | + * Mark one block group RO; this can be called several times for the same |
2914 | + * block group. |
2915 | + * |
2916 | + * @cache:		the destination block group |
2917 | + * @do_chunk_alloc:	whether to do chunk pre-allocation; this ensures we |
2918 | + *			still have some free space after marking this block |
2919 | + *			group RO. |
2920 | + */ |
2921 | +int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, |
2922 | + bool do_chunk_alloc) |
2923 | { |
2924 | struct btrfs_fs_info *fs_info = cache->fs_info; |
2925 | struct btrfs_trans_handle *trans; |
2926 | @@ -2079,25 +2088,29 @@ again: |
2927 | goto again; |
2928 | } |
2929 | |
2930 | - /* |
2931 | - * if we are changing raid levels, try to allocate a corresponding |
2932 | - * block group with the new raid level. |
2933 | - */ |
2934 | - alloc_flags = update_block_group_flags(fs_info, cache->flags); |
2935 | - if (alloc_flags != cache->flags) { |
2936 | - ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); |
2937 | + if (do_chunk_alloc) { |
2938 | /* |
2939 | - * ENOSPC is allowed here, we may have enough space |
2940 | - * already allocated at the new raid level to |
2941 | - * carry on |
2942 | + * If we are changing raid levels, try to allocate a |
2943 | + * corresponding block group with the new raid level. |
2944 | */ |
2945 | - if (ret == -ENOSPC) |
2946 | - ret = 0; |
2947 | - if (ret < 0) |
2948 | - goto out; |
2949 | + alloc_flags = update_block_group_flags(fs_info, cache->flags); |
2950 | + if (alloc_flags != cache->flags) { |
2951 | + ret = btrfs_chunk_alloc(trans, alloc_flags, |
2952 | + CHUNK_ALLOC_FORCE); |
2953 | + /* |
2954 | + * ENOSPC is allowed here, we may have enough space |
2955 | + * already allocated at the new raid level to carry on |
2956 | + */ |
2957 | + if (ret == -ENOSPC) |
2958 | + ret = 0; |
2959 | + if (ret < 0) |
2960 | + goto out; |
2961 | + } |
2962 | } |
2963 | |
2964 | - ret = inc_block_group_ro(cache, 0); |
2965 | + ret = inc_block_group_ro(cache, !do_chunk_alloc); |
2966 | + if (!do_chunk_alloc) |
2967 | + goto unlock_out; |
2968 | if (!ret) |
2969 | goto out; |
2970 | alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); |
2971 | @@ -2112,6 +2125,7 @@ out: |
2972 | check_system_chunk(trans, alloc_flags); |
2973 | mutex_unlock(&fs_info->chunk_mutex); |
2974 | } |
2975 | +unlock_out: |
2976 | mutex_unlock(&fs_info->ro_block_group_mutex); |
2977 | |
2978 | btrfs_end_transaction(trans); |
2979 | diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h |
2980 | index c391800388dda..0758e6d52acba 100644 |
2981 | --- a/fs/btrfs/block-group.h |
2982 | +++ b/fs/btrfs/block-group.h |
2983 | @@ -205,7 +205,8 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info); |
2984 | int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, |
2985 | u64 type, u64 chunk_offset, u64 size); |
2986 | void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans); |
2987 | -int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache); |
2988 | +int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, |
2989 | + bool do_chunk_alloc); |
2990 | void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache); |
2991 | int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans); |
2992 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans); |
2993 | diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c |
2994 | index 05b3e27b21d44..68b5d7c4aa491 100644 |
2995 | --- a/fs/btrfs/relocation.c |
2996 | +++ b/fs/btrfs/relocation.c |
2997 | @@ -4428,7 +4428,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) |
2998 | rc->extent_root = extent_root; |
2999 | rc->block_group = bg; |
3000 | |
3001 | - ret = btrfs_inc_block_group_ro(rc->block_group); |
3002 | + ret = btrfs_inc_block_group_ro(rc->block_group, true); |
3003 | if (ret) { |
3004 | err = ret; |
3005 | goto out; |
3006 | diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c |
3007 | index 93d7cb56e44b2..e5db948daa123 100644 |
3008 | --- a/fs/btrfs/scrub.c |
3009 | +++ b/fs/btrfs/scrub.c |
3010 | @@ -3560,7 +3560,26 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, |
3011 | * -> btrfs_scrub_pause() |
3012 | */ |
3013 | scrub_pause_on(fs_info); |
3014 | - ret = btrfs_inc_block_group_ro(cache); |
3015 | + |
3016 | + /* |
3017 | + * Don't do chunk preallocation for scrub. |
3018 | + * |
3019 | + * This is especially important for SYSTEM bgs, or we can hit |
3020 | + * -EFBIG from btrfs_finish_chunk_alloc() like this: |
3021 | + * 1. The only SYSTEM bg is marked RO. |
3022 | + *    Since the SYSTEM bg is small, that's pretty common. |
3023 | + * 2. A new SYSTEM bg will be allocated, |
3024 | + *    because the regular path would allocate a new chunk. |
3025 | + * 3. The new SYSTEM bg is empty and will get cleaned up. |
3026 | + *    Before cleanup really happens, it's marked RO again. |
3027 | + * 4. The empty SYSTEM bg gets scrubbed. |
3028 | + *    We go back to 2. |
3029 | + * |
3030 | + * This can easily inflate the number of SYSTEM chunks if the |
3031 | + * cleaner thread can't be triggered fast enough, and use up all |
3032 | + * the space in btrfs_super_block::sys_chunk_array. |
3033 | + */ |
3034 | + ret = btrfs_inc_block_group_ro(cache, false); |
3035 | if (!ret && sctx->is_dev_replace) { |
3036 | /* |
3037 | * If we are doing a device replace wait for any tasks |
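With this split, btrfs_inc_block_group_ro() has two modes and the two callers pick opposite ones: relocation pre-allocates so the data it is about to move has somewhere to land, while scrub only needs the group frozen. A toy model of the new calling contract, simplified far beyond the real btrfs code:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the new contract, not the real function. */
static int inc_block_group_ro(const char *who, bool do_chunk_alloc)
{
	if (do_chunk_alloc) {
		/* May force-allocate a fresh chunk (possibly at a new
		 * raid level) so writes can continue elsewhere. */
		printf("%s: pre-allocating a spare chunk\n", who);
	}
	printf("%s: marking the block group read-only\n", who);
	return 0;
}

int main(void)
{
	inc_block_group_ro("relocation", true);  /* fs/btrfs/relocation.c */
	inc_block_group_ro("scrub", false);      /* fs/btrfs/scrub.c */
	return 0;
}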
3038 | diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h |
3039 | index e3688312e9f1b..43bacf0a6bd31 100644 |
3040 | --- a/fs/fuse/fuse_i.h |
3041 | +++ b/fs/fuse/fuse_i.h |
3042 | @@ -791,6 +791,7 @@ static inline u64 fuse_get_attr_version(struct fuse_conn *fc) |
3043 | |
3044 | static inline void fuse_make_bad(struct inode *inode) |
3045 | { |
3046 | + remove_inode_hash(inode); |
3047 | set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state); |
3048 | } |
3049 | |
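fuse_make_bad() now unhashes the inode before flagging it, so a later hash lookup on the same node id can no longer hand the stale inode back; a fresh one gets allocated instead. A toy model of why the unhash matters, with a one-slot stand-in for the real inode hash table:

#include <stdbool.h>
#include <stdio.h>

/* A one-slot stand-in for the real inode hash table. */
static struct {
	unsigned long nodeid;
	bool bad;
	bool hashed;
} cached_inode;

static void make_bad(void)
{
	cached_inode.hashed = false;  /* remove_inode_hash(): lookups miss */
	cached_inode.bad = true;      /* set_bit(FUSE_I_BAD, ...) */
}

/* iget-style lookup: only hashed inodes can be found and reused. */
static bool lookup(unsigned long nodeid)
{
	return cached_inode.hashed && cached_inode.nodeid == nodeid;
}

int main(void)
{
	cached_inode.nodeid = 42;
	cached_inode.hashed = true;

	make_bad();
	printf("stale inode reused: %s\n", lookup(42) ? "yes" : "no");
	return 0;
}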
3050 | diff --git a/include/crypto/cast6.h b/include/crypto/cast6.h |
3051 | index c71f6ef47f0f4..4c8d0c72f78d4 100644 |
3052 | --- a/include/crypto/cast6.h |
3053 | +++ b/include/crypto/cast6.h |
3054 | @@ -19,7 +19,7 @@ int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key, |
3055 | unsigned int keylen, u32 *flags); |
3056 | int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); |
3057 | |
3058 | -void __cast6_encrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src); |
3059 | -void __cast6_decrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src); |
3060 | +void __cast6_encrypt(const void *ctx, u8 *dst, const u8 *src); |
3061 | +void __cast6_decrypt(const void *ctx, u8 *dst, const u8 *src); |
3062 | |
3063 | #endif |
3064 | diff --git a/include/crypto/serpent.h b/include/crypto/serpent.h |
3065 | index 7dd780c5d0589..75c7eaa208535 100644 |
3066 | --- a/include/crypto/serpent.h |
3067 | +++ b/include/crypto/serpent.h |
3068 | @@ -22,7 +22,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key, |
3069 | unsigned int keylen); |
3070 | int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); |
3071 | |
3072 | -void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src); |
3073 | -void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src); |
3074 | +void __serpent_encrypt(const void *ctx, u8 *dst, const u8 *src); |
3075 | +void __serpent_decrypt(const void *ctx, u8 *dst, const u8 *src); |
3076 | |
3077 | #endif |
3078 | diff --git a/include/crypto/xts.h b/include/crypto/xts.h |
3079 | index 75fd96ff976b7..15ae7fdc04789 100644 |
3080 | --- a/include/crypto/xts.h |
3081 | +++ b/include/crypto/xts.h |
3082 | @@ -8,8 +8,6 @@ |
3083 | |
3084 | #define XTS_BLOCK_SIZE 16 |
3085 | |
3086 | -#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x)) |
3087 | - |
3088 | static inline int xts_check_key(struct crypto_tfm *tfm, |
3089 | const u8 *key, unsigned int keylen) |
3090 | { |
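The cast6/serpent prototype changes and the removal of XTS_TWEAK_CAST all serve one goal: call crypto routines through function pointers whose type matches the function's real prototype. Calling through a mismatched pointer type is undefined behavior in C and is exactly what Control Flow Integrity rejects at runtime. A minimal sketch of the before/after pattern:

#include <stdio.h>

struct cipher_ctx { int rounds; };

typedef void (*crypt_fn)(const void *ctx, char *dst, const char *src);

/* Before: void encrypt_old(struct cipher_ctx *ctx, char *dst, const char
 * *src) had to be called via (crypt_fn)encrypt_old, a cast-mismatched
 * indirect call that CFI flags and that C leaves undefined. */

/* After: the natural prototype already matches crypt_fn. */
static void encrypt_new(const void *ctx, char *dst, const char *src)
{
	const struct cipher_ctx *c = ctx;  /* cast the data pointer inside */

	dst[0] = (char)(src[0] + c->rounds);
}

int main(void)
{
	struct cipher_ctx c = { .rounds = 3 };
	char in = 'a', out;
	crypt_fn fn = encrypt_new;  /* pointer and target types match */

	fn(&c, &out, &in);
	printf("%c -> %c\n", in, out);
	return 0;
}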
3091 | diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c |
3092 | index e6a43c0fdee88..ab2a4b7dfca57 100644 |
3093 | --- a/kernel/bpf/verifier.c |
3094 | +++ b/kernel/bpf/verifier.c |
3095 | @@ -4268,10 +4268,14 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, |
3096 | { |
3097 | bool mask_to_left = (opcode == BPF_ADD && off_is_neg) || |
3098 | (opcode == BPF_SUB && !off_is_neg); |
3099 | - u32 off; |
3100 | + u32 off, max; |
3101 | |
3102 | switch (ptr_reg->type) { |
3103 | case PTR_TO_STACK: |
3104 | +	/* Offset 0 is out-of-bounds, but an acceptable start for |
3105 | +	 * the left direction, see BPF_REG_FP. |
3106 | +	 */ |
3107 | + max = MAX_BPF_STACK + mask_to_left; |
3108 | /* Indirect variable offset stack access is prohibited in |
3109 | * unprivileged mode so it's not handled here. |
3110 | */ |
3111 | @@ -4279,16 +4283,17 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, |
3112 | if (mask_to_left) |
3113 | *ptr_limit = MAX_BPF_STACK + off; |
3114 | else |
3115 | - *ptr_limit = -off; |
3116 | - return 0; |
3117 | + *ptr_limit = -off - 1; |
3118 | + return *ptr_limit >= max ? -ERANGE : 0; |
3119 | case PTR_TO_MAP_VALUE: |
3120 | + max = ptr_reg->map_ptr->value_size; |
3121 | if (mask_to_left) { |
3122 | *ptr_limit = ptr_reg->umax_value + ptr_reg->off; |
3123 | } else { |
3124 | off = ptr_reg->smin_value + ptr_reg->off; |
3125 | - *ptr_limit = ptr_reg->map_ptr->value_size - off; |
3126 | + *ptr_limit = ptr_reg->map_ptr->value_size - off - 1; |
3127 | } |
3128 | - return 0; |
3129 | + return *ptr_limit >= max ? -ERANGE : 0; |
3130 | default: |
3131 | return -EINVAL; |
3132 | } |
3133 | @@ -4341,6 +4346,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, |
3134 | u32 alu_state, alu_limit; |
3135 | struct bpf_reg_state tmp; |
3136 | bool ret; |
3137 | + int err; |
3138 | |
3139 | if (can_skip_alu_sanitation(env, insn)) |
3140 | return 0; |
3141 | @@ -4356,10 +4362,13 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, |
3142 | alu_state |= ptr_is_dst_reg ? |
3143 | BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; |
3144 | |
3145 | - if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) |
3146 | - return 0; |
3147 | - if (update_alu_sanitation_state(aux, alu_state, alu_limit)) |
3148 | - return -EACCES; |
3149 | + err = retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg); |
3150 | + if (err < 0) |
3151 | + return err; |
3152 | + |
3153 | + err = update_alu_sanitation_state(aux, alu_state, alu_limit); |
3154 | + if (err < 0) |
3155 | + return err; |
3156 | do_sim: |
3157 | /* Simulate and find potential out-of-bounds access under |
3158 | * speculative execution from truncation as a result of |
3159 | @@ -4467,7 +4476,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, |
3160 | case BPF_ADD: |
3161 | ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); |
3162 | if (ret < 0) { |
3163 | - verbose(env, "R%d tried to add from different maps or paths\n", dst); |
3164 | + verbose(env, "R%d tried to add from different maps, paths, or prohibited types\n", dst); |
3165 | return ret; |
3166 | } |
3167 | /* We can take a fixed offset as long as it doesn't overflow |
3168 | @@ -4522,7 +4531,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, |
3169 | case BPF_SUB: |
3170 | ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); |
3171 | if (ret < 0) { |
3172 | - verbose(env, "R%d tried to sub from different maps or paths\n", dst); |
3173 | + verbose(env, "R%d tried to sub from different maps, paths, or prohibited types\n", dst); |
3174 | return ret; |
3175 | } |
3176 | if (dst_reg == off_reg) { |
3177 | @@ -9077,7 +9086,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) |
3178 | off_reg = issrc ? insn->src_reg : insn->dst_reg; |
3179 | if (isneg) |
3180 | *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); |
3181 | - *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1); |
3182 | + *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); |
3183 | *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); |
3184 | *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); |
3185 | *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); |
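The alu_limit change above moves the "- 1" out of the patched instruction stream and into retrieve_ptr_limit(), which now also fails hard with -ERANGE when no valid limit exists instead of silently skipping sanitation. The emitted BPF_REG_AX sequence computes an all-ones or all-zeroes mask from the limit and the offset; modeled in plain C below (right-shifting a negative value is implementation-defined in C but arithmetic on mainstream compilers, and BPF's ARSH guarantees it):

#include <stdint.h>
#include <stdio.h>

/* Model of the BPF_REG_AX sequence emitted by fixup_bpf_calls():
 *   AX = alu_limit; AX -= off; AX |= off; AX = -AX; AX s>>= 63; AX &= off
 * and AX is then used as the offset. If 0 <= off <= alu_limit the mask
 * ends up all-ones and off survives; otherwise off is forced to 0. */
static int64_t sanitize_off(int64_t off, int64_t alu_limit)
{
	int64_t ax = alu_limit;

	ax -= off;   /* negative if off > alu_limit */
	ax |= off;   /* also negative if off < 0 */
	ax = -ax;
	ax >>= 63;   /* arithmetic shift: 0 or -1 (all ones) */
	return ax & off;
}

int main(void)
{
	/* alu_limit now already holds "max - 1", e.g. value_size - off - 1. */
	printf("%lld %lld %lld\n",
	       (long long)sanitize_off(4, 7),    /* in range:  4 */
	       (long long)sanitize_off(9, 7),    /* too large: 0 */
	       (long long)sanitize_off(-1, 7));  /* negative:  0 */
	return 0;
}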
3186 | diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c |
3187 | index d6619edd53e5a..edc505e07125b 100644 |
3188 | --- a/net/dsa/tag_mtk.c |
3189 | +++ b/net/dsa/tag_mtk.c |
3190 | @@ -13,6 +13,7 @@ |
3191 | #define MTK_HDR_LEN 4 |
3192 | #define MTK_HDR_XMIT_UNTAGGED 0 |
3193 | #define MTK_HDR_XMIT_TAGGED_TPID_8100 1 |
3194 | +#define MTK_HDR_XMIT_TAGGED_TPID_88A8 2 |
3195 | #define MTK_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0) |
3196 | #define MTK_HDR_XMIT_DP_BIT_MASK GENMASK(5, 0) |
3197 | #define MTK_HDR_XMIT_SA_DIS BIT(6) |
3198 | @@ -21,8 +22,8 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb, |
3199 | struct net_device *dev) |
3200 | { |
3201 | struct dsa_port *dp = dsa_slave_to_port(dev); |
3202 | + u8 xmit_tpid; |
3203 | u8 *mtk_tag; |
3204 | - bool is_vlan_skb = true; |
3205 | unsigned char *dest = eth_hdr(skb)->h_dest; |
3206 | bool is_multicast_skb = is_multicast_ether_addr(dest) && |
3207 | !is_broadcast_ether_addr(dest); |
3208 | @@ -33,13 +34,20 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb, |
3209 | * the both special and VLAN tag at the same time and then look up VLAN |
3210 | * table with VID. |
3211 | */ |
3212 | - if (!skb_vlan_tagged(skb)) { |
3213 | + switch (skb->protocol) { |
3214 | + case htons(ETH_P_8021Q): |
3215 | + xmit_tpid = MTK_HDR_XMIT_TAGGED_TPID_8100; |
3216 | + break; |
3217 | + case htons(ETH_P_8021AD): |
3218 | + xmit_tpid = MTK_HDR_XMIT_TAGGED_TPID_88A8; |
3219 | + break; |
3220 | + default: |
3221 | if (skb_cow_head(skb, MTK_HDR_LEN) < 0) |
3222 | return NULL; |
3223 | |
3224 | + xmit_tpid = MTK_HDR_XMIT_UNTAGGED; |
3225 | skb_push(skb, MTK_HDR_LEN); |
3226 | memmove(skb->data, skb->data + MTK_HDR_LEN, 2 * ETH_ALEN); |
3227 | - is_vlan_skb = false; |
3228 | } |
3229 | |
3230 | mtk_tag = skb->data + 2 * ETH_ALEN; |
3231 | @@ -47,8 +55,7 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb, |
3232 | /* Mark tag attribute on special tag insertion to notify hardware |
3233 | * whether that's a combined special tag with 802.1Q header. |
3234 | */ |
3235 | - mtk_tag[0] = is_vlan_skb ? MTK_HDR_XMIT_TAGGED_TPID_8100 : |
3236 | - MTK_HDR_XMIT_UNTAGGED; |
3237 | + mtk_tag[0] = xmit_tpid; |
3238 | mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK; |
3239 | |
3240 | /* Disable SA learning for multicast frames */ |
3241 | @@ -56,7 +63,7 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb, |
3242 | mtk_tag[1] |= MTK_HDR_XMIT_SA_DIS; |
3243 | |
3244 | /* Tag control information is kept for 802.1Q */ |
3245 | - if (!is_vlan_skb) { |
3246 | + if (xmit_tpid == MTK_HDR_XMIT_UNTAGGED) { |
3247 | mtk_tag[2] = 0; |
3248 | mtk_tag[3] = 0; |
3249 | } |
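The tag_mtk change keys the TPID field of the 4-byte MediaTek tag off skb->protocol instead of a single boolean, so 802.1ad (0x88a8) frames are no longer mislabeled as plain 802.1Q. A small sketch of building the tag under the same rules, with the constant values taken from the hunk and the skb handling elided:

#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q   0x8100
#define ETH_P_8021AD  0x88A8

#define MTK_HDR_XMIT_UNTAGGED          0
#define MTK_HDR_XMIT_TAGGED_TPID_8100  1
#define MTK_HDR_XMIT_TAGGED_TPID_88A8  2
#define MTK_HDR_XMIT_DP_BIT_MASK       0x3f  /* GENMASK(5, 0) */

static void build_mtk_tag(uint8_t tag[4], uint16_t proto, int port)
{
	uint8_t xmit_tpid;

	switch (proto) {
	case ETH_P_8021Q:
		xmit_tpid = MTK_HDR_XMIT_TAGGED_TPID_8100;
		break;
	case ETH_P_8021AD:
		xmit_tpid = MTK_HDR_XMIT_TAGGED_TPID_88A8;
		break;
	default:
		xmit_tpid = MTK_HDR_XMIT_UNTAGGED;
	}

	tag[0] = xmit_tpid;                           /* TPID selector */
	tag[1] = (1u << port) & MTK_HDR_XMIT_DP_BIT_MASK;  /* dest port */
	if (xmit_tpid == MTK_HDR_XMIT_UNTAGGED) {
		tag[2] = 0;  /* no TCI to preserve */
		tag[3] = 0;
	}
}

int main(void)
{
	uint8_t tag[4] = {0};

	build_mtk_tag(tag, ETH_P_8021AD, 3);
	printf("tag = %02x %02x %02x %02x\n", tag[0], tag[1], tag[2], tag[3]);
	return 0;
}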
3250 | diff --git a/tools/testing/selftests/bpf/verifier/bounds_deduction.c b/tools/testing/selftests/bpf/verifier/bounds_deduction.c |
3251 | index 1fd07a4f27ac2..c162498a64fc6 100644 |
3252 | --- a/tools/testing/selftests/bpf/verifier/bounds_deduction.c |
3253 | +++ b/tools/testing/selftests/bpf/verifier/bounds_deduction.c |
3254 | @@ -6,8 +6,9 @@ |
3255 | BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), |
3256 | BPF_EXIT_INSN(), |
3257 | }, |
3258 | - .result = REJECT, |
3259 | + .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", |
3260 | .errstr = "R0 tried to subtract pointer from scalar", |
3261 | + .result = REJECT, |
3262 | }, |
3263 | { |
3264 | "check deducing bounds from const, 2", |
3265 | @@ -20,6 +21,8 @@ |
3266 | BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), |
3267 | BPF_EXIT_INSN(), |
3268 | }, |
3269 | + .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types", |
3270 | + .result_unpriv = REJECT, |
3271 | .result = ACCEPT, |
3272 | .retval = 1, |
3273 | }, |
3274 | @@ -31,8 +34,9 @@ |
3275 | BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), |
3276 | BPF_EXIT_INSN(), |
3277 | }, |
3278 | - .result = REJECT, |
3279 | + .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", |
3280 | .errstr = "R0 tried to subtract pointer from scalar", |
3281 | + .result = REJECT, |
3282 | }, |
3283 | { |
3284 | "check deducing bounds from const, 4", |
3285 | @@ -45,6 +49,8 @@ |
3286 | BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), |
3287 | BPF_EXIT_INSN(), |
3288 | }, |
3289 | + .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types", |
3290 | + .result_unpriv = REJECT, |
3291 | .result = ACCEPT, |
3292 | }, |
3293 | { |
3294 | @@ -55,8 +61,9 @@ |
3295 | BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), |
3296 | BPF_EXIT_INSN(), |
3297 | }, |
3298 | - .result = REJECT, |
3299 | + .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", |
3300 | .errstr = "R0 tried to subtract pointer from scalar", |
3301 | + .result = REJECT, |
3302 | }, |
3303 | { |
3304 | "check deducing bounds from const, 6", |
3305 | @@ -67,8 +74,9 @@ |
3306 | BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), |
3307 | BPF_EXIT_INSN(), |
3308 | }, |
3309 | - .result = REJECT, |
3310 | + .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", |
3311 | .errstr = "R0 tried to subtract pointer from scalar", |
3312 | + .result = REJECT, |
3313 | }, |
3314 | { |
3315 | "check deducing bounds from const, 7", |
3316 | @@ -80,8 +88,9 @@ |
3317 | offsetof(struct __sk_buff, mark)), |
3318 | BPF_EXIT_INSN(), |
3319 | }, |
3320 | - .result = REJECT, |
3321 | + .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types", |
3322 | .errstr = "dereference of modified ctx ptr", |
3323 | + .result = REJECT, |
3324 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3325 | }, |
3326 | { |
3327 | @@ -94,8 +103,9 @@ |
3328 | offsetof(struct __sk_buff, mark)), |
3329 | BPF_EXIT_INSN(), |
3330 | }, |
3331 | - .result = REJECT, |
3332 | + .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types", |
3333 | .errstr = "dereference of modified ctx ptr", |
3334 | + .result = REJECT, |
3335 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, |
3336 | }, |
3337 | { |
3338 | @@ -106,8 +116,9 @@ |
3339 | BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), |
3340 | BPF_EXIT_INSN(), |
3341 | }, |
3342 | - .result = REJECT, |
3343 | + .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", |
3344 | .errstr = "R0 tried to subtract pointer from scalar", |
3345 | + .result = REJECT, |
3346 | }, |
3347 | { |
3348 | "check deducing bounds from const, 10", |
3349 | @@ -119,6 +130,6 @@ |
3350 | BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), |
3351 | BPF_EXIT_INSN(), |
3352 | }, |
3353 | - .result = REJECT, |
3354 | .errstr = "math between ctx pointer and register with unbounded min value is not allowed", |
3355 | + .result = REJECT, |
3356 | }, |
3357 | diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c |
3358 | index 91bb77c24a2ef..0d621c841db14 100644 |
3359 | --- a/tools/testing/selftests/bpf/verifier/unpriv.c |
3360 | +++ b/tools/testing/selftests/bpf/verifier/unpriv.c |
3361 | @@ -495,7 +495,7 @@ |
3362 | .result = ACCEPT, |
3363 | }, |
3364 | { |
3365 | - "unpriv: adding of fp", |
3366 | + "unpriv: adding of fp, reg", |
3367 | .insns = { |
3368 | BPF_MOV64_IMM(BPF_REG_0, 0), |
3369 | BPF_MOV64_IMM(BPF_REG_1, 0), |
3370 | @@ -503,6 +503,19 @@ |
3371 | BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), |
3372 | BPF_EXIT_INSN(), |
3373 | }, |
3374 | + .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types", |
3375 | + .result_unpriv = REJECT, |
3376 | + .result = ACCEPT, |
3377 | +}, |
3378 | +{ |
3379 | + "unpriv: adding of fp, imm", |
3380 | + .insns = { |
3381 | + BPF_MOV64_IMM(BPF_REG_0, 0), |
3382 | + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), |
3383 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0), |
3384 | + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), |
3385 | + BPF_EXIT_INSN(), |
3386 | + }, |
3387 | .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", |
3388 | .result_unpriv = REJECT, |
3389 | .result = ACCEPT, |
3390 | diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c |
3391 | index a53d99cebd9ff..00b59d5d7a7f0 100644 |
3392 | --- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c |
3393 | +++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c |
3394 | @@ -169,7 +169,7 @@ |
3395 | .fixup_map_array_48b = { 1 }, |
3396 | .result = ACCEPT, |
3397 | .result_unpriv = REJECT, |
3398 | - .errstr_unpriv = "R2 tried to add from different maps or paths", |
3399 | + .errstr_unpriv = "R2 tried to add from different maps, paths, or prohibited types", |
3400 | .retval = 0, |
3401 | }, |
3402 | { |
3403 | @@ -516,6 +516,27 @@ |
3404 | .result = ACCEPT, |
3405 | .retval = 0xabcdef12, |
3406 | }, |
3407 | +{ |
3408 | + "map access: value_ptr += N, value_ptr -= N known scalar", |
3409 | + .insns = { |
3410 | + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), |
3411 | + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), |
3412 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), |
3413 | + BPF_LD_MAP_FD(BPF_REG_1, 0), |
3414 | + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), |
3415 | + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), |
3416 | + BPF_MOV32_IMM(BPF_REG_1, 0x12345678), |
3417 | + BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), |
3418 | + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), |
3419 | + BPF_MOV64_IMM(BPF_REG_1, 2), |
3420 | + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), |
3421 | + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), |
3422 | + BPF_EXIT_INSN(), |
3423 | + }, |
3424 | + .fixup_map_array_48b = { 3 }, |
3425 | + .result = ACCEPT, |
3426 | + .retval = 0x12345678, |
3427 | +}, |
3428 | { |
3429 | "map access: unknown scalar += value_ptr, 1", |
3430 | .insns = { |