Contents of /alx-src/tags/kernel26-2.6.12-alx-r9/crypto/cipher.c
Revision 630 - Wed Mar 4 11:03:09 2009 UTC by niro
File MIME type: text/plain
File size: 8747 byte(s)
Tag kernel26-2.6.12-alx-r9
/*
 * Cryptographic API.
 *
 * Cipher operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/scatterlist.h>
#include "internal.h"
#include "scatterwalk.h"

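/*
 * cryptfn_t is the algorithm's single-block transform (ctx, dst, src);
 * procfn_t is the per-block chaining-mode step that wraps it.
 */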
typedef void (cryptfn_t)(void *, u8 *, const u8 *);
typedef void (procfn_t)(struct crypto_tfm *, u8 *,
                        u8 *, cryptfn_t, void *);

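/*
 * Word-wise XOR helpers for 64- and 128-bit block sizes, used by the
 * CBC chaining code below.
 */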
static inline void xor_64(u8 *a, const u8 *b)
{
        ((u32 *)a)[0] ^= ((u32 *)b)[0];
        ((u32 *)a)[1] ^= ((u32 *)b)[1];
}

static inline void xor_128(u8 *a, const u8 *b)
{
        ((u32 *)a)[0] ^= ((u32 *)b)[0];
        ((u32 *)a)[1] ^= ((u32 *)b)[1];
        ((u32 *)a)[2] ^= ((u32 *)b)[2];
        ((u32 *)a)[3] ^= ((u32 *)b)[3];
}

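/*
 * Return a pointer to the next source block.  If the block straddles a
 * page boundary it is first copied into the caller-supplied temporary
 * buffer so the cipher always sees a contiguous block.
 */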
static inline void *prepare_src(struct scatter_walk *walk, int bsize,
                                void *tmp, int in_place)
{
        void *src = walk->data;
        int n = bsize;

        if (unlikely(scatterwalk_across_pages(walk, bsize))) {
                src = tmp;
                n = scatterwalk_copychunks(src, walk, bsize, 0);
        }
        scatterwalk_advance(walk, n);
        return src;
}

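/*
 * Pick the destination for the next output block.  The temporary buffer
 * is used when the block crosses a page boundary or when operating in
 * place, so the source data is not clobbered before it has been read.
 */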
static inline void *prepare_dst(struct scatter_walk *walk, int bsize,
                                void *tmp, int in_place)
{
        void *dst = walk->data;

        if (unlikely(scatterwalk_across_pages(walk, bsize)) || in_place)
                dst = tmp;
        return dst;
}

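/* No write-back is needed on the source side; this empty helper mirrors complete_dst(). */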
static inline void complete_src(struct scatter_walk *walk, int bsize,
                                void *src, int in_place)
{
}

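/*
 * Flush a completed output block: copy it back out of the temporary
 * buffer if it straddled a page boundary or was produced in place,
 * then advance the output walk.
 */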
static inline void complete_dst(struct scatter_walk *walk, int bsize,
                                void *dst, int in_place)
{
        int n = bsize;

        if (unlikely(scatterwalk_across_pages(walk, bsize)))
                n = scatterwalk_copychunks(dst, walk, bsize, 1);
        else if (in_place)
                memcpy(walk->data, dst, bsize);
        scatterwalk_advance(walk, n);
}

/*
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per block.
 */
static int crypt(struct crypto_tfm *tfm,
                 struct scatterlist *dst,
                 struct scatterlist *src,
                 unsigned int nbytes, cryptfn_t crfn,
                 procfn_t prfn, void *info)
{
        struct scatter_walk walk_in, walk_out;
        const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
        u8 tmp_src[bsize];
        u8 tmp_dst[bsize];

        if (!nbytes)
                return 0;

        if (nbytes % bsize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return -EINVAL;
        }

        scatterwalk_start(&walk_in, src);
        scatterwalk_start(&walk_out, dst);

        for (;;) {
                u8 *src_p, *dst_p;
                int in_place;

                scatterwalk_map(&walk_in, 0);
                scatterwalk_map(&walk_out, 1);

                in_place = scatterwalk_samebuf(&walk_in, &walk_out);

                do {
                        src_p = prepare_src(&walk_in, bsize, tmp_src,
                                            in_place);
                        dst_p = prepare_dst(&walk_out, bsize, tmp_dst,
                                            in_place);

                        prfn(tfm, dst_p, src_p, crfn, info);

                        complete_src(&walk_in, bsize, src_p, in_place);
                        complete_dst(&walk_out, bsize, dst_p, in_place);

                        nbytes -= bsize;
                } while (nbytes &&
                         !scatterwalk_across_pages(&walk_in, bsize) &&
                         !scatterwalk_across_pages(&walk_out, bsize));

                scatterwalk_done(&walk_in, 0, nbytes);
                scatterwalk_done(&walk_out, 1, nbytes);

                if (!nbytes)
                        return 0;

                crypto_yield(tfm);
        }
}

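/*
 * One CBC encryption step: XOR the plaintext block into the running IV,
 * encrypt the result, and save the ciphertext as the IV for the next
 * block (C_i = E_K(P_i ^ C_{i-1})).
 */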
static void cbc_process_encrypt(struct crypto_tfm *tfm, u8 *dst, u8 *src,
                                cryptfn_t fn, void *info)
{
        u8 *iv = info;

        tfm->crt_u.cipher.cit_xor_block(iv, src);
        fn(crypto_tfm_ctx(tfm), dst, iv);
        memcpy(iv, dst, crypto_tfm_alg_blocksize(tfm));
}

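/*
 * One CBC decryption step: decrypt the ciphertext block, XOR in the
 * previous ciphertext (the IV), and remember the current ciphertext as
 * the IV for the next block (P_i = D_K(C_i) ^ C_{i-1}).
 */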
static void cbc_process_decrypt(struct crypto_tfm *tfm, u8 *dst, u8 *src,
                                cryptfn_t fn, void *info)
{
        u8 *iv = info;

        fn(crypto_tfm_ctx(tfm), dst, src);
        tfm->crt_u.cipher.cit_xor_block(dst, iv);
        memcpy(iv, src, crypto_tfm_alg_blocksize(tfm));
}

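/* ECB: each block is transformed independently, with no chaining state. */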
static void ecb_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
                        cryptfn_t fn, void *info)
{
        fn(crypto_tfm_ctx(tfm), dst, src);
}

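/*
 * Validate the key length against the algorithm's advertised bounds
 * before handing the key to the algorithm's own setkey routine.
 */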
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
        struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;

        if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        } else
                return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen,
                                       &tfm->crt_flags);
}

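/*
 * Mode entry points: each binds crypt() to the algorithm's encrypt or
 * decrypt primitive, the matching per-block processing routine, and
 * either the transform's internal IV or a caller-supplied one.
 */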
static int ecb_encrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_encrypt,
                     ecb_process, NULL);
}

static int ecb_decrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_decrypt,
                     ecb_process, NULL);
}

static int cbc_encrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_encrypt,
                     cbc_process_encrypt, tfm->crt_cipher.cit_iv);
}

static int cbc_encrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_encrypt,
                     cbc_process_encrypt, iv);
}

static int cbc_decrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_decrypt,
                     cbc_process_decrypt, tfm->crt_cipher.cit_iv);
}

static int cbc_decrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
        return crypt(tfm, dst, src, nbytes,
                     tfm->__crt_alg->cra_cipher.cia_decrypt,
                     cbc_process_decrypt, iv);
}

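/*
 * Stubs installed for modes (CFB, CTR) that this generic layer does not
 * implement; callers get -ENOSYS.
 */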
static int nocrypt(struct crypto_tfm *tfm,
                   struct scatterlist *dst,
                   struct scatterlist *src,
                   unsigned int nbytes)
{
        return -ENOSYS;
}

static int nocrypt_iv(struct crypto_tfm *tfm,
                      struct scatterlist *dst,
                      struct scatterlist *src,
                      unsigned int nbytes, u8 *iv)
{
        return -ENOSYS;
}

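/*
 * Record the requested cipher mode (defaulting to ECB) and the weak-key
 * checking request in the transform.
 */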
int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
{
        u32 mode = flags & CRYPTO_TFM_MODE_MASK;

        tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
        if (flags & CRYPTO_TFM_REQ_WEAK_KEY)
                tfm->crt_flags = CRYPTO_TFM_REQ_WEAK_KEY;

        return 0;
}

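/*
 * Wire up the per-mode encrypt/decrypt handlers, select the XOR helper
 * matching the block size, and allocate the IV buffer for CBC.
 */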
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
        int ret = 0;
        struct cipher_tfm *ops = &tfm->crt_cipher;

        ops->cit_setkey = setkey;

        switch (tfm->crt_cipher.cit_mode) {
        case CRYPTO_TFM_MODE_ECB:
                ops->cit_encrypt = ecb_encrypt;
                ops->cit_decrypt = ecb_decrypt;
                break;

        case CRYPTO_TFM_MODE_CBC:
                ops->cit_encrypt = cbc_encrypt;
                ops->cit_decrypt = cbc_decrypt;
                ops->cit_encrypt_iv = cbc_encrypt_iv;
                ops->cit_decrypt_iv = cbc_decrypt_iv;
                break;

        case CRYPTO_TFM_MODE_CFB:
                ops->cit_encrypt = nocrypt;
                ops->cit_decrypt = nocrypt;
                ops->cit_encrypt_iv = nocrypt_iv;
                ops->cit_decrypt_iv = nocrypt_iv;
                break;

        case CRYPTO_TFM_MODE_CTR:
                ops->cit_encrypt = nocrypt;
                ops->cit_decrypt = nocrypt;
                ops->cit_encrypt_iv = nocrypt_iv;
                ops->cit_decrypt_iv = nocrypt_iv;
                break;

        default:
                BUG();
        }

        if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {

                switch (crypto_tfm_alg_blocksize(tfm)) {
                case 8:
                        ops->cit_xor_block = xor_64;
                        break;

                case 16:
                        ops->cit_xor_block = xor_128;
                        break;

                default:
                        printk(KERN_WARNING "%s: block size %u not supported\n",
                               crypto_tfm_alg_name(tfm),
                               crypto_tfm_alg_blocksize(tfm));
                        ret = -EINVAL;
                        goto out;
                }

                ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
                ops->cit_iv = kmalloc(ops->cit_ivsize, GFP_KERNEL);
                if (ops->cit_iv == NULL)
                        ret = -ENOMEM;
        }

out:
        return ret;
}

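/* Release the CBC IV buffer allocated by crypto_init_cipher_ops(). */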
void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
{
        if (tfm->crt_cipher.cit_iv)
                kfree(tfm->crt_cipher.cit_iv);
}