OpenWrt – Blame information for rev 1 (every line below: rev 1, author "office")
From 936d5f485f2ff837cdd7d49839771bd3367e8b92 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
Date: Tue, 30 Oct 2018 18:28:03 +0800
Subject: [PATCH 37/40] sec: support layerscape

This is an integrated patch of sec for layerscape

Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
Signed-off-by: Cristian Stoica <cristian.stoica@nxp.com>
Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
 crypto/Kconfig                      |   20 +
 crypto/Makefile                     |    1 +
 crypto/tcrypt.c                     |   27 +-
 crypto/testmgr.c                    |  244 ++
 crypto/testmgr.h                    |  219 ++
 crypto/tls.c                        |  607 +++
 drivers/crypto/Makefile             |    2 +-
 drivers/crypto/caam/Kconfig         |   57 +-
 drivers/crypto/caam/Makefile        |   10 +-
 drivers/crypto/caam/caamalg.c       |  131 +-
 drivers/crypto/caam/caamalg_desc.c  |  761 +++-
 drivers/crypto/caam/caamalg_desc.h  |   47 +-
 drivers/crypto/caam/caamalg_qi.c    |  927 ++++-
 drivers/crypto/caam/caamalg_qi2.c   | 5691 +++++++++++++++++++++++++++
 drivers/crypto/caam/caamalg_qi2.h   |  274 ++
 drivers/crypto/caam/caamhash.c      |  132 +-
 drivers/crypto/caam/caamhash_desc.c |  108 +
 drivers/crypto/caam/caamhash_desc.h |   49 +
 drivers/crypto/caam/compat.h        |    2 +
 drivers/crypto/caam/ctrl.c          |   23 +-
 drivers/crypto/caam/desc.h          |   62 +-
 drivers/crypto/caam/desc_constr.h   |   52 +-
 drivers/crypto/caam/dpseci.c        |  865 ++++
 drivers/crypto/caam/dpseci.h        |  433 ++
 drivers/crypto/caam/dpseci_cmd.h    |  287 ++
 drivers/crypto/caam/error.c         |   75 +-
 drivers/crypto/caam/error.h         |    6 +-
 drivers/crypto/caam/intern.h        |    1 +
 drivers/crypto/caam/jr.c            |   42 +
 drivers/crypto/caam/jr.h            |    2 +
 drivers/crypto/caam/key_gen.c       |   30 -
 drivers/crypto/caam/key_gen.h       |   30 +
 drivers/crypto/caam/qi.c            |   85 +-
 drivers/crypto/caam/qi.h            |    2 +-
 drivers/crypto/caam/regs.h          |    2 +
 drivers/crypto/caam/sg_sw_qm.h      |   46 +-
 drivers/crypto/talitos.c            |    8 +
 37 files changed, 11006 insertions(+), 354 deletions(-)
 create mode 100644 crypto/tls.c
 create mode 100644 drivers/crypto/caam/caamalg_qi2.c
 create mode 100644 drivers/crypto/caam/caamalg_qi2.h
 create mode 100644 drivers/crypto/caam/caamhash_desc.c
 create mode 100644 drivers/crypto/caam/caamhash_desc.h
 create mode 100644 drivers/crypto/caam/dpseci.c
 create mode 100644 drivers/crypto/caam/dpseci.h
 create mode 100644 drivers/crypto/caam/dpseci_cmd.h

--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -312,6 +312,26 @@ config CRYPTO_ECHAINIV
	  a sequence number xored with a salt. This is the default
	  algorithm for CBC.

+config CRYPTO_TLS
+	tristate "TLS support"
+	select CRYPTO_AEAD
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_MANAGER
+	select CRYPTO_HASH
+	select CRYPTO_NULL
+	select CRYPTO_AUTHENC
+	help
+	  Support for TLS 1.0 record encryption and decryption
+
+	  This module adds support for encryption/decryption of TLS 1.0 frames
+	  using blockcipher algorithms. The name of the resulting algorithm is
+	  "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
+	  algorithms are used (e.g. aes-generic, sha1-generic), but hardware
+	  accelerated versions will be used automatically if available.
+
+	  User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
+	  operations through AF_ALG or cryptodev interfaces
+
comment "Block modes"

config CRYPTO_CBC
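The help text mentions AF_ALG offload, but the patch carries no user-space example. As a rough sketch (assuming a kernel with this patch applied and AF_ALG enabled; the half-filled demo key is made up), a program would bind an AEAD socket to the template roughly like this:

```c
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	/* AEAD socket bound to the template this patch registers */
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "tls10(hmac(sha1),cbc(aes))",
	};
	/* authenc-style key blob, same layout as the test vectors below:
	 * rtattr header + big-endian enc key length + HMAC key + AES key */
	static const unsigned char key[8 + 20 + 16] = {
		0x08, 0x00,             /* rta length (little-endian host) */
		0x01, 0x00,             /* rta type */
		0x00, 0x00, 0x00, 0x10, /* enc key length = 16, big-endian */
		/* 20-byte authentication key and 16-byte AES key follow */
	};
	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);

	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		perror("AF_ALG bind");
		return 1;
	}
	if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key)) < 0) {
		perror("ALG_SET_KEY");
		return 1;
	}
	/* accept() an op fd, then drive records with ALG_SET_OP/ALG_SET_IV */
	close(tfmfd);
	return 0;
}
```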
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -118,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
+obj-$(CONFIG_CRYPTO_TLS) += tls.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -76,7 +76,7 @@ static char *check[] = {
	"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
	"camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
	"lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
-	NULL
+	"rsa", NULL
};

struct tcrypt_result {
@@ -355,11 +355,13 @@ static void test_aead_speed(const char *
						iv);
		aead_request_set_ad(req, aad_size);

-		if (secs)
+		if (secs) {
			ret = test_aead_jiffies(req, enc, *b_size,
						secs);
-		else
+			cond_resched();
+		} else {
			ret = test_aead_cycles(req, enc, *b_size);
+		}

		if (ret) {
			pr_err("%s() failed return code=%d\n", e, ret);
@@ -736,12 +738,14 @@ static void test_ahash_speed_common(cons

		ahash_request_set_crypt(req, sg, output, speed[i].plen);

-		if (secs)
+		if (secs) {
			ret = test_ahash_jiffies(req, speed[i].blen,
						 speed[i].plen, output, secs);
-		else
+			cond_resched();
+		} else {
			ret = test_ahash_cycles(req, speed[i].blen,
						speed[i].plen, output);
+		}

		if (ret) {
			pr_err("hashing failed ret=%d\n", ret);
@@ -959,12 +963,14 @@ static void test_skcipher_speed(const ch

			skcipher_request_set_crypt(req, sg, sg, *b_size, iv);

-			if (secs)
+			if (secs) {
				ret = test_acipher_jiffies(req, enc,
							   *b_size, secs);
-			else
+				cond_resched();
+			} else {
				ret = test_acipher_cycles(req, enc,
							  *b_size);
+			}

			if (ret) {
				pr_err("%s() failed flags=%x\n", e,
@@ -1336,6 +1342,10 @@ static int do_test(const char *alg, u32
		ret += tcrypt_test("hmac(sha3-512)");
		break;

+	case 115:
+		ret += tcrypt_test("rsa");
+		break;
+
	case 150:
		ret += tcrypt_test("ansi_cprng");
		break;
@@ -1397,6 +1407,9 @@ static int do_test(const char *alg, u32
	case 190:
		ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
		break;
+	case 191:
+		ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
+		break;
	case 200:
		test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
				  speed_template_16_24_32);
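With these hooks in place the new template can be exercised from the kernel's own self-test harness: loading `tcrypt` with `modprobe tcrypt mode=191` runs the `tls10(hmac(sha1),cbc(aes))` vectors added below, and `mode=115` runs the RSA test this hunk also wires up.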
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -117,6 +117,13 @@ struct drbg_test_suite {
	unsigned int count;
};

+struct tls_test_suite {
+	struct {
+		struct tls_testvec *vecs;
+		unsigned int count;
+	} enc, dec;
+};
+
struct akcipher_test_suite {
	const struct akcipher_testvec *vecs;
	unsigned int count;
@@ -140,6 +147,7 @@ struct alg_test_desc {
		struct hash_test_suite hash;
		struct cprng_test_suite cprng;
		struct drbg_test_suite drbg;
+		struct tls_test_suite tls;
		struct akcipher_test_suite akcipher;
		struct kpp_test_suite kpp;
	} suite;
@@ -991,6 +999,233 @@ static int test_aead(struct crypto_aead
	return 0;
}

+static int __test_tls(struct crypto_aead *tfm, int enc,
+		      struct tls_testvec *template, unsigned int tcount,
+		      const bool diff_dst)
+{
+	const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
+	unsigned int i, k, authsize;
+	char *q;
+	struct aead_request *req;
+	struct scatterlist *sg;
+	struct scatterlist *sgout;
+	const char *e, *d;
+	struct tcrypt_result result;
+	void *input;
+	void *output;
+	void *assoc;
+	char *iv;
+	char *key;
+	char *xbuf[XBUFSIZE];
+	char *xoutbuf[XBUFSIZE];
+	char *axbuf[XBUFSIZE];
+	int ret = -ENOMEM;
+
+	if (testmgr_alloc_buf(xbuf))
+		goto out_noxbuf;
+
+	if (diff_dst && testmgr_alloc_buf(xoutbuf))
+		goto out_nooutbuf;
+
+	if (testmgr_alloc_buf(axbuf))
+		goto out_noaxbuf;
+
+	iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
+	if (!iv)
+		goto out_noiv;
+
+	key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
+	if (!key)
+		goto out_nokey;
+
+	sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
+	if (!sg)
+		goto out_nosg;
+
+	sgout = sg + 8;
+
+	d = diff_dst ? "-ddst" : "";
+	e = enc ? "encryption" : "decryption";
+
+	init_completion(&result.completion);
+
+	req = aead_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		pr_err("alg: tls%s: Failed to allocate request for %s\n",
+		       d, algo);
+		goto out;
+	}
+
+	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				  tcrypt_complete, &result);
+
+	for (i = 0; i < tcount; i++) {
+		input = xbuf[0];
+		assoc = axbuf[0];
+
+		ret = -EINVAL;
+		if (WARN_ON(template[i].ilen > PAGE_SIZE ||
+			    template[i].alen > PAGE_SIZE))
+			goto out;
+
+		memcpy(assoc, template[i].assoc, template[i].alen);
+		memcpy(input, template[i].input, template[i].ilen);
+
+		if (template[i].iv)
+			memcpy(iv, template[i].iv, MAX_IVLEN);
+		else
+			memset(iv, 0, MAX_IVLEN);
+
+		crypto_aead_clear_flags(tfm, ~0);
+
+		if (template[i].klen > MAX_KEYLEN) {
+			pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
+			       d, i, algo, template[i].klen, MAX_KEYLEN);
+			ret = -EINVAL;
+			goto out;
+		}
+		memcpy(key, template[i].key, template[i].klen);
+
+		ret = crypto_aead_setkey(tfm, key, template[i].klen);
+		if (!ret == template[i].fail) {
+			pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
+			       d, i, algo, crypto_aead_get_flags(tfm));
+			goto out;
+		} else if (ret)
+			continue;
+
+		authsize = 20;
+		ret = crypto_aead_setauthsize(tfm, authsize);
+		if (ret) {
+			pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
+			       d, authsize, i, algo);
+			goto out;
+		}
+
+		k = !!template[i].alen;
+		sg_init_table(sg, k + 1);
+		sg_set_buf(&sg[0], assoc, template[i].alen);
+		sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
+					   template[i].ilen));
+		output = input;
+
+		if (diff_dst) {
+			sg_init_table(sgout, k + 1);
+			sg_set_buf(&sgout[0], assoc, template[i].alen);
+
+			output = xoutbuf[0];
+			sg_set_buf(&sgout[k], output,
+				   (enc ? template[i].rlen : template[i].ilen));
+		}
+
+		aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
+				       template[i].ilen, iv);
+
+		aead_request_set_ad(req, template[i].alen);
+
+		ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+
+		switch (ret) {
+		case 0:
+			if (template[i].novrfy) {
+				/* verification was supposed to fail */
+				pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
+				       d, e, i, algo);
+				/* so really, we got a bad message */
+				ret = -EBADMSG;
+				goto out;
+			}
+			break;
+		case -EINPROGRESS:
+		case -EBUSY:
+			wait_for_completion(&result.completion);
+			reinit_completion(&result.completion);
+			ret = result.err;
+			if (!ret)
+				break;
+		case -EBADMSG:
+			/* verification failure was expected */
+			if (template[i].novrfy)
+				continue;
+			/* fall through */
+		default:
+			pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
+			       d, e, i, algo, -ret);
+			goto out;
+		}
+
+		q = output;
+		if (memcmp(q, template[i].result, template[i].rlen)) {
+			pr_err("alg: tls%s: Test %d failed on %s for %s\n",
+			       d, i, e, algo);
+			hexdump(q, template[i].rlen);
+			pr_err("should be:\n");
+			hexdump(template[i].result, template[i].rlen);
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+out:
+	aead_request_free(req);
+
+	kfree(sg);
+out_nosg:
+	kfree(key);
+out_nokey:
+	kfree(iv);
+out_noiv:
+	testmgr_free_buf(axbuf);
+out_noaxbuf:
+	if (diff_dst)
+		testmgr_free_buf(xoutbuf);
+out_nooutbuf:
+	testmgr_free_buf(xbuf);
+out_noxbuf:
+	return ret;
+}
+
+static int test_tls(struct crypto_aead *tfm, int enc,
+		    struct tls_testvec *template, unsigned int tcount)
+{
+	int ret;
+	/* test 'dst == src' case */
+	ret = __test_tls(tfm, enc, template, tcount, false);
+	if (ret)
+		return ret;
+	/* test 'dst != src' case */
+	return __test_tls(tfm, enc, template, tcount, true);
+}
+
+static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
+			u32 type, u32 mask)
+{
+	struct crypto_aead *tfm;
+	int err = 0;
+
+	tfm = crypto_alloc_aead(driver, type, mask);
+	if (IS_ERR(tfm)) {
+		pr_err("alg: aead: Failed to load transform for %s: %ld\n",
+		       driver, PTR_ERR(tfm));
+		return PTR_ERR(tfm);
+	}
+
+	if (desc->suite.tls.enc.vecs) {
+		err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
+			       desc->suite.tls.enc.count);
+		if (err)
+			goto out;
+	}
+
+	if (!err && desc->suite.tls.dec.vecs)
+		err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
+			       desc->suite.tls.dec.count);
+
+out:
+	crypto_free_aead(tfm);
+	return err;
+}
+
static int test_cipher(struct crypto_cipher *tfm, int enc,
		       const struct cipher_testvec *template,
		       unsigned int tcount)
@@ -3518,6 +3753,15 @@ static const struct alg_test_desc alg_te
			.hash = __VECS(tgr192_tv_template)
		}
	}, {
+		.alg = "tls10(hmac(sha1),cbc(aes))",
+		.test = alg_test_tls,
+		.suite = {
+			.tls = {
+				.enc = __VECS(tls_enc_tv_template),
+				.dec = __VECS(tls_dec_tv_template)
+			}
+		}
+	}, {
		.alg = "vmac(aes)",
		.test = alg_test_hash,
		.suite = {
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -125,6 +125,20 @@ struct drbg_testvec {
	size_t expectedlen;
};

+struct tls_testvec {
+	char *key;	/* wrapped keys for encryption and authentication */
+	char *iv;	/* initialization vector */
+	char *input;	/* input data */
+	char *assoc;	/* associated data: seq num, type, version, input len */
+	char *result;	/* result data */
+	unsigned char fail;	/* the test failure is expected */
+	unsigned char novrfy;	/* dec verification failure expected */
+	unsigned char klen;	/* key length */
+	unsigned short ilen;	/* input data length */
+	unsigned short alen;	/* associated data length */
+	unsigned short rlen;	/* result length */
+};
480 | + |
||
481 | struct akcipher_testvec { |
||
482 | const unsigned char *key; |
||
483 | const unsigned char *m; |
||
484 | @@ -153,6 +167,211 @@ struct kpp_testvec { |
||
485 | static const char zeroed_string[48]; |
||
486 | |||
487 | /* |
||
488 | + * TLS1.0 synthetic test vectors |
||
489 | + */ |
||
490 | +static struct tls_testvec tls_enc_tv_template[] = { |
||
491 | + { |
||
492 | +#ifdef __LITTLE_ENDIAN |
||
493 | + .key = "\x08\x00" /* rta length */ |
||
494 | + "\x01\x00" /* rta type */ |
||
495 | +#else |
||
496 | + .key = "\x00\x08" /* rta length */ |
||
497 | + "\x00\x01" /* rta type */ |
||
498 | +#endif |
||
499 | + "\x00\x00\x00\x10" /* enc key length */ |
||
500 | + "authenticationkey20benckeyis16_bytes", |
||
501 | + .klen = 8 + 20 + 16, |
||
502 | + .iv = "iv0123456789abcd", |
||
503 | + .input = "Single block msg", |
||
504 | + .ilen = 16, |
||
505 | + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" |
||
506 | + "\x00\x03\x01\x00\x10", |
||
507 | + .alen = 13, |
||
508 | + .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1" |
||
509 | + "\x59\x79\x1e\x91\x5f\x52\x14\x9c" |
||
510 | + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73" |
||
511 | + "\xdc\x89\x47\x49\x49\xcb\x30\x6b" |
||
512 | + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02" |
||
513 | + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61", |
||
514 | + .rlen = 16 + 20 + 12, |
||
515 | + }, { |
||
516 | +#ifdef __LITTLE_ENDIAN |
||
517 | + .key = "\x08\x00" /* rta length */ |
||
518 | + "\x01\x00" /* rta type */ |
||
519 | +#else |
||
520 | + .key = "\x00\x08" /* rta length */ |
||
521 | + "\x00\x01" /* rta type */ |
||
522 | +#endif |
||
523 | + "\x00\x00\x00\x10" /* enc key length */ |
||
524 | + "authenticationkey20benckeyis16_bytes", |
||
525 | + .klen = 8 + 20 + 16, |
||
526 | + .iv = "iv0123456789abcd", |
||
527 | + .input = "", |
||
528 | + .ilen = 0, |
||
529 | + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" |
||
530 | + "\x00\x03\x01\x00\x00", |
||
531 | + .alen = 13, |
||
532 | + .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67" |
||
533 | + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a" |
||
534 | + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45" |
||
535 | + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a", |
||
536 | + .rlen = 20 + 12, |
||
537 | + }, { |
||
538 | +#ifdef __LITTLE_ENDIAN |
||
539 | + .key = "\x08\x00" /* rta length */ |
||
540 | + "\x01\x00" /* rta type */ |
||
541 | +#else |
||
542 | + .key = "\x00\x08" /* rta length */ |
||
543 | + "\x00\x01" /* rta type */ |
||
544 | +#endif |
||
545 | + "\x00\x00\x00\x10" /* enc key length */ |
||
546 | + "authenticationkey20benckeyis16_bytes", |
||
547 | + .klen = 8 + 20 + 16, |
||
548 | + .iv = "iv0123456789abcd", |
||
549 | + .input = "285 bytes plaintext285 bytes plaintext285 bytes" |
||
550 | + " plaintext285 bytes plaintext285 bytes plaintext285" |
||
551 | + " bytes plaintext285 bytes plaintext285 bytes" |
||
552 | + " plaintext285 bytes plaintext285 bytes plaintext285" |
||
553 | + " bytes plaintext285 bytes plaintext285 bytes" |
||
554 | + " plaintext285 bytes plaintext285 bytes plaintext285" |
||
555 | + " bytes plaintext285 bytes plaintext", |
||
556 | + .ilen = 285, |
||
557 | + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" |
||
558 | + "\x00\x03\x01\x01\x1d", |
||
559 | + .alen = 13, |
||
560 | + .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd" |
||
561 | + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90" |
||
562 | + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a" |
||
563 | + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94" |
||
564 | + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51" |
||
565 | + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31" |
||
566 | + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8" |
||
567 | + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50" |
||
568 | + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac" |
||
569 | + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14" |
||
570 | + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d" |
||
571 | + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8" |
||
572 | + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c" |
||
573 | + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74" |
||
574 | + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e" |
||
575 | + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b" |
||
576 | + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d" |
||
577 | + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13" |
||
578 | + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d" |
||
579 | + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5" |
||
580 | + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2" |
||
581 | + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20" |
||
582 | + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1" |
||
583 | + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e" |
||
584 | + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7" |
||
585 | + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e" |
||
586 | + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65", |
||
587 | + .rlen = 285 + 20 + 15, |
||
588 | + } |
||
589 | +}; |
||
590 | + |
||
591 | +static struct tls_testvec tls_dec_tv_template[] = { |
||
592 | + { |
||
593 | +#ifdef __LITTLE_ENDIAN |
||
594 | + .key = "\x08\x00" /* rta length */ |
||
595 | + "\x01\x00" /* rta type */ |
||
596 | +#else |
||
597 | + .key = "\x00\x08" /* rta length */ |
||
598 | + "\x00\x01" /* rta type */ |
||
599 | +#endif |
||
600 | + "\x00\x00\x00\x10" /* enc key length */ |
||
601 | + "authenticationkey20benckeyis16_bytes", |
||
602 | + .klen = 8 + 20 + 16, |
||
603 | + .iv = "iv0123456789abcd", |
||
604 | + .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1" |
||
605 | + "\x59\x79\x1e\x91\x5f\x52\x14\x9c" |
||
606 | + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73" |
||
607 | + "\xdc\x89\x47\x49\x49\xcb\x30\x6b" |
||
608 | + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02" |
||
609 | + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61", |
||
610 | + .ilen = 16 + 20 + 12, |
||
611 | + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" |
||
612 | + "\x00\x03\x01\x00\x30", |
||
613 | + .alen = 13, |
||
614 | + .result = "Single block msg", |
||
615 | + .rlen = 16, |
||
616 | + }, { |
||
617 | +#ifdef __LITTLE_ENDIAN |
||
618 | + .key = "\x08\x00" /* rta length */ |
||
619 | + "\x01\x00" /* rta type */ |
||
620 | +#else |
||
621 | + .key = "\x00\x08" /* rta length */ |
||
622 | + "\x00\x01" /* rta type */ |
||
623 | +#endif |
||
624 | + "\x00\x00\x00\x10" /* enc key length */ |
||
625 | + "authenticationkey20benckeyis16_bytes", |
||
626 | + .klen = 8 + 20 + 16, |
||
627 | + .iv = "iv0123456789abcd", |
||
628 | + .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67" |
||
629 | + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a" |
||
630 | + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45" |
||
631 | + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a", |
||
632 | + .ilen = 20 + 12, |
||
633 | + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" |
||
634 | + "\x00\x03\x01\x00\x20", |
||
635 | + .alen = 13, |
||
636 | + .result = "", |
||
637 | + .rlen = 0, |
||
638 | + }, { |
||
639 | +#ifdef __LITTLE_ENDIAN |
||
640 | + .key = "\x08\x00" /* rta length */ |
||
641 | + "\x01\x00" /* rta type */ |
||
642 | +#else |
||
643 | + .key = "\x00\x08" /* rta length */ |
||
644 | + "\x00\x01" /* rta type */ |
||
645 | +#endif |
||
646 | + "\x00\x00\x00\x10" /* enc key length */ |
||
647 | + "authenticationkey20benckeyis16_bytes", |
||
648 | + .klen = 8 + 20 + 16, |
||
649 | + .iv = "iv0123456789abcd", |
||
650 | + .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd" |
||
651 | + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90" |
||
652 | + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a" |
||
653 | + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94" |
||
654 | + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51" |
||
655 | + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31" |
||
656 | + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8" |
||
657 | + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50" |
||
658 | + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac" |
||
659 | + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14" |
||
660 | + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d" |
||
661 | + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8" |
||
662 | + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c" |
||
663 | + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74" |
||
664 | + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e" |
||
665 | + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b" |
||
666 | + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d" |
||
667 | + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13" |
||
668 | + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d" |
||
669 | + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5" |
||
670 | + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2" |
||
671 | + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20" |
||
672 | + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1" |
||
673 | + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e" |
||
674 | + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7" |
||
675 | + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e" |
||
676 | + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65", |
||
677 | + |
||
678 | + .ilen = 285 + 20 + 15, |
||
679 | + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" |
||
680 | + "\x00\x03\x01\x01\x40", |
||
681 | + .alen = 13, |
||
682 | + .result = "285 bytes plaintext285 bytes plaintext285 bytes" |
||
683 | + " plaintext285 bytes plaintext285 bytes plaintext285" |
||
684 | + " bytes plaintext285 bytes plaintext285 bytes" |
||
685 | + " plaintext285 bytes plaintext285 bytes plaintext285" |
||
686 | + " bytes plaintext285 bytes plaintext285 bytes" |
||
687 | + " plaintext285 bytes plaintext285 bytes plaintext", |
||
688 | + .rlen = 285, |
||
689 | + } |
||
690 | +}; |
||
691 | + |
||
692 | +/* |
||
693 | * RSA test vectors. Borrowed from openSSL. |
||
694 | */ |
||
695 | static const struct akcipher_testvec rsa_tv_template[] = { |
||
696 | --- /dev/null |
||
697 | +++ b/crypto/tls.c |
||
698 | @@ -0,0 +1,607 @@ |
||
699 | +/* |
||
700 | + * Copyright 2013 Freescale Semiconductor, Inc. |
||
701 | + * Copyright 2017 NXP Semiconductor, Inc. |
||
702 | + * |
||
703 | + * This program is free software; you can redistribute it and/or modify it |
||
704 | + * under the terms of the GNU General Public License as published by the Free |
||
705 | + * Software Foundation; either version 2 of the License, or (at your option) |
||
706 | + * any later version. |
||
707 | + * |
||
708 | + */ |
||
709 | + |
||
710 | +#include <crypto/internal/aead.h> |
||
711 | +#include <crypto/internal/hash.h> |
||
712 | +#include <crypto/internal/skcipher.h> |
||
713 | +#include <crypto/authenc.h> |
||
714 | +#include <crypto/null.h> |
||
715 | +#include <crypto/scatterwalk.h> |
||
716 | +#include <linux/err.h> |
||
717 | +#include <linux/init.h> |
||
718 | +#include <linux/module.h> |
||
719 | +#include <linux/rtnetlink.h> |
||
720 | + |
||
721 | +struct tls_instance_ctx { |
||
722 | + struct crypto_ahash_spawn auth; |
||
723 | + struct crypto_skcipher_spawn enc; |
||
724 | +}; |
||
725 | + |
||
726 | +struct crypto_tls_ctx { |
||
727 | + unsigned int reqoff; |
||
728 | + struct crypto_ahash *auth; |
||
729 | + struct crypto_skcipher *enc; |
||
730 | + struct crypto_skcipher *null; |
||
731 | +}; |
||
732 | + |
||
733 | +struct tls_request_ctx { |
||
734 | + /* |
||
735 | + * cryptlen holds the payload length in the case of encryption or |
||
736 | + * payload_len + icv_len + padding_len in case of decryption |
||
737 | + */ |
||
738 | + unsigned int cryptlen; |
||
739 | + /* working space for partial results */ |
||
740 | + struct scatterlist tmp[2]; |
||
741 | + struct scatterlist cipher[2]; |
||
742 | + struct scatterlist dst[2]; |
||
743 | + char tail[]; |
||
744 | +}; |
||
745 | + |
||
746 | +struct async_op { |
||
747 | + struct completion completion; |
||
748 | + int err; |
||
749 | +}; |
||
750 | + |
||
751 | +static void tls_async_op_done(struct crypto_async_request *req, int err) |
||
752 | +{ |
||
753 | + struct async_op *areq = req->data; |
||
754 | + |
||
755 | + if (err == -EINPROGRESS) |
||
756 | + return; |
||
757 | + |
||
758 | + areq->err = err; |
||
759 | + complete(&areq->completion); |
||
760 | +} |
||
761 | + |
||
762 | +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key, |
||
763 | + unsigned int keylen) |
||
764 | +{ |
||
765 | + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls); |
||
766 | + struct crypto_ahash *auth = ctx->auth; |
||
767 | + struct crypto_skcipher *enc = ctx->enc; |
||
768 | + struct crypto_authenc_keys keys; |
||
769 | + int err = -EINVAL; |
||
770 | + |
||
771 | + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
||
772 | + goto badkey; |
||
773 | + |
||
774 | + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); |
||
775 | + crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) & |
||
776 | + CRYPTO_TFM_REQ_MASK); |
||
777 | + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen); |
||
778 | + crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) & |
||
779 | + CRYPTO_TFM_RES_MASK); |
||
780 | + |
||
781 | + if (err) |
||
782 | + goto out; |
||
783 | + |
||
784 | + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); |
||
785 | + crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) & |
||
786 | + CRYPTO_TFM_REQ_MASK); |
||
787 | + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen); |
||
788 | + crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) & |
||
789 | + CRYPTO_TFM_RES_MASK); |
||
790 | + |
||
791 | +out: |
||
792 | + return err; |
||
793 | + |
||
794 | +badkey: |
||
795 | + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN); |
||
796 | + goto out; |
||
797 | +} |
||
798 | + |
||
799 | +/** |
||
800 | + * crypto_tls_genicv - Calculate hmac digest for a TLS record |
||
801 | + * @hash: (output) buffer to save the digest into |
||
802 | + * @src: (input) scatterlist with the assoc and payload data |
||
803 | + * @srclen: (input) size of the source buffer (assoclen + cryptlen) |
||
804 | + * @req: (input) aead request |
||
805 | + **/ |
||
806 | +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src, |
||
807 | + unsigned int srclen, struct aead_request *req) |
||
808 | +{ |
||
809 | + struct crypto_aead *tls = crypto_aead_reqtfm(req); |
||
810 | + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls); |
||
811 | + struct tls_request_ctx *treq_ctx = aead_request_ctx(req); |
||
812 | + struct async_op ahash_op; |
||
813 | + struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff); |
||
814 | + unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
||
815 | + int err = -EBADMSG; |
||
816 | + |
||
817 | + /* Bail out if the request assoc len is 0 */ |
||
818 | + if (!req->assoclen) |
||
819 | + return err; |
||
820 | + |
||
821 | + init_completion(&ahash_op.completion); |
||
822 | + |
||
823 | + /* the hash transform to be executed comes from the original request */ |
||
824 | + ahash_request_set_tfm(ahreq, ctx->auth); |
||
825 | + /* prepare the hash request with input data and result pointer */ |
||
826 | + ahash_request_set_crypt(ahreq, src, hash, srclen); |
||
827 | + /* set the notifier for when the async hash function returns */ |
||
828 | + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, |
||
829 | + tls_async_op_done, &ahash_op); |
||
830 | + |
||
831 | + /* Calculate the digest on the given data. The result is put in hash */ |
||
832 | + err = crypto_ahash_digest(ahreq); |
||
833 | + if (err == -EINPROGRESS) { |
||
834 | + err = wait_for_completion_interruptible(&ahash_op.completion); |
||
835 | + if (!err) |
||
836 | + err = ahash_op.err; |
||
837 | + } |
||
838 | + |
||
839 | + return err; |
||
840 | +} |
||
841 | + |
||
842 | +/** |
||
843 | + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record |
||
844 | + * @hash: (output) buffer to save the digest and padding into |
||
845 | + * @phashlen: (output) the size of digest + padding |
||
846 | + * @req: (input) aead request |
||
847 | + **/ |
||
848 | +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen, |
||
849 | + struct aead_request *req) |
||
850 | +{ |
||
851 | + struct crypto_aead *tls = crypto_aead_reqtfm(req); |
||
852 | + unsigned int hash_size = crypto_aead_authsize(tls); |
||
853 | + unsigned int block_size = crypto_aead_blocksize(tls); |
||
854 | + unsigned int srclen = req->cryptlen + hash_size; |
||
855 | + unsigned int icvlen = req->cryptlen + req->assoclen; |
||
856 | + unsigned int padlen; |
||
857 | + int err; |
||
858 | + |
||
859 | + err = crypto_tls_genicv(hash, req->src, icvlen, req); |
||
860 | + if (err) |
||
861 | + goto out; |
||
862 | + |
||
863 | + /* add padding after digest */ |
||
864 | + padlen = block_size - (srclen % block_size); |
||
865 | + memset(hash + hash_size, padlen - 1, padlen); |
||
866 | + |
||
867 | + *phashlen = hash_size + padlen; |
||
868 | +out: |
||
869 | + return err; |
||
870 | +} |
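A worked instance of the padding arithmetic, using numbers from the test vectors above: with hmac(sha1) (20-byte digest) and cbc(aes) (16-byte blocks), a 16-byte payload gives srclen = 16 + 20 = 36, so padlen = 16 - (36 % 16) = 12 and twelve bytes of value 11 (padlen - 1, TLS convention) are appended — the encrypted record is 16 + 20 + 12 bytes, exactly the `.rlen` of the first tls_enc_tv_template entry. For the 285-byte vector, srclen = 305 and padlen = 15, matching `.rlen = 285 + 20 + 15`. Note padlen is never zero: a block-aligned srclen yields a full block of padding.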
+
+static int crypto_tls_copy_data(struct aead_request *req,
+				struct scatterlist *src,
+				struct scatterlist *dst,
+				unsigned int len)
+{
+	struct crypto_aead *tls = crypto_aead_reqtfm(req);
+	struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
+	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+
+	skcipher_request_set_tfm(skreq, ctx->null);
+	skcipher_request_set_callback(skreq, aead_request_flags(req),
+				      NULL, NULL);
+	skcipher_request_set_crypt(skreq, src, dst, len, NULL);
+
+	return crypto_skcipher_encrypt(skreq);
+}
+
+static int crypto_tls_encrypt(struct aead_request *req)
+{
+	struct crypto_aead *tls = crypto_aead_reqtfm(req);
+	struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
+	struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
+	struct skcipher_request *skreq;
+	struct scatterlist *cipher = treq_ctx->cipher;
+	struct scatterlist *tmp = treq_ctx->tmp;
+	struct scatterlist *sg, *src, *dst;
+	unsigned int cryptlen, phashlen;
+	u8 *hash = treq_ctx->tail;
+	int err;
+
+	/*
+	 * The hash result is saved at the beginning of the tls request ctx
+	 * and is aligned as required by the hash transform. Enough space was
+	 * allocated in crypto_tls_init_tfm to accommodate the difference. The
+	 * requests themselves start later at treq_ctx->tail + ctx->reqoff so
+	 * the result is not overwritten by the second (cipher) request.
+	 */
+	hash = (u8 *)ALIGN((unsigned long)hash +
+			   crypto_ahash_alignmask(ctx->auth),
+			   crypto_ahash_alignmask(ctx->auth) + 1);
+
+	/*
+	 * STEP 1: create ICV together with necessary padding
+	 */
+	err = crypto_tls_gen_padicv(hash, &phashlen, req);
+	if (err)
+		return err;
+
+	/*
+	 * STEP 2: Hash and padding are combined with the payload
+	 * depending on the form it arrives. Scatter tables must have at least
+	 * one page of data before chaining with another table and can't have
+	 * an empty data page. The following code addresses these requirements.
+	 *
+	 * If the payload is empty, only the hash is encrypted, otherwise the
+	 * payload scatterlist is merged with the hash. A special merging case
+	 * is when the payload has only one page of data. In that case the
+	 * payload page is moved to another scatterlist and prepared there for
+	 * encryption.
+	 */
+	if (req->cryptlen) {
+		src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
+
+		sg_init_table(cipher, 2);
+		sg_set_buf(cipher + 1, hash, phashlen);
+
+		if (sg_is_last(src)) {
+			sg_set_page(cipher, sg_page(src), req->cryptlen,
+				    src->offset);
+			src = cipher;
+		} else {
+			unsigned int rem_len = req->cryptlen;
+
+			for (sg = src; rem_len > sg->length; sg = sg_next(sg))
+				rem_len -= min(rem_len, sg->length);
+
+			sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
+			sg_chain(sg, 1, cipher);
+		}
+	} else {
+		sg_init_one(cipher, hash, phashlen);
+		src = cipher;
+	}
+
+	/*
+	 * If src != dst copy the associated data from source to destination.
+	 * In both cases fast-forward past the associated data in the dest.
+	 */
+	if (req->src != req->dst) {
+		err = crypto_tls_copy_data(req, req->src, req->dst,
+					   req->assoclen);
+		if (err)
+			return err;
+	}
+	dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
+
+	/*
+	 * STEP 3: encrypt the frame and return the result
+	 */
+	cryptlen = req->cryptlen + phashlen;
+
+	/*
+	 * The hash and the cipher are applied at different times and their
+	 * requests can use the same memory space without interference
+	 */
+	skreq = (void *)(treq_ctx->tail + ctx->reqoff);
+	skcipher_request_set_tfm(skreq, ctx->enc);
+	skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
+	skcipher_request_set_callback(skreq, aead_request_flags(req),
+				      req->base.complete, req->base.data);
+	/*
+	 * Apply the cipher transform. The result will be in req->dst when the
+	 * asynchronous call terminates
+	 */
+	err = crypto_skcipher_encrypt(skreq);
+
+	return err;
+}
+
+static int crypto_tls_decrypt(struct aead_request *req)
+{
+	struct crypto_aead *tls = crypto_aead_reqtfm(req);
+	struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
+	struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
+	unsigned int cryptlen = req->cryptlen;
+	unsigned int hash_size = crypto_aead_authsize(tls);
+	unsigned int block_size = crypto_aead_blocksize(tls);
+	struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
+	struct scatterlist *tmp = treq_ctx->tmp;
+	struct scatterlist *src, *dst;
+
+	u8 padding[255];	/* padding can be 0-255 bytes */
+	u8 pad_size;
+	u16 *len_field;
+	u8 *ihash, *hash = treq_ctx->tail;
+
+	int paderr = 0;
+	int err = -EINVAL;
+	int i;
+	struct async_op ciph_op;
+
+	/*
+	 * Rule out bad packets. The input packet length must be at least one
+	 * byte more than the hash_size
+	 */
+	if (cryptlen <= hash_size || cryptlen % block_size)
+		goto out;
+
+	/*
+	 * Step 1 - Decrypt the source. Fast-forward past the associated data
+	 * to the encrypted data. The result will be overwritten in place so
+	 * that the decrypted data will be adjacent to the associated data. The
+	 * last step (computing the hash) will have its input data already
+	 * prepared and ready to be accessed at req->src.
+	 */
+	src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
+	dst = src;
+
+	init_completion(&ciph_op.completion);
+	skcipher_request_set_tfm(skreq, ctx->enc);
+	skcipher_request_set_callback(skreq, aead_request_flags(req),
+				      tls_async_op_done, &ciph_op);
+	skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
+	err = crypto_skcipher_decrypt(skreq);
+	if (err == -EINPROGRESS) {
+		err = wait_for_completion_interruptible(&ciph_op.completion);
+		if (!err)
+			err = ciph_op.err;
+	}
+	if (err)
+		goto out;
+
+	/*
+	 * Step 2 - Verify padding
+	 * Retrieve the last byte of the payload; this is the padding size.
+	 */
+	cryptlen -= 1;
+	scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
+
+	/* RFC recommendation for invalid padding size. */
+	if (cryptlen < pad_size + hash_size) {
+		pad_size = 0;
+		paderr = -EBADMSG;
+	}
+	cryptlen -= pad_size;
+	scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
+
+	/* Padding content must be equal to pad_size. We verify it all */
+	for (i = 0; i < pad_size; i++)
+		if (padding[i] != pad_size)
+			paderr = -EBADMSG;
+
+	/*
+	 * Step 3 - Verify hash
+	 * Align the digest result as required by the hash transform. Enough
+	 * space was allocated in crypto_tls_init_tfm
+	 */
+	hash = (u8 *)ALIGN((unsigned long)hash +
+			   crypto_ahash_alignmask(ctx->auth),
+			   crypto_ahash_alignmask(ctx->auth) + 1);
+	/*
+	 * Two bytes at the end of the associated data make the length field.
+	 * It must be updated with the length of the cleartext message before
+	 * the hash is calculated.
+	 */
+	len_field = sg_virt(req->src) + req->assoclen - 2;
+	cryptlen -= hash_size;
+	*len_field = htons(cryptlen);
+
+	/* This is the hash from the decrypted packet. Save it for later */
+	ihash = hash + hash_size;
+	scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
+
+	/* Now compute and compare our ICV with the one from the packet */
+	err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
+	if (!err)
+		err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
+
+	if (req->src != req->dst) {
+		err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
+					   req->assoclen);
+		if (err)
+			goto out;
+	}
+
+	/* return the first found error */
+	if (paderr)
+		err = paderr;
+
+out:
+	aead_request_complete(req, err);
+	return err;
+}
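The "RFC recommendation" the comment refers to is the standard mitigation for padding-oracle attacks on MAC-then-encrypt CBC records: when the padding length is implausible, treat the pad as zero-length, still compute the MAC, and report the same -EBADMSG as a MAC failure, so an attacker cannot distinguish bad padding from a bad MAC (cf. RFC 5246, section 6.2.3.2).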
+
+static int crypto_tls_init_tfm(struct crypto_aead *tfm)
+{
+	struct aead_instance *inst = aead_alg_instance(tfm);
+	struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
+	struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
+	struct crypto_ahash *auth;
+	struct crypto_skcipher *enc;
+	struct crypto_skcipher *null;
+	int err;
+
+	auth = crypto_spawn_ahash(&ictx->auth);
+	if (IS_ERR(auth))
+		return PTR_ERR(auth);
+
+	enc = crypto_spawn_skcipher(&ictx->enc);
+	err = PTR_ERR(enc);
+	if (IS_ERR(enc))
+		goto err_free_ahash;
+
+	null = crypto_get_default_null_skcipher2();
+	err = PTR_ERR(null);
+	if (IS_ERR(null))
+		goto err_free_skcipher;
+
+	ctx->auth = auth;
+	ctx->enc = enc;
+	ctx->null = null;
+
+	/*
+	 * Allow enough space for two digests. The two digests will be compared
+	 * during the decryption phase. One will come from the decrypted packet
+	 * and the other will be calculated. For encryption, one digest is
+	 * padded (up to a cipher blocksize) and chained with the payload
+	 */
+	ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
+			    crypto_ahash_alignmask(auth),
+			    crypto_ahash_alignmask(auth) + 1) +
+		      max(crypto_ahash_digestsize(auth),
+			  crypto_skcipher_blocksize(enc));
+
+	crypto_aead_set_reqsize(tfm,
+				sizeof(struct tls_request_ctx) +
+				ctx->reqoff +
+				max_t(unsigned int,
+				      crypto_ahash_reqsize(auth) +
+				      sizeof(struct ahash_request),
+				      crypto_skcipher_reqsize(enc) +
+				      sizeof(struct skcipher_request)));
+
+	return 0;
+
+err_free_skcipher:
+	crypto_free_skcipher(enc);
+err_free_ahash:
+	crypto_free_ahash(auth);
+	return err;
+}
+
+static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
+{
+	struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_free_ahash(ctx->auth);
+	crypto_free_skcipher(ctx->enc);
+	crypto_put_default_null_skcipher2();
+}
+
+static void crypto_tls_free(struct aead_instance *inst)
+{
+	struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_skcipher(&ctx->enc);
+	crypto_drop_ahash(&ctx->auth);
+	kfree(inst);
+}
+
+static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+	struct crypto_attr_type *algt;
+	struct aead_instance *inst;
+	struct hash_alg_common *auth;
+	struct crypto_alg *auth_base;
+	struct skcipher_alg *enc;
+	struct tls_instance_ctx *ctx;
+	const char *enc_name;
+	int err;
+
+	algt = crypto_get_attr_type(tb);
+	if (IS_ERR(algt))
+		return PTR_ERR(algt);
+
+	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
+		return -EINVAL;
+
+	auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
+			      CRYPTO_ALG_TYPE_AHASH_MASK |
+			      crypto_requires_sync(algt->type, algt->mask));
+	if (IS_ERR(auth))
+		return PTR_ERR(auth);
+
+	auth_base = &auth->base;
+
+	enc_name = crypto_attr_alg_name(tb[2]);
+	err = PTR_ERR(enc_name);
+	if (IS_ERR(enc_name))
+		goto out_put_auth;
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+	err = -ENOMEM;
+	if (!inst)
+		goto out_put_auth;
+
+	ctx = aead_instance_ctx(inst);
+
+	err = crypto_init_ahash_spawn(&ctx->auth, auth,
+				      aead_crypto_instance(inst));
+	if (err)
+		goto err_free_inst;
+
+	crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
+	err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
+				   crypto_requires_sync(algt->type,
+							algt->mask));
+	if (err)
+		goto err_drop_auth;
+
+	enc = crypto_spawn_skcipher_alg(&ctx->enc);
+
+	err = -ENAMETOOLONG;
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "tls10(%s,%s)", auth_base->cra_name,
+		     enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
+		goto err_drop_enc;
+
+	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+		     "tls10(%s,%s)", auth_base->cra_driver_name,
+		     enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+		goto err_drop_enc;
+
+	inst->alg.base.cra_flags = (auth_base->cra_flags |
+				    enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
+				      auth_base->cra_priority;
+	inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
+	inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
+				       enc->base.cra_alignmask;
+	inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
+
+	inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
+	inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
+	inst->alg.maxauthsize = auth->digestsize;
+
+	inst->alg.init = crypto_tls_init_tfm;
+	inst->alg.exit = crypto_tls_exit_tfm;
+
+	inst->alg.setkey = crypto_tls_setkey;
+	inst->alg.encrypt = crypto_tls_encrypt;
+	inst->alg.decrypt = crypto_tls_decrypt;
+
+	inst->free = crypto_tls_free;
+
+	err = aead_register_instance(tmpl, inst);
+	if (err)
+		goto err_drop_enc;
+
+out:
+	crypto_mod_put(auth_base);
+	return err;
+
+err_drop_enc:
+	crypto_drop_skcipher(&ctx->enc);
+err_drop_auth:
+	crypto_drop_ahash(&ctx->auth);
+err_free_inst:
+	kfree(inst);
+out_put_auth:
+	goto out;
+}
+
+static struct crypto_template crypto_tls_tmpl = {
+	.name = "tls10",
+	.create = crypto_tls_create,
+	.module = THIS_MODULE,
+};
+
+static int __init crypto_tls_module_init(void)
+{
+	return crypto_register_template(&crypto_tls_tmpl);
+}
+
+static void __exit crypto_tls_module_exit(void)
+{
+	crypto_unregister_template(&crypto_tls_tmpl);
+}
+
+module_init(crypto_tls_module_init);
+module_exit(crypto_tls_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TLS 1.0 record encryption");
1306 | --- a/drivers/crypto/Makefile |
||
1307 | +++ b/drivers/crypto/Makefile |
||
1308 | @@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chel |
||
1309 | obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/ |
||
1310 | obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/ |
||
1311 | obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o |
||
1312 | -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/ |
||
1313 | +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/ |
||
1314 | obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o |
||
1315 | obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o |
||
1316 | obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o |
||
1317 | --- a/drivers/crypto/caam/Kconfig |
||
1318 | +++ b/drivers/crypto/caam/Kconfig |
||
1319 | @@ -1,7 +1,11 @@ |
||
1320 | +config CRYPTO_DEV_FSL_CAAM_COMMON |
||
1321 | + tristate |
||
1322 | + |
||
1323 | config CRYPTO_DEV_FSL_CAAM |
||
1324 | - tristate "Freescale CAAM-Multicore driver backend" |
||
1325 | + tristate "Freescale CAAM-Multicore platform driver backend" |
||
1326 | depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE |
||
1327 | select SOC_BUS |
||
1328 | + select CRYPTO_DEV_FSL_CAAM_COMMON |
||
1329 | help |
||
1330 | Enables the driver module for Freescale's Cryptographic Accelerator |
||
1331 | and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). |
||
1332 | @@ -12,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM |
||
1333 | To compile this driver as a module, choose M here: the module |
||
1334 | will be called caam. |
||
1335 | |||
1336 | +if CRYPTO_DEV_FSL_CAAM |
||
1337 | + |
||
1338 | +config CRYPTO_DEV_FSL_CAAM_DEBUG |
||
1339 | + bool "Enable debug output in CAAM driver" |
||
1340 | + help |
||
1341 | + Selecting this will enable printing of various debug |
||
1342 | + information in the CAAM driver. |
||
1343 | + |
||
1344 | config CRYPTO_DEV_FSL_CAAM_JR |
||
1345 | tristate "Freescale CAAM Job Ring driver backend" |
||
1346 | - depends on CRYPTO_DEV_FSL_CAAM |
||
1347 | default y |
||
1348 | help |
||
1349 | Enables the driver module for Job Rings which are part of |
||
1350 | @@ -25,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR |
||
1351 | To compile this driver as a module, choose M here: the module |
||
1352 | will be called caam_jr. |
||
1353 | |||
1354 | +if CRYPTO_DEV_FSL_CAAM_JR |
||
1355 | + |
||
1356 | config CRYPTO_DEV_FSL_CAAM_RINGSIZE |
||
1357 | int "Job Ring size" |
||
1358 | - depends on CRYPTO_DEV_FSL_CAAM_JR |
||
1359 | range 2 9 |
||
1360 | default "9" |
||
1361 | help |
||
1362 | @@ -45,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE |
||
1363 | |||
1364 | config CRYPTO_DEV_FSL_CAAM_INTC |
||
1365 | bool "Job Ring interrupt coalescing" |
||
1366 | - depends on CRYPTO_DEV_FSL_CAAM_JR |
||
1367 | help |
||
1368 | Enable the Job Ring's interrupt coalescing feature. |
||
1369 | |||
1370 | @@ -75,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL |
||
1371 | |||
1372 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API |
||
1373 | tristate "Register algorithm implementations with the Crypto API" |
||
1374 | - depends on CRYPTO_DEV_FSL_CAAM_JR |
||
1375 | default y |
||
1376 | select CRYPTO_AEAD |
||
1377 | select CRYPTO_AUTHENC |
||
1378 | @@ -90,7 +100,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API |
||
1379 | |||
1380 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI |
||
1381 | tristate "Queue Interface as Crypto API backend" |
||
1382 | - depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET |
||
1383 | + depends on FSL_SDK_DPA && NET |
||
1384 | default y |
||
1385 | select CRYPTO_AUTHENC |
||
1386 | select CRYPTO_BLKCIPHER |
||
1387 | @@ -107,7 +117,6 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI |
||
1388 | |||
1389 | config CRYPTO_DEV_FSL_CAAM_AHASH_API |
||
1390 | tristate "Register hash algorithm implementations with Crypto API" |
||
1391 | - depends on CRYPTO_DEV_FSL_CAAM_JR |
||
1392 | default y |
||
1393 | select CRYPTO_HASH |
||
1394 | help |
||
1395 | @@ -119,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API |
||
1396 | |||
1397 | config CRYPTO_DEV_FSL_CAAM_PKC_API |
||
1398 | tristate "Register public key cryptography implementations with Crypto API" |
||
1399 | - depends on CRYPTO_DEV_FSL_CAAM_JR |
||
1400 | default y |
||
1401 | select CRYPTO_RSA |
||
1402 | help |
||
1403 | @@ -131,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API |
||
1404 | |||
1405 | config CRYPTO_DEV_FSL_CAAM_RNG_API |
||
1406 | tristate "Register caam device for hwrng API" |
||
1407 | - depends on CRYPTO_DEV_FSL_CAAM_JR |
||
1408 | default y |
||
1409 | select CRYPTO_RNG |
||
1410 | select HW_RANDOM |
||
1411 | @@ -142,13 +149,31 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API |
||
1412 | To compile this as a module, choose M here: the module |
||
1413 | will be called caamrng. |
||
1414 | |||
1415 | -config CRYPTO_DEV_FSL_CAAM_DEBUG |
||
1416 | - bool "Enable debug output in CAAM driver" |
||
1417 | - depends on CRYPTO_DEV_FSL_CAAM |
||
1418 | - help |
||
1419 | - Selecting this will enable printing of various debug |
||
1420 | - information in the CAAM driver. |
||
1421 | +endif # CRYPTO_DEV_FSL_CAAM_JR |
||
1422 | + |
||
1423 | +endif # CRYPTO_DEV_FSL_CAAM |
||
1424 | + |
||
1425 | +config CRYPTO_DEV_FSL_DPAA2_CAAM |
||
1426 | + tristate "QorIQ DPAA2 CAAM (DPSECI) driver" |
||
1427 | + depends on FSL_MC_DPIO |
||
1428 | + select CRYPTO_DEV_FSL_CAAM_COMMON |
||
1429 | + select CRYPTO_BLKCIPHER |
||
1430 | + select CRYPTO_AUTHENC |
||
1431 | + select CRYPTO_AEAD |
||
1432 | + select CRYPTO_HASH |
||
1433 | + ---help--- |
||
1434 | + CAAM driver for QorIQ Data Path Acceleration Architecture 2. |
||
1435 | + It handles DPSECI DPAA2 objects that sit on the Management Complex |
||
1436 | + (MC) fsl-mc bus. |
||
1437 | + |
||
1438 | + To compile this as a module, choose M here: the module |
||
1439 | + will be called dpaa2_caam. |
||
1440 | |||
1441 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC |
||
1442 | def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \ |
||
1443 | - CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) |
||
1444 | + CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \ |
||
1445 | + CRYPTO_DEV_FSL_DPAA2_CAAM) |
||
1446 | + |
||
1447 | +config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC |
||
1448 | + def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \ |
||
1449 | + CRYPTO_DEV_FSL_DPAA2_CAAM) |
||
1450 | --- a/drivers/crypto/caam/Makefile |
||
1451 | +++ b/drivers/crypto/caam/Makefile |
||
1452 | @@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG |
||
1453 | ccflags-y := -DDEBUG |
||
1454 | endif |
||
1455 | |||
1456 | +ccflags-y += -DVERSION=\"\" |
||
1457 | + |
||
1458 | +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o |
||
1459 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o |
||
1460 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o |
||
1461 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o |
||
1462 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o |
||
1463 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o |
||
1464 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o |
||
1465 | +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o |
||
1466 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o |
||
1467 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o |
||
1468 | |||
1469 | caam-objs := ctrl.o |
||
1470 | -caam_jr-objs := jr.o key_gen.o error.o |
||
1471 | +caam_jr-objs := jr.o key_gen.o |
||
1472 | caam_pkc-y := caampkc.o pkc_desc.o |
||
1473 | ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),) |
||
1474 | ccflags-y += -DCONFIG_CAAM_QI |
||
1475 | caam-objs += qi.o |
||
1476 | endif |
||
1477 | + |
||
1478 | +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o |
||
1479 | + |
||
1480 | +dpaa2_caam-y := caamalg_qi2.o dpseci.o |
||
1481 | --- a/drivers/crypto/caam/caamalg.c |
||
1482 | +++ b/drivers/crypto/caam/caamalg.c |
||
1483 | @@ -108,6 +108,7 @@ struct caam_ctx { |
||
1484 | dma_addr_t sh_desc_dec_dma; |
||
1485 | dma_addr_t sh_desc_givenc_dma; |
||
1486 | dma_addr_t key_dma; |
||
1487 | + enum dma_data_direction dir; |
||
1488 | struct device *jrdev; |
||
1489 | struct alginfo adata; |
||
1490 | struct alginfo cdata; |
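The new ctx->dir member is needed because, on SEC Era >= 6 parts, the DKP protocol used later in this patch makes the CAAM write the derived split key back into the very context buffer the CPU filled, so that region can no longer be mapped one-way. A minimal sketch of the rule (the helper name is ours; the patch open-codes this in caam_init_common()):

    /*
     * Sketch only. With DKP (Era >= 6) the device writes the derived
     * split key back into the mapped context region, so the mapping
     * must be bidirectional; otherwise DMA_TO_DEVICE suffices.
     */
    static enum dma_data_direction caam_ctx_dir(int era, bool uses_dkp)
    {
            return (era >= 6 && uses_dkp) ? DMA_BIDIRECTIONAL
                                          : DMA_TO_DEVICE;
    }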
||
1491 | @@ -118,6 +119,7 @@ static int aead_null_set_sh_desc(struct |
||
1492 | { |
||
1493 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
1494 | struct device *jrdev = ctx->jrdev; |
||
1495 | + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); |
||
1496 | u32 *desc; |
||
1497 | int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN - |
||
1498 | ctx->adata.keylen_pad; |
||
1499 | @@ -136,9 +138,10 @@ static int aead_null_set_sh_desc(struct |
||
1500 | |||
1501 | /* aead_encrypt shared descriptor */ |
||
1502 | desc = ctx->sh_desc_enc; |
||
1503 | - cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize); |
||
1504 | + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize, |
||
1505 | + ctrlpriv->era); |
||
1506 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, |
||
1507 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
1508 | + desc_bytes(desc), ctx->dir); |
||
1509 | |||
1510 | /* |
||
1511 | * Job Descriptor and Shared Descriptors |
||
1512 | @@ -154,9 +157,10 @@ static int aead_null_set_sh_desc(struct |
||
1513 | |||
1514 | /* aead_decrypt shared descriptor */ |
||
1515 | desc = ctx->sh_desc_dec; |
||
1516 | - cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize); |
||
1517 | + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize, |
||
1518 | + ctrlpriv->era); |
||
1519 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, |
||
1520 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
1521 | + desc_bytes(desc), ctx->dir); |
||
1522 | |||
1523 | return 0; |
||
1524 | } |
||
1525 | @@ -168,6 +172,7 @@ static int aead_set_sh_desc(struct crypt |
||
1526 | unsigned int ivsize = crypto_aead_ivsize(aead); |
||
1527 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
1528 | struct device *jrdev = ctx->jrdev; |
||
1529 | + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); |
||
1530 | u32 ctx1_iv_off = 0; |
||
1531 | u32 *desc, *nonce = NULL; |
||
1532 | u32 inl_mask; |
||
1533 | @@ -234,9 +239,9 @@ static int aead_set_sh_desc(struct crypt |
||
1534 | desc = ctx->sh_desc_enc; |
||
1535 | cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize, |
||
1536 | ctx->authsize, is_rfc3686, nonce, ctx1_iv_off, |
||
1537 | - false); |
||
1538 | + false, ctrlpriv->era); |
||
1539 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, |
||
1540 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
1541 | + desc_bytes(desc), ctx->dir); |
||
1542 | |||
1543 | skip_enc: |
||
1544 | /* |
||
1545 | @@ -266,9 +271,9 @@ skip_enc: |
||
1546 | desc = ctx->sh_desc_dec; |
||
1547 | cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize, |
||
1548 | ctx->authsize, alg->caam.geniv, is_rfc3686, |
||
1549 | - nonce, ctx1_iv_off, false); |
||
1550 | + nonce, ctx1_iv_off, false, ctrlpriv->era); |
||
1551 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, |
||
1552 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
1553 | + desc_bytes(desc), ctx->dir); |
||
1554 | |||
1555 | if (!alg->caam.geniv) |
||
1556 | goto skip_givenc; |
||
1557 | @@ -300,9 +305,9 @@ skip_enc: |
||
1558 | desc = ctx->sh_desc_enc; |
||
1559 | cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize, |
||
1560 | ctx->authsize, is_rfc3686, nonce, |
||
1561 | - ctx1_iv_off, false); |
||
1562 | + ctx1_iv_off, false, ctrlpriv->era); |
||
1563 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, |
||
1564 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
1565 | + desc_bytes(desc), ctx->dir); |
||
1566 | |||
1567 | skip_givenc: |
||
1568 | return 0; |
||
1569 | @@ -323,6 +328,7 @@ static int gcm_set_sh_desc(struct crypto |
||
1570 | { |
||
1571 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
1572 | struct device *jrdev = ctx->jrdev; |
||
1573 | + unsigned int ivsize = crypto_aead_ivsize(aead); |
||
1574 | u32 *desc; |
||
1575 | int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - |
||
1576 | ctx->cdata.keylen; |
||
1577 | @@ -344,9 +350,9 @@ static int gcm_set_sh_desc(struct crypto |
||
1578 | } |
||
1579 | |||
1580 | desc = ctx->sh_desc_enc; |
||
1581 | - cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize); |
||
1582 | + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false); |
||
1583 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, |
||
1584 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
1585 | + desc_bytes(desc), ctx->dir); |
||
1586 | |||
1587 | /* |
||
1588 | * Job Descriptor and Shared Descriptors |
||
1589 | @@ -361,9 +367,9 @@ static int gcm_set_sh_desc(struct crypto |
||
1590 | } |
||
1591 | |||
1592 | desc = ctx->sh_desc_dec; |
||
1593 | - cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize); |
||
1594 | + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false); |
||
1595 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, |
||
1596 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
1597 | + desc_bytes(desc), ctx->dir); |
||
1598 | |||
1599 | return 0; |
||
1600 | } |
||
1601 | @@ -382,6 +388,7 @@ static int rfc4106_set_sh_desc(struct cr |
||
1602 | { |
||
1603 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
1604 | struct device *jrdev = ctx->jrdev; |
||
1605 | + unsigned int ivsize = crypto_aead_ivsize(aead); |
||
1606 | u32 *desc; |
||
1607 | int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - |
||
1608 | ctx->cdata.keylen; |
||
1609 | @@ -403,9 +410,10 @@ static int rfc4106_set_sh_desc(struct cr |
||
1610 | } |
||
1611 | |||
1612 | desc = ctx->sh_desc_enc; |
||
1613 | - cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize); |
||
1614 | + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize, |
||
1615 | + false); |
||
1616 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, |
||
1617 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
1618 | + desc_bytes(desc), ctx->dir); |
||
1619 | |||
1620 | /* |
||
1621 | * Job Descriptor and Shared Descriptors |
||
1622 | @@ -420,9 +428,10 @@ static int rfc4106_set_sh_desc(struct cr |
||
1623 | } |
||
1624 | |||
1625 | desc = ctx->sh_desc_dec; |
||
1626 | - cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize); |
||
1627 | + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize, |
||
1628 | + false); |
||
1629 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, |
||
1630 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
1631 | + desc_bytes(desc), ctx->dir); |
||
1632 | |||
1633 | return 0; |
||
1634 | } |
||
1635 | @@ -442,6 +451,7 @@ static int rfc4543_set_sh_desc(struct cr |
||
1636 | { |
||
1637 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
1638 | struct device *jrdev = ctx->jrdev; |
||
1639 | + unsigned int ivsize = crypto_aead_ivsize(aead); |
||
1640 | u32 *desc; |
||
1641 | int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - |
||
1642 | ctx->cdata.keylen; |
||
1643 | @@ -463,9 +473,10 @@ static int rfc4543_set_sh_desc(struct cr |
||
1644 | } |
||
1645 | |||
1646 | desc = ctx->sh_desc_enc; |
||
1647 | - cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize); |
||
1648 | + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize, |
||
1649 | + false); |
||
1650 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, |
||
1651 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
1652 | + desc_bytes(desc), ctx->dir); |
||
1653 | |||
1654 | /* |
||
1655 | * Job Descriptor and Shared Descriptors |
||
1656 | @@ -480,9 +491,10 @@ static int rfc4543_set_sh_desc(struct cr |
||
1657 | } |
||
1658 | |||
1659 | desc = ctx->sh_desc_dec; |
||
1660 | - cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize); |
||
1661 | + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize, |
||
1662 | + false); |
||
1663 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, |
||
1664 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
1665 | + desc_bytes(desc), ctx->dir); |
||
1666 | |||
1667 | return 0; |
||
1668 | } |
||
1669 | @@ -503,6 +515,7 @@ static int aead_setkey(struct crypto_aea |
||
1670 | { |
||
1671 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
1672 | struct device *jrdev = ctx->jrdev; |
||
1673 | + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); |
||
1674 | struct crypto_authenc_keys keys; |
||
1675 | int ret = 0; |
||
1676 | |||
1677 | @@ -517,6 +530,27 @@ static int aead_setkey(struct crypto_aea |
||
1678 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
||
1679 | #endif |
||
1680 | |||
1681 | + /* |
||
1682 | + * If DKP is supported, use it in the shared descriptor to generate |
||
1683 | + * the split key. |
||
1684 | + */ |
||
1685 | + if (ctrlpriv->era >= 6) { |
||
1686 | + ctx->adata.keylen = keys.authkeylen; |
||
1687 | + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & |
||
1688 | + OP_ALG_ALGSEL_MASK); |
||
1689 | + |
||
1690 | + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) |
||
1691 | + goto badkey; |
||
1692 | + |
||
1693 | + memcpy(ctx->key, keys.authkey, keys.authkeylen); |
||
1694 | + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, |
||
1695 | + keys.enckeylen); |
||
1696 | + dma_sync_single_for_device(jrdev, ctx->key_dma, |
||
1697 | + ctx->adata.keylen_pad + |
||
1698 | + keys.enckeylen, ctx->dir); |
||
1699 | + goto skip_split_key; |
||
1700 | + } |
||
1701 | + |
||
1702 | ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey, |
||
1703 | keys.authkeylen, CAAM_MAX_KEY_SIZE - |
||
1704 | keys.enckeylen); |
||
1705 | @@ -527,12 +561,14 @@ static int aead_setkey(struct crypto_aea |
||
1706 | /* postpend encryption key to auth split key */ |
||
1707 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); |
||
1708 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + |
||
1709 | - keys.enckeylen, DMA_TO_DEVICE); |
||
1710 | + keys.enckeylen, ctx->dir); |
||
1711 | #ifdef DEBUG |
||
1712 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", |
||
1713 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, |
||
1714 | ctx->adata.keylen_pad + keys.enckeylen, 1); |
||
1715 | #endif |
||
1716 | + |
||
1717 | +skip_split_key: |
||
1718 | ctx->cdata.keylen = keys.enckeylen; |
||
1719 | return aead_set_sh_desc(aead); |
||
1720 | badkey: |
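The Era >= 6 branch above sizes the padded authentication key with split_key_len(), which this series moves into key_gen.h (see the diffstat). A sketch of what that helper computes, assuming the usual MDHA pad sizes - a split key is an HMAC ipad/opad pair, i.e. twice the pad size of the selected hash:

    /* Sketch, assuming the mainline table of MDHA pad sizes. */
    static inline int split_key_len(u32 hash)
    {
            /* pad sizes for MD5, SHA1, SHA224, SHA256, SHA384, SHA512 */
            static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
            u32 idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;

            return (int)(mdpadlen[idx] * 2);        /* ipad + opad */
    }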
||
1721 | @@ -552,7 +588,7 @@ static int gcm_setkey(struct crypto_aead |
||
1722 | #endif |
||
1723 | |||
1724 | memcpy(ctx->key, key, keylen); |
||
1725 | - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); |
||
1726 | + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); |
||
1727 | ctx->cdata.keylen = keylen; |
||
1728 | |||
1729 | return gcm_set_sh_desc(aead); |
||
1730 | @@ -580,7 +616,7 @@ static int rfc4106_setkey(struct crypto_ |
||
1731 | */ |
||
1732 | ctx->cdata.keylen = keylen - 4; |
||
1733 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, |
||
1734 | - DMA_TO_DEVICE); |
||
1735 | + ctx->dir); |
||
1736 | return rfc4106_set_sh_desc(aead); |
||
1737 | } |
||
1738 | |||
1739 | @@ -606,7 +642,7 @@ static int rfc4543_setkey(struct crypto_ |
||
1740 | */ |
||
1741 | ctx->cdata.keylen = keylen - 4; |
||
1742 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, |
||
1743 | - DMA_TO_DEVICE); |
||
1744 | + ctx->dir); |
||
1745 | return rfc4543_set_sh_desc(aead); |
||
1746 | } |
||
1747 | |||
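Both rfc4106 and rfc4543 keep keylen - 4 as the cipher key length because, per RFC 4106, the key material handed to setkey is the AES key followed by a 4-byte salt. A plain-C model of that split (illustration only, not driver code):

    /* Model: the last 4 bytes of an rfc4106/rfc4543 key are the salt. */
    static int rfc4106_split_key(const u8 *key, unsigned int keylen,
                                 unsigned int *enckeylen, const u8 **salt)
    {
            if (keylen < 4)
                    return -EINVAL;
            *enckeylen = keylen - 4;        /* AES key proper */
            *salt = key + *enckeylen;       /* appended nonce/salt */
            return 0;
    }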
1748 | @@ -658,21 +694,21 @@ static int ablkcipher_setkey(struct cryp |
||
1749 | cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686, |
||
1750 | ctx1_iv_off); |
||
1751 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, |
||
1752 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
1753 | + desc_bytes(desc), ctx->dir); |
||
1754 | |||
1755 | /* ablkcipher_decrypt shared descriptor */ |
||
1756 | desc = ctx->sh_desc_dec; |
||
1757 | cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686, |
||
1758 | ctx1_iv_off); |
||
1759 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, |
||
1760 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
1761 | + desc_bytes(desc), ctx->dir); |
||
1762 | |||
1763 | /* ablkcipher_givencrypt shared descriptor */ |
||
1764 | desc = ctx->sh_desc_givenc; |
||
1765 | cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686, |
||
1766 | ctx1_iv_off); |
||
1767 | dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma, |
||
1768 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
1769 | + desc_bytes(desc), ctx->dir); |
||
1770 | |||
1771 | return 0; |
||
1772 | } |
||
1773 | @@ -701,13 +737,13 @@ static int xts_ablkcipher_setkey(struct |
||
1774 | desc = ctx->sh_desc_enc; |
||
1775 | cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata); |
||
1776 | dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, |
||
1777 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
1778 | + desc_bytes(desc), ctx->dir); |
||
1779 | |||
1780 | /* xts_ablkcipher_decrypt shared descriptor */ |
||
1781 | desc = ctx->sh_desc_dec; |
||
1782 | cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata); |
||
1783 | dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, |
||
1784 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
1785 | + desc_bytes(desc), ctx->dir); |
||
1786 | |||
1787 | return 0; |
||
1788 | } |
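Every dma_sync_single_for_device() conversion in these hunks follows from one DMA-API rule: a sync must use the same direction the region was mapped with. Since that direction is now chosen at init time, each sync reads it back from the context; the recurring pattern, written as a hypothetical helper:

    /* Sketch: sync one shared descriptor using the direction the
     * context region was originally mapped with (ctx->dir). */
    static void caam_sync_shdesc(struct caam_ctx *ctx, u32 *desc,
                                 dma_addr_t desc_dma)
    {
            dma_sync_single_for_device(ctx->jrdev, desc_dma,
                                       desc_bytes(desc), ctx->dir);
    }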
||
1789 | @@ -987,9 +1023,6 @@ static void init_aead_job(struct aead_re |
||
1790 | append_seq_out_ptr(desc, dst_dma, |
||
1791 | req->assoclen + req->cryptlen - authsize, |
||
1792 | out_options); |
||
1793 | - |
||
1794 | - /* REG3 = assoclen */ |
||
1795 | - append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); |
||
1796 | } |
||
1797 | |||
1798 | static void init_gcm_job(struct aead_request *req, |
||
1799 | @@ -1004,6 +1037,7 @@ static void init_gcm_job(struct aead_req |
||
1800 | unsigned int last; |
||
1801 | |||
1802 | init_aead_job(req, edesc, all_contig, encrypt); |
||
1803 | + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); |
||
1804 | |||
1805 | /* BUG This should not be specific to generic GCM. */ |
||
1806 | last = 0; |
||
1807 | @@ -1030,6 +1064,7 @@ static void init_authenc_job(struct aead |
||
1808 | struct caam_aead_alg, aead); |
||
1809 | unsigned int ivsize = crypto_aead_ivsize(aead); |
||
1810 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
1811 | + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); |
||
1812 | const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == |
||
1813 | OP_ALG_AAI_CTR_MOD128); |
||
1814 | const bool is_rfc3686 = alg->caam.rfc3686; |
||
1815 | @@ -1053,6 +1088,15 @@ static void init_authenc_job(struct aead |
||
1816 | |||
1817 | init_aead_job(req, edesc, all_contig, encrypt); |
||
1818 | |||
1819 | + /* |
||
1820 | + * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports |
||
1821 | + * having DPOVRD as destination. |
||
1822 | + */ |
||
1823 | + if (ctrlpriv->era < 3) |
||
1824 | + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); |
||
1825 | + else |
||
1826 | + append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen); |
||
1827 | + |
||
1828 | if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv)) |
||
1829 | append_load_as_imm(desc, req->iv, ivsize, |
||
1830 | LDST_CLASS_1_CCB | |
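DPOVRD (the DECO protocol override register) became a legal MATH destination only with SEC Era 3, which is why init_authenc_job() above - and the shared-descriptor constructors further down - keep a REG3 fallback on older parts. The job-descriptor side of that choice, as a hypothetical helper:

    /* Sketch: seed assoclen where the shared descriptor will read it. */
    static void append_assoclen(u32 *desc, int era, unsigned int assoclen)
    {
            if (era < 3)    /* MATH cannot target DPOVRD on old parts */
                    append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
            else
                    append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, assoclen);
    }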
||
1831 | @@ -3203,9 +3247,11 @@ struct caam_crypto_alg { |
||
1832 | struct caam_alg_entry caam; |
||
1833 | }; |
||
1834 | |||
1835 | -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) |
||
1836 | +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, |
||
1837 | + bool uses_dkp) |
||
1838 | { |
||
1839 | dma_addr_t dma_addr; |
||
1840 | + struct caam_drv_private *priv; |
||
1841 | |||
1842 | ctx->jrdev = caam_jr_alloc(); |
||
1843 | if (IS_ERR(ctx->jrdev)) { |
||
1844 | @@ -3213,10 +3259,16 @@ static int caam_init_common(struct caam_ |
||
1845 | return PTR_ERR(ctx->jrdev); |
||
1846 | } |
||
1847 | |||
1848 | + priv = dev_get_drvdata(ctx->jrdev->parent); |
||
1849 | + if (priv->era >= 6 && uses_dkp) |
||
1850 | + ctx->dir = DMA_BIDIRECTIONAL; |
||
1851 | + else |
||
1852 | + ctx->dir = DMA_TO_DEVICE; |
||
1853 | + |
||
1854 | dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc, |
||
1855 | offsetof(struct caam_ctx, |
||
1856 | sh_desc_enc_dma), |
||
1857 | - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); |
||
1858 | + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); |
||
1859 | if (dma_mapping_error(ctx->jrdev, dma_addr)) { |
||
1860 | dev_err(ctx->jrdev, "unable to map key, shared descriptors\n"); |
||
1861 | caam_jr_free(ctx->jrdev); |
||
1862 | @@ -3244,7 +3296,7 @@ static int caam_cra_init(struct crypto_t |
||
1863 | container_of(alg, struct caam_crypto_alg, crypto_alg); |
||
1864 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); |
||
1865 | |||
1866 | - return caam_init_common(ctx, &caam_alg->caam); |
||
1867 | + return caam_init_common(ctx, &caam_alg->caam, false); |
||
1868 | } |
||
1869 | |||
1870 | static int caam_aead_init(struct crypto_aead *tfm) |
||
1871 | @@ -3254,14 +3306,15 @@ static int caam_aead_init(struct crypto_ |
||
1872 | container_of(alg, struct caam_aead_alg, aead); |
||
1873 | struct caam_ctx *ctx = crypto_aead_ctx(tfm); |
||
1874 | |||
1875 | - return caam_init_common(ctx, &caam_alg->caam); |
||
1876 | + return caam_init_common(ctx, &caam_alg->caam, |
||
1877 | + alg->setkey == aead_setkey); |
||
1878 | } |
||
1879 | |||
1880 | static void caam_exit_common(struct caam_ctx *ctx) |
||
1881 | { |
||
1882 | dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma, |
||
1883 | offsetof(struct caam_ctx, sh_desc_enc_dma), |
||
1884 | - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); |
||
1885 | + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); |
||
1886 | caam_jr_free(ctx->jrdev); |
||
1887 | } |
||
1888 | |||
1889 | --- a/drivers/crypto/caam/caamalg_desc.c |
||
1890 | +++ b/drivers/crypto/caam/caamalg_desc.c |
||
1891 | @@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *d |
||
1892 | * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor |
||
1893 | * (non-protocol) with no (null) encryption. |
||
1894 | * @desc: pointer to buffer used for descriptor construction |
||
1895 | - * @adata: pointer to authentication transform definitions. Note that since a |
||
1896 | - * split key is to be used, the size of the split key itself is |
||
1897 | - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, |
||
1898 | - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. |
||
1899 | + * @adata: pointer to authentication transform definitions. |
||
1900 | + * A split key is required for SEC Era < 6; the size of the split key |
||
1901 | + * is specified in this case. Valid algorithm values - one of |
||
1902 | + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed |
||
1903 | + * with OP_ALG_AAI_HMAC_PRECOMP. |
||
1904 | * @icvsize: integrity check value (ICV) size (truncated or full) |
||
1905 | - * |
||
1906 | - * Note: Requires an MDHA split key. |
||
1907 | + * @era: SEC Era |
||
1908 | */ |
||
1909 | void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, |
||
1910 | - unsigned int icvsize) |
||
1911 | + unsigned int icvsize, int era) |
||
1912 | { |
||
1913 | u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; |
||
1914 | |||
1915 | @@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * c |
||
1916 | /* Skip if already shared */ |
||
1917 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | |
||
1918 | JUMP_COND_SHRD); |
||
1919 | - if (adata->key_inline) |
||
1920 | - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, |
||
1921 | - adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | |
||
1922 | - KEY_ENC); |
||
1923 | - else |
||
1924 | - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | |
||
1925 | - KEY_DEST_MDHA_SPLIT | KEY_ENC); |
||
1926 | + if (era < 6) { |
||
1927 | + if (adata->key_inline) |
||
1928 | + append_key_as_imm(desc, adata->key_virt, |
||
1929 | + adata->keylen_pad, adata->keylen, |
||
1930 | + CLASS_2 | KEY_DEST_MDHA_SPLIT | |
||
1931 | + KEY_ENC); |
||
1932 | + else |
||
1933 | + append_key(desc, adata->key_dma, adata->keylen, |
||
1934 | + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); |
||
1935 | + } else { |
||
1936 | + append_proto_dkp(desc, adata); |
||
1937 | + } |
||
1938 | set_jump_tgt_here(desc, key_jump_cmd); |
||
1939 | |||
1940 | /* assoclen + cryptlen = seqinlen */ |
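Both branches above end at the same jump target: Era < 6 loads a precomputed MDHA split key, while Era >= 6 emits a single DKP (Derived Key Protocol) OPERATION so the SEC derives the split key itself, writing it back over the raw HMAC key (hence the bidirectional mapping earlier). A simplified sketch of the non-inline case of append_proto_dkp(), which this series adds to desc_constr.h, assuming the OP_PCL_DKP_* encodings from the desc.h additions:

    /* Sketch, not the verbatim helper: derive a split key in place. */
    static inline void append_proto_dkp_sketch(u32 * const desc,
                                               struct alginfo *adata)
    {
            /* map the OP_ALG_ALGSEL_* hash selector to its DKP protid */
            u32 protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
                         (0x20 << OP_ALG_ALGSEL_SHIFT);

            /* source: raw key at key_dma; destination: the same buffer */
            append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
                             OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
                             adata->keylen);
            append_ptr(desc, adata->key_dma);
    }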
||
1941 | @@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_enca |
||
1942 | * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor |
||
1943 | * (non-protocol) with no (null) decryption. |
||
1944 | * @desc: pointer to buffer used for descriptor construction |
||
1945 | - * @adata: pointer to authentication transform definitions. Note that since a |
||
1946 | - * split key is to be used, the size of the split key itself is |
||
1947 | - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, |
||
1948 | - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. |
||
1949 | + * @adata: pointer to authentication transform definitions. |
||
1950 | + * A split key is required for SEC Era < 6; the size of the split key |
||
1951 | + * is specified in this case. Valid algorithm values - one of |
||
1952 | + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed |
||
1953 | + * with OP_ALG_AAI_HMAC_PRECOMP. |
||
1954 | * @icvsize: integrity check value (ICV) size (truncated or full) |
||
1955 | - * |
||
1956 | - * Note: Requires an MDHA split key. |
||
1957 | + * @era: SEC Era |
||
1958 | */ |
||
1959 | void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, |
||
1960 | - unsigned int icvsize) |
||
1961 | + unsigned int icvsize, int era) |
||
1962 | { |
||
1963 | u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd; |
||
1964 | |||
1965 | @@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * c |
||
1966 | /* Skip if already shared */ |
||
1967 | key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | |
||
1968 | JUMP_COND_SHRD); |
||
1969 | - if (adata->key_inline) |
||
1970 | - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, |
||
1971 | - adata->keylen, CLASS_2 | |
||
1972 | - KEY_DEST_MDHA_SPLIT | KEY_ENC); |
||
1973 | - else |
||
1974 | - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | |
||
1975 | - KEY_DEST_MDHA_SPLIT | KEY_ENC); |
||
1976 | + if (era < 6) { |
||
1977 | + if (adata->key_inline) |
||
1978 | + append_key_as_imm(desc, adata->key_virt, |
||
1979 | + adata->keylen_pad, adata->keylen, |
||
1980 | + CLASS_2 | KEY_DEST_MDHA_SPLIT | |
||
1981 | + KEY_ENC); |
||
1982 | + else |
||
1983 | + append_key(desc, adata->key_dma, adata->keylen, |
||
1984 | + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); |
||
1985 | + } else { |
||
1986 | + append_proto_dkp(desc, adata); |
||
1987 | + } |
||
1988 | set_jump_tgt_here(desc, key_jump_cmd); |
||
1989 | |||
1990 | /* Class 2 operation */ |
||
1991 | @@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_deca |
||
1992 | static void init_sh_desc_key_aead(u32 * const desc, |
||
1993 | struct alginfo * const cdata, |
||
1994 | struct alginfo * const adata, |
||
1995 | - const bool is_rfc3686, u32 *nonce) |
||
1996 | + const bool is_rfc3686, u32 *nonce, int era) |
||
1997 | { |
||
1998 | u32 *key_jump_cmd; |
||
1999 | unsigned int enckeylen = cdata->keylen; |
||
2000 | @@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 * |
||
2001 | if (is_rfc3686) |
||
2002 | enckeylen -= CTR_RFC3686_NONCE_SIZE; |
||
2003 | |||
2004 | - if (adata->key_inline) |
||
2005 | - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, |
||
2006 | - adata->keylen, CLASS_2 | |
||
2007 | - KEY_DEST_MDHA_SPLIT | KEY_ENC); |
||
2008 | - else |
||
2009 | - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | |
||
2010 | - KEY_DEST_MDHA_SPLIT | KEY_ENC); |
||
2011 | + if (era < 6) { |
||
2012 | + if (adata->key_inline) |
||
2013 | + append_key_as_imm(desc, adata->key_virt, |
||
2014 | + adata->keylen_pad, adata->keylen, |
||
2015 | + CLASS_2 | KEY_DEST_MDHA_SPLIT | |
||
2016 | + KEY_ENC); |
||
2017 | + else |
||
2018 | + append_key(desc, adata->key_dma, adata->keylen, |
||
2019 | + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); |
||
2020 | + } else { |
||
2021 | + append_proto_dkp(desc, adata); |
||
2022 | + } |
||
2023 | |||
2024 | if (cdata->key_inline) |
||
2025 | append_key_as_imm(desc, cdata->key_virt, enckeylen, |
||
2026 | @@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 * |
||
2027 | * @cdata: pointer to block cipher transform definitions |
||
2028 | * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed |
||
2029 | * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. |
||
2030 | - * @adata: pointer to authentication transform definitions. Note that since a |
||
2031 | - * split key is to be used, the size of the split key itself is |
||
2032 | - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, |
||
2033 | - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. |
||
2034 | + * @adata: pointer to authentication transform definitions. |
||
2035 | + * A split key is required for SEC Era < 6; the size of the split key |
||
2036 | + * is specified in this case. Valid algorithm values - one of |
||
2037 | + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed |
||
2038 | + * with OP_ALG_AAI_HMAC_PRECOMP. |
||
2039 | * @ivsize: initialization vector size |
||
2040 | * @icvsize: integrity check value (ICV) size (truncated or full) |
||
2041 | * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template |
||
2042 | * @nonce: pointer to rfc3686 nonce |
||
2043 | * @ctx1_iv_off: IV offset in CONTEXT1 register |
||
2044 | * @is_qi: true when called from caam/qi |
||
2045 | - * |
||
2046 | - * Note: Requires an MDHA split key. |
||
2047 | + * @era: SEC Era |
||
2048 | */ |
||
2049 | void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, |
||
2050 | struct alginfo *adata, unsigned int ivsize, |
||
2051 | unsigned int icvsize, const bool is_rfc3686, |
||
2052 | - u32 *nonce, const u32 ctx1_iv_off, const bool is_qi) |
||
2053 | + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi, |
||
2054 | + int era) |
||
2055 | { |
||
2056 | /* Note: Context registers are saved. */ |
||
2057 | - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); |
||
2058 | + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); |
||
2059 | |||
2060 | /* Class 2 operation */ |
||
2061 | append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | |
||
2062 | @@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const |
||
2063 | } |
||
2064 | |||
2065 | /* Read and write assoclen bytes */ |
||
2066 | - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
||
2067 | - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); |
||
2068 | + if (is_qi || era < 3) { |
||
2069 | + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
||
2070 | + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); |
||
2071 | + } else { |
||
2072 | + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); |
||
2073 | + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ); |
||
2074 | + } |
||
2075 | |||
2076 | /* Skip assoc data */ |
||
2077 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); |
||
2078 | @@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap); |
||
2079 | * @cdata: pointer to block cipher transform definitions |
||
2080 | * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed |
||
2081 | * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. |
||
2082 | - * @adata: pointer to authentication transform definitions. Note that since a |
||
2083 | - * split key is to be used, the size of the split key itself is |
||
2084 | - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, |
||
2085 | - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. |
||
2086 | + * @adata: pointer to authentication transform definitions. |
||
2087 | + * A split key is required for SEC Era < 6; the size of the split key |
||
2088 | + * is specified in this case. Valid algorithm values - one of |
||
2089 | + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed |
||
2090 | + * with OP_ALG_AAI_HMAC_PRECOMP. |
||
2091 | * @ivsize: initialization vector size |
||
2092 | * @icvsize: integrity check value (ICV) size (truncated or full) |
||
2093 | * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template |
||
2094 | * @nonce: pointer to rfc3686 nonce |
||
2095 | * @ctx1_iv_off: IV offset in CONTEXT1 register |
||
2096 | * @is_qi: true when called from caam/qi |
||
2097 | - * |
||
2098 | - * Note: Requires an MDHA split key. |
||
2099 | + * @era: SEC Era |
||
2100 | */ |
||
2101 | void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, |
||
2102 | struct alginfo *adata, unsigned int ivsize, |
||
2103 | unsigned int icvsize, const bool geniv, |
||
2104 | const bool is_rfc3686, u32 *nonce, |
||
2105 | - const u32 ctx1_iv_off, const bool is_qi) |
||
2106 | + const u32 ctx1_iv_off, const bool is_qi, int era) |
||
2107 | { |
||
2108 | /* Note: Context registers are saved. */ |
||
2109 | - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); |
||
2110 | + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); |
||
2111 | |||
2112 | /* Class 2 operation */ |
||
2113 | append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | |
||
2114 | @@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const |
||
2115 | } |
||
2116 | |||
2117 | /* Read and write assoclen bytes */ |
||
2118 | - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
||
2119 | - if (geniv) |
||
2120 | - append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize); |
||
2121 | - else |
||
2122 | - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); |
||
2123 | + if (is_qi || era < 3) { |
||
2124 | + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
||
2125 | + if (geniv) |
||
2126 | + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, |
||
2127 | + ivsize); |
||
2128 | + else |
||
2129 | + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, |
||
2130 | + CAAM_CMD_SZ); |
||
2131 | + } else { |
||
2132 | + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); |
||
2133 | + if (geniv) |
||
2134 | + append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM, |
||
2135 | + ivsize); |
||
2136 | + else |
||
2137 | + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, |
||
2138 | + CAAM_CMD_SZ); |
||
2139 | + } |
||
2140 | |||
2141 | /* Skip assoc data */ |
||
2142 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); |
||
2143 | @@ -456,29 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap); |
||
2144 | * @cdata: pointer to block cipher transform definitions |
||
2145 | * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed |
||
2146 | * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. |
||
2147 | - * @adata: pointer to authentication transform definitions. Note that since a |
||
2148 | - * split key is to be used, the size of the split key itself is |
||
2149 | - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, |
||
2150 | - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. |
||
2151 | + * @adata: pointer to authentication transform definitions. |
||
2152 | + * A split key is required for SEC Era < 6; the size of the split key |
||
2153 | + * is specified in this case. Valid algorithm values - one of |
||
2154 | + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed |
||
2155 | + * with OP_ALG_AAI_HMAC_PRECOMP. |
||
2156 | * @ivsize: initialization vector size |
||
2157 | * @icvsize: integrity check value (ICV) size (truncated or full) |
||
2158 | * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template |
||
2159 | * @nonce: pointer to rfc3686 nonce |
||
2160 | * @ctx1_iv_off: IV offset in CONTEXT1 register |
||
2161 | * @is_qi: true when called from caam/qi |
||
2162 | - * |
||
2163 | - * Note: Requires an MDHA split key. |
||
2164 | + * @era: SEC Era |
||
2165 | */ |
||
2166 | void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, |
||
2167 | struct alginfo *adata, unsigned int ivsize, |
||
2168 | unsigned int icvsize, const bool is_rfc3686, |
||
2169 | u32 *nonce, const u32 ctx1_iv_off, |
||
2170 | - const bool is_qi) |
||
2171 | + const bool is_qi, int era) |
||
2172 | { |
||
2173 | u32 geniv, moveiv; |
||
2174 | |||
2175 | /* Note: Context registers are saved. */ |
||
2176 | - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); |
||
2177 | + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); |
||
2178 | |||
2179 | if (is_qi) { |
||
2180 | u32 *wait_load_cmd; |
||
2181 | @@ -528,8 +561,13 @@ copy_iv: |
||
2182 | OP_ALG_ENCRYPT); |
||
2183 | |||
2184 | /* Read and write assoclen bytes */ |
||
2185 | - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
||
2186 | - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); |
||
2187 | + if (is_qi || era < 3) { |
||
2188 | + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
||
2189 | + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); |
||
2190 | + } else { |
||
2191 | + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); |
||
2192 | + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ); |
||
2193 | + } |
||
2194 | |||
2195 | /* Skip assoc data */ |
||
2196 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); |
||
2197 | @@ -583,14 +621,431 @@ copy_iv: |
||
2198 | EXPORT_SYMBOL(cnstr_shdsc_aead_givencap); |
||
2199 | |||
2200 | /** |
||
2201 | + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor |
||
2202 | + * @desc: pointer to buffer used for descriptor construction |
||
2203 | + * @cdata: pointer to block cipher transform definitions |
||
2204 | + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed |
||
2205 | + * with OP_ALG_AAI_CBC |
||
2206 | + * @adata: pointer to authentication transform definitions. |
||
2207 | + * A split key is required for SEC Era < 6; the size of the split key |
||
2209 | + * is specified in this case. Valid algorithm values - OP_ALG_ALGSEL_SHA1 |
||
2209 | + * ANDed with OP_ALG_AAI_HMAC_PRECOMP. |
||
2210 | + * @assoclen: associated data length |
||
2211 | + * @ivsize: initialization vector size |
||
2212 | + * @authsize: authentication data size |
||
2213 | + * @blocksize: block cipher size |
||
2214 | + * @era: SEC Era |
||
2215 | + */ |
||
2216 | +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata, |
||
2217 | + struct alginfo *adata, unsigned int assoclen, |
||
2218 | + unsigned int ivsize, unsigned int authsize, |
||
2219 | + unsigned int blocksize, int era) |
||
2220 | +{ |
||
2221 | + u32 *key_jump_cmd, *zero_payload_jump_cmd; |
||
2222 | + u32 genpad, idx_ld_datasz, idx_ld_pad, stidx; |
||
2223 | + |
||
2224 | + /* |
||
2225 | + * Compute the index (in bytes) for the LOAD with destination of |
||
2226 | + * Class 1 Data Size Register and for the LOAD that generates padding |
||
2227 | + */ |
||
2228 | + if (adata->key_inline) { |
||
2229 | + idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad + |
||
2230 | + cdata->keylen - 4 * CAAM_CMD_SZ; |
||
2231 | + idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad + |
||
2232 | + cdata->keylen - 2 * CAAM_CMD_SZ; |
||
2233 | + } else { |
||
2234 | + idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ - |
||
2235 | + 4 * CAAM_CMD_SZ; |
||
2236 | + idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ - |
||
2237 | + 2 * CAAM_CMD_SZ; |
||
2238 | + } |
||
2239 | + |
||
2240 | + stidx = 1 << HDR_START_IDX_SHIFT; |
||
2241 | + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx); |
||
2242 | + |
||
2243 | + /* skip key loading if keys are already loaded due to sharing */ |
||
2244 | + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | |
||
2245 | + JUMP_COND_SHRD); |
||
2246 | + |
||
2247 | + if (era < 6) { |
||
2248 | + if (adata->key_inline) |
||
2249 | + append_key_as_imm(desc, adata->key_virt, |
||
2250 | + adata->keylen_pad, adata->keylen, |
||
2251 | + CLASS_2 | KEY_DEST_MDHA_SPLIT | |
||
2252 | + KEY_ENC); |
||
2253 | + else |
||
2254 | + append_key(desc, adata->key_dma, adata->keylen, |
||
2255 | + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); |
||
2256 | + } else { |
||
2257 | + append_proto_dkp(desc, adata); |
||
2258 | + } |
||
2259 | + |
||
2260 | + if (cdata->key_inline) |
||
2261 | + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, |
||
2262 | + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); |
||
2263 | + else |
||
2264 | + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | |
||
2265 | + KEY_DEST_CLASS_REG); |
||
2266 | + |
||
2267 | + set_jump_tgt_here(desc, key_jump_cmd); |
||
2268 | + |
||
2269 | + /* class 2 operation */ |
||
2270 | + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | |
||
2271 | + OP_ALG_ENCRYPT); |
||
2272 | + /* class 1 operation */ |
||
2273 | + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | |
||
2274 | + OP_ALG_ENCRYPT); |
||
2275 | + |
||
2276 | + /* payloadlen = input data length - (assoclen + ivlen) */ |
||
2277 | + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize); |
||
2278 | + |
||
2279 | + /* math1 = payloadlen + icvlen */ |
||
2280 | + append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize); |
||
2281 | + |
||
2282 | + /* padlen = block_size - math1 % block_size */ |
||
2283 | + append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1); |
||
2284 | + append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize); |
||
2285 | + |
||
2286 | + /* cryptlen = payloadlen + icvlen + padlen */ |
||
2287 | + append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4); |
||
2288 | + |
||
2289 | + /* |
||
2290 | + * update immediate data with the padding length value |
||
2291 | + * for the LOAD in the class 1 data size register. |
||
2292 | + */ |
||
2293 | + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 | |
||
2294 | + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7); |
||
2295 | + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF | |
||
2296 | + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8); |
||
2297 | + |
||
2298 | + /* overwrite PL field for the padding info FIFO entry */ |
||
2299 | + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 | |
||
2300 | + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7); |
||
2301 | + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF | |
||
2302 | + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8); |
||
2303 | + |
||
2304 | + /* store encrypted payload, icv and padding */ |
||
2305 | + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF); |
||
2306 | + |
||
2307 | + /* if payload length is zero, jump to zero-payload commands */ |
||
2308 | + append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4); |
||
2309 | + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | |
||
2310 | + JUMP_COND_MATH_Z); |
||
2311 | + |
||
2312 | + /* load iv in context1 */ |
||
2313 | + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX | |
||
2314 | + LDST_CLASS_1_CCB | ivsize); |
||
2315 | + |
||
2316 | + /* read assoc for authentication */ |
||
2317 | + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 | |
||
2318 | + FIFOLD_TYPE_MSG); |
||
2319 | + /* insnoop payload */ |
||
2320 | + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG | |
||
2321 | + FIFOLD_TYPE_LAST2 | FIFOLDST_VLF); |
||
2322 | + |
||
2323 | + /* jump the zero-payload commands */ |
||
2324 | + append_jump(desc, JUMP_TEST_ALL | 3); |
||
2325 | + |
||
2326 | + /* zero-payload commands */ |
||
2327 | + set_jump_tgt_here(desc, zero_payload_jump_cmd); |
||
2328 | + |
||
2329 | + /* load iv in context1 */ |
||
2330 | + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX | |
||
2331 | + LDST_CLASS_1_CCB | ivsize); |
||
2332 | + |
||
2333 | + /* assoc data is the only data for authentication */ |
||
2334 | + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 | |
||
2335 | + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2); |
||
2336 | + |
||
2337 | + /* send icv to encryption */ |
||
2338 | + append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO | |
||
2339 | + authsize); |
||
2340 | + |
||
2341 | + /* update class 1 data size register with padding length */ |
||
2342 | + append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB | |
||
2343 | + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); |
||
2344 | + |
||
2345 | + /* generate padding and send it to encryption */ |
||
2346 | + genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 | |
||
2347 | + NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N; |
||
2348 | + append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB | |
||
2349 | + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); |
||
2350 | + |
||
2351 | +#ifdef DEBUG |
||
2352 | + print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ", |
||
2353 | + DUMP_PREFIX_ADDRESS, 16, 4, desc, |
||
2354 | + desc_bytes(desc), 1); |
||
2355 | +#endif |
||
2356 | +} |
||
2357 | +EXPORT_SYMBOL(cnstr_shdsc_tls_encap); |
||
2358 | + |
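The MATH-register sequence above (REG0 through REG3) is ordinary TLS 1.0 MAC-then-pad arithmetic. In plain C, and assuming a power-of-two block size (AES-CBC, so 16), the pad length the descriptor computes is:

    /* Model of the descriptor's pad math:
     *   payloadlen = seqinlen - (assoclen + ivsize)         (REG0)
     *   REG1       = payloadlen + icvlen
     *   padlen     = blocksize - (REG1 & (blocksize - 1))   (REG2)
     * The record is always padded to a full block with at least one
     * pad byte, each pad byte holding (padlen - 1). */
    static unsigned int tls10_cbc_padlen(unsigned int payloadlen,
                                         unsigned int icvlen,
                                         unsigned int blocksize)
    {
            return blocksize - ((payloadlen + icvlen) & (blocksize - 1));
    }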
||
2359 | +/** |
||
2360 | + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor |
||
2361 | + * @desc: pointer to buffer used for descriptor construction |
||
2362 | + * @cdata: pointer to block cipher transform definitions |
||
2363 | + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed |
||
2364 | + * with OP_ALG_AAI_CBC |
||
2365 | + * @adata: pointer to authentication transform definitions. |
||
2366 | + * A split key is required for SEC Era < 6; the size of the split key |
||
2367 | + * is specified in this case. Valid algorithm values - OP_ALG_ALGSEL_SHA1 |
||
2368 | + * ANDed with OP_ALG_AAI_HMAC_PRECOMP. |
||
2369 | + * @assoclen: associated data length |
||
2370 | + * @ivsize: initialization vector size |
||
2371 | + * @authsize: authentication data size |
||
2372 | + * @blocksize: block cipher size |
||
2373 | + * @era: SEC Era |
||
2374 | + */ |
||
2375 | +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata, |
||
2376 | + struct alginfo *adata, unsigned int assoclen, |
||
2377 | + unsigned int ivsize, unsigned int authsize, |
||
2378 | + unsigned int blocksize, int era) |
||
2379 | +{ |
||
2380 | + u32 stidx, jumpback; |
||
2381 | + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd; |
||
2382 | + /* |
||
2383 | + * Pointer Size bool determines the size of address pointers. |
||
2384 | + * false - Pointers fit in one 32-bit word. |
||
2385 | + * true - Pointers fit in two 32-bit words. |
||
2386 | + */ |
||
2387 | + static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ); |
||
2388 | + |
||
2389 | + stidx = 1 << HDR_START_IDX_SHIFT; |
||
2390 | + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx); |
||
2391 | + |
||
2392 | + /* skip key loading if keys are already loaded due to sharing */ |
||
2393 | + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | |
||
2394 | + JUMP_COND_SHRD); |
||
2395 | + |
||
2396 | + if (era < 6) |
||
2397 | + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | |
||
2398 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); |
||
2399 | + else |
||
2400 | + append_proto_dkp(desc, adata); |
||
2401 | + |
||
2402 | + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | |
||
2403 | + KEY_DEST_CLASS_REG); |
||
2404 | + |
||
2405 | + set_jump_tgt_here(desc, key_jump_cmd); |
||
2406 | + |
||
2407 | + /* class 2 operation */ |
||
2408 | + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | |
||
2409 | + OP_ALG_DECRYPT | OP_ALG_ICV_ON); |
||
2410 | + /* class 1 operation */ |
||
2411 | + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | |
||
2412 | + OP_ALG_DECRYPT); |
||
2413 | + |
||
2414 | + /* VSIL = input data length - 2 * block_size */ |
||
2415 | + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 * |
||
2416 | + blocksize); |
||
2417 | + |
||
2418 | + /* |
||
2419 | + * payloadlen + icvlen + padlen = input data length - (assoclen + |
||
2420 | + * ivsize) |
||
2421 | + */ |
||
2422 | + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize); |
||
2423 | + |
||
2424 | + /* skip data to the last but one cipher block */ |
||
2425 | + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF); |
||
2426 | + |
||
2427 | + /* load iv for the last cipher block */ |
||
2428 | + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX | |
||
2429 | + LDST_CLASS_1_CCB | ivsize); |
||
2430 | + |
||
2431 | + /* read last cipher block */ |
||
2432 | + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG | |
||
2433 | + FIFOLD_TYPE_LAST1 | blocksize); |
||
2434 | + |
||
2435 | + /* move decrypted block into math0 and math1 */ |
||
2436 | + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 | |
||
2437 | + blocksize); |
||
2438 | + |
||
2439 | + /* reset AES CHA */ |
||
2440 | + append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB | |
||
2441 | + LDST_SRCDST_WORD_CHACTRL | LDST_IMM); |
||
2442 | + |
||
2443 | + /* rewind input sequence */ |
||
2444 | + append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO); |
||
2445 | + |
||
2446 | + /* key1 is in decryption form */ |
||
2447 | + append_operation(desc, cdata->algtype | OP_ALG_AAI_DK | |
||
2448 | + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT); |
||
2449 | + |
||
2450 | + /* load iv in context1 */ |
||
2451 | + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB | |
||
2452 | + LDST_SRCDST_WORD_CLASS_CTX | ivsize); |
||
2453 | + |
||
2454 | + /* read sequence number */ |
||
2455 | + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG); |
||
2456 | + /* load Type, Version and Len fields in math0 */ |
||
2457 | + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO | |
||
2458 | + LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5); |
||
2459 | + |
||
2460 | + /* compute (padlen - 1) */ |
||
2461 | + append_math_and_imm_u64(desc, REG1, REG1, IMM, 255); |
||
2462 | + |
||
2463 | + /* math2 = icvlen + (padlen - 1) + 1 */ |
||
2464 | + append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1); |
||
2465 | + |
||
2466 | + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1); |
||
2467 | + |
||
2468 | + /* VSOL = payloadlen + icvlen + padlen */ |
||
2469 | + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4); |
||
2470 | + |
||
2471 | + if (caam_little_end) |
||
2472 | + append_moveb(desc, MOVE_WAITCOMP | |
||
2473 | + MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8); |
||
2474 | + |
||
2475 | + /* update Len field */ |
||
2476 | + append_math_sub(desc, REG0, REG0, REG2, 8); |
||
2477 | + |
||
2478 | + /* store decrypted payload, icv and padding */ |
||
2479 | + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF); |
||
2480 | + |
||
2481 | + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */ |
||
2482 | + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4); |
||
2483 | + |
||
2484 | + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | |
||
2485 | + JUMP_COND_MATH_Z); |
||
2486 | + |
||
2487 | + /* send Type, Version and Len(pre ICV) fields to authentication */ |
||
2488 | + append_move(desc, MOVE_WAITCOMP | |
||
2489 | + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO | |
||
2490 | + (3 << MOVE_OFFSET_SHIFT) | 5); |
||
2491 | + |
||
2492 | + /* outsnooping payload */ |
||
2493 | + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | |
||
2494 | + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 | |
||
2495 | + FIFOLDST_VLF); |
||
2496 | + skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2); |
||
2497 | + |
||
2498 | + set_jump_tgt_here(desc, zero_payload_jump_cmd); |
||
2499 | + /* send Type, Version and Len(pre ICV) fields to authentication */ |
||
2500 | + append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS | |
||
2501 | + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO | |
||
2502 | + (3 << MOVE_OFFSET_SHIFT) | 5); |
||
2503 | + |
||
2504 | + set_jump_tgt_here(desc, skip_zero_jump_cmd); |
||
2505 | + append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4); |
||
2506 | + |
||
2507 | + /* load icvlen and padlen */ |
||
2508 | + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG | |
||
2509 | + FIFOLD_TYPE_LAST1 | FIFOLDST_VLF); |
||
2510 | + |
||
2511 | + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */ |
||
2512 | + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4); |
||
2513 | + |
||
2514 | + /* |
||
2515 | + * Start a new input sequence using the SEQ OUT PTR command options, |
||
2516 | + * pointer and length used when the current output sequence was defined. |
||
2517 | + */ |
||
2518 | + if (ps) { |
||
2519 | + /* |
||
2520 | + * Move the lower 32 bits of Shared Descriptor address, the |
||
2521 | + * SEQ OUT PTR command, Output Pointer (2 words) and |
||
2522 | + * Output Length into math registers. |
||
2523 | + */ |
||
2524 | + if (caam_little_end) |
||
2525 | + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF | |
||
2526 | + MOVE_DEST_MATH0 | |
||
2527 | + (55 * 4 << MOVE_OFFSET_SHIFT) | 20); |
||
2528 | + else |
||
2529 | + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF | |
||
2530 | + MOVE_DEST_MATH0 | |
||
2531 | + (54 * 4 << MOVE_OFFSET_SHIFT) | 20); |
||
2532 | + |
||
2533 | + /* Transform SEQ OUT PTR command in SEQ IN PTR command */ |
||
2534 | + append_math_and_imm_u32(desc, REG0, REG0, IMM, |
||
2535 | + ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR)); |
||
2536 | + /* Append a JUMP command after the copied fields */ |
||
2537 | + jumpback = CMD_JUMP | (char)-9; |
||
2538 | + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM | |
||
2539 | + LDST_SRCDST_WORD_DECO_MATH2 | |
||
2540 | + (4 << LDST_OFFSET_SHIFT)); |
||
2541 | + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1); |
||
2542 | + /* Move the updated fields back to the Job Descriptor */ |
||
2543 | + if (caam_little_end) |
||
2544 | + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 | |
||
2545 | + MOVE_DEST_DESCBUF | |
||
2546 | + (55 * 4 << MOVE_OFFSET_SHIFT) | 24); |
||
2547 | + else |
||
2548 | + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 | |
||
2549 | + MOVE_DEST_DESCBUF | |
||
2550 | + (54 * 4 << MOVE_OFFSET_SHIFT) | 24); |
||
2551 | + |
||
2552 | + /* |
||
2553 | + * Read the new SEQ IN PTR command, Input Pointer, Input Length |
||
2554 | + * and then jump back to the next command from the |
||
2555 | + * Shared Descriptor. |
||
2556 | + */ |
||
2557 | + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6); |
||
2558 | + } else { |
||
2559 | + /* |
||
2560 | + * Move the SEQ OUT PTR command, Output Pointer (1 word) and |
||
2561 | + * Output Length into math registers. |
||
2562 | + */ |
||
2563 | + if (caam_little_end) |
||
2564 | + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF | |
||
2565 | + MOVE_DEST_MATH0 | |
||
2566 | + (54 * 4 << MOVE_OFFSET_SHIFT) | 12); |
||
2567 | + else |
||
2568 | + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF | |
||
2569 | + MOVE_DEST_MATH0 | |
||
2570 | + (53 * 4 << MOVE_OFFSET_SHIFT) | 12); |
||
2571 | + |
||
2572 | + /* Transform SEQ OUT PTR command in SEQ IN PTR command */ |
||
2573 | + append_math_and_imm_u64(desc, REG0, REG0, IMM, |
||
2574 | + ~(((u64)(CMD_SEQ_IN_PTR ^ |
||
2575 | + CMD_SEQ_OUT_PTR)) << 32)); |
||
2576 | + /* Append a JUMP command after the copied fields */ |
||
2577 | + jumpback = CMD_JUMP | (char)-7; |
||
2578 | + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM | |
||
2579 | + LDST_SRCDST_WORD_DECO_MATH1 | |
||
2580 | + (4 << LDST_OFFSET_SHIFT)); |
||
2581 | + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1); |
||
2582 | + /* Move the updated fields back to the Job Descriptor */ |
||
2583 | + if (caam_little_end) |
||
2584 | + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 | |
||
2585 | + MOVE_DEST_DESCBUF | |
||
2586 | + (54 * 4 << MOVE_OFFSET_SHIFT) | 16); |
||
2587 | + else |
||
2588 | + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 | |
||
2589 | + MOVE_DEST_DESCBUF | |
||
2590 | + (53 * 4 << MOVE_OFFSET_SHIFT) | 16); |
||
2591 | + |
||
2592 | + /* |
||
2593 | + * Read the new SEQ IN PTR command, Input Pointer, Input Length |
||
2594 | + * and then jump back to the next command from the |
||
2595 | + * Shared Descriptor. |
||
2596 | + */ |
||
2597 | + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5); |
||
2598 | + } |
||
2599 | + |
||
2600 | + /* skip payload */ |
||
2601 | + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF); |
||
2602 | + /* check icv */ |
||
2603 | + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV | |
||
2604 | + FIFOLD_TYPE_LAST2 | authsize); |
||
2605 | + |
||
2606 | +#ifdef DEBUG |
||
2607 | + print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ", |
||
2608 | + DUMP_PREFIX_ADDRESS, 16, 4, desc, |
||
2609 | + desc_bytes(desc), 1); |
||
2610 | +#endif |
||
2611 | +} |
||
2612 | +EXPORT_SYMBOL(cnstr_shdsc_tls_decap); |
||
2613 | + |
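The decapsulation flow is worth restating: the descriptor skips to and decrypts only the last cipher block to learn padlen, resets the AES CHA, rewinds the input with SQIN_RTO, and only then runs the full pass so the ICV is checked over the true payload length. The length bookkeeping, as a plain-C model (illustration only; pad-byte verification is left to the ICV check):

    /* Model: the last byte of the decrypted record holds (padlen - 1);
     * REG2 = icvlen + padlen is then stripped from the record length
     * before authentication. */
    static unsigned int tls10_cbc_payloadlen(const u8 *last_block,
                                             unsigned int blocksize,
                                             unsigned int reclen,
                                             unsigned int icvlen)
    {
            unsigned int padlen = last_block[blocksize - 1] + 1;

            return reclen - icvlen - padlen;
    }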
||
+/**
  * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
  * @desc: pointer to buffer used for descriptor construction
  * @cdata: pointer to block cipher transform definitions
  *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
  * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
  */
 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
-			   unsigned int icvsize)
+			   unsigned int ivsize, unsigned int icvsize,
+			   const bool is_qi)
 {
 	u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
 	    *zero_assoc_jump_cmd2;
@@ -612,11 +1067,35 @@ void cnstr_shdsc_gcm_encap(u32 * const d
 	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
 			 OP_ALG_ENCRYPT);

+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+
+		append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
+					ivsize);
+	} else {
+		append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
+				CAAM_CMD_SZ);
+	}
+
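Annotation (not part of the patch): in the caam/qi case the shared descriptor cannot take assoclen from a Job Descriptor math register, so the driver prepends it to the input sequence and the IV travels in-band. The seq_load above pulls the 4-byte assoclen into DECO MATH3, and the JUMP with the CALM | NCP | NOP | NIP | NIFP conditions acts as a completion barrier so the length math does not race the load. Conceptually the input frame is laid out as:

	[ assoclen (4 bytes) | IV (ivsize bytes) | AAD | payload ]

(the 4-byte assoclen mapping is visible later in aead_unmap(), which unmaps exactly 4 bytes of assoclen_dma).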
 	/* if assoclen + cryptlen is ZERO, skip to ICV write */
-	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
 	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
 					   JUMP_COND_MATH_Z);

+	if (is_qi)
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
 	/* if assoclen is ZERO, skip reading the assoc data */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
 	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
@@ -648,8 +1127,11 @@ void cnstr_shdsc_gcm_encap(u32 * const d
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
 			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

-	/* jump the zero-payload commands */
-	append_jump(desc, JUMP_TEST_ALL | 2);
+	/* jump to ICV writing */
+	if (is_qi)
+		append_jump(desc, JUMP_TEST_ALL | 4);
+	else
+		append_jump(desc, JUMP_TEST_ALL | 2);

 	/* zero-payload commands */
 	set_jump_tgt_here(desc, zero_payload_jump_cmd);
@@ -657,10 +1139,18 @@ void cnstr_shdsc_gcm_encap(u32 * const d
 	/* read assoc data */
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+	if (is_qi)
+		/* jump to ICV writing */
+		append_jump(desc, JUMP_TEST_ALL | 2);

 	/* There is no input data */
 	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

+	if (is_qi)
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
+				     FIFOLD_TYPE_LAST1);
+
 	/* write ICV */
 	append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
 			 LDST_SRCDST_BYTE_CONTEXT);
@@ -677,10 +1167,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
  * @desc: pointer to buffer used for descriptor construction
  * @cdata: pointer to block cipher transform definitions
  *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
  * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
  */
 void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
-			   unsigned int icvsize)
+			   unsigned int ivsize, unsigned int icvsize,
+			   const bool is_qi)
 {
 	u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;

@@ -701,6 +1194,24 @@ void cnstr_shdsc_gcm_decap(u32 * const d
 	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
 			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);

+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+	}
+
 	/* if assoclen is ZERO, skip reading the assoc data */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
 	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
@@ -753,10 +1264,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
  * @desc: pointer to buffer used for descriptor construction
  * @cdata: pointer to block cipher transform definitions
  *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
  * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
  */
 void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
-			       unsigned int icvsize)
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi)
 {
 	u32 *key_jump_cmd;

@@ -777,7 +1291,29 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
 	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
 			 OP_ALG_ENCRYPT);

-	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+
+		/* Read salt and IV */
+		append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+					cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+					FIFOLD_TYPE_IV);
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+	}
+
+	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

 	/* Read assoc data */
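Annotation (not part of the patch): per RFC 4106 the GCM nonce is the 4-byte salt followed by the 8-byte per-packet IV. The pair of FIFO loads above assembles exactly that: the salt is loaded as immediate data from the end of the key material (cdata->key_virt + cdata->keylen, which is why the setkey paths later store keylen - 4 as the AES key length), then ivsize bytes of IV are read from the input sequence:

	nonce[0..3]  = salt, key[keylen .. keylen + 3]
	nonce[4..11] = IV from the request (ivsize = 8)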
@@ -785,7 +1321,7 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

 	/* Skip IV */
-	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+	append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);

 	/* Will read cryptlen bytes */
 	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -824,10 +1360,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap)
  * @desc: pointer to buffer used for descriptor construction
  * @cdata: pointer to block cipher transform definitions
  *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
  * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
  */
 void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
-			       unsigned int icvsize)
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi)
 {
 	u32 *key_jump_cmd;

@@ -849,7 +1388,29 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
 	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
 			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);

-	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+
+		/* Read salt and IV */
+		append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+					cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+					FIFOLD_TYPE_IV);
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+	}
+
+	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

 	/* Read assoc data */
@@ -857,7 +1418,7 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

 	/* Skip IV */
-	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+	append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);

 	/* Will read cryptlen bytes */
 	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
@@ -896,10 +1457,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap)
  * @desc: pointer to buffer used for descriptor construction
  * @cdata: pointer to block cipher transform definitions
  *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
  * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
  */
 void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
-			       unsigned int icvsize)
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi)
 {
 	u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;

@@ -920,6 +1484,18 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
 	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
 			 OP_ALG_ENCRYPT);

+	if (is_qi) {
+		/* assoclen is not needed, skip it */
+		append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
+
+		/* Read salt and IV */
+		append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+					cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+					FIFOLD_TYPE_IV);
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+	}
+
 	/* assoclen + cryptlen = seqinlen */
 	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

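Annotation (not part of the patch): unlike the gcm/rfc4106 variants above, rfc4543 (GMAC) authenticates everything it reads and never needs assoclen in a math register, so the QI-prepended 4-byte assoclen word is simply discarded with FIFOLD_CLASS_SKIP instead of being loaded into MATH3.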
@@ -966,10 +1542,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap)
  * @desc: pointer to buffer used for descriptor construction
  * @cdata: pointer to block cipher transform definitions
  *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
  * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
  */
 void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
-			       unsigned int icvsize)
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi)
 {
 	u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;

@@ -990,6 +1569,18 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
 	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
 			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);

+	if (is_qi) {
+		/* assoclen is not needed, skip it */
+		append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
+
+		/* Read salt and IV */
+		append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+					cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+					FIFOLD_TYPE_IV);
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+	}
+
 	/* assoclen + cryptlen = seqoutlen */
 	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

@@ -1075,7 +1666,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 *

 	/* Load nonce into CONTEXT1 reg */
 	if (is_rfc3686) {
-		u8 *nonce = cdata->key_virt + cdata->keylen;
+		const u8 *nonce = cdata->key_virt + cdata->keylen;

 		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
 				   LDST_CLASS_IND_CCB |
@@ -1140,7 +1731,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 *

 	/* Load nonce into CONTEXT1 reg */
 	if (is_rfc3686) {
-		u8 *nonce = cdata->key_virt + cdata->keylen;
+		const u8 *nonce = cdata->key_virt + cdata->keylen;

 		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
 				   LDST_CLASS_IND_CCB |
@@ -1209,7 +1800,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32

 	/* Load Nonce into CONTEXT1 reg */
 	if (is_rfc3686) {
-		u8 *nonce = cdata->key_virt + cdata->keylen;
+		const u8 *nonce = cdata->key_virt + cdata->keylen;

 		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
 				   LDST_CLASS_IND_CCB |
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -17,6 +17,9 @@
 #define DESC_QI_AEAD_DEC_LEN	(DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
 #define DESC_QI_AEAD_GIVENC_LEN	(DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)

+#define DESC_TLS_BASE		(4 * CAAM_CMD_SZ)
+#define DESC_TLS10_ENC_LEN	(DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
+
 /* Note: Nonce is counted in cdata.keylen */
 #define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

@@ -27,14 +30,20 @@
 #define DESC_GCM_BASE		(3 * CAAM_CMD_SZ)
 #define DESC_GCM_ENC_LEN	(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
 #define DESC_GCM_DEC_LEN	(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
+#define DESC_QI_GCM_ENC_LEN	(DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
+#define DESC_QI_GCM_DEC_LEN	(DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)

 #define DESC_RFC4106_BASE	(3 * CAAM_CMD_SZ)
 #define DESC_RFC4106_ENC_LEN	(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
 #define DESC_RFC4106_DEC_LEN	(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4106_ENC_LEN	(DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4106_DEC_LEN	(DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)

 #define DESC_RFC4543_BASE	(3 * CAAM_CMD_SZ)
 #define DESC_RFC4543_ENC_LEN	(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
 #define DESC_RFC4543_DEC_LEN	(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4543_ENC_LEN	(DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4543_DEC_LEN	(DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
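Annotation (not part of the patch): CAAM_CMD_SZ is one 32-bit descriptor word (4 bytes), so these bounds are easy to verify by hand. A worked example for the QI GCM encapsulation descriptor:

	DESC_GCM_ENC_LEN    = (3 + 16) * 4 = 76 bytes
	DESC_QI_GCM_ENC_LEN = 76 + 6 * 4   = 100 bytes

which fits comfortably in the 64-word (256-byte) descriptor buffer that the set_sh_desc() helpers check against.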

 #define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
 #define DESC_ABLKCIPHER_ENC_LEN	(DESC_ABLKCIPHER_BASE + \
@@ -43,46 +52,62 @@
 					 15 * CAAM_CMD_SZ)

 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
-				 unsigned int icvsize);
+				 unsigned int icvsize, int era);

 void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
-				 unsigned int icvsize);
+				 unsigned int icvsize, int era);

 void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
 			    struct alginfo *adata, unsigned int ivsize,
 			    unsigned int icvsize, const bool is_rfc3686,
 			    u32 *nonce, const u32 ctx1_iv_off,
-			    const bool is_qi);
+			    const bool is_qi, int era);

 void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
 			    struct alginfo *adata, unsigned int ivsize,
 			    unsigned int icvsize, const bool geniv,
 			    const bool is_rfc3686, u32 *nonce,
-			    const u32 ctx1_iv_off, const bool is_qi);
+			    const u32 ctx1_iv_off, const bool is_qi, int era);

 void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
 			       struct alginfo *adata, unsigned int ivsize,
 			       unsigned int icvsize, const bool is_rfc3686,
 			       u32 *nonce, const u32 ctx1_iv_off,
-			       const bool is_qi);
+			       const bool is_qi, int era);
+
+void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
+			   struct alginfo *adata, unsigned int assoclen,
+			   unsigned int ivsize, unsigned int authsize,
+			   unsigned int blocksize, int era);
+
+void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
+			   struct alginfo *adata, unsigned int assoclen,
+			   unsigned int ivsize, unsigned int authsize,
+			   unsigned int blocksize, int era);

 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
-			   unsigned int icvsize);
+			   unsigned int ivsize, unsigned int icvsize,
+			   const bool is_qi);

 void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
-			   unsigned int icvsize);
+			   unsigned int ivsize, unsigned int icvsize,
+			   const bool is_qi);

 void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
-			       unsigned int icvsize);
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi);

 void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
-			       unsigned int icvsize);
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi);

 void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
-			       unsigned int icvsize);
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi);

 void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
-			       unsigned int icvsize);
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi);

 void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
 				  unsigned int ivsize, const bool is_rfc3686,
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -7,7 +7,7 @@
  */

 #include "compat.h"
-
+#include "ctrl.h"
 #include "regs.h"
 #include "intern.h"
 #include "desc_constr.h"
@@ -53,6 +53,7 @@ struct caam_ctx {
 	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
 	u8 key[CAAM_MAX_KEY_SIZE];
 	dma_addr_t key_dma;
+	enum dma_data_direction dir;
 	struct alginfo adata;
 	struct alginfo cdata;
 	unsigned int authsize;
@@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypt
 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
 			       OP_ALG_AAI_CTR_MOD128);
 	const bool is_rfc3686 = alg->caam.rfc3686;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

 	if (!ctx->cdata.keylen || !ctx->authsize)
 		return 0;
@@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypt

 	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
 			       ivsize, ctx->authsize, is_rfc3686, nonce,
-			       ctx1_iv_off, true);
+			       ctx1_iv_off, true, ctrlpriv->era);

 skip_enc:
 	/* aead_decrypt shared descriptor */
@@ -149,7 +151,8 @@ skip_enc:

 	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
 			       ivsize, ctx->authsize, alg->caam.geniv,
-			       is_rfc3686, nonce, ctx1_iv_off, true);
+			       is_rfc3686, nonce, ctx1_iv_off, true,
+			       ctrlpriv->era);

 	if (!alg->caam.geniv)
 		goto skip_givenc;
@@ -176,7 +179,7 @@ skip_enc:

 	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
 				  ivsize, ctx->authsize, is_rfc3686, nonce,
-				  ctx1_iv_off, true);
+				  ctx1_iv_off, true, ctrlpriv->era);

 skip_givenc:
 	return 0;
@@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aea
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
 	struct crypto_authenc_keys keys;
 	int ret = 0;

@@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aea
 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif

+	/*
+	 * If DKP is supported, use it in the shared descriptor to generate
+	 * the split key.
+	 */
+	if (ctrlpriv->era >= 6) {
+		ctx->adata.keylen = keys.authkeylen;
+		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+						      OP_ALG_ALGSEL_MASK);
+
+		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+			goto badkey;
+
+		memcpy(ctx->key, keys.authkey, keys.authkeylen);
+		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+		       keys.enckeylen);
+		dma_sync_single_for_device(jrdev, ctx->key_dma,
+					   ctx->adata.keylen_pad +
+					   keys.enckeylen, ctx->dir);
+		goto skip_split_key;
+	}
+
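Annotation (not part of the patch): on era >= 6 parts the Derived Key Protocol (DKP) lets the shared descriptor derive the HMAC split key itself, so setkey only copies the raw keys. split_key_len() (a helper in key_gen.h) returns twice the MDHA pad length for the selected hash; a worked example for hmac(sha1):

	keylen_pad = split_key_len(OP_ALG_ALGSEL_SHA1) = 2 * 20 = 40 bytes

so the encryption key lands at ctx->key + 40 and a single dma_sync_single_for_device() pushes both keys out.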
 	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
 			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
 			    keys.enckeylen);
@@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aea
 	/* postpend encryption key to auth split key */
 	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
 	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
-				   keys.enckeylen, DMA_TO_DEVICE);
+				   keys.enckeylen, ctx->dir);
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
 		       ctx->adata.keylen_pad + keys.enckeylen, 1);
 #endif

+skip_split_key:
 	ctx->cdata.keylen = keys.enckeylen;

 	ret = aead_set_sh_desc(aead);
@@ -258,6 +284,468 @@ badkey:
 	return -EINVAL;
 }

+static int tls_set_sh_desc(struct crypto_aead *tls)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(tls);
+	unsigned int ivsize = crypto_aead_ivsize(tls);
+	unsigned int blocksize = crypto_aead_blocksize(tls);
+	unsigned int assoclen = 13; /* always 13 bytes for TLS */
+	unsigned int data_len[2];
+	u32 inl_mask;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * TLS 1.0 encrypt shared descriptor
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	data_len[0] = ctx->adata.keylen_pad;
+	data_len[1] = ctx->cdata.keylen;
+
+	if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
+			      &inl_mask, ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
+
+	if (inl_mask & 1)
+		ctx->adata.key_virt = ctx->key;
+	else
+		ctx->adata.key_dma = ctx->key_dma;
+
+	if (inl_mask & 2)
+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	else
+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
+
+	cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
+			      assoclen, ivsize, ctx->authsize, blocksize,
+			      ctrlpriv->era);
+
+	/*
+	 * TLS 1.0 decrypt shared descriptor
+	 * Keys do not fit inline, regardless of algorithms used
+	 */
+	ctx->adata.key_inline = false;
+	ctx->adata.key_dma = ctx->key_dma;
+	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
+			      assoclen, ivsize, ctx->authsize, blocksize,
+			      ctrlpriv->era);
+
+	return 0;
+}
+
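Annotation (not part of the patch): desc_inline_query() checks which key blobs still fit in the descriptor once the fixed commands are accounted for; bit i of inl_mask corresponds to data_len[i], so bit 0 answers for the split authentication key and bit 1 for the cipher key, exactly as decoded above. A rough worked budget, assuming 64-bit pointers (DESC_JOB_IO_LEN = 44 bytes) and hmac(sha1) + AES-128:

	CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - DESC_TLS10_ENC_LEN
	    = 256 - 44 - (4 + 29) * 4 = 80 bytes

	data_len[0] + data_len[1] = 40 + 16 = 56 <= 80  =>  inl_mask == 3

so both keys are normally inlined for the encrypt descriptor.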
+static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(tls);
+
+	ctx->authsize = authsize;
+	tls_set_sh_desc(tls);
+
+	return 0;
+}
+
+static int tls_setkey(struct crypto_aead *tls, const u8 *key,
+		      unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(tls);
+	struct device *jrdev = ctx->jrdev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+	struct crypto_authenc_keys keys;
+	int ret = 0;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+		goto badkey;
+
+#ifdef DEBUG
+	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
+		keys.authkeylen + keys.enckeylen, keys.enckeylen,
+		keys.authkeylen);
+	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	/*
+	 * If DKP is supported, use it in the shared descriptor to generate
+	 * the split key.
+	 */
+	if (ctrlpriv->era >= 6) {
+		ctx->adata.keylen = keys.authkeylen;
+		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+						      OP_ALG_ALGSEL_MASK);
+
+		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+			goto badkey;
+
+		memcpy(ctx->key, keys.authkey, keys.authkeylen);
+		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+		       keys.enckeylen);
+		dma_sync_single_for_device(jrdev, ctx->key_dma,
+					   ctx->adata.keylen_pad +
+					   keys.enckeylen, ctx->dir);
+		goto skip_split_key;
+	}
+
+	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
+			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
+			    keys.enckeylen);
+	if (ret)
+		goto badkey;
+
+	/* postpend encryption key to auth split key */
+	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
+				   keys.enckeylen, ctx->dir);
+
+#ifdef DEBUG
+	dev_err(jrdev, "split keylen %d split keylen padded %d\n",
+		ctx->adata.keylen, ctx->adata.keylen_pad);
+	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+		       ctx->adata.keylen_pad + keys.enckeylen, 1);
+#endif
+
+skip_split_key:
+	ctx->cdata.keylen = keys.enckeylen;
+
+	ret = tls_set_sh_desc(tls);
+	if (ret)
+		goto badkey;
+
+	/* Now update the driver contexts with the new shared descriptor */
+	if (ctx->drv_ctx[ENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+					  ctx->sh_desc_enc);
+		if (ret) {
+			dev_err(jrdev, "driver enc context update failed\n");
+			goto badkey;
+		}
+	}
+
+	if (ctx->drv_ctx[DECRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+					  ctx->sh_desc_dec);
+		if (ret) {
+			dev_err(jrdev, "driver dec context update failed\n");
+			goto badkey;
+		}
+	}
+
+	return ret;
+badkey:
+	crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+			      ctx->authsize, true);
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+			      ctx->authsize, true);
+
+	return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	gcm_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+		      const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	int ret;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
+	ctx->cdata.keylen = keylen;
+
+	ret = gcm_set_sh_desc(aead);
+	if (ret)
+		return ret;
+
+	/* Now update the driver contexts with the new shared descriptor */
+	if (ctx->drv_ctx[ENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+					  ctx->sh_desc_enc);
+		if (ret) {
+			dev_err(jrdev, "driver enc context update failed\n");
+			return ret;
+		}
+	}
+
+	if (ctx->drv_ctx[DECRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+					  ctx->sh_desc_dec);
+		if (ret) {
+			dev_err(jrdev, "driver dec context update failed\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	ctx->cdata.key_virt = ctx->key;
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+				  ctx->authsize, true);
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+				  ctx->authsize, true);
+
+	return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+			       unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	rfc4106_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int rfc4106_setkey(struct crypto_aead *aead,
+			  const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	int ret;
+
+	if (keylen < 4)
+		return -EINVAL;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+	/*
+	 * The last four bytes of the key material are used as the salt value
+	 * in the nonce. Update the AES key length.
+	 */
+	ctx->cdata.keylen = keylen - 4;
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+				   ctx->dir);
+
+	ret = rfc4106_set_sh_desc(aead);
+	if (ret)
+		return ret;
+
+	/* Now update the driver contexts with the new shared descriptor */
+	if (ctx->drv_ctx[ENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+					  ctx->sh_desc_enc);
+		if (ret) {
+			dev_err(jrdev, "driver enc context update failed\n");
+			return ret;
+		}
+	}
+
+	if (ctx->drv_ctx[DECRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+					  ctx->sh_desc_dec);
+		if (ret) {
+			dev_err(jrdev, "driver dec context update failed\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	ctx->cdata.key_virt = ctx->key;
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+				  ctx->authsize, true);
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+				  ctx->authsize, true);
+
+	return 0;
+}
+
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+			       unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	rfc4543_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int rfc4543_setkey(struct crypto_aead *aead,
+			  const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	int ret;
+
+	if (keylen < 4)
+		return -EINVAL;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+	/*
+	 * The last four bytes of the key material are used as the salt value
+	 * in the nonce. Update the AES key length.
+	 */
+	ctx->cdata.keylen = keylen - 4;
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+				   ctx->dir);
+
+	ret = rfc4543_set_sh_desc(aead);
+	if (ret)
+		return ret;
+
+	/* Now update the driver contexts with the new shared descriptor */
+	if (ctx->drv_ctx[ENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+					  ctx->sh_desc_enc);
+		if (ret) {
+			dev_err(jrdev, "driver enc context update failed\n");
+			return ret;
+		}
+	}
+
+	if (ctx->drv_ctx[DECRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+					  ctx->sh_desc_dec);
+		if (ret) {
+			dev_err(jrdev, "driver dec context update failed\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 			     const u8 *key, unsigned int keylen)
 {
@@ -414,6 +902,29 @@ struct aead_edesc {
 };

 /*
+ * tls_edesc - s/w-extended tls descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @drv_req: driver-specific request structure
+ * @sgt: the h/w link table, followed by IV
+ */
+struct tls_edesc {
+	int src_nents;
+	int dst_nents;
+	dma_addr_t iv_dma;
+	int qm_sg_bytes;
+	dma_addr_t qm_sg_dma;
+	struct scatterlist tmp[2];
+	struct scatterlist *dst;
+	struct caam_drv_req drv_req;
+	struct qm_sg_entry sgt[0];
+};
+
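Annotation (not part of the patch): like the other *_edesc types in this driver, tls_edesc is carved out of a single qi_cache allocation with the variable-sized pieces appended after the struct; tls_edesc_alloc() below places the IV directly behind the S/G table (iv = (u8 *)(sg_table + qm_sg_ents)), so one allocation covers everything that must be DMA-visible:

	struct tls_edesc | qm_sg_ents S/G entries | IV (ivsize bytes)
	                   ^ edesc->sgt             ^ iv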
+/*
  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
  * @src_nents: number of segments in input scatterlist
  * @dst_nents: number of segments in output scatterlist
@@ -508,6 +1019,19 @@ static void aead_unmap(struct device *de
 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
 }

+static void tls_unmap(struct device *dev,
+		      struct tls_edesc *edesc,
+		      struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	int ivsize = crypto_aead_ivsize(aead);
+
+	caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
+		   edesc->dst_nents, edesc->iv_dma, ivsize,
+		   edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
+		   edesc->qm_sg_bytes);
+}
+
 static void ablkcipher_unmap(struct device *dev,
 			     struct ablkcipher_edesc *edesc,
 			     struct ablkcipher_request *req)
@@ -532,8 +1056,18 @@ static void aead_done(struct caam_drv_re
 	qidev = caam_ctx->qidev;

 	if (unlikely(status)) {
+		u32 ssrc = status & JRSTA_SSRC_MASK;
+		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+
 		caam_jr_strstatus(qidev, status);
-		ecode = -EIO;
+		/*
+		 * verify hw auth check passed else return -EBADMSG
+		 */
+		if (ssrc == JRSTA_SSRC_CCB_ERROR &&
+		    err_id == JRSTA_CCBERR_ERRID_ICVCHK)
+			ecode = -EBADMSG;
+		else
+			ecode = -EIO;
 	}

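Annotation (not part of the patch): returning -EBADMSG instead of the generic -EIO for an ICV mismatch follows the kernel AEAD convention, where callers distinguish "authentication failed" from a transport or driver error. The decode above reads the job-ring status word: JRSTA_SSRC_CCB_ERROR says the error originated in the CCB, and JRSTA_CCBERR_ERRID_ICVCHK is the ICV-check failure code (both assumed to be defined in regs.h).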
 	edesc = container_of(drv_req, typeof(*edesc), drv_req);
@@ -785,6 +1319,260 @@ static int aead_decrypt(struct aead_requ
 	return aead_crypt(req, false);
 }

+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return aead_crypt(req, true);
+}
+
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return aead_crypt(req, false);
+}
+
+static void tls_done(struct caam_drv_req *drv_req, u32 status)
+{
+	struct device *qidev;
+	struct tls_edesc *edesc;
+	struct aead_request *aead_req = drv_req->app_ctx;
+	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
+	int ecode = 0;
+
+	qidev = caam_ctx->qidev;
+
+	if (unlikely(status)) {
+		caam_jr_strstatus(qidev, status);
+		ecode = -EIO;
+	}
+
+	edesc = container_of(drv_req, typeof(*edesc), drv_req);
+	tls_unmap(qidev, edesc, aead_req);
+
+	aead_request_complete(aead_req, ecode);
+	qi_cache_free(edesc);
+}
+
+/*
+ * allocate and map the tls extended descriptor
+ */
+static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	unsigned int blocksize = crypto_aead_blocksize(aead);
+	unsigned int padsize, authsize;
+	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+						 typeof(*alg), aead);
+	struct device *qidev = ctx->qidev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		      GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	struct tls_edesc *edesc;
+	dma_addr_t qm_sg_dma, iv_dma = 0;
+	int ivsize = 0;
+	u8 *iv;
+	int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
+	int in_len, out_len;
+	struct qm_sg_entry *sg_table, *fd_sgt;
+	struct caam_drv_ctx *drv_ctx;
+	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+	struct scatterlist *dst;
+
+	if (encrypt) {
+		padsize = blocksize - ((req->cryptlen + ctx->authsize) %
+				       blocksize);
+		authsize = ctx->authsize + padsize;
+	} else {
+		authsize = ctx->authsize;
+	}
+
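Annotation (not part of the patch): this is the standard TLS 1.0 CBC padding rule, which always adds between 1 and blocksize bytes so the record stays block-aligned. A worked example with AES-CBC and HMAC-SHA1 (blocksize = 16, ctx->authsize = 20) for req->cryptlen = 32:

	padsize = 16 - ((32 + 20) % 16) = 16 - 4 = 12

so payload + MAC + padding = 32 + 20 + 12 = 64 bytes, a whole number of blocks, and each of the 12 pad bytes carries the value 11 (padsize - 1) on the wire.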
+	drv_ctx = get_drv_ctx(ctx, op_type);
+	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+		return (struct tls_edesc *)drv_ctx;
+
+	/* allocate space for base edesc, link tables and IV */
+	edesc = qi_cache_alloc(GFP_DMA | flags);
+	if (unlikely(!edesc)) {
+		dev_err(qidev, "could not allocate extended descriptor\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (likely(req->src == req->dst)) {
+		src_nents = sg_nents_for_len(req->src, req->assoclen +
+					     req->cryptlen +
+					     (encrypt ? authsize : 0));
+		if (unlikely(src_nents < 0)) {
+			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+				req->assoclen + req->cryptlen +
+				(encrypt ? authsize : 0));
+			qi_cache_free(edesc);
+			return ERR_PTR(src_nents);
+		}
+
+		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(qidev, "unable to map source\n");
+			qi_cache_free(edesc);
+			return ERR_PTR(-ENOMEM);
+		}
+		dst = req->dst;
+	} else {
+		src_nents = sg_nents_for_len(req->src, req->assoclen +
+					     req->cryptlen);
+		if (unlikely(src_nents < 0)) {
+			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+				req->assoclen + req->cryptlen);
+			qi_cache_free(edesc);
+			return ERR_PTR(src_nents);
+		}
+
+		dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
+		dst_nents = sg_nents_for_len(dst, req->cryptlen +
+					     (encrypt ? authsize : 0));
+		if (unlikely(dst_nents < 0)) {
+			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
+				req->cryptlen +
+				(encrypt ? authsize : 0));
+			qi_cache_free(edesc);
+			return ERR_PTR(dst_nents);
+		}
+
+		if (src_nents) {
+			mapped_src_nents = dma_map_sg(qidev, req->src,
+						      src_nents, DMA_TO_DEVICE);
+			if (unlikely(!mapped_src_nents)) {
+				dev_err(qidev, "unable to map source\n");
+				qi_cache_free(edesc);
+				return ERR_PTR(-ENOMEM);
+			}
+		} else {
+			mapped_src_nents = 0;
+		}
+
+		mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
+					      DMA_FROM_DEVICE);
+		if (unlikely(!mapped_dst_nents)) {
+			dev_err(qidev, "unable to map destination\n");
+			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
+			qi_cache_free(edesc);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	/*
+	 * Create S/G table: IV, src, dst.
+	 * Input is not contiguous.
+	 */
+	qm_sg_ents = 1 + mapped_src_nents +
+		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+	sg_table = &edesc->sgt[0];
+	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
+	ivsize = crypto_aead_ivsize(aead);
+	iv = (u8 *)(sg_table + qm_sg_ents);
+	/* Make sure IV is located in a DMAable area */
+	memcpy(iv, req->iv, ivsize);
+	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(qidev, iv_dma)) {
+		dev_err(qidev, "unable to map IV\n");
+		caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
+			   0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->dst = dst;
+	edesc->iv_dma = iv_dma;
+	edesc->drv_req.app_ctx = req;
+	edesc->drv_req.cbk = tls_done;
+	edesc->drv_req.drv_ctx = drv_ctx;
+
+	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+	qm_sg_index = 1;
+
+	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+	qm_sg_index += mapped_src_nents;
+
+	if (mapped_dst_nents > 1)
+		sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
+				 qm_sg_index, 0);
+
+	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(qidev, qm_sg_dma)) {
+		dev_err(qidev, "unable to map S/G table\n");
+		caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
+			   ivsize, op_type, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->qm_sg_dma = qm_sg_dma;
+	edesc->qm_sg_bytes = qm_sg_bytes;
+
+	out_len = req->cryptlen + (encrypt ? authsize : 0);
+	in_len = ivsize + req->assoclen + req->cryptlen;
+
+	fd_sgt = &edesc->drv_req.fd_sgt[0];
+
+	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
+
+	if (req->dst == req->src)
+		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
+				     (sg_nents_for_len(req->src, req->assoclen) +
+				      1) * sizeof(*sg_table), out_len, 0);
+	else if (mapped_dst_nents == 1)
+		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
+	else
+		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
+				     qm_sg_index, out_len, 0);
+
+	return edesc;
+}
+
+static int tls_crypt(struct aead_request *req, bool encrypt)
+{
+	struct tls_edesc *edesc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	int ret;
+
+	if (unlikely(caam_congested))
+		return -EAGAIN;
+
+	edesc = tls_edesc_alloc(req, encrypt);
+	if (IS_ERR_OR_NULL(edesc))
+		return PTR_ERR(edesc);
+
+	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		tls_unmap(ctx->qidev, edesc, req);
+		qi_cache_free(edesc);
+	}
+
+	return ret;
+}
+
+static int tls_encrypt(struct aead_request *req)
+{
+	return tls_crypt(req, true);
+}
+
+static int tls_decrypt(struct aead_request *req)
+{
+	return tls_crypt(req, false);
+}
+
 static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
 {
 	struct ablkcipher_edesc *edesc;
@@ -1308,6 +2096,61 @@ static struct caam_alg_template driver_a
 };

 static struct caam_aead_alg driver_aeads[] = {
+	{
+		.aead = {
+			.base = {
+				.cra_name = "rfc4106(gcm(aes))",
+				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
+				.cra_blocksize = 1,
+			},
+			.setkey = rfc4106_setkey,
+			.setauthsize = rfc4106_setauthsize,
+			.encrypt = ipsec_gcm_encrypt,
+			.decrypt = ipsec_gcm_decrypt,
+			.ivsize = 8,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "rfc4543(gcm(aes))",
+				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
+				.cra_blocksize = 1,
+			},
+			.setkey = rfc4543_setkey,
+			.setauthsize = rfc4543_setauthsize,
+			.encrypt = ipsec_gcm_encrypt,
+			.decrypt = ipsec_gcm_decrypt,
+			.ivsize = 8,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		},
+	},
+	/* Galois Counter Mode */
+	{
+		.aead = {
+			.base = {
+				.cra_name = "gcm(aes)",
+				.cra_driver_name = "gcm-aes-caam-qi",
+				.cra_blocksize = 1,
+			},
+			.setkey = gcm_setkey,
+			.setauthsize = gcm_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = 12,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		}
+	},
 	/* single-pass ipsec_esp descriptor */
 	{
 		.aead = {
@@ -2118,6 +2961,26 @@ static struct caam_aead_alg driver_aeads
 			.geniv = true,
 		}
 	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "tls10(hmac(sha1),cbc(aes))",
+				.cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = tls_setkey,
+			.setauthsize = tls_setauthsize,
+			.encrypt = tls_encrypt,
+			.decrypt = tls_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	}
 };
4046 | |||
4047 | struct caam_crypto_alg { |
||
4048 | @@ -2126,9 +2989,20 @@ struct caam_crypto_alg { |
||
4049 | struct caam_alg_entry caam; |
||
4050 | }; |
||
4051 | |||
4052 | -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) |
||
4053 | +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, |
||
4054 | + bool uses_dkp) |
||
4055 | { |
||
4056 | struct caam_drv_private *priv; |
||
4057 | + /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */ |
||
4058 | + static const u8 digest_size[] = { |
||
4059 | + MD5_DIGEST_SIZE, |
||
4060 | + SHA1_DIGEST_SIZE, |
||
4061 | + SHA224_DIGEST_SIZE, |
||
4062 | + SHA256_DIGEST_SIZE, |
||
4063 | + SHA384_DIGEST_SIZE, |
||
4064 | + SHA512_DIGEST_SIZE |
||
4065 | + }; |
||
4066 | + u8 op_id; |
||
4067 | |||
4068 | /* |
||
4069 | * distribute tfms across job rings to ensure in-order |
||
4070 | @@ -2140,8 +3014,14 @@ static int caam_init_common(struct caam_ |
||
4071 | return PTR_ERR(ctx->jrdev); |
||
4072 | } |
||
4073 | |||
4074 | + priv = dev_get_drvdata(ctx->jrdev->parent); |
||
4075 | + if (priv->era >= 6 && uses_dkp) |
||
4076 | + ctx->dir = DMA_BIDIRECTIONAL; |
||
4077 | + else |
||
4078 | + ctx->dir = DMA_TO_DEVICE; |
||
4079 | + |
||
4080 | ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key), |
||
4081 | - DMA_TO_DEVICE); |
||
4082 | + ctx->dir); |
||
4083 | if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { |
||
4084 | dev_err(ctx->jrdev, "unable to map key\n"); |
||
4085 | caam_jr_free(ctx->jrdev); |
||
4086 | @@ -2152,7 +3032,22 @@ static int caam_init_common(struct caam_ |
||
4087 | ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; |
||
4088 | ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; |
||
4089 | |||
4090 | - priv = dev_get_drvdata(ctx->jrdev->parent); |
||
4091 | + if (ctx->adata.algtype) { |
||
4092 | + op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK) |
||
4093 | + >> OP_ALG_ALGSEL_SHIFT; |
||
4094 | + if (op_id < ARRAY_SIZE(digest_size)) { |
||
4095 | + ctx->authsize = digest_size[op_id]; |
||
4096 | + } else { |
||
4097 | + dev_err(ctx->jrdev, |
||
4098 | + "incorrect op_id %d; must be less than %zu\n", |
||
4099 | + op_id, ARRAY_SIZE(digest_size)); |
||
4100 | + caam_jr_free(ctx->jrdev); |
||
4101 | + return -EINVAL; |
||
4102 | + } |
||
4103 | + } else { |
||
4104 | + ctx->authsize = 0; |
||
4105 | + } |
||
4106 | + |
||
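The digest_size[] table above is indexed by the ALGSEL sub-field of adata.algtype, which relies on MD5 through SHA-512 occupying consecutive OP_ALG_ALGSEL_SUBMASK values, with MD5 mapping to 0. A hypothetical helper condensing that lookup, under the same assumption:

	static u8 default_authsize(u32 adata_algtype)
	{
		/* MD5..SHA-512 assumed contiguous in the ALGSEL encoding */
		static const u8 digest_size[] = {
			MD5_DIGEST_SIZE, SHA1_DIGEST_SIZE, SHA224_DIGEST_SIZE,
			SHA256_DIGEST_SIZE, SHA384_DIGEST_SIZE, SHA512_DIGEST_SIZE,
		};
		u8 op_id = (adata_algtype & OP_ALG_ALGSEL_SUBMASK) >>
			   OP_ALG_ALGSEL_SHIFT;

		/* e.g. OP_ALG_ALGSEL_SHA1 yields op_id == 1, a 20-byte tag */
		return op_id < ARRAY_SIZE(digest_size) ? digest_size[op_id] : 0;
	}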
4107 | ctx->qidev = priv->qidev; |
||
4108 | |||
4109 | spin_lock_init(&ctx->lock); |
||
4110 | @@ -2170,7 +3065,7 @@ static int caam_cra_init(struct crypto_t |
||
4111 | crypto_alg); |
||
4112 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); |
||
4113 | |||
4114 | - return caam_init_common(ctx, &caam_alg->caam); |
||
4115 | + return caam_init_common(ctx, &caam_alg->caam, false); |
||
4116 | } |
||
4117 | |||
4118 | static int caam_aead_init(struct crypto_aead *tfm) |
||
4119 | @@ -2180,7 +3075,9 @@ static int caam_aead_init(struct crypto_ |
||
4120 | aead); |
||
4121 | struct caam_ctx *ctx = crypto_aead_ctx(tfm); |
||
4122 | |||
4123 | - return caam_init_common(ctx, &caam_alg->caam); |
||
4124 | + return caam_init_common(ctx, &caam_alg->caam, |
||
4125 | + (alg->setkey == aead_setkey) || |
||
4126 | + (alg->setkey == tls_setkey)); |
||
4127 | } |
||
4128 | |||
4129 | static void caam_exit_common(struct caam_ctx *ctx) |
||
4130 | @@ -2189,8 +3086,7 @@ static void caam_exit_common(struct caam |
||
4131 | caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); |
||
4132 | caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]); |
||
4133 | |||
4134 | - dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), |
||
4135 | - DMA_TO_DEVICE); |
||
4136 | + dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir); |
||
4137 | |||
4138 | caam_jr_free(ctx->jrdev); |
||
4139 | } |
||
4140 | @@ -2315,6 +3211,11 @@ static int __init caam_qi_algapi_init(vo |
||
4141 | if (!priv || !priv->qi_present) |
||
4142 | return -ENODEV; |
||
4143 | |||
4144 | + if (caam_dpaa2) { |
||
4145 | + dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); |
||
4146 | + return -ENODEV; |
||
4147 | + } |
||
4148 | + |
||
4149 | INIT_LIST_HEAD(&alg_list); |
||
4150 | |||
4151 | /* |
||
4152 | --- /dev/null |
||
4153 | +++ b/drivers/crypto/caam/caamalg_qi2.c |
||
4154 | @@ -0,0 +1,5691 @@ |
||
4155 | +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) |
||
4156 | +/* |
||
4157 | + * Copyright 2015-2016 Freescale Semiconductor Inc. |
||
4158 | + * Copyright 2017-2018 NXP |
||
4159 | + */ |
||
4160 | + |
||
4161 | +#include <linux/fsl/mc.h> |
||
4162 | +#include "compat.h" |
||
4163 | +#include "regs.h" |
||
4164 | +#include "caamalg_qi2.h" |
||
4165 | +#include "dpseci_cmd.h" |
||
4166 | +#include "desc_constr.h" |
||
4167 | +#include "error.h" |
||
4168 | +#include "sg_sw_sec4.h" |
||
4169 | +#include "sg_sw_qm2.h" |
||
4170 | +#include "key_gen.h" |
||
4171 | +#include "caamalg_desc.h" |
||
4172 | +#include "caamhash_desc.h" |
||
4173 | +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h" |
||
4174 | +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h" |
||
4175 | + |
||
4176 | +#define CAAM_CRA_PRIORITY 2000 |
||
4177 | + |
||
4178 | +/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size and max split key size */ |
||
4179 | +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \ |
||
4180 | + SHA512_DIGEST_SIZE * 2) |
||
4181 | + |
||
4182 | +#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM |
||
4183 | +bool caam_little_end; |
||
4184 | +EXPORT_SYMBOL(caam_little_end); |
||
4185 | +bool caam_imx; |
||
4186 | +EXPORT_SYMBOL(caam_imx); |
||
4187 | +#endif |
||
4188 | + |
||
4189 | +/* |
||
4190 | + * This is a cache of buffers, from which the users of CAAM QI driver |
||
4191 | + * can allocate short buffers. It's speedier than doing kmalloc on the hotpath. |
||
4192 | + * NOTE: A more elegant solution would be to have some headroom in the frames |
||
4193 | + * being processed. This could be added by the dpaa2-eth driver, but it |
||
4194 | + * would pose a problem for userspace applications, which cannot |
||
4195 | + * know of this limitation. So for now, this will work. |
||
4196 | + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here. |
||
4197 | + */ |
||
4198 | +static struct kmem_cache *qi_cache; |
||
4199 | + |
||
4200 | +struct caam_alg_entry { |
||
4201 | + struct device *dev; |
||
4202 | + int class1_alg_type; |
||
4203 | + int class2_alg_type; |
||
4204 | + bool rfc3686; |
||
4205 | + bool geniv; |
||
4206 | +}; |
||
4207 | + |
||
4208 | +struct caam_aead_alg { |
||
4209 | + struct aead_alg aead; |
||
4210 | + struct caam_alg_entry caam; |
||
4211 | + bool registered; |
||
4212 | +}; |
||
4213 | + |
||
4214 | +struct caam_skcipher_alg { |
||
4215 | + struct skcipher_alg skcipher; |
||
4216 | + struct caam_alg_entry caam; |
||
4217 | + bool registered; |
||
4218 | +}; |
||
4219 | + |
||
4220 | +/** |
||
4221 | + * caam_ctx - per-session context |
||
4222 | + * @flc: Flow Contexts array |
||
4223 | + * @key: virtual address of the key(s): [authentication key], encryption key |
||
4224 | + * @flc_dma: I/O virtual addresses of the Flow Contexts |
||
4225 | + * @key_dma: I/O virtual address of the key |
||
4226 | + * @dir: DMA direction for mapping key and Flow Contexts |
||
4227 | + * @dev: dpseci device |
||
4228 | + * @adata: authentication algorithm details |
||
4229 | + * @cdata: encryption algorithm details |
||
4230 | + * @authsize: authentication tag (a.k.a. ICV / MAC) size |
||
4231 | + */ |
||
4232 | +struct caam_ctx { |
||
4233 | + struct caam_flc flc[NUM_OP]; |
||
4234 | + u8 key[CAAM_MAX_KEY_SIZE]; |
||
4235 | + dma_addr_t flc_dma[NUM_OP]; |
||
4236 | + dma_addr_t key_dma; |
||
4237 | + enum dma_data_direction dir; |
||
4238 | + struct device *dev; |
||
4239 | + struct alginfo adata; |
||
4240 | + struct alginfo cdata; |
||
4241 | + unsigned int authsize; |
||
4242 | +}; |
||
4243 | + |
||
4244 | +void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv, |
||
4245 | + dma_addr_t iova_addr) |
||
4246 | +{ |
||
4247 | + phys_addr_t phys_addr; |
||
4248 | + |
||
4249 | + phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) : |
||
4250 | + iova_addr; |
||
4251 | + |
||
4252 | + return phys_to_virt(phys_addr); |
||
4253 | +} |
||
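When the SoC's SMMU is active, addresses carried in DPAA2 frame descriptors are I/O virtual addresses, so dpaa2_caam_iova_to_virt() must round-trip through iommu_iova_to_phys() before phys_to_virt() is legal, as the function above does. A hedged sketch of the typical response-path use (dpaa2_fd_get_addr() is the generic DPAA2 FD accessor; the callback shape here is illustrative):

	/* the FD handed back by the DPIO carries an IOVA, not a physical */
	static void example_rx_cb(struct dpaa2_caam_priv *priv,
				  const struct dpaa2_fd *fd)
	{
		void *fle_virt = dpaa2_caam_iova_to_virt(priv,
							 dpaa2_fd_get_addr(fd));

		/* ... recover the caam_request that owns this FLE pair ... */
	}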
4254 | + |
||
4255 | +/* |
||
4256 | + * qi_cache_zalloc - Allocate buffers from CAAM-QI cache |
||
4257 | + * |
||
4258 | + * Allocate data on the hotpath. Instead of using kzalloc, one can use the |
||
4259 | + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers |
||
4260 | + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for |
||
4261 | + * hosting 16 SG entries. |
||
4262 | + * |
||
4263 | + * @flags - flags that would be used for the equivalent kmalloc(..) call |
||
4264 | + * |
||
4265 | + * Returns a pointer to a retrieved buffer on success or NULL on failure. |
||
4266 | + */ |
||
4267 | +static inline void *qi_cache_zalloc(gfp_t flags) |
||
4268 | +{ |
||
4269 | + return kmem_cache_zalloc(qi_cache, flags); |
||
4270 | +} |
||
4271 | + |
||
4272 | +/* |
||
4273 | + * qi_cache_free - Frees buffers allocated from CAAM-QI cache |
||
4274 | + * |
||
4275 | + * @obj - buffer previously allocated by qi_cache_zalloc |
||
4276 | + * |
||
4277 | + * No checking is done; the call is a straight passthrough to |
||
4278 | + * kmem_cache_free(...) |
||
4279 | + */ |
||
4280 | +static inline void qi_cache_free(void *obj) |
||
4281 | +{ |
||
4282 | + kmem_cache_free(qi_cache, obj); |
||
4283 | +} |
||
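The two wrappers above are thin shims over the slab allocator; the cache itself is created once at module init and destroyed at exit. A minimal sketch of that lifecycle, assuming the CAAM_QI_MEMCACHE_SIZE definition from the QI headers and a DMA-capable slab (the cache name is illustrative):

	static int __init example_dpaa2_caam_init(void)
	{
		qi_cache = kmem_cache_create("dpaa2_caamqicache",
					     CAAM_QI_MEMCACHE_SIZE, 0,
					     SLAB_CACHE_DMA, NULL);
		if (!qi_cache)
			return -ENOMEM;
		return 0;
	}

	static void __exit example_dpaa2_caam_exit(void)
	{
		kmem_cache_destroy(qi_cache);
	}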
4284 | + |
||
4285 | +static struct caam_request *to_caam_req(struct crypto_async_request *areq) |
||
4286 | +{ |
||
4287 | + switch (crypto_tfm_alg_type(areq->tfm)) { |
||
4288 | + case CRYPTO_ALG_TYPE_SKCIPHER: |
||
4289 | + return skcipher_request_ctx(skcipher_request_cast(areq)); |
||
4290 | + case CRYPTO_ALG_TYPE_AEAD: |
||
4291 | + return aead_request_ctx(container_of(areq, struct aead_request, |
||
4292 | + base)); |
||
4293 | + case CRYPTO_ALG_TYPE_AHASH: |
||
4294 | + return ahash_request_ctx(ahash_request_cast(areq)); |
||
4295 | + default: |
||
4296 | + return ERR_PTR(-EINVAL); |
||
4297 | + } |
||
4298 | +} |
||
4299 | + |
||
4300 | +static void caam_unmap(struct device *dev, struct scatterlist *src, |
||
4301 | + struct scatterlist *dst, int src_nents, |
||
4302 | + int dst_nents, dma_addr_t iv_dma, int ivsize, |
||
4303 | + dma_addr_t qm_sg_dma, int qm_sg_bytes) |
||
4304 | +{ |
||
4305 | + if (dst != src) { |
||
4306 | + if (src_nents) |
||
4307 | + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); |
||
4308 | + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); |
||
4309 | + } else { |
||
4310 | + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); |
||
4311 | + } |
||
4312 | + |
||
4313 | + if (iv_dma) |
||
4314 | + dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); |
||
4315 | + |
||
4316 | + if (qm_sg_bytes) |
||
4317 | + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); |
||
4318 | +} |
||
4319 | + |
||
4320 | +static int aead_set_sh_desc(struct crypto_aead *aead) |
||
4321 | +{ |
||
4322 | + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), |
||
4323 | + typeof(*alg), aead); |
||
4324 | + struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
4325 | + unsigned int ivsize = crypto_aead_ivsize(aead); |
||
4326 | + struct device *dev = ctx->dev; |
||
4327 | + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); |
||
4328 | + struct caam_flc *flc; |
||
4329 | + u32 *desc; |
||
4330 | + u32 ctx1_iv_off = 0; |
||
4331 | + u32 *nonce = NULL; |
||
4332 | + unsigned int data_len[2]; |
||
4333 | + u32 inl_mask; |
||
4334 | + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == |
||
4335 | + OP_ALG_AAI_CTR_MOD128); |
||
4336 | + const bool is_rfc3686 = alg->caam.rfc3686; |
||
4337 | + |
||
4338 | + if (!ctx->cdata.keylen || !ctx->authsize) |
||
4339 | + return 0; |
||
4340 | + |
||
4341 | + /* |
||
4342 | + * AES-CTR needs to load IV in CONTEXT1 reg |
||
4343 | + * at an offset of 128 bits (16 bytes) |
||
4344 | + * CONTEXT1[255:128] = IV |
||
4345 | + */ |
||
4346 | + if (ctr_mode) |
||
4347 | + ctx1_iv_off = 16; |
||
4348 | + |
||
4349 | + /* |
||
4350 | + * RFC3686 specific: |
||
4351 | + * CONTEXT1[255:128] = {NONCE, IV, COUNTER} |
||
4352 | + */ |
||
4353 | + if (is_rfc3686) { |
||
4354 | + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; |
||
4355 | + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + |
||
4356 | + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); |
||
4357 | + } |
||
4358 | + |
||
4359 | + data_len[0] = ctx->adata.keylen_pad; |
||
4360 | + data_len[1] = ctx->cdata.keylen; |
||
4361 | + |
||
4362 | + /* aead_encrypt shared descriptor */ |
||
4363 | + if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN : |
||
4364 | + DESC_QI_AEAD_ENC_LEN) + |
||
4365 | + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), |
||
4366 | + DESC_JOB_IO_LEN, data_len, &inl_mask, |
||
4367 | + ARRAY_SIZE(data_len)) < 0) |
||
4368 | + return -EINVAL; |
||
4369 | + |
||
4370 | + if (inl_mask & 1) |
||
4371 | + ctx->adata.key_virt = ctx->key; |
||
4372 | + else |
||
4373 | + ctx->adata.key_dma = ctx->key_dma; |
||
4374 | + |
||
4375 | + if (inl_mask & 2) |
||
4376 | + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; |
||
4377 | + else |
||
4378 | + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; |
||
4379 | + |
||
4380 | + ctx->adata.key_inline = !!(inl_mask & 1); |
||
4381 | + ctx->cdata.key_inline = !!(inl_mask & 2); |
||
4382 | + |
||
4383 | + flc = &ctx->flc[ENCRYPT]; |
||
4384 | + desc = flc->sh_desc; |
||
4385 | + |
||
4386 | + if (alg->caam.geniv) |
||
4387 | + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, |
||
4388 | + ivsize, ctx->authsize, is_rfc3686, |
||
4389 | + nonce, ctx1_iv_off, true, |
||
4390 | + priv->sec_attr.era); |
||
4391 | + else |
||
4392 | + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, |
||
4393 | + ivsize, ctx->authsize, is_rfc3686, nonce, |
||
4394 | + ctx1_iv_off, true, priv->sec_attr.era); |
||
4395 | + |
||
4396 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
4397 | + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], |
||
4398 | + sizeof(flc->flc) + desc_bytes(desc), |
||
4399 | + ctx->dir); |
||
4400 | + |
||
4401 | + /* aead_decrypt shared descriptor */ |
||
4402 | + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN + |
||
4403 | + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), |
||
4404 | + DESC_JOB_IO_LEN, data_len, &inl_mask, |
||
4405 | + ARRAY_SIZE(data_len)) < 0) |
||
4406 | + return -EINVAL; |
||
4407 | + |
||
4408 | + if (inl_mask & 1) |
||
4409 | + ctx->adata.key_virt = ctx->key; |
||
4410 | + else |
||
4411 | + ctx->adata.key_dma = ctx->key_dma; |
||
4412 | + |
||
4413 | + if (inl_mask & 2) |
||
4414 | + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; |
||
4415 | + else |
||
4416 | + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; |
||
4417 | + |
||
4418 | + ctx->adata.key_inline = !!(inl_mask & 1); |
||
4419 | + ctx->cdata.key_inline = !!(inl_mask & 2); |
||
4420 | + |
||
4421 | + flc = &ctx->flc[DECRYPT]; |
||
4422 | + desc = flc->sh_desc; |
||
4423 | + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, |
||
4424 | + ivsize, ctx->authsize, alg->caam.geniv, |
||
4425 | + is_rfc3686, nonce, ctx1_iv_off, true, |
||
4426 | + priv->sec_attr.era); |
||
4427 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
4428 | + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], |
||
4429 | + sizeof(flc->flc) + desc_bytes(desc), |
||
4430 | + ctx->dir); |
||
4431 | + |
||
4432 | + return 0; |
||
4433 | +} |
||
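desc_inline_query() answers, per data item, whether it still fits inside the 64-word shared descriptor: bit 0 of inl_mask covers data_len[0] (the padded split authentication key) and bit 1 covers data_len[1] (the cipher key), which is why key_virt and key_dma are patched independently above. A compressed restatement via a hypothetical helper:

	/* apply one bit of desc_inline_query()'s verdict to an alginfo */
	static void set_key_source(struct alginfo *ai, bool inline_ok,
				   const u8 *key_virt, dma_addr_t key_dma)
	{
		if (inline_ok)
			ai->key_virt = key_virt;  /* key copied into the descriptor */
		else
			ai->key_dma = key_dma;    /* descriptor references the DMA key */
		ai->key_inline = inline_ok;
	}

	/* usage, mirroring aead_set_sh_desc():
	 *   set_key_source(&ctx->adata, inl_mask & 1, ctx->key, ctx->key_dma);
	 *   set_key_source(&ctx->cdata, inl_mask & 2,
	 *                  ctx->key + ctx->adata.keylen_pad,
	 *                  ctx->key_dma + ctx->adata.keylen_pad);
	 */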
4434 | + |
||
4435 | +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) |
||
4436 | +{ |
||
4437 | + struct caam_ctx *ctx = crypto_aead_ctx(authenc); |
||
4438 | + |
||
4439 | + ctx->authsize = authsize; |
||
4440 | + aead_set_sh_desc(authenc); |
||
4441 | + |
||
4442 | + return 0; |
||
4443 | +} |
||
4444 | + |
||
4445 | +struct split_key_sh_result { |
||
4446 | + struct completion completion; |
||
4447 | + int err; |
||
4448 | + struct device *dev; |
||
4449 | +}; |
||
4450 | + |
||
4451 | +static void split_key_sh_done(void *cbk_ctx, u32 err) |
||
4452 | +{ |
||
4453 | + struct split_key_sh_result *res = cbk_ctx; |
||
4454 | + |
||
4455 | +#ifdef DEBUG |
||
4456 | + dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
||
4457 | +#endif |
||
4458 | + |
||
4459 | + if (err) |
||
4460 | + caam_qi2_strstatus(res->dev, err); |
||
4461 | + |
||
4462 | + res->err = err; |
||
4463 | + complete(&res->completion); |
||
4464 | +} |
||
4465 | + |
||
4466 | +static int aead_setkey(struct crypto_aead *aead, const u8 *key, |
||
4467 | + unsigned int keylen) |
||
4468 | +{ |
||
4469 | + struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
4470 | + struct device *dev = ctx->dev; |
||
4471 | + struct crypto_authenc_keys keys; |
||
4472 | + |
||
4473 | + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
||
4474 | + goto badkey; |
||
4475 | + |
||
4476 | +#ifdef DEBUG |
||
4477 | + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n", |
||
4478 | + keys.authkeylen + keys.enckeylen, keys.enckeylen, |
||
4479 | + keys.authkeylen); |
||
4480 | + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", |
||
4481 | + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
||
4482 | +#endif |
||
4483 | + |
||
4484 | + ctx->adata.keylen = keys.authkeylen; |
||
4485 | + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & |
||
4486 | + OP_ALG_ALGSEL_MASK); |
||
4487 | + |
||
4488 | + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) |
||
4489 | + goto badkey; |
||
4490 | + |
||
4491 | + memcpy(ctx->key, keys.authkey, keys.authkeylen); |
||
4492 | + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); |
||
4493 | + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad + |
||
4494 | + keys.enckeylen, ctx->dir); |
||
4495 | +#ifdef DEBUG |
||
4496 | + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", |
||
4497 | + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, |
||
4498 | + ctx->adata.keylen_pad + keys.enckeylen, 1); |
||
4499 | +#endif |
||
4500 | + |
||
4501 | + ctx->cdata.keylen = keys.enckeylen; |
||
4502 | + |
||
4503 | + return aead_set_sh_desc(aead); |
||
4504 | +badkey: |
||
4505 | + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); |
||
4506 | + return -EINVAL; |
||
4507 | +} |
||
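The blob parsed by crypto_authenc_extractkeys() above is the standard authenc key format: an rtattr of type CRYPTO_AUTHENC_KEYA_PARAM carrying the big-endian encryption-key length, followed by the authentication key and then the encryption key. A sketch of serializing a pair of raw keys into that layout (the helper name is illustrative; the format itself is from <crypto/authenc.h>):

	#include <crypto/authenc.h>
	#include <linux/rtnetlink.h>

	static int build_authenc_key(u8 *out, const u8 *authkey, u32 authlen,
				     const u8 *enckey, u32 enclen)
	{
		struct rtattr *rta = (struct rtattr *)out;
		struct crypto_authenc_key_param *param;

		rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
		rta->rta_len = RTA_LENGTH(sizeof(*param));
		param = RTA_DATA(rta);
		param->enckeylen = cpu_to_be32(enclen);

		/* keys follow the aligned rtattr: auth key first, then enc key */
		memcpy(out + RTA_SPACE(sizeof(*param)), authkey, authlen);
		memcpy(out + RTA_SPACE(sizeof(*param)) + authlen, enckey, enclen);
		return RTA_SPACE(sizeof(*param)) + authlen + enclen;
	}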
4508 | + |
||
4509 | +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, |
||
4510 | + bool encrypt) |
||
4511 | +{ |
||
4512 | + struct crypto_aead *aead = crypto_aead_reqtfm(req); |
||
4513 | + struct caam_request *req_ctx = aead_request_ctx(req); |
||
4514 | + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; |
||
4515 | + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; |
||
4516 | + struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
4517 | + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), |
||
4518 | + typeof(*alg), aead); |
||
4519 | + struct device *dev = ctx->dev; |
||
4520 | + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
||
4521 | + GFP_KERNEL : GFP_ATOMIC; |
||
4522 | + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; |
||
4523 | + struct aead_edesc *edesc; |
||
4524 | + dma_addr_t qm_sg_dma, iv_dma = 0; |
||
4525 | + int ivsize = 0; |
||
4526 | + unsigned int authsize = ctx->authsize; |
||
4527 | + int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes; |
||
4528 | + int in_len, out_len; |
||
4529 | + struct dpaa2_sg_entry *sg_table; |
||
4530 | + |
||
4531 | + /* allocate space for base edesc, link tables and IV */ |
||
4532 | + edesc = qi_cache_zalloc(GFP_DMA | flags); |
||
4533 | + if (unlikely(!edesc)) { |
||
4534 | + dev_err(dev, "could not allocate extended descriptor\n"); |
||
4535 | + return ERR_PTR(-ENOMEM); |
||
4536 | + } |
||
4537 | + |
||
4538 | + if (unlikely(req->dst != req->src)) { |
||
4539 | + src_nents = sg_nents_for_len(req->src, req->assoclen + |
||
4540 | + req->cryptlen); |
||
4541 | + if (unlikely(src_nents < 0)) { |
||
4542 | + dev_err(dev, "Insufficient bytes (%d) in src S/G\n", |
||
4543 | + req->assoclen + req->cryptlen); |
||
4544 | + qi_cache_free(edesc); |
||
4545 | + return ERR_PTR(src_nents); |
||
4546 | + } |
||
4547 | + |
||
4548 | + dst_nents = sg_nents_for_len(req->dst, req->assoclen + |
||
4549 | + req->cryptlen + |
||
4550 | + (encrypt ? authsize : |
||
4551 | + (-authsize))); |
||
4552 | + if (unlikely(dst_nents < 0)) { |
||
4553 | + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n", |
||
4554 | + req->assoclen + req->cryptlen + |
||
4555 | + (encrypt ? authsize : (-authsize))); |
||
4556 | + qi_cache_free(edesc); |
||
4557 | + return ERR_PTR(dst_nents); |
||
4558 | + } |
||
4559 | + |
||
4560 | + if (src_nents) { |
||
4561 | + mapped_src_nents = dma_map_sg(dev, req->src, src_nents, |
||
4562 | + DMA_TO_DEVICE); |
||
4563 | + if (unlikely(!mapped_src_nents)) { |
||
4564 | + dev_err(dev, "unable to map source\n"); |
||
4565 | + qi_cache_free(edesc); |
||
4566 | + return ERR_PTR(-ENOMEM); |
||
4567 | + } |
||
4568 | + } else { |
||
4569 | + mapped_src_nents = 0; |
||
4570 | + } |
||
4571 | + |
||
4572 | + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, |
||
4573 | + DMA_FROM_DEVICE); |
||
4574 | + if (unlikely(!mapped_dst_nents)) { |
||
4575 | + dev_err(dev, "unable to map destination\n"); |
||
4576 | + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); |
||
4577 | + qi_cache_free(edesc); |
||
4578 | + return ERR_PTR(-ENOMEM); |
||
4579 | + } |
||
4580 | + } else { |
||
4581 | + src_nents = sg_nents_for_len(req->src, req->assoclen + |
||
4582 | + req->cryptlen + |
||
4583 | + (encrypt ? authsize : 0)); |
||
4584 | + if (unlikely(src_nents < 0)) { |
||
4585 | + dev_err(dev, "Insufficient bytes (%d) in src S/G\n", |
||
4586 | + req->assoclen + req->cryptlen + |
||
4587 | + (encrypt ? authsize : 0)); |
||
4588 | + qi_cache_free(edesc); |
||
4589 | + return ERR_PTR(src_nents); |
||
4590 | + } |
||
4591 | + |
||
4592 | + mapped_src_nents = dma_map_sg(dev, req->src, src_nents, |
||
4593 | + DMA_BIDIRECTIONAL); |
||
4594 | + if (unlikely(!mapped_src_nents)) { |
||
4595 | + dev_err(dev, "unable to map source\n"); |
||
4596 | + qi_cache_free(edesc); |
||
4597 | + return ERR_PTR(-ENOMEM); |
||
4598 | + } |
||
4599 | + } |
||
4600 | + |
||
4601 | + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) |
||
4602 | + ivsize = crypto_aead_ivsize(aead); |
||
4603 | + |
||
4604 | + /* |
||
4605 | + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. |
||
4606 | + * Input is not contiguous. |
||
4607 | + */ |
||
4608 | + qm_sg_nents = 1 + !!ivsize + mapped_src_nents + |
||
4609 | + (mapped_dst_nents > 1 ? mapped_dst_nents : 0); |
||
4610 | + sg_table = &edesc->sgt[0]; |
||
4611 | + qm_sg_bytes = qm_sg_nents * sizeof(*sg_table); |
||
4612 | + if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > |
||
4613 | + CAAM_QI_MEMCACHE_SIZE)) { |
||
4614 | + dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", |
||
4615 | + qm_sg_nents, ivsize); |
||
4616 | + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, |
||
4617 | + 0, 0, 0); |
||
4618 | + qi_cache_free(edesc); |
||
4619 | + return ERR_PTR(-ENOMEM); |
||
4620 | + } |
||
4621 | + |
||
4622 | + if (ivsize) { |
||
4623 | + u8 *iv = (u8 *)(sg_table + qm_sg_nents); |
||
4624 | + |
||
4625 | + /* Make sure IV is located in a DMAable area */ |
||
4626 | + memcpy(iv, req->iv, ivsize); |
||
4627 | + |
||
4628 | + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); |
||
4629 | + if (dma_mapping_error(dev, iv_dma)) { |
||
4630 | + dev_err(dev, "unable to map IV\n"); |
||
4631 | + caam_unmap(dev, req->src, req->dst, src_nents, |
||
4632 | + dst_nents, 0, 0, 0, 0); |
||
4633 | + qi_cache_free(edesc); |
||
4634 | + return ERR_PTR(-ENOMEM); |
||
4635 | + } |
||
4636 | + } |
||
4637 | + |
||
4638 | + edesc->src_nents = src_nents; |
||
4639 | + edesc->dst_nents = dst_nents; |
||
4640 | + edesc->iv_dma = iv_dma; |
||
4641 | + |
||
4642 | + edesc->assoclen = cpu_to_caam32(req->assoclen); |
||
4643 | + edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4, |
||
4644 | + DMA_TO_DEVICE); |
||
4645 | + if (dma_mapping_error(dev, edesc->assoclen_dma)) { |
||
4646 | + dev_err(dev, "unable to map assoclen\n"); |
||
4647 | + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, |
||
4648 | + iv_dma, ivsize, 0, 0); |
||
4649 | + qi_cache_free(edesc); |
||
4650 | + return ERR_PTR(-ENOMEM); |
||
4651 | + } |
||
4652 | + |
||
4653 | + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0); |
||
4654 | + qm_sg_index++; |
||
4655 | + if (ivsize) { |
||
4656 | + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); |
||
4657 | + qm_sg_index++; |
||
4658 | + } |
||
4659 | + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); |
||
4660 | + qm_sg_index += mapped_src_nents; |
||
4661 | + |
||
4662 | + if (mapped_dst_nents > 1) |
||
4663 | + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + |
||
4664 | + qm_sg_index, 0); |
||
4665 | + |
||
4666 | + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); |
||
4667 | + if (dma_mapping_error(dev, qm_sg_dma)) { |
||
4668 | + dev_err(dev, "unable to map S/G table\n"); |
||
4669 | + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); |
||
4670 | + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, |
||
4671 | + iv_dma, ivsize, 0, 0); |
||
4672 | + qi_cache_free(edesc); |
||
4673 | + return ERR_PTR(-ENOMEM); |
||
4674 | + } |
||
4675 | + |
||
4676 | + edesc->qm_sg_dma = qm_sg_dma; |
||
4677 | + edesc->qm_sg_bytes = qm_sg_bytes; |
||
4678 | + |
||
4679 | + out_len = req->assoclen + req->cryptlen + |
||
4680 | + (encrypt ? ctx->authsize : (-ctx->authsize)); |
||
4681 | + in_len = 4 + ivsize + req->assoclen + req->cryptlen; |
||
4682 | + |
||
4683 | + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); |
||
4684 | + dpaa2_fl_set_final(in_fle, true); |
||
4685 | + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); |
||
4686 | + dpaa2_fl_set_addr(in_fle, qm_sg_dma); |
||
4687 | + dpaa2_fl_set_len(in_fle, in_len); |
||
4688 | + |
||
4689 | + if (req->dst == req->src) { |
||
4690 | + if (mapped_src_nents == 1) { |
||
4691 | + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); |
||
4692 | + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src)); |
||
4693 | + } else { |
||
4694 | + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); |
||
4695 | + dpaa2_fl_set_addr(out_fle, qm_sg_dma + |
||
4696 | + (1 + !!ivsize) * sizeof(*sg_table)); |
||
4697 | + } |
||
4698 | + } else if (mapped_dst_nents == 1) { |
||
4699 | + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); |
||
4700 | + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); |
||
4701 | + } else { |
||
4702 | + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); |
||
4703 | + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index * |
||
4704 | + sizeof(*sg_table)); |
||
4705 | + } |
||
4706 | + |
||
4707 | + dpaa2_fl_set_len(out_fle, out_len); |
||
4708 | + |
||
4709 | + return edesc; |
||
4710 | +} |
||
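Every input frame list built in aead_edesc_alloc() leads with a 4-byte entry holding the byte-swapped assoclen, which is where the constant in in_len = 4 + ivsize + req->assoclen + req->cryptlen comes from. A worked example, assuming authenc(hmac(sha256),cbc(aes)) encryption with assoclen = 16, cryptlen = 48 and a 16-byte IV:

	/*
	 * S/G input : [assoclen word (4)] [IV (16)] [src: 16 + 48 bytes]
	 *   in_len  = 4 + 16 + 16 + 48 = 84
	 *   out_len = assoclen + cryptlen + authsize = 16 + 48 + 32 = 96
	 *
	 * On decryption cryptlen includes the ICV, which is consumed, so
	 *   out_len = assoclen + cryptlen - authsize.
	 */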
4711 | + |
||
4712 | +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, |
||
4713 | + bool encrypt) |
||
4714 | +{ |
||
4715 | + struct crypto_aead *tls = crypto_aead_reqtfm(req); |
||
4716 | + unsigned int blocksize = crypto_aead_blocksize(tls); |
||
4717 | + unsigned int padsize, authsize; |
||
4718 | + struct caam_request *req_ctx = aead_request_ctx(req); |
||
4719 | + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; |
||
4720 | + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; |
||
4721 | + struct caam_ctx *ctx = crypto_aead_ctx(tls); |
||
4722 | + struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls), |
||
4723 | + typeof(*alg), aead); |
||
4724 | + struct device *dev = ctx->dev; |
||
4725 | + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
||
4726 | + GFP_KERNEL : GFP_ATOMIC; |
||
4727 | + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; |
||
4728 | + struct tls_edesc *edesc; |
||
4729 | + dma_addr_t qm_sg_dma, iv_dma = 0; |
||
4730 | + int ivsize = 0; |
||
4731 | + u8 *iv; |
||
4732 | + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes; |
||
4733 | + int in_len, out_len; |
||
4734 | + struct dpaa2_sg_entry *sg_table; |
||
4735 | + struct scatterlist *dst; |
||
4736 | + |
||
4737 | + if (encrypt) { |
||
4738 | + padsize = blocksize - ((req->cryptlen + ctx->authsize) % |
||
4739 | + blocksize); |
||
4740 | + authsize = ctx->authsize + padsize; |
||
4741 | + } else { |
||
4742 | + authsize = ctx->authsize; |
||
4743 | + } |
||
4744 | + |
||
4745 | + /* allocate space for base edesc, link tables and IV */ |
||
4746 | + edesc = qi_cache_zalloc(GFP_DMA | flags); |
||
4747 | + if (unlikely(!edesc)) { |
||
4748 | + dev_err(dev, "could not allocate extended descriptor\n"); |
||
4749 | + return ERR_PTR(-ENOMEM); |
||
4750 | + } |
||
4751 | + |
||
4752 | + if (likely(req->src == req->dst)) { |
||
4753 | + src_nents = sg_nents_for_len(req->src, req->assoclen + |
||
4754 | + req->cryptlen + |
||
4755 | + (encrypt ? authsize : 0)); |
||
4756 | + if (unlikely(src_nents < 0)) { |
||
4757 | + dev_err(dev, "Insufficient bytes (%d) in src S/G\n", |
||
4758 | + req->assoclen + req->cryptlen + |
||
4759 | + (encrypt ? authsize : 0)); |
||
4760 | + qi_cache_free(edesc); |
||
4761 | + return ERR_PTR(src_nents); |
||
4762 | + } |
||
4763 | + |
||
4764 | + mapped_src_nents = dma_map_sg(dev, req->src, src_nents, |
||
4765 | + DMA_BIDIRECTIONAL); |
||
4766 | + if (unlikely(!mapped_src_nents)) { |
||
4767 | + dev_err(dev, "unable to map source\n"); |
||
4768 | + qi_cache_free(edesc); |
||
4769 | + return ERR_PTR(-ENOMEM); |
||
4770 | + } |
||
4771 | + dst = req->dst; |
||
4772 | + } else { |
||
4773 | + src_nents = sg_nents_for_len(req->src, req->assoclen + |
||
4774 | + req->cryptlen); |
||
4775 | + if (unlikely(src_nents < 0)) { |
||
4776 | + dev_err(dev, "Insufficient bytes (%d) in src S/G\n", |
||
4777 | + req->assoclen + req->cryptlen); |
||
4778 | + qi_cache_free(edesc); |
||
4779 | + return ERR_PTR(src_nents); |
||
4780 | + } |
||
4781 | + |
||
4782 | + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen); |
||
4783 | + dst_nents = sg_nents_for_len(dst, req->cryptlen + |
||
4784 | + (encrypt ? authsize : 0)); |
||
4785 | + if (unlikely(dst_nents < 0)) { |
||
4786 | + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n", |
||
4787 | + req->cryptlen + |
||
4788 | + (encrypt ? authsize : 0)); |
||
4789 | + qi_cache_free(edesc); |
||
4790 | + return ERR_PTR(dst_nents); |
||
4791 | + } |
||
4792 | + |
||
4793 | + if (src_nents) { |
||
4794 | + mapped_src_nents = dma_map_sg(dev, req->src, |
||
4795 | + src_nents, DMA_TO_DEVICE); |
||
4796 | + if (unlikely(!mapped_src_nents)) { |
||
4797 | + dev_err(dev, "unable to map source\n"); |
||
4798 | + qi_cache_free(edesc); |
||
4799 | + return ERR_PTR(-ENOMEM); |
||
4800 | + } |
||
4801 | + } else { |
||
4802 | + mapped_src_nents = 0; |
||
4803 | + } |
||
4804 | + |
||
4805 | + mapped_dst_nents = dma_map_sg(dev, dst, dst_nents, |
||
4806 | + DMA_FROM_DEVICE); |
||
4807 | + if (unlikely(!mapped_dst_nents)) { |
||
4808 | + dev_err(dev, "unable to map destination\n"); |
||
4809 | + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); |
||
4810 | + qi_cache_free(edesc); |
||
4811 | + return ERR_PTR(-ENOMEM); |
||
4812 | + } |
||
4813 | + } |
||
4814 | + |
||
4815 | + /* |
||
4816 | + * Create S/G table: IV, src, dst. |
||
4817 | + * Input is not contiguous. |
||
4818 | + */ |
||
4819 | + qm_sg_ents = 1 + mapped_src_nents + |
||
4820 | + (mapped_dst_nents > 1 ? mapped_dst_nents : 0); |
||
4821 | + sg_table = &edesc->sgt[0]; |
||
4822 | + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); |
||
4823 | + |
||
4824 | + ivsize = crypto_aead_ivsize(tls); |
||
4825 | + iv = (u8 *)(sg_table + qm_sg_ents); |
||
4826 | + /* Make sure IV is located in a DMAable area */ |
||
4827 | + memcpy(iv, req->iv, ivsize); |
||
4828 | + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); |
||
4829 | + if (dma_mapping_error(dev, iv_dma)) { |
||
4830 | + dev_err(dev, "unable to map IV\n"); |
||
4831 | + caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0, 0, |
||
4832 | + 0); |
||
4833 | + qi_cache_free(edesc); |
||
4834 | + return ERR_PTR(-ENOMEM); |
||
4835 | + } |
||
4836 | + |
||
4837 | + edesc->src_nents = src_nents; |
||
4838 | + edesc->dst_nents = dst_nents; |
||
4839 | + edesc->dst = dst; |
||
4840 | + edesc->iv_dma = iv_dma; |
||
4841 | + |
||
4842 | + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); |
||
4843 | + qm_sg_index = 1; |
||
4844 | + |
||
4845 | + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); |
||
4846 | + qm_sg_index += mapped_src_nents; |
||
4847 | + |
||
4848 | + if (mapped_dst_nents > 1) |
||
4849 | + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table + |
||
4850 | + qm_sg_index, 0); |
||
4851 | + |
||
4852 | + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); |
||
4853 | + if (dma_mapping_error(dev, qm_sg_dma)) { |
||
4854 | + dev_err(dev, "unable to map S/G table\n"); |
||
4855 | + caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma, |
||
4856 | + ivsize, 0, 0); |
||
4857 | + qi_cache_free(edesc); |
||
4858 | + return ERR_PTR(-ENOMEM); |
||
4859 | + } |
||
4860 | + |
||
4861 | + edesc->qm_sg_dma = qm_sg_dma; |
||
4862 | + edesc->qm_sg_bytes = qm_sg_bytes; |
||
4863 | + |
||
4864 | + out_len = req->cryptlen + (encrypt ? authsize : 0); |
||
4865 | + in_len = ivsize + req->assoclen + req->cryptlen; |
||
4866 | + |
||
4867 | + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); |
||
4868 | + dpaa2_fl_set_final(in_fle, true); |
||
4869 | + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); |
||
4870 | + dpaa2_fl_set_addr(in_fle, qm_sg_dma); |
||
4871 | + dpaa2_fl_set_len(in_fle, in_len); |
||
4872 | + |
||
4873 | + if (req->dst == req->src) { |
||
4874 | + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); |
||
4875 | + dpaa2_fl_set_addr(out_fle, qm_sg_dma + |
||
4876 | + (sg_nents_for_len(req->src, req->assoclen) + |
||
4877 | + 1) * sizeof(*sg_table)); |
||
4878 | + } else if (mapped_dst_nents == 1) { |
||
4879 | + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); |
||
4880 | + dpaa2_fl_set_addr(out_fle, sg_dma_address(dst)); |
||
4881 | + } else { |
||
4882 | + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); |
||
4883 | + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index * |
||
4884 | + sizeof(*sg_table)); |
||
4885 | + } |
||
4886 | + |
||
4887 | + dpaa2_fl_set_len(out_fle, out_len); |
||
4888 | + |
||
4889 | + return edesc; |
||
4890 | +} |
||
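On the encrypt path the accelerator appends both the HMAC and the TLS 1.0 CBC padding, so tls_edesc_alloc() widens authsize to ICV-plus-pad before sizing the buffers. A worked example for tls10(hmac(sha1),cbc(aes)), i.e. a 20-byte ICV and 16-byte blocks:

	/*
	 * cryptlen = 32 plaintext bytes:
	 *   padsize  = 16 - ((32 + 20) % 16) = 16 - 4 = 12
	 *   authsize = 20 + 12 = 32
	 *   out_len  = cryptlen + authsize = 64, a whole number of AES blocks
	 *
	 * When (cryptlen + ICV) is already block-aligned, padsize comes out
	 * as a full 16-byte block, matching TLS's 1..blocksize padding rule.
	 */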
4891 | + |
||
4892 | +static int tls_set_sh_desc(struct crypto_aead *tls) |
||
4893 | +{ |
||
4894 | + struct caam_ctx *ctx = crypto_aead_ctx(tls); |
||
4895 | + unsigned int ivsize = crypto_aead_ivsize(tls); |
||
4896 | + unsigned int blocksize = crypto_aead_blocksize(tls); |
||
4897 | + struct device *dev = ctx->dev; |
||
4898 | + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); |
||
4899 | + struct caam_flc *flc; |
||
4900 | + u32 *desc; |
||
4901 | + unsigned int assoclen = 13; /* always 13 bytes for TLS */ |
||
4902 | + unsigned int data_len[2]; |
||
4903 | + u32 inl_mask; |
||
4904 | + |
||
4905 | + if (!ctx->cdata.keylen || !ctx->authsize) |
||
4906 | + return 0; |
||
4907 | + |
||
4908 | + /* |
||
4909 | + * TLS 1.0 encrypt shared descriptor |
||
4910 | + * Job Descriptor and Shared Descriptor |
||
4911 | + * must fit into the 64-word Descriptor h/w Buffer |
||
4912 | + */ |
||
4913 | + data_len[0] = ctx->adata.keylen_pad; |
||
4914 | + data_len[1] = ctx->cdata.keylen; |
||
4915 | + |
||
4916 | + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len, |
||
4917 | + &inl_mask, ARRAY_SIZE(data_len)) < 0) |
||
4918 | + return -EINVAL; |
||
4919 | + |
||
4920 | + if (inl_mask & 1) |
||
4921 | + ctx->adata.key_virt = ctx->key; |
||
4922 | + else |
||
4923 | + ctx->adata.key_dma = ctx->key_dma; |
||
4924 | + |
||
4925 | + if (inl_mask & 2) |
||
4926 | + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; |
||
4927 | + else |
||
4928 | + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; |
||
4929 | + |
||
4930 | + ctx->adata.key_inline = !!(inl_mask & 1); |
||
4931 | + ctx->cdata.key_inline = !!(inl_mask & 2); |
||
4932 | + |
||
4933 | + flc = &ctx->flc[ENCRYPT]; |
||
4934 | + desc = flc->sh_desc; |
||
4935 | + cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata, |
||
4936 | + assoclen, ivsize, ctx->authsize, blocksize, |
||
4937 | + priv->sec_attr.era); |
||
4938 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); |
||
4939 | + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], |
||
4940 | + sizeof(flc->flc) + desc_bytes(desc), |
||
4941 | + ctx->dir); |
||
4942 | + |
||
4943 | + /* |
||
4944 | + * TLS 1.0 decrypt shared descriptor |
||
4945 | + * Keys do not fit inline, regardless of algorithms used |
||
4946 | + */ |
||
4947 | + ctx->adata.key_inline = false; |
||
4948 | + ctx->adata.key_dma = ctx->key_dma; |
||
4949 | + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; |
||
4950 | + |
||
4951 | + flc = &ctx->flc[DECRYPT]; |
||
4952 | + desc = flc->sh_desc; |
||
4953 | + cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize, |
||
4954 | + ctx->authsize, blocksize, priv->sec_attr.era); |
||
4955 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
4956 | + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], |
||
4957 | + sizeof(flc->flc) + desc_bytes(desc), |
||
4958 | + ctx->dir); |
||
4959 | + |
||
4960 | + return 0; |
||
4961 | +} |
||
4962 | + |
||
4963 | +static int tls_setkey(struct crypto_aead *tls, const u8 *key, |
||
4964 | + unsigned int keylen) |
||
4965 | +{ |
||
4966 | + struct caam_ctx *ctx = crypto_aead_ctx(tls); |
||
4967 | + struct device *dev = ctx->dev; |
||
4968 | + struct crypto_authenc_keys keys; |
||
4969 | + |
||
4970 | + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
||
4971 | + goto badkey; |
||
4972 | + |
||
4973 | +#ifdef DEBUG |
||
4974 | + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n", |
||
4975 | + keys.authkeylen + keys.enckeylen, keys.enckeylen, |
||
4976 | + keys.authkeylen); |
||
4977 | + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", |
||
4978 | + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
||
4979 | +#endif |
||
4980 | + |
||
4981 | + ctx->adata.keylen = keys.authkeylen; |
||
4982 | + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & |
||
4983 | + OP_ALG_ALGSEL_MASK); |
||
4984 | + |
||
4985 | + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) |
||
4986 | + goto badkey; |
||
4987 | + |
||
4988 | + memcpy(ctx->key, keys.authkey, keys.authkeylen); |
||
4989 | + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); |
||
4990 | + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad + |
||
4991 | + keys.enckeylen, ctx->dir); |
||
4992 | +#ifdef DEBUG |
||
4993 | + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", |
||
4994 | + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, |
||
4995 | + ctx->adata.keylen_pad + keys.enckeylen, 1); |
||
4996 | +#endif |
||
4997 | + |
||
4998 | + ctx->cdata.keylen = keys.enckeylen; |
||
4999 | + |
||
5000 | + return tls_set_sh_desc(tls); |
||
5001 | +badkey: |
||
5002 | + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN); |
||
5003 | + return -EINVAL; |
||
5004 | +} |
||
5005 | + |
||
5006 | +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize) |
||
5007 | +{ |
||
5008 | + struct caam_ctx *ctx = crypto_aead_ctx(tls); |
||
5009 | + |
||
5010 | + ctx->authsize = authsize; |
||
5011 | + tls_set_sh_desc(tls); |
||
5012 | + |
||
5013 | + return 0; |
||
5014 | +} |
||
5015 | + |
||
5016 | +static int gcm_set_sh_desc(struct crypto_aead *aead) |
||
5017 | +{ |
||
5018 | + struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
5019 | + struct device *dev = ctx->dev; |
||
5020 | + unsigned int ivsize = crypto_aead_ivsize(aead); |
||
5021 | + struct caam_flc *flc; |
||
5022 | + u32 *desc; |
||
5023 | + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - |
||
5024 | + ctx->cdata.keylen; |
||
5025 | + |
||
5026 | + if (!ctx->cdata.keylen || !ctx->authsize) |
||
5027 | + return 0; |
||
5028 | + |
||
5029 | + /* |
||
5030 | + * AES GCM encrypt shared descriptor |
||
5031 | + * Job Descriptor and Shared Descriptor |
||
5032 | + * must fit into the 64-word Descriptor h/w Buffer |
||
5033 | + */ |
||
5034 | + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) { |
||
5035 | + ctx->cdata.key_inline = true; |
||
5036 | + ctx->cdata.key_virt = ctx->key; |
||
5037 | + } else { |
||
5038 | + ctx->cdata.key_inline = false; |
||
5039 | + ctx->cdata.key_dma = ctx->key_dma; |
||
5040 | + } |
||
5041 | + |
||
5042 | + flc = &ctx->flc[ENCRYPT]; |
||
5043 | + desc = flc->sh_desc; |
||
5044 | + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true); |
||
5045 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
5046 | + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], |
||
5047 | + sizeof(flc->flc) + desc_bytes(desc), |
||
5048 | + ctx->dir); |
||
5049 | + |
||
5050 | + /* |
||
5051 | + * Job Descriptor and Shared Descriptors |
||
5052 | + * must all fit into the 64-word Descriptor h/w Buffer |
||
5053 | + */ |
||
5054 | + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) { |
||
5055 | + ctx->cdata.key_inline = true; |
||
5056 | + ctx->cdata.key_virt = ctx->key; |
||
5057 | + } else { |
||
5058 | + ctx->cdata.key_inline = false; |
||
5059 | + ctx->cdata.key_dma = ctx->key_dma; |
||
5060 | + } |
||
5061 | + |
||
5062 | + flc = &ctx->flc[DECRYPT]; |
||
5063 | + desc = flc->sh_desc; |
||
5064 | + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true); |
||
5065 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
5066 | + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], |
||
5067 | + sizeof(flc->flc) + desc_bytes(desc), |
||
5068 | + ctx->dir); |
||
5069 | + |
||
5070 | + return 0; |
||
5071 | +} |
||
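rem_bytes above is the descriptor budget left once the fixed job/IO preamble and the key have been accounted for; encapsulation and decapsulation descriptors differ in length, so the inline-or-reference decision is re-taken per direction. Schematically, with the length constants taken from the surrounding headers:

	/* per-direction key placement in the shared descriptor */
	bool fits = rem_bytes >= DESC_QI_GCM_ENC_LEN;	/* or DESC_QI_GCM_DEC_LEN */

	ctx->cdata.key_inline = fits;
	if (fits)
		ctx->cdata.key_virt = ctx->key;		/* key body copied into the desc */
	else
		ctx->cdata.key_dma = ctx->key_dma;	/* desc holds a pointer instead */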
5072 | + |
||
5073 | +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) |
||
5074 | +{ |
||
5075 | + struct caam_ctx *ctx = crypto_aead_ctx(authenc); |
||
5076 | + |
||
5077 | + ctx->authsize = authsize; |
||
5078 | + gcm_set_sh_desc(authenc); |
||
5079 | + |
||
5080 | + return 0; |
||
5081 | +} |
||
5082 | + |
||
5083 | +static int gcm_setkey(struct crypto_aead *aead, |
||
5084 | + const u8 *key, unsigned int keylen) |
||
5085 | +{ |
||
5086 | + struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
5087 | + struct device *dev = ctx->dev; |
||
5088 | + |
||
5089 | +#ifdef DEBUG |
||
5090 | + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", |
||
5091 | + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
||
5092 | +#endif |
||
5093 | + |
||
5094 | + memcpy(ctx->key, key, keylen); |
||
5095 | + dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir); |
||
5096 | + ctx->cdata.keylen = keylen; |
||
5097 | + |
||
5098 | + return gcm_set_sh_desc(aead); |
||
5099 | +} |
||
5100 | + |
||
5101 | +static int rfc4106_set_sh_desc(struct crypto_aead *aead) |
||
5102 | +{ |
||
5103 | + struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
5104 | + struct device *dev = ctx->dev; |
||
5105 | + unsigned int ivsize = crypto_aead_ivsize(aead); |
||
5106 | + struct caam_flc *flc; |
||
5107 | + u32 *desc; |
||
5108 | + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - |
||
5109 | + ctx->cdata.keylen; |
||
5110 | + |
||
5111 | + if (!ctx->cdata.keylen || !ctx->authsize) |
||
5112 | + return 0; |
||
5113 | + |
||
5114 | + ctx->cdata.key_virt = ctx->key; |
||
5115 | + |
||
5116 | + /* |
||
5117 | + * RFC4106 encrypt shared descriptor |
||
5118 | + * Job Descriptor and Shared Descriptor |
||
5119 | + * must fit into the 64-word Descriptor h/w Buffer |
||
5120 | + */ |
||
5121 | + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) { |
||
5122 | + ctx->cdata.key_inline = true; |
||
5123 | + } else { |
||
5124 | + ctx->cdata.key_inline = false; |
||
5125 | + ctx->cdata.key_dma = ctx->key_dma; |
||
5126 | + } |
||
5127 | + |
||
5128 | + flc = &ctx->flc[ENCRYPT]; |
||
5129 | + desc = flc->sh_desc; |
||
5130 | + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize, |
||
5131 | + true); |
||
5132 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
5133 | + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], |
||
5134 | + sizeof(flc->flc) + desc_bytes(desc), |
||
5135 | + ctx->dir); |
||
5136 | + |
||
5137 | + /* |
||
5138 | + * Job Descriptor and Shared Descriptors |
||
5139 | + * must all fit into the 64-word Descriptor h/w Buffer |
||
5140 | + */ |
||
5141 | + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) { |
||
5142 | + ctx->cdata.key_inline = true; |
||
5143 | + } else { |
||
5144 | + ctx->cdata.key_inline = false; |
||
5145 | + ctx->cdata.key_dma = ctx->key_dma; |
||
5146 | + } |
||
5147 | + |
||
5148 | + flc = &ctx->flc[DECRYPT]; |
||
5149 | + desc = flc->sh_desc; |
||
5150 | + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize, |
||
5151 | + true); |
||
5152 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
5153 | + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], |
||
5154 | + sizeof(flc->flc) + desc_bytes(desc), |
||
5155 | + ctx->dir); |
||
5156 | + |
||
5157 | + return 0; |
||
5158 | +} |
||
5159 | + |
||
5160 | +static int rfc4106_setauthsize(struct crypto_aead *authenc, |
||
5161 | + unsigned int authsize) |
||
5162 | +{ |
||
5163 | + struct caam_ctx *ctx = crypto_aead_ctx(authenc); |
||
5164 | + |
||
5165 | + ctx->authsize = authsize; |
||
5166 | + rfc4106_set_sh_desc(authenc); |
||
5167 | + |
||
5168 | + return 0; |
||
5169 | +} |
||
5170 | + |
||
5171 | +static int rfc4106_setkey(struct crypto_aead *aead, |
||
5172 | + const u8 *key, unsigned int keylen) |
||
5173 | +{ |
||
5174 | + struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
5175 | + struct device *dev = ctx->dev; |
||
5176 | + |
||
5177 | + if (keylen < 4) |
||
5178 | + return -EINVAL; |
||
5179 | + |
||
5180 | +#ifdef DEBUG |
||
5181 | + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", |
||
5182 | + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
||
5183 | +#endif |
||
5184 | + |
||
5185 | + memcpy(ctx->key, key, keylen); |
||
5186 | + /* |
||
5187 | + * The last four bytes of the key material are used as the salt value |
||
5188 | + * in the nonce. Update the AES key length. |
||
5189 | + */ |
||
5190 | + ctx->cdata.keylen = keylen - 4; |
||
5191 | + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen, |
||
5192 | + ctx->dir); |
||
5193 | + |
||
5194 | + return rfc4106_set_sh_desc(aead); |
||
5195 | +} |
||
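For rfc4106(gcm(aes)) the final 4 bytes handed to setkey() are the nonce salt rather than cipher key material, as the comment above notes. A worked example:

	/*
	 * 20-byte rfc4106 key = 16-byte AES-128 key || 4-byte salt
	 *
	 *   rfc4106_setkey(aead, key, 20)  ->  ctx->cdata.keylen = 20 - 4 = 16
	 *
	 * The salt remains at ctx->key + 16 and is folded into the per-packet
	 * nonce by the shared descriptor; only keylen bytes act as the AES key.
	 */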
5196 | + |
||
5197 | +static int rfc4543_set_sh_desc(struct crypto_aead *aead) |
||
5198 | +{ |
||
5199 | + struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
5200 | + struct device *dev = ctx->dev; |
||
5201 | + unsigned int ivsize = crypto_aead_ivsize(aead); |
||
5202 | + struct caam_flc *flc; |
||
5203 | + u32 *desc; |
||
5204 | + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - |
||
5205 | + ctx->cdata.keylen; |
||
5206 | + |
||
5207 | + if (!ctx->cdata.keylen || !ctx->authsize) |
||
5208 | + return 0; |
||
5209 | + |
||
5210 | + ctx->cdata.key_virt = ctx->key; |
||
5211 | + |
||
5212 | + /* |
||
5213 | + * RFC4543 encrypt shared descriptor |
||
5214 | + * Job Descriptor and Shared Descriptor |
||
5215 | + * must fit into the 64-word Descriptor h/w Buffer |
||
5216 | + */ |
||
5217 | + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) { |
||
5218 | + ctx->cdata.key_inline = true; |
||
5219 | + } else { |
||
5220 | + ctx->cdata.key_inline = false; |
||
5221 | + ctx->cdata.key_dma = ctx->key_dma; |
||
5222 | + } |
||
5223 | + |
||
5224 | + flc = &ctx->flc[ENCRYPT]; |
||
5225 | + desc = flc->sh_desc; |
||
5226 | + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize, |
||
5227 | + true); |
||
5228 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
5229 | + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], |
||
5230 | + sizeof(flc->flc) + desc_bytes(desc), |
||
5231 | + ctx->dir); |
||
5232 | + |
||
5233 | + /* |
||
5234 | + * Job Descriptor and Shared Descriptors |
||
5235 | + * must all fit into the 64-word Descriptor h/w Buffer |
||
5236 | + */ |
||
5237 | + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) { |
||
5238 | + ctx->cdata.key_inline = true; |
||
5239 | + } else { |
||
5240 | + ctx->cdata.key_inline = false; |
||
5241 | + ctx->cdata.key_dma = ctx->key_dma; |
||
5242 | + } |
||
5243 | + |
||
5244 | + flc = &ctx->flc[DECRYPT]; |
||
5245 | + desc = flc->sh_desc; |
||
5246 | + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize, |
||
5247 | + true); |
||
5248 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
5249 | + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], |
||
5250 | + sizeof(flc->flc) + desc_bytes(desc), |
||
5251 | + ctx->dir); |
||
5252 | + |
||
5253 | + return 0; |
||
5254 | +} |
||
5255 | + |
||
5256 | +static int rfc4543_setauthsize(struct crypto_aead *authenc, |
||
5257 | + unsigned int authsize) |
||
5258 | +{ |
||
5259 | + struct caam_ctx *ctx = crypto_aead_ctx(authenc); |
||
5260 | + |
||
5261 | + ctx->authsize = authsize; |
||
5262 | + rfc4543_set_sh_desc(authenc); |
||
5263 | + |
||
5264 | + return 0; |
||
5265 | +} |
||
5266 | + |
||
5267 | +static int rfc4543_setkey(struct crypto_aead *aead, |
||
5268 | + const u8 *key, unsigned int keylen) |
||
5269 | +{ |
||
5270 | + struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
5271 | + struct device *dev = ctx->dev; |
||
5272 | + |
||
5273 | + if (keylen < 4) |
||
5274 | + return -EINVAL; |
||
5275 | + |
||
5276 | +#ifdef DEBUG |
||
5277 | + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", |
||
5278 | + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
||
5279 | +#endif |
||
5280 | + |
||
5281 | + memcpy(ctx->key, key, keylen); |
||
5282 | + /* |
||
5283 | + * The last four bytes of the key material are used as the salt value |
||
5284 | + * in the nonce. Update the AES key length. |
||
5285 | + */ |
||
5286 | + ctx->cdata.keylen = keylen - 4; |
||
5287 | + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen, |
||
5288 | + ctx->dir); |
||
5289 | + |
||
5290 | + return rfc4543_set_sh_desc(aead); |
||
5291 | +} |
||
5292 | + |
||
5293 | +static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, |
||
5294 | + unsigned int keylen) |
||
5295 | +{ |
||
5296 | + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
||
5297 | + struct caam_skcipher_alg *alg = |
||
5298 | + container_of(crypto_skcipher_alg(skcipher), |
||
5299 | + struct caam_skcipher_alg, skcipher); |
||
5300 | + struct device *dev = ctx->dev; |
||
5301 | + struct caam_flc *flc; |
||
5302 | + unsigned int ivsize = crypto_skcipher_ivsize(skcipher); |
||
5303 | + u32 *desc; |
||
5304 | + u32 ctx1_iv_off = 0; |
||
5305 | + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == |
||
5306 | + OP_ALG_AAI_CTR_MOD128); |
||
5307 | + const bool is_rfc3686 = alg->caam.rfc3686; |
||
5308 | + |
||
5309 | +#ifdef DEBUG |
||
5310 | + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", |
||
5311 | + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
||
5312 | +#endif |
||
5313 | + /* |
||
5314 | + * AES-CTR needs to load IV in CONTEXT1 reg |
||
5315 | + * at an offset of 128 bits (16 bytes) |
||
5316 | + * CONTEXT1[255:128] = IV |
||
5317 | + */ |
||
5318 | + if (ctr_mode) |
||
5319 | + ctx1_iv_off = 16; |
||
5320 | + |
||
5321 | + /* |
||
5322 | + * RFC3686 specific: |
||
5323 | + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} |
||
5324 | + * | *key = {KEY, NONCE} |
||
5325 | + */ |
||
5326 | + if (is_rfc3686) { |
||
5327 | + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; |
||
5328 | + keylen -= CTR_RFC3686_NONCE_SIZE; |
||
5329 | + } |
||
5330 | + |
||
5331 | + ctx->cdata.keylen = keylen; |
||
5332 | + ctx->cdata.key_virt = key; |
||
5333 | + ctx->cdata.key_inline = true; |
||
5334 | + |
||
5335 | + /* skcipher_encrypt shared descriptor */ |
||
5336 | + flc = &ctx->flc[ENCRYPT]; |
||
5337 | + desc = flc->sh_desc; |
||
5338 | + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, |
||
5339 | + is_rfc3686, ctx1_iv_off); |
||
5340 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
5341 | + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], |
||
5342 | + sizeof(flc->flc) + desc_bytes(desc), |
||
5343 | + ctx->dir); |
||
5344 | + |
||
5345 | + /* skcipher_decrypt shared descriptor */ |
||
5346 | + flc = &ctx->flc[DECRYPT]; |
||
5347 | + desc = flc->sh_desc; |
||
5348 | + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, |
||
5349 | + is_rfc3686, ctx1_iv_off); |
||
5350 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
5351 | + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], |
||
5352 | + sizeof(flc->flc) + desc_bytes(desc), |
||
5353 | + ctx->dir); |
||
5354 | + |
||
5355 | + return 0; |
||
5356 | +} |
||
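The *key = {KEY, NONCE} convention above means an rfc3686(ctr(aes)) caller appends the 4-byte nonce to the AES key, and skcipher_setkey() peels it back off. A worked example for AES-128:

	/*
	 * keylen passed in  = 16 + CTR_RFC3686_NONCE_SIZE (4) = 20
	 * ctx->cdata.keylen = 20 - 4 = 16   (cipher key only)
	 * ctx1_iv_off       = 16 + 4 = 20   (nonce sits before the IV)
	 *
	 * CONTEXT1[255:128] is then {NONCE (4) | IV (8) | COUNTER (4)},
	 * exactly the counter-block layout RFC 3686 requires.
	 */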
5357 | + |
||
5358 | +static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, |
||
5359 | + unsigned int keylen) |
||
5360 | +{ |
||
5361 | + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
||
5362 | + struct device *dev = ctx->dev; |
||
5363 | + struct caam_flc *flc; |
||
5364 | + u32 *desc; |
||
5365 | + |
||
5366 | + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { |
||
5367 | + dev_err(dev, "key size mismatch\n"); |
||
5368 | + crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); |
||
5369 | + return -EINVAL; |
||
5370 | + } |
||
5371 | + |
||
5372 | + ctx->cdata.keylen = keylen; |
||
5373 | + ctx->cdata.key_virt = key; |
||
5374 | + ctx->cdata.key_inline = true; |
||
5375 | + |
||
5376 | + /* xts_skcipher_encrypt shared descriptor */ |
||
5377 | + flc = &ctx->flc[ENCRYPT]; |
||
5378 | + desc = flc->sh_desc; |
||
5379 | + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata); |
||
5380 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
5381 | + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], |
||
5382 | + sizeof(flc->flc) + desc_bytes(desc), |
||
5383 | + ctx->dir); |
||
5384 | + |
||
5385 | + /* xts_skcipher_decrypt shared descriptor */ |
||
5386 | + flc = &ctx->flc[DECRYPT]; |
||
5387 | + desc = flc->sh_desc; |
||
5388 | + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata); |
||
5389 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
5390 | + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], |
||
5391 | + sizeof(flc->flc) + desc_bytes(desc), |
||
5392 | + ctx->dir); |
||
5393 | + |
||
5394 | + return 0; |
||
5395 | +} |
||
5396 | + |
||
5397 | +static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req) |
||
5398 | +{ |
||
5399 | + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
||
5400 | + struct caam_request *req_ctx = skcipher_request_ctx(req); |
||
5401 | + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; |
||
5402 | + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; |
||
5403 | + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
||
5404 | + struct device *dev = ctx->dev; |
||
5405 | + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
||
5406 | + GFP_KERNEL : GFP_ATOMIC; |
||
5407 | + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; |
||
5408 | + struct skcipher_edesc *edesc; |
||
5409 | + dma_addr_t iv_dma; |
||
5410 | + u8 *iv; |
||
5411 | + int ivsize = crypto_skcipher_ivsize(skcipher); |
||
5412 | + int dst_sg_idx, qm_sg_ents, qm_sg_bytes; |
||
5413 | + struct dpaa2_sg_entry *sg_table; |
||
5414 | + |
||
5415 | + src_nents = sg_nents_for_len(req->src, req->cryptlen); |
||
5416 | + if (unlikely(src_nents < 0)) { |
||
5417 | + dev_err(dev, "Insufficient bytes (%d) in src S/G\n", |
||
5418 | + req->cryptlen); |
||
5419 | + return ERR_PTR(src_nents); |
||
5420 | + } |
||
5421 | + |
||
5422 | + if (unlikely(req->dst != req->src)) { |
||
5423 | + dst_nents = sg_nents_for_len(req->dst, req->cryptlen); |
||
5424 | + if (unlikely(dst_nents < 0)) { |
||
5425 | + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n", |
||
5426 | + req->cryptlen); |
||
5427 | + return ERR_PTR(dst_nents); |
||
5428 | + } |
||
5429 | + |
||
5430 | + mapped_src_nents = dma_map_sg(dev, req->src, src_nents, |
||
5431 | + DMA_TO_DEVICE); |
||
5432 | + if (unlikely(!mapped_src_nents)) { |
||
5433 | + dev_err(dev, "unable to map source\n"); |
||
5434 | + return ERR_PTR(-ENOMEM); |
||
5435 | + } |
||
5436 | + |
||
5437 | + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, |
||
5438 | + DMA_FROM_DEVICE); |
||
5439 | + if (unlikely(!mapped_dst_nents)) { |
||
5440 | + dev_err(dev, "unable to map destination\n"); |
||
5441 | + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); |
||
5442 | + return ERR_PTR(-ENOMEM); |
||
5443 | + } |
||
5444 | + } else { |
||
5445 | + mapped_src_nents = dma_map_sg(dev, req->src, src_nents, |
||
5446 | + DMA_BIDIRECTIONAL); |
||
5447 | + if (unlikely(!mapped_src_nents)) { |
||
5448 | + dev_err(dev, "unable to map source\n"); |
||
5449 | + return ERR_PTR(-ENOMEM); |
||
5450 | + } |
||
5451 | + } |
||
5452 | + |
||
5453 | + qm_sg_ents = 1 + mapped_src_nents; |
||
5454 | + dst_sg_idx = qm_sg_ents; |
||
5455 | + |
||
5456 | + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; |
||
5457 | + qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry); |
||
5458 | + if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + |
||
5459 | + ivsize > CAAM_QI_MEMCACHE_SIZE)) { |
||
5460 | + dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", |
||
5461 | + qm_sg_ents, ivsize); |
||
5462 | + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, |
||
5463 | + 0, 0, 0); |
||
5464 | + return ERR_PTR(-ENOMEM); |
||
5465 | + } |
||
5466 | + |
||
5467 | + /* allocate space for base edesc, link tables and IV */ |
||
5468 | + edesc = qi_cache_zalloc(GFP_DMA | flags); |
||
5469 | + if (unlikely(!edesc)) { |
||
5470 | + dev_err(dev, "could not allocate extended descriptor\n"); |
||
5471 | + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, |
||
5472 | + 0, 0, 0); |
||
5473 | + return ERR_PTR(-ENOMEM); |
||
5474 | + } |
||
5475 | + |
||
5476 | + /* Make sure IV is located in a DMAable area */ |
||
5477 | + sg_table = &edesc->sgt[0]; |
||
5478 | + iv = (u8 *)(sg_table + qm_sg_ents); |
||
5479 | + memcpy(iv, req->iv, ivsize); |
||
5480 | + |
||
5481 | + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); |
||
5482 | + if (dma_mapping_error(dev, iv_dma)) { |
||
5483 | + dev_err(dev, "unable to map IV\n"); |
||
5484 | + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, |
||
5485 | + 0, 0, 0); |
||
5486 | + qi_cache_free(edesc); |
||
5487 | + return ERR_PTR(-ENOMEM); |
||
5488 | + } |
||
5489 | + |
||
5490 | + edesc->src_nents = src_nents; |
||
5491 | + edesc->dst_nents = dst_nents; |
||
5492 | + edesc->iv_dma = iv_dma; |
||
5493 | + edesc->qm_sg_bytes = qm_sg_bytes; |
||
5494 | + |
||
5495 | + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); |
||
5496 | + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); |
||
5497 | + |
||
5498 | + if (mapped_dst_nents > 1) |
||
5499 | + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + |
||
5500 | + dst_sg_idx, 0); |
||
5501 | + |
||
5502 | + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes, |
||
5503 | + DMA_TO_DEVICE); |
||
5504 | + if (dma_mapping_error(dev, edesc->qm_sg_dma)) { |
||
5505 | + dev_err(dev, "unable to map S/G table\n"); |
||
5506 | + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, |
||
5507 | + iv_dma, ivsize, 0, 0); |
||
5508 | + qi_cache_free(edesc); |
||
5509 | + return ERR_PTR(-ENOMEM); |
||
5510 | + } |
||
5511 | + |
||
5512 | + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); |
||
5513 | + dpaa2_fl_set_final(in_fle, true); |
||
5514 | + dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize); |
||
5515 | + dpaa2_fl_set_len(out_fle, req->cryptlen); |
||
5516 | + |
||
5517 | + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); |
||
5518 | + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); |
||
5519 | + |
||
5520 | + if (req->src == req->dst) { |
||
5521 | + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); |
||
5522 | + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + |
||
5523 | + sizeof(*sg_table)); |
||
5524 | + } else if (mapped_dst_nents > 1) { |
||
5525 | + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); |
||
5526 | + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx * |
||
5527 | + sizeof(*sg_table)); |
||
5528 | + } else { |
||
5529 | + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); |
||
5530 | + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); |
||
5531 | + } |
||
5532 | + |
||
5533 | + return edesc; |
||
5534 | +} |
||
5535 | + |
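/*
 * [Editorial note, not part of the patch] skcipher_edesc_alloc() builds a
 * single qDMA scatter/gather table laid out as: entry 0 holds the IV,
 * entries 1..mapped_src_nents describe the source, and -- when dst != src
 * and the destination is scattered -- a second run starts at
 * dst_sg_idx = 1 + mapped_src_nents. A hedged sketch of the sizing
 * arithmetic used above, as a hypothetical helper:
 */
static unsigned int qm_sg_table_bytes(int mapped_src_nents,
				      int mapped_dst_nents)
{
	int ents = 1 + mapped_src_nents;	/* IV + source entries */

	if (mapped_dst_nents > 1)
		ents += mapped_dst_nents;	/* separate destination run */
	return ents * sizeof(struct dpaa2_sg_entry);
}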
||
5536 | +static void aead_unmap(struct device *dev, struct aead_edesc *edesc, |
||
5537 | + struct aead_request *req) |
||
5538 | +{ |
||
5539 | + struct crypto_aead *aead = crypto_aead_reqtfm(req); |
||
5540 | + int ivsize = crypto_aead_ivsize(aead); |
||
5541 | + |
||
5542 | + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, |
||
5543 | + edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); |
||
5544 | + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); |
||
5545 | +} |
||
5546 | + |
||
5547 | +static void tls_unmap(struct device *dev, struct tls_edesc *edesc, |
||
5548 | + struct aead_request *req) |
||
5549 | +{ |
||
5550 | + struct crypto_aead *tls = crypto_aead_reqtfm(req); |
||
5551 | + int ivsize = crypto_aead_ivsize(tls); |
||
5552 | + |
||
5553 | + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents, |
||
5554 | + edesc->dst_nents, edesc->iv_dma, ivsize, edesc->qm_sg_dma, |
||
5555 | + edesc->qm_sg_bytes); |
||
5556 | +} |
||
5557 | + |
||
5558 | +static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc, |
||
5559 | + struct skcipher_request *req) |
||
5560 | +{ |
||
5561 | + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
||
5562 | + int ivsize = crypto_skcipher_ivsize(skcipher); |
||
5563 | + |
||
5564 | + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, |
||
5565 | + edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); |
||
5566 | +} |
||
5567 | + |
||
5568 | +static void aead_encrypt_done(void *cbk_ctx, u32 status) |
||
5569 | +{ |
||
5570 | + struct crypto_async_request *areq = cbk_ctx; |
||
5571 | + struct aead_request *req = container_of(areq, struct aead_request, |
||
5572 | + base); |
||
5573 | + struct caam_request *req_ctx = to_caam_req(areq); |
||
5574 | + struct aead_edesc *edesc = req_ctx->edesc; |
||
5575 | + struct crypto_aead *aead = crypto_aead_reqtfm(req); |
||
5576 | + struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
5577 | + int ecode = 0; |
||
5578 | + |
||
5579 | +#ifdef DEBUG |
||
5580 | + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
||
5581 | +#endif |
||
5582 | + |
||
5583 | + if (unlikely(status)) { |
||
5584 | + caam_qi2_strstatus(ctx->dev, status); |
||
5585 | + ecode = -EIO; |
||
5586 | + } |
||
5587 | + |
||
5588 | + aead_unmap(ctx->dev, edesc, req); |
||
5589 | + qi_cache_free(edesc); |
||
5590 | + aead_request_complete(req, ecode); |
||
5591 | +} |
||
5592 | + |
||
5593 | +static void aead_decrypt_done(void *cbk_ctx, u32 status) |
||
5594 | +{ |
||
5595 | + struct crypto_async_request *areq = cbk_ctx; |
||
5596 | + struct aead_request *req = container_of(areq, struct aead_request, |
||
5597 | + base); |
||
5598 | + struct caam_request *req_ctx = to_caam_req(areq); |
||
5599 | + struct aead_edesc *edesc = req_ctx->edesc; |
||
5600 | + struct crypto_aead *aead = crypto_aead_reqtfm(req); |
||
5601 | + struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
5602 | + int ecode = 0; |
||
5603 | + |
||
5604 | +#ifdef DEBUG |
||
5605 | + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
||
5606 | +#endif |
||
5607 | + |
||
5608 | + if (unlikely(status)) { |
||
5609 | + caam_qi2_strstatus(ctx->dev, status); |
||
5610 | + /* |
||
5611 | + * verify the hw auth (ICV) check passed, else return -EBADMSG |
||
5612 | + */ |
||
5613 | + if ((status & JRSTA_CCBERR_ERRID_MASK) == |
||
5614 | + JRSTA_CCBERR_ERRID_ICVCHK) |
||
5615 | + ecode = -EBADMSG; |
||
5616 | + else |
||
5617 | + ecode = -EIO; |
||
5618 | + } |
||
5619 | + |
||
5620 | + aead_unmap(ctx->dev, edesc, req); |
||
5621 | + qi_cache_free(edesc); |
||
5622 | + aead_request_complete(req, ecode); |
||
5623 | +} |
||
5624 | + |
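/*
 * [Editorial note, not part of the patch] The decrypt callback above maps
 * the CAAM status word onto crypto API error codes: an ICV (integrity
 * check value) mismatch becomes -EBADMSG so callers can distinguish
 * "authentication failed" from a generic engine error (-EIO). The same
 * mapping, extracted into a hypothetical helper:
 */
static int caam_status_to_errno(u32 status)
{
	if (!status)
		return 0;
	if ((status & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		return -EBADMSG;	/* ICV / authentication failure */
	return -EIO;			/* any other engine error */
}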
||
5625 | +static int aead_encrypt(struct aead_request *req) |
||
5626 | +{ |
||
5627 | + struct aead_edesc *edesc; |
||
5628 | + struct crypto_aead *aead = crypto_aead_reqtfm(req); |
||
5629 | + struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
5630 | + struct caam_request *caam_req = aead_request_ctx(req); |
||
5631 | + int ret; |
||
5632 | + |
||
5633 | + /* allocate extended descriptor */ |
||
5634 | + edesc = aead_edesc_alloc(req, true); |
||
5635 | + if (IS_ERR(edesc)) |
||
5636 | + return PTR_ERR(edesc); |
||
5637 | + |
||
5638 | + caam_req->flc = &ctx->flc[ENCRYPT]; |
||
5639 | + caam_req->flc_dma = ctx->flc_dma[ENCRYPT]; |
||
5640 | + caam_req->cbk = aead_encrypt_done; |
||
5641 | + caam_req->ctx = &req->base; |
||
5642 | + caam_req->edesc = edesc; |
||
5643 | + ret = dpaa2_caam_enqueue(ctx->dev, caam_req); |
||
5644 | + if (ret != -EINPROGRESS && |
||
5645 | + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { |
||
5646 | + aead_unmap(ctx->dev, edesc, req); |
||
5647 | + qi_cache_free(edesc); |
||
5648 | + } |
||
5649 | + |
||
5650 | + return ret; |
||
5651 | +} |
||
5652 | + |
||
5653 | +static int aead_decrypt(struct aead_request *req) |
||
5654 | +{ |
||
5655 | + struct aead_edesc *edesc; |
||
5656 | + struct crypto_aead *aead = crypto_aead_reqtfm(req); |
||
5657 | + struct caam_ctx *ctx = crypto_aead_ctx(aead); |
||
5658 | + struct caam_request *caam_req = aead_request_ctx(req); |
||
5659 | + int ret; |
||
5660 | + |
||
5661 | + /* allocate extended descriptor */ |
||
5662 | + edesc = aead_edesc_alloc(req, false); |
||
5663 | + if (IS_ERR(edesc)) |
||
5664 | + return PTR_ERR(edesc); |
||
5665 | + |
||
5666 | + caam_req->flc = &ctx->flc[DECRYPT]; |
||
5667 | + caam_req->flc_dma = ctx->flc_dma[DECRYPT]; |
||
5668 | + caam_req->cbk = aead_decrypt_done; |
||
5669 | + caam_req->ctx = &req->base; |
||
5670 | + caam_req->edesc = edesc; |
||
5671 | + ret = dpaa2_caam_enqueue(ctx->dev, caam_req); |
||
5672 | + if (ret != -EINPROGRESS && |
||
5673 | + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { |
||
5674 | + aead_unmap(ctx->dev, edesc, req); |
||
5675 | + qi_cache_free(edesc); |
||
5676 | + } |
||
5677 | + |
||
5678 | + return ret; |
||
5679 | +} |
||
5680 | + |
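/*
 * [Editorial note, not part of the patch] aead_encrypt()/aead_decrypt()
 * follow the usual asynchronous crypto convention: -EINPROGRESS means the
 * request was accepted and the callback will complete it; -EBUSY together
 * with CRYPTO_TFM_REQ_MAY_BACKLOG means it was backlogged but will still
 * complete. Any other return value is a synchronous failure, which is why
 * that branch unmaps and frees the edesc locally. The same pattern repeats
 * in the tls and skcipher submit paths below. A hedged sketch of a caller
 * that waits synchronously, using the standard crypto_wait helpers:
 */
#include <crypto/aead.h>
#include <linux/crypto.h>

static int aead_encrypt_and_wait(struct aead_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	/* folds -EINPROGRESS/-EBUSY into a sleep until completion */
	return crypto_wait_req(crypto_aead_encrypt(req), &wait);
}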
||
5681 | +static void tls_encrypt_done(void *cbk_ctx, u32 status) |
||
5682 | +{ |
||
5683 | + struct crypto_async_request *areq = cbk_ctx; |
||
5684 | + struct aead_request *req = container_of(areq, struct aead_request, |
||
5685 | + base); |
||
5686 | + struct caam_request *req_ctx = to_caam_req(areq); |
||
5687 | + struct tls_edesc *edesc = req_ctx->edesc; |
||
5688 | + struct crypto_aead *tls = crypto_aead_reqtfm(req); |
||
5689 | + struct caam_ctx *ctx = crypto_aead_ctx(tls); |
||
5690 | + int ecode = 0; |
||
5691 | + |
||
5692 | +#ifdef DEBUG |
||
5693 | + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
||
5694 | +#endif |
||
5695 | + |
||
5696 | + if (unlikely(status)) { |
||
5697 | + caam_qi2_strstatus(ctx->dev, status); |
||
5698 | + ecode = -EIO; |
||
5699 | + } |
||
5700 | + |
||
5701 | + tls_unmap(ctx->dev, edesc, req); |
||
5702 | + qi_cache_free(edesc); |
||
5703 | + aead_request_complete(req, ecode); |
||
5704 | +} |
||
5705 | + |
||
5706 | +static void tls_decrypt_done(void *cbk_ctx, u32 status) |
||
5707 | +{ |
||
5708 | + struct crypto_async_request *areq = cbk_ctx; |
||
5709 | + struct aead_request *req = container_of(areq, struct aead_request, |
||
5710 | + base); |
||
5711 | + struct caam_request *req_ctx = to_caam_req(areq); |
||
5712 | + struct tls_edesc *edesc = req_ctx->edesc; |
||
5713 | + struct crypto_aead *tls = crypto_aead_reqtfm(req); |
||
5714 | + struct caam_ctx *ctx = crypto_aead_ctx(tls); |
||
5715 | + int ecode = 0; |
||
5716 | + |
||
5717 | +#ifdef DEBUG |
||
5718 | + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
||
5719 | +#endif |
||
5720 | + |
||
5721 | + if (unlikely(status)) { |
||
5722 | + caam_qi2_strstatus(ctx->dev, status); |
||
5723 | + /* |
||
5724 | + * verify the hw auth (ICV) check passed, else return -EBADMSG |
||
5725 | + */ |
||
5726 | + if ((status & JRSTA_CCBERR_ERRID_MASK) == |
||
5727 | + JRSTA_CCBERR_ERRID_ICVCHK) |
||
5728 | + ecode = -EBADMSG; |
||
5729 | + else |
||
5730 | + ecode = -EIO; |
||
5731 | + } |
||
5732 | + |
||
5733 | + tls_unmap(ctx->dev, edesc, req); |
||
5734 | + qi_cache_free(edesc); |
||
5735 | + aead_request_complete(req, ecode); |
||
5736 | +} |
||
5737 | + |
||
5738 | +static int tls_encrypt(struct aead_request *req) |
||
5739 | +{ |
||
5740 | + struct tls_edesc *edesc; |
||
5741 | + struct crypto_aead *tls = crypto_aead_reqtfm(req); |
||
5742 | + struct caam_ctx *ctx = crypto_aead_ctx(tls); |
||
5743 | + struct caam_request *caam_req = aead_request_ctx(req); |
||
5744 | + int ret; |
||
5745 | + |
||
5746 | + /* allocate extended descriptor */ |
||
5747 | + edesc = tls_edesc_alloc(req, true); |
||
5748 | + if (IS_ERR(edesc)) |
||
5749 | + return PTR_ERR(edesc); |
||
5750 | + |
||
5751 | + caam_req->flc = &ctx->flc[ENCRYPT]; |
||
5752 | + caam_req->flc_dma = ctx->flc_dma[ENCRYPT]; |
||
5753 | + caam_req->cbk = tls_encrypt_done; |
||
5754 | + caam_req->ctx = &req->base; |
||
5755 | + caam_req->edesc = edesc; |
||
5756 | + ret = dpaa2_caam_enqueue(ctx->dev, caam_req); |
||
5757 | + if (ret != -EINPROGRESS && |
||
5758 | + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { |
||
5759 | + tls_unmap(ctx->dev, edesc, req); |
||
5760 | + qi_cache_free(edesc); |
||
5761 | + } |
||
5762 | + |
||
5763 | + return ret; |
||
5764 | +} |
||
5765 | + |
||
5766 | +static int tls_decrypt(struct aead_request *req) |
||
5767 | +{ |
||
5768 | + struct tls_edesc *edesc; |
||
5769 | + struct crypto_aead *tls = crypto_aead_reqtfm(req); |
||
5770 | + struct caam_ctx *ctx = crypto_aead_ctx(tls); |
||
5771 | + struct caam_request *caam_req = aead_request_ctx(req); |
||
5772 | + int ret; |
||
5773 | + |
||
5774 | + /* allocate extended descriptor */ |
||
5775 | + edesc = tls_edesc_alloc(req, false); |
||
5776 | + if (IS_ERR(edesc)) |
||
5777 | + return PTR_ERR(edesc); |
||
5778 | + |
||
5779 | + caam_req->flc = &ctx->flc[DECRYPT]; |
||
5780 | + caam_req->flc_dma = ctx->flc_dma[DECRYPT]; |
||
5781 | + caam_req->cbk = tls_decrypt_done; |
||
5782 | + caam_req->ctx = &req->base; |
||
5783 | + caam_req->edesc = edesc; |
||
5784 | + ret = dpaa2_caam_enqueue(ctx->dev, caam_req); |
||
5785 | + if (ret != -EINPROGRESS && |
||
5786 | + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { |
||
5787 | + tls_unmap(ctx->dev, edesc, req); |
||
5788 | + qi_cache_free(edesc); |
||
5789 | + } |
||
5790 | + |
||
5791 | + return ret; |
||
5792 | +} |
||
5793 | + |
||
5794 | +static int ipsec_gcm_encrypt(struct aead_request *req) |
||
5795 | +{ |
||
5796 | + if (req->assoclen < 8) |
||
5797 | + return -EINVAL; |
||
5798 | + |
||
5799 | + return aead_encrypt(req); |
||
5800 | +} |
||
5801 | + |
||
5802 | +static int ipsec_gcm_decrypt(struct aead_request *req) |
||
5803 | +{ |
||
5804 | + if (req->assoclen < 8) |
||
5805 | + return -EINVAL; |
||
5806 | + |
||
5807 | + return aead_decrypt(req); |
||
5808 | +} |
||
5809 | + |
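/*
 * [Editorial note, not part of the patch] In the kernel's rfc4106/rfc4543
 * convention the last 8 bytes of the associated data carry the ESP IV, so a
 * request with assoclen < 8 cannot be well formed; the two wrappers above
 * reject it before any DMA mapping happens. As a trivial sketch:
 */
static inline bool ipsec_gcm_assoclen_ok(struct aead_request *req)
{
	return req->assoclen >= 8;	/* must cover the 8-byte ESP IV */
}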
||
5810 | +static void skcipher_encrypt_done(void *cbk_ctx, u32 status) |
||
5811 | +{ |
||
5812 | + struct crypto_async_request *areq = cbk_ctx; |
||
5813 | + struct skcipher_request *req = skcipher_request_cast(areq); |
||
5814 | + struct caam_request *req_ctx = to_caam_req(areq); |
||
5815 | + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
||
5816 | + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
||
5817 | + struct skcipher_edesc *edesc = req_ctx->edesc; |
||
5818 | + int ecode = 0; |
||
5819 | + int ivsize = crypto_skcipher_ivsize(skcipher); |
||
5820 | + |
||
5821 | +#ifdef DEBUG |
||
5822 | + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
||
5823 | +#endif |
||
5824 | + |
||
5825 | + if (unlikely(status)) { |
||
5826 | + caam_qi2_strstatus(ctx->dev, status); |
||
5827 | + ecode = -EIO; |
||
5828 | + } |
||
5829 | + |
||
5830 | +#ifdef DEBUG |
||
5831 | + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ", |
||
5832 | + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, |
||
5833 | + edesc->src_nents > 1 ? 100 : ivsize, 1); |
||
5834 | + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", |
||
5835 | + DUMP_PREFIX_ADDRESS, 16, 4, req->dst, |
||
5836 | + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); |
||
5837 | +#endif |
||
5838 | + |
||
5839 | + skcipher_unmap(ctx->dev, edesc, req); |
||
5840 | + |
||
5841 | + /* |
||
5842 | + * The crypto API expects us to set the IV (req->iv) to the last |
||
5843 | + * ciphertext block. This is used e.g. by the CTS mode. |
||
5844 | + */ |
||
5845 | + scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize, |
||
5846 | + ivsize, 0); |
||
5847 | + |
||
5848 | + qi_cache_free(edesc); |
||
5849 | + skcipher_request_complete(req, ecode); |
||
5850 | +} |
||
5851 | + |
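/*
 * [Editorial note, not part of the patch] The crypto API requires that on
 * completion req->iv holds the last ciphertext block, so a follow-up
 * request can chain off it (CBC/CTS style). scatterwalk_map_and_copy(buf,
 * sg, start, nbytes, out) copies nbytes at offset start between a flat
 * buffer and a scatterlist; out = 0 reads from the scatterlist, which is
 * how the callback above saves the final block out of req->dst:
 */
static void save_output_iv(struct skcipher_request *req, int ivsize)
{
	/* copy the last ivsize ciphertext bytes from req->dst into req->iv */
	scatterwalk_map_and_copy(req->iv, req->dst,
				 req->cryptlen - ivsize, ivsize, 0);
}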
||
5852 | +static void skcipher_decrypt_done(void *cbk_ctx, u32 status) |
||
5853 | +{ |
||
5854 | + struct crypto_async_request *areq = cbk_ctx; |
||
5855 | + struct skcipher_request *req = skcipher_request_cast(areq); |
||
5856 | + struct caam_request *req_ctx = to_caam_req(areq); |
||
5857 | + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
||
5858 | + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
||
5859 | + struct skcipher_edesc *edesc = req_ctx->edesc; |
||
5860 | + int ecode = 0; |
||
5861 | +#ifdef DEBUG |
||
5862 | + int ivsize = crypto_skcipher_ivsize(skcipher); |
||
5863 | + |
||
5864 | + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
||
5865 | +#endif |
||
5866 | + |
||
5867 | + if (unlikely(status)) { |
||
5868 | + caam_qi2_strstatus(ctx->dev, status); |
||
5869 | + ecode = -EIO; |
||
5870 | + } |
||
5871 | + |
||
5872 | +#ifdef DEBUG |
||
5873 | + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ", |
||
5874 | + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, |
||
5875 | + edesc->src_nents > 1 ? 100 : ivsize, 1); |
||
5876 | + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", |
||
5877 | + DUMP_PREFIX_ADDRESS, 16, 4, req->dst, |
||
5878 | + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); |
||
5879 | +#endif |
||
5880 | + |
||
5881 | + skcipher_unmap(ctx->dev, edesc, req); |
||
5882 | + qi_cache_free(edesc); |
||
5883 | + skcipher_request_complete(req, ecode); |
||
5884 | +} |
||
5885 | + |
||
5886 | +static int skcipher_encrypt(struct skcipher_request *req) |
||
5887 | +{ |
||
5888 | + struct skcipher_edesc *edesc; |
||
5889 | + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
||
5890 | + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
||
5891 | + struct caam_request *caam_req = skcipher_request_ctx(req); |
||
5892 | + int ret; |
||
5893 | + |
||
5894 | + /* allocate extended descriptor */ |
||
5895 | + edesc = skcipher_edesc_alloc(req); |
||
5896 | + if (IS_ERR(edesc)) |
||
5897 | + return PTR_ERR(edesc); |
||
5898 | + |
||
5899 | + caam_req->flc = &ctx->flc[ENCRYPT]; |
||
5900 | + caam_req->flc_dma = ctx->flc_dma[ENCRYPT]; |
||
5901 | + caam_req->cbk = skcipher_encrypt_done; |
||
5902 | + caam_req->ctx = &req->base; |
||
5903 | + caam_req->edesc = edesc; |
||
5904 | + ret = dpaa2_caam_enqueue(ctx->dev, caam_req); |
||
5905 | + if (ret != -EINPROGRESS && |
||
5906 | + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { |
||
5907 | + skcipher_unmap(ctx->dev, edesc, req); |
||
5908 | + qi_cache_free(edesc); |
||
5909 | + } |
||
5910 | + |
||
5911 | + return ret; |
||
5912 | +} |
||
5913 | + |
||
5914 | +static int skcipher_decrypt(struct skcipher_request *req) |
||
5915 | +{ |
||
5916 | + struct skcipher_edesc *edesc; |
||
5917 | + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
||
5918 | + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
||
5919 | + struct caam_request *caam_req = skcipher_request_ctx(req); |
||
5920 | + int ivsize = crypto_skcipher_ivsize(skcipher); |
||
5921 | + int ret; |
||
5922 | + |
||
5923 | + /* allocate extended descriptor */ |
||
5924 | + edesc = skcipher_edesc_alloc(req); |
||
5925 | + if (IS_ERR(edesc)) |
||
5926 | + return PTR_ERR(edesc); |
||
5927 | + |
||
5928 | + /* |
||
5929 | + * The crypto API expects us to set the IV (req->iv) to the last |
||
5930 | + * ciphertext block. |
||
5931 | + */ |
||
5932 | + scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize, |
||
5933 | + ivsize, 0); |
||
5934 | + |
||
5935 | + caam_req->flc = &ctx->flc[DECRYPT]; |
||
5936 | + caam_req->flc_dma = ctx->flc_dma[DECRYPT]; |
||
5937 | + caam_req->cbk = skcipher_decrypt_done; |
||
5938 | + caam_req->ctx = &req->base; |
||
5939 | + caam_req->edesc = edesc; |
||
5940 | + ret = dpaa2_caam_enqueue(ctx->dev, caam_req); |
||
5941 | + if (ret != -EINPROGRESS && |
||
5942 | + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { |
||
5943 | + skcipher_unmap(ctx->dev, edesc, req); |
||
5944 | + qi_cache_free(edesc); |
||
5945 | + } |
||
5946 | + |
||
5947 | + return ret; |
||
5948 | +} |
||
5949 | + |
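/*
 * [Editorial note, not part of the patch] On the decrypt side the last
 * *ciphertext* block is captured from req->src before the job is enqueued:
 * for in-place requests (src == dst) the engine overwrites it with
 * plaintext, so saving it after completion would be too late. That is why
 * skcipher_decrypt() above performs the scatterwalk_map_and_copy() up
 * front, mirroring the encrypt-completion copy but reading from the source.
 */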
||
5950 | +static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam, |
||
5951 | + bool uses_dkp) |
||
5952 | +{ |
||
5953 | + dma_addr_t dma_addr; |
||
5954 | + int i; |
||
5955 | + |
||
5956 | + /* copy descriptor header template value */ |
||
5957 | + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; |
||
5958 | + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; |
||
5959 | + |
||
5960 | + ctx->dev = caam->dev; |
||
5961 | + ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; |
||
5962 | + |
||
5963 | + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, |
||
5964 | + offsetof(struct caam_ctx, flc_dma), |
||
5965 | + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); |
||
5966 | + if (dma_mapping_error(ctx->dev, dma_addr)) { |
||
5967 | + dev_err(ctx->dev, "unable to map key, shared descriptors\n"); |
||
5968 | + return -ENOMEM; |
||
5969 | + } |
||
5970 | + |
||
5971 | + for (i = 0; i < NUM_OP; i++) |
||
5972 | + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]); |
||
5973 | + ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]); |
||
5974 | + |
||
5975 | + return 0; |
||
5976 | +} |
||
5977 | + |
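/*
 * [Editorial note, not part of the patch] caam_cra_init() maps the leading
 * part of struct caam_ctx -- the per-operation flow contexts plus the key
 * bytes that follow them -- with one dma_map_single_attrs() call of length
 * offsetof(struct caam_ctx, flc_dma), then derives the individual DMA
 * handles by offsetting into that single mapping:
 *
 *   flc_dma[op] = base + op * sizeof(struct caam_flc)
 *   key_dma     = base + NUM_OP * sizeof(struct caam_flc)
 *
 * This works only because flc[] and the key material are laid out
 * contiguously at the start of struct caam_ctx.
 */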
||
5978 | +static int caam_cra_init_skcipher(struct crypto_skcipher *tfm) |
||
5979 | +{ |
||
5980 | + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); |
||
5981 | + struct caam_skcipher_alg *caam_alg = |
||
5982 | + container_of(alg, typeof(*caam_alg), skcipher); |
||
5983 | + |
||
5984 | + crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request)); |
||
5985 | + return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false); |
||
5986 | +} |
||
5987 | + |
||
5988 | +static int caam_cra_init_aead(struct crypto_aead *tfm) |
||
5989 | +{ |
||
5990 | + struct aead_alg *alg = crypto_aead_alg(tfm); |
||
5991 | + struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg), |
||
5992 | + aead); |
||
5993 | + |
||
5994 | + crypto_aead_set_reqsize(tfm, sizeof(struct caam_request)); |
||
5995 | + return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam, |
||
5996 | + (alg->setkey == aead_setkey) || |
||
5997 | + (alg->setkey == tls_setkey)); |
||
5998 | +} |
||
5999 | + |
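/*
 * [Editorial note, not part of the patch] The uses_dkp flag passed here
 * feeds the DMA direction chosen in caam_cra_init(): algorithms whose
 * setkey goes through aead_setkey()/tls_setkey() use the CAAM DKP (Derived
 * Key Protocol), where the engine rewrites the in-memory key with the
 * derived split key, so the mapping must be DMA_BIDIRECTIONAL rather than
 * DMA_TO_DEVICE.
 */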
||
6000 | +static void caam_exit_common(struct caam_ctx *ctx) |
||
6001 | +{ |
||
6002 | + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], |
||
6003 | + offsetof(struct caam_ctx, flc_dma), ctx->dir, |
||
6004 | + DMA_ATTR_SKIP_CPU_SYNC); |
||
6005 | +} |
||
6006 | + |
||
6007 | +static void caam_cra_exit(struct crypto_skcipher *tfm) |
||
6008 | +{ |
||
6009 | + caam_exit_common(crypto_skcipher_ctx(tfm)); |
||
6010 | +} |
||
6011 | + |
||
6012 | +static void caam_cra_exit_aead(struct crypto_aead *tfm) |
||
6013 | +{ |
||
6014 | + caam_exit_common(crypto_aead_ctx(tfm)); |
||
6015 | +} |
||
6016 | + |
||
6017 | +static struct caam_skcipher_alg driver_algs[] = { |
||
6018 | + { |
||
6019 | + .skcipher = { |
||
6020 | + .base = { |
||
6021 | + .cra_name = "cbc(aes)", |
||
6022 | + .cra_driver_name = "cbc-aes-caam-qi2", |
||
6023 | + .cra_blocksize = AES_BLOCK_SIZE, |
||
6024 | + }, |
||
6025 | + .setkey = skcipher_setkey, |
||
6026 | + .encrypt = skcipher_encrypt, |
||
6027 | + .decrypt = skcipher_decrypt, |
||
6028 | + .min_keysize = AES_MIN_KEY_SIZE, |
||
6029 | + .max_keysize = AES_MAX_KEY_SIZE, |
||
6030 | + .ivsize = AES_BLOCK_SIZE, |
||
6031 | + }, |
||
6032 | + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
||
6033 | + }, |
||
6034 | + { |
||
6035 | + .skcipher = { |
||
6036 | + .base = { |
||
6037 | + .cra_name = "cbc(des3_ede)", |
||
6038 | + .cra_driver_name = "cbc-3des-caam-qi2", |
||
6039 | + .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
||
6040 | + }, |
||
6041 | + .setkey = skcipher_setkey, |
||
6042 | + .encrypt = skcipher_encrypt, |
||
6043 | + .decrypt = skcipher_decrypt, |
||
6044 | + .min_keysize = DES3_EDE_KEY_SIZE, |
||
6045 | + .max_keysize = DES3_EDE_KEY_SIZE, |
||
6046 | + .ivsize = DES3_EDE_BLOCK_SIZE, |
||
6047 | + }, |
||
6048 | + .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
||
6049 | + }, |
||
6050 | + { |
||
6051 | + .skcipher = { |
||
6052 | + .base = { |
||
6053 | + .cra_name = "cbc(des)", |
||
6054 | + .cra_driver_name = "cbc-des-caam-qi2", |
||
6055 | + .cra_blocksize = DES_BLOCK_SIZE, |
||
6056 | + }, |
||
6057 | + .setkey = skcipher_setkey, |
||
6058 | + .encrypt = skcipher_encrypt, |
||
6059 | + .decrypt = skcipher_decrypt, |
||
6060 | + .min_keysize = DES_KEY_SIZE, |
||
6061 | + .max_keysize = DES_KEY_SIZE, |
||
6062 | + .ivsize = DES_BLOCK_SIZE, |
||
6063 | + }, |
||
6064 | + .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
||
6065 | + }, |
||
6066 | + { |
||
6067 | + .skcipher = { |
||
6068 | + .base = { |
||
6069 | + .cra_name = "ctr(aes)", |
||
6070 | + .cra_driver_name = "ctr-aes-caam-qi2", |
||
6071 | + .cra_blocksize = 1, |
||
6072 | + }, |
||
6073 | + .setkey = skcipher_setkey, |
||
6074 | + .encrypt = skcipher_encrypt, |
||
6075 | + .decrypt = skcipher_decrypt, |
||
6076 | + .min_keysize = AES_MIN_KEY_SIZE, |
||
6077 | + .max_keysize = AES_MAX_KEY_SIZE, |
||
6078 | + .ivsize = AES_BLOCK_SIZE, |
||
6079 | + .chunksize = AES_BLOCK_SIZE, |
||
6080 | + }, |
||
6081 | + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | |
||
6082 | + OP_ALG_AAI_CTR_MOD128, |
||
6083 | + }, |
||
6084 | + { |
||
6085 | + .skcipher = { |
||
6086 | + .base = { |
||
6087 | + .cra_name = "rfc3686(ctr(aes))", |
||
6088 | + .cra_driver_name = "rfc3686-ctr-aes-caam-qi2", |
||
6089 | + .cra_blocksize = 1, |
||
6090 | + }, |
||
6091 | + .setkey = skcipher_setkey, |
||
6092 | + .encrypt = skcipher_encrypt, |
||
6093 | + .decrypt = skcipher_decrypt, |
||
6094 | + .min_keysize = AES_MIN_KEY_SIZE + |
||
6095 | + CTR_RFC3686_NONCE_SIZE, |
||
6096 | + .max_keysize = AES_MAX_KEY_SIZE + |
||
6097 | + CTR_RFC3686_NONCE_SIZE, |
||
6098 | + .ivsize = CTR_RFC3686_IV_SIZE, |
||
6099 | + .chunksize = AES_BLOCK_SIZE, |
||
6100 | + }, |
||
6101 | + .caam = { |
||
6102 | + .class1_alg_type = OP_ALG_ALGSEL_AES | |
||
6103 | + OP_ALG_AAI_CTR_MOD128, |
||
6104 | + .rfc3686 = true, |
||
6105 | + }, |
||
6106 | + }, |
||
6107 | + { |
||
6108 | + .skcipher = { |
||
6109 | + .base = { |
||
6110 | + .cra_name = "xts(aes)", |
||
6111 | + .cra_driver_name = "xts-aes-caam-qi2", |
||
6112 | + .cra_blocksize = AES_BLOCK_SIZE, |
||
6113 | + }, |
||
6114 | + .setkey = xts_skcipher_setkey, |
||
6115 | + .encrypt = skcipher_encrypt, |
||
6116 | + .decrypt = skcipher_decrypt, |
||
6117 | + .min_keysize = 2 * AES_MIN_KEY_SIZE, |
||
6118 | + .max_keysize = 2 * AES_MAX_KEY_SIZE, |
||
6119 | + .ivsize = AES_BLOCK_SIZE, |
||
6120 | + }, |
||
6121 | + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, |
||
6122 | + } |
||
6123 | +}; |
||
6124 | + |
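/*
 * [Editorial note, not part of the patch] Once registered, these templates
 * are reached through the regular kernel crypto API: a user asks for
 * "cbc(aes)" and the core selects the highest-priority implementation
 * ("cbc-aes-caam-qi2" on DPAA2 hardware). A hedged sketch of one in-place
 * encryption call with a hypothetical helper, assuming the caller supplies
 * a kmalloc'd (DMA-capable, not on-stack) 16-byte buffer plus key and IV:
 */
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int one_cbc_aes_block(u8 *buf16, const u8 *key16, u8 *iv16)
{
	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	ret = crypto_skcipher_setkey(tfm, key16, 16);
	if (ret)
		goto out_tfm;
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}
	sg_init_one(&sg, buf16, 16);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 16, iv16);
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}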
||
6125 | +static struct caam_aead_alg driver_aeads[] = { |
||
6126 | + { |
||
6127 | + .aead = { |
||
6128 | + .base = { |
||
6129 | + .cra_name = "rfc4106(gcm(aes))", |
||
6130 | + .cra_driver_name = "rfc4106-gcm-aes-caam-qi2", |
||
6131 | + .cra_blocksize = 1, |
||
6132 | + }, |
||
6133 | + .setkey = rfc4106_setkey, |
||
6134 | + .setauthsize = rfc4106_setauthsize, |
||
6135 | + .encrypt = ipsec_gcm_encrypt, |
||
6136 | + .decrypt = ipsec_gcm_decrypt, |
||
6137 | + .ivsize = 8, |
||
6138 | + .maxauthsize = AES_BLOCK_SIZE, |
||
6139 | + }, |
||
6140 | + .caam = { |
||
6141 | + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, |
||
6142 | + }, |
||
6143 | + }, |
||
6144 | + { |
||
6145 | + .aead = { |
||
6146 | + .base = { |
||
6147 | + .cra_name = "rfc4543(gcm(aes))", |
||
6148 | + .cra_driver_name = "rfc4543-gcm-aes-caam-qi2", |
||
6149 | + .cra_blocksize = 1, |
||
6150 | + }, |
||
6151 | + .setkey = rfc4543_setkey, |
||
6152 | + .setauthsize = rfc4543_setauthsize, |
||
6153 | + .encrypt = ipsec_gcm_encrypt, |
||
6154 | + .decrypt = ipsec_gcm_decrypt, |
||
6155 | + .ivsize = 8, |
||
6156 | + .maxauthsize = AES_BLOCK_SIZE, |
||
6157 | + }, |
||
6158 | + .caam = { |
||
6159 | + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, |
||
6160 | + }, |
||
6161 | + }, |
||
6162 | + /* Galois Counter Mode */ |
||
6163 | + { |
||
6164 | + .aead = { |
||
6165 | + .base = { |
||
6166 | + .cra_name = "gcm(aes)", |
||
6167 | + .cra_driver_name = "gcm-aes-caam-qi2", |
||
6168 | + .cra_blocksize = 1, |
||
6169 | + }, |
||
6170 | + .setkey = gcm_setkey, |
||
6171 | + .setauthsize = gcm_setauthsize, |
||
6172 | + .encrypt = aead_encrypt, |
||
6173 | + .decrypt = aead_decrypt, |
||
6174 | + .ivsize = 12, |
||
6175 | + .maxauthsize = AES_BLOCK_SIZE, |
||
6176 | + }, |
||
6177 | + .caam = { |
||
6178 | + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, |
||
6179 | + } |
||
6180 | + }, |
||
6181 | + /* single-pass ipsec_esp descriptor */ |
||
6182 | + { |
||
6183 | + .aead = { |
||
6184 | + .base = { |
||
6185 | + .cra_name = "authenc(hmac(md5),cbc(aes))", |
||
6186 | + .cra_driver_name = "authenc-hmac-md5-" |
||
6187 | + "cbc-aes-caam-qi2", |
||
6188 | + .cra_blocksize = AES_BLOCK_SIZE, |
||
6189 | + }, |
||
6190 | + .setkey = aead_setkey, |
||
6191 | + .setauthsize = aead_setauthsize, |
||
6192 | + .encrypt = aead_encrypt, |
||
6193 | + .decrypt = aead_decrypt, |
||
6194 | + .ivsize = AES_BLOCK_SIZE, |
||
6195 | + .maxauthsize = MD5_DIGEST_SIZE, |
||
6196 | + }, |
||
6197 | + .caam = { |
||
6198 | + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
||
6199 | + .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
||
6200 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6201 | + } |
||
6202 | + }, |
||
6203 | + { |
||
6204 | + .aead = { |
||
6205 | + .base = { |
||
6206 | + .cra_name = "echainiv(authenc(hmac(md5)," |
||
6207 | + "cbc(aes)))", |
||
6208 | + .cra_driver_name = "echainiv-authenc-hmac-md5-" |
||
6209 | + "cbc-aes-caam-qi2", |
||
6210 | + .cra_blocksize = AES_BLOCK_SIZE, |
||
6211 | + }, |
||
6212 | + .setkey = aead_setkey, |
||
6213 | + .setauthsize = aead_setauthsize, |
||
6214 | + .encrypt = aead_encrypt, |
||
6215 | + .decrypt = aead_decrypt, |
||
6216 | + .ivsize = AES_BLOCK_SIZE, |
||
6217 | + .maxauthsize = MD5_DIGEST_SIZE, |
||
6218 | + }, |
||
6219 | + .caam = { |
||
6220 | + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
||
6221 | + .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
||
6222 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6223 | + .geniv = true, |
||
6224 | + } |
||
6225 | + }, |
||
6226 | + { |
||
6227 | + .aead = { |
||
6228 | + .base = { |
||
6229 | + .cra_name = "authenc(hmac(sha1),cbc(aes))", |
||
6230 | + .cra_driver_name = "authenc-hmac-sha1-" |
||
6231 | + "cbc-aes-caam-qi2", |
||
6232 | + .cra_blocksize = AES_BLOCK_SIZE, |
||
6233 | + }, |
||
6234 | + .setkey = aead_setkey, |
||
6235 | + .setauthsize = aead_setauthsize, |
||
6236 | + .encrypt = aead_encrypt, |
||
6237 | + .decrypt = aead_decrypt, |
||
6238 | + .ivsize = AES_BLOCK_SIZE, |
||
6239 | + .maxauthsize = SHA1_DIGEST_SIZE, |
||
6240 | + }, |
||
6241 | + .caam = { |
||
6242 | + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
||
6243 | + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
||
6244 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6245 | + } |
||
6246 | + }, |
||
6247 | + { |
||
6248 | + .aead = { |
||
6249 | + .base = { |
||
6250 | + .cra_name = "echainiv(authenc(hmac(sha1)," |
||
6251 | + "cbc(aes)))", |
||
6252 | + .cra_driver_name = "echainiv-authenc-" |
||
6253 | + "hmac-sha1-cbc-aes-caam-qi2", |
||
6254 | + .cra_blocksize = AES_BLOCK_SIZE, |
||
6255 | + }, |
||
6256 | + .setkey = aead_setkey, |
||
6257 | + .setauthsize = aead_setauthsize, |
||
6258 | + .encrypt = aead_encrypt, |
||
6259 | + .decrypt = aead_decrypt, |
||
6260 | + .ivsize = AES_BLOCK_SIZE, |
||
6261 | + .maxauthsize = SHA1_DIGEST_SIZE, |
||
6262 | + }, |
||
6263 | + .caam = { |
||
6264 | + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
||
6265 | + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
||
6266 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6267 | + .geniv = true, |
||
6268 | + }, |
||
6269 | + }, |
||
6270 | + { |
||
6271 | + .aead = { |
||
6272 | + .base = { |
||
6273 | + .cra_name = "authenc(hmac(sha224),cbc(aes))", |
||
6274 | + .cra_driver_name = "authenc-hmac-sha224-" |
||
6275 | + "cbc-aes-caam-qi2", |
||
6276 | + .cra_blocksize = AES_BLOCK_SIZE, |
||
6277 | + }, |
||
6278 | + .setkey = aead_setkey, |
||
6279 | + .setauthsize = aead_setauthsize, |
||
6280 | + .encrypt = aead_encrypt, |
||
6281 | + .decrypt = aead_decrypt, |
||
6282 | + .ivsize = AES_BLOCK_SIZE, |
||
6283 | + .maxauthsize = SHA224_DIGEST_SIZE, |
||
6284 | + }, |
||
6285 | + .caam = { |
||
6286 | + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
||
6287 | + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
||
6288 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6289 | + } |
||
6290 | + }, |
||
6291 | + { |
||
6292 | + .aead = { |
||
6293 | + .base = { |
||
6294 | + .cra_name = "echainiv(authenc(hmac(sha224)," |
||
6295 | + "cbc(aes)))", |
||
6296 | + .cra_driver_name = "echainiv-authenc-" |
||
6297 | + "hmac-sha224-cbc-aes-caam-qi2", |
||
6298 | + .cra_blocksize = AES_BLOCK_SIZE, |
||
6299 | + }, |
||
6300 | + .setkey = aead_setkey, |
||
6301 | + .setauthsize = aead_setauthsize, |
||
6302 | + .encrypt = aead_encrypt, |
||
6303 | + .decrypt = aead_decrypt, |
||
6304 | + .ivsize = AES_BLOCK_SIZE, |
||
6305 | + .maxauthsize = SHA224_DIGEST_SIZE, |
||
6306 | + }, |
||
6307 | + .caam = { |
||
6308 | + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
||
6309 | + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
||
6310 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6311 | + .geniv = true, |
||
6312 | + } |
||
6313 | + }, |
||
6314 | + { |
||
6315 | + .aead = { |
||
6316 | + .base = { |
||
6317 | + .cra_name = "authenc(hmac(sha256),cbc(aes))", |
||
6318 | + .cra_driver_name = "authenc-hmac-sha256-" |
||
6319 | + "cbc-aes-caam-qi2", |
||
6320 | + .cra_blocksize = AES_BLOCK_SIZE, |
||
6321 | + }, |
||
6322 | + .setkey = aead_setkey, |
||
6323 | + .setauthsize = aead_setauthsize, |
||
6324 | + .encrypt = aead_encrypt, |
||
6325 | + .decrypt = aead_decrypt, |
||
6326 | + .ivsize = AES_BLOCK_SIZE, |
||
6327 | + .maxauthsize = SHA256_DIGEST_SIZE, |
||
6328 | + }, |
||
6329 | + .caam = { |
||
6330 | + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
||
6331 | + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
||
6332 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6333 | + } |
||
6334 | + }, |
||
6335 | + { |
||
6336 | + .aead = { |
||
6337 | + .base = { |
||
6338 | + .cra_name = "echainiv(authenc(hmac(sha256)," |
||
6339 | + "cbc(aes)))", |
||
6340 | + .cra_driver_name = "echainiv-authenc-" |
||
6341 | + "hmac-sha256-cbc-aes-" |
||
6342 | + "caam-qi2", |
||
6343 | + .cra_blocksize = AES_BLOCK_SIZE, |
||
6344 | + }, |
||
6345 | + .setkey = aead_setkey, |
||
6346 | + .setauthsize = aead_setauthsize, |
||
6347 | + .encrypt = aead_encrypt, |
||
6348 | + .decrypt = aead_decrypt, |
||
6349 | + .ivsize = AES_BLOCK_SIZE, |
||
6350 | + .maxauthsize = SHA256_DIGEST_SIZE, |
||
6351 | + }, |
||
6352 | + .caam = { |
||
6353 | + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
||
6354 | + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
||
6355 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6356 | + .geniv = true, |
||
6357 | + } |
||
6358 | + }, |
||
6359 | + { |
||
6360 | + .aead = { |
||
6361 | + .base = { |
||
6362 | + .cra_name = "authenc(hmac(sha384),cbc(aes))", |
||
6363 | + .cra_driver_name = "authenc-hmac-sha384-" |
||
6364 | + "cbc-aes-caam-qi2", |
||
6365 | + .cra_blocksize = AES_BLOCK_SIZE, |
||
6366 | + }, |
||
6367 | + .setkey = aead_setkey, |
||
6368 | + .setauthsize = aead_setauthsize, |
||
6369 | + .encrypt = aead_encrypt, |
||
6370 | + .decrypt = aead_decrypt, |
||
6371 | + .ivsize = AES_BLOCK_SIZE, |
||
6372 | + .maxauthsize = SHA384_DIGEST_SIZE, |
||
6373 | + }, |
||
6374 | + .caam = { |
||
6375 | + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
||
6376 | + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
||
6377 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6378 | + } |
||
6379 | + }, |
||
6380 | + { |
||
6381 | + .aead = { |
||
6382 | + .base = { |
||
6383 | + .cra_name = "echainiv(authenc(hmac(sha384)," |
||
6384 | + "cbc(aes)))", |
||
6385 | + .cra_driver_name = "echainiv-authenc-" |
||
6386 | + "hmac-sha384-cbc-aes-" |
||
6387 | + "caam-qi2", |
||
6388 | + .cra_blocksize = AES_BLOCK_SIZE, |
||
6389 | + }, |
||
6390 | + .setkey = aead_setkey, |
||
6391 | + .setauthsize = aead_setauthsize, |
||
6392 | + .encrypt = aead_encrypt, |
||
6393 | + .decrypt = aead_decrypt, |
||
6394 | + .ivsize = AES_BLOCK_SIZE, |
||
6395 | + .maxauthsize = SHA384_DIGEST_SIZE, |
||
6396 | + }, |
||
6397 | + .caam = { |
||
6398 | + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
||
6399 | + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
||
6400 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6401 | + .geniv = true, |
||
6402 | + } |
||
6403 | + }, |
||
6404 | + { |
||
6405 | + .aead = { |
||
6406 | + .base = { |
||
6407 | + .cra_name = "authenc(hmac(sha512),cbc(aes))", |
||
6408 | + .cra_driver_name = "authenc-hmac-sha512-" |
||
6409 | + "cbc-aes-caam-qi2", |
||
6410 | + .cra_blocksize = AES_BLOCK_SIZE, |
||
6411 | + }, |
||
6412 | + .setkey = aead_setkey, |
||
6413 | + .setauthsize = aead_setauthsize, |
||
6414 | + .encrypt = aead_encrypt, |
||
6415 | + .decrypt = aead_decrypt, |
||
6416 | + .ivsize = AES_BLOCK_SIZE, |
||
6417 | + .maxauthsize = SHA512_DIGEST_SIZE, |
||
6418 | + }, |
||
6419 | + .caam = { |
||
6420 | + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
||
6421 | + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
||
6422 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6423 | + } |
||
6424 | + }, |
||
6425 | + { |
||
6426 | + .aead = { |
||
6427 | + .base = { |
||
6428 | + .cra_name = "echainiv(authenc(hmac(sha512)," |
||
6429 | + "cbc(aes)))", |
||
6430 | + .cra_driver_name = "echainiv-authenc-" |
||
6431 | + "hmac-sha512-cbc-aes-" |
||
6432 | + "caam-qi2", |
||
6433 | + .cra_blocksize = AES_BLOCK_SIZE, |
||
6434 | + }, |
||
6435 | + .setkey = aead_setkey, |
||
6436 | + .setauthsize = aead_setauthsize, |
||
6437 | + .encrypt = aead_encrypt, |
||
6438 | + .decrypt = aead_decrypt, |
||
6439 | + .ivsize = AES_BLOCK_SIZE, |
||
6440 | + .maxauthsize = SHA512_DIGEST_SIZE, |
||
6441 | + }, |
||
6442 | + .caam = { |
||
6443 | + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
||
6444 | + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
||
6445 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6446 | + .geniv = true, |
||
6447 | + } |
||
6448 | + }, |
||
6449 | + { |
||
6450 | + .aead = { |
||
6451 | + .base = { |
||
6452 | + .cra_name = "authenc(hmac(md5),cbc(des3_ede))", |
||
6453 | + .cra_driver_name = "authenc-hmac-md5-" |
||
6454 | + "cbc-des3_ede-caam-qi2", |
||
6455 | + .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
||
6456 | + }, |
||
6457 | + .setkey = aead_setkey, |
||
6458 | + .setauthsize = aead_setauthsize, |
||
6459 | + .encrypt = aead_encrypt, |
||
6460 | + .decrypt = aead_decrypt, |
||
6461 | + .ivsize = DES3_EDE_BLOCK_SIZE, |
||
6462 | + .maxauthsize = MD5_DIGEST_SIZE, |
||
6463 | + }, |
||
6464 | + .caam = { |
||
6465 | + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
||
6466 | + .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
||
6467 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6468 | + } |
||
6469 | + }, |
||
6470 | + { |
||
6471 | + .aead = { |
||
6472 | + .base = { |
||
6473 | + .cra_name = "echainiv(authenc(hmac(md5)," |
||
6474 | + "cbc(des3_ede)))", |
||
6475 | + .cra_driver_name = "echainiv-authenc-hmac-md5-" |
||
6476 | + "cbc-des3_ede-caam-qi2", |
||
6477 | + .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
||
6478 | + }, |
||
6479 | + .setkey = aead_setkey, |
||
6480 | + .setauthsize = aead_setauthsize, |
||
6481 | + .encrypt = aead_encrypt, |
||
6482 | + .decrypt = aead_decrypt, |
||
6483 | + .ivsize = DES3_EDE_BLOCK_SIZE, |
||
6484 | + .maxauthsize = MD5_DIGEST_SIZE, |
||
6485 | + }, |
||
6486 | + .caam = { |
||
6487 | + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
||
6488 | + .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
||
6489 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6490 | + .geniv = true, |
||
6491 | + } |
||
6492 | + }, |
||
6493 | + { |
||
6494 | + .aead = { |
||
6495 | + .base = { |
||
6496 | + .cra_name = "authenc(hmac(sha1)," |
||
6497 | + "cbc(des3_ede))", |
||
6498 | + .cra_driver_name = "authenc-hmac-sha1-" |
||
6499 | + "cbc-des3_ede-caam-qi2", |
||
6500 | + .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
||
6501 | + }, |
||
6502 | + .setkey = aead_setkey, |
||
6503 | + .setauthsize = aead_setauthsize, |
||
6504 | + .encrypt = aead_encrypt, |
||
6505 | + .decrypt = aead_decrypt, |
||
6506 | + .ivsize = DES3_EDE_BLOCK_SIZE, |
||
6507 | + .maxauthsize = SHA1_DIGEST_SIZE, |
||
6508 | + }, |
||
6509 | + .caam = { |
||
6510 | + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
||
6511 | + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
||
6512 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6513 | + }, |
||
6514 | + }, |
||
6515 | + { |
||
6516 | + .aead = { |
||
6517 | + .base = { |
||
6518 | + .cra_name = "echainiv(authenc(hmac(sha1)," |
||
6519 | + "cbc(des3_ede)))", |
||
6520 | + .cra_driver_name = "echainiv-authenc-" |
||
6521 | + "hmac-sha1-" |
||
6522 | + "cbc-des3_ede-caam-qi2", |
||
6523 | + .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
||
6524 | + }, |
||
6525 | + .setkey = aead_setkey, |
||
6526 | + .setauthsize = aead_setauthsize, |
||
6527 | + .encrypt = aead_encrypt, |
||
6528 | + .decrypt = aead_decrypt, |
||
6529 | + .ivsize = DES3_EDE_BLOCK_SIZE, |
||
6530 | + .maxauthsize = SHA1_DIGEST_SIZE, |
||
6531 | + }, |
||
6532 | + .caam = { |
||
6533 | + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
||
6534 | + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
||
6535 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6536 | + .geniv = true, |
||
6537 | + } |
||
6538 | + }, |
||
6539 | + { |
||
6540 | + .aead = { |
||
6541 | + .base = { |
||
6542 | + .cra_name = "authenc(hmac(sha224)," |
||
6543 | + "cbc(des3_ede))", |
||
6544 | + .cra_driver_name = "authenc-hmac-sha224-" |
||
6545 | + "cbc-des3_ede-caam-qi2", |
||
6546 | + .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
||
6547 | + }, |
||
6548 | + .setkey = aead_setkey, |
||
6549 | + .setauthsize = aead_setauthsize, |
||
6550 | + .encrypt = aead_encrypt, |
||
6551 | + .decrypt = aead_decrypt, |
||
6552 | + .ivsize = DES3_EDE_BLOCK_SIZE, |
||
6553 | + .maxauthsize = SHA224_DIGEST_SIZE, |
||
6554 | + }, |
||
6555 | + .caam = { |
||
6556 | + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
||
6557 | + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
||
6558 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6559 | + }, |
||
6560 | + }, |
||
6561 | + { |
||
6562 | + .aead = { |
||
6563 | + .base = { |
||
6564 | + .cra_name = "echainiv(authenc(hmac(sha224)," |
||
6565 | + "cbc(des3_ede)))", |
||
6566 | + .cra_driver_name = "echainiv-authenc-" |
||
6567 | + "hmac-sha224-" |
||
6568 | + "cbc-des3_ede-caam-qi2", |
||
6569 | + .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
||
6570 | + }, |
||
6571 | + .setkey = aead_setkey, |
||
6572 | + .setauthsize = aead_setauthsize, |
||
6573 | + .encrypt = aead_encrypt, |
||
6574 | + .decrypt = aead_decrypt, |
||
6575 | + .ivsize = DES3_EDE_BLOCK_SIZE, |
||
6576 | + .maxauthsize = SHA224_DIGEST_SIZE, |
||
6577 | + }, |
||
6578 | + .caam = { |
||
6579 | + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
||
6580 | + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
||
6581 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6582 | + .geniv = true, |
||
6583 | + } |
||
6584 | + }, |
||
6585 | + { |
||
6586 | + .aead = { |
||
6587 | + .base = { |
||
6588 | + .cra_name = "authenc(hmac(sha256)," |
||
6589 | + "cbc(des3_ede))", |
||
6590 | + .cra_driver_name = "authenc-hmac-sha256-" |
||
6591 | + "cbc-des3_ede-caam-qi2", |
||
6592 | + .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
||
6593 | + }, |
||
6594 | + .setkey = aead_setkey, |
||
6595 | + .setauthsize = aead_setauthsize, |
||
6596 | + .encrypt = aead_encrypt, |
||
6597 | + .decrypt = aead_decrypt, |
||
6598 | + .ivsize = DES3_EDE_BLOCK_SIZE, |
||
6599 | + .maxauthsize = SHA256_DIGEST_SIZE, |
||
6600 | + }, |
||
6601 | + .caam = { |
||
6602 | + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
||
6603 | + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
||
6604 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6605 | + }, |
||
6606 | + }, |
||
6607 | + { |
||
6608 | + .aead = { |
||
6609 | + .base = { |
||
6610 | + .cra_name = "echainiv(authenc(hmac(sha256)," |
||
6611 | + "cbc(des3_ede)))", |
||
6612 | + .cra_driver_name = "echainiv-authenc-" |
||
6613 | + "hmac-sha256-" |
||
6614 | + "cbc-des3_ede-caam-qi2", |
||
6615 | + .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
||
6616 | + }, |
||
6617 | + .setkey = aead_setkey, |
||
6618 | + .setauthsize = aead_setauthsize, |
||
6619 | + .encrypt = aead_encrypt, |
||
6620 | + .decrypt = aead_decrypt, |
||
6621 | + .ivsize = DES3_EDE_BLOCK_SIZE, |
||
6622 | + .maxauthsize = SHA256_DIGEST_SIZE, |
||
6623 | + }, |
||
6624 | + .caam = { |
||
6625 | + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
||
6626 | + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
||
6627 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6628 | + .geniv = true, |
||
6629 | + } |
||
6630 | + }, |
||
6631 | + { |
||
6632 | + .aead = { |
||
6633 | + .base = { |
||
6634 | + .cra_name = "authenc(hmac(sha384)," |
||
6635 | + "cbc(des3_ede))", |
||
6636 | + .cra_driver_name = "authenc-hmac-sha384-" |
||
6637 | + "cbc-des3_ede-caam-qi2", |
||
6638 | + .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
||
6639 | + }, |
||
6640 | + .setkey = aead_setkey, |
||
6641 | + .setauthsize = aead_setauthsize, |
||
6642 | + .encrypt = aead_encrypt, |
||
6643 | + .decrypt = aead_decrypt, |
||
6644 | + .ivsize = DES3_EDE_BLOCK_SIZE, |
||
6645 | + .maxauthsize = SHA384_DIGEST_SIZE, |
||
6646 | + }, |
||
6647 | + .caam = { |
||
6648 | + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
||
6649 | + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
||
6650 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6651 | + }, |
||
6652 | + }, |
||
6653 | + { |
||
6654 | + .aead = { |
||
6655 | + .base = { |
||
6656 | + .cra_name = "echainiv(authenc(hmac(sha384)," |
||
6657 | + "cbc(des3_ede)))", |
||
6658 | + .cra_driver_name = "echainiv-authenc-" |
||
6659 | + "hmac-sha384-" |
||
6660 | + "cbc-des3_ede-caam-qi2", |
||
6661 | + .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
||
6662 | + }, |
||
6663 | + .setkey = aead_setkey, |
||
6664 | + .setauthsize = aead_setauthsize, |
||
6665 | + .encrypt = aead_encrypt, |
||
6666 | + .decrypt = aead_decrypt, |
||
6667 | + .ivsize = DES3_EDE_BLOCK_SIZE, |
||
6668 | + .maxauthsize = SHA384_DIGEST_SIZE, |
||
6669 | + }, |
||
6670 | + .caam = { |
||
6671 | + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
||
6672 | + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
||
6673 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6674 | + .geniv = true, |
||
6675 | + } |
||
6676 | + }, |
||
6677 | + { |
||
6678 | + .aead = { |
||
6679 | + .base = { |
||
6680 | + .cra_name = "authenc(hmac(sha512)," |
||
6681 | + "cbc(des3_ede))", |
||
6682 | + .cra_driver_name = "authenc-hmac-sha512-" |
||
6683 | + "cbc-des3_ede-caam-qi2", |
||
6684 | + .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
||
6685 | + }, |
||
6686 | + .setkey = aead_setkey, |
||
6687 | + .setauthsize = aead_setauthsize, |
||
6688 | + .encrypt = aead_encrypt, |
||
6689 | + .decrypt = aead_decrypt, |
||
6690 | + .ivsize = DES3_EDE_BLOCK_SIZE, |
||
6691 | + .maxauthsize = SHA512_DIGEST_SIZE, |
||
6692 | + }, |
||
6693 | + .caam = { |
||
6694 | + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
||
6695 | + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
||
6696 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6697 | + }, |
||
6698 | + }, |
||
6699 | + { |
||
6700 | + .aead = { |
||
6701 | + .base = { |
||
6702 | + .cra_name = "echainiv(authenc(hmac(sha512)," |
||
6703 | + "cbc(des3_ede)))", |
||
6704 | + .cra_driver_name = "echainiv-authenc-" |
||
6705 | + "hmac-sha512-" |
||
6706 | + "cbc-des3_ede-caam-qi2", |
||
6707 | + .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
||
6708 | + }, |
||
6709 | + .setkey = aead_setkey, |
||
6710 | + .setauthsize = aead_setauthsize, |
||
6711 | + .encrypt = aead_encrypt, |
||
6712 | + .decrypt = aead_decrypt, |
||
6713 | + .ivsize = DES3_EDE_BLOCK_SIZE, |
||
6714 | + .maxauthsize = SHA512_DIGEST_SIZE, |
||
6715 | + }, |
||
6716 | + .caam = { |
||
6717 | + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, |
||
6718 | + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
||
6719 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6720 | + .geniv = true, |
||
6721 | + } |
||
6722 | + }, |
||
6723 | + { |
||
6724 | + .aead = { |
||
6725 | + .base = { |
||
6726 | + .cra_name = "authenc(hmac(md5),cbc(des))", |
||
6727 | + .cra_driver_name = "authenc-hmac-md5-" |
||
6728 | + "cbc-des-caam-qi2", |
||
6729 | + .cra_blocksize = DES_BLOCK_SIZE, |
||
6730 | + }, |
||
6731 | + .setkey = aead_setkey, |
||
6732 | + .setauthsize = aead_setauthsize, |
||
6733 | + .encrypt = aead_encrypt, |
||
6734 | + .decrypt = aead_decrypt, |
||
6735 | + .ivsize = DES_BLOCK_SIZE, |
||
6736 | + .maxauthsize = MD5_DIGEST_SIZE, |
||
6737 | + }, |
||
6738 | + .caam = { |
||
6739 | + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
||
6740 | + .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
||
6741 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6742 | + }, |
||
6743 | + }, |
||
6744 | + { |
||
6745 | + .aead = { |
||
6746 | + .base = { |
||
6747 | + .cra_name = "echainiv(authenc(hmac(md5)," |
||
6748 | + "cbc(des)))", |
||
6749 | + .cra_driver_name = "echainiv-authenc-hmac-md5-" |
||
6750 | + "cbc-des-caam-qi2", |
||
6751 | + .cra_blocksize = DES_BLOCK_SIZE, |
||
6752 | + }, |
||
6753 | + .setkey = aead_setkey, |
||
6754 | + .setauthsize = aead_setauthsize, |
||
6755 | + .encrypt = aead_encrypt, |
||
6756 | + .decrypt = aead_decrypt, |
||
6757 | + .ivsize = DES_BLOCK_SIZE, |
||
6758 | + .maxauthsize = MD5_DIGEST_SIZE, |
||
6759 | + }, |
||
6760 | + .caam = { |
||
6761 | + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
||
6762 | + .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
||
6763 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6764 | + .geniv = true, |
||
6765 | + } |
||
6766 | + }, |
||
6767 | + { |
||
6768 | + .aead = { |
||
6769 | + .base = { |
||
6770 | + .cra_name = "authenc(hmac(sha1),cbc(des))", |
||
6771 | + .cra_driver_name = "authenc-hmac-sha1-" |
||
6772 | + "cbc-des-caam-qi2", |
||
6773 | + .cra_blocksize = DES_BLOCK_SIZE, |
||
6774 | + }, |
||
6775 | + .setkey = aead_setkey, |
||
6776 | + .setauthsize = aead_setauthsize, |
||
6777 | + .encrypt = aead_encrypt, |
||
6778 | + .decrypt = aead_decrypt, |
||
6779 | + .ivsize = DES_BLOCK_SIZE, |
||
6780 | + .maxauthsize = SHA1_DIGEST_SIZE, |
||
6781 | + }, |
||
6782 | + .caam = { |
||
6783 | + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
||
6784 | + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
||
6785 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6786 | + }, |
||
6787 | + }, |
||
6788 | + { |
||
6789 | + .aead = { |
||
6790 | + .base = { |
||
6791 | + .cra_name = "echainiv(authenc(hmac(sha1)," |
||
6792 | + "cbc(des)))", |
||
6793 | + .cra_driver_name = "echainiv-authenc-" |
||
6794 | + "hmac-sha1-cbc-des-caam-qi2", |
||
6795 | + .cra_blocksize = DES_BLOCK_SIZE, |
||
6796 | + }, |
||
6797 | + .setkey = aead_setkey, |
||
6798 | + .setauthsize = aead_setauthsize, |
||
6799 | + .encrypt = aead_encrypt, |
||
6800 | + .decrypt = aead_decrypt, |
||
6801 | + .ivsize = DES_BLOCK_SIZE, |
||
6802 | + .maxauthsize = SHA1_DIGEST_SIZE, |
||
6803 | + }, |
||
6804 | + .caam = { |
||
6805 | + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
||
6806 | + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
||
6807 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6808 | + .geniv = true, |
||
6809 | + } |
||
6810 | + }, |
||
6811 | + { |
||
6812 | + .aead = { |
||
6813 | + .base = { |
||
6814 | + .cra_name = "authenc(hmac(sha224),cbc(des))", |
||
6815 | + .cra_driver_name = "authenc-hmac-sha224-" |
||
6816 | + "cbc-des-caam-qi2", |
||
6817 | + .cra_blocksize = DES_BLOCK_SIZE, |
||
6818 | + }, |
||
6819 | + .setkey = aead_setkey, |
||
6820 | + .setauthsize = aead_setauthsize, |
||
6821 | + .encrypt = aead_encrypt, |
||
6822 | + .decrypt = aead_decrypt, |
||
6823 | + .ivsize = DES_BLOCK_SIZE, |
||
6824 | + .maxauthsize = SHA224_DIGEST_SIZE, |
||
6825 | + }, |
||
6826 | + .caam = { |
||
6827 | + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
||
6828 | + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
||
6829 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6830 | + }, |
||
6831 | + }, |
||
6832 | + { |
||
6833 | + .aead = { |
||
6834 | + .base = { |
||
6835 | + .cra_name = "echainiv(authenc(hmac(sha224)," |
||
6836 | + "cbc(des)))", |
||
6837 | + .cra_driver_name = "echainiv-authenc-" |
||
6838 | + "hmac-sha224-cbc-des-" |
||
6839 | + "caam-qi2", |
||
6840 | + .cra_blocksize = DES_BLOCK_SIZE, |
||
6841 | + }, |
||
6842 | + .setkey = aead_setkey, |
||
6843 | + .setauthsize = aead_setauthsize, |
||
6844 | + .encrypt = aead_encrypt, |
||
6845 | + .decrypt = aead_decrypt, |
||
6846 | + .ivsize = DES_BLOCK_SIZE, |
||
6847 | + .maxauthsize = SHA224_DIGEST_SIZE, |
||
6848 | + }, |
||
6849 | + .caam = { |
||
6850 | + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
||
6851 | + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
||
6852 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6853 | + .geniv = true, |
||
6854 | + } |
||
6855 | + }, |
||
6856 | + { |
||
6857 | + .aead = { |
||
6858 | + .base = { |
||
6859 | + .cra_name = "authenc(hmac(sha256),cbc(des))", |
||
6860 | + .cra_driver_name = "authenc-hmac-sha256-" |
||
6861 | + "cbc-des-caam-qi2", |
||
6862 | + .cra_blocksize = DES_BLOCK_SIZE, |
||
6863 | + }, |
||
6864 | + .setkey = aead_setkey, |
||
6865 | + .setauthsize = aead_setauthsize, |
||
6866 | + .encrypt = aead_encrypt, |
||
6867 | + .decrypt = aead_decrypt, |
||
6868 | + .ivsize = DES_BLOCK_SIZE, |
||
6869 | + .maxauthsize = SHA256_DIGEST_SIZE, |
||
6870 | + }, |
||
6871 | + .caam = { |
||
6872 | + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
||
6873 | + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
||
6874 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6875 | + }, |
||
6876 | + }, |
||
6877 | + { |
||
6878 | + .aead = { |
||
6879 | + .base = { |
||
6880 | + .cra_name = "echainiv(authenc(hmac(sha256)," |
||
6881 | + "cbc(des)))", |
||
6882 | + .cra_driver_name = "echainiv-authenc-" |
||
6883 | + "hmac-sha256-cbc-des-" |
||
6884 | + "caam-qi2", |
||
6885 | + .cra_blocksize = DES_BLOCK_SIZE, |
||
6886 | + }, |
||
6887 | + .setkey = aead_setkey, |
||
6888 | + .setauthsize = aead_setauthsize, |
||
6889 | + .encrypt = aead_encrypt, |
||
6890 | + .decrypt = aead_decrypt, |
||
6891 | + .ivsize = DES_BLOCK_SIZE, |
||
6892 | + .maxauthsize = SHA256_DIGEST_SIZE, |
||
6893 | + }, |
||
6894 | + .caam = { |
||
6895 | + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
||
6896 | + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
||
6897 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6898 | + .geniv = true, |
||
6899 | + }, |
||
6900 | + }, |
||
6901 | + { |
||
6902 | + .aead = { |
||
6903 | + .base = { |
||
6904 | + .cra_name = "authenc(hmac(sha384),cbc(des))", |
||
6905 | + .cra_driver_name = "authenc-hmac-sha384-" |
||
6906 | + "cbc-des-caam-qi2", |
||
6907 | + .cra_blocksize = DES_BLOCK_SIZE, |
||
6908 | + }, |
||
6909 | + .setkey = aead_setkey, |
||
6910 | + .setauthsize = aead_setauthsize, |
||
6911 | + .encrypt = aead_encrypt, |
||
6912 | + .decrypt = aead_decrypt, |
||
6913 | + .ivsize = DES_BLOCK_SIZE, |
||
6914 | + .maxauthsize = SHA384_DIGEST_SIZE, |
||
6915 | + }, |
||
6916 | + .caam = { |
||
6917 | + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
||
6918 | + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
||
6919 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6920 | + }, |
||
6921 | + }, |
||
6922 | + { |
||
6923 | + .aead = { |
||
6924 | + .base = { |
||
6925 | + .cra_name = "echainiv(authenc(hmac(sha384)," |
||
6926 | + "cbc(des)))", |
||
6927 | + .cra_driver_name = "echainiv-authenc-" |
||
6928 | + "hmac-sha384-cbc-des-" |
||
6929 | + "caam-qi2", |
||
6930 | + .cra_blocksize = DES_BLOCK_SIZE, |
||
6931 | + }, |
||
6932 | + .setkey = aead_setkey, |
||
6933 | + .setauthsize = aead_setauthsize, |
||
6934 | + .encrypt = aead_encrypt, |
||
6935 | + .decrypt = aead_decrypt, |
||
6936 | + .ivsize = DES_BLOCK_SIZE, |
||
6937 | + .maxauthsize = SHA384_DIGEST_SIZE, |
||
6938 | + }, |
||
6939 | + .caam = { |
||
6940 | + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
||
6941 | + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
||
6942 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6943 | + .geniv = true, |
||
6944 | + } |
||
6945 | + }, |
||
6946 | + { |
||
6947 | + .aead = { |
||
6948 | + .base = { |
||
6949 | + .cra_name = "authenc(hmac(sha512),cbc(des))", |
||
6950 | + .cra_driver_name = "authenc-hmac-sha512-" |
||
6951 | + "cbc-des-caam-qi2", |
||
6952 | + .cra_blocksize = DES_BLOCK_SIZE, |
||
6953 | + }, |
||
6954 | + .setkey = aead_setkey, |
||
6955 | + .setauthsize = aead_setauthsize, |
||
6956 | + .encrypt = aead_encrypt, |
||
6957 | + .decrypt = aead_decrypt, |
||
6958 | + .ivsize = DES_BLOCK_SIZE, |
||
6959 | + .maxauthsize = SHA512_DIGEST_SIZE, |
||
6960 | + }, |
||
6961 | + .caam = { |
||
6962 | + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
||
6963 | + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
||
6964 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6965 | + } |
||
6966 | + }, |
||
6967 | + { |
||
6968 | + .aead = { |
||
6969 | + .base = { |
||
6970 | + .cra_name = "echainiv(authenc(hmac(sha512)," |
||
6971 | + "cbc(des)))", |
||
6972 | + .cra_driver_name = "echainiv-authenc-" |
||
6973 | + "hmac-sha512-cbc-des-" |
||
6974 | + "caam-qi2", |
||
6975 | + .cra_blocksize = DES_BLOCK_SIZE, |
||
6976 | + }, |
||
6977 | + .setkey = aead_setkey, |
||
6978 | + .setauthsize = aead_setauthsize, |
||
6979 | + .encrypt = aead_encrypt, |
||
6980 | + .decrypt = aead_decrypt, |
||
6981 | + .ivsize = DES_BLOCK_SIZE, |
||
6982 | + .maxauthsize = SHA512_DIGEST_SIZE, |
||
6983 | + }, |
||
6984 | + .caam = { |
||
6985 | + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, |
||
6986 | + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
||
6987 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
6988 | + .geniv = true, |
||
6989 | + } |
||
6990 | + }, |
||
6991 | + { |
||
6992 | + .aead = { |
||
6993 | + .base = { |
||
6994 | + .cra_name = "authenc(hmac(md5)," |
||
6995 | + "rfc3686(ctr(aes)))", |
||
6996 | + .cra_driver_name = "authenc-hmac-md5-" |
||
6997 | + "rfc3686-ctr-aes-caam-qi2", |
||
6998 | + .cra_blocksize = 1, |
||
6999 | + }, |
||
7000 | + .setkey = aead_setkey, |
||
7001 | + .setauthsize = aead_setauthsize, |
||
7002 | + .encrypt = aead_encrypt, |
||
7003 | + .decrypt = aead_decrypt, |
||
7004 | + .ivsize = CTR_RFC3686_IV_SIZE, |
||
7005 | + .maxauthsize = MD5_DIGEST_SIZE, |
||
7006 | + }, |
||
7007 | + .caam = { |
||
7008 | + .class1_alg_type = OP_ALG_ALGSEL_AES | |
||
7009 | + OP_ALG_AAI_CTR_MOD128, |
||
7010 | + .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
||
7011 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
7012 | + .rfc3686 = true, |
||
7013 | + }, |
||
7014 | + }, |
||
7015 | + { |
||
7016 | + .aead = { |
||
7017 | + .base = { |
||
7018 | + .cra_name = "seqiv(authenc(" |
||
7019 | + "hmac(md5),rfc3686(ctr(aes))))", |
||
7020 | + .cra_driver_name = "seqiv-authenc-hmac-md5-" |
||
7021 | + "rfc3686-ctr-aes-caam-qi2", |
||
7022 | + .cra_blocksize = 1, |
||
7023 | + }, |
||
7024 | + .setkey = aead_setkey, |
||
7025 | + .setauthsize = aead_setauthsize, |
||
7026 | + .encrypt = aead_encrypt, |
||
7027 | + .decrypt = aead_decrypt, |
||
7028 | + .ivsize = CTR_RFC3686_IV_SIZE, |
||
7029 | + .maxauthsize = MD5_DIGEST_SIZE, |
||
7030 | + }, |
||
7031 | + .caam = { |
||
7032 | + .class1_alg_type = OP_ALG_ALGSEL_AES | |
||
7033 | + OP_ALG_AAI_CTR_MOD128, |
||
7034 | + .class2_alg_type = OP_ALG_ALGSEL_MD5 | |
||
7035 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
7036 | + .rfc3686 = true, |
||
7037 | + .geniv = true, |
||
7038 | + }, |
||
7039 | + }, |
||
7040 | + { |
||
7041 | + .aead = { |
||
7042 | + .base = { |
||
7043 | + .cra_name = "authenc(hmac(sha1)," |
||
7044 | + "rfc3686(ctr(aes)))", |
||
7045 | + .cra_driver_name = "authenc-hmac-sha1-" |
||
7046 | + "rfc3686-ctr-aes-caam-qi2", |
||
7047 | + .cra_blocksize = 1, |
||
7048 | + }, |
||
7049 | + .setkey = aead_setkey, |
||
7050 | + .setauthsize = aead_setauthsize, |
||
7051 | + .encrypt = aead_encrypt, |
||
7052 | + .decrypt = aead_decrypt, |
||
7053 | + .ivsize = CTR_RFC3686_IV_SIZE, |
||
7054 | + .maxauthsize = SHA1_DIGEST_SIZE, |
||
7055 | + }, |
||
7056 | + .caam = { |
||
7057 | + .class1_alg_type = OP_ALG_ALGSEL_AES | |
||
7058 | + OP_ALG_AAI_CTR_MOD128, |
||
7059 | + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
||
7060 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
7061 | + .rfc3686 = true, |
||
7062 | + }, |
||
7063 | + }, |
||
7064 | + { |
||
7065 | + .aead = { |
||
7066 | + .base = { |
||
7067 | + .cra_name = "seqiv(authenc(" |
||
7068 | + "hmac(sha1),rfc3686(ctr(aes))))", |
||
7069 | + .cra_driver_name = "seqiv-authenc-hmac-sha1-" |
||
7070 | + "rfc3686-ctr-aes-caam-qi2", |
||
7071 | + .cra_blocksize = 1, |
||
7072 | + }, |
||
7073 | + .setkey = aead_setkey, |
||
7074 | + .setauthsize = aead_setauthsize, |
||
7075 | + .encrypt = aead_encrypt, |
||
7076 | + .decrypt = aead_decrypt, |
||
7077 | + .ivsize = CTR_RFC3686_IV_SIZE, |
||
7078 | + .maxauthsize = SHA1_DIGEST_SIZE, |
||
7079 | + }, |
||
7080 | + .caam = { |
||
7081 | + .class1_alg_type = OP_ALG_ALGSEL_AES | |
||
7082 | + OP_ALG_AAI_CTR_MOD128, |
||
7083 | + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
||
7084 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
7085 | + .rfc3686 = true, |
||
7086 | + .geniv = true, |
||
7087 | + }, |
||
7088 | + }, |
||
7089 | + { |
||
7090 | + .aead = { |
||
7091 | + .base = { |
||
7092 | + .cra_name = "authenc(hmac(sha224)," |
||
7093 | + "rfc3686(ctr(aes)))", |
||
7094 | + .cra_driver_name = "authenc-hmac-sha224-" |
||
7095 | + "rfc3686-ctr-aes-caam-qi2", |
||
7096 | + .cra_blocksize = 1, |
||
7097 | + }, |
||
7098 | + .setkey = aead_setkey, |
||
7099 | + .setauthsize = aead_setauthsize, |
||
7100 | + .encrypt = aead_encrypt, |
||
7101 | + .decrypt = aead_decrypt, |
||
7102 | + .ivsize = CTR_RFC3686_IV_SIZE, |
||
7103 | + .maxauthsize = SHA224_DIGEST_SIZE, |
||
7104 | + }, |
||
7105 | + .caam = { |
||
7106 | + .class1_alg_type = OP_ALG_ALGSEL_AES | |
||
7107 | + OP_ALG_AAI_CTR_MOD128, |
||
7108 | + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
||
7109 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
7110 | + .rfc3686 = true, |
||
7111 | + }, |
||
7112 | + }, |
||
7113 | + { |
||
7114 | + .aead = { |
||
7115 | + .base = { |
||
7116 | + .cra_name = "seqiv(authenc(" |
||
7117 | + "hmac(sha224),rfc3686(ctr(aes))))", |
||
7118 | + .cra_driver_name = "seqiv-authenc-hmac-sha224-" |
||
7119 | + "rfc3686-ctr-aes-caam-qi2", |
||
7120 | + .cra_blocksize = 1, |
||
7121 | + }, |
||
7122 | + .setkey = aead_setkey, |
||
7123 | + .setauthsize = aead_setauthsize, |
||
7124 | + .encrypt = aead_encrypt, |
||
7125 | + .decrypt = aead_decrypt, |
||
7126 | + .ivsize = CTR_RFC3686_IV_SIZE, |
||
7127 | + .maxauthsize = SHA224_DIGEST_SIZE, |
||
7128 | + }, |
||
7129 | + .caam = { |
||
7130 | + .class1_alg_type = OP_ALG_ALGSEL_AES | |
||
7131 | + OP_ALG_AAI_CTR_MOD128, |
||
7132 | + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | |
||
7133 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
7134 | + .rfc3686 = true, |
||
7135 | + .geniv = true, |
||
7136 | + }, |
||
7137 | + }, |
||
7138 | + { |
||
7139 | + .aead = { |
||
7140 | + .base = { |
||
7141 | + .cra_name = "authenc(hmac(sha256)," |
||
7142 | + "rfc3686(ctr(aes)))", |
||
7143 | + .cra_driver_name = "authenc-hmac-sha256-" |
||
7144 | + "rfc3686-ctr-aes-caam-qi2", |
||
7145 | + .cra_blocksize = 1, |
||
7146 | + }, |
||
7147 | + .setkey = aead_setkey, |
||
7148 | + .setauthsize = aead_setauthsize, |
||
7149 | + .encrypt = aead_encrypt, |
||
7150 | + .decrypt = aead_decrypt, |
||
7151 | + .ivsize = CTR_RFC3686_IV_SIZE, |
||
7152 | + .maxauthsize = SHA256_DIGEST_SIZE, |
||
7153 | + }, |
||
7154 | + .caam = { |
||
7155 | + .class1_alg_type = OP_ALG_ALGSEL_AES | |
||
7156 | + OP_ALG_AAI_CTR_MOD128, |
||
7157 | + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
||
7158 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
7159 | + .rfc3686 = true, |
||
7160 | + }, |
||
7161 | + }, |
||
7162 | + { |
||
7163 | + .aead = { |
||
7164 | + .base = { |
||
7165 | + .cra_name = "seqiv(authenc(hmac(sha256)," |
||
7166 | + "rfc3686(ctr(aes))))", |
||
7167 | + .cra_driver_name = "seqiv-authenc-hmac-sha256-" |
||
7168 | + "rfc3686-ctr-aes-caam-qi2", |
||
7169 | + .cra_blocksize = 1, |
||
7170 | + }, |
||
7171 | + .setkey = aead_setkey, |
||
7172 | + .setauthsize = aead_setauthsize, |
||
7173 | + .encrypt = aead_encrypt, |
||
7174 | + .decrypt = aead_decrypt, |
||
7175 | + .ivsize = CTR_RFC3686_IV_SIZE, |
||
7176 | + .maxauthsize = SHA256_DIGEST_SIZE, |
||
7177 | + }, |
||
7178 | + .caam = { |
||
7179 | + .class1_alg_type = OP_ALG_ALGSEL_AES | |
||
7180 | + OP_ALG_AAI_CTR_MOD128, |
||
7181 | + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | |
||
7182 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
7183 | + .rfc3686 = true, |
||
7184 | + .geniv = true, |
||
7185 | + }, |
||
7186 | + }, |
||
7187 | + { |
||
7188 | + .aead = { |
||
7189 | + .base = { |
||
7190 | + .cra_name = "authenc(hmac(sha384)," |
||
7191 | + "rfc3686(ctr(aes)))", |
||
7192 | + .cra_driver_name = "authenc-hmac-sha384-" |
||
7193 | + "rfc3686-ctr-aes-caam-qi2", |
||
7194 | + .cra_blocksize = 1, |
||
7195 | + }, |
||
7196 | + .setkey = aead_setkey, |
||
7197 | + .setauthsize = aead_setauthsize, |
||
7198 | + .encrypt = aead_encrypt, |
||
7199 | + .decrypt = aead_decrypt, |
||
7200 | + .ivsize = CTR_RFC3686_IV_SIZE, |
||
7201 | + .maxauthsize = SHA384_DIGEST_SIZE, |
||
7202 | + }, |
||
7203 | + .caam = { |
||
7204 | + .class1_alg_type = OP_ALG_ALGSEL_AES | |
||
7205 | + OP_ALG_AAI_CTR_MOD128, |
||
7206 | + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
||
7207 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
7208 | + .rfc3686 = true, |
||
7209 | + }, |
||
7210 | + }, |
||
7211 | + { |
||
7212 | + .aead = { |
||
7213 | + .base = { |
||
7214 | + .cra_name = "seqiv(authenc(hmac(sha384)," |
||
7215 | + "rfc3686(ctr(aes))))", |
||
7216 | + .cra_driver_name = "seqiv-authenc-hmac-sha384-" |
||
7217 | + "rfc3686-ctr-aes-caam-qi2", |
||
7218 | + .cra_blocksize = 1, |
||
7219 | + }, |
||
7220 | + .setkey = aead_setkey, |
||
7221 | + .setauthsize = aead_setauthsize, |
||
7222 | + .encrypt = aead_encrypt, |
||
7223 | + .decrypt = aead_decrypt, |
||
7224 | + .ivsize = CTR_RFC3686_IV_SIZE, |
||
7225 | + .maxauthsize = SHA384_DIGEST_SIZE, |
||
7226 | + }, |
||
7227 | + .caam = { |
||
7228 | + .class1_alg_type = OP_ALG_ALGSEL_AES | |
||
7229 | + OP_ALG_AAI_CTR_MOD128, |
||
7230 | + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | |
||
7231 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
7232 | + .rfc3686 = true, |
||
7233 | + .geniv = true, |
||
7234 | + }, |
||
7235 | + }, |
||
7236 | + { |
||
7237 | + .aead = { |
||
7238 | + .base = { |
||
7239 | + .cra_name = "authenc(hmac(sha512)," |
||
7240 | + "rfc3686(ctr(aes)))", |
||
7241 | + .cra_driver_name = "authenc-hmac-sha512-" |
||
7242 | + "rfc3686-ctr-aes-caam-qi2", |
||
7243 | + .cra_blocksize = 1, |
||
7244 | + }, |
||
7245 | + .setkey = aead_setkey, |
||
7246 | + .setauthsize = aead_setauthsize, |
||
7247 | + .encrypt = aead_encrypt, |
||
7248 | + .decrypt = aead_decrypt, |
||
7249 | + .ivsize = CTR_RFC3686_IV_SIZE, |
||
7250 | + .maxauthsize = SHA512_DIGEST_SIZE, |
||
7251 | + }, |
||
7252 | + .caam = { |
||
7253 | + .class1_alg_type = OP_ALG_ALGSEL_AES | |
||
7254 | + OP_ALG_AAI_CTR_MOD128, |
||
7255 | + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
||
7256 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
7257 | + .rfc3686 = true, |
||
7258 | + }, |
||
7259 | + }, |
||
7260 | + { |
||
7261 | + .aead = { |
||
7262 | + .base = { |
||
7263 | + .cra_name = "seqiv(authenc(hmac(sha512)," |
||
7264 | + "rfc3686(ctr(aes))))", |
||
7265 | + .cra_driver_name = "seqiv-authenc-hmac-sha512-" |
||
7266 | + "rfc3686-ctr-aes-caam-qi2", |
||
7267 | + .cra_blocksize = 1, |
||
7268 | + }, |
||
7269 | + .setkey = aead_setkey, |
||
7270 | + .setauthsize = aead_setauthsize, |
||
7271 | + .encrypt = aead_encrypt, |
||
7272 | + .decrypt = aead_decrypt, |
||
7273 | + .ivsize = CTR_RFC3686_IV_SIZE, |
||
7274 | + .maxauthsize = SHA512_DIGEST_SIZE, |
||
7275 | + }, |
||
7276 | + .caam = { |
||
7277 | + .class1_alg_type = OP_ALG_ALGSEL_AES | |
||
7278 | + OP_ALG_AAI_CTR_MOD128, |
||
7279 | + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | |
||
7280 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
7281 | + .rfc3686 = true, |
||
7282 | + .geniv = true, |
||
7283 | + }, |
||
7284 | + }, |
||
7285 | + { |
||
7286 | + .aead = { |
||
7287 | + .base = { |
||
7288 | + .cra_name = "tls10(hmac(sha1),cbc(aes))", |
||
7289 | + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2", |
||
7290 | + .cra_blocksize = AES_BLOCK_SIZE, |
||
7291 | + }, |
||
7292 | + .setkey = tls_setkey, |
||
7293 | + .setauthsize = tls_setauthsize, |
||
7294 | + .encrypt = tls_encrypt, |
||
7295 | + .decrypt = tls_decrypt, |
||
7296 | + .ivsize = AES_BLOCK_SIZE, |
||
7297 | + .maxauthsize = SHA1_DIGEST_SIZE, |
||
7298 | + }, |
||
7299 | + .caam = { |
||
7300 | + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, |
||
7301 | + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | |
||
7302 | + OP_ALG_AAI_HMAC_PRECOMP, |
||
7303 | + }, |
||
7304 | + }, |
||
7305 | +}; |
||
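/*
 * Editor's note (illustrative, not part of the original patch): each entry
 * in the table above pairs the generic crypto API template (.aead) with
 * CAAM-specific programming (.caam). class1_alg_type selects the cipher
 * (DES-CBC, AES-CTR, ...), class2_alg_type the HMAC auth algorithm;
 * .rfc3686 enables CTR nonce handling and .geniv marks the IV-generating
 * (echainiv/seqiv) variants whose encrypt path produces the IV itself.
 */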
7306 | + |
||
7307 | +static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) |
||
7308 | +{ |
||
7309 | + struct skcipher_alg *alg = &t_alg->skcipher; |
||
7310 | + |
||
7311 | + alg->base.cra_module = THIS_MODULE; |
||
7312 | + alg->base.cra_priority = CAAM_CRA_PRIORITY; |
||
7313 | + alg->base.cra_ctxsize = sizeof(struct caam_ctx); |
||
7314 | + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; |
||
7315 | + |
||
7316 | + alg->init = caam_cra_init_skcipher; |
||
7317 | + alg->exit = caam_cra_exit; |
||
7318 | +} |
||
7319 | + |
||
7320 | +static void caam_aead_alg_init(struct caam_aead_alg *t_alg) |
||
7321 | +{ |
||
7322 | + struct aead_alg *alg = &t_alg->aead; |
||
7323 | + |
||
7324 | + alg->base.cra_module = THIS_MODULE; |
||
7325 | + alg->base.cra_priority = CAAM_CRA_PRIORITY; |
||
7326 | + alg->base.cra_ctxsize = sizeof(struct caam_ctx); |
||
7327 | + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; |
||
7328 | + |
||
7329 | + alg->init = caam_cra_init_aead; |
||
7330 | + alg->exit = caam_cra_exit_aead; |
||
7331 | +} |
||
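/*
 * Editor's sketch (assumed names, not code from this patch): the two
 * initializers above are meant to be applied by a registration loop of
 * roughly this shape before each template is handed to the crypto API.
 * driver_aeads[] stands in for the AEAD template array defined earlier.
 */
static int __maybe_unused register_aead_templates(void)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		caam_aead_alg_init(t_alg);	/* fill common cra_* fields */
		err = crypto_register_aead(&t_alg->aead);
		if (err)
			return err;
	}
	return 0;
}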
7332 | + |
||
7333 | +/* max hash key is max split key size */ |
||
7334 | +#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) |
||
7335 | + |
||
7336 | +#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE |
||
7337 | +#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE |
||
7338 | + |
||
7339 | +#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ |
||
7340 | + CAAM_MAX_HASH_KEY_SIZE) |
||
7341 | +#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) |
||
7342 | + |
||
7343 | +/* caam context sizes for hashes: running digest + 8-byte message length */ |
||
7344 | +#define HASH_MSG_LEN 8 |
||
7345 | +#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) |
||
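/*
 * Worked example (editor's note): SHA512_DIGEST_SIZE is 64, so
 * MAX_CTX_LEN = 8 + 64 = 72 bytes -- the largest running digest plus the
 * 8-byte message-length counter kept alongside it in the Context Register.
 */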
7346 | + |
||
7347 | +enum hash_optype { |
||
7348 | + UPDATE = 0, |
||
7349 | + UPDATE_FIRST, |
||
7350 | + FINALIZE, |
||
7351 | + DIGEST, |
||
7352 | + HASH_NUM_OP |
||
7353 | +}; |
||
7354 | + |
||
7355 | +/** |
||
7356 | + * caam_hash_ctx - ahash per-session context |
||
7357 | + * @flc: Flow Contexts array |
||
7358 | + * @flc_dma: I/O virtual addresses of the Flow Contexts |
||
7359 | + * @key: virtual address of the authentication key |
||
7360 | + * @dev: dpseci device |
||
7361 | + * @ctx_len: size of Context Register |
||
7362 | + * @adata: hashing algorithm details |
||
7363 | + */ |
||
7364 | +struct caam_hash_ctx { |
||
7365 | + struct caam_flc flc[HASH_NUM_OP]; |
||
7366 | + dma_addr_t flc_dma[HASH_NUM_OP]; |
||
7367 | + u8 key[CAAM_MAX_HASH_KEY_SIZE]; |
||
7368 | + struct device *dev; |
||
7369 | + int ctx_len; |
||
7370 | + struct alginfo adata; |
||
7371 | +}; |
||
7372 | + |
||
7373 | +/* ahash state */ |
||
7374 | +struct caam_hash_state { |
||
7375 | + struct caam_request caam_req; |
||
7376 | + dma_addr_t buf_dma; |
||
7377 | + dma_addr_t ctx_dma; |
||
7378 | + u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; |
||
7379 | + int buflen_0; |
||
7380 | + u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; |
||
7381 | + int buflen_1; |
||
7382 | + u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; |
||
7383 | + int (*update)(struct ahash_request *req); |
||
7384 | + int (*final)(struct ahash_request *req); |
||
7385 | + int (*finup)(struct ahash_request *req); |
||
7386 | + int current_buf; |
||
7387 | +}; |
||
7388 | + |
||
7389 | +struct caam_export_state { |
||
7390 | + u8 buf[CAAM_MAX_HASH_BLOCK_SIZE]; |
||
7391 | + u8 caam_ctx[MAX_CTX_LEN]; |
||
7392 | + int buflen; |
||
7393 | + int (*update)(struct ahash_request *req); |
||
7394 | + int (*final)(struct ahash_request *req); |
||
7395 | + int (*finup)(struct ahash_request *req); |
||
7396 | +}; |
||
7397 | + |
||
7398 | +static inline void switch_buf(struct caam_hash_state *state) |
||
7399 | +{ |
||
7400 | + state->current_buf ^= 1; |
||
7401 | +} |
||
7402 | + |
||
7403 | +static inline u8 *current_buf(struct caam_hash_state *state) |
||
7404 | +{ |
||
7405 | + return state->current_buf ? state->buf_1 : state->buf_0; |
||
7406 | +} |
||
7407 | + |
||
7408 | +static inline u8 *alt_buf(struct caam_hash_state *state) |
||
7409 | +{ |
||
7410 | + return state->current_buf ? state->buf_0 : state->buf_1; |
||
7411 | +} |
||
7412 | + |
||
7413 | +static inline int *current_buflen(struct caam_hash_state *state) |
||
7414 | +{ |
||
7415 | + return state->current_buf ? &state->buflen_1 : &state->buflen_0; |
||
7416 | +} |
||
7417 | + |
||
7418 | +static inline int *alt_buflen(struct caam_hash_state *state) |
||
7419 | +{ |
||
7420 | + return state->current_buf ? &state->buflen_0 : &state->buflen_1; |
||
7421 | +} |
||
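/*
 * Editor's sketch (hypothetical helper, not part of the original patch):
 * the four accessors above implement ping-pong buffering between buf_0
 * and buf_1. Leftover bytes for the next request are staged in the
 * alternate buffer, then the roles are flipped on completion:
 */
static void __maybe_unused stage_leftover(struct caam_hash_state *state,
					  const u8 *leftover, int len)
{
	memcpy(alt_buf(state), leftover, len);
	*alt_buflen(state) = len;
	switch_buf(state);	/* the alternate buffer becomes current */
}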
7422 | + |
||
7423 | +/* Map current buffer in state (if length > 0) and put it in link table */ |
||
7424 | +static inline int buf_map_to_qm_sg(struct device *dev, |
||
7425 | + struct dpaa2_sg_entry *qm_sg, |
||
7426 | + struct caam_hash_state *state) |
||
7427 | +{ |
||
7428 | + int buflen = *current_buflen(state); |
||
7429 | + |
||
7430 | + if (!buflen) |
||
7431 | + return 0; |
||
7432 | + |
||
7433 | + state->buf_dma = dma_map_single(dev, current_buf(state), buflen, |
||
7434 | + DMA_TO_DEVICE); |
||
7435 | + if (dma_mapping_error(dev, state->buf_dma)) { |
||
7436 | + dev_err(dev, "unable to map buf\n"); |
||
7437 | + state->buf_dma = 0; |
||
7438 | + return -ENOMEM; |
||
7439 | + } |
||
7440 | + |
||
7441 | + dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0); |
||
7442 | + |
||
7443 | + return 0; |
||
7444 | +} |
||
7445 | + |
||
7446 | +/* Map state->caam_ctx and add it to the link table */ |
||
7447 | +static inline int ctx_map_to_qm_sg(struct device *dev, |
||
7448 | + struct caam_hash_state *state, int ctx_len, |
||
7449 | + struct dpaa2_sg_entry *qm_sg, u32 flag) |
||
7450 | +{ |
||
7451 | + state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag); |
||
7452 | + if (dma_mapping_error(dev, state->ctx_dma)) { |
||
7453 | + dev_err(dev, "unable to map ctx\n"); |
||
7454 | + state->ctx_dma = 0; |
||
7455 | + return -ENOMEM; |
||
7456 | + } |
||
7457 | + |
||
7458 | + dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0); |
||
7459 | + |
||
7460 | + return 0; |
||
7461 | +} |
||
7462 | + |
||
7463 | +static int ahash_set_sh_desc(struct crypto_ahash *ahash) |
||
7464 | +{ |
||
7465 | + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
||
7466 | + int digestsize = crypto_ahash_digestsize(ahash); |
||
7467 | + struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); |
||
7468 | + struct caam_flc *flc; |
||
7469 | + u32 *desc; |
||
7470 | + |
||
7471 | + ctx->adata.key_virt = ctx->key; |
||
7472 | + ctx->adata.key_inline = true; |
||
7473 | + |
||
7474 | + /* ahash_update shared descriptor */ |
||
7475 | + flc = &ctx->flc[UPDATE]; |
||
7476 | + desc = flc->sh_desc; |
||
7477 | + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, |
||
7478 | + ctx->ctx_len, true, priv->sec_attr.era); |
||
7479 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
7480 | + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE], |
||
7481 | + desc_bytes(desc), DMA_BIDIRECTIONAL); |
||
7482 | +#ifdef DEBUG |
||
7483 | + print_hex_dump(KERN_ERR, |
||
7484 | + "ahash update shdesc@" __stringify(__LINE__)": ", |
||
7485 | + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); |
||
7486 | +#endif |
||
7487 | + |
||
7488 | + /* ahash_update_first shared descriptor */ |
||
7489 | + flc = &ctx->flc[UPDATE_FIRST]; |
||
7490 | + desc = flc->sh_desc; |
||
7491 | + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, |
||
7492 | + ctx->ctx_len, false, priv->sec_attr.era); |
||
7493 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
7494 | + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST], |
||
7495 | + desc_bytes(desc), DMA_BIDIRECTIONAL); |
||
7496 | +#ifdef DEBUG |
||
7497 | + print_hex_dump(KERN_ERR, |
||
7498 | + "ahash update first shdesc@" __stringify(__LINE__)": ", |
||
7499 | + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); |
||
7500 | +#endif |
||
7501 | + |
||
7502 | + /* ahash_final shared descriptor */ |
||
7503 | + flc = &ctx->flc[FINALIZE]; |
||
7504 | + desc = flc->sh_desc; |
||
7505 | + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, |
||
7506 | + ctx->ctx_len, true, priv->sec_attr.era); |
||
7507 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
7508 | + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE], |
||
7509 | + desc_bytes(desc), DMA_BIDIRECTIONAL); |
||
7510 | +#ifdef DEBUG |
||
7511 | + print_hex_dump(KERN_ERR, |
||
7512 | + "ahash final shdesc@" __stringify(__LINE__)": ", |
||
7513 | + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); |
||
7514 | +#endif |
||
7515 | + |
||
7516 | + /* ahash_digest shared descriptor */ |
||
7517 | + flc = &ctx->flc[DIGEST]; |
||
7518 | + desc = flc->sh_desc; |
||
7519 | + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, |
||
7520 | + ctx->ctx_len, false, priv->sec_attr.era); |
||
7521 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
7522 | + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST], |
||
7523 | + desc_bytes(desc), DMA_BIDIRECTIONAL); |
||
7524 | +#ifdef DEBUG |
||
7525 | + print_hex_dump(KERN_ERR, |
||
7526 | + "ahash digest shdesc@" __stringify(__LINE__)": ", |
||
7527 | + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); |
||
7528 | +#endif |
||
7529 | + |
||
7530 | + return 0; |
||
7531 | +} |
||
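/*
 * Editor's summary of the four shared descriptors built above
 * (stage -> CAAM algorithm state, output length, running context):
 *
 *   UPDATE        OP_ALG_AS_UPDATE     ctx_len     imports + exports ctx
 *   UPDATE_FIRST  OP_ALG_AS_INIT       ctx_len     exports ctx only
 *   FINALIZE      OP_ALG_AS_FINALIZE   digestsize  imports ctx
 *   DIGEST        OP_ALG_AS_INITFINAL  digestsize  one-shot, no ctx import
 */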
7532 | + |
||
7533 | +/* Digest the key down to digestsize when it is longer than the block size */ |
||
7534 | +static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, |
||
7535 | + u32 *keylen, u8 *key_out, u32 digestsize) |
||
7536 | +{ |
||
7537 | + struct caam_request *req_ctx; |
||
7538 | + u32 *desc; |
||
7539 | + struct split_key_sh_result result; |
||
7540 | + dma_addr_t src_dma, dst_dma; |
||
7541 | + struct caam_flc *flc; |
||
7542 | + dma_addr_t flc_dma; |
||
7543 | + int ret = -ENOMEM; |
||
7544 | + struct dpaa2_fl_entry *in_fle, *out_fle; |
||
7545 | + |
||
7546 | + req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA); |
||
7547 | + if (!req_ctx) |
||
7548 | + return -ENOMEM; |
||
7549 | + |
||
7550 | + in_fle = &req_ctx->fd_flt[1]; |
||
7551 | + out_fle = &req_ctx->fd_flt[0]; |
||
7552 | + |
||
7553 | + flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA); |
||
7554 | + if (!flc) |
||
7555 | + goto err_flc; |
||
7556 | + |
||
7557 | + src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen, |
||
7558 | + DMA_TO_DEVICE); |
||
7559 | + if (dma_mapping_error(ctx->dev, src_dma)) { |
||
7560 | + dev_err(ctx->dev, "unable to map key input memory\n"); |
||
7561 | + goto err_src_dma; |
||
7562 | + } |
||
7563 | + dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize, |
||
7564 | + DMA_FROM_DEVICE); |
||
7565 | + if (dma_mapping_error(ctx->dev, dst_dma)) { |
||
7566 | + dev_err(ctx->dev, "unable to map key output memory\n"); |
||
7567 | + goto err_dst_dma; |
||
7568 | + } |
||
7569 | + |
||
7570 | + desc = flc->sh_desc; |
||
7571 | + |
||
7572 | + init_sh_desc(desc, 0); |
||
7573 | + |
||
7574 | + /* descriptor to perform unkeyed hash on key_in */ |
||
7575 | + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | |
||
7576 | + OP_ALG_AS_INITFINAL); |
||
7577 | + append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | |
||
7578 | + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); |
||
7579 | + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | |
||
7580 | + LDST_SRCDST_BYTE_CONTEXT); |
||
7581 | + |
||
7582 | + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ |
||
7583 | + flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) + |
||
7584 | + desc_bytes(desc), DMA_TO_DEVICE); |
||
7585 | + if (dma_mapping_error(ctx->dev, flc_dma)) { |
||
7586 | + dev_err(ctx->dev, "unable to map shared descriptor\n"); |
||
7587 | + goto err_flc_dma; |
||
7588 | + } |
||
7589 | + |
||
7590 | + dpaa2_fl_set_final(in_fle, true); |
||
7591 | + dpaa2_fl_set_format(in_fle, dpaa2_fl_single); |
||
7592 | + dpaa2_fl_set_addr(in_fle, src_dma); |
||
7593 | + dpaa2_fl_set_len(in_fle, *keylen); |
||
7594 | + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); |
||
7595 | + dpaa2_fl_set_addr(out_fle, dst_dma); |
||
7596 | + dpaa2_fl_set_len(out_fle, digestsize); |
||
7597 | + |
||
7598 | +#ifdef DEBUG |
||
7599 | + print_hex_dump(KERN_ERR, "key_in@" __stringify(__LINE__)": ", |
||
7600 | + DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); |
||
7601 | + print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ", |
||
7602 | + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); |
||
7603 | +#endif |
||
7604 | + |
||
7605 | + result.err = 0; |
||
7606 | + init_completion(&result.completion); |
||
7607 | + result.dev = ctx->dev; |
||
7608 | + |
||
7609 | + req_ctx->flc = flc; |
||
7610 | + req_ctx->flc_dma = flc_dma; |
||
7611 | + req_ctx->cbk = split_key_sh_done; |
||
7612 | + req_ctx->ctx = &result; |
||
7613 | + |
||
7614 | + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); |
||
7615 | + if (ret == -EINPROGRESS) { |
||
7616 | + /* in progress */ |
||
7617 | + wait_for_completion(&result.completion); |
||
7618 | + ret = result.err; |
||
7619 | +#ifdef DEBUG |
||
7620 | + print_hex_dump(KERN_ERR, |
||
7621 | + "digested key@" __stringify(__LINE__)": ", |
||
7622 | + DUMP_PREFIX_ADDRESS, 16, 4, key_in, digestsize, |
||
7623 | + 1); |
||
7624 | +#endif |
||
7625 | + } |
||
7626 | + |
||
7627 | + dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc), |
||
7628 | + DMA_TO_DEVICE); |
||
7629 | +err_flc_dma: |
||
7630 | + dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE); |
||
7631 | +err_dst_dma: |
||
7632 | + dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE); |
||
7633 | +err_src_dma: |
||
7634 | + kfree(flc); |
||
7635 | +err_flc: |
||
7636 | + kfree(req_ctx); |
||
7637 | + |
||
7638 | + *keylen = digestsize; |
||
7639 | + |
||
7640 | + return ret; |
||
7641 | +} |
||
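/*
 * Editor's note: this is the standard HMAC rule from RFC 2104 -- a key
 * longer than the block size is first replaced by its digest. E.g. for
 * hmac(sha256), a 100-byte key becomes the 32-byte SHA-256 of that key
 * before the split key is derived (see ahash_setkey() below).
 */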
7642 | + |
||
7643 | +static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, |
||
7644 | + unsigned int keylen) |
||
7645 | +{ |
||
7646 | + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
||
7647 | + unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base); |
||
7648 | + unsigned int digestsize = crypto_ahash_digestsize(ahash); |
||
7649 | + int ret; |
||
7650 | + u8 *hashed_key = NULL; |
||
7651 | + |
||
7652 | +#ifdef DEBUG |
||
7653 | + dev_err(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize); |
||
7654 | +#endif |
||
7655 | + |
||
7656 | + if (keylen > blocksize) { |
||
7657 | + hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key), |
||
7658 | + GFP_KERNEL | GFP_DMA); |
||
7659 | + if (!hashed_key) |
||
7660 | + return -ENOMEM; |
||
7661 | + ret = hash_digest_key(ctx, key, &keylen, hashed_key, |
||
7662 | + digestsize); |
||
7663 | + if (ret) |
||
7664 | + goto bad_free_key; |
||
7665 | + key = hashed_key; |
||
7666 | + } |
||
7667 | + |
||
7668 | + ctx->adata.keylen = keylen; |
||
7669 | + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & |
||
7670 | + OP_ALG_ALGSEL_MASK); |
||
7671 | + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) |
||
7672 | + goto bad_free_key; |
||
7673 | + |
||
7674 | + memcpy(ctx->key, key, keylen); |
||
7675 | + |
||
7676 | + kfree(hashed_key); |
||
7677 | + return ahash_set_sh_desc(ahash); |
||
7678 | +bad_free_key: |
||
7679 | + kfree(hashed_key); |
||
7680 | + crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); |
||
7681 | + return -EINVAL; |
||
7682 | +} |
||
7683 | + |
||
7684 | +static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc, |
||
7685 | + struct ahash_request *req, int dst_len) |
||
7686 | +{ |
||
7687 | + struct caam_hash_state *state = ahash_request_ctx(req); |
||
7688 | + |
||
7689 | + if (edesc->src_nents) |
||
7690 | + dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); |
||
7691 | + if (edesc->dst_dma) |
||
7692 | + dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE); |
||
7693 | + |
||
7694 | + if (edesc->qm_sg_bytes) |
||
7695 | + dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes, |
||
7696 | + DMA_TO_DEVICE); |
||
7697 | + |
||
7698 | + if (state->buf_dma) { |
||
7699 | + dma_unmap_single(dev, state->buf_dma, *current_buflen(state), |
||
7700 | + DMA_TO_DEVICE); |
||
7701 | + state->buf_dma = 0; |
||
7702 | + } |
||
7703 | +} |
||
7704 | + |
||
7705 | +static inline void ahash_unmap_ctx(struct device *dev, |
||
7706 | + struct ahash_edesc *edesc, |
||
7707 | + struct ahash_request *req, int dst_len, |
||
7708 | + u32 flag) |
||
7709 | +{ |
||
7710 | + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
||
7711 | + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
||
7712 | + struct caam_hash_state *state = ahash_request_ctx(req); |
||
7713 | + |
||
7714 | + if (state->ctx_dma) { |
||
7715 | + dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag); |
||
7716 | + state->ctx_dma = 0; |
||
7717 | + } |
||
7718 | + ahash_unmap(dev, edesc, req, dst_len); |
||
7719 | +} |
||
7720 | + |
||
7721 | +static void ahash_done(void *cbk_ctx, u32 status) |
||
7722 | +{ |
||
7723 | + struct crypto_async_request *areq = cbk_ctx; |
||
7724 | + struct ahash_request *req = ahash_request_cast(areq); |
||
7725 | + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
||
7726 | + struct caam_hash_state *state = ahash_request_ctx(req); |
||
7727 | + struct ahash_edesc *edesc = state->caam_req.edesc; |
||
7728 | + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
||
7729 | + int digestsize = crypto_ahash_digestsize(ahash); |
||
7730 | + int ecode = 0; |
||
7731 | + |
||
7732 | +#ifdef DEBUG |
||
7733 | + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
||
7734 | +#endif |
||
7735 | + |
||
7736 | + if (unlikely(status)) { |
||
7737 | + caam_qi2_strstatus(ctx->dev, status); |
||
7738 | + ecode = -EIO; |
||
7739 | + } |
||
7740 | + |
||
7741 | + ahash_unmap(ctx->dev, edesc, req, digestsize); |
||
7742 | + qi_cache_free(edesc); |
||
7743 | + |
||
7744 | +#ifdef DEBUG |
||
7745 | + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ", |
||
7746 | + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
||
7747 | + ctx->ctx_len, 1); |
||
7748 | + if (req->result) |
||
7749 | + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ", |
||
7750 | + DUMP_PREFIX_ADDRESS, 16, 4, req->result, |
||
7751 | + digestsize, 1); |
||
7752 | +#endif |
||
7753 | + |
||
7754 | + req->base.complete(&req->base, ecode); |
||
7755 | +} |
||
7756 | + |
||
7757 | +static void ahash_done_bi(void *cbk_ctx, u32 status) |
||
7758 | +{ |
||
7759 | + struct crypto_async_request *areq = cbk_ctx; |
||
7760 | + struct ahash_request *req = ahash_request_cast(areq); |
||
7761 | + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
||
7762 | + struct caam_hash_state *state = ahash_request_ctx(req); |
||
7763 | + struct ahash_edesc *edesc = state->caam_req.edesc; |
||
7764 | + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
||
7765 | + int ecode = 0; |
||
7766 | +#ifdef DEBUG |
||
7767 | + int digestsize = crypto_ahash_digestsize(ahash); |
||
7768 | + |
||
7769 | + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
||
7770 | +#endif |
||
7771 | + |
||
7772 | + if (unlikely(status)) { |
||
7773 | + caam_qi2_strstatus(ctx->dev, status); |
||
7774 | + ecode = -EIO; |
||
7775 | + } |
||
7776 | + |
||
7777 | + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); |
||
7778 | + switch_buf(state); |
||
7779 | + qi_cache_free(edesc); |
||
7780 | + |
||
7781 | +#ifdef DEBUG |
||
7782 | + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ", |
||
7783 | + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
||
7784 | + ctx->ctx_len, 1); |
||
7785 | + if (req->result) |
||
7786 | + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ", |
||
7787 | + DUMP_PREFIX_ADDRESS, 16, 4, req->result, |
||
7788 | + digestsize, 1); |
||
7789 | +#endif |
||
7790 | + |
||
7791 | + req->base.complete(&req->base, ecode); |
||
7792 | +} |
||
7793 | + |
||
7794 | +static void ahash_done_ctx_src(void *cbk_ctx, u32 status) |
||
7795 | +{ |
||
7796 | + struct crypto_async_request *areq = cbk_ctx; |
||
7797 | + struct ahash_request *req = ahash_request_cast(areq); |
||
7798 | + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
||
7799 | + struct caam_hash_state *state = ahash_request_ctx(req); |
||
7800 | + struct ahash_edesc *edesc = state->caam_req.edesc; |
||
7801 | + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
||
7802 | + int digestsize = crypto_ahash_digestsize(ahash); |
||
7803 | + int ecode = 0; |
||
7804 | + |
||
7805 | +#ifdef DEBUG |
||
7806 | + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
||
7807 | +#endif |
||
7808 | + |
||
7809 | + if (unlikely(status)) { |
||
7810 | + caam_qi2_strstatus(ctx->dev, status); |
||
7811 | + ecode = -EIO; |
||
7812 | + } |
||
7813 | + |
||
7814 | + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE); |
||
7815 | + qi_cache_free(edesc); |
||
7816 | + |
||
7817 | +#ifdef DEBUG |
||
7818 | + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ", |
||
7819 | + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
||
7820 | + ctx->ctx_len, 1); |
||
7821 | + if (req->result) |
||
7822 | + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ", |
||
7823 | + DUMP_PREFIX_ADDRESS, 16, 4, req->result, |
||
7824 | + digestsize, 1); |
||
7825 | +#endif |
||
7826 | + |
||
7827 | + req->base.complete(&req->base, ecode); |
||
7828 | +} |
||
7829 | + |
||
7830 | +static void ahash_done_ctx_dst(void *cbk_ctx, u32 status) |
||
7831 | +{ |
||
7832 | + struct crypto_async_request *areq = cbk_ctx; |
||
7833 | + struct ahash_request *req = ahash_request_cast(areq); |
||
7834 | + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
||
7835 | + struct caam_hash_state *state = ahash_request_ctx(req); |
||
7836 | + struct ahash_edesc *edesc = state->caam_req.edesc; |
||
7837 | + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
||
7838 | + int ecode = 0; |
||
7839 | +#ifdef DEBUG |
||
7840 | + int digestsize = crypto_ahash_digestsize(ahash); |
||
7841 | + |
||
7842 | + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); |
||
7843 | +#endif |
||
7844 | + |
||
7845 | + if (unlikely(status)) { |
||
7846 | + caam_qi2_strstatus(ctx->dev, status); |
||
7847 | + ecode = -EIO; |
||
7848 | + } |
||
7849 | + |
||
7850 | + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); |
||
7851 | + switch_buf(state); |
||
7852 | + qi_cache_free(edesc); |
||
7853 | + |
||
7854 | +#ifdef DEBUG |
||
7855 | + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ", |
||
7856 | + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
||
7857 | + ctx->ctx_len, 1); |
||
7858 | + if (req->result) |
||
7859 | + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ", |
||
7860 | + DUMP_PREFIX_ADDRESS, 16, 4, req->result, |
||
7861 | + digestsize, 1); |
||
7862 | +#endif |
||
7863 | + |
||
7864 | + req->base.complete(&req->base, ecode); |
||
7865 | +} |
||
7866 | + |
||
7867 | +static int ahash_update_ctx(struct ahash_request *req) |
||
7868 | +{ |
||
7869 | + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
||
7870 | + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
||
7871 | + struct caam_hash_state *state = ahash_request_ctx(req); |
||
7872 | + struct caam_request *req_ctx = &state->caam_req; |
||
7873 | + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; |
||
7874 | + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; |
||
7875 | + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
||
7876 | + GFP_KERNEL : GFP_ATOMIC; |
||
7877 | + u8 *buf = current_buf(state); |
||
7878 | + int *buflen = current_buflen(state); |
||
7879 | + u8 *next_buf = alt_buf(state); |
||
7880 | + int *next_buflen = alt_buflen(state), last_buflen; |
||
7881 | + int in_len = *buflen + req->nbytes, to_hash; |
||
7882 | + int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index; |
||
7883 | + struct ahash_edesc *edesc; |
||
7884 | + int ret = 0; |
||
7885 | + |
||
7886 | + last_buflen = *next_buflen; |
||
7887 | + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); |
||
7888 | + to_hash = in_len - *next_buflen; |
||
7889 | + |
||
7890 | + if (to_hash) { |
||
7891 | + struct dpaa2_sg_entry *sg_table; |
||
7892 | + |
||
7893 | + src_nents = sg_nents_for_len(req->src, |
||
7894 | + req->nbytes - (*next_buflen)); |
||
7895 | + if (src_nents < 0) { |
||
7896 | + dev_err(ctx->dev, "Invalid number of src SG.\n"); |
||
7897 | + return src_nents; |
||
7898 | + } |
||
7899 | + |
||
7900 | + if (src_nents) { |
||
7901 | + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, |
||
7902 | + DMA_TO_DEVICE); |
||
7903 | + if (!mapped_nents) { |
||
7904 | + dev_err(ctx->dev, "unable to DMA map source\n"); |
||
7905 | + return -ENOMEM; |
||
7906 | + } |
||
7907 | + } else { |
||
7908 | + mapped_nents = 0; |
||
7909 | + } |
||
7910 | + |
||
7911 | + /* allocate space for base edesc and link tables */ |
||
7912 | + edesc = qi_cache_zalloc(GFP_DMA | flags); |
||
7913 | + if (!edesc) { |
||
7914 | + dma_unmap_sg(ctx->dev, req->src, src_nents, |
||
7915 | + DMA_TO_DEVICE); |
||
7916 | + return -ENOMEM; |
||
7917 | + } |
||
7918 | + |
||
7919 | + edesc->src_nents = src_nents; |
||
7920 | + qm_sg_src_index = 1 + (*buflen ? 1 : 0); |
||
7921 | + qm_sg_bytes = (qm_sg_src_index + mapped_nents) * |
||
7922 | + sizeof(*sg_table); |
||
7923 | + sg_table = &edesc->sgt[0]; |
||
7924 | + |
||
7925 | + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, |
||
7926 | + DMA_BIDIRECTIONAL); |
||
7927 | + if (ret) |
||
7928 | + goto unmap_ctx; |
||
7929 | + |
||
7930 | + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); |
||
7931 | + if (ret) |
||
7932 | + goto unmap_ctx; |
||
7933 | + |
||
7934 | + if (mapped_nents) { |
||
7935 | + sg_to_qm_sg_last(req->src, mapped_nents, |
||
7936 | + sg_table + qm_sg_src_index, 0); |
||
7937 | + if (*next_buflen) |
||
7938 | + scatterwalk_map_and_copy(next_buf, req->src, |
||
7939 | + to_hash - *buflen, |
||
7940 | + *next_buflen, 0); |
||
7941 | + } else { |
||
7942 | + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, |
||
7943 | + true); |
||
7944 | + } |
||
7945 | + |
||
7946 | + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, |
||
7947 | + qm_sg_bytes, DMA_TO_DEVICE); |
||
7948 | + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { |
||
7949 | + dev_err(ctx->dev, "unable to map S/G table\n"); |
||
7950 | + ret = -ENOMEM; |
||
7951 | + goto unmap_ctx; |
||
7952 | + } |
||
7953 | + edesc->qm_sg_bytes = qm_sg_bytes; |
||
7954 | + |
||
7955 | + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); |
||
7956 | + dpaa2_fl_set_final(in_fle, true); |
||
7957 | + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); |
||
7958 | + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); |
||
7959 | + dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash); |
||
7960 | + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); |
||
7961 | + dpaa2_fl_set_addr(out_fle, state->ctx_dma); |
||
7962 | + dpaa2_fl_set_len(out_fle, ctx->ctx_len); |
||
7963 | + |
||
7964 | + req_ctx->flc = &ctx->flc[UPDATE]; |
||
7965 | + req_ctx->flc_dma = ctx->flc_dma[UPDATE]; |
||
7966 | + req_ctx->cbk = ahash_done_bi; |
||
7967 | + req_ctx->ctx = &req->base; |
||
7968 | + req_ctx->edesc = edesc; |
||
7969 | + |
||
7970 | + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); |
||
7971 | + if (ret != -EINPROGRESS && |
||
7972 | + !(ret == -EBUSY && |
||
7973 | + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
||
7974 | + goto unmap_ctx; |
||
7975 | + } else if (*next_buflen) { |
||
7976 | + scatterwalk_map_and_copy(buf + *buflen, req->src, 0, |
||
7977 | + req->nbytes, 0); |
||
7978 | + *buflen = *next_buflen; |
||
7979 | + *next_buflen = last_buflen; |
||
7980 | + } |
||
7981 | +#ifdef DEBUG |
||
7982 | + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ", |
||
7983 | + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); |
||
7984 | + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ", |
||
7985 | + DUMP_PREFIX_ADDRESS, 16, 4, next_buf, |
||
7986 | + *next_buflen, 1); |
||
7987 | +#endif |
||
7988 | + |
||
7989 | + return ret; |
||
7990 | +unmap_ctx: |
||
7991 | + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); |
||
7992 | + qi_cache_free(edesc); |
||
7993 | + return ret; |
||
7994 | +} |
||
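/*
 * Editor's worked example of the split above, assuming a 64-byte block
 * size (e.g. sha256): with 10 bytes already buffered and a 100-byte
 * update, in_len = 110, *next_buflen = 110 & 63 = 46 and to_hash = 64.
 * The first 64 bytes are hashed by the accelerator; the remaining 46 are
 * copied into the alternate buffer for the next update/final call.
 */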
7995 | + |
||
7996 | +static int ahash_final_ctx(struct ahash_request *req) |
||
7997 | +{ |
||
7998 | + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
||
7999 | + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
||
8000 | + struct caam_hash_state *state = ahash_request_ctx(req); |
||
8001 | + struct caam_request *req_ctx = &state->caam_req; |
||
8002 | + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; |
||
8003 | + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; |
||
8004 | + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
||
8005 | + GFP_KERNEL : GFP_ATOMIC; |
||
8006 | + int buflen = *current_buflen(state); |
||
8007 | + int qm_sg_bytes, qm_sg_src_index; |
||
8008 | + int digestsize = crypto_ahash_digestsize(ahash); |
||
8009 | + struct ahash_edesc *edesc; |
||
8010 | + struct dpaa2_sg_entry *sg_table; |
||
8011 | + int ret; |
||
8012 | + |
||
8013 | + /* allocate space for base edesc and link tables */ |
||
8014 | + edesc = qi_cache_zalloc(GFP_DMA | flags); |
||
8015 | + if (!edesc) |
||
8016 | + return -ENOMEM; |
||
8017 | + |
||
8018 | + qm_sg_src_index = 1 + (buflen ? 1 : 0); |
||
8019 | + qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table); |
||
8020 | + sg_table = &edesc->sgt[0]; |
||
8021 | + |
||
8022 | + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, |
||
8023 | + DMA_TO_DEVICE); |
||
8024 | + if (ret) |
||
8025 | + goto unmap_ctx; |
||
8026 | + |
||
8027 | + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); |
||
8028 | + if (ret) |
||
8029 | + goto unmap_ctx; |
||
8030 | + |
||
8031 | + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true); |
||
8032 | + |
||
8033 | + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, |
||
8034 | + DMA_TO_DEVICE); |
||
8035 | + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { |
||
8036 | + dev_err(ctx->dev, "unable to map S/G table\n"); |
||
8037 | + ret = -ENOMEM; |
||
8038 | + goto unmap_ctx; |
||
8039 | + } |
||
8040 | + edesc->qm_sg_bytes = qm_sg_bytes; |
||
8041 | + |
||
8042 | + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize, |
||
8043 | + DMA_FROM_DEVICE); |
||
8044 | + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) { |
||
8045 | + dev_err(ctx->dev, "unable to map dst\n"); |
||
8046 | + edesc->dst_dma = 0; |
||
8047 | + ret = -ENOMEM; |
||
8048 | + goto unmap_ctx; |
||
8049 | + } |
||
8050 | + |
||
8051 | + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); |
||
8052 | + dpaa2_fl_set_final(in_fle, true); |
||
8053 | + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); |
||
8054 | + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); |
||
8055 | + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen); |
||
8056 | + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); |
||
8057 | + dpaa2_fl_set_addr(out_fle, edesc->dst_dma); |
||
8058 | + dpaa2_fl_set_len(out_fle, digestsize); |
||
8059 | + |
||
8060 | + req_ctx->flc = &ctx->flc[FINALIZE]; |
||
8061 | + req_ctx->flc_dma = ctx->flc_dma[FINALIZE]; |
||
8062 | + req_ctx->cbk = ahash_done_ctx_src; |
||
8063 | + req_ctx->ctx = &req->base; |
||
8064 | + req_ctx->edesc = edesc; |
||
8065 | + |
||
8066 | + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); |
||
8067 | + if (ret == -EINPROGRESS || |
||
8068 | + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
||
8069 | + return ret; |
||
8070 | + |
||
8071 | +unmap_ctx: |
||
8072 | + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE); |
||
8073 | + qi_cache_free(edesc); |
||
8074 | + return ret; |
||
8075 | +} |
||
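/*
 * Editor's sketch (hypothetical helper): every request path in this file
 * follows the same completion contract after dpaa2_caam_enqueue() --
 * -EINPROGRESS (or -EBUSY for a backlog-capable request) means the
 * callback now owns cleanup; anything else means the caller must unmap
 * and free itself, as the unmap_ctx label above does.
 */
static bool __maybe_unused caam_req_pending(int ret,
					    struct crypto_async_request *base)
{
	return ret == -EINPROGRESS ||
	       (ret == -EBUSY && (base->flags & CRYPTO_TFM_REQ_MAY_BACKLOG));
}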
8076 | + |
||
8077 | +static int ahash_finup_ctx(struct ahash_request *req) |
||
8078 | +{ |
||
8079 | + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
||
8080 | + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
||
8081 | + struct caam_hash_state *state = ahash_request_ctx(req); |
||
8082 | + struct caam_request *req_ctx = &state->caam_req; |
||
8083 | + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; |
||
8084 | + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; |
||
8085 | + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
||
8086 | + GFP_KERNEL : GFP_ATOMIC; |
||
8087 | + int buflen = *current_buflen(state); |
||
8088 | + int qm_sg_bytes, qm_sg_src_index; |
||
8089 | + int src_nents, mapped_nents; |
||
8090 | + int digestsize = crypto_ahash_digestsize(ahash); |
||
8091 | + struct ahash_edesc *edesc; |
||
8092 | + struct dpaa2_sg_entry *sg_table; |
||
8093 | + int ret; |
||
8094 | + |
||
8095 | + src_nents = sg_nents_for_len(req->src, req->nbytes); |
||
8096 | + if (src_nents < 0) { |
||
8097 | + dev_err(ctx->dev, "Invalid number of src SG.\n"); |
||
8098 | + return src_nents; |
||
8099 | + } |
||
8100 | + |
||
8101 | + if (src_nents) { |
||
8102 | + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, |
||
8103 | + DMA_TO_DEVICE); |
||
8104 | + if (!mapped_nents) { |
||
8105 | + dev_err(ctx->dev, "unable to DMA map source\n"); |
||
8106 | + return -ENOMEM; |
||
8107 | + } |
||
8108 | + } else { |
||
8109 | + mapped_nents = 0; |
||
8110 | + } |
||
8111 | + |
||
8112 | + /* allocate space for base edesc and link tables */ |
||
8113 | + edesc = qi_cache_zalloc(GFP_DMA | flags); |
||
8114 | + if (!edesc) { |
||
8115 | + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); |
||
8116 | + return -ENOMEM; |
||
8117 | + } |
||
8118 | + |
||
8119 | + edesc->src_nents = src_nents; |
||
8120 | + qm_sg_src_index = 1 + (buflen ? 1 : 0); |
||
8121 | + qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table); |
||
8122 | + sg_table = &edesc->sgt[0]; |
||
8123 | + |
||
8124 | + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, |
||
8125 | + DMA_TO_DEVICE); |
||
8126 | + if (ret) |
||
8127 | + goto unmap_ctx; |
||
8128 | + |
||
8129 | + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); |
||
8130 | + if (ret) |
||
8131 | + goto unmap_ctx; |
||
8132 | + |
||
8133 | + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0); |
||
8134 | + |
||
8135 | + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, |
||
8136 | + DMA_TO_DEVICE); |
||
8137 | + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { |
||
8138 | + dev_err(ctx->dev, "unable to map S/G table\n"); |
||
8139 | + ret = -ENOMEM; |
||
8140 | + goto unmap_ctx; |
||
8141 | + } |
||
8142 | + edesc->qm_sg_bytes = qm_sg_bytes; |
||
8143 | + |
||
8144 | + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize, |
||
8145 | + DMA_FROM_DEVICE); |
||
8146 | + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) { |
||
8147 | + dev_err(ctx->dev, "unable to map dst\n"); |
||
8148 | + edesc->dst_dma = 0; |
||
8149 | + ret = -ENOMEM; |
||
8150 | + goto unmap_ctx; |
||
8151 | + } |
||
8152 | + |
||
8153 | + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); |
||
8154 | + dpaa2_fl_set_final(in_fle, true); |
||
8155 | + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); |
||
8156 | + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); |
||
8157 | + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes); |
||
8158 | + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); |
||
8159 | + dpaa2_fl_set_addr(out_fle, edesc->dst_dma); |
||
8160 | + dpaa2_fl_set_len(out_fle, digestsize); |
||
8161 | + |
||
8162 | + req_ctx->flc = &ctx->flc[FINALIZE]; |
||
8163 | + req_ctx->flc_dma = ctx->flc_dma[FINALIZE]; |
||
8164 | + req_ctx->cbk = ahash_done_ctx_src; |
||
8165 | + req_ctx->ctx = &req->base; |
||
8166 | + req_ctx->edesc = edesc; |
||
8167 | + |
||
8168 | + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); |
||
8169 | + if (ret == -EINPROGRESS || |
||
8170 | + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
||
8171 | + return ret; |
||
8172 | + |
||
8173 | +unmap_ctx: |
||
8174 | + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE); |
||
8175 | + qi_cache_free(edesc); |
||
8176 | + return ret; |
||
8177 | +} |
||
8178 | + |
||
8179 | +static int ahash_digest(struct ahash_request *req) |
||
8180 | +{ |
||
8181 | + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
||
8182 | + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
||
8183 | + struct caam_hash_state *state = ahash_request_ctx(req); |
||
8184 | + struct caam_request *req_ctx = &state->caam_req; |
||
8185 | + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; |
||
8186 | + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; |
||
8187 | + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
||
8188 | + GFP_KERNEL : GFP_ATOMIC; |
||
8189 | + int digestsize = crypto_ahash_digestsize(ahash); |
||
8190 | + int src_nents, mapped_nents; |
||
8191 | + struct ahash_edesc *edesc; |
||
8192 | + int ret = -ENOMEM; |
||
8193 | + |
||
8194 | + state->buf_dma = 0; |
||
8195 | + |
||
8196 | + src_nents = sg_nents_for_len(req->src, req->nbytes); |
||
8197 | + if (src_nents < 0) { |
||
8198 | + dev_err(ctx->dev, "Invalid number of src SG.\n"); |
||
8199 | + return src_nents; |
||
8200 | + } |
||
8201 | + |
||
8202 | + if (src_nents) { |
||
8203 | + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, |
||
8204 | + DMA_TO_DEVICE); |
||
8205 | + if (!mapped_nents) { |
||
8206 | + dev_err(ctx->dev, "unable to map source for DMA\n"); |
||
8207 | + return ret; |
||
8208 | + } |
||
8209 | + } else { |
||
8210 | + mapped_nents = 0; |
||
8211 | + } |
||
8212 | + |
||
8213 | + /* allocate space for base edesc and link tables */ |
||
8214 | + edesc = qi_cache_zalloc(GFP_DMA | flags); |
||
8215 | + if (!edesc) { |
||
8216 | + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); |
||
8217 | + return ret; |
||
8218 | + } |
||
8219 | + |
||
8220 | + edesc->src_nents = src_nents; |
||
8221 | + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); |
||
8222 | + |
||
8223 | + if (mapped_nents > 1) { |
||
8224 | + int qm_sg_bytes; |
||
8225 | + struct dpaa2_sg_entry *sg_table = &edesc->sgt[0]; |
||
8226 | + |
||
8227 | + qm_sg_bytes = mapped_nents * sizeof(*sg_table); |
||
8228 | + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0); |
||
8229 | + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, |
||
8230 | + qm_sg_bytes, DMA_TO_DEVICE); |
||
8231 | + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { |
||
8232 | + dev_err(ctx->dev, "unable to map S/G table\n"); |
||
8233 | + goto unmap; |
||
8234 | + } |
||
8235 | + edesc->qm_sg_bytes = qm_sg_bytes; |
||
8236 | + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); |
||
8237 | + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); |
||
8238 | + } else { |
||
8239 | + dpaa2_fl_set_format(in_fle, dpaa2_fl_single); |
||
8240 | + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src)); |
||
8241 | + } |
||
8242 | + |
||
8243 | + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize, |
||
8244 | + DMA_FROM_DEVICE); |
||
8245 | + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) { |
||
8246 | + dev_err(ctx->dev, "unable to map dst\n"); |
||
8247 | + edesc->dst_dma = 0; |
||
8248 | + goto unmap; |
||
8249 | + } |
||
8250 | + |
||
8251 | + dpaa2_fl_set_final(in_fle, true); |
||
8252 | + dpaa2_fl_set_len(in_fle, req->nbytes); |
||
8253 | + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); |
||
8254 | + dpaa2_fl_set_addr(out_fle, edesc->dst_dma); |
||
8255 | + dpaa2_fl_set_len(out_fle, digestsize); |
||
8256 | + |
||
8257 | + req_ctx->flc = &ctx->flc[DIGEST]; |
||
8258 | + req_ctx->flc_dma = ctx->flc_dma[DIGEST]; |
||
8259 | + req_ctx->cbk = ahash_done; |
||
8260 | + req_ctx->ctx = &req->base; |
||
8261 | + req_ctx->edesc = edesc; |
||
8262 | + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); |
||
8263 | + if (ret == -EINPROGRESS || |
||
8264 | + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
||
8265 | + return ret; |
||
8266 | + |
||
8267 | +unmap: |
||
8268 | + ahash_unmap(ctx->dev, edesc, req, digestsize); |
||
8269 | + qi_cache_free(edesc); |
||
8270 | + return ret; |
||
8271 | +} |
||
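/*
 * Editor's distillation (hypothetical helper) of the input frame-list
 * format choice made in ahash_digest() above: a single contiguous mapping
 * is passed to hardware directly, anything scattered goes through a QMan
 * S/G table that must itself be DMA-mapped.
 */
static void __maybe_unused set_in_fle_fmt(struct dpaa2_fl_entry *in_fle,
					  struct ahash_edesc *edesc,
					  struct scatterlist *src,
					  int mapped_nents)
{
	if (mapped_nents > 1) {
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	} else {
		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(in_fle, sg_dma_address(src));
	}
}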
8272 | + |
||
8273 | +static int ahash_final_no_ctx(struct ahash_request *req) |
||
8274 | +{ |
||
8275 | + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
||
8276 | + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
||
8277 | + struct caam_hash_state *state = ahash_request_ctx(req); |
||
8278 | + struct caam_request *req_ctx = &state->caam_req; |
||
8279 | + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; |
||
8280 | + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; |
||
8281 | + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
||
8282 | + GFP_KERNEL : GFP_ATOMIC; |
||
8283 | + u8 *buf = current_buf(state); |
||
8284 | + int buflen = *current_buflen(state); |
||
8285 | + int digestsize = crypto_ahash_digestsize(ahash); |
||
8286 | + struct ahash_edesc *edesc; |
||
8287 | + int ret = -ENOMEM; |
||
8288 | + |
||
8289 | + /* allocate space for base edesc and link tables */ |
||
8290 | + edesc = qi_cache_zalloc(GFP_DMA | flags); |
||
8291 | + if (!edesc) |
||
8292 | + return ret; |
||
8293 | + |
||
8294 | + state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE); |
||
8295 | + if (dma_mapping_error(ctx->dev, state->buf_dma)) { |
||
8296 | + dev_err(ctx->dev, "unable to map src\n"); |
||
8297 | + goto unmap; |
||
8298 | + } |
||
8299 | + |
||
8300 | + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize, |
||
8301 | + DMA_FROM_DEVICE); |
||
8302 | + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) { |
||
8303 | + dev_err(ctx->dev, "unable to map dst\n"); |
||
8304 | + edesc->dst_dma = 0; |
||
8305 | + goto unmap; |
||
8306 | + } |
||
8307 | + |
||
8308 | + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); |
||
8309 | + dpaa2_fl_set_final(in_fle, true); |
||
8310 | + dpaa2_fl_set_format(in_fle, dpaa2_fl_single); |
||
8311 | + dpaa2_fl_set_addr(in_fle, state->buf_dma); |
||
8312 | + dpaa2_fl_set_len(in_fle, buflen); |
||
8313 | + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); |
||
8314 | + dpaa2_fl_set_addr(out_fle, edesc->dst_dma); |
||
8315 | + dpaa2_fl_set_len(out_fle, digestsize); |
||
8316 | + |
||
8317 | + req_ctx->flc = &ctx->flc[DIGEST]; |
||
8318 | + req_ctx->flc_dma = ctx->flc_dma[DIGEST]; |
||
8319 | + req_ctx->cbk = ahash_done; |
||
8320 | + req_ctx->ctx = &req->base; |
||
8321 | + req_ctx->edesc = edesc; |
||
8322 | + |
||
8323 | + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); |
||
8324 | + if (ret == -EINPROGRESS || |
||
8325 | + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
||
8326 | + return ret; |
||
8327 | + |
||
8328 | +unmap: |
||
8329 | + ahash_unmap(ctx->dev, edesc, req, digestsize); |
||
8330 | + qi_cache_free(edesc); |
||
8331 | + return ret; |
||
8332 | +} |
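The enqueue calls above follow the kernel crypto ownership contract: on -EINPROGRESS (or -EBUSY with CRYPTO_TFM_REQ_MAY_BACKLOG set) the descriptor now belongs to the hardware path and must not be unmapped or freed by the submitter. A minimal stand-alone model of that contract, illustrative only (MAY_BACKLOG is a stand-in flag, everything else is hypothetical):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define MAY_BACKLOG 0x1	/* stand-in for CRYPTO_TFM_REQ_MAY_BACKLOG */

	/* Returns true when the request is owned by hardware/backlog and the
	 * caller must NOT run its unmap/free cleanup path. */
	static bool request_in_flight(int ret, unsigned int flags)
	{
		return ret == -EINPROGRESS ||
		       (ret == -EBUSY && (flags & MAY_BACKLOG));
	}

	int main(void)
	{
		printf("%d\n", request_in_flight(-EINPROGRESS, 0));	/* 1: queued */
		printf("%d\n", request_in_flight(-EBUSY, MAY_BACKLOG));	/* 1: backlogged */
		printf("%d\n", request_in_flight(-EBUSY, 0));		/* 0: caller cleans up */
		return 0;
	}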
+
+static int ahash_update_no_ctx(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct caam_request *req_ctx = &state->caam_req;
+	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		      GFP_KERNEL : GFP_ATOMIC;
+	u8 *buf = current_buf(state);
+	int *buflen = current_buflen(state);
+	u8 *next_buf = alt_buf(state);
+	int *next_buflen = alt_buflen(state);
+	int in_len = *buflen + req->nbytes, to_hash;
+	int qm_sg_bytes, src_nents, mapped_nents;
+	struct ahash_edesc *edesc;
+	int ret = 0;
+
+	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+	to_hash = in_len - *next_buflen;
+
+	if (to_hash) {
+		struct dpaa2_sg_entry *sg_table;
+
+		src_nents = sg_nents_for_len(req->src,
+					     req->nbytes - *next_buflen);
+		if (src_nents < 0) {
+			dev_err(ctx->dev, "Invalid number of src SG.\n");
+			return src_nents;
+		}
+
+		if (src_nents) {
+			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+						  DMA_TO_DEVICE);
+			if (!mapped_nents) {
+				dev_err(ctx->dev, "unable to DMA map source\n");
+				return -ENOMEM;
+			}
+		} else {
+			mapped_nents = 0;
+		}
+
+		/* allocate space for base edesc and link tables */
+		edesc = qi_cache_zalloc(GFP_DMA | flags);
+		if (!edesc) {
+			dma_unmap_sg(ctx->dev, req->src, src_nents,
+				     DMA_TO_DEVICE);
+			return -ENOMEM;
+		}
+
+		edesc->src_nents = src_nents;
+		qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
+		sg_table = &edesc->sgt[0];
+
+		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
+		if (ret)
+			goto unmap_ctx;
+
+		sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+
+		if (*next_buflen)
+			scatterwalk_map_and_copy(next_buf, req->src,
+						 to_hash - *buflen,
+						 *next_buflen, 0);
+
+		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+						  qm_sg_bytes, DMA_TO_DEVICE);
+		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+			dev_err(ctx->dev, "unable to map S/G table\n");
+			ret = -ENOMEM;
+			goto unmap_ctx;
+		}
+		edesc->qm_sg_bytes = qm_sg_bytes;
+
+		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
+						ctx->ctx_len, DMA_FROM_DEVICE);
+		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+			dev_err(ctx->dev, "unable to map ctx\n");
+			state->ctx_dma = 0;
+			ret = -ENOMEM;
+			goto unmap_ctx;
+		}
+
+		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+		dpaa2_fl_set_final(in_fle, true);
+		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+		dpaa2_fl_set_len(in_fle, to_hash);
+		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
+
+		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
+		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
+		req_ctx->cbk = ahash_done_ctx_dst;
+		req_ctx->ctx = &req->base;
+		req_ctx->edesc = edesc;
+
+		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+		if (ret != -EINPROGRESS &&
+		    !(ret == -EBUSY &&
+		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+			goto unmap_ctx;
+
+		state->update = ahash_update_ctx;
+		state->finup = ahash_finup_ctx;
+		state->final = ahash_final_ctx;
+	} else if (*next_buflen) {
+		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+					 req->nbytes, 0);
+		*buflen = *next_buflen;
+		*next_buflen = 0;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+	print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+		       *next_buflen, 1);
+#endif
+
+	return ret;
+unmap_ctx:
+	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+	qi_cache_free(edesc);
+	return ret;
+}
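The `*next_buflen` computation above relies on the hash block size being a power of two (true for MD5/SHA-1/SHA-2), so the sub-block tail can be isolated with a mask instead of a modulo. A stand-alone sketch of the same split, with made-up values for illustration:

	#include <assert.h>
	#include <stdio.h>

	/* Keep the partial final block in a bounce buffer, hash the rest. */
	static void split(int buflen, int nbytes, int blocksize,
			  int *to_hash, int *next_buflen)
	{
		int in_len = buflen + nbytes;	/* buffered tail + new data */

		*next_buflen = in_len & (blocksize - 1);
		*to_hash = in_len - *next_buflen;
	}

	int main(void)
	{
		int to_hash, next_buflen;

		split(10, 150, 64, &to_hash, &next_buflen);
		assert(to_hash == 128 && next_buflen == 32);
		printf("to_hash=%d next_buflen=%d\n", to_hash, next_buflen);
		return 0;
	}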
+
+static int ahash_finup_no_ctx(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct caam_request *req_ctx = &state->caam_req;
+	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		      GFP_KERNEL : GFP_ATOMIC;
+	int buflen = *current_buflen(state);
+	int qm_sg_bytes, src_nents, mapped_nents;
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct ahash_edesc *edesc;
+	struct dpaa2_sg_entry *sg_table;
+	int ret;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (src_nents < 0) {
+		dev_err(ctx->dev, "Invalid number of src SG.\n");
+		return src_nents;
+	}
+
+	if (src_nents) {
+		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+					  DMA_TO_DEVICE);
+		if (!mapped_nents) {
+			dev_err(ctx->dev, "unable to DMA map source\n");
+			return -ENOMEM;
+		}
+	} else {
+		mapped_nents = 0;
+	}
+
+	/* allocate space for base edesc and link tables */
+	edesc = qi_cache_zalloc(GFP_DMA | flags);
+	if (!edesc) {
+		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
+
+	edesc->src_nents = src_nents;
+	qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
+	sg_table = &edesc->sgt[0];
+
+	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
+	if (ret)
+		goto unmap;
+
+	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+
+	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
+					  DMA_TO_DEVICE);
+	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+		dev_err(ctx->dev, "unable to map S/G table\n");
+		ret = -ENOMEM;
+		goto unmap;
+	}
+	edesc->qm_sg_bytes = qm_sg_bytes;
+
+	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+					DMA_FROM_DEVICE);
+	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+		dev_err(ctx->dev, "unable to map dst\n");
+		edesc->dst_dma = 0;
+		ret = -ENOMEM;
+		goto unmap;
+	}
+
+	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+	dpaa2_fl_set_final(in_fle, true);
+	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
+	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+	dpaa2_fl_set_len(out_fle, digestsize);
+
+	req_ctx->flc = &ctx->flc[DIGEST];
+	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
+	req_ctx->cbk = ahash_done;
+	req_ctx->ctx = &req->base;
+	req_ctx->edesc = edesc;
+	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+	if (ret != -EINPROGRESS &&
+	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+		goto unmap;
+
+	return ret;
+unmap:
+	ahash_unmap(ctx->dev, edesc, req, digestsize);
+	qi_cache_free(edesc);
+	return -ENOMEM;
+}
+
+static int ahash_update_first(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct caam_request *req_ctx = &state->caam_req;
+	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		      GFP_KERNEL : GFP_ATOMIC;
+	u8 *next_buf = alt_buf(state);
+	int *next_buflen = alt_buflen(state);
+	int to_hash;
+	int src_nents, mapped_nents;
+	struct ahash_edesc *edesc;
+	int ret = 0;
+
+	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
+				      1);
+	to_hash = req->nbytes - *next_buflen;
+
+	if (to_hash) {
+		struct dpaa2_sg_entry *sg_table;
+
+		src_nents = sg_nents_for_len(req->src,
+					     req->nbytes - (*next_buflen));
+		if (src_nents < 0) {
+			dev_err(ctx->dev, "Invalid number of src SG.\n");
+			return src_nents;
+		}
+
+		if (src_nents) {
+			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+						  DMA_TO_DEVICE);
+			if (!mapped_nents) {
+				dev_err(ctx->dev, "unable to DMA map source\n");
+				return -ENOMEM;
+			}
+		} else {
+			mapped_nents = 0;
+		}
+
+		/* allocate space for base edesc and link tables */
+		edesc = qi_cache_zalloc(GFP_DMA | flags);
+		if (!edesc) {
+			dma_unmap_sg(ctx->dev, req->src, src_nents,
+				     DMA_TO_DEVICE);
+			return -ENOMEM;
+		}
+
+		edesc->src_nents = src_nents;
+		sg_table = &edesc->sgt[0];
+
+		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+		dpaa2_fl_set_final(in_fle, true);
+		dpaa2_fl_set_len(in_fle, to_hash);
+
+		if (mapped_nents > 1) {
+			int qm_sg_bytes;
+
+			sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+			qm_sg_bytes = mapped_nents * sizeof(*sg_table);
+			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+							  qm_sg_bytes,
+							  DMA_TO_DEVICE);
+			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+				dev_err(ctx->dev, "unable to map S/G table\n");
+				ret = -ENOMEM;
+				goto unmap_ctx;
+			}
+			edesc->qm_sg_bytes = qm_sg_bytes;
+			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+		} else {
+			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
+		}
+
+		if (*next_buflen)
+			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
+						 *next_buflen, 0);
+
+		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
+						ctx->ctx_len, DMA_FROM_DEVICE);
+		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+			dev_err(ctx->dev, "unable to map ctx\n");
+			state->ctx_dma = 0;
+			ret = -ENOMEM;
+			goto unmap_ctx;
+		}
+
+		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+		dpaa2_fl_set_len(out_fle, ctx->ctx_len);
+
+		req_ctx->flc = &ctx->flc[UPDATE_FIRST];
+		req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
+		req_ctx->cbk = ahash_done_ctx_dst;
+		req_ctx->ctx = &req->base;
+		req_ctx->edesc = edesc;
+
+		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+		if (ret != -EINPROGRESS &&
+		    !(ret == -EBUSY && req->base.flags &
+		      CRYPTO_TFM_REQ_MAY_BACKLOG))
+			goto unmap_ctx;
+
+		state->update = ahash_update_ctx;
+		state->finup = ahash_finup_ctx;
+		state->final = ahash_final_ctx;
+	} else if (*next_buflen) {
+		state->update = ahash_update_no_ctx;
+		state->finup = ahash_finup_no_ctx;
+		state->final = ahash_final_no_ctx;
+		scatterwalk_map_and_copy(next_buf, req->src, 0,
+					 req->nbytes, 0);
+		switch_buf(state);
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1);
+#endif
+
+	return ret;
+unmap_ctx:
+	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+	qi_cache_free(edesc);
+	return ret;
+}
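Note the zero-copy optimization in ahash_update_first: a single mapped scatterlist entry is referenced directly from the input frame-list entry, and a QMan S/G table is built only when more than one entry was mapped. A trivial stand-alone model of that decision (names hypothetical):

	#include <stdio.h>

	enum fl_format { FL_SINGLE, FL_SG };

	/* one DMA segment -> point at it directly; more -> build an S/G table */
	static enum fl_format pick_in_fle_format(int mapped_nents)
	{
		return mapped_nents > 1 ? FL_SG : FL_SINGLE;
	}

	int main(void)
	{
		printf("%s\n", pick_in_fle_format(1) == FL_SINGLE ? "single" : "sg");
		printf("%s\n", pick_in_fle_format(4) == FL_SG ? "sg" : "single");
		return 0;
	}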
+
+static int ahash_finup_first(struct ahash_request *req)
+{
+	return ahash_digest(req);
+}
+
+static int ahash_init(struct ahash_request *req)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	state->update = ahash_update_first;
+	state->finup = ahash_finup_first;
+	state->final = ahash_final_no_ctx;
+
+	state->ctx_dma = 0;
+	state->current_buf = 0;
+	state->buf_dma = 0;
+	state->buflen_0 = 0;
+	state->buflen_1 = 0;
+
+	return 0;
+}
+
+static int ahash_update(struct ahash_request *req)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	return state->update(req);
+}
+
+static int ahash_finup(struct ahash_request *req)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	return state->finup(req);
+}
+
+static int ahash_final(struct ahash_request *req)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	return state->final(req);
+}
+
+static int ahash_export(struct ahash_request *req, void *out)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct caam_export_state *export = out;
+	int len;
+	u8 *buf;
+
+	if (state->current_buf) {
+		buf = state->buf_1;
+		len = state->buflen_1;
+	} else {
+		buf = state->buf_0;
+		len = state->buflen_0;
+	}
+
+	memcpy(export->buf, buf, len);
+	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
+	export->buflen = len;
+	export->update = state->update;
+	export->final = state->final;
+	export->finup = state->finup;
+
+	return 0;
+}
+
+static int ahash_import(struct ahash_request *req, const void *in)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	const struct caam_export_state *export = in;
+
+	memset(state, 0, sizeof(*state));
+	memcpy(state->buf_0, export->buf, export->buflen);
+	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
+	state->buflen_0 = export->buflen;
+	state->update = export->update;
+	state->final = export->final;
+	state->finup = export->finup;
+
+	return 0;
+}
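Export serializes only the *active* bounce buffer, its length, the running CAAM context and the state-machine function pointers; import always restores into buf_0 and resets current_buf, so the double buffer collapses to a single buffer across a save/restore. A user-space model of that round trip (hypothetical sizes, illustrative only):

	#include <assert.h>
	#include <string.h>

	struct state { char buf0[64], buf1[64]; int len0, len1, cur; char ctx[32]; };
	struct saved { char buf[64]; int buflen; char ctx[32]; };

	static void do_export(const struct state *s, struct saved *e)
	{
		const char *buf = s->cur ? s->buf1 : s->buf0;	/* active buffer only */
		int len = s->cur ? s->len1 : s->len0;

		memcpy(e->buf, buf, len);
		memcpy(e->ctx, s->ctx, sizeof(e->ctx));
		e->buflen = len;
	}

	static void do_import(struct state *s, const struct saved *e)
	{
		memset(s, 0, sizeof(*s));		/* cur goes back to 0 */
		memcpy(s->buf0, e->buf, e->buflen);	/* always into buf_0 */
		memcpy(s->ctx, e->ctx, sizeof(s->ctx));
		s->len0 = e->buflen;
	}

	int main(void)
	{
		struct state s = { .buf1 = "tail", .len1 = 4, .cur = 1, .ctx = "run" };
		struct saved e;
		struct state t;

		do_export(&s, &e);
		do_import(&t, &e);
		assert(t.len0 == 4 && !t.cur && !memcmp(t.buf0, "tail", 4));
		return 0;
	}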
+
+struct caam_hash_template {
+	char name[CRYPTO_MAX_ALG_NAME];
+	char driver_name[CRYPTO_MAX_ALG_NAME];
+	char hmac_name[CRYPTO_MAX_ALG_NAME];
+	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int blocksize;
+	struct ahash_alg template_ahash;
+	u32 alg_type;
+};
+
+/* ahash descriptors */
+static struct caam_hash_template driver_hash[] = {
+	{
+		.name = "sha1",
+		.driver_name = "sha1-caam-qi2",
+		.hmac_name = "hmac(sha1)",
+		.hmac_driver_name = "hmac-sha1-caam-qi2",
+		.blocksize = SHA1_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA1_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_SHA1,
+	}, {
+		.name = "sha224",
+		.driver_name = "sha224-caam-qi2",
+		.hmac_name = "hmac(sha224)",
+		.hmac_driver_name = "hmac-sha224-caam-qi2",
+		.blocksize = SHA224_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA224_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_SHA224,
+	}, {
+		.name = "sha256",
+		.driver_name = "sha256-caam-qi2",
+		.hmac_name = "hmac(sha256)",
+		.hmac_driver_name = "hmac-sha256-caam-qi2",
+		.blocksize = SHA256_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA256_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_SHA256,
+	}, {
+		.name = "sha384",
+		.driver_name = "sha384-caam-qi2",
+		.hmac_name = "hmac(sha384)",
+		.hmac_driver_name = "hmac-sha384-caam-qi2",
+		.blocksize = SHA384_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA384_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_SHA384,
+	}, {
+		.name = "sha512",
+		.driver_name = "sha512-caam-qi2",
+		.hmac_name = "hmac(sha512)",
+		.hmac_driver_name = "hmac-sha512-caam-qi2",
+		.blocksize = SHA512_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA512_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_SHA512,
+	}, {
+		.name = "md5",
+		.driver_name = "md5-caam-qi2",
+		.hmac_name = "hmac(md5)",
+		.hmac_driver_name = "hmac-md5-caam-qi2",
+		.blocksize = MD5_BLOCK_WORDS * 4,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = MD5_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_MD5,
+	}
+};
+
+struct caam_hash_alg {
+	struct list_head entry;
+	struct device *dev;
+	int alg_type;
+	struct ahash_alg ahash_alg;
+};
+
+static int caam_hash_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct crypto_alg *base = tfm->__crt_alg;
+	struct hash_alg_common *halg =
+		 container_of(base, struct hash_alg_common, base);
+	struct ahash_alg *alg =
+		 container_of(halg, struct ahash_alg, halg);
+	struct caam_hash_alg *caam_hash =
+		 container_of(alg, struct caam_hash_alg, ahash_alg);
+	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
+	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
+					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
+					 HASH_MSG_LEN + 32,
+					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
+					 HASH_MSG_LEN + 64,
+					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+	dma_addr_t dma_addr;
+	int i;
+
+	ctx->dev = caam_hash->dev;
+
+	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
+					DMA_BIDIRECTIONAL,
+					DMA_ATTR_SKIP_CPU_SYNC);
+	if (dma_mapping_error(ctx->dev, dma_addr)) {
+		dev_err(ctx->dev, "unable to map shared descriptors\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < HASH_NUM_OP; i++)
+		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
+
+	/* copy descriptor header template value */
+	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+
+	ctx->ctx_len = runninglen[(ctx->adata.algtype &
+				   OP_ALG_ALGSEL_SUBMASK) >>
+				  OP_ALG_ALGSEL_SHIFT];
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct caam_hash_state));
+
+	return ahash_set_sh_desc(ahash);
+}
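The runninglen[] lookup works because the MD5..SHA512 ALGSEL sub-field values are consecutive, so shifting the masked field down yields a direct 0..5 index; SHA224 and SHA384 use the running-state sizes of their wider siblings. A stand-alone model of the lookup; the field offset/width and HASH_MSG_LEN value below are assumptions for illustration, not quotes from desc.h:

	#include <stdio.h>

	#define ALGSEL_SHIFT	16			/* assumed field position */
	#define ALGSEL_SUBMASK	(0x0f << ALGSEL_SHIFT)	/* assumed sub-field width */
	#define HASH_MSG_LEN	8			/* assumed message-length word */

	static const int runninglen[] = {
		HASH_MSG_LEN + 16,	/* MD5 */
		HASH_MSG_LEN + 20,	/* SHA1 */
		HASH_MSG_LEN + 32,	/* SHA224 (SHA256-sized running state) */
		HASH_MSG_LEN + 32,	/* SHA256 */
		HASH_MSG_LEN + 64,	/* SHA384 (SHA512-sized running state) */
		HASH_MSG_LEN + 64,	/* SHA512 */
	};

	int main(void)
	{
		unsigned int algtype = 0x3 << ALGSEL_SHIFT;	/* hypothetical SHA256 selector */

		printf("ctx_len=%d\n",
		       runninglen[(algtype & ALGSEL_SUBMASK) >> ALGSEL_SHIFT]);
		return 0;
	}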
+
+static void caam_hash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
+			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
+	struct caam_hash_template *template, bool keyed)
+{
+	struct caam_hash_alg *t_alg;
+	struct ahash_alg *halg;
+	struct crypto_alg *alg;
+
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+	if (!t_alg)
+		return ERR_PTR(-ENOMEM);
+
+	t_alg->ahash_alg = template->template_ahash;
+	halg = &t_alg->ahash_alg;
+	alg = &halg->halg.base;
+
+	if (keyed) {
+		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+			 template->hmac_name);
+		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+			 template->hmac_driver_name);
+	} else {
+		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+			 template->name);
+		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+			 template->driver_name);
+		t_alg->ahash_alg.setkey = NULL;
+	}
+	alg->cra_module = THIS_MODULE;
+	alg->cra_init = caam_hash_cra_init;
+	alg->cra_exit = caam_hash_cra_exit;
+	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+	alg->cra_priority = CAAM_CRA_PRIORITY;
+	alg->cra_blocksize = template->blocksize;
+	alg->cra_alignmask = 0;
+	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
+	alg->cra_type = &crypto_ahash_type;
+
+	t_alg->alg_type = template->alg_type;
+	t_alg->dev = dev;
+
+	return t_alg;
+}
+
+static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
+{
+	struct dpaa2_caam_priv_per_cpu *ppriv;
+
+	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
+	napi_schedule_irqoff(&ppriv->napi);
+}
+
+static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
+{
+	struct device *dev = priv->dev;
+	struct dpaa2_io_notification_ctx *nctx;
+	struct dpaa2_caam_priv_per_cpu *ppriv;
+	int err, i = 0, cpu;
+
+	for_each_online_cpu(cpu) {
+		ppriv = per_cpu_ptr(priv->ppriv, cpu);
+		ppriv->priv = priv;
+		nctx = &ppriv->nctx;
+		nctx->is_cdan = 0;
+		nctx->id = ppriv->rsp_fqid;
+		nctx->desired_cpu = cpu;
+		nctx->cb = dpaa2_caam_fqdan_cb;
+
+		/* Register notification callbacks */
+		err = dpaa2_io_service_register(NULL, nctx);
+		if (unlikely(err)) {
+			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
+			nctx->cb = NULL;
+			/*
+			 * If no affine DPIO for this core, there's probably
+			 * none available for next cores either. Signal we want
+			 * to retry later, in case the DPIO devices weren't
+			 * probed yet.
+			 */
+			err = -EPROBE_DEFER;
+			goto err;
+		}
+
+		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
+						     dev);
+		if (unlikely(!ppriv->store)) {
+			dev_err(dev, "dpaa2_io_store_create() failed\n");
+			err = -ENOMEM;
+			goto err;
+		}
+
+		if (++i == priv->num_pairs)
+			break;
+	}
+
+	return 0;
+
+err:
+	for_each_online_cpu(cpu) {
+		ppriv = per_cpu_ptr(priv->ppriv, cpu);
+		if (!ppriv->nctx.cb)
+			break;
+		dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+	}
+
+	for_each_online_cpu(cpu) {
+		ppriv = per_cpu_ptr(priv->ppriv, cpu);
+		if (!ppriv->store)
+			break;
+		dpaa2_io_store_destroy(ppriv->store);
+	}
+
+	return err;
+}
+
+static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
+{
+	struct dpaa2_caam_priv_per_cpu *ppriv;
+	int i = 0, cpu;
+
+	for_each_online_cpu(cpu) {
+		ppriv = per_cpu_ptr(priv->ppriv, cpu);
+		dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+		dpaa2_io_store_destroy(ppriv->store);
+
+		if (++i == priv->num_pairs)
+			return;
+	}
+}
+
+static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
+{
+	struct dpseci_rx_queue_cfg rx_queue_cfg;
+	struct device *dev = priv->dev;
+	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+	struct dpaa2_caam_priv_per_cpu *ppriv;
+	int err = 0, i = 0, cpu;
+
+	/* Configure Rx queues */
+	for_each_online_cpu(cpu) {
+		ppriv = per_cpu_ptr(priv->ppriv, cpu);
+
+		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
+				       DPSECI_QUEUE_OPT_USER_CTX;
+		rx_queue_cfg.order_preservation_en = 0;
+		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
+		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
+		/*
+		 * Rx priority (WQ) doesn't really matter, since we use
+		 * pull mode, i.e. volatile dequeues from specific FQs
+		 */
+		rx_queue_cfg.dest_cfg.priority = 0;
+		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
+
+		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+					  &rx_queue_cfg);
+		if (err) {
+			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
+				err);
+			return err;
+		}
+
+		if (++i == priv->num_pairs)
+			break;
+	}
+
+	return err;
+}
+
+static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
+{
+	struct device *dev = priv->dev;
+
+	if (!priv->cscn_mem)
+		return;
+
+	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+	kfree(priv->cscn_mem);
+}
+
+static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
+{
+	struct device *dev = priv->dev;
+	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+
+	dpaa2_dpseci_congestion_free(priv);
+	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
+}
+
+static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
+				  const struct dpaa2_fd *fd)
+{
+	struct caam_request *req;
+	u32 fd_err;
+
+	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
+		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
+		return;
+	}
+
+	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
+	if (unlikely(fd_err))
+		dev_err(priv->dev, "FD error: %08x\n", fd_err);
+
+	/*
+	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
+	 * in FD[ERR] or FD[FRC].
+	 */
+	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
+	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
+			 DMA_BIDIRECTIONAL);
+	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
+}
+
+static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
+{
+	int err;
+
+	/* Retry while portal is busy */
+	do {
+		err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
+					       ppriv->store);
+	} while (err == -EBUSY);
+
+	if (unlikely(err))
+		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n",
+			err);
+
+	return err;
+}
+
+static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
+{
+	struct dpaa2_dq *dq;
+	int cleaned = 0, is_last;
+
+	do {
+		dq = dpaa2_io_store_next(ppriv->store, &is_last);
+		if (unlikely(!dq)) {
+			if (unlikely(!is_last)) {
+				dev_dbg(ppriv->priv->dev,
+					"FQ %d returned no valid frames\n",
+					ppriv->rsp_fqid);
+				/*
+				 * MUST retry until we get some sort of
+				 * valid response token (be it "empty dequeue"
+				 * or a valid frame).
+				 */
+				continue;
+			}
+			break;
+		}
+
+		/* Process FD */
+		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
+		cleaned++;
+	} while (!is_last);
+
+	return cleaned;
+}
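The store-consume loop above distinguishes two kinds of NULL entries: "no token delivered yet" (keep polling) versus the "empty dequeue" terminator (FQ drained). A stand-alone model of that protocol, with a fake store standing in for the DPIO store (hypothetical, illustrative only):

	#include <stdio.h>

	struct dq { int valid; };

	/* pretend store: two frames, then the "empty dequeue" terminator */
	static struct dq frames[] = { {1}, {1}, {0} };
	static int pos, not_ready = 2;	/* first two polls return nothing */

	static struct dq *store_next(int *is_last)
	{
		if (not_ready) {		/* token not delivered yet */
			not_ready--;
			*is_last = 0;
			return NULL;
		}
		*is_last = (pos == 2);
		return frames[pos].valid ? &frames[pos++] : NULL;
	}

	int main(void)
	{
		int cleaned = 0, is_last;
		struct dq *dq;

		do {
			dq = store_next(&is_last);
			if (!dq) {
				if (!is_last)
					continue;	/* MUST retry until a token shows up */
				break;			/* FQ drained */
			}
			cleaned++;			/* process the frame */
		} while (!is_last);

		printf("cleaned=%d\n", cleaned);	/* 2 */
		return 0;
	}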
+
+static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
+{
+	struct dpaa2_caam_priv_per_cpu *ppriv;
+	struct dpaa2_caam_priv *priv;
+	int err, cleaned = 0, store_cleaned;
+
+	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
+	priv = ppriv->priv;
+
+	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
+		return 0;
+
+	do {
+		store_cleaned = dpaa2_caam_store_consume(ppriv);
+		cleaned += store_cleaned;
+
+		if (store_cleaned == 0 ||
+		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
+			break;
+
+		/* Try to dequeue some more */
+		err = dpaa2_caam_pull_fq(ppriv);
+		if (unlikely(err))
+			break;
+	} while (1);
+
+	if (cleaned < budget) {
+		napi_complete_done(napi, cleaned);
+		err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
+		if (unlikely(err))
+			dev_err(priv->dev, "Notification rearm failed: %d\n",
+				err);
+	}
+
+	return cleaned;
+}
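The budget guard above stops pulling once another full store (up to DPAA2_CAAM_STORE_SIZE frames) could push the total past the NAPI budget, and notifications are rearmed only when the poll finished under budget. A minimal model of that accounting (STORE_SIZE value assumed for illustration):

	#include <stdio.h>

	#define STORE_SIZE 16	/* assumed stand-in for DPAA2_CAAM_STORE_SIZE */

	/* mirrors the inverse of: break if cleaned > budget - STORE_SIZE */
	static int budget_allows_another_pull(int cleaned, int budget)
	{
		return cleaned <= budget - STORE_SIZE;
	}

	int main(void)
	{
		printf("%d\n", budget_allows_another_pull(0, 64));	/* 1: plenty left */
		printf("%d\n", budget_allows_another_pull(49, 64));	/* 0: 49+16 > 64 */
		return 0;
	}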
+
+static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
+					 u16 token)
+{
+	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
+	struct device *dev = priv->dev;
+	int err;
+
+	/*
+	 * Congestion group feature supported starting with DPSECI API v5.1
+	 * and only when object has been created with this capability.
+	 */
+	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
+	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
+		return 0;
+
+	priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
+				 GFP_KERNEL | GFP_DMA);
+	if (!priv->cscn_mem)
+		return -ENOMEM;
+
+	priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
+	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
+					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, priv->cscn_dma)) {
+		dev_err(dev, "Error mapping CSCN memory area\n");
+		err = -ENOMEM;
+		goto err_dma_map;
+	}
+
+	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
+	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
+	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
+	cong_notif_cfg.message_ctx = (u64)priv;
+	cong_notif_cfg.message_iova = priv->cscn_dma;
+	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
+					   DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
+					   DPSECI_CGN_MODE_COHERENT_WRITE;
+
+	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
+						 &cong_notif_cfg);
+	if (err) {
+		dev_err(dev, "dpseci_set_congestion_notification failed\n");
+		goto err_set_cong;
+	}
+
+	return 0;
+
+err_set_cong:
+	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+err_dma_map:
+	kfree(priv->cscn_mem);
+
+	return err;
+}
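The CSCN allocation above over-allocates by DPAA2_CSCN_ALIGN bytes so an aligned pointer inside the block can be handed to the hardware while the original pointer is kept for kfree(). A user-space sketch of the same PTR_ALIGN() trick (sizes/alignments below are assumptions for illustration):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define CSCN_SIZE  64	/* assumed size of the congestion state area */
	#define CSCN_ALIGN 16	/* assumed alignment required by hardware */

	int main(void)
	{
		void *raw = calloc(1, CSCN_SIZE + CSCN_ALIGN);
		/* same rounding PTR_ALIGN() performs in the kernel */
		void *aligned = (void *)(((uintptr_t)raw + CSCN_ALIGN - 1) &
					 ~(uintptr_t)(CSCN_ALIGN - 1));

		printf("raw=%p aligned=%p\n", raw, aligned);
		free(raw);	/* always free the original pointer */
		return 0;
	}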
+
+static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
+{
+	struct device *dev = &ls_dev->dev;
+	struct dpaa2_caam_priv *priv;
+	struct dpaa2_caam_priv_per_cpu *ppriv;
+	int err, cpu;
+	u8 i;
+
+	priv = dev_get_drvdata(dev);
+
+	priv->dev = dev;
+	priv->dpsec_id = ls_dev->obj_desc.id;
+
+	/* Get a handle for the DPSECI this interface is associated with */
+	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
+	if (err) {
+		dev_err(dev, "dpseci_open() failed: %d\n", err);
+		goto err_open;
+	}
+
+	dev_info(dev, "Opened dpseci object successfully\n");
+
+	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
+				     &priv->minor_ver);
+	if (err) {
+		dev_err(dev, "dpseci_get_api_version() failed\n");
+		goto err_get_vers;
+	}
+
+	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
+				    &priv->dpseci_attr);
+	if (err) {
+		dev_err(dev, "dpseci_get_attributes() failed\n");
+		goto err_get_vers;
+	}
+
+	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
+				  &priv->sec_attr);
+	if (err) {
+		dev_err(dev, "dpseci_get_sec_attr() failed\n");
+		goto err_get_vers;
+	}
+
+	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
+	if (err) {
+		dev_err(dev, "setup_congestion() failed\n");
+		goto err_get_vers;
+	}
+
+	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
+			      priv->dpseci_attr.num_tx_queues);
+	if (priv->num_pairs > num_online_cpus()) {
+		dev_warn(dev, "%d queues won't be used\n",
+			 priv->num_pairs - num_online_cpus());
+		priv->num_pairs = num_online_cpus();
+	}
+
+	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
+		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+					  &priv->rx_queue_attr[i]);
+		if (err) {
+			dev_err(dev, "dpseci_get_rx_queue() failed\n");
+			goto err_get_rx_queue;
+		}
+	}
+
+	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
+		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+					  &priv->tx_queue_attr[i]);
+		if (err) {
+			dev_err(dev, "dpseci_get_tx_queue() failed\n");
+			goto err_get_rx_queue;
+		}
+	}
+
+	i = 0;
+	for_each_online_cpu(cpu) {
+		dev_info(dev, "pair %d: rx queue %d, tx queue %d\n", i,
+			 priv->rx_queue_attr[i].fqid,
+			 priv->tx_queue_attr[i].fqid);
+
+		ppriv = per_cpu_ptr(priv->ppriv, cpu);
+		ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
+		ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
+		ppriv->prio = i;
+
+		ppriv->net_dev.dev = *dev;
+		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
+		netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
+			       DPAA2_CAAM_NAPI_WEIGHT);
+		if (++i == priv->num_pairs)
+			break;
+	}
+
+	return 0;
+
+err_get_rx_queue:
+	dpaa2_dpseci_congestion_free(priv);
+err_get_vers:
+	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
+err_open:
+	return err;
+}
+
+static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
+{
+	struct device *dev = priv->dev;
+	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+	struct dpaa2_caam_priv_per_cpu *ppriv;
+	int err, i;
+
+	for (i = 0; i < priv->num_pairs; i++) {
+		ppriv = per_cpu_ptr(priv->ppriv, i);
+		napi_enable(&ppriv->napi);
+	}
+
+	err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
+	if (err) {
+		dev_err(dev, "dpseci_enable() failed\n");
+		return err;
+	}
+
+	dev_info(dev, "DPSECI version %d.%d\n",
+		 priv->major_ver,
+		 priv->minor_ver);
+
+	return 0;
+}
+
+static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
+{
+	struct device *dev = priv->dev;
+	struct dpaa2_caam_priv_per_cpu *ppriv;
+	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+	int i, err = 0, enabled;
+
+	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
+	if (err) {
+		dev_err(dev, "dpseci_disable() failed\n");
+		return err;
+	}
+
+	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
+	if (err) {
+		dev_err(dev, "dpseci_is_enabled() failed\n");
+		return err;
+	}
+
+	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
+
+	for (i = 0; i < priv->num_pairs; i++) {
+		ppriv = per_cpu_ptr(priv->ppriv, i);
+		napi_disable(&ppriv->napi);
+		netif_napi_del(&ppriv->napi);
+	}
+
+	return 0;
+}
+
+static struct list_head hash_list;
+
+static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
+{
+	struct device *dev;
+	struct dpaa2_caam_priv *priv;
+	int i, err = 0;
+	bool registered = false;
+
+	/*
+	 * There is no way to get CAAM endianness - there is no direct register
+	 * space access and MC f/w does not provide this attribute.
+	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
+	 * property.
+	 */
+	caam_little_end = true;
+
+	caam_imx = false;
+
+	dev = &dpseci_dev->dev;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, priv);
+
+	priv->domain = iommu_get_domain_for_dev(dev);
+
+	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
+				     0, SLAB_CACHE_DMA, NULL);
+	if (!qi_cache) {
+		dev_err(dev, "Can't allocate SEC cache\n");
+		err = -ENOMEM;
+		goto err_qicache;
+	}
+
+	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
+	if (err) {
+		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
+		goto err_dma_mask;
+	}
+
+	/* Obtain a MC portal */
+	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
+	if (err) {
+		if (err == -ENXIO)
+			err = -EPROBE_DEFER;
+		else
+			dev_err(dev, "MC portal allocation failed\n");
+
+		goto err_dma_mask;
+	}
+
+	priv->ppriv = alloc_percpu(*priv->ppriv);
+	if (!priv->ppriv) {
+		dev_err(dev, "alloc_percpu() failed\n");
+		err = -ENOMEM;
+		goto err_alloc_ppriv;
+	}
+
+	/* DPSECI initialization */
+	err = dpaa2_dpseci_setup(dpseci_dev);
+	if (err < 0) {
+		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
+		goto err_dpseci_setup;
+	}
+
+	/* DPIO */
+	err = dpaa2_dpseci_dpio_setup(priv);
+	if (err) {
+		dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
+		goto err_dpio_setup;
+	}
+
+	/* DPSECI binding to DPIO */
+	err = dpaa2_dpseci_bind(priv);
+	if (err) {
+		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
+		goto err_bind;
+	}
+
+	/* DPSECI enable */
+	err = dpaa2_dpseci_enable(priv);
+	if (err) {
+		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
+		goto err_bind;
+	}
+
+	/* register crypto algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		struct caam_skcipher_alg *t_alg = driver_algs + i;
+		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!priv->sec_attr.des_acc_num &&
+		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
+		     (alg_sel == OP_ALG_ALGSEL_DES)))
+			continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!priv->sec_attr.aes_acc_num &&
+		    (alg_sel == OP_ALG_ALGSEL_AES))
+			continue;
+
+		t_alg->caam.dev = dev;
+		caam_skcipher_alg_init(t_alg);
+
+		err = crypto_register_skcipher(&t_alg->skcipher);
+		if (err) {
+			dev_warn(dev, "%s alg registration failed: %d\n",
+				 t_alg->skcipher.base.cra_driver_name, err);
+			continue;
+		}
+
+		t_alg->registered = true;
+		registered = true;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+		struct caam_aead_alg *t_alg = driver_aeads + i;
+		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+				 OP_ALG_ALGSEL_MASK;
+		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+				 OP_ALG_ALGSEL_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!priv->sec_attr.des_acc_num &&
+		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
+		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
+			continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!priv->sec_attr.aes_acc_num &&
+		    (c1_alg_sel == OP_ALG_ALGSEL_AES))
+			continue;
+
+		/*
+		 * Skip algorithms requiring message digests
+		 * if MD not supported by device.
+		 */
+		if (!priv->sec_attr.md_acc_num && c2_alg_sel)
+			continue;
+
+		t_alg->caam.dev = dev;
+		caam_aead_alg_init(t_alg);
+
+		err = crypto_register_aead(&t_alg->aead);
+		if (err) {
+			dev_warn(dev, "%s alg registration failed: %d\n",
+				 t_alg->aead.base.cra_driver_name, err);
+			continue;
+		}
+
+		t_alg->registered = true;
+		registered = true;
+	}
+	if (registered)
+		dev_info(dev, "algorithms registered in /proc/crypto\n");
+
+	/* register hash algorithms the device supports */
+	INIT_LIST_HEAD(&hash_list);
+
+	/*
+	 * Skip registration of any hashing algorithms if MD block
+	 * is not present.
+	 */
+	if (!priv->sec_attr.md_acc_num)
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
+		struct caam_hash_alg *t_alg;
+		struct caam_hash_template *alg = driver_hash + i;
+
+		/* register hmac version */
+		t_alg = caam_hash_alloc(dev, alg, true);
+		if (IS_ERR(t_alg)) {
+			err = PTR_ERR(t_alg);
+			dev_warn(dev, "%s hash alg allocation failed: %d\n",
+				 alg->driver_name, err);
+			continue;
+		}
+
+		err = crypto_register_ahash(&t_alg->ahash_alg);
+		if (err) {
+			dev_warn(dev, "%s alg registration failed: %d\n",
+				 t_alg->ahash_alg.halg.base.cra_driver_name,
+				 err);
+			kfree(t_alg);
+		} else {
+			list_add_tail(&t_alg->entry, &hash_list);
+		}
+
+		/* register unkeyed version */
+		t_alg = caam_hash_alloc(dev, alg, false);
+		if (IS_ERR(t_alg)) {
+			err = PTR_ERR(t_alg);
+			dev_warn(dev, "%s alg allocation failed: %d\n",
+				 alg->driver_name, err);
+			continue;
+		}
+
+		err = crypto_register_ahash(&t_alg->ahash_alg);
+		if (err) {
+			dev_warn(dev, "%s alg registration failed: %d\n",
+				 t_alg->ahash_alg.halg.base.cra_driver_name,
+				 err);
+			kfree(t_alg);
+		} else {
+			list_add_tail(&t_alg->entry, &hash_list);
+		}
+	}
+	if (!list_empty(&hash_list))
+		dev_info(dev, "hash algorithms registered in /proc/crypto\n");
+
+	return err;
+
+err_bind:
+	dpaa2_dpseci_dpio_free(priv);
+err_dpio_setup:
+	dpaa2_dpseci_free(priv);
+err_dpseci_setup:
+	free_percpu(priv->ppriv);
+err_alloc_ppriv:
+	fsl_mc_portal_free(priv->mc_io);
+err_dma_mask:
+	kmem_cache_destroy(qi_cache);
+err_qicache:
+	dev_set_drvdata(dev, NULL);
+
+	return err;
+}
+
+static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
+{
+	struct device *dev;
+	struct dpaa2_caam_priv *priv;
+	int i;
+
+	dev = &ls_dev->dev;
+	priv = dev_get_drvdata(dev);
+
+	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+		struct caam_aead_alg *t_alg = driver_aeads + i;
+
+		if (t_alg->registered)
+			crypto_unregister_aead(&t_alg->aead);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		struct caam_skcipher_alg *t_alg = driver_algs + i;
+
+		if (t_alg->registered)
+			crypto_unregister_skcipher(&t_alg->skcipher);
+	}
+
+	if (hash_list.next) {
+		struct caam_hash_alg *t_hash_alg, *p;
+
+		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
+			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
+			list_del(&t_hash_alg->entry);
+			kfree(t_hash_alg);
+		}
+	}
+
+	dpaa2_dpseci_disable(priv);
+	dpaa2_dpseci_dpio_free(priv);
+	dpaa2_dpseci_free(priv);
+	free_percpu(priv->ppriv);
+	fsl_mc_portal_free(priv->mc_io);
+	dev_set_drvdata(dev, NULL);
+	kmem_cache_destroy(qi_cache);
+
+	return 0;
+}
+
+int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
+{
+	struct dpaa2_fd fd;
+	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
+	int err = 0, i, id;
+
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	if (priv->cscn_mem) {
+		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
+					DPAA2_CSCN_SIZE,
+					DMA_FROM_DEVICE);
+		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
+			dev_dbg_ratelimited(dev, "Dropping request\n");
+			return -EBUSY;
+		}
+	}
+
+	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
+
+	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
+					 DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, req->fd_flt_dma)) {
+		dev_err(dev, "DMA mapping error for QI enqueue request\n");
+		goto err_out;
+	}
+
+	memset(&fd, 0, sizeof(fd));
+	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
+	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
+	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
+	dpaa2_fd_set_flc(&fd, req->flc_dma);
+
+	/*
+	 * There is no guarantee that preemption is disabled here,
+	 * thus take action.
+	 */
+	preempt_disable();
+	id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
+	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
+		err = dpaa2_io_service_enqueue_fq(NULL,
+						  priv->tx_queue_attr[id].fqid,
+						  &fd);
+		if (err != -EBUSY)
+			break;
+	}
+	preempt_enable();
+
+	if (unlikely(err < 0)) {
+		dev_err(dev, "Error enqueuing frame: %d\n", err);
+		goto err_out;
+	}
+
+	return -EINPROGRESS;
+
+err_out:
+	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
+			 DMA_BIDIRECTIONAL);
+	return -EIO;
+}
+EXPORT_SYMBOL(dpaa2_caam_enqueue);
||
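For reference, a minimal caller sketch for dpaa2_caam_enqueue(); the helper and callback names are invented for illustration, and the frame list entries plus the DMA-mapped flow context are assumed to have been set up beforehand:

	/*
	 * Hypothetical caller sketch (not part of this patch): fd_flt[0] and
	 * fd_flt[1] must already describe the output/input buffers, and
	 * flc/flc_dma must reference a DMA-mapped struct caam_flc holding
	 * the shared descriptor.
	 */
	static void my_req_done(void *ctx, u32 status)
	{
		/* called on completion; 'status' is the SEC error code (0 = OK) */
	}

	static int my_submit(struct device *dev, struct caam_request *req,
			     struct caam_flc *flc, dma_addr_t flc_dma)
	{
		int ret;

		req->flc = flc;
		req->flc_dma = flc_dma;
		req->cbk = my_req_done;
		req->ctx = req;		/* whatever the callback needs */

		ret = dpaa2_caam_enqueue(dev, req);
		/* -EINPROGRESS: queued; -EBUSY: congested; -EIO: enqueue failed */
		return ret == -EINPROGRESS ? 0 : ret;
	}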
9822 | + |
||
9823 | +const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = { |
||
9824 | + { |
||
9825 | + .vendor = FSL_MC_VENDOR_FREESCALE, |
||
9826 | + .obj_type = "dpseci", |
||
9827 | + }, |
||
9828 | + { .vendor = 0x0 } |
||
9829 | +}; |
||
9830 | + |
||
9831 | +static struct fsl_mc_driver dpaa2_caam_driver = { |
||
9832 | + .driver = { |
||
9833 | + .name = KBUILD_MODNAME, |
||
9834 | + .owner = THIS_MODULE, |
||
9835 | + }, |
||
9836 | + .probe = dpaa2_caam_probe, |
||
9837 | + .remove = dpaa2_caam_remove, |
||
9838 | + .match_id_table = dpaa2_caam_match_id_table |
||
9839 | +}; |
||
9840 | + |
||
9841 | +MODULE_LICENSE("Dual BSD/GPL"); |
||
9842 | +MODULE_AUTHOR("Freescale Semiconductor, Inc"); |
||
9843 | +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver"); |
||
9844 | + |
||
9845 | +module_fsl_mc_driver(dpaa2_caam_driver); |
||
9846 | --- /dev/null |
||
9847 | +++ b/drivers/crypto/caam/caamalg_qi2.h |
||
9848 | @@ -0,0 +1,274 @@ |
||
9849 | +/* |
||
9850 | + * Copyright 2015-2016 Freescale Semiconductor Inc. |
||
9851 | + * Copyright 2017 NXP |
||
9852 | + * |
||
9853 | + * Redistribution and use in source and binary forms, with or without |
||
9854 | + * modification, are permitted provided that the following conditions are met: |
||
9855 | + * * Redistributions of source code must retain the above copyright |
||
9856 | + * notice, this list of conditions and the following disclaimer. |
||
9857 | + * * Redistributions in binary form must reproduce the above copyright |
||
9858 | + * notice, this list of conditions and the following disclaimer in the |
||
9859 | + * documentation and/or other materials provided with the distribution. |
||
9860 | + * * Neither the names of the above-listed copyright holders nor the |
||
9861 | + * names of any contributors may be used to endorse or promote products |
||
9862 | + * derived from this software without specific prior written permission. |
||
9863 | + * |
||
9864 | + * |
||
9865 | + * ALTERNATIVELY, this software may be distributed under the terms of the |
||
9866 | + * GNU General Public License ("GPL") as published by the Free Software |
||
9867 | + * Foundation, either version 2 of that License or (at your option) any |
||
9868 | + * later version. |
||
9869 | + * |
||
9870 | + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
||
9871 | + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
||
9872 | + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
||
9873 | + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE |
||
9874 | + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
||
9875 | + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
||
9876 | + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
||
9877 | + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
||
9878 | + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
||
9879 | + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
||
9880 | + * POSSIBILITY OF SUCH DAMAGE. |
||
9881 | + */ |
||
9882 | + |
||
9883 | +#ifndef _CAAMALG_QI2_H_ |
||
9884 | +#define _CAAMALG_QI2_H_ |
||
9885 | + |
||
9886 | +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h" |
||
9887 | +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h" |
||
9888 | +#include <linux/threads.h> |
||
9889 | +#include "dpseci.h" |
||
9890 | +#include "desc_constr.h" |
||
9891 | + |
||
9892 | +#define DPAA2_CAAM_STORE_SIZE 16 |
||
9893 | +/* NAPI weight *must* be a multiple of the store size. */ |
||
9894 | +#define DPAA2_CAAM_NAPI_WEIGHT 64 |
||
9895 | + |
||
9896 | +/* The congestion entrance threshold was chosen so that on LS2088 |
||
9897 | + * we support the maximum throughput for the available memory |
||
9898 | + */ |
||
9899 | +#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024) |
||
9900 | +#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10) |
||
9901 | + |
||
9902 | +/** |
||
9903 | + * dpaa2_caam_priv - driver private data |
||
9904 | + * @dpsec_id: DPSECI object unique ID |
||
9905 | + * @major_ver: DPSECI major version |
||
9906 | + * @minor_ver: DPSECI minor version |
||
9907 | + * @dpseci_attr: DPSECI attributes |
||
9908 | + * @sec_attr: SEC engine attributes |
||
9909 | + * @rx_queue_attr: array of Rx queue attributes |
||
9910 | + * @tx_queue_attr: array of Tx queue attributes |
||
9911 | + * @cscn_mem: pointer to memory region containing the |
||
9912 | + * dpaa2_cscn struct; its size is larger than |
||
9913 | + * sizeof(struct dpaa2_cscn) to accommodate alignment |
||
9914 | + * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed |
||
9915 | + * as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN) |
||
9916 | + * @cscn_dma: dma address used by the QMAN to write CSCN messages |
||
9917 | + * @dev: device associated with the DPSECI object |
||
9918 | + * @mc_io: pointer to MC portal's I/O object |
||
9919 | + * @domain: IOMMU domain |
||
9920 | + * @ppriv: per CPU pointers to private data |
||
9921 | + */ |
||
9922 | +struct dpaa2_caam_priv { |
||
9923 | + int dpsec_id; |
||
9924 | + |
||
9925 | + u16 major_ver; |
||
9926 | + u16 minor_ver; |
||
9927 | + |
||
9928 | + struct dpseci_attr dpseci_attr; |
||
9929 | + struct dpseci_sec_attr sec_attr; |
||
9930 | + struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_MAX_QUEUE_NUM]; |
||
9931 | + struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_MAX_QUEUE_NUM]; |
||
9932 | + int num_pairs; |
||
9933 | + |
||
9934 | + /* congestion */ |
||
9935 | + void *cscn_mem; |
||
9936 | + void *cscn_mem_aligned; |
||
9937 | + dma_addr_t cscn_dma; |
||
9938 | + |
||
9939 | + struct device *dev; |
||
9940 | + struct fsl_mc_io *mc_io; |
||
9941 | + struct iommu_domain *domain; |
||
9942 | + |
||
9943 | + struct dpaa2_caam_priv_per_cpu __percpu *ppriv; |
||
9944 | +}; |
||
9945 | + |
||
9946 | +/** |
||
9947 | + * dpaa2_caam_priv_per_cpu - per CPU private data |
||
9948 | + * @napi: napi structure |
||
9949 | + * @net_dev: netdev used by napi |
||
9950 | + * @req_fqid: (virtual) request (Tx / enqueue) FQID |
||
9951 | + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID |
||
9952 | + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr |
||
9953 | + * @nctx: notification context of response FQ |
||
9954 | + * @store: where dequeued frames are stored |
||
9955 | + * @priv: backpointer to dpaa2_caam_priv |
||
9956 | + */ |
||
9957 | +struct dpaa2_caam_priv_per_cpu { |
||
9958 | + struct napi_struct napi; |
||
9959 | + struct net_device net_dev; |
||
9960 | + int req_fqid; |
||
9961 | + int rsp_fqid; |
||
9962 | + int prio; |
||
9963 | + struct dpaa2_io_notification_ctx nctx; |
||
9964 | + struct dpaa2_io_store *store; |
||
9965 | + struct dpaa2_caam_priv *priv; |
||
9966 | +}; |
||
9967 | + |
||
9968 | +/* |
||
9969 | + * The CAAM QI hardware constructs a job descriptor which points |
||
9970 | + * to the shared descriptor (pointed to by context_a of the FQ to CAAM). |
||
9971 | + * When the job descriptor is executed by the DECO, the whole job |
||
9972 | + * descriptor together with the shared descriptor gets loaded into the |
||
9973 | + * DECO buffer, which is 64 words long (each word 32 bits wide). |
||
9974 | + * |
||
9975 | + * The job descriptor constructed by QI hardware has layout: |
||
9976 | + * |
||
9977 | + * HEADER (1 word) |
||
9978 | + * Shdesc ptr (1 or 2 words) |
||
9979 | + * SEQ_OUT_PTR (1 word) |
||
9980 | + * Out ptr (1 or 2 words) |
||
9981 | + * Out length (1 word) |
||
9982 | + * SEQ_IN_PTR (1 word) |
||
9983 | + * In ptr (1 or 2 words) |
||
9984 | + * In length (1 word) |
||
9985 | + * |
||
9986 | + * The shdesc ptr is used to fetch shared descriptor contents |
||
9987 | + * into deco buffer. |
||
9988 | + * |
||
9989 | + * Apart from the shdesc contents, the total number of words that |
||
9990 | + * get loaded into the DECO buffer is 8 or 11 (depending on pointer |
||
9991 | + * size). The remaining words can be used for storing the shared descriptor. |
||
9992 | + */ |
||
9993 | +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ) |
||
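To make the "8 or 11 words" remark above concrete, a worked sizing example; the constant values are assumptions based on the usual CAAM definitions (64-word descriptor buffer, DESC_JOB_IO_LEN = 5 * CAAM_CMD_SZ + 3 * CAAM_PTR_SZ) and are not stated in this patch:

	/*
	 * Assumed values, for illustration only:
	 *   CAAM_DESC_BYTES_MAX = 64 words * 4 = 256 bytes
	 *   64-bit pointers: DESC_JOB_IO_LEN = 5*4 + 3*8 = 44 bytes (11 words)
	 *                    MAX_SDLEN = (256 - 44) / 4 = 53 words
	 *   32-bit pointers: DESC_JOB_IO_LEN = 5*4 + 3*4 = 32 bytes (8 words)
	 *                    MAX_SDLEN = (256 - 32) / 4 = 56 words
	 */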
9994 | + |
||
9995 | +/* Length of a single buffer in the QI driver memory cache */ |
||
9996 | +#define CAAM_QI_MEMCACHE_SIZE 512 |
||
9997 | + |
||
9998 | +/* |
||
9999 | + * aead_edesc - s/w-extended aead descriptor |
||
10000 | + * @src_nents: number of segments in input scatterlist |
||
10001 | + * @dst_nents: number of segments in output scatterlist |
||
10002 | + * @iv_dma: dma address of iv for checking continuity and link table |
||
10003 | + * @qm_sg_bytes: length of dma mapped h/w link table |
||
10004 | + * @qm_sg_dma: bus physical mapped address of h/w link table |
||
10005 | + * @assoclen: associated data length, in CAAM endianness |
||
10006 | + * @assoclen_dma: bus physical mapped address of req->assoclen |
||
10007 | + * @sgt: the h/w link table, followed by IV |
||
10008 | + */ |
||
10009 | +struct aead_edesc { |
||
10010 | + int src_nents; |
||
10011 | + int dst_nents; |
||
10012 | + dma_addr_t iv_dma; |
||
10013 | + int qm_sg_bytes; |
||
10014 | + dma_addr_t qm_sg_dma; |
||
10015 | + unsigned int assoclen; |
||
10016 | + dma_addr_t assoclen_dma; |
||
10017 | + struct dpaa2_sg_entry sgt[0]; |
||
10018 | +}; |
||
10019 | + |
||
10020 | +/* |
||
10021 | + * tls_edesc - s/w-extended tls descriptor |
||
10022 | + * @src_nents: number of segments in input scatterlist |
||
10023 | + * @dst_nents: number of segments in output scatterlist |
||
10024 | + * @iv_dma: dma address of iv for checking continuity and link table |
||
10025 | + * @qm_sg_bytes: length of dma mapped h/w link table |
||
10026 | + * @qm_sg_dma: bus physical mapped address of h/w link table |
||
10027 | + * @tmp: array of scatterlists used by 'scatterwalk_ffwd' |
||
10028 | + * @dst: pointer to output scatterlist, useful for unmapping |
||
10029 | + * @sgt: the h/w link table, followed by IV |
||
10030 | + */ |
||
10031 | +struct tls_edesc { |
||
10032 | + int src_nents; |
||
10033 | + int dst_nents; |
||
10034 | + dma_addr_t iv_dma; |
||
10035 | + int qm_sg_bytes; |
||
10036 | + dma_addr_t qm_sg_dma; |
||
10037 | + struct scatterlist tmp[2]; |
||
10038 | + struct scatterlist *dst; |
||
10039 | + struct dpaa2_sg_entry sgt[0]; |
||
10040 | +}; |
||
10041 | + |
||
10042 | +/* |
||
10043 | + * skcipher_edesc - s/w-extended skcipher descriptor |
||
10044 | + * @src_nents: number of segments in input scatterlist |
||
10045 | + * @dst_nents: number of segments in output scatterlist |
||
10046 | + * @iv_dma: dma address of iv for checking continuity and link table |
||
10047 | + * @qm_sg_bytes: length of dma mapped qm_sg space |
||
10048 | + * @qm_sg_dma: I/O virtual address of h/w link table |
||
10049 | + * @sgt: the h/w link table, followed by IV |
||
10050 | + */ |
||
10051 | +struct skcipher_edesc { |
||
10052 | + int src_nents; |
||
10053 | + int dst_nents; |
||
10054 | + dma_addr_t iv_dma; |
||
10055 | + int qm_sg_bytes; |
||
10056 | + dma_addr_t qm_sg_dma; |
||
10057 | + struct dpaa2_sg_entry sgt[0]; |
||
10058 | +}; |
||
10059 | + |
||
10060 | +/* |
||
10061 | + * ahash_edesc - s/w-extended ahash descriptor |
||
10062 | + * @dst_dma: I/O virtual address of req->result |
||
10063 | + * @qm_sg_dma: I/O virtual address of h/w link table |
||
10064 | + * @src_nents: number of segments in input scatterlist |
||
10065 | + * @qm_sg_bytes: length of dma mapped qm_sg space |
||
10066 | + * @sgt: pointer to h/w link table |
||
10067 | + */ |
||
10068 | +struct ahash_edesc { |
||
10069 | + dma_addr_t dst_dma; |
||
10070 | + dma_addr_t qm_sg_dma; |
||
10071 | + int src_nents; |
||
10072 | + int qm_sg_bytes; |
||
10073 | + struct dpaa2_sg_entry sgt[0]; |
||
10074 | +}; |
||
10075 | + |
||
10076 | +/** |
||
10077 | + * caam_flc - Flow Context (FLC) |
||
10078 | + * @flc: Flow Context options |
||
10079 | + * @sh_desc: Shared Descriptor |
||
10080 | + */ |
||
10081 | +struct caam_flc { |
||
10082 | + u32 flc[16]; |
||
10083 | + u32 sh_desc[MAX_SDLEN]; |
||
10084 | +} ____cacheline_aligned; |
||
10085 | + |
||
10086 | +enum optype { |
||
10087 | + ENCRYPT = 0, |
||
10088 | + DECRYPT, |
||
10089 | + NUM_OP |
||
10090 | +}; |
||
10091 | + |
||
10092 | +/** |
||
10093 | + * caam_request - the request structure an application should fill in when |
||
10094 | + * submitting a job to the driver. |
||
10095 | + * @fd_flt: Frame list table defining input and output |
||
10096 | + * fd_flt[0] - FLE pointing to output buffer |
||
10097 | + * fd_flt[1] - FLE pointing to input buffer |
||
10098 | + * @fd_flt_dma: DMA address for the frame list table |
||
10099 | + * @flc: Flow Context |
||
10100 | + * @flc_dma: I/O virtual address of Flow Context |
||
10101 | + * @cbk: Callback function to invoke when job is completed |
||
10102 | + * @ctx: arbitrary context attached to the request by the application |
||
10103 | + * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc |
||
10104 | + */ |
||
10105 | +struct caam_request { |
||
10106 | + struct dpaa2_fl_entry fd_flt[2]; |
||
10107 | + dma_addr_t fd_flt_dma; |
||
10108 | + struct caam_flc *flc; |
||
10109 | + dma_addr_t flc_dma; |
||
10110 | + void (*cbk)(void *ctx, u32 err); |
||
10111 | + void *ctx; |
||
10112 | + void *edesc; |
||
10113 | +}; |
||
10114 | + |
||
10115 | +/** |
||
10116 | + * dpaa2_caam_enqueue() - enqueue a crypto request |
||
10117 | + * @dev: device associated with the DPSECI object |
||
10118 | + * @req: pointer to caam_request |
||
10119 | + */ |
||
10120 | +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req); |
||
10121 | + |
||
10122 | +#endif /* _CAAMALG_QI2_H_ */ |
||
10123 | --- a/drivers/crypto/caam/caamhash.c |
||
10124 | +++ b/drivers/crypto/caam/caamhash.c |
||
10125 | @@ -62,6 +62,7 @@ |
||
10126 | #include "error.h" |
||
10127 | #include "sg_sw_sec4.h" |
||
10128 | #include "key_gen.h" |
||
10129 | +#include "caamhash_desc.h" |
||
10130 | |||
10131 | #define CAAM_CRA_PRIORITY 3000 |
||
10132 | |||
10133 | @@ -71,14 +72,6 @@ |
||
10134 | #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE |
||
10135 | #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE |
||
10136 | |||
10137 | -/* length of descriptors text */ |
||
10138 | -#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ) |
||
10139 | -#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) |
||
10140 | -#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) |
||
10141 | -#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) |
||
10142 | -#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) |
||
10143 | -#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) |
||
10144 | - |
||
10145 | #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ |
||
10146 | CAAM_MAX_HASH_KEY_SIZE) |
||
10147 | #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) |
||
10148 | @@ -107,6 +100,7 @@ struct caam_hash_ctx { |
||
10149 | dma_addr_t sh_desc_update_first_dma; |
||
10150 | dma_addr_t sh_desc_fin_dma; |
||
10151 | dma_addr_t sh_desc_digest_dma; |
||
10152 | + enum dma_data_direction dir; |
||
10153 | struct device *jrdev; |
||
10154 | u8 key[CAAM_MAX_HASH_KEY_SIZE]; |
||
10155 | int ctx_len; |
||
10156 | @@ -218,7 +212,7 @@ static inline int buf_map_to_sec4_sg(str |
||
10157 | } |
||
10158 | |||
10159 | /* Map state->caam_ctx, and add it to link table */ |
||
10160 | -static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, |
||
10161 | +static inline int ctx_map_to_sec4_sg(struct device *jrdev, |
||
10162 | struct caam_hash_state *state, int ctx_len, |
||
10163 | struct sec4_sg_entry *sec4_sg, u32 flag) |
||
10164 | { |
||
10165 | @@ -234,68 +228,22 @@ static inline int ctx_map_to_sec4_sg(u32 |
||
10166 | return 0; |
||
10167 | } |
||
10168 | |||
10169 | -/* |
||
10170 | - * For ahash update, final and finup (import_ctx = true) |
||
10171 | - * import context, read and write to seqout |
||
10172 | - * For ahash firsts and digest (import_ctx = false) |
||
10173 | - * read and write to seqout |
||
10174 | - */ |
||
10175 | -static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize, |
||
10176 | - struct caam_hash_ctx *ctx, bool import_ctx) |
||
10177 | -{ |
||
10178 | - u32 op = ctx->adata.algtype; |
||
10179 | - u32 *skip_key_load; |
||
10180 | - |
||
10181 | - init_sh_desc(desc, HDR_SHARE_SERIAL); |
||
10182 | - |
||
10183 | - /* Append key if it has been set; ahash update excluded */ |
||
10184 | - if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) { |
||
10185 | - /* Skip key loading if already shared */ |
||
10186 | - skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | |
||
10187 | - JUMP_COND_SHRD); |
||
10188 | - |
||
10189 | - append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad, |
||
10190 | - ctx->adata.keylen, CLASS_2 | |
||
10191 | - KEY_DEST_MDHA_SPLIT | KEY_ENC); |
||
10192 | - |
||
10193 | - set_jump_tgt_here(desc, skip_key_load); |
||
10194 | - |
||
10195 | - op |= OP_ALG_AAI_HMAC_PRECOMP; |
||
10196 | - } |
||
10197 | - |
||
10198 | - /* If needed, import context from software */ |
||
10199 | - if (import_ctx) |
||
10200 | - append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB | |
||
10201 | - LDST_SRCDST_BYTE_CONTEXT); |
||
10202 | - |
||
10203 | - /* Class 2 operation */ |
||
10204 | - append_operation(desc, op | state | OP_ALG_ENCRYPT); |
||
10205 | - |
||
10206 | - /* |
||
10207 | - * Load from buf and/or src and write to req->result or state->context |
||
10208 | - * Calculate remaining bytes to read |
||
10209 | - */ |
||
10210 | - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); |
||
10211 | - /* Read remaining bytes */ |
||
10212 | - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | |
||
10213 | - FIFOLD_TYPE_MSG | KEY_VLF); |
||
10214 | - /* Store class2 context bytes */ |
||
10215 | - append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | |
||
10216 | - LDST_SRCDST_BYTE_CONTEXT); |
||
10217 | -} |
||
10218 | - |
||
10219 | static int ahash_set_sh_desc(struct crypto_ahash *ahash) |
||
10220 | { |
||
10221 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
||
10222 | int digestsize = crypto_ahash_digestsize(ahash); |
||
10223 | struct device *jrdev = ctx->jrdev; |
||
10224 | + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); |
||
10225 | u32 *desc; |
||
10226 | |||
10227 | + ctx->adata.key_virt = ctx->key; |
||
10228 | + |
||
10229 | /* ahash_update shared descriptor */ |
||
10230 | desc = ctx->sh_desc_update; |
||
10231 | - ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true); |
||
10232 | + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, |
||
10233 | + ctx->ctx_len, true, ctrlpriv->era); |
||
10234 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, |
||
10235 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
10236 | + desc_bytes(desc), ctx->dir); |
||
10237 | #ifdef DEBUG |
||
10238 | print_hex_dump(KERN_ERR, |
||
10239 | "ahash update shdesc@"__stringify(__LINE__)": ", |
||
10240 | @@ -304,9 +252,10 @@ static int ahash_set_sh_desc(struct cryp |
||
10241 | |||
10242 | /* ahash_update_first shared descriptor */ |
||
10243 | desc = ctx->sh_desc_update_first; |
||
10244 | - ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false); |
||
10245 | + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, |
||
10246 | + ctx->ctx_len, false, ctrlpriv->era); |
||
10247 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, |
||
10248 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
10249 | + desc_bytes(desc), ctx->dir); |
||
10250 | #ifdef DEBUG |
||
10251 | print_hex_dump(KERN_ERR, |
||
10252 | "ahash update first shdesc@"__stringify(__LINE__)": ", |
||
10253 | @@ -315,9 +264,10 @@ static int ahash_set_sh_desc(struct cryp |
||
10254 | |||
10255 | /* ahash_final shared descriptor */ |
||
10256 | desc = ctx->sh_desc_fin; |
||
10257 | - ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true); |
||
10258 | + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, |
||
10259 | + ctx->ctx_len, true, ctrlpriv->era); |
||
10260 | dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, |
||
10261 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
10262 | + desc_bytes(desc), ctx->dir); |
||
10263 | #ifdef DEBUG |
||
10264 | print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", |
||
10265 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
||
10266 | @@ -326,9 +276,10 @@ static int ahash_set_sh_desc(struct cryp |
||
10267 | |||
10268 | /* ahash_digest shared descriptor */ |
||
10269 | desc = ctx->sh_desc_digest; |
||
10270 | - ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false); |
||
10271 | + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, |
||
10272 | + ctx->ctx_len, false, ctrlpriv->era); |
||
10273 | dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, |
||
10274 | - desc_bytes(desc), DMA_TO_DEVICE); |
||
10275 | + desc_bytes(desc), ctx->dir); |
||
10276 | #ifdef DEBUG |
||
10277 | print_hex_dump(KERN_ERR, |
||
10278 | "ahash digest shdesc@"__stringify(__LINE__)": ", |
||
10279 | @@ -421,6 +372,7 @@ static int ahash_setkey(struct crypto_ah |
||
10280 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
||
10281 | int blocksize = crypto_tfm_alg_blocksize(&ahash->base); |
||
10282 | int digestsize = crypto_ahash_digestsize(ahash); |
||
10283 | + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); |
||
10284 | int ret; |
||
10285 | u8 *hashed_key = NULL; |
||
10286 | |||
10287 | @@ -441,16 +393,26 @@ static int ahash_setkey(struct crypto_ah |
||
10288 | key = hashed_key; |
||
10289 | } |
||
10290 | |||
10291 | - ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen, |
||
10292 | - CAAM_MAX_HASH_KEY_SIZE); |
||
10293 | - if (ret) |
||
10294 | - goto bad_free_key; |
||
10295 | + /* |
||
10296 | + * If DKP is supported, use it in the shared descriptor to generate |
||
10297 | + * the split key. |
||
10298 | + */ |
||
10299 | + if (ctrlpriv->era >= 6) { |
||
10300 | + ctx->adata.key_inline = true; |
||
10301 | + ctx->adata.keylen = keylen; |
||
10302 | + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & |
||
10303 | + OP_ALG_ALGSEL_MASK); |
||
10304 | |||
10305 | -#ifdef DEBUG |
||
10306 | - print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", |
||
10307 | - DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, |
||
10308 | - ctx->adata.keylen_pad, 1); |
||
10309 | -#endif |
||
10310 | + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) |
||
10311 | + goto bad_free_key; |
||
10312 | + |
||
10313 | + memcpy(ctx->key, key, keylen); |
||
10314 | + } else { |
||
10315 | + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, |
||
10316 | + keylen, CAAM_MAX_HASH_KEY_SIZE); |
||
10317 | + if (ret) |
||
10318 | + goto bad_free_key; |
||
10319 | + } |
||
10320 | |||
10321 | kfree(hashed_key); |
||
10322 | return ahash_set_sh_desc(ahash); |
||
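For a rough sense of the key sizes on the DKP path above, a worked example; the per-algorithm split-key lengths are assumptions based on the usual MDHA pad sizes, not values taken from this patch:

	/*
	 * Assumed split-key sizes (split_key_len() = 2 * MDHA state size):
	 *   HMAC-SHA1:   keylen_pad = 2 * 20 = 40 bytes
	 *   HMAC-SHA256: keylen_pad = 2 * 32 = 64 bytes
	 *   HMAC-SHA512: keylen_pad = 2 * 64 = 128 bytes
	 * Each must fit in CAAM_MAX_HASH_KEY_SIZE, otherwise ahash_setkey()
	 * bails out through bad_free_key.
	 */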
10323 | @@ -773,7 +735,7 @@ static int ahash_update_ctx(struct ahash |
||
10324 | edesc->src_nents = src_nents; |
||
10325 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
||
10326 | |||
10327 | - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, |
||
10328 | + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, |
||
10329 | edesc->sec4_sg, DMA_BIDIRECTIONAL); |
||
10330 | if (ret) |
||
10331 | goto unmap_ctx; |
||
10332 | @@ -871,9 +833,8 @@ static int ahash_final_ctx(struct ahash_ |
||
10333 | desc = edesc->hw_desc; |
||
10334 | |||
10335 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
||
10336 | - edesc->src_nents = 0; |
||
10337 | |||
10338 | - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, |
||
10339 | + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, |
||
10340 | edesc->sec4_sg, DMA_TO_DEVICE); |
||
10341 | if (ret) |
||
10342 | goto unmap_ctx; |
||
10343 | @@ -967,7 +928,7 @@ static int ahash_finup_ctx(struct ahash_ |
||
10344 | |||
10345 | edesc->src_nents = src_nents; |
||
10346 | |||
10347 | - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, |
||
10348 | + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, |
||
10349 | edesc->sec4_sg, DMA_TO_DEVICE); |
||
10350 | if (ret) |
||
10351 | goto unmap_ctx; |
||
10352 | @@ -1126,7 +1087,6 @@ static int ahash_final_no_ctx(struct aha |
||
10353 | dev_err(jrdev, "unable to map dst\n"); |
||
10354 | goto unmap; |
||
10355 | } |
||
10356 | - edesc->src_nents = 0; |
||
10357 | |||
10358 | #ifdef DEBUG |
||
10359 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", |
||
10360 | @@ -1208,7 +1168,6 @@ static int ahash_update_no_ctx(struct ah |
||
10361 | |||
10362 | edesc->src_nents = src_nents; |
||
10363 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
||
10364 | - edesc->dst_dma = 0; |
||
10365 | |||
10366 | ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); |
||
10367 | if (ret) |
||
10368 | @@ -1420,7 +1379,6 @@ static int ahash_update_first(struct aha |
||
10369 | } |
||
10370 | |||
10371 | edesc->src_nents = src_nents; |
||
10372 | - edesc->dst_dma = 0; |
||
10373 | |||
10374 | ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, |
||
10375 | to_hash); |
||
10376 | @@ -1722,6 +1680,7 @@ static int caam_hash_cra_init(struct cry |
||
10377 | HASH_MSG_LEN + 64, |
||
10378 | HASH_MSG_LEN + SHA512_DIGEST_SIZE }; |
||
10379 | dma_addr_t dma_addr; |
||
10380 | + struct caam_drv_private *priv; |
||
10381 | |||
10382 | /* |
||
10383 | * Get a Job ring from Job Ring driver to ensure in-order |
||
10384 | @@ -1733,10 +1692,13 @@ static int caam_hash_cra_init(struct cry |
||
10385 | return PTR_ERR(ctx->jrdev); |
||
10386 | } |
||
10387 | |||
10388 | + priv = dev_get_drvdata(ctx->jrdev->parent); |
||
10389 | + ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; |
||
10390 | + |
||
10391 | dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, |
||
10392 | offsetof(struct caam_hash_ctx, |
||
10393 | sh_desc_update_dma), |
||
10394 | - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); |
||
10395 | + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); |
||
10396 | if (dma_mapping_error(ctx->jrdev, dma_addr)) { |
||
10397 | dev_err(ctx->jrdev, "unable to map shared descriptors\n"); |
||
10398 | caam_jr_free(ctx->jrdev); |
||
10399 | @@ -1771,7 +1733,7 @@ static void caam_hash_cra_exit(struct cr |
||
10400 | dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, |
||
10401 | offsetof(struct caam_hash_ctx, |
||
10402 | sh_desc_update_dma), |
||
10403 | - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); |
||
10404 | + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); |
||
10405 | caam_jr_free(ctx->jrdev); |
||
10406 | } |
||
10407 | |||
10408 | --- /dev/null |
||
10409 | +++ b/drivers/crypto/caam/caamhash_desc.c |
||
10410 | @@ -0,0 +1,108 @@ |
||
10411 | +/* |
||
10412 | + * Shared descriptors for ahash algorithms |
||
10413 | + * |
||
10414 | + * Copyright 2017 NXP |
||
10415 | + * |
||
10416 | + * Redistribution and use in source and binary forms, with or without |
||
10417 | + * modification, are permitted provided that the following conditions are met: |
||
10418 | + * * Redistributions of source code must retain the above copyright |
||
10419 | + * notice, this list of conditions and the following disclaimer. |
||
10420 | + * * Redistributions in binary form must reproduce the above copyright |
||
10421 | + * notice, this list of conditions and the following disclaimer in the |
||
10422 | + * documentation and/or other materials provided with the distribution. |
||
10423 | + * * Neither the names of the above-listed copyright holders nor the |
||
10424 | + * names of any contributors may be used to endorse or promote products |
||
10425 | + * derived from this software without specific prior written permission. |
||
10426 | + * |
||
10427 | + * |
||
10428 | + * ALTERNATIVELY, this software may be distributed under the terms of the |
||
10429 | + * GNU General Public License ("GPL") as published by the Free Software |
||
10430 | + * Foundation, either version 2 of that License or (at your option) any |
||
10431 | + * later version. |
||
10432 | + * |
||
10433 | + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
||
10434 | + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
||
10435 | + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
||
10436 | + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE |
||
10437 | + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
||
10438 | + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
||
10439 | + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
||
10440 | + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
||
10441 | + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
||
10442 | + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
||
10443 | + * POSSIBILITY OF SUCH DAMAGE. |
||
10444 | + */ |
||
10445 | + |
||
10446 | +#include "compat.h" |
||
10447 | +#include "desc_constr.h" |
||
10448 | +#include "caamhash_desc.h" |
||
10449 | + |
||
10450 | +/** |
||
10451 | + * cnstr_shdsc_ahash - ahash shared descriptor |
||
10452 | + * @desc: pointer to buffer used for descriptor construction |
||
10453 | + * @adata: pointer to authentication transform definitions. |
||
10454 | + * A split key is required for SEC Era < 6; the size of the split key |
||
10455 | + * is specified in this case. |
||
10456 | + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224, |
||
10457 | + * SHA256, SHA384, SHA512}. |
||
10458 | + * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE} |
||
10459 | + * @digestsize: algorithm's digest size |
||
10460 | + * @ctx_len: size of Context Register |
||
10461 | + * @import_ctx: true if previous Context Register needs to be restored |
||
10462 | + * must be true for ahash update and final |
||
10463 | + * must be false for ahash first and digest |
||
10464 | + * @era: SEC Era |
||
10465 | + */ |
||
10466 | +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state, |
||
10467 | + int digestsize, int ctx_len, bool import_ctx, int era) |
||
10468 | +{ |
||
10469 | + u32 op = adata->algtype; |
||
10470 | + |
||
10471 | + init_sh_desc(desc, HDR_SHARE_SERIAL); |
||
10472 | + |
||
10473 | + /* Append key if it has been set; ahash update excluded */ |
||
10474 | + if (state != OP_ALG_AS_UPDATE && adata->keylen) { |
||
10475 | + u32 *skip_key_load; |
||
10476 | + |
||
10477 | + /* Skip key loading if already shared */ |
||
10478 | + skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | |
||
10479 | + JUMP_COND_SHRD); |
||
10480 | + |
||
10481 | + if (era < 6) |
||
10482 | + append_key_as_imm(desc, adata->key_virt, |
||
10483 | + adata->keylen_pad, |
||
10484 | + adata->keylen, CLASS_2 | |
||
10485 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); |
||
10486 | + else |
||
10487 | + append_proto_dkp(desc, adata); |
||
10488 | + |
||
10489 | + set_jump_tgt_here(desc, skip_key_load); |
||
10490 | + |
||
10491 | + op |= OP_ALG_AAI_HMAC_PRECOMP; |
||
10492 | + } |
||
10493 | + |
||
10494 | + /* If needed, import context from software */ |
||
10495 | + if (import_ctx) |
||
10496 | + append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB | |
||
10497 | + LDST_SRCDST_BYTE_CONTEXT); |
||
10498 | + |
||
10499 | + /* Class 2 operation */ |
||
10500 | + append_operation(desc, op | state | OP_ALG_ENCRYPT); |
||
10501 | + |
||
10502 | + /* |
||
10503 | + * Load from buf and/or src and write to req->result or state->context |
||
10504 | + * Calculate remaining bytes to read |
||
10505 | + */ |
||
10506 | + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); |
||
10507 | + /* Read remaining bytes */ |
||
10508 | + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | |
||
10509 | + FIFOLD_TYPE_MSG | KEY_VLF); |
||
10510 | + /* Store class2 context bytes */ |
||
10511 | + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | |
||
10512 | + LDST_SRCDST_BYTE_CONTEXT); |
||
10513 | +} |
||
10514 | +EXPORT_SYMBOL(cnstr_shdsc_ahash); |
||
10515 | + |
||
10516 | +MODULE_LICENSE("Dual BSD/GPL"); |
||
10517 | +MODULE_DESCRIPTION("FSL CAAM ahash descriptors support"); |
||
10518 | +MODULE_AUTHOR("NXP Semiconductors"); |
||
10519 | --- /dev/null |
||
10520 | +++ b/drivers/crypto/caam/caamhash_desc.h |
||
10521 | @@ -0,0 +1,49 @@ |
||
10522 | +/* |
||
10523 | + * Shared descriptors for ahash algorithms |
||
10524 | + * |
||
10525 | + * Copyright 2017 NXP |
||
10526 | + * |
||
10527 | + * Redistribution and use in source and binary forms, with or without |
||
10528 | + * modification, are permitted provided that the following conditions are met: |
||
10529 | + * * Redistributions of source code must retain the above copyright |
||
10530 | + * notice, this list of conditions and the following disclaimer. |
||
10531 | + * * Redistributions in binary form must reproduce the above copyright |
||
10532 | + * notice, this list of conditions and the following disclaimer in the |
||
10533 | + * documentation and/or other materials provided with the distribution. |
||
10534 | + * * Neither the names of the above-listed copyright holders nor the |
||
10535 | + * names of any contributors may be used to endorse or promote products |
||
10536 | + * derived from this software without specific prior written permission. |
||
10537 | + * |
||
10538 | + * |
||
10539 | + * ALTERNATIVELY, this software may be distributed under the terms of the |
||
10540 | + * GNU General Public License ("GPL") as published by the Free Software |
||
10541 | + * Foundation, either version 2 of that License or (at your option) any |
||
10542 | + * later version. |
||
10543 | + * |
||
10544 | + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
||
10545 | + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
||
10546 | + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
||
10547 | + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE |
||
10548 | + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
||
10549 | + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
||
10550 | + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
||
10551 | + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
||
10552 | + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
||
10553 | + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
||
10554 | + * POSSIBILITY OF SUCH DAMAGE. |
||
10555 | + */ |
||
10556 | + |
||
10557 | +#ifndef _CAAMHASH_DESC_H_ |
||
10558 | +#define _CAAMHASH_DESC_H_ |
||
10559 | + |
||
10560 | +/* length of descriptors text */ |
||
10561 | +#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ) |
||
10562 | +#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) |
||
10563 | +#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) |
||
10564 | +#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) |
||
10565 | +#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) |
||
10566 | + |
||
10567 | +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state, |
||
10568 | + int digestsize, int ctx_len, bool import_ctx, int era); |
||
10569 | + |
||
10570 | +#endif /* _CAAMHASH_DESC_H_ */ |
||
10571 | --- a/drivers/crypto/caam/compat.h |
||
10572 | +++ b/drivers/crypto/caam/compat.h |
||
10573 | @@ -17,6 +17,7 @@ |
||
10574 | #include <linux/of_platform.h> |
||
10575 | #include <linux/dma-mapping.h> |
||
10576 | #include <linux/io.h> |
||
10577 | +#include <linux/iommu.h> |
||
10578 | #include <linux/spinlock.h> |
||
10579 | #include <linux/rtnetlink.h> |
||
10580 | #include <linux/in.h> |
||
10581 | @@ -38,6 +39,7 @@ |
||
10582 | #include <crypto/authenc.h> |
||
10583 | #include <crypto/akcipher.h> |
||
10584 | #include <crypto/scatterwalk.h> |
||
10585 | +#include <crypto/skcipher.h> |
||
10586 | #include <crypto/internal/skcipher.h> |
||
10587 | #include <crypto/internal/hash.h> |
||
10588 | #include <crypto/internal/rsa.h> |
||
10589 | --- a/drivers/crypto/caam/ctrl.c |
||
10590 | +++ b/drivers/crypto/caam/ctrl.c |
||
10591 | @@ -27,6 +27,8 @@ EXPORT_SYMBOL(caam_imx); |
||
10592 | #include "qi.h" |
||
10593 | #endif |
||
10594 | |||
10595 | +static struct platform_device *caam_dma_dev; |
||
10596 | + |
||
10597 | /* |
||
10598 | * i.MX targets tend to have clock control subsystems that can |
||
10599 | * enable/disable clocking to our device. |
||
10600 | @@ -332,6 +334,9 @@ static int caam_remove(struct platform_d |
||
10601 | debugfs_remove_recursive(ctrlpriv->dfs_root); |
||
10602 | #endif |
||
10603 | |||
10604 | + if (caam_dma_dev) |
||
10605 | + platform_device_unregister(caam_dma_dev); |
||
10606 | + |
||
10607 | /* Unmap controller region */ |
||
10608 | iounmap(ctrl); |
||
10609 | |||
10610 | @@ -433,6 +438,10 @@ static int caam_probe(struct platform_de |
||
10611 | {.family = "Freescale i.MX"}, |
||
10612 | {}, |
||
10613 | }; |
||
10614 | + static struct platform_device_info caam_dma_pdev_info = { |
||
10615 | + .name = "caam-dma", |
||
10616 | + .id = PLATFORM_DEVID_NONE |
||
10617 | + }; |
||
10618 | struct device *dev; |
||
10619 | struct device_node *nprop, *np; |
||
10620 | struct caam_ctrl __iomem *ctrl; |
||
10621 | @@ -615,6 +624,8 @@ static int caam_probe(struct platform_de |
||
10622 | goto iounmap_ctrl; |
||
10623 | } |
||
10624 | |||
10625 | + ctrlpriv->era = caam_get_era(); |
||
10626 | + |
||
10627 | ret = of_platform_populate(nprop, caam_match, NULL, dev); |
||
10628 | if (ret) { |
||
10629 | dev_err(dev, "JR platform devices creation error\n"); |
||
10630 | @@ -671,6 +682,16 @@ static int caam_probe(struct platform_de |
||
10631 | goto caam_remove; |
||
10632 | } |
||
10633 | |||
10634 | + caam_dma_pdev_info.parent = dev; |
||
10635 | + caam_dma_pdev_info.dma_mask = dma_get_mask(dev); |
||
10636 | + caam_dma_dev = platform_device_register_full(&caam_dma_pdev_info); |
||
10637 | + if (IS_ERR(caam_dma_dev)) { |
||
10638 | + dev_err(dev, "Unable to create and register caam-dma dev\n"); |
||
10639 | + caam_dma_dev = NULL; |
||
10640 | + } else { |
||
10641 | + set_dma_ops(&caam_dma_dev->dev, get_dma_ops(dev)); |
||
10642 | + } |
||
10643 | + |
||
10644 | cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls); |
||
10645 | |||
10646 | /* |
||
10647 | @@ -746,7 +767,7 @@ static int caam_probe(struct platform_de |
||
10648 | |||
10649 | /* Report "alive" for developer to see */ |
||
10650 | dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, |
||
10651 | - caam_get_era()); |
||
10652 | + ctrlpriv->era); |
||
10653 | dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n", |
||
10654 | ctrlpriv->total_jobrs, ctrlpriv->qi_present, |
||
10655 | caam_dpaa2 ? "yes" : "no"); |
||
10656 | --- a/drivers/crypto/caam/desc.h |
||
10657 | +++ b/drivers/crypto/caam/desc.h |
||
10658 | @@ -42,6 +42,7 @@ |
||
10659 | #define CMD_SEQ_LOAD (0x03 << CMD_SHIFT) |
||
10660 | #define CMD_FIFO_LOAD (0x04 << CMD_SHIFT) |
||
10661 | #define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT) |
||
10662 | +#define CMD_MOVEB (0x07 << CMD_SHIFT) |
||
10663 | #define CMD_STORE (0x0a << CMD_SHIFT) |
||
10664 | #define CMD_SEQ_STORE (0x0b << CMD_SHIFT) |
||
10665 | #define CMD_FIFO_STORE (0x0c << CMD_SHIFT) |
||
10666 | @@ -355,6 +356,7 @@ |
||
10667 | #define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT) |
||
10668 | #define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT) |
||
10669 | #define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT) |
||
10670 | +#define FIFOLD_TYPE_IFIFO (0x0f << FIFOLD_TYPE_SHIFT) |
||
10671 | |||
10672 | /* Other types. Need to OR in last/flush bits as desired */ |
||
10673 | #define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT) |
||
10674 | @@ -408,6 +410,7 @@ |
||
10675 | #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT) |
||
10676 | #define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT) |
||
10677 | #define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT) |
||
10678 | +#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT) |
||
10679 | #define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT) |
||
10680 | |||
10681 | /* |
||
10682 | @@ -444,6 +447,18 @@ |
||
10683 | #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT) |
||
10684 | #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT) |
||
10685 | #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT) |
||
10686 | +#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT) |
||
10687 | +#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT) |
||
10688 | +#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT) |
||
10689 | +#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT) |
||
10690 | +#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT) |
||
10691 | +#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT) |
||
10692 | +#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT) |
||
10693 | +#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT) |
||
10694 | +#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT) |
||
10695 | +#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT) |
||
10696 | +#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT) |
||
10697 | +#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT) |
||
10698 | |||
10699 | /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */ |
||
10700 | #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT) |
||
10701 | @@ -1093,6 +1108,22 @@ |
||
10702 | /* MacSec protinfos */ |
||
10703 | #define OP_PCL_MACSEC 0x0001 |
||
10704 | |||
10705 | +/* Derived Key Protocol (DKP) Protinfo */ |
||
10706 | +#define OP_PCL_DKP_SRC_SHIFT 14 |
||
10707 | +#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT) |
||
10708 | +#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT) |
||
10709 | +#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT) |
||
10710 | +#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT) |
||
10711 | +#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT) |
||
10712 | +#define OP_PCL_DKP_DST_SHIFT 12 |
||
10713 | +#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT) |
||
10714 | +#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT) |
||
10715 | +#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT) |
||
10716 | +#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT) |
||
10717 | +#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT) |
||
10718 | +#define OP_PCL_DKP_KEY_SHIFT 0 |
||
10719 | +#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT) |
||
10720 | + |
||
10721 | /* PKI unidirectional protocol protinfo bits */ |
||
10722 | #define OP_PCL_PKPROT_TEST 0x0008 |
||
10723 | #define OP_PCL_PKPROT_DECRYPT 0x0004 |
||
10724 | @@ -1440,10 +1471,11 @@ |
||
10725 | #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT) |
||
10726 | #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT) |
||
10727 | #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT) |
||
10728 | -#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT) |
||
10729 | +#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT) |
||
10730 | #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT) |
||
10731 | #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT) |
||
10732 | #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT) |
||
10733 | +#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT) |
||
10734 | |||
10735 | /* Destination selectors */ |
||
10736 | #define MATH_DEST_SHIFT 8 |
||
10737 | @@ -1452,6 +1484,7 @@ |
||
10738 | #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT) |
||
10739 | #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT) |
||
10740 | #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT) |
||
10741 | +#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT) |
||
10742 | #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT) |
||
10743 | #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT) |
||
10744 | #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT) |
||
10745 | @@ -1624,4 +1657,31 @@ |
||
10746 | /* Frame Descriptor Command for Replacement Job Descriptor */ |
||
10747 | #define FD_CMD_REPLACE_JOB_DESC 0x20000000 |
||
10748 | |||
10749 | +/* CHA Control Register bits */ |
||
10750 | +#define CCTRL_RESET_CHA_ALL 0x1 |
||
10751 | +#define CCTRL_RESET_CHA_AESA 0x2 |
||
10752 | +#define CCTRL_RESET_CHA_DESA 0x4 |
||
10753 | +#define CCTRL_RESET_CHA_AFHA 0x8 |
||
10754 | +#define CCTRL_RESET_CHA_KFHA 0x10 |
||
10755 | +#define CCTRL_RESET_CHA_SF8A 0x20 |
||
10756 | +#define CCTRL_RESET_CHA_PKHA 0x40 |
||
10757 | +#define CCTRL_RESET_CHA_MDHA 0x80 |
||
10758 | +#define CCTRL_RESET_CHA_CRCA 0x100 |
||
10759 | +#define CCTRL_RESET_CHA_RNG 0x200 |
||
10760 | +#define CCTRL_RESET_CHA_SF9A 0x400 |
||
10761 | +#define CCTRL_RESET_CHA_ZUCE 0x800 |
||
10762 | +#define CCTRL_RESET_CHA_ZUCA 0x1000 |
||
10763 | +#define CCTRL_UNLOAD_PK_A0 0x10000 |
||
10764 | +#define CCTRL_UNLOAD_PK_A1 0x20000 |
||
10765 | +#define CCTRL_UNLOAD_PK_A2 0x40000 |
||
10766 | +#define CCTRL_UNLOAD_PK_A3 0x80000 |
||
10767 | +#define CCTRL_UNLOAD_PK_B0 0x100000 |
||
10768 | +#define CCTRL_UNLOAD_PK_B1 0x200000 |
||
10769 | +#define CCTRL_UNLOAD_PK_B2 0x400000 |
||
10770 | +#define CCTRL_UNLOAD_PK_B3 0x800000 |
||
10771 | +#define CCTRL_UNLOAD_PK_N 0x1000000 |
||
10772 | +#define CCTRL_UNLOAD_PK_A 0x4000000 |
||
10773 | +#define CCTRL_UNLOAD_PK_B 0x8000000 |
||
10774 | +#define CCTRL_UNLOAD_SBOX 0x10000000 |
||
10775 | + |
||
10776 | #endif /* DESC_H */ |
||
10777 | --- a/drivers/crypto/caam/desc_constr.h |
||
10778 | +++ b/drivers/crypto/caam/desc_constr.h |
||
10779 | @@ -109,7 +109,7 @@ static inline void init_job_desc_shared( |
||
10780 | append_ptr(desc, ptr); |
||
10781 | } |
||
10782 | |||
10783 | -static inline void append_data(u32 * const desc, void *data, int len) |
||
10784 | +static inline void append_data(u32 * const desc, const void *data, int len) |
||
10785 | { |
||
10786 | u32 *offset = desc_end(desc); |
||
10787 | |||
10788 | @@ -172,7 +172,7 @@ static inline void append_cmd_ptr_extlen |
||
10789 | append_cmd(desc, len); |
||
10790 | } |
||
10791 | |||
10792 | -static inline void append_cmd_data(u32 * const desc, void *data, int len, |
||
10793 | +static inline void append_cmd_data(u32 * const desc, const void *data, int len, |
||
10794 | u32 command) |
||
10795 | { |
||
10796 | append_cmd(desc, command | IMMEDIATE | len); |
||
10797 | @@ -189,6 +189,7 @@ static inline u32 *append_##cmd(u32 * co |
||
10798 | } |
||
10799 | APPEND_CMD_RET(jump, JUMP) |
||
10800 | APPEND_CMD_RET(move, MOVE) |
||
10801 | +APPEND_CMD_RET(moveb, MOVEB) |
||
10802 | |||
10803 | static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd) |
||
10804 | { |
||
10805 | @@ -271,7 +272,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN) |
||
10806 | APPEND_SEQ_PTR_INTLEN(out, OUT) |
||
10807 | |||
10808 | #define APPEND_CMD_PTR_TO_IMM(cmd, op) \ |
||
10809 | -static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \ |
||
10810 | +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \ |
||
10811 | unsigned int len, u32 options) \ |
||
10812 | { \ |
||
10813 | PRINT_POS; \ |
||
10814 | @@ -312,7 +313,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_ |
||
10815 | * from length of immediate data provided, e.g., split keys |
||
10816 | */ |
||
10817 | #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \ |
||
10818 | -static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \ |
||
10819 | +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \ |
||
10820 | unsigned int data_len, \ |
||
10821 | unsigned int len, u32 options) \ |
||
10822 | { \ |
||
10823 | @@ -452,7 +453,7 @@ struct alginfo { |
||
10824 | unsigned int keylen_pad; |
||
10825 | union { |
||
10826 | dma_addr_t key_dma; |
||
10827 | - void *key_virt; |
||
10828 | + const void *key_virt; |
||
10829 | }; |
||
10830 | bool key_inline; |
||
10831 | }; |
||
10832 | @@ -496,4 +497,45 @@ static inline int desc_inline_query(unsi |
||
10833 | return (rem_bytes >= 0) ? 0 : -1; |
||
10834 | } |
||
10835 | |||
10836 | +/** |
||
10837 | + * append_proto_dkp - Derived Key Protocol (DKP): key -> split key |
||
10838 | + * @desc: pointer to buffer used for descriptor construction |
||
10839 | + * @adata: pointer to authentication transform definitions. |
||
10840 | + * keylen should be the length of initial key, while keylen_pad |
||
10841 | + * the length of the derived (split) key. |
||
10842 | + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224, |
||
10843 | + * SHA256, SHA384, SHA512}. |
||
10844 | + */ |
||
10845 | +static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata) |
||
10846 | +{ |
||
10847 | + u32 protid; |
||
10848 | + |
||
10849 | + /* |
||
10850 | + * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*} |
||
10851 | + * to OP_PCLID_DKP_{MD5, SHA*} |
||
10852 | + */ |
||
10853 | + protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) | |
||
10854 | + (0x20 << OP_ALG_ALGSEL_SHIFT); |
||
10855 | + |
||
10856 | + if (adata->key_inline) { |
||
10857 | + int words; |
||
10858 | + |
||
10859 | + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | |
||
10860 | + OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM | |
||
10861 | + adata->keylen); |
||
10862 | + append_data(desc, adata->key_virt, adata->keylen); |
||
10863 | + |
||
10864 | + /* Reserve space in descriptor buffer for the derived key */ |
||
10865 | + words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) - |
||
10866 | + ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ; |
||
10867 | + if (words) |
||
10868 | + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words); |
||
10869 | + } else { |
||
10870 | + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | |
||
10871 | + OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR | |
||
10872 | + adata->keylen); |
||
10873 | + append_ptr(desc, adata->key_dma); |
||
10874 | + } |
||
10875 | +} |
||
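A worked example of the space reservation in the key_inline branch of append_proto_dkp() above, assuming HMAC-SHA256 figures (keylen = 32, keylen_pad = 64); the numbers are illustrative:

	/*
	 * words = (ALIGN(64, CAAM_CMD_SZ) - ALIGN(32, CAAM_CMD_SZ)) / CAAM_CMD_SZ
	 *       = (64 - 32) / 4 = 8
	 *
	 * Bumping the descriptor length in the header by 8 words reserves room
	 * for DKP to overwrite the 32-byte immediate key in place with the
	 * 64-byte split key, without later append_*() calls clobbering it.
	 */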
10876 | + |
||
10877 | #endif /* DESC_CONSTR_H */ |
||
10878 | --- /dev/null |
||
10879 | +++ b/drivers/crypto/caam/dpseci.c |
||
10880 | @@ -0,0 +1,865 @@ |
||
10881 | +/* |
||
10882 | + * Copyright 2013-2016 Freescale Semiconductor Inc. |
||
10883 | + * Copyright 2017 NXP |
||
10884 | + * |
||
10885 | + * Redistribution and use in source and binary forms, with or without |
||
10886 | + * modification, are permitted provided that the following conditions are met: |
||
10887 | + * * Redistributions of source code must retain the above copyright |
||
10888 | + * notice, this list of conditions and the following disclaimer. |
||
10889 | + * * Redistributions in binary form must reproduce the above copyright |
||
10890 | + * notice, this list of conditions and the following disclaimer in the |
||
10891 | + * documentation and/or other materials provided with the distribution. |
||
10892 | + * * Neither the names of the above-listed copyright holders nor the |
||
10893 | + * names of any contributors may be used to endorse or promote products |
||
10894 | + * derived from this software without specific prior written permission. |
||
10895 | + * |
||
10896 | + * |
||
10897 | + * ALTERNATIVELY, this software may be distributed under the terms of the |
||
10898 | + * GNU General Public License ("GPL") as published by the Free Software |
||
10899 | + * Foundation, either version 2 of that License or (at your option) any |
||
10900 | + * later version. |
||
10901 | + * |
||
10902 | + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
||
10903 | + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
||
10904 | + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
||
10905 | + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE |
||
10906 | + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
||
10907 | + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
||
10908 | + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
||
10909 | + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
||
10910 | + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
||
10911 | + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
||
10912 | + * POSSIBILITY OF SUCH DAMAGE. |
||
10913 | + */ |
||
10914 | + |
||
10915 | +#include <linux/fsl/mc.h> |
||
10916 | +#include "../../../drivers/staging/fsl-mc/include/dpopr.h" |
||
10917 | +#include "dpseci.h" |
||
10918 | +#include "dpseci_cmd.h" |
||
10919 | + |
||
10920 | +/** |
||
10921 | + * dpseci_open() - Open a control session for the specified object |
||
10922 | + * @mc_io: Pointer to MC portal's I/O object |
||
10923 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
10924 | + * @dpseci_id: DPSECI unique ID |
||
10925 | + * @token: Returned token; use in subsequent API calls |
||
10926 | + * |
||
10927 | + * This function can be used to open a control session for an already created |
||
10928 | + * object; an object may have been declared in the DPL or by calling the |
||
10929 | + * dpseci_create() function. |
||
10930 | + * This function returns a unique authentication token, associated with the |
||
10931 | + * specific object ID and the specific MC portal; this token must be used in all |
||
10932 | + * subsequent commands for this specific object. |
||
10933 | + * |
||
10934 | + * Return: '0' on success, error code otherwise |
||
10935 | + */ |
||
10936 | +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id, |
||
10937 | + u16 *token) |
||
10938 | +{ |
||
10939 | + struct fsl_mc_command cmd = { 0 }; |
||
10940 | + struct dpseci_cmd_open *cmd_params; |
||
10941 | + int err; |
||
10942 | + |
||
10943 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN, |
||
10944 | + cmd_flags, |
||
10945 | + 0); |
||
10946 | + cmd_params = (struct dpseci_cmd_open *)cmd.params; |
||
10947 | + cmd_params->dpseci_id = cpu_to_le32(dpseci_id); |
||
10948 | + err = mc_send_command(mc_io, &cmd); |
||
10949 | + if (err) |
||
10950 | + return err; |
||
10951 | + |
||
10952 | + *token = mc_cmd_hdr_read_token(&cmd); |
||
10953 | + |
||
10954 | + return 0; |
||
10955 | +} |
||
10956 | + |
||
10957 | +/** |
||
10958 | + * dpseci_close() - Close the control session of the object |
||
10959 | + * @mc_io: Pointer to MC portal's I/O object |
||
10960 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
10961 | + * @token: Token of DPSECI object |
||
10962 | + * |
||
10963 | + * After this function is called, no further operations are allowed on the |
||
10964 | + * object without opening a new control session. |
||
10965 | + * |
||
10966 | + * Return: '0' on success, error code otherwise |
||
10967 | + */ |
||
10968 | +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) |
||
10969 | +{ |
||
10970 | + struct fsl_mc_command cmd = { 0 }; |
||
10971 | + |
||
10972 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE, |
||
10973 | + cmd_flags, |
||
10974 | + token); |
||
10975 | + return mc_send_command(mc_io, &cmd); |
||
10976 | +} |
||
10977 | + |
||
10978 | +/** |
||
10979 | + * dpseci_create() - Create the DPSECI object |
||
10980 | + * @mc_io: Pointer to MC portal's I/O object |
||
10981 | + * @dprc_token: Parent container token; '0' for default container |
||
10982 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
10983 | + * @cfg: Configuration structure |
||
10984 | + * @obj_id: returned object id |
||
10985 | + * |
||
10986 | + * Create the DPSECI object, allocate required resources and perform required |
||
10987 | + * initialization. |
||
10988 | + * |
||
10989 | + * The object can be created either by declaring it in the DPL file, or by |
||
10990 | + * calling this function. |
||
10991 | + * |
||
10992 | + * The function accepts an authentication token of a parent container that this |
||
10993 | + * object should be assigned to. The token can be '0' so the object will be |
||
10994 | + * assigned to the default container. |
||
10995 | + * The newly created object can be opened with the returned object id and using |
||
10996 | + * the container's associated tokens and MC portals. |
||
10997 | + * |
||
10998 | + * Return: '0' on success, error code otherwise |
||
10999 | + */ |
||
11000 | +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags, |
||
11001 | + const struct dpseci_cfg *cfg, u32 *obj_id) |
||
11002 | +{ |
||
11003 | + struct fsl_mc_command cmd = { 0 }; |
||
11004 | + struct dpseci_cmd_create *cmd_params; |
||
11005 | + int i, err; |
||
11006 | + |
||
11007 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE, |
||
11008 | + cmd_flags, |
||
11009 | + dprc_token); |
||
11010 | + cmd_params = (struct dpseci_cmd_create *)cmd.params; |
||
11011 | + for (i = 0; i < 8; i++) |
||
11012 | + cmd_params->priorities[i] = cfg->priorities[i]; |
||
11013 | + for (i = 0; i < 8; i++) |
||
11014 | + cmd_params->priorities2[i] = cfg->priorities[8 + i]; |
||
11015 | + cmd_params->num_tx_queues = cfg->num_tx_queues; |
||
11016 | + cmd_params->num_rx_queues = cfg->num_rx_queues; |
||
11017 | + cmd_params->options = cpu_to_le32(cfg->options); |
||
11018 | + err = mc_send_command(mc_io, &cmd); |
||
11019 | + if (err) |
||
11020 | + return err; |
||
11021 | + |
||
11022 | + *obj_id = mc_cmd_read_object_id(&cmd); |
||
11023 | + |
||
11024 | + return 0; |
||
11025 | +} |
||
11026 | + |
||
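A sketch of the create path as a caller might use it; the queue counts, priorities and option flag are placeholders chosen for the example, and struct dpseci_cfg plus the DPSECI_OPT_* flags are defined in dpseci.h further below:

    static int dpseci_example_create(struct fsl_mc_io *mc_io, u32 *obj_id)
    {
            struct dpseci_cfg cfg = {
                    .options = DPSECI_OPT_HAS_CG,
                    .num_tx_queues = 2,
                    .num_rx_queues = 2,
                    .priorities = { 1, 2 },	/* valid values are 1-8 */
            };

            /* dprc_token == 0: assign the object to the default container */
            return dpseci_create(mc_io, 0, 0, &cfg, obj_id);
    }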
11027 | +/** |
||
11028 | + * dpseci_destroy() - Destroy the DPSECI object and release all its resources |
||
11029 | + * @mc_io: Pointer to MC portal's I/O object |
||
11030 | + * @dprc_token: Parent container token; '0' for default container |
||
11031 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11032 | + * @object_id: The object id; it must be a valid id within the container that |
||
11033 | + * created this object |
||
11034 | + * |
||
11035 | + * The function accepts the authentication token of the parent container that |
||
11036 | + * created the object (not the one that currently owns it). The object is |
||
11037 | + * searched for within the parent container using the provided 'object_id'. |
||
11038 | + * All tokens to the object must be closed before calling destroy. |
||
11039 | + * |
||
11040 | + * Return: '0' on success, error code otherwise |
||
11041 | + */ |
||
11042 | +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags, |
||
11043 | + u32 object_id) |
||
11044 | +{ |
||
11045 | + struct fsl_mc_command cmd = { 0 }; |
||
11046 | + struct dpseci_cmd_destroy *cmd_params; |
||
11047 | + |
||
11048 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY, |
||
11049 | + cmd_flags, |
||
11050 | + dprc_token); |
||
11051 | + cmd_params = (struct dpseci_cmd_destroy *)cmd.params; |
||
11052 | + cmd_params->object_id = cpu_to_le32(object_id); |
||
11053 | + |
||
11054 | + return mc_send_command(mc_io, &cmd); |
||
11055 | +} |
||
11056 | + |
||
11057 | +/** |
||
11058 | + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames |
||
11059 | + * @mc_io: Pointer to MC portal's I/O object |
||
11060 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11061 | + * @token: Token of DPSECI object |
||
11062 | + * |
||
11063 | + * Return: '0' on success, error code otherwise |
||
11064 | + */ |
||
11065 | +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) |
||
11066 | +{ |
||
11067 | + struct fsl_mc_command cmd = { 0 }; |
||
11068 | + |
||
11069 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE, |
||
11070 | + cmd_flags, |
||
11071 | + token); |
||
11072 | + return mc_send_command(mc_io, &cmd); |
||
11073 | +} |
||
11074 | + |
||
11075 | +/** |
||
11076 | + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames |
||
11077 | + * @mc_io: Pointer to MC portal's I/O object |
||
11078 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11079 | + * @token: Token of DPSECI object |
||
11080 | + * |
||
11081 | + * Return: '0' on success, error code otherwise |
||
11082 | + */ |
||
11083 | +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) |
||
11084 | +{ |
||
11085 | + struct fsl_mc_command cmd = { 0 }; |
||
11086 | + |
||
11087 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE, |
||
11088 | + cmd_flags, |
||
11089 | + token); |
||
11090 | + |
||
11091 | + return mc_send_command(mc_io, &cmd); |
||
11092 | +} |
||
11093 | + |
||
11094 | +/** |
||
11095 | + * dpseci_is_enabled() - Check if the DPSECI is enabled. |
||
11096 | + * @mc_io: Pointer to MC portal's I/O object |
||
11097 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11098 | + * @token: Token of DPSECI object |
||
11099 | + * @en: Returns '1' if object is enabled; '0' otherwise |
||
11100 | + * |
||
11101 | + * Return: '0' on success, error code otherwise |
||
11102 | + */ |
||
11103 | +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11104 | + int *en) |
||
11105 | +{ |
||
11106 | + struct fsl_mc_command cmd = { 0 }; |
||
11107 | + struct dpseci_rsp_is_enabled *rsp_params; |
||
11108 | + int err; |
||
11109 | + |
||
11110 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED, |
||
11111 | + cmd_flags, |
||
11112 | + token); |
||
11113 | + err = mc_send_command(mc_io, &cmd); |
||
11114 | + if (err) |
||
11115 | + return err; |
||
11116 | + |
||
11117 | + rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params; |
||
11118 | + *en = dpseci_get_field(rsp_params->is_enabled, ENABLE); |
||
11119 | + |
||
11120 | + return 0; |
||
11121 | +} |
||
11122 | + |
||
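dpseci_enable() reports only that the MC accepted the command; dpseci_is_enabled() lets a caller verify the resulting state. A short sketch (returning -ENXIO on a silent failure is this example's choice):

    static int dpseci_example_enable(struct fsl_mc_io *mc_io, u16 token)
    {
            int en = 0;
            int err;

            err = dpseci_enable(mc_io, 0, token);
            if (err)
                    return err;

            err = dpseci_is_enabled(mc_io, 0, token, &en);
            if (err)
                    return err;

            return en ? 0 : -ENXIO;
    }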
11123 | +/** |
||
11124 | + * dpseci_reset() - Reset the DPSECI, returns the object to initial state. |
||
11125 | + * @mc_io: Pointer to MC portal's I/O object |
||
11126 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11127 | + * @token: Token of DPSECI object |
||
11128 | + * |
||
11129 | + * Return: '0' on success, error code otherwise |
||
11130 | + */ |
||
11131 | +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) |
||
11132 | +{ |
||
11133 | + struct fsl_mc_command cmd = { 0 }; |
||
11134 | + |
||
11135 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET, |
||
11136 | + cmd_flags, |
||
11137 | + token); |
||
11138 | + |
||
11139 | + return mc_send_command(mc_io, &cmd); |
||
11140 | +} |
||
11141 | + |
||
11142 | +/** |
||
11143 | + * dpseci_get_irq_enable() - Get overall interrupt state |
||
11144 | + * @mc_io: Pointer to MC portal's I/O object |
||
11145 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11146 | + * @token: Token of DPSECI object |
||
11147 | + * @irq_index: The interrupt index to configure |
||
11148 | + * @en: Returned Interrupt state - enable = 1, disable = 0 |
||
11149 | + * |
||
11150 | + * Return: '0' on success, error code otherwise |
||
11151 | + */ |
||
11152 | +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11153 | + u8 irq_index, u8 *en) |
||
11154 | +{ |
||
11155 | + struct fsl_mc_command cmd = { 0 }; |
||
11156 | + struct dpseci_cmd_irq_enable *cmd_params; |
||
11157 | + struct dpseci_rsp_get_irq_enable *rsp_params; |
||
11158 | + int err; |
||
11159 | + |
||
11160 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE, |
||
11161 | + cmd_flags, |
||
11162 | + token); |
||
11163 | + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params; |
||
11164 | + cmd_params->irq_index = irq_index; |
||
11165 | + err = mc_send_command(mc_io, &cmd); |
||
11166 | + if (err) |
||
11167 | + return err; |
||
11168 | + |
||
11169 | + rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params; |
||
11170 | + *en = rsp_params->enable_state; |
||
11171 | + |
||
11172 | + return 0; |
||
11173 | +} |
||
11174 | + |
||
11175 | +/** |
||
11176 | + * dpseci_set_irq_enable() - Set overall interrupt state. |
||
11177 | + * @mc_io: Pointer to MC portal's I/O object |
||
11178 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11179 | + * @token: Token of DPSECI object |
||
11180 | + * @irq_index: The interrupt index to configure |
||
11181 | + * @en: Interrupt state - enable = 1, disable = 0 |
||
11182 | + * |
||
11183 | + * Allows GPP software to control when interrupts are generated. |
||
11184 | + * Each interrupt can have up to 32 causes. The enable/disable controls the |
||
11185 | + * overall interrupt state; while the interrupt is disabled, no cause can |
||
11186 | + * assert the interrupt. |
||
11187 | + * |
||
11188 | + * Return: '0' on success, error code otherwise |
||
11189 | + */ |
||
11190 | +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11191 | + u8 irq_index, u8 en) |
||
11192 | +{ |
||
11193 | + struct fsl_mc_command cmd = { 0 }; |
||
11194 | + struct dpseci_cmd_irq_enable *cmd_params; |
||
11195 | + |
||
11196 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE, |
||
11197 | + cmd_flags, |
||
11198 | + token); |
||
11199 | + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params; |
||
11200 | + cmd_params->irq_index = irq_index; |
||
11201 | + cmd_params->enable_state = en; |
||
11202 | + |
||
11203 | + return mc_send_command(mc_io, &cmd); |
||
11204 | +} |
||
11205 | + |
||
11206 | +/** |
||
11207 | + * dpseci_get_irq_mask() - Get interrupt mask. |
||
11208 | + * @mc_io: Pointer to MC portal's I/O object |
||
11209 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11210 | + * @token: Token of DPSECI object |
||
11211 | + * @irq_index: The interrupt index to configure |
||
11212 | + * @mask: Returned event mask to trigger interrupt |
||
11213 | + * |
||
11214 | + * Every interrupt can have up to 32 causes and the interrupt model supports |
||
11215 | + * masking/unmasking each cause independently. |
||
11216 | + * |
||
11217 | + * Return: '0' on success, error code otherwise |
||
11218 | + */ |
||
11219 | +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11220 | + u8 irq_index, u32 *mask) |
||
11221 | +{ |
||
11222 | + struct fsl_mc_command cmd = { 0 }; |
||
11223 | + struct dpseci_cmd_irq_mask *cmd_params; |
||
11224 | + int err; |
||
11225 | + |
||
11226 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK, |
||
11227 | + cmd_flags, |
||
11228 | + token); |
||
11229 | + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params; |
||
11230 | + cmd_params->irq_index = irq_index; |
||
11231 | + err = mc_send_command(mc_io, &cmd); |
||
11232 | + if (err) |
||
11233 | + return err; |
||
11234 | + |
||
11235 | + *mask = le32_to_cpu(cmd_params->mask); |
||
11236 | + |
||
11237 | + return 0; |
||
11238 | +} |
||
11239 | + |
||
11240 | +/** |
||
11241 | + * dpseci_set_irq_mask() - Set interrupt mask. |
||
11242 | + * @mc_io: Pointer to MC portal's I/O object |
||
11243 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11244 | + * @token: Token of DPSECI object |
||
11245 | + * @irq_index: The interrupt index to configure |
||
11246 | + * @mask: event mask to trigger interrupt; |
||
11247 | + * each bit: |
||
11248 | + * 0 = ignore event |
||
11249 | + * 1 = consider event for asserting IRQ |
||
11250 | + * |
||
11251 | + * Every interrupt can have up to 32 causes and the interrupt model supports |
||
11252 | + * masking/unmasking each cause independently. |
||
11253 | + * |
||
11254 | + * Return: '0' on success, error code otherwise |
||
11255 | + */ |
||
11256 | +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11257 | + u8 irq_index, u32 mask) |
||
11258 | +{ |
||
11259 | + struct fsl_mc_command cmd = { 0 }; |
||
11260 | + struct dpseci_cmd_irq_mask *cmd_params; |
||
11261 | + |
||
11262 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK, |
||
11263 | + cmd_flags, |
||
11264 | + token); |
||
11265 | + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params; |
||
11266 | + cmd_params->mask = cpu_to_le32(mask); |
||
11267 | + cmd_params->irq_index = irq_index; |
||
11268 | + |
||
11269 | + return mc_send_command(mc_io, &cmd); |
||
11270 | +} |
||
11271 | + |
||
11272 | +/** |
||
11273 | + * dpseci_get_irq_status() - Get the current status of any pending interrupts |
||
11274 | + * @mc_io: Pointer to MC portal's I/O object |
||
11275 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11276 | + * @token: Token of DPSECI object |
||
11277 | + * @irq_index: The interrupt index to configure |
||
11278 | + * @status: Returned interrupts status - one bit per cause: |
||
11279 | + * 0 = no interrupt pending |
||
11280 | + * 1 = interrupt pending |
||
11281 | + * |
||
11282 | + * Return: '0' on success, error code otherwise |
||
11283 | + */ |
||
11284 | +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11285 | + u8 irq_index, u32 *status) |
||
11286 | +{ |
||
11287 | + struct fsl_mc_command cmd = { 0 }; |
||
11288 | + struct dpseci_cmd_irq_status *cmd_params; |
||
11289 | + int err; |
||
11290 | + |
||
11291 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS, |
||
11292 | + cmd_flags, |
||
11293 | + token); |
||
11294 | + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params; |
||
11295 | + cmd_params->status = cpu_to_le32(*status); |
||
11296 | + cmd_params->irq_index = irq_index; |
||
11297 | + err = mc_send_command(mc_io, &cmd); |
||
11298 | + if (err) |
||
11299 | + return err; |
||
11300 | + |
||
11301 | + *status = le32_to_cpu(cmd_params->status); |
||
11302 | + |
||
11303 | + return 0; |
||
11304 | +} |
||
11305 | + |
||
11306 | +/** |
||
11307 | + * dpseci_clear_irq_status() - Clear a pending interrupt's status |
||
11308 | + * @mc_io: Pointer to MC portal's I/O object |
||
11309 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11310 | + * @token: Token of DPSECI object |
||
11311 | + * @irq_index: The interrupt index to configure |
||
11312 | + * @status: bits to clear (W1C) - one bit per cause: |
||
11313 | + * 0 = don't change |
||
11314 | + * 1 = clear status bit |
||
11315 | + * |
||
11316 | + * Return: '0' on success, error code otherwise |
||
11317 | + */ |
||
11318 | +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11319 | + u8 irq_index, u32 status) |
||
11320 | +{ |
||
11321 | + struct fsl_mc_command cmd = { 0 }; |
||
11322 | + struct dpseci_cmd_irq_status *cmd_params; |
||
11323 | + |
||
11324 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS, |
||
11325 | + cmd_flags, |
||
11326 | + token); |
||
11327 | + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params; |
||
11328 | + cmd_params->status = cpu_to_le32(status); |
||
11329 | + cmd_params->irq_index = irq_index; |
||
11330 | + |
||
11331 | + return mc_send_command(mc_io, &cmd); |
||
11332 | +} |
||
11333 | + |
||
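The status register is write-1-to-clear, so the usual handshake is to read the pending causes and write the same mask back. A sketch, with irq_index 0 assumed:

    static int dpseci_example_ack_irq(struct fsl_mc_io *mc_io, u16 token)
    {
            u32 status = 0;
            int err;

            err = dpseci_get_irq_status(mc_io, 0, token, 0, &status);
            if (err)
                    return err;

            /* W1C: clear exactly the causes observed above */
            return dpseci_clear_irq_status(mc_io, 0, token, 0, status);
    }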
11334 | +/** |
||
11335 | + * dpseci_get_attributes() - Retrieve DPSECI attributes |
||
11336 | + * @mc_io: Pointer to MC portal's I/O object |
||
11337 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11338 | + * @token: Token of DPSECI object |
||
11339 | + * @attr: Returned object's attributes |
||
11340 | + * |
||
11341 | + * Return: '0' on success, error code otherwise |
||
11342 | + */ |
||
11343 | +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11344 | + struct dpseci_attr *attr) |
||
11345 | +{ |
||
11346 | + struct fsl_mc_command cmd = { 0 }; |
||
11347 | + struct dpseci_rsp_get_attributes *rsp_params; |
||
11348 | + int err; |
||
11349 | + |
||
11350 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR, |
||
11351 | + cmd_flags, |
||
11352 | + token); |
||
11353 | + err = mc_send_command(mc_io, &cmd); |
||
11354 | + if (err) |
||
11355 | + return err; |
||
11356 | + |
||
11357 | + rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params; |
||
11358 | + attr->id = le32_to_cpu(rsp_params->id); |
||
11359 | + attr->num_tx_queues = rsp_params->num_tx_queues; |
||
11360 | + attr->num_rx_queues = rsp_params->num_rx_queues; |
||
11361 | + attr->options = le32_to_cpu(rsp_params->options); |
||
11362 | + |
||
11363 | + return 0; |
||
11364 | +} |
||
11365 | + |
||
11366 | +/** |
||
11367 | + * dpseci_set_rx_queue() - Set Rx queue configuration |
||
11368 | + * @mc_io: Pointer to MC portal's I/O object |
||
11369 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11370 | + * @token: Token of DPSECI object |
||
11371 | + * @queue: Select the queue relative to number of priorities configured at |
||
11372 | + * DPSECI creation; use DPSECI_ALL_QUEUES to configure all |
||
11373 | + * Rx queues identically. |
||
11374 | + * @cfg: Rx queue configuration |
||
11375 | + * |
||
11376 | + * Return: '0' on success, error code otherwise |
||
11377 | + */ |
||
11378 | +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11379 | + u8 queue, const struct dpseci_rx_queue_cfg *cfg) |
||
11380 | +{ |
||
11381 | + struct fsl_mc_command cmd = { 0 }; |
||
11382 | + struct dpseci_cmd_queue *cmd_params; |
||
11383 | + |
||
11384 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE, |
||
11385 | + cmd_flags, |
||
11386 | + token); |
||
11387 | + cmd_params = (struct dpseci_cmd_queue *)cmd.params; |
||
11388 | + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); |
||
11389 | + cmd_params->priority = cfg->dest_cfg.priority; |
||
11390 | + cmd_params->queue = queue; |
||
11391 | + dpseci_set_field(cmd_params->dest_type, DEST_TYPE, |
||
11392 | + cfg->dest_cfg.dest_type); |
||
11393 | + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx); |
||
11394 | + cmd_params->options = cpu_to_le32(cfg->options); |
||
11395 | + dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION, |
||
11396 | + cfg->order_preservation_en); |
||
11397 | + |
||
11398 | + return mc_send_command(mc_io, &cmd); |
||
11399 | +} |
||
11400 | + |
||
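A sketch of pointing every Rx queue at a single DPIO channel for FQDAN-driven dequeue; struct dpseci_rx_queue_cfg, the DPSECI_QUEUE_OPT_* flags and DPSECI_ALL_QUEUES are all defined in dpseci.h further below, while dpio_id, the priority and the user_ctx value are placeholders of the example:

    static int dpseci_example_set_rx(struct fsl_mc_io *mc_io, u16 token,
                                     int dpio_id)
    {
            struct dpseci_rx_queue_cfg cfg = {
                    .options = DPSECI_QUEUE_OPT_DEST | DPSECI_QUEUE_OPT_USER_CTX,
                    .user_ctx = 0x1234,	/* echoed in each dequeued FD */
                    .dest_cfg = {
                            .dest_type = DPSECI_DEST_DPIO,
                            .dest_id = dpio_id,
                            .priority = 0,
                    },
            };

            return dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &cfg);
    }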
11401 | +/** |
||
11402 | + * dpseci_get_rx_queue() - Retrieve Rx queue attributes |
||
11403 | + * @mc_io: Pointer to MC portal's I/O object |
||
11404 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11405 | + * @token: Token of DPSECI object |
||
11406 | + * @queue: Select the queue relative to number of priorities configured at |
||
11407 | + * DPSECI creation |
||
11408 | + * @attr: Returned Rx queue attributes |
||
11409 | + * |
||
11410 | + * Return: '0' on success, error code otherwise |
||
11411 | + */ |
||
11412 | +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11413 | + u8 queue, struct dpseci_rx_queue_attr *attr) |
||
11414 | +{ |
||
11415 | + struct fsl_mc_command cmd = { 0 }; |
||
11416 | + struct dpseci_cmd_queue *cmd_params; |
||
11417 | + int err; |
||
11418 | + |
||
11419 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE, |
||
11420 | + cmd_flags, |
||
11421 | + token); |
||
11422 | + cmd_params = (struct dpseci_cmd_queue *)cmd.params; |
||
11423 | + cmd_params->queue = queue; |
||
11424 | + err = mc_send_command(mc_io, &cmd); |
||
11425 | + if (err) |
||
11426 | + return err; |
||
11427 | + |
||
11428 | + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id); |
||
11429 | + attr->dest_cfg.priority = cmd_params->priority; |
||
11430 | + attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type, |
||
11431 | + DEST_TYPE); |
||
11432 | + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx); |
||
11433 | + attr->fqid = le32_to_cpu(cmd_params->fqid); |
||
11434 | + attr->order_preservation_en = |
||
11435 | + dpseci_get_field(cmd_params->order_preservation_en, |
||
11436 | + ORDER_PRESERVATION); |
||
11437 | + |
||
11438 | + return 0; |
||
11439 | +} |
||
11440 | + |
||
11441 | +/** |
||
11442 | + * dpseci_get_tx_queue() - Retrieve Tx queue attributes |
||
11443 | + * @mc_io: Pointer to MC portal's I/O object |
||
11444 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11445 | + * @token: Token of DPSECI object |
||
11446 | + * @queue: Select the queue relative to number of priorities configured at |
||
11447 | + * DPSECI creation |
||
11448 | + * @attr: Returned Tx queue attributes |
||
11449 | + * |
||
11450 | + * Return: '0' on success, error code otherwise |
||
11451 | + */ |
||
11452 | +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11453 | + u8 queue, struct dpseci_tx_queue_attr *attr) |
||
11454 | +{ |
||
11455 | + struct fsl_mc_command cmd = { 0 }; |
||
11456 | + struct dpseci_cmd_queue *cmd_params; |
||
11457 | + struct dpseci_rsp_get_tx_queue *rsp_params; |
||
11458 | + int err; |
||
11459 | + |
||
11460 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE, |
||
11461 | + cmd_flags, |
||
11462 | + token); |
||
11463 | + cmd_params = (struct dpseci_cmd_queue *)cmd.params; |
||
11464 | + cmd_params->queue = queue; |
||
11465 | + err = mc_send_command(mc_io, &cmd); |
||
11466 | + if (err) |
||
11467 | + return err; |
||
11468 | + |
||
11469 | + rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params; |
||
11470 | + attr->fqid = le32_to_cpu(rsp_params->fqid); |
||
11471 | + attr->priority = rsp_params->priority; |
||
11472 | + |
||
11473 | + return 0; |
||
11474 | +} |
||
11475 | + |
||
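The Tx FQIDs returned here are what a driver enqueues frames against, so they are typically collected once at setup time. A sketch, where num_tx would come from a prior dpseci_get_attributes() call and fqids is a caller-provided array of at least num_tx entries:

    static int dpseci_example_tx_fqids(struct fsl_mc_io *mc_io, u16 token,
                                       u8 num_tx, u32 *fqids)
    {
            struct dpseci_tx_queue_attr attr;
            int i, err;

            for (i = 0; i < num_tx; i++) {
                    err = dpseci_get_tx_queue(mc_io, 0, token, i, &attr);
                    if (err)
                            return err;
                    fqids[i] = attr.fqid;
            }

            return 0;
    }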
11476 | +/** |
||
11477 | + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes |
||
11478 | + * @mc_io: Pointer to MC portal's I/O object |
||
11479 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11480 | + * @token: Token of DPSECI object |
||
11481 | + * @attr: Returned SEC attributes |
||
11482 | + * |
||
11483 | + * Return: '0' on success, error code otherwise |
||
11484 | + */ |
||
11485 | +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11486 | + struct dpseci_sec_attr *attr) |
||
11487 | +{ |
||
11488 | + struct fsl_mc_command cmd = { 0 }; |
||
11489 | + struct dpseci_rsp_get_sec_attr *rsp_params; |
||
11490 | + int err; |
||
11491 | + |
||
11492 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR, |
||
11493 | + cmd_flags, |
||
11494 | + token); |
||
11495 | + err = mc_send_command(mc_io, &cmd); |
||
11496 | + if (err) |
||
11497 | + return err; |
||
11498 | + |
||
11499 | + rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params; |
||
11500 | + attr->ip_id = le16_to_cpu(rsp_params->ip_id); |
||
11501 | + attr->major_rev = rsp_params->major_rev; |
||
11502 | + attr->minor_rev = rsp_params->minor_rev; |
||
11503 | + attr->era = rsp_params->era; |
||
11504 | + attr->deco_num = rsp_params->deco_num; |
||
11505 | + attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num; |
||
11506 | + attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num; |
||
11507 | + attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num; |
||
11508 | + attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num; |
||
11509 | + attr->crc_acc_num = rsp_params->crc_acc_num; |
||
11510 | + attr->pk_acc_num = rsp_params->pk_acc_num; |
||
11511 | + attr->kasumi_acc_num = rsp_params->kasumi_acc_num; |
||
11512 | + attr->rng_acc_num = rsp_params->rng_acc_num; |
||
11513 | + attr->md_acc_num = rsp_params->md_acc_num; |
||
11514 | + attr->arc4_acc_num = rsp_params->arc4_acc_num; |
||
11515 | + attr->des_acc_num = rsp_params->des_acc_num; |
||
11516 | + attr->aes_acc_num = rsp_params->aes_acc_num; |
||
11517 | + attr->ccha_acc_num = rsp_params->ccha_acc_num; |
||
11518 | + attr->ptha_acc_num = rsp_params->ptha_acc_num; |
||
11519 | + |
||
11520 | + return 0; |
||
11521 | +} |
||
11522 | + |
||
11523 | +/** |
||
11524 | + * dpseci_get_sec_counters() - Retrieve SEC accelerator counters |
||
11525 | + * @mc_io: Pointer to MC portal's I/O object |
||
11526 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11527 | + * @token: Token of DPSECI object |
||
11528 | + * @counters: Returned SEC counters |
||
11529 | + * |
||
11530 | + * Return: '0' on success, error code otherwise |
||
11531 | + */ |
||
11532 | +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11533 | + struct dpseci_sec_counters *counters) |
||
11534 | +{ |
||
11535 | + struct fsl_mc_command cmd = { 0 }; |
||
11536 | + struct dpseci_rsp_get_sec_counters *rsp_params; |
||
11537 | + int err; |
||
11538 | + |
||
11539 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS, |
||
11540 | + cmd_flags, |
||
11541 | + token); |
||
11542 | + err = mc_send_command(mc_io, &cmd); |
||
11543 | + if (err) |
||
11544 | + return err; |
||
11545 | + |
||
11546 | + rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params; |
||
11547 | + counters->dequeued_requests = |
||
11548 | + le64_to_cpu(rsp_params->dequeued_requests); |
||
11549 | + counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests); |
||
11550 | + counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests); |
||
11551 | + counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes); |
||
11552 | + counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes); |
||
11553 | + counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes); |
||
11554 | + counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes); |
||
11555 | + |
||
11556 | + return 0; |
||
11557 | +} |
||
11558 | + |
||
11559 | +/** |
||
11560 | + * dpseci_get_api_version() - Get Data Path SEC Interface API version |
||
11561 | + * @mc_io: Pointer to MC portal's I/O object |
||
11562 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11563 | + * @major_ver: Major version of data path sec API |
||
11564 | + * @minor_ver: Minor version of data path sec API |
||
11565 | + * |
||
11566 | + * Return: '0' on success, error code otherwise |
||
11567 | + */ |
||
11568 | +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags, |
||
11569 | + u16 *major_ver, u16 *minor_ver) |
||
11570 | +{ |
||
11571 | + struct fsl_mc_command cmd = { 0 }; |
||
11572 | + struct dpseci_rsp_get_api_version *rsp_params; |
||
11573 | + int err; |
||
11574 | + |
||
11575 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION, |
||
11576 | + cmd_flags, 0); |
||
11577 | + err = mc_send_command(mc_io, &cmd); |
||
11578 | + if (err) |
||
11579 | + return err; |
||
11580 | + |
||
11581 | + rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params; |
||
11582 | + *major_ver = le16_to_cpu(rsp_params->major); |
||
11583 | + *minor_ver = le16_to_cpu(rsp_params->minor); |
||
11584 | + |
||
11585 | + return 0; |
||
11586 | +} |
||
11587 | + |
||
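A sketch of the usual probe-time version gate, checking the firmware-reported version against the DPSECI_VER_MAJOR/DPSECI_VER_MINOR constants from dpseci_cmd.h further below; rejecting only on a major mismatch is this example's policy:

    static int dpseci_example_check_api(struct fsl_mc_io *mc_io)
    {
            u16 major, minor;
            int err;

            err = dpseci_get_api_version(mc_io, 0, &major, &minor);
            if (err)
                    return err;

            /* a different major version implies an incompatible command set */
            if (major != DPSECI_VER_MAJOR)
                    return -EINVAL;

            return 0;
    }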
11588 | +/** |
||
11589 | + * dpseci_set_opr() - Set Order Restoration configuration |
||
11590 | + * @mc_io: Pointer to MC portal's I/O object |
||
11591 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11592 | + * @token: Token of DPSECI object |
||
11593 | + * @index: The queue index |
||
11594 | + * @options: Configuration mode options; can be OPR_OPT_CREATE or |
||
11595 | + * OPR_OPT_RETIRE |
||
11596 | + * @cfg: Configuration options for the OPR |
||
11597 | + * |
||
11598 | + * Return: '0' on success, error code otherwise |
||
11599 | + */ |
||
11600 | +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index, |
||
11601 | + u8 options, struct opr_cfg *cfg) |
||
11602 | +{ |
||
11603 | + struct fsl_mc_command cmd = { 0 }; |
||
11604 | + struct dpseci_cmd_opr *cmd_params; |
||
11605 | + |
||
11606 | + cmd.header = mc_encode_cmd_header( |
||
11607 | + DPSECI_CMDID_SET_OPR, |
||
11608 | + cmd_flags, |
||
11609 | + token); |
||
11610 | + cmd_params = (struct dpseci_cmd_opr *)cmd.params; |
||
11611 | + cmd_params->index = index; |
||
11612 | + cmd_params->options = options; |
||
11613 | + cmd_params->oloe = cfg->oloe; |
||
11614 | + cmd_params->oeane = cfg->oeane; |
||
11615 | + cmd_params->olws = cfg->olws; |
||
11616 | + cmd_params->oa = cfg->oa; |
||
11617 | + cmd_params->oprrws = cfg->oprrws; |
||
11618 | + |
||
11619 | + return mc_send_command(mc_io, &cmd); |
||
11620 | +} |
||
11621 | + |
||
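The two option values named above imply a simple lifecycle: create the order point record at setup, retire it at teardown. A sketch, assuming OPR_OPT_CREATE/OPR_OPT_RETIRE are defined alongside struct opr_cfg in the QBMan headers (both are outside this patch):

    static int dpseci_example_opr(struct fsl_mc_io *mc_io, u16 token,
                                  u8 index, struct opr_cfg *cfg, bool teardown)
    {
            u8 opt = teardown ? OPR_OPT_RETIRE : OPR_OPT_CREATE;

            return dpseci_set_opr(mc_io, 0, token, index, opt, cfg);
    }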
11622 | +/** |
||
11623 | + * dpseci_get_opr() - Retrieve Order Restoration config and query |
||
11624 | + * @mc_io: Pointer to MC portal's I/O object |
||
11625 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11626 | + * @token: Token of DPSECI object |
||
11627 | + * @index: The queue index |
||
11628 | + * @cfg: Returned OPR configuration |
||
11629 | + * @qry: Returned OPR query |
||
11630 | + * |
||
11631 | + * Return: '0' on success, error code otherwise |
||
11632 | + */ |
||
11633 | +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index, |
||
11634 | + struct opr_cfg *cfg, struct opr_qry *qry) |
||
11635 | +{ |
||
11636 | + struct fsl_mc_command cmd = { 0 }; |
||
11637 | + struct dpseci_cmd_opr *cmd_params; |
||
11638 | + struct dpseci_rsp_get_opr *rsp_params; |
||
11639 | + int err; |
||
11640 | + |
||
11641 | + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR, |
||
11642 | + cmd_flags, |
||
11643 | + token); |
||
11644 | + cmd_params = (struct dpseci_cmd_opr *)cmd.params; |
||
11645 | + cmd_params->index = index; |
||
11646 | + err = mc_send_command(mc_io, &cmd); |
||
11647 | + if (err) |
||
11648 | + return err; |
||
11649 | + |
||
11650 | + rsp_params = (struct dpseci_rsp_get_opr *)cmd.params; |
||
11651 | + qry->rip = dpseci_get_field(rsp_params->flags, OPR_RIP); |
||
11652 | + qry->enable = dpseci_get_field(rsp_params->flags, OPR_ENABLE); |
||
11653 | + cfg->oloe = rsp_params->oloe; |
||
11654 | + cfg->oeane = rsp_params->oeane; |
||
11655 | + cfg->olws = rsp_params->olws; |
||
11656 | + cfg->oa = rsp_params->oa; |
||
11657 | + cfg->oprrws = rsp_params->oprrws; |
||
11658 | + qry->nesn = le16_to_cpu(rsp_params->nesn); |
||
11659 | + qry->ndsn = le16_to_cpu(rsp_params->ndsn); |
||
11660 | + qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq); |
||
11661 | + qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS); |
||
11662 | + qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq); |
||
11663 | + qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, OPR_HSEQ_NLIS); |
||
11664 | + qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr); |
||
11665 | + qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr); |
||
11666 | + qry->opr_vid = le16_to_cpu(rsp_params->opr_vid); |
||
11667 | + qry->opr_id = le16_to_cpu(rsp_params->opr_id); |
||
11668 | + |
||
11669 | + return 0; |
||
11670 | +} |
||
11671 | + |
||
11672 | +/** |
||
11673 | + * dpseci_set_congestion_notification() - Set congestion group |
||
11674 | + * notification configuration |
||
11675 | + * @mc_io: Pointer to MC portal's I/O object |
||
11676 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11677 | + * @token: Token of DPSECI object |
||
11678 | + * @cfg: congestion notification configuration |
||
11679 | + * |
||
11680 | + * Return: '0' on success, error code otherwise |
||
11681 | + */ |
||
11682 | +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags, |
||
11683 | + u16 token, const struct dpseci_congestion_notification_cfg *cfg) |
||
11684 | +{ |
||
11685 | + struct fsl_mc_command cmd = { 0 }; |
||
11686 | + struct dpseci_cmd_congestion_notification *cmd_params; |
||
11687 | + |
||
11688 | + cmd.header = mc_encode_cmd_header( |
||
11689 | + DPSECI_CMDID_SET_CONGESTION_NOTIFICATION, |
||
11690 | + cmd_flags, |
||
11691 | + token); |
||
11692 | + cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params; |
||
11693 | + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); |
||
11694 | + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode); |
||
11695 | + cmd_params->priority = cfg->dest_cfg.priority; |
||
11696 | + dpseci_set_field(cmd_params->options, CGN_DEST_TYPE, |
||
11697 | + cfg->dest_cfg.dest_type); |
||
11698 | + dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units); |
||
11699 | + cmd_params->message_iova = cpu_to_le64(cfg->message_iova); |
||
11700 | + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx); |
||
11701 | + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry); |
||
11702 | + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit); |
||
11703 | + |
||
11704 | + return mc_send_command(mc_io, &cmd); |
||
11705 | +} |
||
11706 | + |
||
11707 | +/** |
||
11708 | + * dpseci_get_congestion_notification() - Get congestion group notification |
||
11709 | + * configuration |
||
11710 | + * @mc_io: Pointer to MC portal's I/O object |
||
11711 | + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' |
||
11712 | + * @token: Token of DPSECI object |
||
11713 | + * @cfg: congestion notification configuration |
||
11714 | + * |
||
11715 | + * Return: '0' on success, error code otherwise |
||
11716 | + */ |
||
11717 | +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags, |
||
11718 | + u16 token, struct dpseci_congestion_notification_cfg *cfg) |
||
11719 | +{ |
||
11720 | + struct fsl_mc_command cmd = { 0 }; |
||
11721 | + struct dpseci_cmd_congestion_notification *rsp_params; |
||
11722 | + int err; |
||
11723 | + |
||
11724 | + cmd.header = mc_encode_cmd_header( |
||
11725 | + DPSECI_CMDID_GET_CONGESTION_NOTIFICATION, |
||
11726 | + cmd_flags, |
||
11727 | + token); |
||
11728 | + err = mc_send_command(mc_io, &cmd); |
||
11729 | + if (err) |
||
11730 | + return err; |
||
11731 | + |
||
11732 | + rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params; |
||
11733 | + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id); |
||
11734 | + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode); |
||
11735 | + cfg->dest_cfg.priority = rsp_params->priority; |
||
11736 | + cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options, |
||
11737 | + CGN_DEST_TYPE); |
||
11738 | + cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS); |
||
11739 | + cfg->message_iova = le64_to_cpu(rsp_params->message_iova); |
||
11740 | + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx); |
||
11741 | + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry); |
||
11742 | + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit); |
||
11743 | + |
||
11744 | + return 0; |
||
11745 | +} |
||
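A sketch of arming a memory-written CSCN with frame-count thresholds; the DPSECI_CGN_MODE_* flags and the unit enum are defined in dpseci.h below, while the buffer address, the threshold and the 2:1 entry/exit hysteresis are placeholders of the example:

    static int dpseci_example_set_cgn(struct fsl_mc_io *mc_io, u16 token,
                                      u64 msg_iova, u32 thresh)
    {
            struct dpseci_congestion_notification_cfg cfg = {
                    .units = DPSECI_CONGESTION_UNIT_FRAMES,
                    .threshold_entry = thresh,
                    .threshold_exit = thresh / 2,
                    .message_iova = msg_iova,	/* DMA-able, 16B aligned */
                    .notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
                                         DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT,
            };

            return dpseci_set_congestion_notification(mc_io, 0, token, &cfg);
    }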
11746 | --- /dev/null |
||
11747 | +++ b/drivers/crypto/caam/dpseci.h |
||
11748 | @@ -0,0 +1,433 @@ |
||
11749 | +/* |
||
11750 | + * Copyright 2013-2016 Freescale Semiconductor Inc. |
||
11751 | + * Copyright 2017 NXP |
||
11752 | + * |
||
11753 | + * Redistribution and use in source and binary forms, with or without |
||
11754 | + * modification, are permitted provided that the following conditions are met: |
||
11755 | + * * Redistributions of source code must retain the above copyright |
||
11756 | + * notice, this list of conditions and the following disclaimer. |
||
11757 | + * * Redistributions in binary form must reproduce the above copyright |
||
11758 | + * notice, this list of conditions and the following disclaimer in the |
||
11759 | + * documentation and/or other materials provided with the distribution. |
||
11760 | + * * Neither the names of the above-listed copyright holders nor the |
||
11761 | + * names of any contributors may be used to endorse or promote products |
||
11762 | + * derived from this software without specific prior written permission. |
||
11763 | + * |
||
11764 | + * |
||
11765 | + * ALTERNATIVELY, this software may be distributed under the terms of the |
||
11766 | + * GNU General Public License ("GPL") as published by the Free Software |
||
11767 | + * Foundation, either version 2 of that License or (at your option) any |
||
11768 | + * later version. |
||
11769 | + * |
||
11770 | + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
||
11771 | + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
||
11772 | + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
||
11773 | + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE |
||
11774 | + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
||
11775 | + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
||
11776 | + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
||
11777 | + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
||
11778 | + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
||
11779 | + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
||
11780 | + * POSSIBILITY OF SUCH DAMAGE. |
||
11781 | + */ |
||
11782 | +#ifndef _DPSECI_H_ |
||
11783 | +#define _DPSECI_H_ |
||
11784 | + |
||
11785 | +/* |
||
11786 | + * Data Path SEC Interface API |
||
11787 | + * Contains initialization APIs and runtime control APIs for DPSECI |
||
11788 | + */ |
||
11789 | + |
||
11790 | +struct fsl_mc_io; |
||
11791 | +struct opr_cfg; |
||
11792 | +struct opr_qry; |
||
11793 | + |
||
11794 | +/** |
||
11795 | + * General DPSECI macros |
||
11796 | + */ |
||
11797 | + |
||
11798 | +/** |
||
11799 | + * Maximum number of Tx/Rx queues per DPSECI object |
||
11800 | + */ |
||
11801 | +#define DPSECI_MAX_QUEUE_NUM 16 |
||
11802 | + |
||
11803 | +/** |
||
11804 | + * All queues considered; see dpseci_set_rx_queue() |
||
11805 | + */ |
||
11806 | +#define DPSECI_ALL_QUEUES (u8)(-1) |
||
11807 | + |
||
11808 | +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id, |
||
11809 | + u16 *token); |
||
11810 | + |
||
11811 | +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); |
||
11812 | + |
||
11813 | +/** |
||
11814 | + * Enable the Congestion Group support |
||
11815 | + */ |
||
11816 | +#define DPSECI_OPT_HAS_CG 0x000020 |
||
11817 | + |
||
11818 | +/** |
||
11819 | + * Enable the Order Restoration support |
||
11820 | + */ |
||
11821 | +#define DPSECI_OPT_HAS_OPR 0x000040 |
||
11822 | + |
||
11823 | +/** |
||
11824 | + * Order Point Records are shared for the entire DPSECI |
||
11825 | + */ |
||
11826 | +#define DPSECI_OPT_OPR_SHARED 0x000080 |
||
11827 | + |
||
11828 | +/** |
||
11829 | + * struct dpseci_cfg - Structure representing DPSECI configuration |
||
11830 | + * @options: Any combination of the following options: |
||
11831 | + * DPSECI_OPT_HAS_CG |
||
11832 | + * DPSECI_OPT_HAS_OPR |
||
11833 | + * DPSECI_OPT_OPR_SHARED |
||
11834 | + * @num_tx_queues: num of queues towards the SEC |
||
11835 | + * @num_rx_queues: num of queues back from the SEC |
||
11836 | + * @priorities: Priorities for the SEC hardware processing; |
||
11837 | + * each place in the array is the priority of the tx queue |
||
11838 | + * towards the SEC; |
||
11839 | + * valid priorities are configured with values 1-8; |
||
11840 | + */ |
||
11841 | +struct dpseci_cfg { |
||
11842 | + u32 options; |
||
11843 | + u8 num_tx_queues; |
||
11844 | + u8 num_rx_queues; |
||
11845 | + u8 priorities[DPSECI_MAX_QUEUE_NUM]; |
||
11846 | +}; |
||
11847 | + |
||
11848 | +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags, |
||
11849 | + const struct dpseci_cfg *cfg, u32 *obj_id); |
||
11850 | + |
||
11851 | +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags, |
||
11852 | + u32 object_id); |
||
11853 | + |
||
11854 | +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); |
||
11855 | + |
||
11856 | +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); |
||
11857 | + |
||
11858 | +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11859 | + int *en); |
||
11860 | + |
||
11861 | +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); |
||
11862 | + |
||
11863 | +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11864 | + u8 irq_index, u8 *en); |
||
11865 | + |
||
11866 | +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11867 | + u8 irq_index, u8 en); |
||
11868 | + |
||
11869 | +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11870 | + u8 irq_index, u32 *mask); |
||
11871 | + |
||
11872 | +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11873 | + u8 irq_index, u32 mask); |
||
11874 | + |
||
11875 | +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11876 | + u8 irq_index, u32 *status); |
||
11877 | + |
||
11878 | +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11879 | + u8 irq_index, u32 status); |
||
11880 | + |
||
11881 | +/** |
||
11882 | + * struct dpseci_attr - Structure representing DPSECI attributes |
||
11883 | + * @id: DPSECI object ID |
||
11884 | + * @num_tx_queues: number of queues towards the SEC |
||
11885 | + * @num_rx_queues: number of queues back from the SEC |
||
11886 | + * @options: any combination of the following options: |
||
11887 | + * DPSECI_OPT_HAS_CG |
||
11888 | + * DPSECI_OPT_HAS_OPR |
||
11889 | + * DPSECI_OPT_OPR_SHARED |
||
11890 | + */ |
||
11891 | +struct dpseci_attr { |
||
11892 | + int id; |
||
11893 | + u8 num_tx_queues; |
||
11894 | + u8 num_rx_queues; |
||
11895 | + u32 options; |
||
11896 | +}; |
||
11897 | + |
||
11898 | +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11899 | + struct dpseci_attr *attr); |
||
11900 | + |
||
11901 | +/** |
||
11902 | + * enum dpseci_dest - DPSECI destination types |
||
11903 | + * @DPSECI_DEST_NONE: Unassigned destination; the queue is set in parked mode |
||
11904 | + * and does not generate FQDAN notifications; user is expected to dequeue |
||
11905 | + * from the queue based on polling or other user-defined method |
||
11906 | + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN |
||
11907 | + * notifications to the specified DPIO; user is expected to dequeue from |
||
11908 | + * the queue only after notification is received |
||
11909 | + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate |
||
11910 | + * FQDAN notifications, but is connected to the specified DPCON object; |
||
11911 | + * user is expected to dequeue from the DPCON channel |
||
11912 | + */ |
||
11913 | +enum dpseci_dest { |
||
11914 | + DPSECI_DEST_NONE = 0, |
||
11915 | + DPSECI_DEST_DPIO, |
||
11916 | + DPSECI_DEST_DPCON |
||
11917 | +}; |
||
11918 | + |
||
11919 | +/** |
||
11920 | + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters |
||
11921 | + * @dest_type: Destination type |
||
11922 | + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type |
||
11923 | + * @priority: Priority selection within the DPIO or DPCON channel; valid values |
||
11924 | + * are 0-1 or 0-7, depending on the number of priorities in that channel; |
||
11925 | + * not relevant for 'DPSECI_DEST_NONE' option |
||
11926 | + */ |
||
11927 | +struct dpseci_dest_cfg { |
||
11928 | + enum dpseci_dest dest_type; |
||
11929 | + int dest_id; |
||
11930 | + u8 priority; |
||
11931 | +}; |
||
11932 | + |
||
11933 | +/** |
||
11934 | + * DPSECI queue modification options |
||
11935 | + */ |
||
11936 | + |
||
11937 | +/** |
||
11938 | + * Select to modify the user's context associated with the queue |
||
11939 | + */ |
||
11940 | +#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001 |
||
11941 | + |
||
11942 | +/** |
||
11943 | + * Select to modify the queue's destination |
||
11944 | + */ |
||
11945 | +#define DPSECI_QUEUE_OPT_DEST 0x00000002 |
||
11946 | + |
||
11947 | +/** |
||
11948 | + * Select to modify the queue's order preservation |
||
11949 | + */ |
||
11950 | +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004 |
||
11951 | + |
||
11952 | +/** |
||
11953 | + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration |
||
11954 | + * @options: Flags representing the suggested modifications to the queue; |
||
11955 | + * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags |
||
11956 | + * @order_preservation_en: order preservation configuration for the Rx queue; |
||
11957 | + * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options' |
||
11958 | + * @user_ctx: User context value provided in the frame descriptor of each |
||
11959 | + * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained |
||
11960 | + * in 'options' |
||
11961 | + * @dest_cfg: Queue destination parameters; valid only if |
||
11962 | + * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options' |
||
11963 | + */ |
||
11964 | +struct dpseci_rx_queue_cfg { |
||
11965 | + u32 options; |
||
11966 | + int order_preservation_en; |
||
11967 | + u64 user_ctx; |
||
11968 | + struct dpseci_dest_cfg dest_cfg; |
||
11969 | +}; |
||
11970 | + |
||
11971 | +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11972 | + u8 queue, const struct dpseci_rx_queue_cfg *cfg); |
||
11973 | + |
||
11974 | +/** |
||
11975 | + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues |
||
11976 | + * @user_ctx: User context value provided in the frame descriptor of each |
||
11977 | + * dequeued frame |
||
11978 | + * @order_preservation_en: Status of the order preservation configuration on the |
||
11979 | + * queue |
||
11980 | + * @dest_cfg: Queue destination configuration |
||
11981 | + * @fqid: Virtual FQID value to be used for dequeue operations |
||
11982 | + */ |
||
11983 | +struct dpseci_rx_queue_attr { |
||
11984 | + u64 user_ctx; |
||
11985 | + int order_preservation_en; |
||
11986 | + struct dpseci_dest_cfg dest_cfg; |
||
11987 | + u32 fqid; |
||
11988 | +}; |
||
11989 | + |
||
11990 | +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
11991 | + u8 queue, struct dpseci_rx_queue_attr *attr); |
||
11992 | + |
||
11993 | +/** |
||
11994 | + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues |
||
11995 | + * @fqid: Virtual FQID to be used for sending frames to SEC hardware |
||
11996 | + * @priority: SEC hardware processing priority for the queue |
||
11997 | + */ |
||
11998 | +struct dpseci_tx_queue_attr { |
||
11999 | + u32 fqid; |
||
12000 | + u8 priority; |
||
12001 | +}; |
||
12002 | + |
||
12003 | +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
12004 | + u8 queue, struct dpseci_tx_queue_attr *attr); |
||
12005 | + |
||
12006 | +/** |
||
12007 | + * struct dpseci_sec_attr - Structure representing attributes of the SEC |
||
12008 | + * hardware accelerator |
||
12009 | + * @ip_id: ID for SEC |
||
12010 | + * @major_rev: Major revision number for SEC |
||
12011 | + * @minor_rev: Minor revision number for SEC |
||
12012 | + * @era: SEC Era |
||
12013 | + * @deco_num: The number of copies of the DECO that are implemented in this |
||
12014 | + * version of SEC |
||
12015 | + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this |
||
12016 | + * version of SEC |
||
12017 | + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this |
||
12018 | + * version of SEC |
||
12019 | + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are |
||
12020 | + * implemented in this version of SEC |
||
12021 | + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are |
||
12022 | + * implemented in this version of SEC |
||
12023 | + * @crc_acc_num: The number of copies of the CRC module that are implemented in |
||
12024 | + * this version of SEC |
||
12025 | + * @pk_acc_num: The number of copies of the Public Key module that are |
||
12026 | + * implemented in this version of SEC |
||
12027 | + * @kasumi_acc_num: The number of copies of the Kasumi module that are |
||
12028 | + * implemented in this version of SEC |
||
12029 | + * @rng_acc_num: The number of copies of the Random Number Generator that are |
||
12030 | + * implemented in this version of SEC |
||
12031 | + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are |
||
12032 | + * implemented in this version of SEC |
||
12033 | + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented |
||
12034 | + * in this version of SEC |
||
12035 | + * @des_acc_num: The number of copies of the DES module that are implemented in |
||
12036 | + * this version of SEC |
||
12037 | + * @aes_acc_num: The number of copies of the AES module that are implemented in |
||
12038 | + * this version of SEC |
||
12039 | + * @ccha_acc_num: The number of copies of the ChaCha20 module that are |
||
12040 | + * implemented in this version of SEC. |
||
12041 | + * @ptha_acc_num: The number of copies of the Poly1305 module that are |
||
12042 | + * implemented in this version of SEC. |
||
12043 | + **/ |
||
12044 | +struct dpseci_sec_attr { |
||
12045 | + u16 ip_id; |
||
12046 | + u8 major_rev; |
||
12047 | + u8 minor_rev; |
||
12048 | + u8 era; |
||
12049 | + u8 deco_num; |
||
12050 | + u8 zuc_auth_acc_num; |
||
12051 | + u8 zuc_enc_acc_num; |
||
12052 | + u8 snow_f8_acc_num; |
||
12053 | + u8 snow_f9_acc_num; |
||
12054 | + u8 crc_acc_num; |
||
12055 | + u8 pk_acc_num; |
||
12056 | + u8 kasumi_acc_num; |
||
12057 | + u8 rng_acc_num; |
||
12058 | + u8 md_acc_num; |
||
12059 | + u8 arc4_acc_num; |
||
12060 | + u8 des_acc_num; |
||
12061 | + u8 aes_acc_num; |
||
12062 | + u8 ccha_acc_num; |
||
12063 | + u8 ptha_acc_num; |
||
12064 | +}; |
||
12065 | + |
||
12066 | +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
12067 | + struct dpseci_sec_attr *attr); |
||
12068 | + |
||
12069 | +/** |
||
12070 | + * struct dpseci_sec_counters - Structure representing global SEC counters |
||
12071 | + * (not per-DPSECI counters) |
||
12072 | + * @dequeued_requests: Number of Requests Dequeued |
||
12073 | + * @ob_enc_requests: Number of Outbound Encrypt Requests |
||
12074 | + * @ib_dec_requests: Number of Inbound Decrypt Requests |
||
12075 | + * @ob_enc_bytes: Number of Outbound Bytes Encrypted |
||
12076 | + * @ob_prot_bytes: Number of Outbound Bytes Protected |
||
12077 | + * @ib_dec_bytes: Number of Inbound Bytes Decrypted |
||
12078 | + * @ib_valid_bytes: Number of Inbound Bytes Validated |
||
12079 | + */ |
||
12080 | +struct dpseci_sec_counters { |
||
12081 | + u64 dequeued_requests; |
||
12082 | + u64 ob_enc_requests; |
||
12083 | + u64 ib_dec_requests; |
||
12084 | + u64 ob_enc_bytes; |
||
12085 | + u64 ob_prot_bytes; |
||
12086 | + u64 ib_dec_bytes; |
||
12087 | + u64 ib_valid_bytes; |
||
12088 | +}; |
||
12089 | + |
||
12090 | +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, |
||
12091 | + struct dpseci_sec_counters *counters); |
||
12092 | + |
||
12093 | +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags, |
||
12094 | + u16 *major_ver, u16 *minor_ver); |
||
12095 | + |
||
12096 | +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index, |
||
12097 | + u8 options, struct opr_cfg *cfg); |
||
12098 | + |
||
12099 | +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index, |
||
12100 | + struct opr_cfg *cfg, struct opr_qry *qry); |
||
12101 | + |
||
12102 | +/** |
||
12103 | + * enum dpseci_congestion_unit - DPSECI congestion units |
||
12104 | + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units |
||
12105 | + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units |
||
12106 | + */ |
||
12107 | +enum dpseci_congestion_unit { |
||
12108 | + DPSECI_CONGESTION_UNIT_BYTES = 0, |
||
12109 | + DPSECI_CONGESTION_UNIT_FRAMES |
||
12110 | +}; |
||
12111 | + |
||
12112 | +/** |
||
12113 | + * CSCN message is written to message_iova once entering a |
||
12114 | + * congestion state (see 'threshold_entry') |
||
12115 | + */ |
||
12116 | +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001 |
||
12117 | + |
||
12118 | +/** |
||
12119 | + * CSCN message is written to message_iova once exiting a |
||
12120 | + * congestion state (see 'threshold_exit') |
||
12121 | + */ |
||
12122 | +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002 |
||
12123 | + |
||
12124 | +/** |
||
12125 | + * CSCN write will attempt to allocate into a cache (coherent write); |
||
12126 | + * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected |
||
12127 | + */ |
||
12128 | +#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004 |
||
12129 | + |
||
12130 | +/** |
||
12131 | + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to |
||
12132 | + * DPIO/DPCON's WQ channel once entering a congestion state |
||
12133 | + * (see 'threshold_entry') |
||
12134 | + */ |
||
12135 | +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008 |
||
12136 | + |
||
12137 | +/** |
||
12138 | + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to |
||
12139 | + * DPIO/DPCON's WQ channel once exiting a congestion state |
||
12140 | + * (see 'threshold_exit') |
||
12141 | + */ |
||
12142 | +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010 |
||
12143 | + |
||
12144 | +/** |
||
12145 | + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written |
||
12146 | + * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately |
||
12147 | + * (if enabled) |
||
12148 | + */ |
||
12149 | +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020 |
||
12150 | + |
||
12151 | +/** |
||
12152 | + * struct dpseci_congestion_notification_cfg - congestion notification |
||
12153 | + * configuration |
||
12154 | + * @units: units type |
||
12155 | + * @threshold_entry: above this threshold we enter a congestion state; |
||
12156 | + * set it to '0' to disable it |
||
12157 | + * @threshold_exit: below this threshold we exit the congestion state |
||
12158 | + * @message_ctx: The context that will be part of the CSCN message |
||
12159 | + * @message_iova: I/O virtual address (must be in DMA-able memory), |
||
12160 | + * must be 16B aligned |
||
12161 | + * @dest_cfg: CSCN can be sent to either a DPIO or a DPCON WQ channel |
||
12162 | + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>' |
||
12163 | + * values |
||
12164 | + */ |
||
12165 | +struct dpseci_congestion_notification_cfg { |
||
12166 | + enum dpseci_congestion_unit units; |
||
12167 | + u32 threshold_entry; |
||
12168 | + u32 threshold_exit; |
||
12169 | + u64 message_ctx; |
||
12170 | + u64 message_iova; |
||
12171 | + struct dpseci_dest_cfg dest_cfg; |
||
12172 | + u16 notification_mode; |
||
12173 | +}; |
||
12174 | + |
||
12175 | +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags, |
||
12176 | + u16 token, const struct dpseci_congestion_notification_cfg *cfg); |
||
12177 | + |
||
12178 | +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags, |
||
12179 | + u16 token, struct dpseci_congestion_notification_cfg *cfg); |
||
12180 | + |
||
12181 | +#endif /* _DPSECI_H_ */ |
||
12182 | --- /dev/null |
||
12183 | +++ b/drivers/crypto/caam/dpseci_cmd.h |
||
12184 | @@ -0,0 +1,287 @@ |
||
12185 | +/* |
||
12186 | + * Copyright 2013-2016 Freescale Semiconductor Inc. |
||
12187 | + * Copyright 2017 NXP |
||
12188 | + * |
||
12189 | + * Redistribution and use in source and binary forms, with or without |
||
12190 | + * modification, are permitted provided that the following conditions are met: |
||
12191 | + * * Redistributions of source code must retain the above copyright |
||
12192 | + * notice, this list of conditions and the following disclaimer. |
||
12193 | + * * Redistributions in binary form must reproduce the above copyright |
||
12194 | + * notice, this list of conditions and the following disclaimer in the |
||
12195 | + * documentation and/or other materials provided with the distribution. |
||
12196 | + * * Neither the names of the above-listed copyright holders nor the |
||
12197 | + * names of any contributors may be used to endorse or promote products |
||
12198 | + * derived from this software without specific prior written permission. |
||
12199 | + * |
||
12200 | + * |
||
12201 | + * ALTERNATIVELY, this software may be distributed under the terms of the |
||
12202 | + * GNU General Public License ("GPL") as published by the Free Software |
||
12203 | + * Foundation, either version 2 of that License or (at your option) any |
||
12204 | + * later version. |
||
12205 | + * |
||
12206 | + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
||
12207 | + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
||
12208 | + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
||
12209 | + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE |
||
12210 | + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
||
12211 | + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
||
12212 | + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
||
12213 | + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
||
12214 | + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
||
12215 | + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
||
12216 | + * POSSIBILITY OF SUCH DAMAGE. |
||
12217 | + */ |
||
12218 | + |
||
12219 | +#ifndef _DPSECI_CMD_H_ |
||
12220 | +#define _DPSECI_CMD_H_ |
||
12221 | + |
||
12222 | +/* DPSECI Version */ |
||
12223 | +#define DPSECI_VER_MAJOR 5 |
||
12224 | +#define DPSECI_VER_MINOR 3 |
||
12225 | + |
||
12226 | +#define DPSECI_VER(maj, min) (((maj) << 16) | (min)) |
||
12227 | +#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR) |
||
12228 | + |
||
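/* For example: DPSECI_VERSION above evaluates to (5 << 16) | 3 = 0x00050003. */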
12229 | +/* Command versioning */ |
||
12230 | +#define DPSECI_CMD_BASE_VERSION 1 |
||
12231 | +#define DPSECI_CMD_BASE_VERSION_V2 2 |
||
12232 | +#define DPSECI_CMD_BASE_VERSION_V3 3 |
||
12233 | +#define DPSECI_CMD_ID_OFFSET 4 |
||
12234 | + |
||
12235 | +#define DPSECI_CMD_V1(id) (((id) << DPSECI_CMD_ID_OFFSET) | \ |
||
12236 | + DPSECI_CMD_BASE_VERSION) |
||
12237 | + |
||
12238 | +#define DPSECI_CMD_V2(id) (((id) << DPSECI_CMD_ID_OFFSET) | \ |
||
12239 | + DPSECI_CMD_BASE_VERSION_V2) |
||
12240 | + |
||
12241 | +#define DPSECI_CMD_V3(id) (((id) << DPSECI_CMD_ID_OFFSET) | \ |
||
12242 | + DPSECI_CMD_BASE_VERSION_V3) |
||
12243 | + |
||
12244 | +/* Command IDs */ |
||
12245 | +#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800) |
||
12246 | +#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809) |
||
12247 | +#define DPSECI_CMDID_CREATE DPSECI_CMD_V3(0x909) |
||
12248 | +#define DPSECI_CMDID_DESTROY DPSECI_CMD_V1(0x989) |
||
12249 | +#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09) |
||
12250 | + |
||
12251 | +#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002) |
||
12252 | +#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003) |
||
12253 | +#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004) |
||
12254 | +#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005) |
||
12255 | +#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006) |
||
12256 | + |
||
12257 | +#define DPSECI_CMDID_SET_IRQ_ENABLE DPSECI_CMD_V1(0x012) |
||
12258 | +#define DPSECI_CMDID_GET_IRQ_ENABLE DPSECI_CMD_V1(0x013) |
||
12259 | +#define DPSECI_CMDID_SET_IRQ_MASK DPSECI_CMD_V1(0x014) |
||
12260 | +#define DPSECI_CMDID_GET_IRQ_MASK DPSECI_CMD_V1(0x015) |
||
12261 | +#define DPSECI_CMDID_GET_IRQ_STATUS DPSECI_CMD_V1(0x016) |
||
12262 | +#define DPSECI_CMDID_CLEAR_IRQ_STATUS DPSECI_CMD_V1(0x017) |
||
12263 | + |
||
12264 | +#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194) |
||
12265 | +#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196) |
||
12266 | +#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197) |
||
12267 | +#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198) |
||
12268 | +#define DPSECI_CMDID_GET_SEC_COUNTERS DPSECI_CMD_V1(0x199) |
||
12269 | +#define DPSECI_CMDID_SET_OPR DPSECI_CMD_V1(0x19A) |
||
12270 | +#define DPSECI_CMDID_GET_OPR DPSECI_CMD_V1(0x19B) |
||
12271 | +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170) |
||
12272 | +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171) |
||
12273 | + |
||
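/* For example, DPSECI_CMDID_GET_SEC_ATTR encodes as (0x198 << 4) | 2 = 0x1982:
 * the command id sits above DPSECI_CMD_ID_OFFSET, with the command version
 * in the low nibble.
 */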
12274 | +/* Macros for accessing command fields smaller than 1 byte */ |
||
12275 | +#define DPSECI_MASK(field) \ |
||
12276 | + GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \ |
||
12277 | + DPSECI_##field##_SHIFT) |
||
12278 | + |
||
12279 | +#define dpseci_set_field(var, field, val) \ |
||
12280 | + ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field))) |
||
12281 | + |
||
12282 | +#define dpseci_get_field(var, field) \ |
||
12283 | + (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT) |
||
12284 | + |
||
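/* Worked example: with DPSECI_CGN_UNITS_SHIFT = 4 and DPSECI_CGN_UNITS_SIZE = 2
 * (defined further down), DPSECI_MASK(CGN_UNITS) is GENMASK(5, 4) == 0x30, so
 * dpseci_set_field(options, CGN_UNITS, units) performs
 *     options |= (units << 4) & 0x30;
 * and dpseci_get_field(options, CGN_UNITS) recovers (options & 0x30) >> 4.
 */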
12285 | +struct dpseci_cmd_open { |
||
12286 | + __le32 dpseci_id; |
||
12287 | +}; |
||
12288 | + |
||
12289 | +struct dpseci_cmd_create { |
||
12290 | + u8 priorities[8]; |
||
12291 | + u8 num_tx_queues; |
||
12292 | + u8 num_rx_queues; |
||
12293 | + u8 pad0[6]; |
||
12294 | + __le32 options; |
||
12295 | + __le32 pad1; |
||
12296 | + u8 priorities2[8]; |
||
12297 | +}; |
||
12298 | + |
||
12299 | +struct dpseci_cmd_destroy { |
||
12300 | + __le32 object_id; |
||
12301 | +}; |
||
12302 | + |
||
12303 | +#define DPSECI_ENABLE_SHIFT 0 |
||
12304 | +#define DPSECI_ENABLE_SIZE 1 |
||
12305 | + |
||
12306 | +struct dpseci_rsp_is_enabled { |
||
12307 | + u8 is_enabled; |
||
12308 | +}; |
||
12309 | + |
||
12310 | +struct dpseci_cmd_irq_enable { |
||
12311 | + u8 enable_state; |
||
12312 | + u8 pad[3]; |
||
12313 | + u8 irq_index; |
||
12314 | +}; |
||
12315 | + |
||
12316 | +struct dpseci_rsp_get_irq_enable { |
||
12317 | + u8 enable_state; |
||
12318 | +}; |
||
12319 | + |
||
12320 | +struct dpseci_cmd_irq_mask { |
||
12321 | + __le32 mask; |
||
12322 | + u8 irq_index; |
||
12323 | +}; |
||
12324 | + |
||
12325 | +struct dpseci_cmd_irq_status { |
||
12326 | + __le32 status; |
||
12327 | + u8 irq_index; |
||
12328 | +}; |
||
12329 | + |
||
12330 | +struct dpseci_rsp_get_attributes { |
||
12331 | + __le32 id; |
||
12332 | + __le32 pad0; |
||
12333 | + u8 num_tx_queues; |
||
12334 | + u8 num_rx_queues; |
||
12335 | + u8 pad1[6]; |
||
12336 | + __le32 options; |
||
12337 | +}; |
||
12338 | + |
||
12339 | +#define DPSECI_DEST_TYPE_SHIFT 0 |
||
12340 | +#define DPSECI_DEST_TYPE_SIZE 4 |
||
12341 | + |
||
12342 | +#define DPSECI_ORDER_PRESERVATION_SHIFT 0 |
||
12343 | +#define DPSECI_ORDER_PRESERVATION_SIZE 1 |
||
12344 | + |
||
12345 | +struct dpseci_cmd_queue { |
||
12346 | + __le32 dest_id; |
||
12347 | + u8 priority; |
||
12348 | + u8 queue; |
||
12349 | + u8 dest_type; |
||
12350 | + u8 pad; |
||
12351 | + __le64 user_ctx; |
||
12352 | + union { |
||
12353 | + __le32 options; |
||
12354 | + __le32 fqid; |
||
12355 | + }; |
||
12356 | + u8 order_preservation_en; |
||
12357 | +}; |
||
12358 | + |
||
12359 | +struct dpseci_rsp_get_tx_queue { |
||
12360 | + __le32 pad; |
||
12361 | + __le32 fqid; |
||
12362 | + u8 priority; |
||
12363 | +}; |
||
12364 | + |
||
12365 | +struct dpseci_rsp_get_sec_attr { |
||
12366 | + __le16 ip_id; |
||
12367 | + u8 major_rev; |
||
12368 | + u8 minor_rev; |
||
12369 | + u8 era; |
||
12370 | + u8 pad0[3]; |
||
12371 | + u8 deco_num; |
||
12372 | + u8 zuc_auth_acc_num; |
||
12373 | + u8 zuc_enc_acc_num; |
||
12374 | + u8 pad1; |
||
12375 | + u8 snow_f8_acc_num; |
||
12376 | + u8 snow_f9_acc_num; |
||
12377 | + u8 crc_acc_num; |
||
12378 | + u8 pad2; |
||
12379 | + u8 pk_acc_num; |
||
12380 | + u8 kasumi_acc_num; |
||
12381 | + u8 rng_acc_num; |
||
12382 | + u8 pad3; |
||
12383 | + u8 md_acc_num; |
||
12384 | + u8 arc4_acc_num; |
||
12385 | + u8 des_acc_num; |
||
12386 | + u8 aes_acc_num; |
||
12387 | + u8 ccha_acc_num; |
||
12388 | + u8 ptha_acc_num; |
||
12389 | +}; |
||
12390 | + |
||
12391 | +struct dpseci_rsp_get_sec_counters { |
||
12392 | + __le64 dequeued_requests; |
||
12393 | + __le64 ob_enc_requests; |
||
12394 | + __le64 ib_dec_requests; |
||
12395 | + __le64 ob_enc_bytes; |
||
12396 | + __le64 ob_prot_bytes; |
||
12397 | + __le64 ib_dec_bytes; |
||
12398 | + __le64 ib_valid_bytes; |
||
12399 | +}; |
||
12400 | + |
||
12401 | +struct dpseci_rsp_get_api_version { |
||
12402 | + __le16 major; |
||
12403 | + __le16 minor; |
||
12404 | +}; |
||
12405 | + |
||
12406 | +struct dpseci_cmd_opr { |
||
12407 | + __le16 pad; |
||
12408 | + u8 index; |
||
12409 | + u8 options; |
||
12410 | + u8 pad1[7]; |
||
12411 | + u8 oloe; |
||
12412 | + u8 oeane; |
||
12413 | + u8 olws; |
||
12414 | + u8 oa; |
||
12415 | + u8 oprrws; |
||
12416 | +}; |
||
12417 | + |
||
12418 | +#define DPSECI_OPR_RIP_SHIFT 0 |
||
12419 | +#define DPSECI_OPR_RIP_SIZE 1 |
||
12420 | +#define DPSECI_OPR_ENABLE_SHIFT 1 |
||
12421 | +#define DPSECI_OPR_ENABLE_SIZE 1 |
||
12422 | +#define DPSECI_OPR_TSEQ_NLIS_SHIFT 0 |
||
12423 | +#define DPSECI_OPR_TSEQ_NLIS_SIZE 1 |
||
12424 | +#define DPSECI_OPR_HSEQ_NLIS_SHIFT 0 |
||
12425 | +#define DPSECI_OPR_HSEQ_NLIS_SIZE 1 |
||
12426 | + |
||
12427 | +struct dpseci_rsp_get_opr { |
||
12428 | + __le64 pad; |
||
12429 | + u8 flags; |
||
12430 | + u8 pad0[2]; |
||
12431 | + u8 oloe; |
||
12432 | + u8 oeane; |
||
12433 | + u8 olws; |
||
12434 | + u8 oa; |
||
12435 | + u8 oprrws; |
||
12436 | + __le16 nesn; |
||
12437 | + __le16 pad1; |
||
12438 | + __le16 ndsn; |
||
12439 | + __le16 pad2; |
||
12440 | + __le16 ea_tseq; |
||
12441 | + u8 tseq_nlis; |
||
12442 | + u8 pad3; |
||
12443 | + __le16 ea_hseq; |
||
12444 | + u8 hseq_nlis; |
||
12445 | + u8 pad4; |
||
12446 | + __le16 ea_hptr; |
||
12447 | + __le16 pad5; |
||
12448 | + __le16 ea_tptr; |
||
12449 | + __le16 pad6; |
||
12450 | + __le16 opr_vid; |
||
12451 | + __le16 pad7; |
||
12452 | + __le16 opr_id; |
||
12453 | +}; |
||
12454 | + |
||
12455 | +#define DPSECI_CGN_DEST_TYPE_SHIFT 0 |
||
12456 | +#define DPSECI_CGN_DEST_TYPE_SIZE 4 |
||
12457 | +#define DPSECI_CGN_UNITS_SHIFT 4 |
||
12458 | +#define DPSECI_CGN_UNITS_SIZE 2 |
||
12459 | + |
||
12460 | +struct dpseci_cmd_congestion_notification { |
||
12461 | + __le32 dest_id; |
||
12462 | + __le16 notification_mode; |
||
12463 | + u8 priority; |
||
12464 | + u8 options; |
||
12465 | + __le64 message_iova; |
||
12466 | + __le64 message_ctx; |
||
12467 | + __le32 threshold_entry; |
||
12468 | + __le32 threshold_exit; |
||
12469 | +}; |
||
12470 | + |
||
12471 | +#endif /* _DPSECI_CMD_H_ */ |
||
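A sketch of how these definitions fit together when marshaling the user-visible configuration into an MC command; the fsl_mc_command named cmd and the dpseci_dest_cfg field names are assumptions, not shown in this hunk:

	struct dpseci_cmd_congestion_notification *cmd_params =
		(struct dpseci_cmd_congestion_notification *)cmd.params;

	cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
	cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
	cmd_params->priority = cfg->dest_cfg.priority;
	dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
			 cfg->dest_cfg.dest_type);
	dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
	cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
	cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
	cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
	cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);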
12472 | --- a/drivers/crypto/caam/error.c |
||
12473 | +++ b/drivers/crypto/caam/error.c |
||
12474 | @@ -108,6 +108,54 @@ static const struct { |
||
12475 | { 0xF1, "3GPP HFN matches or exceeds the Threshold" }, |
||
12476 | }; |
||
12477 | |||
12478 | +static const struct { |
||
12479 | + u8 value; |
||
12480 | + const char *error_text; |
||
12481 | +} qi_error_list[] = { |
||
12482 | + { 0x1F, "Job terminated by FQ or ICID flush" }, |
||
12483 | + { 0x20, "FD format error"}, |
||
12484 | + { 0x21, "FD command format error"}, |
||
12485 | + { 0x23, "FL format error"}, |
||
12486 | + { 0x25, "CRJD specified in FD, but not enabled in FLC"}, |
||
12487 | + { 0x30, "Max. buffer size too small"}, |
||
12488 | + { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"}, |
||
12489 | + { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format"}, |
||
12490 | + { 0x33, "Size over/underflow (allocate mode)"}, |
||
12491 | + { 0x34, "Size over/underflow (reuse mode)"}, |
||
12492 | + { 0x35, "Length exceeds max. short length (allocate mode, S/G format)"}, |
||
12493 | + { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"}, |
||
12494 | + { 0x41, "SBC frame format not supported (allocate mode)"}, |
||
12495 | + { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"}, |
||
12496 | + { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"}, |
||
12497 | + { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"}, |
||
12498 | + { 0x45, "DHR correction underflow (reuse mode, single buffer format)"}, |
||
12499 | + { 0x46, "Annotation length exceeds offset (reuse mode)"}, |
||
12500 | + { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"}, |
||
12501 | + { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"}, |
||
12502 | + { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"}, |
||
12503 | + { 0x51, "Unsupported IF reuse mode"}, |
||
12504 | + { 0x52, "Unsupported FL use mode"}, |
||
12505 | + { 0x53, "Unsupported RJD use mode"}, |
||
12506 | + { 0x54, "Unsupported inline descriptor use mode"}, |
||
12507 | + { 0xC0, "Table buffer pool 0 depletion"}, |
||
12508 | + { 0xC1, "Table buffer pool 1 depletion"}, |
||
12509 | + { 0xC2, "Data buffer pool 0 depletion, no OF allocated"}, |
||
12510 | + { 0xC3, "Data buffer pool 1 depletion, no OF allocated"}, |
||
12511 | + { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"}, |
||
12512 | + { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"}, |
||
12513 | + { 0xD0, "FLC read error"}, |
||
12514 | + { 0xD1, "FL read error"}, |
||
12515 | + { 0xD2, "FL write error"}, |
||
12516 | + { 0xD3, "OF SGT write error"}, |
||
12517 | + { 0xD4, "PTA read error"}, |
||
12518 | + { 0xD5, "PTA write error"}, |
||
12519 | + { 0xD6, "OF SGT F-bit write error"}, |
||
12520 | + { 0xD7, "ASA write error"}, |
||
12521 | + { 0xE1, "FLC[ICR]=0 ICID error"}, |
||
12522 | + { 0xE2, "FLC[ICR]=1 ICID error"}, |
||
12523 | + { 0xE4, "source of ICID flush not trusted (BDI = 0)"}, |
||
12524 | +}; |
||
12525 | + |
||
12526 | static const char * const cha_id_list[] = { |
||
12527 | "", |
||
12528 | "AES", |
||
12529 | @@ -236,6 +284,27 @@ static void report_deco_status(struct de |
||
12530 | status, error, idx_str, idx, err_str, err_err_code); |
||
12531 | } |
||
12532 | |||
12533 | +static void report_qi_status(struct device *qidev, const u32 status, |
||
12534 | + const char *error) |
||
12535 | +{ |
||
12536 | + u8 err_id = status & JRSTA_QIERR_ERROR_MASK; |
||
12537 | + const char *err_str = "unidentified error value 0x"; |
||
12538 | + char err_err_code[3] = { 0 }; |
||
12539 | + int i; |
||
12540 | + |
||
12541 | + for (i = 0; i < ARRAY_SIZE(qi_error_list); i++) |
||
12542 | + if (qi_error_list[i].value == err_id) |
||
12543 | + break; |
||
12544 | + |
||
12545 | + if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text) |
||
12546 | + err_str = qi_error_list[i].error_text; |
||
12547 | + else |
||
12548 | + snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id); |
||
12549 | + |
||
12550 | + dev_err(qidev, "%08x: %s: %s%s\n", |
||
12551 | + status, error, err_str, err_err_code); |
||
12552 | +} |
||
12553 | + |
||
12554 | static void report_jr_status(struct device *jrdev, const u32 status, |
||
12555 | const char *error) |
||
12556 | { |
||
12557 | @@ -250,7 +319,7 @@ static void report_cond_code_status(stru |
||
12558 | status, error, __func__); |
||
12559 | } |
||
12560 | |||
12561 | -void caam_jr_strstatus(struct device *jrdev, u32 status) |
||
12562 | +void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2) |
||
12563 | { |
||
12564 | static const struct stat_src { |
||
12565 | void (*report_ssed)(struct device *jrdev, const u32 status, |
||
12566 | @@ -262,7 +331,7 @@ void caam_jr_strstatus(struct device *jr |
||
12567 | { report_ccb_status, "CCB" }, |
||
12568 | { report_jump_status, "Jump" }, |
||
12569 | { report_deco_status, "DECO" }, |
||
12570 | - { NULL, "Queue Manager Interface" }, |
||
12571 | + { report_qi_status, "Queue Manager Interface" }, |
||
12572 | { report_jr_status, "Job Ring" }, |
||
12573 | { report_cond_code_status, "Condition Code" }, |
||
12574 | { NULL, NULL }, |
||
12575 | @@ -288,4 +357,4 @@ void caam_jr_strstatus(struct device *jr |
||
12576 | else |
||
12577 | dev_err(jrdev, "%d: unknown error source\n", ssrc); |
||
12578 | } |
||
12579 | -EXPORT_SYMBOL(caam_jr_strstatus); |
||
12580 | +EXPORT_SYMBOL(caam_strstatus); |
||
12581 | --- a/drivers/crypto/caam/error.h |
||
12582 | +++ b/drivers/crypto/caam/error.h |
||
12583 | @@ -8,7 +8,11 @@ |
||
12584 | #ifndef CAAM_ERROR_H |
||
12585 | #define CAAM_ERROR_H |
||
12586 | #define CAAM_ERROR_STR_MAX 302 |
||
12587 | -void caam_jr_strstatus(struct device *jrdev, u32 status); |
||
12588 | + |
||
12589 | +void caam_strstatus(struct device *dev, u32 status, bool qi_v2); |
||
12590 | + |
||
12591 | +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false) |
||
12592 | +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true) |
||
12593 | |||
12594 | void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, |
||
12595 | int rowsize, int groupsize, struct scatterlist *sg, |
||
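A minimal sketch of the intended call site for the new caam_qi2_strstatus() wrapper; the callback name and its arguments are hypothetical:

	static void hypothetical_aead_done(struct device *qidev, u32 fd_status)
	{
		/* decode SEC status through the QI v2 variant of the parser */
		if (unlikely(fd_status))
			caam_qi2_strstatus(qidev, fd_status);
	}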
12596 | --- a/drivers/crypto/caam/intern.h |
||
12597 | +++ b/drivers/crypto/caam/intern.h |
||
12598 | @@ -84,6 +84,7 @@ struct caam_drv_private { |
||
12599 | u8 qi_present; /* Nonzero if QI present in device */ |
||
12600 | int secvio_irq; /* Security violation interrupt number */ |
||
12601 | int virt_en; /* Virtualization enabled in CAAM */ |
||
12602 | + int era; /* CAAM Era (internal HW revision) */ |
||
12603 | |||
12604 | #define RNG4_MAX_HANDLES 2 |
||
12605 | /* RNG4 block */ |
||
12606 | --- a/drivers/crypto/caam/jr.c |
||
12607 | +++ b/drivers/crypto/caam/jr.c |
||
12608 | @@ -23,6 +23,14 @@ struct jr_driver_data { |
||
12609 | |||
12610 | static struct jr_driver_data driver_data; |
||
12611 | |||
12612 | +static int jr_driver_probed; |
||
12613 | + |
||
12614 | +int caam_jr_driver_probed(void) |
||
12615 | +{ |
||
12616 | + return jr_driver_probed; |
||
12617 | +} |
||
12618 | +EXPORT_SYMBOL(caam_jr_driver_probed); |
||
12619 | + |
||
12620 | static int caam_reset_hw_jr(struct device *dev) |
||
12621 | { |
||
12622 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); |
||
12623 | @@ -119,6 +127,8 @@ static int caam_jr_remove(struct platfor |
||
12624 | dev_err(jrdev, "Failed to shut down job ring\n"); |
||
12625 | irq_dispose_mapping(jrpriv->irq); |
||
12626 | |||
12627 | + jr_driver_probed--; |
||
12628 | + |
||
12629 | return ret; |
||
12630 | } |
||
12631 | |||
12632 | @@ -282,6 +292,36 @@ struct device *caam_jr_alloc(void) |
||
12633 | EXPORT_SYMBOL(caam_jr_alloc); |
||
12634 | |||
12635 | /** |
||
12636 | + * caam_jridx_alloc() - Alloc a specific job ring based on its index. |
||
12637 | + * |
||
12638 | + * returns : pointer to the job ring device with the requested |
||
12639 | + * index, or ERR_PTR(-ENODEV) if no such ring is available. |
||
12640 | + **/ |
||
12641 | +struct device *caam_jridx_alloc(int idx) |
||
12642 | +{ |
||
12643 | + struct caam_drv_private_jr *jrpriv; |
||
12644 | + struct device *dev = ERR_PTR(-ENODEV); |
||
12645 | + |
||
12646 | + spin_lock(&driver_data.jr_alloc_lock); |
||
12647 | + |
||
12648 | + if (list_empty(&driver_data.jr_list)) |
||
12649 | + goto end; |
||
12650 | + |
||
12651 | + list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) { |
||
12652 | + if (jrpriv->ridx == idx) { |
||
12653 | + atomic_inc(&jrpriv->tfm_count); |
||
12654 | + dev = jrpriv->dev; |
||
12655 | + break; |
||
12656 | + } |
||
12657 | + } |
||
12658 | + |
||
12659 | +end: |
||
12660 | + spin_unlock(&driver_data.jr_alloc_lock); |
||
12661 | + return dev; |
||
12662 | +} |
||
12663 | +EXPORT_SYMBOL(caam_jridx_alloc); |
||
12664 | + |
||
12665 | +/** |
||
12666 | * caam_jr_free() - Free the Job Ring |
||
12667 | * @rdev - points to the dev that identifies the Job ring to |
||
12668 | * be released. |
||
12669 | @@ -539,6 +579,8 @@ static int caam_jr_probe(struct platform |
||
12670 | |||
12671 | atomic_set(&jrpriv->tfm_count, 0); |
||
12672 | |||
12673 | + jr_driver_probed++; |
||
12674 | + |
||
12675 | return 0; |
||
12676 | } |
||
12677 | |||
12678 | --- a/drivers/crypto/caam/jr.h |
||
12679 | +++ b/drivers/crypto/caam/jr.h |
||
12680 | @@ -9,7 +9,9 @@ |
||
12681 | #define JR_H |
||
12682 | |||
12683 | /* Prototypes for backend-level services exposed to APIs */ |
||
12684 | +int caam_jr_driver_probed(void); |
||
12685 | struct device *caam_jr_alloc(void); |
||
12686 | +struct device *caam_jridx_alloc(int idx); |
||
12687 | void caam_jr_free(struct device *rdev); |
||
12688 | int caam_jr_enqueue(struct device *dev, u32 *desc, |
||
12689 | void (*cbk)(struct device *dev, u32 *desc, u32 status, |
||
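A hypothetical consumer of the two new job ring entry points, falling back to the existing round-robin caam_jr_alloc() when the requested index is unavailable:

	struct device *jrdev;

	if (!caam_jr_driver_probed())
		return -ENODEV;		/* no job rings registered yet */

	jrdev = caam_jridx_alloc(0);	/* prefer ring 0 */
	if (IS_ERR(jrdev))
		jrdev = caam_jr_alloc();	/* any available ring */
	if (IS_ERR(jrdev))
		return PTR_ERR(jrdev);

	/* ... submit descriptors with caam_jr_enqueue(jrdev, ...) ... */

	caam_jr_free(jrdev);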
12690 | --- a/drivers/crypto/caam/key_gen.c |
||
12691 | +++ b/drivers/crypto/caam/key_gen.c |
||
12692 | @@ -11,36 +11,6 @@ |
||
12693 | #include "desc_constr.h" |
||
12694 | #include "key_gen.h" |
||
12695 | |||
12696 | -/** |
||
12697 | - * split_key_len - Compute MDHA split key length for a given algorithm |
||
12698 | - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, |
||
12699 | - * SHA224, SHA384, SHA512. |
||
12700 | - * |
||
12701 | - * Return: MDHA split key length |
||
12702 | - */ |
||
12703 | -static inline u32 split_key_len(u32 hash) |
||
12704 | -{ |
||
12705 | - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ |
||
12706 | - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; |
||
12707 | - u32 idx; |
||
12708 | - |
||
12709 | - idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT; |
||
12710 | - |
||
12711 | - return (u32)(mdpadlen[idx] * 2); |
||
12712 | -} |
||
12713 | - |
||
12714 | -/** |
||
12715 | - * split_key_pad_len - Compute MDHA split key pad length for a given algorithm |
||
12716 | - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, |
||
12717 | - * SHA224, SHA384, SHA512. |
||
12718 | - * |
||
12719 | - * Return: MDHA split key pad length |
||
12720 | - */ |
||
12721 | -static inline u32 split_key_pad_len(u32 hash) |
||
12722 | -{ |
||
12723 | - return ALIGN(split_key_len(hash), 16); |
||
12724 | -} |
||
12725 | - |
||
12726 | void split_key_done(struct device *dev, u32 *desc, u32 err, |
||
12727 | void *context) |
||
12728 | { |
||
12729 | --- a/drivers/crypto/caam/key_gen.h |
||
12730 | +++ b/drivers/crypto/caam/key_gen.h |
||
12731 | @@ -6,6 +6,36 @@ |
||
12732 | * |
||
12733 | */ |
||
12734 | |||
12735 | +/** |
||
12736 | + * split_key_len - Compute MDHA split key length for a given algorithm |
||
12737 | + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, |
||
12738 | + * SHA224, SHA384, SHA512. |
||
12739 | + * |
||
12740 | + * Return: MDHA split key length |
||
12741 | + */ |
||
12742 | +static inline u32 split_key_len(u32 hash) |
||
12743 | +{ |
||
12744 | + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ |
||
12745 | + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; |
||
12746 | + u32 idx; |
||
12747 | + |
||
12748 | + idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT; |
||
12749 | + |
||
12750 | + return (u32)(mdpadlen[idx] * 2); |
||
12751 | +} |
||
12752 | + |
||
12753 | +/** |
||
12754 | + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm |
||
12755 | + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, |
||
12756 | + * SHA224, SHA384, SHA512. |
||
12757 | + * |
||
12758 | + * Return: MDHA split key pad length |
||
12759 | + */ |
||
12760 | +static inline u32 split_key_pad_len(u32 hash) |
||
12761 | +{ |
||
12762 | + return ALIGN(split_key_len(hash), 16); |
||
12763 | +} |
||
12764 | + |
||
12765 | struct split_key_result { |
||
12766 | struct completion completion; |
||
12767 | int err; |
||
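With the helpers now visible to all CAAM backends, the arithmetic works out as follows (worked examples, using the OP_ALG_ALGSEL_* selectors from desc.h):

	split_key_len(OP_ALG_ALGSEL_SHA256);     /* 2 * 32 = 64 bytes */
	split_key_pad_len(OP_ALG_ALGSEL_SHA256); /* ALIGN(64, 16) = 64 */

	split_key_len(OP_ALG_ALGSEL_SHA1);       /* 2 * 20 = 40 bytes */
	split_key_pad_len(OP_ALG_ALGSEL_SHA1);   /* ALIGN(40, 16) = 48 */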
12768 | --- a/drivers/crypto/caam/qi.c |
||
12769 | +++ b/drivers/crypto/caam/qi.c |
||
12770 | @@ -9,7 +9,7 @@ |
||
12771 | |||
12772 | #include <linux/cpumask.h> |
||
12773 | #include <linux/kthread.h> |
||
12774 | -#include <soc/fsl/qman.h> |
||
12775 | +#include <linux/fsl_qman.h> |
||
12776 | |||
12777 | #include "regs.h" |
||
12778 | #include "qi.h" |
||
12779 | @@ -105,23 +105,21 @@ static struct kmem_cache *qi_cache; |
||
12780 | int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req) |
||
12781 | { |
||
12782 | struct qm_fd fd; |
||
12783 | - dma_addr_t addr; |
||
12784 | int ret; |
||
12785 | int num_retries = 0; |
||
12786 | |||
12787 | - qm_fd_clear_fd(&fd); |
||
12788 | - qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1])); |
||
12789 | - |
||
12790 | - addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt), |
||
12791 | + fd.cmd = 0; |
||
12792 | + fd.format = qm_fd_compound; |
||
12793 | + fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length); |
||
12794 | + fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt), |
||
12795 | DMA_BIDIRECTIONAL); |
||
12796 | - if (dma_mapping_error(qidev, addr)) { |
||
12797 | + if (dma_mapping_error(qidev, fd.addr)) { |
||
12798 | dev_err(qidev, "DMA mapping error for QI enqueue request\n"); |
||
12799 | return -EIO; |
||
12800 | } |
||
12801 | - qm_fd_addr_set64(&fd, addr); |
||
12802 | |||
12803 | do { |
||
12804 | - ret = qman_enqueue(req->drv_ctx->req_fq, &fd); |
||
12805 | + ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0); |
||
12806 | if (likely(!ret)) |
||
12807 | return 0; |
||
12808 | |||
12809 | @@ -137,7 +135,7 @@ int caam_qi_enqueue(struct device *qidev |
||
12810 | EXPORT_SYMBOL(caam_qi_enqueue); |
||
12811 | |||
12812 | static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq, |
||
12813 | - const union qm_mr_entry *msg) |
||
12814 | + const struct qm_mr_entry *msg) |
||
12815 | { |
||
12816 | const struct qm_fd *fd; |
||
12817 | struct caam_drv_req *drv_req; |
||
12818 | @@ -145,7 +143,7 @@ static void caam_fq_ern_cb(struct qman_p |
||
12819 | |||
12820 | fd = &msg->ern.fd; |
||
12821 | |||
12822 | - if (qm_fd_get_format(fd) != qm_fd_compound) { |
||
12823 | + if (fd->format != qm_fd_compound) { |
||
12824 | dev_err(qidev, "Non-compound FD from CAAM\n"); |
||
12825 | return; |
||
12826 | } |
||
12827 | @@ -180,20 +178,22 @@ static struct qman_fq *create_caam_req_f |
||
12828 | req_fq->cb.fqs = NULL; |
||
12829 | |||
12830 | ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID | |
||
12831 | - QMAN_FQ_FLAG_TO_DCPORTAL, req_fq); |
||
12832 | + QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED, |
||
12833 | + req_fq); |
||
12834 | if (ret) { |
||
12835 | dev_err(qidev, "Failed to create session req FQ\n"); |
||
12836 | goto create_req_fq_fail; |
||
12837 | } |
||
12838 | |||
12839 | - memset(&opts, 0, sizeof(opts)); |
||
12840 | - opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ | |
||
12841 | - QM_INITFQ_WE_CONTEXTB | |
||
12842 | - QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID); |
||
12843 | - opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE); |
||
12844 | - qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2); |
||
12845 | - opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq)); |
||
12846 | - qm_fqd_context_a_set64(&opts.fqd, hwdesc); |
||
12847 | + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ | |
||
12848 | + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA | |
||
12849 | + QM_INITFQ_WE_CGID; |
||
12850 | + opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE; |
||
12851 | + opts.fqd.dest.channel = qm_channel_caam; |
||
12852 | + opts.fqd.dest.wq = 2; |
||
12853 | + opts.fqd.context_b = qman_fq_fqid(rsp_fq); |
||
12854 | + opts.fqd.context_a.hi = upper_32_bits(hwdesc); |
||
12855 | + opts.fqd.context_a.lo = lower_32_bits(hwdesc); |
||
12856 | opts.fqd.cgid = qipriv.cgr.cgrid; |
||
12857 | |||
12858 | ret = qman_init_fq(req_fq, fq_sched_flag, &opts); |
||
12859 | @@ -207,7 +207,7 @@ static struct qman_fq *create_caam_req_f |
||
12860 | return req_fq; |
||
12861 | |||
12862 | init_req_fq_fail: |
||
12863 | - qman_destroy_fq(req_fq); |
||
12864 | + qman_destroy_fq(req_fq, 0); |
||
12865 | create_req_fq_fail: |
||
12866 | kfree(req_fq); |
||
12867 | return ERR_PTR(ret); |
||
12868 | @@ -275,7 +275,7 @@ empty_fq: |
||
12869 | if (ret) |
||
12870 | dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid); |
||
12871 | |||
12872 | - qman_destroy_fq(fq); |
||
12873 | + qman_destroy_fq(fq, 0); |
||
12874 | kfree(fq); |
||
12875 | |||
12876 | return ret; |
||
12877 | @@ -292,7 +292,7 @@ static int empty_caam_fq(struct qman_fq |
||
12878 | if (ret) |
||
12879 | return ret; |
||
12880 | |||
12881 | - if (!qm_mcr_np_get(&np, frm_cnt)) |
||
12882 | + if (!np.frm_cnt) |
||
12883 | break; |
||
12884 | |||
12885 | msleep(20); |
||
12886 | @@ -572,22 +572,27 @@ static enum qman_cb_dqrr_result caam_rsp |
||
12887 | struct caam_drv_req *drv_req; |
||
12888 | const struct qm_fd *fd; |
||
12889 | struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); |
||
12890 | - u32 status; |
||
12891 | |||
12892 | if (caam_qi_napi_schedule(p, caam_napi)) |
||
12893 | return qman_cb_dqrr_stop; |
||
12894 | |||
12895 | fd = &dqrr->fd; |
||
12896 | - status = be32_to_cpu(fd->status); |
||
12897 | - if (unlikely(status)) |
||
12898 | - dev_err(qidev, "Error: %#x in CAAM response FD\n", status); |
||
12899 | + if (unlikely(fd->status)) { |
||
12900 | + u32 ssrc = fd->status & JRSTA_SSRC_MASK; |
||
12901 | + u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK; |
||
12902 | + |
||
12903 | + if (ssrc != JRSTA_SSRC_CCB_ERROR || |
||
12904 | + err_id != JRSTA_CCBERR_ERRID_ICVCHK) |
||
12905 | + dev_err(qidev, "Error: %#x in CAAM response FD\n", |
||
12906 | + fd->status); |
||
12907 | + } |
||
12908 | |||
12909 | - if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) { |
||
12910 | + if (unlikely(fd->format != qm_fd_compound)) { |
||
12911 | dev_err(qidev, "Non-compound FD from CAAM\n"); |
||
12912 | return qman_cb_dqrr_consume; |
||
12913 | } |
||
12914 | |||
12915 | - drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd)); |
||
12916 | + drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr); |
||
12917 | if (unlikely(!drv_req)) { |
||
12918 | dev_err(qidev, |
||
12919 | "Can't find original request for caam response\n"); |
||
12920 | @@ -597,7 +602,7 @@ static enum qman_cb_dqrr_result caam_rsp |
||
12921 | dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd), |
||
12922 | sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL); |
||
12923 | |||
12924 | - drv_req->cbk(drv_req, status); |
||
12925 | + drv_req->cbk(drv_req, fd->status); |
||
12926 | return qman_cb_dqrr_consume; |
||
12927 | } |
||
12928 | |||
12929 | @@ -621,17 +626,18 @@ static int alloc_rsp_fq_cpu(struct devic |
||
12930 | return -ENODEV; |
||
12931 | } |
||
12932 | |||
12933 | - memset(&opts, 0, sizeof(opts)); |
||
12934 | - opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ | |
||
12935 | - QM_INITFQ_WE_CONTEXTB | |
||
12936 | - QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID); |
||
12937 | - opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING | |
||
12938 | - QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE); |
||
12939 | - qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3); |
||
12940 | + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ | |
||
12941 | + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA | |
||
12942 | + QM_INITFQ_WE_CGID; |
||
12943 | + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH | |
||
12944 | + QM_FQCTRL_CGE; |
||
12945 | + opts.fqd.dest.channel = qman_affine_channel(cpu); |
||
12946 | + opts.fqd.dest.wq = 3; |
||
12947 | opts.fqd.cgid = qipriv.cgr.cgrid; |
||
12948 | opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX | |
||
12949 | QM_STASHING_EXCL_DATA; |
||
12950 | - qm_fqd_set_stashing(&opts.fqd, 0, 1, 1); |
||
12951 | + opts.fqd.context_a.stashing.data_cl = 1; |
||
12952 | + opts.fqd.context_a.stashing.context_cl = 1; |
||
12953 | |||
12954 | ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts); |
||
12955 | if (ret) { |
||
12956 | @@ -662,8 +668,7 @@ static int init_cgr(struct device *qidev |
||
12957 | |||
12958 | qipriv.cgr.cb = cgr_cb; |
||
12959 | memset(&opts, 0, sizeof(opts)); |
||
12960 | - opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | |
||
12961 | - QM_CGR_WE_MODE); |
||
12962 | + opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE; |
||
12963 | opts.cgr.cscn_en = QM_CGR_EN; |
||
12964 | opts.cgr.mode = QMAN_CGR_MODE_FRAME; |
||
12965 | qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1); |
||
12966 | --- a/drivers/crypto/caam/qi.h |
||
12967 | +++ b/drivers/crypto/caam/qi.h |
||
12968 | @@ -9,7 +9,7 @@ |
||
12969 | #ifndef __QI_H__ |
||
12970 | #define __QI_H__ |
||
12971 | |||
12972 | -#include <soc/fsl/qman.h> |
||
12973 | +#include <linux/fsl_qman.h> |
||
12974 | #include "compat.h" |
||
12975 | #include "desc.h" |
||
12976 | #include "desc_constr.h" |
||
12977 | --- a/drivers/crypto/caam/regs.h |
||
12978 | +++ b/drivers/crypto/caam/regs.h |
||
12979 | @@ -627,6 +627,8 @@ struct caam_job_ring { |
||
12980 | #define JRSTA_DECOERR_INVSIGN 0x86 |
||
12981 | #define JRSTA_DECOERR_DSASIGN 0x87 |
||
12982 | |||
12983 | +#define JRSTA_QIERR_ERROR_MASK 0x00ff |
||
12984 | + |
||
12985 | #define JRSTA_CCBERR_JUMP 0x08000000 |
||
12986 | #define JRSTA_CCBERR_INDEX_MASK 0xff00 |
||
12987 | #define JRSTA_CCBERR_INDEX_SHIFT 8 |
||
12988 | --- a/drivers/crypto/caam/sg_sw_qm.h |
||
12989 | +++ b/drivers/crypto/caam/sg_sw_qm.h |
||
12990 | @@ -34,46 +34,61 @@ |
||
12991 | #ifndef __SG_SW_QM_H |
||
12992 | #define __SG_SW_QM_H |
||
12993 | |||
12994 | -#include <soc/fsl/qman.h> |
||
12995 | +#include <linux/fsl_qman.h> |
||
12996 | #include "regs.h" |
||
12997 | |||
12998 | +static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr) |
||
12999 | +{ |
||
13000 | + dma_addr_t addr = qm_sg_ptr->opaque; |
||
13001 | + |
||
13002 | + qm_sg_ptr->opaque = cpu_to_caam64(addr); |
||
13003 | + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl); |
||
13004 | +} |
||
13005 | + |
||
13006 | static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma, |
||
13007 | - u16 offset) |
||
13008 | + u32 len, u16 offset) |
||
13009 | { |
||
13010 | - qm_sg_entry_set64(qm_sg_ptr, dma); |
||
13011 | + qm_sg_ptr->addr = dma; |
||
13012 | + qm_sg_ptr->length = len; |
||
13013 | qm_sg_ptr->__reserved2 = 0; |
||
13014 | qm_sg_ptr->bpid = 0; |
||
13015 | - qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK); |
||
13016 | + qm_sg_ptr->__reserved3 = 0; |
||
13017 | + qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK; |
||
13018 | + |
||
13019 | + cpu_to_hw_sg(qm_sg_ptr); |
||
13020 | } |
||
13021 | |||
13022 | static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr, |
||
13023 | dma_addr_t dma, u32 len, u16 offset) |
||
13024 | { |
||
13025 | - __dma_to_qm_sg(qm_sg_ptr, dma, offset); |
||
13026 | - qm_sg_entry_set_len(qm_sg_ptr, len); |
||
13027 | + qm_sg_ptr->extension = 0; |
||
13028 | + qm_sg_ptr->final = 0; |
||
13029 | + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset); |
||
13030 | } |
||
13031 | |||
13032 | static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr, |
||
13033 | dma_addr_t dma, u32 len, u16 offset) |
||
13034 | { |
||
13035 | - __dma_to_qm_sg(qm_sg_ptr, dma, offset); |
||
13036 | - qm_sg_entry_set_f(qm_sg_ptr, len); |
||
13037 | + qm_sg_ptr->extension = 0; |
||
13038 | + qm_sg_ptr->final = 1; |
||
13039 | + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset); |
||
13040 | } |
||
13041 | |||
13042 | static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr, |
||
13043 | dma_addr_t dma, u32 len, u16 offset) |
||
13044 | { |
||
13045 | - __dma_to_qm_sg(qm_sg_ptr, dma, offset); |
||
13046 | - qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK)); |
||
13047 | + qm_sg_ptr->extension = 1; |
||
13048 | + qm_sg_ptr->final = 0; |
||
13049 | + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset); |
||
13050 | } |
||
13051 | |||
13052 | static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr, |
||
13053 | dma_addr_t dma, u32 len, |
||
13054 | u16 offset) |
||
13055 | { |
||
13056 | - __dma_to_qm_sg(qm_sg_ptr, dma, offset); |
||
13057 | - qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN | |
||
13058 | - (len & QM_SG_LEN_MASK)); |
||
13059 | + qm_sg_ptr->extension = 1; |
||
13060 | + qm_sg_ptr->final = 1; |
||
13061 | + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset); |
||
13062 | } |
||
13063 | |||
13064 | /* |
||
13065 | @@ -102,7 +117,10 @@ static inline void sg_to_qm_sg_last(stru |
||
13066 | struct qm_sg_entry *qm_sg_ptr, u16 offset) |
||
13067 | { |
||
13068 | qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset); |
||
13069 | - qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr)); |
||
13070 | + |
||
13071 | + qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl); |
||
13072 | + qm_sg_ptr->final = 1; |
||
13073 | + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl); |
||
13074 | } |
||
13075 | |||
13076 | #endif /* __SG_SW_QM_H */ |
||
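An illustrative use of the reworked helpers, building a two-entry table for a compound frame; dst_dma, src_dma and the lengths are placeholders:

	struct qm_sg_entry sgt[2];

	/* entry 0: output buffer */
	dma_to_qm_sg_one(&sgt[0], dst_dma, dst_len, 0);
	/* entry 1: input buffer, Final bit set to terminate the table */
	dma_to_qm_sg_one_last(&sgt[1], src_dma, src_len, 0);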
13077 | --- a/drivers/crypto/talitos.c |
||
13078 | +++ b/drivers/crypto/talitos.c |
||
13079 | @@ -1241,6 +1241,14 @@ static int ipsec_esp(struct talitos_edes |
||
13080 | ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4], |
||
13081 | sg_count, areq->assoclen, tbl_off, elen); |
||
13082 | |||
13083 | + /* |
||
13084 | + * In case of SEC 2.x+, the cipher-in length must include only the |
||
13085 | + * ciphertext, while the extent field is used for the ICV length. |
||
13086 | + */ |
||
13087 | + if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) && |
||
13088 | + (desc->hdr & DESC_HDR_MODE1_MDEU_CICV)) |
||
13089 | + desc->ptr[4].len = cpu_to_be16(cryptlen); |
||
13090 | + |
||
13091 | if (ret > 1) { |
||
13092 | tbl_off += ret; |
||
13093 | sync_needed = true; |