1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
3 * Copyright 2015-2016 Freescale Semiconductor Inc.
4 * Copyright 2017-2019 NXP
18 #include "dpseci-debugfs.h"
19 #include <linux/dma-mapping.h>
22 #include <soc/fsl/dpaa2-io.h>
23 #include <soc/fsl/dpaa2-fd.h>
37 * being processed. This can be added by the dpaa2-eth driver. This would
40 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here
66 * struct caam_ctx - per-session context
99 phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) : in dpaa2_caam_iova_to_virt()
106 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
113 * @flags - flags that would be used for the equivalent kmalloc(..) call
123 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
125 * @obj - buffer previously allocated by qi_cache_zalloc
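A minimal usage sketch for this allocator/free pair (illustrative only; the edesc and flags names mirror the driver's *_edesc_alloc() paths and are not lines from this file):

	struct aead_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;

	/* allocate a zeroed extended descriptor from the CAAM-QI cache */
	edesc = qi_cache_zalloc(flags);
	if (unlikely(!edesc))
		return ERR_PTR(-ENOMEM);

	/* ... fill in the S/G table and frame list entries ... */

	/* on error paths (and once the response is handled) return it */
	qi_cache_free(edesc);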
137 switch (crypto_tfm_alg_type(areq->tfm)) { in to_caam_req()
146 return ERR_PTR(-EINVAL); in to_caam_req()
178 struct device *dev = ctx->dev; in aead_set_sh_desc()
186 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == in aead_set_sh_desc()
188 const bool is_rfc3686 = alg->caam.rfc3686; in aead_set_sh_desc()
190 if (!ctx->cdata.keylen || !ctx->authsize) in aead_set_sh_desc()
194 * AES-CTR needs to load IV in CONTEXT1 reg in aead_set_sh_desc()
207 nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + in aead_set_sh_desc()
208 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); in aead_set_sh_desc()
217 ctx->adata.key_virt = ctx->key; in aead_set_sh_desc()
218 ctx->adata.key_dma = ctx->key_dma; in aead_set_sh_desc()
220 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; in aead_set_sh_desc()
221 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; in aead_set_sh_desc()
223 data_len[0] = ctx->adata.keylen_pad; in aead_set_sh_desc()
224 data_len[1] = ctx->cdata.keylen; in aead_set_sh_desc()
226 /* aead_encrypt shared descriptor */ in aead_set_sh_desc()
227 if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN : in aead_set_sh_desc()
232 return -EINVAL; in aead_set_sh_desc()
234 ctx->adata.key_inline = !!(inl_mask & 1); in aead_set_sh_desc()
235 ctx->cdata.key_inline = !!(inl_mask & 2); in aead_set_sh_desc()
237 flc = &ctx->flc[ENCRYPT]; in aead_set_sh_desc()
238 desc = flc->sh_desc; in aead_set_sh_desc()
240 if (alg->caam.geniv) in aead_set_sh_desc()
241 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, in aead_set_sh_desc()
242 ivsize, ctx->authsize, is_rfc3686, in aead_set_sh_desc()
244 priv->sec_attr.era); in aead_set_sh_desc()
246 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, in aead_set_sh_desc()
247 ivsize, ctx->authsize, is_rfc3686, nonce, in aead_set_sh_desc()
248 ctx1_iv_off, true, priv->sec_attr.era); in aead_set_sh_desc()
250 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in aead_set_sh_desc()
251 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], in aead_set_sh_desc()
252 sizeof(flc->flc) + desc_bytes(desc), in aead_set_sh_desc()
253 ctx->dir); in aead_set_sh_desc()
255 /* aead_decrypt shared descriptor */ in aead_set_sh_desc()
260 return -EINVAL; in aead_set_sh_desc()
262 ctx->adata.key_inline = !!(inl_mask & 1); in aead_set_sh_desc()
263 ctx->cdata.key_inline = !!(inl_mask & 2); in aead_set_sh_desc()
265 flc = &ctx->flc[DECRYPT]; in aead_set_sh_desc()
266 desc = flc->sh_desc; in aead_set_sh_desc()
267 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, in aead_set_sh_desc()
268 ivsize, ctx->authsize, alg->caam.geniv, in aead_set_sh_desc()
270 priv->sec_attr.era); in aead_set_sh_desc()
271 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in aead_set_sh_desc()
272 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], in aead_set_sh_desc()
273 sizeof(flc->flc) + desc_bytes(desc), in aead_set_sh_desc()
274 ctx->dir); in aead_set_sh_desc()
283 ctx->authsize = authsize; in aead_setauthsize()
293 struct device *dev = ctx->dev; in aead_setkey()
305 ctx->adata.keylen = keys.authkeylen; in aead_setkey()
306 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & in aead_setkey()
309 if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) in aead_setkey()
312 memcpy(ctx->key, keys.authkey, keys.authkeylen); in aead_setkey()
313 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); in aead_setkey()
314 dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad + in aead_setkey()
315 keys.enckeylen, ctx->dir); in aead_setkey()
317 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, in aead_setkey()
318 ctx->adata.keylen_pad + keys.enckeylen, 1); in aead_setkey()
320 ctx->cdata.keylen = keys.enckeylen; in aead_setkey()
326 return -EINVAL; in aead_setkey()
339 err = -EINVAL; in des3_aead_setkey()
356 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in aead_edesc_alloc()
357 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in aead_edesc_alloc()
361 struct device *dev = ctx->dev; in aead_edesc_alloc()
362 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in aead_edesc_alloc()
369 unsigned int authsize = ctx->authsize; in aead_edesc_alloc()
378 return ERR_PTR(-ENOMEM); in aead_edesc_alloc()
381 if (unlikely(req->dst != req->src)) { in aead_edesc_alloc()
382 src_len = req->assoclen + req->cryptlen; in aead_edesc_alloc()
383 dst_len = src_len + (encrypt ? authsize : (-authsize)); in aead_edesc_alloc()
385 src_nents = sg_nents_for_len(req->src, src_len); in aead_edesc_alloc()
393 dst_nents = sg_nents_for_len(req->dst, dst_len); in aead_edesc_alloc()
402 mapped_src_nents = dma_map_sg(dev, req->src, src_nents, in aead_edesc_alloc()
407 return ERR_PTR(-ENOMEM); in aead_edesc_alloc()
414 mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, in aead_edesc_alloc()
418 dma_unmap_sg(dev, req->src, src_nents, in aead_edesc_alloc()
421 return ERR_PTR(-ENOMEM); in aead_edesc_alloc()
427 src_len = req->assoclen + req->cryptlen + in aead_edesc_alloc()
430 src_nents = sg_nents_for_len(req->src, src_len); in aead_edesc_alloc()
438 mapped_src_nents = dma_map_sg(dev, req->src, src_nents, in aead_edesc_alloc()
443 return ERR_PTR(-ENOMEM); in aead_edesc_alloc()
447 if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) in aead_edesc_alloc()
451 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. in aead_edesc_alloc()
465 else if ((req->src == req->dst) && (mapped_src_nents > 1)) in aead_edesc_alloc()
472 sg_table = &edesc->sgt[0]; in aead_edesc_alloc()
478 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, in aead_edesc_alloc()
481 return ERR_PTR(-ENOMEM); in aead_edesc_alloc()
488 memcpy(iv, req->iv, ivsize); in aead_edesc_alloc()
493 caam_unmap(dev, req->src, req->dst, src_nents, in aead_edesc_alloc()
496 return ERR_PTR(-ENOMEM); in aead_edesc_alloc()
500 edesc->src_nents = src_nents; in aead_edesc_alloc()
501 edesc->dst_nents = dst_nents; in aead_edesc_alloc()
502 edesc->iv_dma = iv_dma; in aead_edesc_alloc()
504 if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) == in aead_edesc_alloc()
510 edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize); in aead_edesc_alloc()
512 edesc->assoclen = cpu_to_caam32(req->assoclen); in aead_edesc_alloc()
513 edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4, in aead_edesc_alloc()
515 if (dma_mapping_error(dev, edesc->assoclen_dma)) { in aead_edesc_alloc()
517 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, in aead_edesc_alloc()
520 return ERR_PTR(-ENOMEM); in aead_edesc_alloc()
523 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0); in aead_edesc_alloc()
529 sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0); in aead_edesc_alloc()
533 sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0); in aead_edesc_alloc()
538 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); in aead_edesc_alloc()
539 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, in aead_edesc_alloc()
542 return ERR_PTR(-ENOMEM); in aead_edesc_alloc()
545 edesc->qm_sg_dma = qm_sg_dma; in aead_edesc_alloc()
546 edesc->qm_sg_bytes = qm_sg_bytes; in aead_edesc_alloc()
548 out_len = req->assoclen + req->cryptlen + in aead_edesc_alloc()
549 (encrypt ? ctx->authsize : (-ctx->authsize)); in aead_edesc_alloc()
550 in_len = 4 + ivsize + req->assoclen + req->cryptlen; in aead_edesc_alloc()
552 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in aead_edesc_alloc()
558 if (req->dst == req->src) { in aead_edesc_alloc()
561 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src)); in aead_edesc_alloc()
577 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); in aead_edesc_alloc()
594 struct device *dev = ctx->dev; in chachapoly_set_sh_desc()
598 if (!ctx->cdata.keylen || !ctx->authsize) in chachapoly_set_sh_desc()
601 flc = &ctx->flc[ENCRYPT]; in chachapoly_set_sh_desc()
602 desc = flc->sh_desc; in chachapoly_set_sh_desc()
603 cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, in chachapoly_set_sh_desc()
604 ctx->authsize, true, true); in chachapoly_set_sh_desc()
605 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in chachapoly_set_sh_desc()
606 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], in chachapoly_set_sh_desc()
607 sizeof(flc->flc) + desc_bytes(desc), in chachapoly_set_sh_desc()
608 ctx->dir); in chachapoly_set_sh_desc()
610 flc = &ctx->flc[DECRYPT]; in chachapoly_set_sh_desc()
611 desc = flc->sh_desc; in chachapoly_set_sh_desc()
612 cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, in chachapoly_set_sh_desc()
613 ctx->authsize, false, true); in chachapoly_set_sh_desc()
614 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in chachapoly_set_sh_desc()
615 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], in chachapoly_set_sh_desc()
616 sizeof(flc->flc) + desc_bytes(desc), in chachapoly_set_sh_desc()
617 ctx->dir); in chachapoly_set_sh_desc()
628 return -EINVAL; in chachapoly_setauthsize()
630 ctx->authsize = authsize; in chachapoly_setauthsize()
639 unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize; in chachapoly_setkey()
642 return -EINVAL; in chachapoly_setkey()
644 memcpy(ctx->key, key, keylen); in chachapoly_setkey()
645 ctx->cdata.key_virt = ctx->key; in chachapoly_setkey()
646 ctx->cdata.keylen = keylen - saltlen; in chachapoly_setkey()
654 struct device *dev = ctx->dev; in gcm_set_sh_desc()
658 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - in gcm_set_sh_desc()
659 ctx->cdata.keylen; in gcm_set_sh_desc()
661 if (!ctx->cdata.keylen || !ctx->authsize) in gcm_set_sh_desc()
665 * AES GCM encrypt shared descriptor in gcm_set_sh_desc()
666 * Job Descriptor and Shared Descriptor in gcm_set_sh_desc()
667 * must fit into the 64-word Descriptor h/w Buffer in gcm_set_sh_desc()
670 ctx->cdata.key_inline = true; in gcm_set_sh_desc()
671 ctx->cdata.key_virt = ctx->key; in gcm_set_sh_desc()
673 ctx->cdata.key_inline = false; in gcm_set_sh_desc()
674 ctx->cdata.key_dma = ctx->key_dma; in gcm_set_sh_desc()
677 flc = &ctx->flc[ENCRYPT]; in gcm_set_sh_desc()
678 desc = flc->sh_desc; in gcm_set_sh_desc()
679 cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true); in gcm_set_sh_desc()
680 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in gcm_set_sh_desc()
681 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], in gcm_set_sh_desc()
682 sizeof(flc->flc) + desc_bytes(desc), in gcm_set_sh_desc()
683 ctx->dir); in gcm_set_sh_desc()
686 * Job Descriptor and Shared Descriptors in gcm_set_sh_desc()
687 * must all fit into the 64-word Descriptor h/w Buffer in gcm_set_sh_desc()
690 ctx->cdata.key_inline = true; in gcm_set_sh_desc()
691 ctx->cdata.key_virt = ctx->key; in gcm_set_sh_desc()
693 ctx->cdata.key_inline = false; in gcm_set_sh_desc()
694 ctx->cdata.key_dma = ctx->key_dma; in gcm_set_sh_desc()
697 flc = &ctx->flc[DECRYPT]; in gcm_set_sh_desc()
698 desc = flc->sh_desc; in gcm_set_sh_desc()
699 cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true); in gcm_set_sh_desc()
700 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in gcm_set_sh_desc()
701 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], in gcm_set_sh_desc()
702 sizeof(flc->flc) + desc_bytes(desc), in gcm_set_sh_desc()
703 ctx->dir); in gcm_set_sh_desc()
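The gcm_set_sh_desc() excerpt above applies the sizing rule stated in its comments: the job descriptor plus the shared descriptor must fit in the 64-word descriptor buffer, so the AES key is inlined only when enough bytes remain after accounting for the descriptor body. A condensed sketch of that decision (DESC_QI_GCM_ENC_LEN stands in for the driver's per-descriptor length constant and is an assumption here):

	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		/* key fits: embed it directly in the shared descriptor */
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		/* key does not fit: reference it by its DMA address */
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}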
717 ctx->authsize = authsize; in gcm_setauthsize()
727 struct device *dev = ctx->dev; in gcm_setkey()
736 memcpy(ctx->key, key, keylen); in gcm_setkey()
737 dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir); in gcm_setkey()
738 ctx->cdata.keylen = keylen; in gcm_setkey()
746 struct device *dev = ctx->dev; in rfc4106_set_sh_desc()
750 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - in rfc4106_set_sh_desc()
751 ctx->cdata.keylen; in rfc4106_set_sh_desc()
753 if (!ctx->cdata.keylen || !ctx->authsize) in rfc4106_set_sh_desc()
756 ctx->cdata.key_virt = ctx->key; in rfc4106_set_sh_desc()
759 * RFC4106 encrypt shared descriptor in rfc4106_set_sh_desc()
760 * Job Descriptor and Shared Descriptor in rfc4106_set_sh_desc()
761 * must fit into the 64-word Descriptor h/w Buffer in rfc4106_set_sh_desc()
764 ctx->cdata.key_inline = true; in rfc4106_set_sh_desc()
766 ctx->cdata.key_inline = false; in rfc4106_set_sh_desc()
767 ctx->cdata.key_dma = ctx->key_dma; in rfc4106_set_sh_desc()
770 flc = &ctx->flc[ENCRYPT]; in rfc4106_set_sh_desc()
771 desc = flc->sh_desc; in rfc4106_set_sh_desc()
772 cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize, in rfc4106_set_sh_desc()
774 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in rfc4106_set_sh_desc()
775 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], in rfc4106_set_sh_desc()
776 sizeof(flc->flc) + desc_bytes(desc), in rfc4106_set_sh_desc()
777 ctx->dir); in rfc4106_set_sh_desc()
780 * Job Descriptor and Shared Descriptors in rfc4106_set_sh_desc()
781 * must all fit into the 64-word Descriptor h/w Buffer in rfc4106_set_sh_desc()
784 ctx->cdata.key_inline = true; in rfc4106_set_sh_desc()
786 ctx->cdata.key_inline = false; in rfc4106_set_sh_desc()
787 ctx->cdata.key_dma = ctx->key_dma; in rfc4106_set_sh_desc()
790 flc = &ctx->flc[DECRYPT]; in rfc4106_set_sh_desc()
791 desc = flc->sh_desc; in rfc4106_set_sh_desc()
792 cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize, in rfc4106_set_sh_desc()
794 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in rfc4106_set_sh_desc()
795 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], in rfc4106_set_sh_desc()
796 sizeof(flc->flc) + desc_bytes(desc), in rfc4106_set_sh_desc()
797 ctx->dir); in rfc4106_set_sh_desc()
812 ctx->authsize = authsize; in rfc4106_setauthsize()
822 struct device *dev = ctx->dev; in rfc4106_setkey()
825 ret = aes_check_keylen(keylen - 4); in rfc4106_setkey()
832 memcpy(ctx->key, key, keylen); in rfc4106_setkey()
837 ctx->cdata.keylen = keylen - 4; in rfc4106_setkey()
838 dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen, in rfc4106_setkey()
839 ctx->dir); in rfc4106_setkey()
847 struct device *dev = ctx->dev; in rfc4543_set_sh_desc()
851 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - in rfc4543_set_sh_desc()
852 ctx->cdata.keylen; in rfc4543_set_sh_desc()
854 if (!ctx->cdata.keylen || !ctx->authsize) in rfc4543_set_sh_desc()
857 ctx->cdata.key_virt = ctx->key; in rfc4543_set_sh_desc()
860 * RFC4543 encrypt shared descriptor in rfc4543_set_sh_desc()
861 * Job Descriptor and Shared Descriptor in rfc4543_set_sh_desc()
862 * must fit into the 64-word Descriptor h/w Buffer in rfc4543_set_sh_desc()
865 ctx->cdata.key_inline = true; in rfc4543_set_sh_desc()
867 ctx->cdata.key_inline = false; in rfc4543_set_sh_desc()
868 ctx->cdata.key_dma = ctx->key_dma; in rfc4543_set_sh_desc()
871 flc = &ctx->flc[ENCRYPT]; in rfc4543_set_sh_desc()
872 desc = flc->sh_desc; in rfc4543_set_sh_desc()
873 cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize, in rfc4543_set_sh_desc()
875 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in rfc4543_set_sh_desc()
876 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], in rfc4543_set_sh_desc()
877 sizeof(flc->flc) + desc_bytes(desc), in rfc4543_set_sh_desc()
878 ctx->dir); in rfc4543_set_sh_desc()
881 * Job Descriptor and Shared Descriptors in rfc4543_set_sh_desc()
882 * must all fit into the 64-word Descriptor h/w Buffer in rfc4543_set_sh_desc()
885 ctx->cdata.key_inline = true; in rfc4543_set_sh_desc()
887 ctx->cdata.key_inline = false; in rfc4543_set_sh_desc()
888 ctx->cdata.key_dma = ctx->key_dma; in rfc4543_set_sh_desc()
891 flc = &ctx->flc[DECRYPT]; in rfc4543_set_sh_desc()
892 desc = flc->sh_desc; in rfc4543_set_sh_desc()
893 cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize, in rfc4543_set_sh_desc()
895 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in rfc4543_set_sh_desc()
896 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], in rfc4543_set_sh_desc()
897 sizeof(flc->flc) + desc_bytes(desc), in rfc4543_set_sh_desc()
898 ctx->dir); in rfc4543_set_sh_desc()
909 return -EINVAL; in rfc4543_setauthsize()
911 ctx->authsize = authsize; in rfc4543_setauthsize()
921 struct device *dev = ctx->dev; in rfc4543_setkey()
924 ret = aes_check_keylen(keylen - 4); in rfc4543_setkey()
931 memcpy(ctx->key, key, keylen); in rfc4543_setkey()
936 ctx->cdata.keylen = keylen - 4; in rfc4543_setkey()
937 dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen, in rfc4543_setkey()
938 ctx->dir); in rfc4543_setkey()
950 struct device *dev = ctx->dev; in skcipher_setkey()
954 const bool is_rfc3686 = alg->caam.rfc3686; in skcipher_setkey()
959 ctx->cdata.keylen = keylen; in skcipher_setkey()
960 ctx->cdata.key_virt = key; in skcipher_setkey()
961 ctx->cdata.key_inline = true; in skcipher_setkey()
963 /* skcipher_encrypt shared descriptor */ in skcipher_setkey()
964 flc = &ctx->flc[ENCRYPT]; in skcipher_setkey()
965 desc = flc->sh_desc; in skcipher_setkey()
966 cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686, in skcipher_setkey()
968 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in skcipher_setkey()
969 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], in skcipher_setkey()
970 sizeof(flc->flc) + desc_bytes(desc), in skcipher_setkey()
971 ctx->dir); in skcipher_setkey()
973 /* skcipher_decrypt shared descriptor */ in skcipher_setkey()
974 flc = &ctx->flc[DECRYPT]; in skcipher_setkey()
975 desc = flc->sh_desc; in skcipher_setkey()
976 cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686, in skcipher_setkey()
978 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in skcipher_setkey()
979 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], in skcipher_setkey()
980 sizeof(flc->flc) + desc_bytes(desc), in skcipher_setkey()
981 ctx->dir); in skcipher_setkey()
1010 keylen -= CTR_RFC3686_NONCE_SIZE; in rfc3686_skcipher_setkey()
1026 * AES-CTR needs to load IV in CONTEXT1 reg in ctr_skcipher_setkey()
1043 return -EINVAL; in chacha20_skcipher_setkey()
1066 struct device *dev = ctx->dev; in xts_skcipher_setkey()
1079 ctx->xts_key_fallback = true; in xts_skcipher_setkey()
1081 if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) { in xts_skcipher_setkey()
1082 err = crypto_skcipher_setkey(ctx->fallback, key, keylen); in xts_skcipher_setkey()
1087 ctx->cdata.keylen = keylen; in xts_skcipher_setkey()
1088 ctx->cdata.key_virt = key; in xts_skcipher_setkey()
1089 ctx->cdata.key_inline = true; in xts_skcipher_setkey()
1091 /* xts_skcipher_encrypt shared descriptor */ in xts_skcipher_setkey()
1092 flc = &ctx->flc[ENCRYPT]; in xts_skcipher_setkey()
1093 desc = flc->sh_desc; in xts_skcipher_setkey()
1094 cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata); in xts_skcipher_setkey()
1095 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in xts_skcipher_setkey()
1096 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], in xts_skcipher_setkey()
1097 sizeof(flc->flc) + desc_bytes(desc), in xts_skcipher_setkey()
1098 ctx->dir); in xts_skcipher_setkey()
1100 /* xts_skcipher_decrypt shared descriptor */ in xts_skcipher_setkey()
1101 flc = &ctx->flc[DECRYPT]; in xts_skcipher_setkey()
1102 desc = flc->sh_desc; in xts_skcipher_setkey()
1103 cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata); in xts_skcipher_setkey()
1104 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in xts_skcipher_setkey()
1105 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], in xts_skcipher_setkey()
1106 sizeof(flc->flc) + desc_bytes(desc), in xts_skcipher_setkey()
1107 ctx->dir); in xts_skcipher_setkey()
1116 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in skcipher_edesc_alloc()
1117 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in skcipher_edesc_alloc()
1119 struct device *dev = ctx->dev; in skcipher_edesc_alloc()
1120 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in skcipher_edesc_alloc()
1130 src_nents = sg_nents_for_len(req->src, req->cryptlen); in skcipher_edesc_alloc()
1133 req->cryptlen); in skcipher_edesc_alloc()
1137 if (unlikely(req->dst != req->src)) { in skcipher_edesc_alloc()
1138 dst_nents = sg_nents_for_len(req->dst, req->cryptlen); in skcipher_edesc_alloc()
1141 req->cryptlen); in skcipher_edesc_alloc()
1145 mapped_src_nents = dma_map_sg(dev, req->src, src_nents, in skcipher_edesc_alloc()
1149 return ERR_PTR(-ENOMEM); in skcipher_edesc_alloc()
1152 mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, in skcipher_edesc_alloc()
1156 dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); in skcipher_edesc_alloc()
1157 return ERR_PTR(-ENOMEM); in skcipher_edesc_alloc()
1160 mapped_src_nents = dma_map_sg(dev, req->src, src_nents, in skcipher_edesc_alloc()
1164 return ERR_PTR(-ENOMEM); in skcipher_edesc_alloc()
1179 if (req->src != req->dst) in skcipher_edesc_alloc()
1189 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, in skcipher_edesc_alloc()
1191 return ERR_PTR(-ENOMEM); in skcipher_edesc_alloc()
1198 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, in skcipher_edesc_alloc()
1200 return ERR_PTR(-ENOMEM); in skcipher_edesc_alloc()
1204 sg_table = &edesc->sgt[0]; in skcipher_edesc_alloc()
1206 memcpy(iv, req->iv, ivsize); in skcipher_edesc_alloc()
1211 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, in skcipher_edesc_alloc()
1214 return ERR_PTR(-ENOMEM); in skcipher_edesc_alloc()
1217 edesc->src_nents = src_nents; in skcipher_edesc_alloc()
1218 edesc->dst_nents = dst_nents; in skcipher_edesc_alloc()
1219 edesc->iv_dma = iv_dma; in skcipher_edesc_alloc()
1220 edesc->qm_sg_bytes = qm_sg_bytes; in skcipher_edesc_alloc()
1223 sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0); in skcipher_edesc_alloc()
1225 if (req->src != req->dst) in skcipher_edesc_alloc()
1226 sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0); in skcipher_edesc_alloc()
1231 edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes, in skcipher_edesc_alloc()
1233 if (dma_mapping_error(dev, edesc->qm_sg_dma)) { in skcipher_edesc_alloc()
1235 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, in skcipher_edesc_alloc()
1238 return ERR_PTR(-ENOMEM); in skcipher_edesc_alloc()
1241 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in skcipher_edesc_alloc()
1243 dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize); in skcipher_edesc_alloc()
1244 dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize); in skcipher_edesc_alloc()
1247 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); in skcipher_edesc_alloc()
1251 if (req->src == req->dst) in skcipher_edesc_alloc()
1252 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + in skcipher_edesc_alloc()
1255 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx * in skcipher_edesc_alloc()
1267 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, in aead_unmap()
1268 edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma, in aead_unmap()
1269 edesc->qm_sg_bytes); in aead_unmap()
1270 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); in aead_unmap()
1279 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, in skcipher_unmap()
1280 edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma, in skcipher_unmap()
1281 edesc->qm_sg_bytes); in skcipher_unmap()
1290 struct aead_edesc *edesc = req_ctx->edesc; in aead_encrypt_done()
1295 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); in aead_encrypt_done()
1298 ecode = caam_qi2_strstatus(ctx->dev, status); in aead_encrypt_done()
1300 aead_unmap(ctx->dev, edesc, req); in aead_encrypt_done()
1311 struct aead_edesc *edesc = req_ctx->edesc; in aead_decrypt_done()
1316 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); in aead_decrypt_done()
1319 ecode = caam_qi2_strstatus(ctx->dev, status); in aead_decrypt_done()
1321 aead_unmap(ctx->dev, edesc, req); in aead_decrypt_done()
1339 caam_req->flc = &ctx->flc[ENCRYPT]; in aead_encrypt()
1340 caam_req->flc_dma = ctx->flc_dma[ENCRYPT]; in aead_encrypt()
1341 caam_req->cbk = aead_encrypt_done; in aead_encrypt()
1342 caam_req->ctx = &req->base; in aead_encrypt()
1343 caam_req->edesc = edesc; in aead_encrypt()
1344 ret = dpaa2_caam_enqueue(ctx->dev, caam_req); in aead_encrypt()
1345 if (ret != -EINPROGRESS && in aead_encrypt()
1346 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { in aead_encrypt()
1347 aead_unmap(ctx->dev, edesc, req); in aead_encrypt()
1367 caam_req->flc = &ctx->flc[DECRYPT]; in aead_decrypt()
1368 caam_req->flc_dma = ctx->flc_dma[DECRYPT]; in aead_decrypt()
1369 caam_req->cbk = aead_decrypt_done; in aead_decrypt()
1370 caam_req->ctx = &req->base; in aead_decrypt()
1371 caam_req->edesc = edesc; in aead_decrypt()
1372 ret = dpaa2_caam_enqueue(ctx->dev, caam_req); in aead_decrypt()
1373 if (ret != -EINPROGRESS && in aead_decrypt()
1374 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { in aead_decrypt()
1375 aead_unmap(ctx->dev, edesc, req); in aead_decrypt()
1384 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req); in ipsec_gcm_encrypt()
1389 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req); in ipsec_gcm_decrypt()
1399 struct skcipher_edesc *edesc = req_ctx->edesc; in skcipher_encrypt_done()
1403 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); in skcipher_encrypt_done()
1406 ecode = caam_qi2_strstatus(ctx->dev, status); in skcipher_encrypt_done()
1409 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, in skcipher_encrypt_done()
1410 edesc->src_nents > 1 ? 100 : ivsize, 1); in skcipher_encrypt_done()
1412 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, in skcipher_encrypt_done()
1413 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); in skcipher_encrypt_done()
1415 skcipher_unmap(ctx->dev, edesc, req); in skcipher_encrypt_done()
1418 * The crypto API expects us to set the IV (req->iv) to the last in skcipher_encrypt_done()
1423 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, in skcipher_encrypt_done()
1437 struct skcipher_edesc *edesc = req_ctx->edesc; in skcipher_decrypt_done()
1441 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); in skcipher_decrypt_done()
1444 ecode = caam_qi2_strstatus(ctx->dev, status); in skcipher_decrypt_done()
1447 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, in skcipher_decrypt_done()
1448 edesc->src_nents > 1 ? 100 : ivsize, 1); in skcipher_decrypt_done()
1450 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, in skcipher_decrypt_done()
1451 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); in skcipher_decrypt_done()
1453 skcipher_unmap(ctx->dev, edesc, req); in skcipher_decrypt_done()
1456 * The crypto API expects us to set the IV (req->iv) to the last in skcipher_decrypt_done()
1461 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, in skcipher_decrypt_done()
1473 return !!get_unaligned((u64 *)(req->iv + (ivsize / 2))); in xts_skcipher_ivsize()
1482 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); in skcipher_encrypt()
1490 if (!req->cryptlen && !ctx->fallback) in skcipher_encrypt()
1493 if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) || in skcipher_encrypt()
1494 ctx->xts_key_fallback)) { in skcipher_encrypt()
1495 skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback); in skcipher_encrypt()
1496 skcipher_request_set_callback(&caam_req->fallback_req, in skcipher_encrypt()
1497 req->base.flags, in skcipher_encrypt()
1498 req->base.complete, in skcipher_encrypt()
1499 req->base.data); in skcipher_encrypt()
1500 skcipher_request_set_crypt(&caam_req->fallback_req, req->src, in skcipher_encrypt()
1501 req->dst, req->cryptlen, req->iv); in skcipher_encrypt()
1503 return crypto_skcipher_encrypt(&caam_req->fallback_req); in skcipher_encrypt()
1511 caam_req->flc = &ctx->flc[ENCRYPT]; in skcipher_encrypt()
1512 caam_req->flc_dma = ctx->flc_dma[ENCRYPT]; in skcipher_encrypt()
1513 caam_req->cbk = skcipher_encrypt_done; in skcipher_encrypt()
1514 caam_req->ctx = &req->base; in skcipher_encrypt()
1515 caam_req->edesc = edesc; in skcipher_encrypt()
1516 ret = dpaa2_caam_enqueue(ctx->dev, caam_req); in skcipher_encrypt()
1517 if (ret != -EINPROGRESS && in skcipher_encrypt()
1518 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { in skcipher_encrypt()
1519 skcipher_unmap(ctx->dev, edesc, req); in skcipher_encrypt()
1532 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); in skcipher_decrypt()
1540 if (!req->cryptlen && !ctx->fallback) in skcipher_decrypt()
1543 if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) || in skcipher_decrypt()
1544 ctx->xts_key_fallback)) { in skcipher_decrypt()
1545 skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback); in skcipher_decrypt()
1546 skcipher_request_set_callback(&caam_req->fallback_req, in skcipher_decrypt()
1547 req->base.flags, in skcipher_decrypt()
1548 req->base.complete, in skcipher_decrypt()
1549 req->base.data); in skcipher_decrypt()
1550 skcipher_request_set_crypt(&caam_req->fallback_req, req->src, in skcipher_decrypt()
1551 req->dst, req->cryptlen, req->iv); in skcipher_decrypt()
1553 return crypto_skcipher_decrypt(&caam_req->fallback_req); in skcipher_decrypt()
1561 caam_req->flc = &ctx->flc[DECRYPT]; in skcipher_decrypt()
1562 caam_req->flc_dma = ctx->flc_dma[DECRYPT]; in skcipher_decrypt()
1563 caam_req->cbk = skcipher_decrypt_done; in skcipher_decrypt()
1564 caam_req->ctx = &req->base; in skcipher_decrypt()
1565 caam_req->edesc = edesc; in skcipher_decrypt()
1566 ret = dpaa2_caam_enqueue(ctx->dev, caam_req); in skcipher_decrypt()
1567 if (ret != -EINPROGRESS && in skcipher_decrypt()
1568 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { in skcipher_decrypt()
1569 skcipher_unmap(ctx->dev, edesc, req); in skcipher_decrypt()
1583 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; in caam_cra_init()
1584 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; in caam_cra_init()
1586 ctx->dev = caam->dev; in caam_cra_init()
1587 ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; in caam_cra_init()
1589 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, in caam_cra_init()
1591 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); in caam_cra_init()
1592 if (dma_mapping_error(ctx->dev, dma_addr)) { in caam_cra_init()
1593 dev_err(ctx->dev, "unable to map key, shared descriptors\n"); in caam_cra_init()
1594 return -ENOMEM; in caam_cra_init()
1598 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]); in caam_cra_init()
1599 ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]); in caam_cra_init()
1610 u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; in caam_cra_init_skcipher()
1614 const char *tfm_name = crypto_tfm_alg_name(&tfm->base); in caam_cra_init_skcipher()
1620 dev_err(caam_alg->caam.dev, in caam_cra_init_skcipher()
1626 ctx->fallback = fallback; in caam_cra_init_skcipher()
1635 ret = caam_cra_init(ctx, &caam_alg->caam, false); in caam_cra_init_skcipher()
1636 if (ret && ctx->fallback) in caam_cra_init_skcipher()
1637 crypto_free_skcipher(ctx->fallback); in caam_cra_init_skcipher()
1649 return caam_cra_init(crypto_aead_ctx_dma(tfm), &caam_alg->caam, in caam_cra_init_aead()
1650 !caam_alg->caam.nodkp); in caam_cra_init_aead()
1655 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], in caam_exit_common()
1656 offsetof(struct caam_ctx, flc_dma), ctx->dir, in caam_exit_common()
1664 if (ctx->fallback) in caam_cra_exit()
1665 crypto_free_skcipher(ctx->fallback); in caam_cra_exit()
1679 .cra_driver_name = "cbc-aes-caam-qi2",
1695 .cra_driver_name = "cbc-3des-caam-qi2",
1711 .cra_driver_name = "cbc-des-caam-qi2",
1727 .cra_driver_name = "ctr-aes-caam-qi2",
1745 .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1768 .cra_driver_name = "xts-aes-caam-qi2",
1785 .cra_driver_name = "chacha20-caam-qi2",
1804 .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1823 .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1843 .cra_driver_name = "gcm-aes-caam-qi2",
1858 /* single-pass ipsec_esp descriptor */
1863 .cra_driver_name = "authenc-hmac-md5-"
1864 "cbc-aes-caam-qi2",
1885 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1886 "cbc-aes-caam-qi2",
1907 .cra_driver_name = "authenc-hmac-sha1-"
1908 "cbc-aes-caam-qi2",
1929 .cra_driver_name = "echainiv-authenc-"
1930 "hmac-sha1-cbc-aes-caam-qi2",
1951 .cra_driver_name = "authenc-hmac-sha224-"
1952 "cbc-aes-caam-qi2",
1973 .cra_driver_name = "echainiv-authenc-"
1974 "hmac-sha224-cbc-aes-caam-qi2",
1995 .cra_driver_name = "authenc-hmac-sha256-"
1996 "cbc-aes-caam-qi2",
2017 .cra_driver_name = "echainiv-authenc-"
2018 "hmac-sha256-cbc-aes-"
2019 "caam-qi2",
2040 .cra_driver_name = "authenc-hmac-sha384-"
2041 "cbc-aes-caam-qi2",
2062 .cra_driver_name = "echainiv-authenc-"
2063 "hmac-sha384-cbc-aes-"
2064 "caam-qi2",
2085 .cra_driver_name = "authenc-hmac-sha512-"
2086 "cbc-aes-caam-qi2",
2107 .cra_driver_name = "echainiv-authenc-"
2108 "hmac-sha512-cbc-aes-"
2109 "caam-qi2",
2130 .cra_driver_name = "authenc-hmac-md5-"
2131 "cbc-des3_ede-caam-qi2",
2152 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2153 "cbc-des3_ede-caam-qi2",
2175 .cra_driver_name = "authenc-hmac-sha1-"
2176 "cbc-des3_ede-caam-qi2",
2197 .cra_driver_name = "echainiv-authenc-"
2198 "hmac-sha1-"
2199 "cbc-des3_ede-caam-qi2",
2221 .cra_driver_name = "authenc-hmac-sha224-"
2222 "cbc-des3_ede-caam-qi2",
2243 .cra_driver_name = "echainiv-authenc-"
2244 "hmac-sha224-"
2245 "cbc-des3_ede-caam-qi2",
2267 .cra_driver_name = "authenc-hmac-sha256-"
2268 "cbc-des3_ede-caam-qi2",
2289 .cra_driver_name = "echainiv-authenc-"
2290 "hmac-sha256-"
2291 "cbc-des3_ede-caam-qi2",
2313 .cra_driver_name = "authenc-hmac-sha384-"
2314 "cbc-des3_ede-caam-qi2",
2335 .cra_driver_name = "echainiv-authenc-"
2336 "hmac-sha384-"
2337 "cbc-des3_ede-caam-qi2",
2359 .cra_driver_name = "authenc-hmac-sha512-"
2360 "cbc-des3_ede-caam-qi2",
2381 .cra_driver_name = "echainiv-authenc-"
2382 "hmac-sha512-"
2383 "cbc-des3_ede-caam-qi2",
2404 .cra_driver_name = "authenc-hmac-md5-"
2405 "cbc-des-caam-qi2",
2426 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2427 "cbc-des-caam-qi2",
2448 .cra_driver_name = "authenc-hmac-sha1-"
2449 "cbc-des-caam-qi2",
2470 .cra_driver_name = "echainiv-authenc-"
2471 "hmac-sha1-cbc-des-caam-qi2",
2492 .cra_driver_name = "authenc-hmac-sha224-"
2493 "cbc-des-caam-qi2",
2514 .cra_driver_name = "echainiv-authenc-"
2515 "hmac-sha224-cbc-des-"
2516 "caam-qi2",
2537 .cra_driver_name = "authenc-hmac-sha256-"
2538 "cbc-des-caam-qi2",
2559 .cra_driver_name = "echainiv-authenc-"
2560 "hmac-sha256-cbc-des-"
2561 "caam-qi2",
2582 .cra_driver_name = "authenc-hmac-sha384-"
2583 "cbc-des-caam-qi2",
2604 .cra_driver_name = "echainiv-authenc-"
2605 "hmac-sha384-cbc-des-"
2606 "caam-qi2",
2627 .cra_driver_name = "authenc-hmac-sha512-"
2628 "cbc-des-caam-qi2",
2649 .cra_driver_name = "echainiv-authenc-"
2650 "hmac-sha512-cbc-des-"
2651 "caam-qi2",
2673 .cra_driver_name = "authenc-hmac-md5-"
2674 "rfc3686-ctr-aes-caam-qi2",
2697 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2698 "rfc3686-ctr-aes-caam-qi2",
2722 .cra_driver_name = "authenc-hmac-sha1-"
2723 "rfc3686-ctr-aes-caam-qi2",
2746 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2747 "rfc3686-ctr-aes-caam-qi2",
2771 .cra_driver_name = "authenc-hmac-sha224-"
2772 "rfc3686-ctr-aes-caam-qi2",
2795 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2796 "rfc3686-ctr-aes-caam-qi2",
2820 .cra_driver_name = "authenc-hmac-sha256-"
2821 "rfc3686-ctr-aes-caam-qi2",
2844 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2845 "rfc3686-ctr-aes-caam-qi2",
2869 .cra_driver_name = "authenc-hmac-sha384-"
2870 "rfc3686-ctr-aes-caam-qi2",
2893 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2894 "rfc3686-ctr-aes-caam-qi2",
2917 .cra_driver_name = "rfc7539-chacha20-poly1305-"
2918 "caam-qi2",
2940 .cra_driver_name = "rfc7539esp-chacha20-"
2941 "poly1305-caam-qi2",
2964 .cra_driver_name = "authenc-hmac-sha512-"
2965 "rfc3686-ctr-aes-caam-qi2",
2988 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2989 "rfc3686-ctr-aes-caam-qi2",
3012 struct skcipher_alg *alg = &t_alg->skcipher; in caam_skcipher_alg_init()
3014 alg->base.cra_module = THIS_MODULE; in caam_skcipher_alg_init()
3015 alg->base.cra_priority = CAAM_CRA_PRIORITY; in caam_skcipher_alg_init()
3016 alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); in caam_skcipher_alg_init()
3017 alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | in caam_skcipher_alg_init()
3020 alg->init = caam_cra_init_skcipher; in caam_skcipher_alg_init()
3021 alg->exit = caam_cra_exit; in caam_skcipher_alg_init()
3026 struct aead_alg *alg = &t_alg->aead; in caam_aead_alg_init()
3028 alg->base.cra_module = THIS_MODULE; in caam_aead_alg_init()
3029 alg->base.cra_priority = CAAM_CRA_PRIORITY; in caam_aead_alg_init()
3030 alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); in caam_aead_alg_init()
3031 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | in caam_aead_alg_init()
3034 alg->init = caam_cra_init_aead; in caam_aead_alg_init()
3035 alg->exit = caam_cra_exit_aead; in caam_aead_alg_init()
3056 * struct caam_hash_ctx - ahash per-session context
3102 int buflen = state->buflen; in buf_map_to_qm_sg()
3107 state->buf_dma = dma_map_single(dev, state->buf, buflen, in buf_map_to_qm_sg()
3109 if (dma_mapping_error(dev, state->buf_dma)) { in buf_map_to_qm_sg()
3111 state->buf_dma = 0; in buf_map_to_qm_sg()
3112 return -ENOMEM; in buf_map_to_qm_sg()
3115 dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0); in buf_map_to_qm_sg()
3120 /* Map state->caam_ctx, and add it to link table */
3125 state->ctx_dma_len = ctx_len; in ctx_map_to_qm_sg()
3126 state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag); in ctx_map_to_qm_sg()
3127 if (dma_mapping_error(dev, state->ctx_dma)) { in ctx_map_to_qm_sg()
3129 state->ctx_dma = 0; in ctx_map_to_qm_sg()
3130 return -ENOMEM; in ctx_map_to_qm_sg()
3133 dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0); in ctx_map_to_qm_sg()
3142 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); in ahash_set_sh_desc()
3146 /* ahash_update shared descriptor */ in ahash_set_sh_desc()
3147 flc = &ctx->flc[UPDATE]; in ahash_set_sh_desc()
3148 desc = flc->sh_desc; in ahash_set_sh_desc()
3149 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, in ahash_set_sh_desc()
3150 ctx->ctx_len, true, priv->sec_attr.era); in ahash_set_sh_desc()
3151 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in ahash_set_sh_desc()
3152 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE], in ahash_set_sh_desc()
3158 /* ahash_update_first shared descriptor */ in ahash_set_sh_desc()
3159 flc = &ctx->flc[UPDATE_FIRST]; in ahash_set_sh_desc()
3160 desc = flc->sh_desc; in ahash_set_sh_desc()
3161 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, in ahash_set_sh_desc()
3162 ctx->ctx_len, false, priv->sec_attr.era); in ahash_set_sh_desc()
3163 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in ahash_set_sh_desc()
3164 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST], in ahash_set_sh_desc()
3170 /* ahash_final shared descriptor */ in ahash_set_sh_desc()
3171 flc = &ctx->flc[FINALIZE]; in ahash_set_sh_desc()
3172 desc = flc->sh_desc; in ahash_set_sh_desc()
3173 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, in ahash_set_sh_desc()
3174 ctx->ctx_len, true, priv->sec_attr.era); in ahash_set_sh_desc()
3175 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in ahash_set_sh_desc()
3176 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE], in ahash_set_sh_desc()
3182 /* ahash_digest shared descriptor */ in ahash_set_sh_desc()
3183 flc = &ctx->flc[DIGEST]; in ahash_set_sh_desc()
3184 desc = flc->sh_desc; in ahash_set_sh_desc()
3185 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, in ahash_set_sh_desc()
3186 ctx->ctx_len, false, priv->sec_attr.era); in ahash_set_sh_desc()
3187 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in ahash_set_sh_desc()
3188 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST], in ahash_set_sh_desc()
3207 dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); in split_key_sh_done()
3209 res->err = err ? caam_qi2_strstatus(res->dev, err) : 0; in split_key_sh_done()
3210 complete(&res->completion); in split_key_sh_done()
3223 int ret = -ENOMEM; in hash_digest_key()
3228 return -ENOMEM; in hash_digest_key()
3230 in_fle = &req_ctx->fd_flt[1]; in hash_digest_key()
3231 out_fle = &req_ctx->fd_flt[0]; in hash_digest_key()
3237 key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL); in hash_digest_key()
3238 if (dma_mapping_error(ctx->dev, key_dma)) { in hash_digest_key()
3239 dev_err(ctx->dev, "unable to map key memory\n"); in hash_digest_key()
3243 desc = flc->sh_desc; in hash_digest_key()
3248 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | in hash_digest_key()
3255 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in hash_digest_key()
3256 flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) + in hash_digest_key()
3258 if (dma_mapping_error(ctx->dev, flc_dma)) { in hash_digest_key()
3259 dev_err(ctx->dev, "unable to map shared descriptor\n"); in hash_digest_key()
3279 result.dev = ctx->dev; in hash_digest_key()
3281 req_ctx->flc = flc; in hash_digest_key()
3282 req_ctx->flc_dma = flc_dma; in hash_digest_key()
3283 req_ctx->cbk = split_key_sh_done; in hash_digest_key()
3284 req_ctx->ctx = &result; in hash_digest_key()
3286 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in hash_digest_key()
3287 if (ret == -EINPROGRESS) { in hash_digest_key()
3296 dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc), in hash_digest_key()
3299 dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL); in hash_digest_key()
3314 unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base); in ahash_setkey()
3319 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize); in ahash_setkey()
3326 return -EOVERFLOW; in ahash_setkey()
3330 return -ENOMEM; in ahash_setkey()
3337 ctx->adata.keylen = keylen; in ahash_setkey()
3338 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & in ahash_setkey()
3340 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) in ahash_setkey()
3343 ctx->adata.key_virt = key; in ahash_setkey()
3344 ctx->adata.key_inline = true; in ahash_setkey()
3352 if (keylen > ctx->adata.keylen_pad) { in ahash_setkey()
3353 memcpy(ctx->key, key, keylen); in ahash_setkey()
3354 dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma, in ahash_setkey()
3355 ctx->adata.keylen_pad, in ahash_setkey()
3364 return -EINVAL; in ahash_setkey()
3372 if (edesc->src_nents) in ahash_unmap()
3373 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); in ahash_unmap()
3375 if (edesc->qm_sg_bytes) in ahash_unmap()
3376 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes, in ahash_unmap()
3379 if (state->buf_dma) { in ahash_unmap()
3380 dma_unmap_single(dev, state->buf_dma, state->buflen, in ahash_unmap()
3382 state->buf_dma = 0; in ahash_unmap()
3392 if (state->ctx_dma) { in ahash_unmap_ctx()
3393 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag); in ahash_unmap_ctx()
3394 state->ctx_dma = 0; in ahash_unmap_ctx()
3405 struct ahash_edesc *edesc = state->caam_req.edesc; in ahash_done()
3410 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); in ahash_done()
3413 ecode = caam_qi2_strstatus(ctx->dev, status); in ahash_done()
3415 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); in ahash_done()
3416 memcpy(req->result, state->caam_ctx, digestsize); in ahash_done()
3420 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, in ahash_done()
3421 ctx->ctx_len, 1); in ahash_done()
3432 struct ahash_edesc *edesc = state->caam_req.edesc; in ahash_done_bi()
3436 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); in ahash_done_bi()
3439 ecode = caam_qi2_strstatus(ctx->dev, status); in ahash_done_bi()
3441 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); in ahash_done_bi()
3444 scatterwalk_map_and_copy(state->buf, req->src, in ahash_done_bi()
3445 req->nbytes - state->next_buflen, in ahash_done_bi()
3446 state->next_buflen, 0); in ahash_done_bi()
3447 state->buflen = state->next_buflen; in ahash_done_bi()
3450 DUMP_PREFIX_ADDRESS, 16, 4, state->buf, in ahash_done_bi()
3451 state->buflen, 1); in ahash_done_bi()
3454 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, in ahash_done_bi()
3455 ctx->ctx_len, 1); in ahash_done_bi()
3456 if (req->result) in ahash_done_bi()
3458 DUMP_PREFIX_ADDRESS, 16, 4, req->result, in ahash_done_bi()
3470 struct ahash_edesc *edesc = state->caam_req.edesc; in ahash_done_ctx_src()
3475 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); in ahash_done_ctx_src()
3478 ecode = caam_qi2_strstatus(ctx->dev, status); in ahash_done_ctx_src()
3480 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); in ahash_done_ctx_src()
3481 memcpy(req->result, state->caam_ctx, digestsize); in ahash_done_ctx_src()
3485 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, in ahash_done_ctx_src()
3486 ctx->ctx_len, 1); in ahash_done_ctx_src()
3497 struct ahash_edesc *edesc = state->caam_req.edesc; in ahash_done_ctx_dst()
3501 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); in ahash_done_ctx_dst()
3504 ecode = caam_qi2_strstatus(ctx->dev, status); in ahash_done_ctx_dst()
3506 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); in ahash_done_ctx_dst()
3509 scatterwalk_map_and_copy(state->buf, req->src, in ahash_done_ctx_dst()
3510 req->nbytes - state->next_buflen, in ahash_done_ctx_dst()
3511 state->next_buflen, 0); in ahash_done_ctx_dst()
3512 state->buflen = state->next_buflen; in ahash_done_ctx_dst()
3515 DUMP_PREFIX_ADDRESS, 16, 4, state->buf, in ahash_done_ctx_dst()
3516 state->buflen, 1); in ahash_done_ctx_dst()
3519 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, in ahash_done_ctx_dst()
3520 ctx->ctx_len, 1); in ahash_done_ctx_dst()
3521 if (req->result) in ahash_done_ctx_dst()
3523 DUMP_PREFIX_ADDRESS, 16, 4, req->result, in ahash_done_ctx_dst()
3534 struct caam_request *req_ctx = &state->caam_req; in ahash_update_ctx()
3535 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in ahash_update_ctx()
3536 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in ahash_update_ctx()
3537 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in ahash_update_ctx()
3539 u8 *buf = state->buf; in ahash_update_ctx()
3540 int *buflen = &state->buflen; in ahash_update_ctx()
3541 int *next_buflen = &state->next_buflen; in ahash_update_ctx()
3542 int in_len = *buflen + req->nbytes, to_hash; in ahash_update_ctx()
3547 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); in ahash_update_ctx()
3548 to_hash = in_len - *next_buflen; in ahash_update_ctx()
3552 int src_len = req->nbytes - *next_buflen; in ahash_update_ctx()
3554 src_nents = sg_nents_for_len(req->src, src_len); in ahash_update_ctx()
3556 dev_err(ctx->dev, "Invalid number of src SG.\n"); in ahash_update_ctx()
3561 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, in ahash_update_ctx()
3564 dev_err(ctx->dev, "unable to DMA map source\n"); in ahash_update_ctx()
3565 return -ENOMEM; in ahash_update_ctx()
3574 dma_unmap_sg(ctx->dev, req->src, src_nents, in ahash_update_ctx()
3576 return -ENOMEM; in ahash_update_ctx()
3579 edesc->src_nents = src_nents; in ahash_update_ctx()
3583 sg_table = &edesc->sgt[0]; in ahash_update_ctx()
3585 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, in ahash_update_ctx()
3590 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); in ahash_update_ctx()
3595 sg_to_qm_sg_last(req->src, src_len, in ahash_update_ctx()
3598 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, in ahash_update_ctx()
3602 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, in ahash_update_ctx()
3604 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { in ahash_update_ctx()
3605 dev_err(ctx->dev, "unable to map S/G table\n"); in ahash_update_ctx()
3606 ret = -ENOMEM; in ahash_update_ctx()
3609 edesc->qm_sg_bytes = qm_sg_bytes; in ahash_update_ctx()
3611 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in ahash_update_ctx()
3614 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); in ahash_update_ctx()
3615 dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash); in ahash_update_ctx()
3617 dpaa2_fl_set_addr(out_fle, state->ctx_dma); in ahash_update_ctx()
3618 dpaa2_fl_set_len(out_fle, ctx->ctx_len); in ahash_update_ctx()
3620 req_ctx->flc = &ctx->flc[UPDATE]; in ahash_update_ctx()
3621 req_ctx->flc_dma = ctx->flc_dma[UPDATE]; in ahash_update_ctx()
3622 req_ctx->cbk = ahash_done_bi; in ahash_update_ctx()
3623 req_ctx->ctx = &req->base; in ahash_update_ctx()
3624 req_ctx->edesc = edesc; in ahash_update_ctx()
3626 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in ahash_update_ctx()
3627 if (ret != -EINPROGRESS && in ahash_update_ctx()
3628 !(ret == -EBUSY && in ahash_update_ctx()
3629 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) in ahash_update_ctx()
3632 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, in ahash_update_ctx()
3633 req->nbytes, 0); in ahash_update_ctx()
3643 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); in ahash_update_ctx()
3653 struct caam_request *req_ctx = &state->caam_req; in ahash_final_ctx()
3654 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in ahash_final_ctx()
3655 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in ahash_final_ctx()
3656 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in ahash_final_ctx()
3658 int buflen = state->buflen; in ahash_final_ctx()
3668 return -ENOMEM; in ahash_final_ctx()
3671 sg_table = &edesc->sgt[0]; in ahash_final_ctx()
3673 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, in ahash_final_ctx()
3678 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); in ahash_final_ctx()
3684 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, in ahash_final_ctx()
3686 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { in ahash_final_ctx()
3687 dev_err(ctx->dev, "unable to map S/G table\n"); in ahash_final_ctx()
3688 ret = -ENOMEM; in ahash_final_ctx()
3691 edesc->qm_sg_bytes = qm_sg_bytes; in ahash_final_ctx()
3693 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in ahash_final_ctx()
3696 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); in ahash_final_ctx()
3697 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen); in ahash_final_ctx()
3699 dpaa2_fl_set_addr(out_fle, state->ctx_dma); in ahash_final_ctx()
3702 req_ctx->flc = &ctx->flc[FINALIZE]; in ahash_final_ctx()
3703 req_ctx->flc_dma = ctx->flc_dma[FINALIZE]; in ahash_final_ctx()
3704 req_ctx->cbk = ahash_done_ctx_src; in ahash_final_ctx()
3705 req_ctx->ctx = &req->base; in ahash_final_ctx()
3706 req_ctx->edesc = edesc; in ahash_final_ctx()
3708 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in ahash_final_ctx()
3709 if (ret == -EINPROGRESS || in ahash_final_ctx()
3710 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) in ahash_final_ctx()
3714 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); in ahash_final_ctx()
3724 struct caam_request *req_ctx = &state->caam_req; in ahash_finup_ctx()
3725 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in ahash_finup_ctx()
3726 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in ahash_finup_ctx()
3727 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in ahash_finup_ctx()
3729 int buflen = state->buflen; in ahash_finup_ctx()
3737 src_nents = sg_nents_for_len(req->src, req->nbytes); in ahash_finup_ctx()
3739 dev_err(ctx->dev, "Invalid number of src SG.\n"); in ahash_finup_ctx()
3744 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, in ahash_finup_ctx()
3747 dev_err(ctx->dev, "unable to DMA map source\n"); in ahash_finup_ctx()
3748 return -ENOMEM; in ahash_finup_ctx()
3757 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); in ahash_finup_ctx()
3758 return -ENOMEM; in ahash_finup_ctx()
3761 edesc->src_nents = src_nents; in ahash_finup_ctx()
3765 sg_table = &edesc->sgt[0]; in ahash_finup_ctx()
3767 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, in ahash_finup_ctx()
3772 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); in ahash_finup_ctx()
3776 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0); in ahash_finup_ctx()
3778 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, in ahash_finup_ctx()
3780 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { in ahash_finup_ctx()
3781 dev_err(ctx->dev, "unable to map S/G table\n"); in ahash_finup_ctx()
3782 ret = -ENOMEM; in ahash_finup_ctx()
3785 edesc->qm_sg_bytes = qm_sg_bytes; in ahash_finup_ctx()
3787 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in ahash_finup_ctx()
3790 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); in ahash_finup_ctx()
3791 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes); in ahash_finup_ctx()
3793 dpaa2_fl_set_addr(out_fle, state->ctx_dma); in ahash_finup_ctx()
3796 req_ctx->flc = &ctx->flc[FINALIZE]; in ahash_finup_ctx()
3797 req_ctx->flc_dma = ctx->flc_dma[FINALIZE]; in ahash_finup_ctx()
3798 req_ctx->cbk = ahash_done_ctx_src; in ahash_finup_ctx()
3799 req_ctx->ctx = &req->base; in ahash_finup_ctx()
3800 req_ctx->edesc = edesc; in ahash_finup_ctx()
3802 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in ahash_finup_ctx()
3803 if (ret == -EINPROGRESS || in ahash_finup_ctx()
3804 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) in ahash_finup_ctx()
3808 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); in ahash_finup_ctx()
3818 struct caam_request *req_ctx = &state->caam_req; in ahash_digest()
3819 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in ahash_digest()
3820 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in ahash_digest()
3821 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in ahash_digest()
3826 int ret = -ENOMEM; in ahash_digest()
3828 state->buf_dma = 0; in ahash_digest()
3830 src_nents = sg_nents_for_len(req->src, req->nbytes); in ahash_digest()
3832 dev_err(ctx->dev, "Invalid number of src SG.\n"); in ahash_digest()
3837 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, in ahash_digest()
3840 dev_err(ctx->dev, "unable to map source for DMA\n"); in ahash_digest()
3850 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); in ahash_digest()
3854 edesc->src_nents = src_nents; in ahash_digest()
3855 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in ahash_digest()
3859 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0]; in ahash_digest()
3862 sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0); in ahash_digest()
3863 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, in ahash_digest()
3865 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { in ahash_digest()
3866 dev_err(ctx->dev, "unable to map S/G table\n"); in ahash_digest()
3869 edesc->qm_sg_bytes = qm_sg_bytes; in ahash_digest()
3871 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); in ahash_digest()
3874 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src)); in ahash_digest()
3877 state->ctx_dma_len = digestsize; in ahash_digest()
3878 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize, in ahash_digest()
3880 if (dma_mapping_error(ctx->dev, state->ctx_dma)) { in ahash_digest()
3881 dev_err(ctx->dev, "unable to map ctx\n"); in ahash_digest()
3882 state->ctx_dma = 0; in ahash_digest()
3887 dpaa2_fl_set_len(in_fle, req->nbytes); in ahash_digest()
3889 dpaa2_fl_set_addr(out_fle, state->ctx_dma); in ahash_digest()
3892 req_ctx->flc = &ctx->flc[DIGEST]; in ahash_digest()
3893 req_ctx->flc_dma = ctx->flc_dma[DIGEST]; in ahash_digest()
3894 req_ctx->cbk = ahash_done; in ahash_digest()
3895 req_ctx->ctx = &req->base; in ahash_digest()
3896 req_ctx->edesc = edesc; in ahash_digest()
3897 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in ahash_digest()
3898 if (ret == -EINPROGRESS || in ahash_digest()
3899 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) in ahash_digest()
3903 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); in ahash_digest()
3913 struct caam_request *req_ctx = &state->caam_req; in ahash_final_no_ctx()
3914 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in ahash_final_no_ctx()
3915 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in ahash_final_no_ctx()
3916 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in ahash_final_no_ctx()
3918 u8 *buf = state->buf; in ahash_final_no_ctx()
3919 int buflen = state->buflen; in ahash_final_no_ctx()
3922 int ret = -ENOMEM; in ahash_final_no_ctx()
3930 state->buf_dma = dma_map_single(ctx->dev, buf, buflen, in ahash_final_no_ctx()
3932 if (dma_mapping_error(ctx->dev, state->buf_dma)) { in ahash_final_no_ctx()
3933 dev_err(ctx->dev, "unable to map src\n"); in ahash_final_no_ctx()
3938 state->ctx_dma_len = digestsize; in ahash_final_no_ctx()
3939 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize, in ahash_final_no_ctx()
3941 if (dma_mapping_error(ctx->dev, state->ctx_dma)) { in ahash_final_no_ctx()
3942 dev_err(ctx->dev, "unable to map ctx\n"); in ahash_final_no_ctx()
3943 state->ctx_dma = 0; in ahash_final_no_ctx()
3947 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in ahash_final_no_ctx()
3957 dpaa2_fl_set_addr(in_fle, state->buf_dma); in ahash_final_no_ctx()
3961 dpaa2_fl_set_addr(out_fle, state->ctx_dma); in ahash_final_no_ctx()
3964 req_ctx->flc = &ctx->flc[DIGEST]; in ahash_final_no_ctx()
3965 req_ctx->flc_dma = ctx->flc_dma[DIGEST]; in ahash_final_no_ctx()
3966 req_ctx->cbk = ahash_done; in ahash_final_no_ctx()
3967 req_ctx->ctx = &req->base; in ahash_final_no_ctx()
3968 req_ctx->edesc = edesc; in ahash_final_no_ctx()
3970 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in ahash_final_no_ctx()
3971 if (ret == -EINPROGRESS || in ahash_final_no_ctx()
3972 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) in ahash_final_no_ctx()
3976 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); in ahash_final_no_ctx()
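/*
 * ahash_update_no_ctx() - buffering update before any running context
 * exists. Input that does not fill a whole block stays in state->buf;
 * the block-aligned remainder is chained (buffered bytes first, then
 * req->src) through a QM S/G table into an UPDATE_FIRST job whose
 * result lands in state->caam_ctx. On success the state switches to the
 * ahash_*_ctx handlers; with nothing to hash the data is simply
 * appended to state->buf.
 */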
3986 struct caam_request *req_ctx = &state->caam_req; in ahash_update_no_ctx()
3987 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in ahash_update_no_ctx()
3988 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in ahash_update_no_ctx()
3989 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in ahash_update_no_ctx()
3991 u8 *buf = state->buf; in ahash_update_no_ctx()
3992 int *buflen = &state->buflen; in ahash_update_no_ctx()
3993 int *next_buflen = &state->next_buflen; in ahash_update_no_ctx()
3994 int in_len = *buflen + req->nbytes, to_hash; in ahash_update_no_ctx()
3999 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); in ahash_update_no_ctx()
4000 to_hash = in_len - *next_buflen; in ahash_update_no_ctx()
4004 int src_len = req->nbytes - *next_buflen; in ahash_update_no_ctx()
4006 src_nents = sg_nents_for_len(req->src, src_len); in ahash_update_no_ctx()
4008 dev_err(ctx->dev, "Invalid number of src SG.\n"); in ahash_update_no_ctx()
4013 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, in ahash_update_no_ctx()
4016 dev_err(ctx->dev, "unable to DMA map source\n"); in ahash_update_no_ctx()
4017 return -ENOMEM; in ahash_update_no_ctx()
4026 dma_unmap_sg(ctx->dev, req->src, src_nents, in ahash_update_no_ctx()
4028 return -ENOMEM; in ahash_update_no_ctx()
4031 edesc->src_nents = src_nents; in ahash_update_no_ctx()
4034 sg_table = &edesc->sgt[0]; in ahash_update_no_ctx()
4036 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); in ahash_update_no_ctx()
4040 sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0); in ahash_update_no_ctx()
4042 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, in ahash_update_no_ctx()
4044 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { in ahash_update_no_ctx()
4045 dev_err(ctx->dev, "unable to map S/G table\n"); in ahash_update_no_ctx()
4046 ret = -ENOMEM; in ahash_update_no_ctx()
4049 edesc->qm_sg_bytes = qm_sg_bytes; in ahash_update_no_ctx()
4051 state->ctx_dma_len = ctx->ctx_len; in ahash_update_no_ctx()
4052 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, in ahash_update_no_ctx()
4053 ctx->ctx_len, DMA_FROM_DEVICE); in ahash_update_no_ctx()
4054 if (dma_mapping_error(ctx->dev, state->ctx_dma)) { in ahash_update_no_ctx()
4055 dev_err(ctx->dev, "unable to map ctx\n"); in ahash_update_no_ctx()
4056 state->ctx_dma = 0; in ahash_update_no_ctx()
4057 ret = -ENOMEM; in ahash_update_no_ctx()
4061 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in ahash_update_no_ctx()
4064 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); in ahash_update_no_ctx()
4067 dpaa2_fl_set_addr(out_fle, state->ctx_dma); in ahash_update_no_ctx()
4068 dpaa2_fl_set_len(out_fle, ctx->ctx_len); in ahash_update_no_ctx()
4070 req_ctx->flc = &ctx->flc[UPDATE_FIRST]; in ahash_update_no_ctx()
4071 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST]; in ahash_update_no_ctx()
4072 req_ctx->cbk = ahash_done_ctx_dst; in ahash_update_no_ctx()
4073 req_ctx->ctx = &req->base; in ahash_update_no_ctx()
4074 req_ctx->edesc = edesc; in ahash_update_no_ctx()
4076 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in ahash_update_no_ctx()
4077 if (ret != -EINPROGRESS && in ahash_update_no_ctx()
4078 !(ret == -EBUSY && in ahash_update_no_ctx()
4079 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) in ahash_update_no_ctx()
4082 state->update = ahash_update_ctx; in ahash_update_no_ctx()
4083 state->finup = ahash_finup_ctx; in ahash_update_no_ctx()
4084 state->final = ahash_final_ctx; in ahash_update_no_ctx()
4086 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, in ahash_update_no_ctx()
4087 req->nbytes, 0); in ahash_update_no_ctx()
4097 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE); in ahash_update_no_ctx()
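/*
 * ahash_finup_no_ctx() - finup with no running context: state->buf and
 * req->src are chained through a QM S/G table and hashed in a single
 * DIGEST job that writes the result to a freshly mapped digest-sized
 * context buffer.
 */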
4107 struct caam_request *req_ctx = &state->caam_req; in ahash_finup_no_ctx()
4108 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in ahash_finup_no_ctx()
4109 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in ahash_finup_no_ctx()
4110 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in ahash_finup_no_ctx()
4112 int buflen = state->buflen; in ahash_finup_no_ctx()
4117 int ret = -ENOMEM; in ahash_finup_no_ctx()
4119 src_nents = sg_nents_for_len(req->src, req->nbytes); in ahash_finup_no_ctx()
4121 dev_err(ctx->dev, "Invalid number of src SG.\n"); in ahash_finup_no_ctx()
4126 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, in ahash_finup_no_ctx()
4129 dev_err(ctx->dev, "unable to DMA map source\n"); in ahash_finup_no_ctx()
4139 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); in ahash_finup_no_ctx()
4143 edesc->src_nents = src_nents; in ahash_finup_no_ctx()
4145 sg_table = &edesc->sgt[0]; in ahash_finup_no_ctx()
4147 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); in ahash_finup_no_ctx()
4151 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0); in ahash_finup_no_ctx()
4153 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, in ahash_finup_no_ctx()
4155 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { in ahash_finup_no_ctx()
4156 dev_err(ctx->dev, "unable to map S/G table\n"); in ahash_finup_no_ctx()
4157 ret = -ENOMEM; in ahash_finup_no_ctx()
4160 edesc->qm_sg_bytes = qm_sg_bytes; in ahash_finup_no_ctx()
4162 state->ctx_dma_len = digestsize; in ahash_finup_no_ctx()
4163 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize, in ahash_finup_no_ctx()
4165 if (dma_mapping_error(ctx->dev, state->ctx_dma)) { in ahash_finup_no_ctx()
4166 dev_err(ctx->dev, "unable to map ctx\n"); in ahash_finup_no_ctx()
4167 state->ctx_dma = 0; in ahash_finup_no_ctx()
4168 ret = -ENOMEM; in ahash_finup_no_ctx()
4172 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in ahash_finup_no_ctx()
4175 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); in ahash_finup_no_ctx()
4176 dpaa2_fl_set_len(in_fle, buflen + req->nbytes); in ahash_finup_no_ctx()
4178 dpaa2_fl_set_addr(out_fle, state->ctx_dma); in ahash_finup_no_ctx()
4181 req_ctx->flc = &ctx->flc[DIGEST]; in ahash_finup_no_ctx()
4182 req_ctx->flc_dma = ctx->flc_dma[DIGEST]; in ahash_finup_no_ctx()
4183 req_ctx->cbk = ahash_done; in ahash_finup_no_ctx()
4184 req_ctx->ctx = &req->base; in ahash_finup_no_ctx()
4185 req_ctx->edesc = edesc; in ahash_finup_no_ctx()
4186 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in ahash_finup_no_ctx()
4187 if (ret != -EINPROGRESS && in ahash_finup_no_ctx()
4188 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) in ahash_finup_no_ctx()
4193 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); in ahash_finup_no_ctx()
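/*
 * ahash_update_first() - first update on a stream. The block-aligned
 * part of req->src is hashed with UPDATE_FIRST into state->caam_ctx,
 * using an S/G table only when more than one segment was DMA mapped.
 * If anything was hashed, state->update/finup/final switch to the
 * ahash_*_ctx handlers; otherwise the data is copied into state->buf
 * and the no_ctx handlers are installed instead.
 */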
4203 struct caam_request *req_ctx = &state->caam_req; in ahash_update_first()
4204 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in ahash_update_first()
4205 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in ahash_update_first()
4206 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in ahash_update_first()
4208 u8 *buf = state->buf; in ahash_update_first()
4209 int *buflen = &state->buflen; in ahash_update_first()
4210 int *next_buflen = &state->next_buflen; in ahash_update_first()
4216 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - in ahash_update_first()
4218 to_hash = req->nbytes - *next_buflen; in ahash_update_first()
4222 int src_len = req->nbytes - *next_buflen; in ahash_update_first()
4224 src_nents = sg_nents_for_len(req->src, src_len); in ahash_update_first()
4226 dev_err(ctx->dev, "Invalid number of src SG.\n"); in ahash_update_first()
4231 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, in ahash_update_first()
4234 dev_err(ctx->dev, "unable to map source for DMA\n"); in ahash_update_first()
4235 return -ENOMEM; in ahash_update_first()
4244 dma_unmap_sg(ctx->dev, req->src, src_nents, in ahash_update_first()
4246 return -ENOMEM; in ahash_update_first()
4249 edesc->src_nents = src_nents; in ahash_update_first()
4250 sg_table = &edesc->sgt[0]; in ahash_update_first()
4252 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in ahash_update_first()
4259 sg_to_qm_sg_last(req->src, src_len, sg_table, 0); in ahash_update_first()
4262 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, in ahash_update_first()
4265 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { in ahash_update_first()
4266 dev_err(ctx->dev, "unable to map S/G table\n"); in ahash_update_first()
4267 ret = -ENOMEM; in ahash_update_first()
4270 edesc->qm_sg_bytes = qm_sg_bytes; in ahash_update_first()
4272 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); in ahash_update_first()
4275 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src)); in ahash_update_first()
4278 state->ctx_dma_len = ctx->ctx_len; in ahash_update_first()
4279 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, in ahash_update_first()
4280 ctx->ctx_len, DMA_FROM_DEVICE); in ahash_update_first()
4281 if (dma_mapping_error(ctx->dev, state->ctx_dma)) { in ahash_update_first()
4282 dev_err(ctx->dev, "unable to map ctx\n"); in ahash_update_first()
4283 state->ctx_dma = 0; in ahash_update_first()
4284 ret = -ENOMEM; in ahash_update_first()
4289 dpaa2_fl_set_addr(out_fle, state->ctx_dma); in ahash_update_first()
4290 dpaa2_fl_set_len(out_fle, ctx->ctx_len); in ahash_update_first()
4292 req_ctx->flc = &ctx->flc[UPDATE_FIRST]; in ahash_update_first()
4293 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST]; in ahash_update_first()
4294 req_ctx->cbk = ahash_done_ctx_dst; in ahash_update_first()
4295 req_ctx->ctx = &req->base; in ahash_update_first()
4296 req_ctx->edesc = edesc; in ahash_update_first()
4298 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in ahash_update_first()
4299 if (ret != -EINPROGRESS && in ahash_update_first()
4300 !(ret == -EBUSY && req->base.flags & in ahash_update_first()
4304 state->update = ahash_update_ctx; in ahash_update_first()
4305 state->finup = ahash_finup_ctx; in ahash_update_first()
4306 state->final = ahash_final_ctx; in ahash_update_first()
4308 state->update = ahash_update_no_ctx; in ahash_update_first()
4309 state->finup = ahash_finup_no_ctx; in ahash_update_first()
4310 state->final = ahash_final_no_ctx; in ahash_update_first()
4311 scatterwalk_map_and_copy(buf, req->src, 0, in ahash_update_first()
4312 req->nbytes, 0); in ahash_update_first()
4322 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE); in ahash_update_first()
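/*
 * ahash_init() resets the per-request state and installs
 * ahash_update_first / ahash_finup_first / ahash_final_no_ctx as the
 * initial handlers; ahash_update(), ahash_finup() and ahash_final()
 * then just dispatch through whatever function pointers the state
 * currently holds.
 */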
4336 state->update = ahash_update_first; in ahash_init()
4337 state->finup = ahash_finup_first; in ahash_init()
4338 state->final = ahash_final_no_ctx; in ahash_init()
4340 state->ctx_dma = 0; in ahash_init()
4341 state->ctx_dma_len = 0; in ahash_init()
4342 state->buf_dma = 0; in ahash_init()
4343 state->buflen = 0; in ahash_init()
4344 state->next_buflen = 0; in ahash_init()
4353 return state->update(req); in ahash_update()
4360 return state->finup(req); in ahash_finup()
4367 return state->final(req); in ahash_final()
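/*
 * ahash_export()/ahash_import() serialize the software side of a
 * request: the partial-block buffer, the running CAAM context snapshot
 * and the three handler pointers are copied to and from the export blob.
 */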
4374 u8 *buf = state->buf; in ahash_export()
4375 int len = state->buflen; in ahash_export()
4377 memcpy(export->buf, buf, len); in ahash_export()
4378 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); in ahash_export()
4379 export->buflen = len; in ahash_export()
4380 export->update = state->update; in ahash_export()
4381 export->final = state->final; in ahash_export()
4382 export->finup = state->finup; in ahash_export()
4393 memcpy(state->buf, export->buf, export->buflen); in ahash_import()
4394 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); in ahash_import()
4395 state->buflen = export->buflen; in ahash_import()
4396 state->update = export->update; in ahash_import()
4397 state->final = export->final; in ahash_import()
4398 state->finup = export->finup; in ahash_import()
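/*
 * Hash template table: each digest below is exposed twice, as the plain
 * hash ("<alg>-caam-qi2") and as its HMAC variant
 * ("hmac-<alg>-caam-qi2"), covering md5 and sha1 through sha512.
 */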
4417 .driver_name = "sha1-caam-qi2",
4419 .hmac_driver_name = "hmac-sha1-caam-qi2",
4438 .driver_name = "sha224-caam-qi2",
4440 .hmac_driver_name = "hmac-sha224-caam-qi2",
4459 .driver_name = "sha256-caam-qi2",
4461 .hmac_driver_name = "hmac-sha256-caam-qi2",
4480 .driver_name = "sha384-caam-qi2",
4482 .hmac_driver_name = "hmac-sha384-caam-qi2",
4501 .driver_name = "sha512-caam-qi2",
4503 .hmac_driver_name = "hmac-sha512-caam-qi2",
4522 .driver_name = "md5-caam-qi2",
4524 .hmac_driver_name = "hmac-md5-caam-qi2",
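/*
 * caam_hash_cra_init()/caam_hash_cra_exit() - per-tfm setup and
 * teardown. Keyed (HMAC) transforms first DMA map the key backing
 * store; all flow contexts are then mapped with a single
 * dma_map_single_attrs() call and the individual flc_dma[i] handles are
 * derived by offset. Unkeyed hashes build their shared descriptors
 * here, HMACs defer that until a key is set.
 */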
4555 struct crypto_alg *base = tfm->__crt_alg; in caam_hash_cra_init()
4573 ctx->dev = caam_hash->dev; in caam_hash_cra_init()
4575 if (caam_hash->is_hmac) { in caam_hash_cra_init()
4576 ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key, in caam_hash_cra_init()
4577 ARRAY_SIZE(ctx->key), in caam_hash_cra_init()
4580 if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) { in caam_hash_cra_init()
4581 dev_err(ctx->dev, "unable to map key\n"); in caam_hash_cra_init()
4582 return -ENOMEM; in caam_hash_cra_init()
4586 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc), in caam_hash_cra_init()
4589 if (dma_mapping_error(ctx->dev, dma_addr)) { in caam_hash_cra_init()
4590 dev_err(ctx->dev, "unable to map shared descriptors\n"); in caam_hash_cra_init()
4591 if (ctx->adata.key_dma) in caam_hash_cra_init()
4592 dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma, in caam_hash_cra_init()
4593 ARRAY_SIZE(ctx->key), in caam_hash_cra_init()
4596 return -ENOMEM; in caam_hash_cra_init()
4600 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]); in caam_hash_cra_init()
4603 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; in caam_hash_cra_init()
4605 ctx->ctx_len = runninglen[(ctx->adata.algtype & in caam_hash_cra_init()
4612 * For keyed hash algorithms shared descriptors in caam_hash_cra_init()
4615 return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash); in caam_hash_cra_init()
4622 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc), in caam_hash_cra_exit()
4624 if (ctx->adata.key_dma) in caam_hash_cra_exit()
4625 dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma, in caam_hash_cra_exit()
4626 ARRAY_SIZE(ctx->key), DMA_TO_DEVICE, in caam_hash_cra_exit()
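/*
 * caam_hash_alloc() - instantiate a caam_hash_alg from a template. HMAC
 * instances keep the setkey hook and use the hmac_* names; unkeyed ones
 * clear setkey. Common cra_* fields (context size plus DMA padding,
 * priority, blocksize, ASYNC | ALLOCATES_MEMORY flags) are filled in
 * before registration.
 */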
4639 return ERR_PTR(-ENOMEM); in caam_hash_alloc()
4641 t_alg->ahash_alg = template->template_ahash; in caam_hash_alloc()
4642 halg = &t_alg->ahash_alg; in caam_hash_alloc()
4643 alg = &halg->halg.base; in caam_hash_alloc()
4646 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", in caam_hash_alloc()
4647 template->hmac_name); in caam_hash_alloc()
4648 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", in caam_hash_alloc()
4649 template->hmac_driver_name); in caam_hash_alloc()
4650 t_alg->is_hmac = true; in caam_hash_alloc()
4652 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", in caam_hash_alloc()
4653 template->name); in caam_hash_alloc()
4654 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", in caam_hash_alloc()
4655 template->driver_name); in caam_hash_alloc()
4656 t_alg->ahash_alg.setkey = NULL; in caam_hash_alloc()
4657 t_alg->is_hmac = false; in caam_hash_alloc()
4659 alg->cra_module = THIS_MODULE; in caam_hash_alloc()
4660 alg->cra_init = caam_hash_cra_init; in caam_hash_alloc()
4661 alg->cra_exit = caam_hash_cra_exit; in caam_hash_alloc()
4662 alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding(); in caam_hash_alloc()
4663 alg->cra_priority = CAAM_CRA_PRIORITY; in caam_hash_alloc()
4664 alg->cra_blocksize = template->blocksize; in caam_hash_alloc()
4665 alg->cra_alignmask = 0; in caam_hash_alloc()
4666 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; in caam_hash_alloc()
4668 t_alg->alg_type = template->alg_type; in caam_hash_alloc()
4669 t_alg->dev = dev; in caam_hash_alloc()
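/*
 * Response-path plumbing: dpaa2_caam_fqdan_cb() just schedules NAPI
 * from the FQ data-availability notification, while
 * dpaa2_dpseci_dpio_setup() registers, per CPU, one notification
 * context on the CPU-affine DPIO service for each Rx/Tx queue pair and
 * allocates a dequeue store for it; dpaa2_dpseci_dpio_free() undoes
 * both registrations.
 */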
4679 napi_schedule_irqoff(&ppriv->napi); in dpaa2_caam_fqdan_cb()
4684 struct device *dev = priv->dev; in dpaa2_dpseci_dpio_setup()
4690 ppriv = per_cpu_ptr(priv->ppriv, cpu); in dpaa2_dpseci_dpio_setup()
4691 ppriv->priv = priv; in dpaa2_dpseci_dpio_setup()
4692 nctx = &ppriv->nctx; in dpaa2_dpseci_dpio_setup()
4693 nctx->is_cdan = 0; in dpaa2_dpseci_dpio_setup()
4694 nctx->id = ppriv->rsp_fqid; in dpaa2_dpseci_dpio_setup()
4695 nctx->desired_cpu = cpu; in dpaa2_dpseci_dpio_setup()
4696 nctx->cb = dpaa2_caam_fqdan_cb; in dpaa2_dpseci_dpio_setup()
4699 ppriv->dpio = dpaa2_io_service_select(cpu); in dpaa2_dpseci_dpio_setup()
4700 err = dpaa2_io_service_register(ppriv->dpio, nctx, dev); in dpaa2_dpseci_dpio_setup()
4703 nctx->cb = NULL; in dpaa2_dpseci_dpio_setup()
4710 err = -EPROBE_DEFER; in dpaa2_dpseci_dpio_setup()
4714 ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE, in dpaa2_dpseci_dpio_setup()
4716 if (unlikely(!ppriv->store)) { in dpaa2_dpseci_dpio_setup()
4718 err = -ENOMEM; in dpaa2_dpseci_dpio_setup()
4722 if (++i == priv->num_pairs) in dpaa2_dpseci_dpio_setup()
4730 ppriv = per_cpu_ptr(priv->ppriv, cpu); in dpaa2_dpseci_dpio_setup()
4731 if (!ppriv->nctx.cb) in dpaa2_dpseci_dpio_setup()
4733 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev); in dpaa2_dpseci_dpio_setup()
4737 ppriv = per_cpu_ptr(priv->ppriv, cpu); in dpaa2_dpseci_dpio_setup()
4738 if (!ppriv->store) in dpaa2_dpseci_dpio_setup()
4740 dpaa2_io_store_destroy(ppriv->store); in dpaa2_dpseci_dpio_setup()
4752 ppriv = per_cpu_ptr(priv->ppriv, cpu); in dpaa2_dpseci_dpio_free()
4753 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, in dpaa2_dpseci_dpio_free()
4754 priv->dev); in dpaa2_dpseci_dpio_free()
4755 dpaa2_io_store_destroy(ppriv->store); in dpaa2_dpseci_dpio_free()
4757 if (++i == priv->num_pairs) in dpaa2_dpseci_dpio_free()
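/*
 * dpaa2_dpseci_bind() programs each DPSECI Rx queue to notify the
 * matching per-CPU DPIO: dest_id comes from the registered nctx and the
 * queue's 64-bit user context is set to nctx.qman64, all via
 * dpseci_set_rx_queue().
 */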
4765 struct device *dev = priv->dev; in dpaa2_dpseci_bind()
4772 ppriv = per_cpu_ptr(priv->ppriv, cpu); in dpaa2_dpseci_bind()
4778 rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id; in dpaa2_dpseci_bind()
4784 rx_queue_cfg.user_ctx = ppriv->nctx.qman64; in dpaa2_dpseci_bind()
4786 err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i, in dpaa2_dpseci_bind()
4794 if (++i == priv->num_pairs) in dpaa2_dpseci_bind()
4803 struct device *dev = priv->dev; in dpaa2_dpseci_congestion_free()
4805 if (!priv->cscn_mem) in dpaa2_dpseci_congestion_free()
4808 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); in dpaa2_dpseci_congestion_free()
4809 kfree(priv->cscn_mem); in dpaa2_dpseci_congestion_free()
4814 struct device *dev = priv->dev; in dpaa2_dpseci_free()
4818 if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) { in dpaa2_dpseci_free()
4819 err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle); in dpaa2_dpseci_free()
4825 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle); in dpaa2_dpseci_free()
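/*
 * Rx completion path: dpaa2_caam_process_fd() checks the FD (frame-list
 * format, FD[ERR]), unmaps the frame-list pair and invokes the request
 * callback with FD[FRC]; dpaa2_caam_pull_fq() retries the volatile
 * dequeue while the portal is busy, dpaa2_caam_store_consume() drains
 * the dequeue store, and dpaa2_dpseci_poll() is the NAPI handler that
 * re-arms the notification once the queue is emptied within budget.
 */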
4835 dev_err(priv->dev, "Only Frame List FD format is supported!\n"); in dpaa2_caam_process_fd()
4841 dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err); in dpaa2_caam_process_fd()
4845 * in FD[ERR] or FD[FRC]. in dpaa2_caam_process_fd()
4848 dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt), in dpaa2_caam_process_fd()
4850 req->cbk(req->ctx, dpaa2_fd_get_frc(fd)); in dpaa2_caam_process_fd()
4859 err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid, in dpaa2_caam_pull_fq()
4860 ppriv->store); in dpaa2_caam_pull_fq()
4861 } while (err == -EBUSY); in dpaa2_caam_pull_fq()
4864 dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err); in dpaa2_caam_pull_fq()
4875 dq = dpaa2_io_store_next(ppriv->store, &is_last); in dpaa2_caam_store_consume()
4878 dev_dbg(ppriv->priv->dev, in dpaa2_caam_store_consume()
4880 ppriv->rsp_fqid); in dpaa2_caam_store_consume()
4892 dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq)); in dpaa2_caam_store_consume()
4906 priv = ppriv->priv; in dpaa2_dpseci_poll()
4916 cleaned > budget - DPAA2_CAAM_STORE_SIZE) in dpaa2_dpseci_poll()
4927 err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx); in dpaa2_dpseci_poll()
4929 dev_err(priv->dev, "Notification rearm failed: %d\n", in dpaa2_dpseci_poll()
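/*
 * dpaa2_dpseci_congestion_setup() - only for DPSECI >= 5.1 with
 * DPSECI_OPT_HAS_CG: allocates a congestion state change notification
 * area aligned to both DPAA2_CSCN_ALIGN and the cache line, DMA maps it
 * and points the MC congestion notification at that IOVA;
 * dpaa2_dpseci_congestion_free() unmaps and frees it again.
 */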
4940 struct device *dev = priv->dev; in dpaa2_dpseci_congestion_setup()
4948 if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) || in dpaa2_dpseci_congestion_setup()
4949 !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG)) in dpaa2_dpseci_congestion_setup()
4952 alignmask = DPAA2_CSCN_ALIGN - 1; in dpaa2_dpseci_congestion_setup()
4953 alignmask |= dma_get_cache_alignment() - 1; in dpaa2_dpseci_congestion_setup()
4954 priv->cscn_mem = kzalloc(ALIGN(DPAA2_CSCN_SIZE, alignmask + 1), in dpaa2_dpseci_congestion_setup()
4956 if (!priv->cscn_mem) in dpaa2_dpseci_congestion_setup()
4957 return -ENOMEM; in dpaa2_dpseci_congestion_setup()
4959 priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, in dpaa2_dpseci_congestion_setup()
4961 if (dma_mapping_error(dev, priv->cscn_dma)) { in dpaa2_dpseci_congestion_setup()
4963 err = -ENOMEM; in dpaa2_dpseci_congestion_setup()
4971 cong_notif_cfg.message_iova = priv->cscn_dma; in dpaa2_dpseci_congestion_setup()
4976 err = dpseci_set_congestion_notification(priv->mc_io, 0, token, in dpaa2_dpseci_congestion_setup()
4986 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); in dpaa2_dpseci_congestion_setup()
4988 kfree(priv->cscn_mem); in dpaa2_dpseci_congestion_setup()
4999 ppriv = per_cpu_ptr(priv->ppriv, i); in free_dpaa2_pcpu_netdev()
5000 free_netdev(ppriv->net_dev); in free_dpaa2_pcpu_netdev()
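/*
 * dpaa2_dpseci_setup() - open the DPSECI object, reset it on DPSECI
 * versions newer than 5.3, read the DPSECI and SEC accelerator
 * attributes, set up congestion notification, cap the number of queue
 * pairs to the online CPUs and populate the per-CPU request/response
 * FQIDs; each pair also gets a dummy netdev so NAPI can be attached.
 */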
5006 struct device *dev = &ls_dev->dev; in dpaa2_dpseci_setup()
5013 err = -ENOMEM; in dpaa2_dpseci_setup()
5019 priv->dev = dev; in dpaa2_dpseci_setup()
5020 priv->dpsec_id = ls_dev->obj_desc.id; in dpaa2_dpseci_setup()
5023 err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle); in dpaa2_dpseci_setup()
5029 err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver, in dpaa2_dpseci_setup()
5030 &priv->minor_ver); in dpaa2_dpseci_setup()
5036 dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver); in dpaa2_dpseci_setup()
5038 if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) { in dpaa2_dpseci_setup()
5039 err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle); in dpaa2_dpseci_setup()
5046 err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle, in dpaa2_dpseci_setup()
5047 &priv->dpseci_attr); in dpaa2_dpseci_setup()
5053 err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle, in dpaa2_dpseci_setup()
5054 &priv->sec_attr); in dpaa2_dpseci_setup()
5060 err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle); in dpaa2_dpseci_setup()
5066 priv->num_pairs = min(priv->dpseci_attr.num_rx_queues, in dpaa2_dpseci_setup()
5067 priv->dpseci_attr.num_tx_queues); in dpaa2_dpseci_setup()
5068 if (priv->num_pairs > num_online_cpus()) { in dpaa2_dpseci_setup()
5070 priv->num_pairs - num_online_cpus()); in dpaa2_dpseci_setup()
5071 priv->num_pairs = num_online_cpus(); in dpaa2_dpseci_setup()
5074 for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) { in dpaa2_dpseci_setup()
5075 err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i, in dpaa2_dpseci_setup()
5076 &priv->rx_queue_attr[i]); in dpaa2_dpseci_setup()
5083 for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) { in dpaa2_dpseci_setup()
5084 err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i, in dpaa2_dpseci_setup()
5085 &priv->tx_queue_attr[i]); in dpaa2_dpseci_setup()
5096 j = i % priv->num_pairs; in dpaa2_dpseci_setup()
5098 ppriv = per_cpu_ptr(priv->ppriv, cpu); in dpaa2_dpseci_setup()
5099 ppriv->req_fqid = priv->tx_queue_attr[j].fqid; in dpaa2_dpseci_setup()
5105 if (++i > priv->num_pairs) in dpaa2_dpseci_setup()
5108 ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid; in dpaa2_dpseci_setup()
5109 ppriv->prio = j; in dpaa2_dpseci_setup()
5112 priv->rx_queue_attr[j].fqid, in dpaa2_dpseci_setup()
5113 priv->tx_queue_attr[j].fqid); in dpaa2_dpseci_setup()
5115 ppriv->net_dev = alloc_netdev_dummy(0); in dpaa2_dpseci_setup()
5116 if (!ppriv->net_dev) { in dpaa2_dpseci_setup()
5117 err = -ENOMEM; in dpaa2_dpseci_setup()
5121 ppriv->net_dev->dev = *dev; in dpaa2_dpseci_setup()
5123 netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi, in dpaa2_dpseci_setup()
5136 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle); in dpaa2_dpseci_setup()
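/*
 * dpaa2_dpseci_enable()/dpaa2_dpseci_disable() - enable NAPI on every
 * queue pair before enabling the DPSECI object; on teardown disable the
 * object, check that it reports disabled, then quiesce and free the
 * per-pair NAPI instances and dummy netdevs.
 */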
5146 struct device *dev = priv->dev; in dpaa2_dpseci_enable()
5151 for (i = 0; i < priv->num_pairs; i++) { in dpaa2_dpseci_enable()
5152 ppriv = per_cpu_ptr(priv->ppriv, i); in dpaa2_dpseci_enable()
5153 napi_enable(&ppriv->napi); in dpaa2_dpseci_enable()
5156 return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle); in dpaa2_dpseci_enable()
5161 struct device *dev = priv->dev; in dpaa2_dpseci_disable()
5166 err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle); in dpaa2_dpseci_disable()
5172 err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled); in dpaa2_dpseci_disable()
5180 for (i = 0; i < priv->num_pairs; i++) { in dpaa2_dpseci_disable()
5181 ppriv = per_cpu_ptr(priv->ppriv, i); in dpaa2_dpseci_disable()
5182 napi_disable(&ppriv->napi); in dpaa2_dpseci_disable()
5183 netif_napi_del(&ppriv->napi); in dpaa2_dpseci_disable()
5184 free_netdev(ppriv->net_dev); in dpaa2_dpseci_disable()
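/*
 * dpaa2_caam_probe() - little-endian CAAM is assumed on all DPAA2 SoCs,
 * then the MC portal is allocated (deferring probe if none is available
 * yet), per-CPU state is set up and the skcipher/aead/ahash algorithms
 * are registered, each one gated on the presence of the matching
 * accelerator (DES, AES, CCHA, PTHA, MD) in priv->sec_attr.
 */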
5200 * There is no way to get CAAM endianness - there is no direct register in dpaa2_caam_probe()
5202 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this in dpaa2_caam_probe()
5209 dev = &dpseci_dev->dev; in dpaa2_caam_probe()
5213 return -ENOMEM; in dpaa2_caam_probe()
5217 priv->domain = iommu_get_domain_for_dev(dev); in dpaa2_caam_probe()
5223 return -ENOMEM; in dpaa2_caam_probe()
5233 err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io); in dpaa2_caam_probe()
5235 if (err == -ENXIO) in dpaa2_caam_probe()
5236 err = -EPROBE_DEFER; in dpaa2_caam_probe()
5243 priv->ppriv = alloc_percpu(*priv->ppriv); in dpaa2_caam_probe()
5244 if (!priv->ppriv) { in dpaa2_caam_probe()
5246 err = -ENOMEM; in dpaa2_caam_probe()
5283 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; in dpaa2_caam_probe()
5286 if (!priv->sec_attr.des_acc_num && in dpaa2_caam_probe()
5292 if (!priv->sec_attr.aes_acc_num && in dpaa2_caam_probe()
5298 !priv->sec_attr.ccha_acc_num) in dpaa2_caam_probe()
5301 t_alg->caam.dev = dev; in dpaa2_caam_probe()
5304 err = crypto_register_skcipher(&t_alg->skcipher); in dpaa2_caam_probe()
5307 t_alg->skcipher.base.cra_driver_name, err); in dpaa2_caam_probe()
5311 t_alg->registered = true; in dpaa2_caam_probe()
5317 u32 c1_alg_sel = t_alg->caam.class1_alg_type & in dpaa2_caam_probe()
5319 u32 c2_alg_sel = t_alg->caam.class2_alg_type & in dpaa2_caam_probe()
5323 if (!priv->sec_attr.des_acc_num && in dpaa2_caam_probe()
5329 if (!priv->sec_attr.aes_acc_num && in dpaa2_caam_probe()
5335 !priv->sec_attr.ccha_acc_num) in dpaa2_caam_probe()
5340 !priv->sec_attr.ptha_acc_num) in dpaa2_caam_probe()
5348 !priv->sec_attr.md_acc_num) in dpaa2_caam_probe()
5351 t_alg->caam.dev = dev; in dpaa2_caam_probe()
5354 err = crypto_register_aead(&t_alg->aead); in dpaa2_caam_probe()
5357 t_alg->aead.base.cra_driver_name, err); in dpaa2_caam_probe()
5361 t_alg->registered = true; in dpaa2_caam_probe()
5374 if (!priv->sec_attr.md_acc_num) in dpaa2_caam_probe()
5386 alg->hmac_driver_name, err); in dpaa2_caam_probe()
5390 err = crypto_register_ahash(&t_alg->ahash_alg); in dpaa2_caam_probe()
5393 t_alg->ahash_alg.halg.base.cra_driver_name, in dpaa2_caam_probe()
5397 list_add_tail(&t_alg->entry, &hash_list); in dpaa2_caam_probe()
5405 alg->driver_name, err); in dpaa2_caam_probe()
5409 err = crypto_register_ahash(&t_alg->ahash_alg); in dpaa2_caam_probe()
5412 t_alg->ahash_alg.halg.base.cra_driver_name, in dpaa2_caam_probe()
5416 list_add_tail(&t_alg->entry, &hash_list); in dpaa2_caam_probe()
5429 free_percpu(priv->ppriv); in dpaa2_caam_probe()
5431 fsl_mc_portal_free(priv->mc_io); in dpaa2_caam_probe()
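/*
 * dpaa2_caam_remove() - mirror of probe: unregister every aead,
 * skcipher and ahash algorithm that was actually registered, then
 * release the per-CPU state and the MC portal.
 */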
5444 dev = &ls_dev->dev; in dpaa2_caam_remove()
5452 if (t_alg->registered) in dpaa2_caam_remove()
5453 crypto_unregister_aead(&t_alg->aead); in dpaa2_caam_remove()
5459 if (t_alg->registered) in dpaa2_caam_remove()
5460 crypto_unregister_skcipher(&t_alg->skcipher); in dpaa2_caam_remove()
5467 crypto_unregister_ahash(&t_hash_alg->ahash_alg); in dpaa2_caam_remove()
5468 list_del(&t_hash_alg->entry); in dpaa2_caam_remove()
5476 free_percpu(priv->ppriv); in dpaa2_caam_remove()
5477 fsl_mc_portal_free(priv->mc_io); in dpaa2_caam_remove()
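/*
 * dpaa2_caam_enqueue() - fast path shared by all request types above:
 * back-pressure with -EBUSY if the CSCN area reports congestion, DMA
 * map the two-entry frame list, build an FD pointing at it (with the
 * flow context in FD[FLC]) and retry the enqueue on the per-CPU DPIO
 * while it returns -EBUSY; success is reported as -EINPROGRESS, an
 * exhausted retry loop unmaps the frame list and fails with -EIO.
 */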
5491 if (priv->cscn_mem) { in dpaa2_caam_enqueue()
5492 dma_sync_single_for_cpu(priv->dev, priv->cscn_dma, in dpaa2_caam_enqueue()
5495 if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) { in dpaa2_caam_enqueue()
5497 return -EBUSY; in dpaa2_caam_enqueue()
5501 dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma); in dpaa2_caam_enqueue()
5503 req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt), in dpaa2_caam_enqueue()
5505 if (dma_mapping_error(dev, req->fd_flt_dma)) { in dpaa2_caam_enqueue()
5512 dpaa2_fd_set_addr(&fd, req->fd_flt_dma); in dpaa2_caam_enqueue()
5513 dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1])); in dpaa2_caam_enqueue()
5514 dpaa2_fd_set_flc(&fd, req->flc_dma); in dpaa2_caam_enqueue()
5516 ppriv = raw_cpu_ptr(priv->ppriv); in dpaa2_caam_enqueue()
5517 for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) { in dpaa2_caam_enqueue()
5518 err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid, in dpaa2_caam_enqueue()
5520 if (err != -EBUSY) in dpaa2_caam_enqueue()
5531 return -EINPROGRESS; in dpaa2_caam_enqueue()
5534 dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt), in dpaa2_caam_enqueue()
5536 return -EIO; in dpaa2_caam_enqueue()