Lines Matching refs:qidev
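(Identifier cross-reference output. The function names and source line numbers below match the CAAM queue-interface crypto glue, apparently drivers/crypto/caam/caamalg_qi.c; qidev is the struct device * the driver uses for DMA mapping, diagnostics, and request submission.)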

74 struct device *qidev; member
859 drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc); in get_drv_ctx()
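The single get_drv_ctx() hit (line 859) is where qidev is handed to the QI backend: caam_drv_ctx_init() builds a per-CPU driver context around the shared descriptor desc. A hedged sketch of that call site, with the surrounding error handling inferred rather than copied:

	int cpu = smp_processor_id();
	struct caam_drv_ctx *drv_ctx;

	/* creates the frame queues and DMA-maps the shared descriptor
	 * 'desc'; signature inferred from the line-859 call site */
	drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
	if (IS_ERR_OR_NULL(drv_ctx))
		return drv_ctx;	/* caller propagates the error */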
919 struct device *qidev; in aead_done() local
926 qidev = caam_ctx->qidev; in aead_done()
929 ecode = caam_jr_strstatus(qidev, status); in aead_done()
932 aead_unmap(qidev, edesc, aead_req); in aead_done()
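The aead_done() hits (919-932) show the completion side: recover the context from the driver request, translate the hardware status on qidev via caam_jr_strstatus(), unmap, then complete the request. A minimal sketch of that shape; my_unmap() is a placeholder for the driver's aead_unmap():

	static void done_sketch(struct caam_drv_req *drv_req, u32 status)
	{
		struct aead_request *req = drv_req->app_ctx;
		struct caam_ctx *caam_ctx =
			crypto_aead_ctx(crypto_aead_reqtfm(req));
		struct device *qidev = caam_ctx->qidev;
		int ecode = 0;

		if (unlikely(status))
			ecode = caam_jr_strstatus(qidev, status); /* HW status -> errno */

		my_unmap(qidev, req);		/* placeholder for aead_unmap() */
		aead_request_complete(req, ecode);
	}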
948 struct device *qidev = ctx->qidev; in aead_edesc_alloc() local
969 dev_err(qidev, "could not allocate extended descriptor\n"); in aead_edesc_alloc()
979 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", in aead_edesc_alloc()
985 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, in aead_edesc_alloc()
988 dev_err(qidev, "unable to map source\n"); in aead_edesc_alloc()
998 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", in aead_edesc_alloc()
1006 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", in aead_edesc_alloc()
1013 mapped_src_nents = dma_map_sg(qidev, req->src, in aead_edesc_alloc()
1016 dev_err(qidev, "unable to map source\n"); in aead_edesc_alloc()
1025 mapped_dst_nents = dma_map_sg(qidev, req->dst, in aead_edesc_alloc()
1029 dev_err(qidev, "unable to map destination\n"); in aead_edesc_alloc()
1030 dma_unmap_sg(qidev, req->src, src_nents, in aead_edesc_alloc()
1068 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", in aead_edesc_alloc()
1070 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, in aead_edesc_alloc()
1082 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); in aead_edesc_alloc()
1083 if (dma_mapping_error(qidev, iv_dma)) { in aead_edesc_alloc()
1084 dev_err(qidev, "unable to map IV\n"); in aead_edesc_alloc()
1085 caam_unmap(qidev, req->src, req->dst, src_nents, in aead_edesc_alloc()
1100 edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4, in aead_edesc_alloc()
1102 if (dma_mapping_error(qidev, edesc->assoclen_dma)) { in aead_edesc_alloc()
1103 dev_err(qidev, "unable to map assoclen\n"); in aead_edesc_alloc()
1104 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, in aead_edesc_alloc()
1122 qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); in aead_edesc_alloc()
1123 if (dma_mapping_error(qidev, qm_sg_dma)) { in aead_edesc_alloc()
1124 dev_err(qidev, "unable to map S/G table\n"); in aead_edesc_alloc()
1125 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); in aead_edesc_alloc()
1126 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, in aead_edesc_alloc()
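Most aead_edesc_alloc() hits are one idiom repeated: each dma_map_*() call is checked with dma_mapping_error(), logged with dev_err() on qidev, and everything mapped earlier is released before bailing out. A condensed sketch of the IV/assoclen steps (lines 1082-1104); release_sg() stands in for the driver's caam_unmap():

	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		release_sg(qidev, req);		/* undo src/dst S/G mappings */
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		/* unwind in reverse order of mapping */
		dma_unmap_single(qidev, iv_dma, ivsize, DMA_TO_DEVICE);
		release_sg(qidev, req);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}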
1177 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); in aead_crypt()
1181 aead_unmap(ctx->qidev, edesc, req); in aead_crypt()
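The aead_crypt() hits (1177-1181) show the submit contract: caam_qi_enqueue() on ctx->qidev either owns the request until aead_done() fires, or the caller must unwind its own mappings. Roughly, as read from the listing:

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;	/* completion arrives in aead_done() */
	} else {
		aead_unmap(ctx->qidev, edesc, req);	/* HW never saw it */
		qi_cache_free(edesc);
	}
	return ret;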
1222 struct device *qidev = caam_ctx->qidev; in skcipher_done() local
1226 dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); in skcipher_done()
1231 ecode = caam_jr_strstatus(qidev, status); in skcipher_done()
1240 skcipher_unmap(qidev, edesc, req); in skcipher_done()
1259 struct device *qidev = ctx->qidev; in skcipher_edesc_alloc() local
1278 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", in skcipher_edesc_alloc()
1286 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", in skcipher_edesc_alloc()
1291 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, in skcipher_edesc_alloc()
1294 dev_err(qidev, "unable to map source\n"); in skcipher_edesc_alloc()
1298 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, in skcipher_edesc_alloc()
1301 dev_err(qidev, "unable to map destination\n"); in skcipher_edesc_alloc()
1302 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); in skcipher_edesc_alloc()
1306 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, in skcipher_edesc_alloc()
1309 dev_err(qidev, "unable to map source\n"); in skcipher_edesc_alloc()
1337 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", in skcipher_edesc_alloc()
1339 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, in skcipher_edesc_alloc()
1347 dev_err(qidev, "could not allocate extended descriptor\n"); in skcipher_edesc_alloc()
1348 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, in skcipher_edesc_alloc()
1365 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL); in skcipher_edesc_alloc()
1366 if (dma_mapping_error(qidev, iv_dma)) { in skcipher_edesc_alloc()
1367 dev_err(qidev, "unable to map IV\n"); in skcipher_edesc_alloc()
1368 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, in skcipher_edesc_alloc()
1385 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, in skcipher_edesc_alloc()
1387 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { in skcipher_edesc_alloc()
1388 dev_err(qidev, "unable to map S/G table\n"); in skcipher_edesc_alloc()
1389 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, in skcipher_edesc_alloc()
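Worth noting across the two allocators: the AEAD path maps the IV DMA_TO_DEVICE (line 1082), while the skcipher path maps it DMA_BIDIRECTIONAL (line 1365), since chaining modes hand the updated IV back to the caller:

	/* aead_edesc_alloc(), line 1082: engine only reads the IV */
	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);

	/* skcipher_edesc_alloc(), line 1365: e.g. CBC writes the next IV
	 * back into the same buffer, so map it both ways */
	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);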
1460 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); in skcipher_crypt()
1464 skcipher_unmap(ctx->qidev, edesc, req); in skcipher_crypt()
2496 ctx->qidev = dev; in caam_init_common()
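Finally, caam_init_common() (line 2496) is where the pointer originates: the device chosen at tfm-init time is cached in the per-transform context, and every qidev use above (DMA mapping, dev_err/dev_dbg logging, enqueue) reads this one field.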