/linux-6.12.1/drivers/crypto/qce/

core.c:
     40:  static void qce_unregister_algs(struct qce_device *qce)
     47:  ops->unregister_algs(qce);
     51:  static int qce_register_algs(struct qce_device *qce)
     58:  ret = ops->register_algs(qce);
     83:  static int qce_handle_queue(struct qce_device *qce,
     90:  spin_lock_irqsave(&qce->lock, flags);
     93:  ret = crypto_enqueue_request(&qce->queue, req);
     96:  if (qce->req) {
     97:  spin_unlock_irqrestore(&qce->lock, flags);
    101:  backlog = crypto_get_backlog(&qce->queue);
    [all …]

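The core.c lines sketch the driver's software queue: a request is enqueued under a spinlock, at most one request is handed to the hardware at a time, and a backlogged submitter is notified once its request reaches the front. Below is a minimal, hedged reconstruction of that dispatch pattern using the kernel's crypto_queue helpers; struct my_engine and my_start_req() are illustrative placeholders, not the driver's own names, and the queue is assumed to have been set up elsewhere with crypto_init_queue().

    #include <crypto/algapi.h>
    #include <linux/crypto.h>
    #include <linux/errno.h>
    #include <linux/spinlock.h>

    struct my_engine {
        spinlock_t lock;
        struct crypto_queue queue;
        struct crypto_async_request *req;   /* request currently on the hardware */
    };

    /* Placeholder for programming the hardware with a request. */
    static int my_start_req(struct my_engine *e, struct crypto_async_request *req)
    {
        return 0;
    }

    static int my_handle_queue(struct my_engine *e, struct crypto_async_request *req)
    {
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;
        int ret = 0, err;

        spin_lock_irqsave(&e->lock, flags);

        /* New submissions are queued first; they may return -EBUSY (backlogged). */
        if (req)
            ret = crypto_enqueue_request(&e->queue, req);

        /* The hardware is still busy with an earlier request: leave it queued. */
        if (e->req) {
            spin_unlock_irqrestore(&e->lock, flags);
            return ret;
        }

        backlog = crypto_get_backlog(&e->queue);
        async_req = crypto_dequeue_request(&e->queue);
        if (async_req)
            e->req = async_req;

        spin_unlock_irqrestore(&e->lock, flags);

        if (!async_req)
            return ret;

        /* Tell a backlogged submitter that its request is now in progress. */
        if (backlog)
            crypto_request_complete(backlog, -EINPROGRESS);

        err = my_start_req(e, async_req);
        if (err) {
            spin_lock_irqsave(&e->lock, flags);
            e->req = NULL;
            spin_unlock_irqrestore(&e->lock, flags);
            crypto_request_complete(async_req, err);
        }
        return ret;
    }
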
common.c:
     21:  static inline u32 qce_read(struct qce_device *qce, u32 offset)
     23:  return readl(qce->base + offset);
     26:  static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
     28:  writel(val, qce->base + offset);
     31:  static inline void qce_write_array(struct qce_device *qce, u32 offset,
     37:  qce_write(qce, offset + i * sizeof(u32), val[i]);
     41:  qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
     46:  qce_write(qce, offset + i * sizeof(u32), 0);
     49:  static u32 qce_config_reg(struct qce_device *qce, int little)
     51:  u32 beats = (qce->burst_size >> 3) - 1;
    [all …]

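common.c wraps all hardware access in small readl()/writel() helpers keyed off a single ioremapped base, so register offsets stay symbolic everywhere else in the driver. A sketch of the same accessor pattern, with struct and helper names that are illustrative rather than the driver's own:

    #include <linux/io.h>
    #include <linux/types.h>

    struct my_dev {
        void __iomem *base;             /* ioremapped register block */
    };

    static inline u32 my_read(struct my_dev *d, u32 offset)
    {
        return readl(d->base + offset);
    }

    static inline void my_write(struct my_dev *d, u32 offset, u32 val)
    {
        writel(val, d->base + offset);
    }

    /* Write an array of u32 values into consecutive registers. */
    static inline void my_write_array(struct my_dev *d, u32 offset,
                                      const u32 *val, unsigned int len)
    {
        unsigned int i;

        for (i = 0; i < len; i++)
            my_write(d, offset + i * sizeof(u32), val[i]);
    }

    /* Zero a run of consecutive registers, e.g. to clear key or IV slots. */
    static inline void my_clear_array(struct my_dev *d, u32 offset, unsigned int len)
    {
        unsigned int i;

        for (i = 0; i < len; i++)
            my_write(d, offset + i * sizeof(u32), 0);
    }
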
sha.c:
     43:  struct qce_device *qce = tmpl->qce;
     44:  struct qce_result_dump *result = qce->dma.result_buf;
     49:  error = qce_dma_terminate_all(&qce->dma);
     51:  dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);
     53:  dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
     54:  dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
     63:  error = qce_check_status(qce, &status);
     65:  dev_dbg(qce->dev, "ahash operation error (%x)\n", status);
     72:  qce->async_req_done(tmpl->qce, error);
     81:  struct qce_device *qce = tmpl->qce;
    [all …]

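These sha.c lines come from the hash completion path: the driver stops the DMA channel, unmaps the scatterlists it mapped at submission time, reads back the engine status, and only then signals completion so the next queued request can be issued. A condensed, hedged sketch of that teardown order; all my_* types and helpers are illustrative placeholders standing in for the qce equivalents listed above:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/types.h>

    struct my_engine {
        struct device *dev;
    };

    struct my_hash_reqctx {
        struct my_engine *engine;
        struct scatterlist *src;
        int src_nents;
        struct scatterlist result_sg;
    };

    /* Placeholder stubs for the engine-specific helpers. */
    static int my_dma_terminate_all(struct my_engine *e) { return 0; }
    static int my_check_status(struct my_engine *e, u32 *status) { *status = 0; return 0; }
    static void my_async_req_done(struct my_engine *e, int err) { }

    static void my_hash_done(struct my_hash_reqctx *rctx)
    {
        struct my_engine *e = rctx->engine;
        u32 status;
        int error;

        /* Quiesce DMA before touching the buffers it was feeding. */
        error = my_dma_terminate_all(e);
        if (error)
            dev_dbg(e->dev, "dma termination error (%d)\n", error);

        /* Undo the mappings created when the request was submitted. */
        dma_unmap_sg(e->dev, rctx->src, rctx->src_nents, DMA_TO_DEVICE);
        dma_unmap_sg(e->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);

        /* Fold the engine status into the result reported to the caller. */
        error = my_check_status(e, &status);
        if (error)
            dev_dbg(e->dev, "operation error (%x)\n", status);

        /* Complete the request; the core can now dispatch the next one. */
        my_async_req_done(e, error);
    }
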
skcipher.c:
     33:  struct qce_device *qce = tmpl->qce;
     34:  struct qce_result_dump *result_buf = qce->dma.result_buf;
     44:  error = qce_dma_terminate_all(&qce->dma);
     46:  dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
     50:  dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
     51:  dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
     55:  error = qce_check_status(qce, &status);
     57:  dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
     60:  qce->async_req_done(tmpl->qce, error);
     70:  struct qce_device *qce = tmpl->qce;
    [all …]

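skcipher.c unmaps source and destination with separate DMA directions (dir_src/dir_dst), because the two scatterlists need different treatment depending on whether the request is in-place. A hedged sketch of that direction choice; the helper and variable names are illustrative:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/types.h>

    /* When src and dst are the same scatterlist, the single mapping must be
     * bidirectional; otherwise the source is only read and the destination
     * only written by the device.
     */
    static void my_pick_dma_dirs(struct scatterlist *src, struct scatterlist *dst,
                                 enum dma_data_direction *dir_src,
                                 enum dma_data_direction *dir_dst)
    {
        bool diff_dst = (src != dst);

        *dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
        *dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
    }

The completion path then reuses the same direction values when it tears the mappings down, as in the dma_unmap_sg() calls listed above.
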
aead.c:
     30:  struct qce_device *qce = tmpl->qce;
     31:  struct qce_result_dump *result_buf = qce->dma.result_buf;
     44:  error = qce_dma_terminate_all(&qce->dma);
     46:  dev_dbg(qce->dev, "aead dma termination error (%d)\n",
     49:  dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
     51:  dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
     66:  error = qce_check_status(qce, &status);
     68:  dev_err(qce->dev, "aead operation error (%x)\n", status);
     89:  qce->async_req_done(qce, error);
     97:  struct qce_device *qce = tmpl->qce;
    [all …]

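For AEAD decryption the completion path additionally has to verify the authentication tag produced by the engine against the tag carried in the source buffer, in constant time. The following is a generic, hedged sketch of such a check using crypto_memneq(); it illustrates the idea only and is not lifted from qce_aead_done():

    #include <crypto/algapi.h>      /* crypto_memneq() */
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Compare a computed tag against the expected one without leaking timing
     * information; -EBADMSG is the conventional return for a tag mismatch.
     */
    static int my_verify_tag(const u8 *computed, const u8 *expected, unsigned int len)
    {
        return crypto_memneq(computed, expected, len) ? -EBADMSG : 0;
    }
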
core.h:
     42:  int (*async_req_enqueue)(struct qce_device *qce,
     44:  void (*async_req_done)(struct qce_device *qce, int ret);
     56:  int (*register_algs)(struct qce_device *qce);
     57:  void (*unregister_algs)(struct qce_device *qce);

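core.h declares the glue between the core and the individual algorithm files as plain function pointers: each algorithm class registers and unregisters its transforms, and feeds work back to the core through the enqueue/done callbacks. A simplified illustration of how such a table is typically wired up at probe time; all my_* names are placeholders, not the driver's actual structures:

    #include <linux/crypto.h>

    struct my_engine;

    struct my_algo_ops {
        int (*register_algs)(struct my_engine *e);
        void (*unregister_algs)(struct my_engine *e);
    };

    struct my_engine {
        int (*async_req_enqueue)(struct my_engine *e,
                                 struct crypto_async_request *req);
        void (*async_req_done)(struct my_engine *e, int ret);
    };

    /* Register every algorithm class, unwinding on failure. */
    static int my_register_all(struct my_engine *e,
                               const struct my_algo_ops * const *ops, int n)
    {
        int i, ret;

        for (i = 0; i < n; i++) {
            ret = ops[i]->register_algs(e);
            if (ret)
                goto err;
        }
        return 0;
    err:
        while (--i >= 0)
            ops[i]->unregister_algs(e);
        return ret;
    }
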
common.h:
     94:  struct qce_device *qce;
    100:  int qce_check_status(struct qce_device *qce, u32 *status);
    101:  void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step);

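A small usage sketch for the two helpers declared above, assuming the driver-local headers are available and qce points at an initialised struct qce_device; the surrounding function is illustrative:

    #include <linux/device.h>
    #include "core.h"       /* struct qce_device (assumed) */
    #include "common.h"     /* qce_check_status(), qce_get_version() */

    static void my_log_engine_info(struct qce_device *qce)
    {
        u32 major, minor, step, status;

        qce_get_version(qce, &major, &minor, &step);
        dev_dbg(qce->dev, "crypto engine version %u.%u.%u\n", major, minor, step);

        if (qce_check_status(qce, &status))
            dev_dbg(qce->dev, "engine reported status 0x%x\n", status);
    }
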
/linux-6.12.1/Documentation/devicetree/bindings/crypto/

qcom-qce.yaml:
      4:  $id: http://devicetree.org/schemas/crypto/qcom-qce.yaml#
     13:  This document defines the binding for the QCE crypto
     29:  - qcom,ipq4019-qce
     30:  - qcom,sm8150-qce
     31:  - const: qcom,qce
     35:  - qcom,ipq6018-qce
     36:  - qcom,ipq8074-qce
     37:  - qcom,ipq9574-qce
     38:  - qcom,msm8996-qce
     39:  - qcom,qcm2290-qce
    [all …]

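The binding lists SoC-specific compatibles that all fall back to a generic string, either "qcom,qce" directly or via "qcom,ipq4019-qce"/"qcom,sm8150-qce", so a driver only has to match the fallbacks; the dtsi nodes later in this listing show the resulting three-entry compatible chains. A hedged sketch of how such a fallback is matched in C; the table below is illustrative and not the driver's actual of_match table:

    #include <linux/mod_devicetable.h>
    #include <linux/module.h>
    #include <linux/of.h>

    /* Matching the fallback compatibles covers every SoC node that lists one
     * of them last, e.g. "qcom,sm8550-qce", "qcom,sm8150-qce", "qcom,qce".
     */
    static const struct of_device_id my_qce_of_match[] = {
        { .compatible = "qcom,ipq4019-qce" },
        { .compatible = "qcom,sm8150-qce" },
        { .compatible = "qcom,qce" },
        { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(of, my_qce_of_match);
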
/linux-6.12.1/drivers/crypto/

Kconfig:
    569:  prompt "Algorithms enabled for QCE acceleration"
    576:  The QCE engine does not appear to scale as well as the CPU to handle
    578:  QCE handles only 2 requests in parallel.
    635:  qce at 256-bytes, 30% faster at 512, and about even at 768-bytes.

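The Kconfig help text describes a size-dependent trade-off between the CPU and the engine (compared at 256-, 512- and 768-byte requests) and notes that the engine only keeps two requests in flight. A common response is to route short requests to a software implementation and reserve the hardware for larger ones; below is a hedged sketch of that dispatch, where MY_QCE_SW_THRESHOLD and my_hw_encrypt() are illustrative assumptions rather than the driver's own names or default:

    #include <crypto/skcipher.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define MY_QCE_SW_THRESHOLD    512      /* bytes; illustrative value */

    /* Placeholder for queueing the request to the hardware. */
    static int my_hw_encrypt(struct skcipher_request *req)
    {
        return -EINPROGRESS;
    }

    /* "fallback" would be allocated with crypto_alloc_sync_skcipher() when the
     * transform is initialised; short requests are run synchronously on it.
     */
    static int my_encrypt(struct skcipher_request *req,
                          struct crypto_sync_skcipher *fallback)
    {
        if (req->cryptlen < MY_QCE_SW_THRESHOLD) {
            SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fallback);

            skcipher_request_set_sync_tfm(subreq, fallback);
            skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
            skcipher_request_set_crypt(subreq, req->src, req->dst,
                                       req->cryptlen, req->iv);
            return crypto_skcipher_encrypt(subreq);
        }

        return my_hw_encrypt(req);
    }
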
Makefile:
     35:  obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/

/linux-6.12.1/arch/arm64/boot/dts/qcom/

ipq9574.dtsi:268    compatible = "qcom,ipq9574-qce", "qcom,ipq4019-qce", "qcom,qce";
sm6115.dtsi:852     compatible = "qcom,sm6115-qce", "qcom,ipq4019-qce", "qcom,qce";
sm6350.dtsi:1227    compatible = "qcom,sm6350-qce", "qcom,sm8150-qce", "qcom,qce";
sm8350.dtsi:1817    compatible = "qcom,sm8350-qce", "qcom,sm8150-qce", "qcom,qce";
sm8550.dtsi:1961    compatible = "qcom,sm8550-qce", "qcom,sm8150-qce", "qcom,qce";
sm8450.dtsi:4565    compatible = "qcom,sm8450-qce", "qcom,sm8150-qce", "qcom,qce";
sm8650.dtsi:2502    compatible = "qcom,sm8650-qce", "qcom,sm8150-qce", "qcom,qce";
sc7280.dtsi:2415    compatible = "qcom,sc7280-qce", "qcom,sm8150-qce", "qcom,qce";
sm8250.dtsi:2606    compatible = "qcom,sm8250-qce", "qcom,sm8150-qce", "qcom,qce";
sm8150.dtsi:2154    compatible = "qcom,sm8150-qce", "qcom,qce";

/linux-6.12.1/

MAINTAINERS:
    19015:  F:  Documentation/devicetree/bindings/crypto/qcom-qce.yaml
    19016:  F:  drivers/crypto/qce/