Full-text search for "qce" (results 1 – 21 of 21, sorted by relevance)

/linux-6.12.1/drivers/crypto/qce/
core.c
40 static void qce_unregister_algs(struct qce_device *qce) in qce_unregister_algs() argument
47 ops->unregister_algs(qce); in qce_unregister_algs()
51 static int qce_register_algs(struct qce_device *qce) in qce_register_algs() argument
58 ret = ops->register_algs(qce); in qce_register_algs()
83 static int qce_handle_queue(struct qce_device *qce, in qce_handle_queue() argument
90 spin_lock_irqsave(&qce->lock, flags); in qce_handle_queue()
93 ret = crypto_enqueue_request(&qce->queue, req); in qce_handle_queue()
96 if (qce->req) { in qce_handle_queue()
97 spin_unlock_irqrestore(&qce->lock, flags); in qce_handle_queue()
101 backlog = crypto_get_backlog(&qce->queue); in qce_handle_queue()
[all …]
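
The core.c hits above show the crypto-engine queueing pattern this driver uses: a request is first pushed onto a software queue under a spinlock, and a new hardware operation is started only when no request is currently in flight. Below is a minimal sketch of that pattern built on the generic crypto_queue helpers; the struct is a cut-down stand-in for struct qce_device, modelling only the lock, queue and req fields visible in the hits, and it is not the driver's actual code.

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>      /* crypto_queue, crypto_enqueue_request(), ... */

/* Cut-down stand-in for struct qce_device; only the fields visible in the
 * search hits (lock, queue, req) are modelled here. */
struct qce_dev_sketch {
        spinlock_t lock;
        struct crypto_queue queue;
        struct crypto_async_request *req;
};

/* Sketch of the qce_handle_queue() flow: enqueue, then start the next
 * request only if the engine is idle. */
static int handle_queue_sketch(struct qce_dev_sketch *qce,
                               struct crypto_async_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&qce->lock, flags);

        /* New work always goes through the software queue first. */
        if (req)
                ret = crypto_enqueue_request(&qce->queue, req);

        /* One request in flight at a time: if busy, leave the new one queued. */
        if (qce->req) {
                spin_unlock_irqrestore(&qce->lock, flags);
                return ret;
        }

        backlog = crypto_get_backlog(&qce->queue);
        async_req = crypto_dequeue_request(&qce->queue);
        if (async_req)
                qce->req = async_req;

        spin_unlock_irqrestore(&qce->lock, flags);

        if (!async_req)
                return ret;

        /* A backlogged request must be notified before it is processed. */
        if (backlog)
                crypto_request_complete(backlog, -EINPROGRESS);

        /* The real driver would now hand async_req to a per-algorithm
         * handler that programs the hardware. */
        return ret;
}

The completion side (the async_req_done callback in the hits) presumably clears the in-flight request and calls back into this function with a NULL request to pull the next item off the queue.
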
common.c
21 static inline u32 qce_read(struct qce_device *qce, u32 offset) in qce_read() argument
23 return readl(qce->base + offset); in qce_read()
26 static inline void qce_write(struct qce_device *qce, u32 offset, u32 val) in qce_write() argument
28 writel(val, qce->base + offset); in qce_write()
31 static inline void qce_write_array(struct qce_device *qce, u32 offset, in qce_write_array() argument
37 qce_write(qce, offset + i * sizeof(u32), val[i]); in qce_write_array()
41 qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len) in qce_clear_array() argument
46 qce_write(qce, offset + i * sizeof(u32), 0); in qce_clear_array()
49 static u32 qce_config_reg(struct qce_device *qce, int little) in qce_config_reg() argument
51 u32 beats = (qce->burst_size >> 3) - 1; in qce_config_reg()
[all …]
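
common.c keeps all hardware access behind thin MMIO wrappers over readl()/writel() against the device's mapped register base, plus array variants for programming multi-word values such as keys and IVs. A self-contained sketch of that accessor pattern follows; the struct name is a stand-in, with only the register base taken from the hits.

#include <linux/io.h>           /* readl(), writel() */
#include <linux/types.h>

/* Stand-in for struct qce_device: just the ioremapped register base. */
struct qce_regs_sketch {
        void __iomem *base;
};

static inline u32 reg_read(struct qce_regs_sketch *qce, u32 offset)
{
        return readl(qce->base + offset);
}

static inline void reg_write(struct qce_regs_sketch *qce, u32 offset, u32 val)
{
        writel(val, qce->base + offset);
}

/* Program a contiguous bank of 32-bit registers, e.g. a key or an IV. */
static void reg_write_array(struct qce_regs_sketch *qce, u32 offset,
                            const u32 *val, unsigned int len)
{
        unsigned int i;

        for (i = 0; i < len; i++)
                reg_write(qce, offset + i * sizeof(u32), val[i]);
}

/* Zero a register bank, e.g. to clear stale key material. */
static void reg_clear_array(struct qce_regs_sketch *qce, u32 offset,
                            unsigned int len)
{
        unsigned int i;

        for (i = 0; i < len; i++)
                reg_write(qce, offset + i * sizeof(u32), 0);
}

qce_config_reg() in the hits builds on the same helpers, deriving a DMA beat count from the burst size ((burst_size >> 3) - 1) before composing the config register value.
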
sha.c
43 struct qce_device *qce = tmpl->qce; in qce_ahash_done() local
44 struct qce_result_dump *result = qce->dma.result_buf; in qce_ahash_done()
49 error = qce_dma_terminate_all(&qce->dma); in qce_ahash_done()
51 dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error); in qce_ahash_done()
53 dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); in qce_ahash_done()
54 dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE); in qce_ahash_done()
63 error = qce_check_status(qce, &status); in qce_ahash_done()
65 dev_dbg(qce->dev, "ahash operation error (%x)\n", status); in qce_ahash_done()
72 qce->async_req_done(tmpl->qce, error); in qce_ahash_done()
81 struct qce_device *qce = tmpl->qce; in qce_ahash_async_req_handle() local
[all …]
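
The sha.c completion handler follows the usual DMA-offload teardown order: terminate DMA activity, unmap the scatterlists that were mapped at submit time, check the engine's status register, and report completion back to the core so the next queued request can run. Below is a condensed sketch of that flow; the context struct and the status read-back are illustrative stand-ins for the driver's qce_device and qce_check_status(), not copies of them.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Illustrative stand-in for the bits of device and request context used by
 * qce_ahash_done() in the hits above. */
struct hash_done_sketch {
        struct device *dev;             /* qce->dev */
        struct dma_chan *chan;          /* one channel of qce->dma */
        struct scatterlist *src;        /* request source data */
        int src_nents;
        struct scatterlist result_sg;   /* single-entry SG for the result dump */
};

static void hash_done_flow_sketch(struct hash_done_sketch *ctx,
                                  void (*done)(void *data, int err), void *data)
{
        u32 status = 0;
        int error;

        /* Make sure no DMA descriptors for this request are still in flight. */
        error = dmaengine_terminate_sync(ctx->chan);
        if (error)
                dev_dbg(ctx->dev, "ahash dma termination error (%d)\n", error);

        /* Undo the mappings created when the request was submitted. */
        dma_unmap_sg(ctx->dev, ctx->src, ctx->src_nents, DMA_TO_DEVICE);
        dma_unmap_sg(ctx->dev, &ctx->result_sg, 1, DMA_FROM_DEVICE);

        /* The real driver reads the engine status register here via
         * qce_check_status() and logs any hardware error. */
        if (status)
                dev_dbg(ctx->dev, "ahash operation error (%x)\n", status);

        /* Report completion so the core can launch the next queued request. */
        done(data, error);
}
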
skcipher.c
33 struct qce_device *qce = tmpl->qce; in qce_skcipher_done() local
34 struct qce_result_dump *result_buf = qce->dma.result_buf; in qce_skcipher_done()
44 error = qce_dma_terminate_all(&qce->dma); in qce_skcipher_done()
46 dev_dbg(qce->dev, "skcipher dma termination error (%d)\n", in qce_skcipher_done()
50 dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src); in qce_skcipher_done()
51 dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst); in qce_skcipher_done()
55 error = qce_check_status(qce, &status); in qce_skcipher_done()
57 dev_dbg(qce->dev, "skcipher operation error (%x)\n", status); in qce_skcipher_done()
60 qce->async_req_done(tmpl->qce, error); in qce_skcipher_done()
70 struct qce_device *qce = tmpl->qce; in qce_skcipher_async_req_handle() local
[all …]
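
The dir_src/dir_dst pair in the skcipher hits comes from the in-place versus out-of-place distinction: when the destination scatterlist is the same as the source, a single bidirectional mapping is enough, otherwise source and destination are mapped separately. A small sketch of that mapping choice follows; the helper name and error codes are illustrative, not the driver's.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/*
 * Map source and destination scatterlists for a cipher request, mirroring
 * the dir_src/dir_dst unmap calls seen in qce_skcipher_done() above.
 */
static int map_cipher_sg_sketch(struct device *dev,
                                struct scatterlist *src, int src_nents,
                                struct scatterlist *dst, int dst_nents,
                                enum dma_data_direction *dir_src,
                                enum dma_data_direction *dir_dst)
{
        bool diff_dst = (src != dst);
        int ret;

        *dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
        *dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

        ret = dma_map_sg(dev, src, src_nents, *dir_src);
        if (!ret)
                return -EIO;

        if (diff_dst) {
                ret = dma_map_sg(dev, dst, dst_nents, *dir_dst);
                if (!ret) {
                        dma_unmap_sg(dev, src, src_nents, *dir_src);
                        return -EIO;
                }
        }

        return 0;
}
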
aead.c
30 struct qce_device *qce = tmpl->qce; in qce_aead_done() local
31 struct qce_result_dump *result_buf = qce->dma.result_buf; in qce_aead_done()
44 error = qce_dma_terminate_all(&qce->dma); in qce_aead_done()
46 dev_dbg(qce->dev, "aead dma termination error (%d)\n", in qce_aead_done()
49 dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src); in qce_aead_done()
51 dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst); in qce_aead_done()
66 error = qce_check_status(qce, &status); in qce_aead_done()
68 dev_err(qce->dev, "aead operation error (%x)\n", status); in qce_aead_done()
89 qce->async_req_done(qce, error); in qce_aead_done()
97 struct qce_device *qce = tmpl->qce; in qce_aead_prepare_result_buf() local
[all …]
core.h
42 int (*async_req_enqueue)(struct qce_device *qce,
44 void (*async_req_done)(struct qce_device *qce, int ret);
56 int (*register_algs)(struct qce_device *qce);
57 void (*unregister_algs)(struct qce_device *qce);
common.h
94 struct qce_device *qce; member
100 int qce_check_status(struct qce_device *qce, u32 *status);
101 void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step);
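
core.h and common.h show how the driver is layered: each algorithm template keeps a qce back-pointer (the member hit in common.h), the device exposes async_req_enqueue/async_req_done callbacks to the algorithm code, and every algorithm family supplies register_algs/unregister_algs hooks that core.c iterates over in the qce_register_algs()/qce_unregister_algs() hits near the top. The following is a hedged reconstruction of that split; the struct names and the exact grouping are assumptions based only on the signatures visible above.

#include <linux/crypto.h>

struct qce_device;      /* opaque here; defined in the driver's core.h */

/* Callbacks the device exposes to the per-algorithm code. */
struct qce_device_ops_sketch {
        int (*async_req_enqueue)(struct qce_device *qce,
                                 struct crypto_async_request *req);
        void (*async_req_done)(struct qce_device *qce, int ret);
};

/* Hooks each algorithm family (skcipher, ahash, aead) provides so the core
 * can register and unregister its algorithms in one loop. */
struct qce_algo_ops_sketch {
        int (*register_algs)(struct qce_device *qce);
        void (*unregister_algs)(struct qce_device *qce);
};
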
/linux-6.12.1/Documentation/devicetree/bindings/crypto/
qcom-qce.yaml
4 $id: http://devicetree.org/schemas/crypto/qcom-qce.yaml#
13 This document defines the binding for the QCE crypto
29 - qcom,ipq4019-qce
30 - qcom,sm8150-qce
31 - const: qcom,qce
35 - qcom,ipq6018-qce
36 - qcom,ipq8074-qce
37 - qcom,ipq9574-qce
38 - qcom,msm8996-qce
39 - qcom,qcm2290-qce
[all …]
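
Every SoC-specific compatible in the binding falls back to "qcom,ipq4019-qce" or "qcom,sm8150-qce" and ultimately to the generic "qcom,qce", so a driver only has to match the fallbacks for new SoCs to bind without code changes; that is the pattern visible in the dtsi hits further down. Below is a sketch of an OF match table using those fallbacks; the entries are illustrative and may not match the driver's actual table.

#include <linux/mod_devicetable.h>
#include <linux/module.h>

/* Illustrative match table: a node with
 *   compatible = "qcom,sm8550-qce", "qcom,sm8150-qce", "qcom,qce";
 * binds through either of its fallback strings below. */
static const struct of_device_id qce_of_match_sketch[] = {
        { .compatible = "qcom,ipq4019-qce" },
        { .compatible = "qcom,sm8150-qce" },
        { .compatible = "qcom,qce" },
        { }
};
MODULE_DEVICE_TABLE(of, qce_of_match_sketch);
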
/linux-6.12.1/drivers/crypto/
Kconfig
569 prompt "Algorithms enabled for QCE acceleration"
576 The QCE engine does not appear to scale as well as the CPU to handle
578 QCE handles only 2 requests in parallel.
635 qce at 256-bytes, 30% faster at 512, and about even at 768-bytes.
Makefile
35 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
/linux-6.12.1/arch/arm64/boot/dts/qcom/
ipq9574.dtsi
268 compatible = "qcom,ipq9574-qce", "qcom,ipq4019-qce", "qcom,qce";
sm6115.dtsi
852 compatible = "qcom,sm6115-qce", "qcom,ipq4019-qce", "qcom,qce";
sm6350.dtsi
1227 compatible = "qcom,sm6350-qce", "qcom,sm8150-qce", "qcom,qce";
sm8350.dtsi
1817 compatible = "qcom,sm8350-qce", "qcom,sm8150-qce", "qcom,qce";
sm8550.dtsi
1961 compatible = "qcom,sm8550-qce", "qcom,sm8150-qce", "qcom,qce";
sm8450.dtsi
4565 compatible = "qcom,sm8450-qce", "qcom,sm8150-qce", "qcom,qce";
sm8650.dtsi
2502 compatible = "qcom,sm8650-qce", "qcom,sm8150-qce", "qcom,qce";
sc7280.dtsi
2415 compatible = "qcom,sc7280-qce", "qcom,sm8150-qce", "qcom,qce";
sm8250.dtsi
2606 compatible = "qcom,sm8250-qce", "qcom,sm8150-qce", "qcom,qce";
sm8150.dtsi
2154 compatible = "qcom,sm8150-qce", "qcom,qce";
/linux-6.12.1/
MAINTAINERS
19015 F: Documentation/devicetree/bindings/crypto/qcom-qce.yaml
19016 F: drivers/crypto/qce/