// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"
#include "aead.h"

#define QCE_MAJOR_VERSION5	0x05
#define QCE_QUEUE_LENGTH	1

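/*
 * Default bandwidth, in kBps (the unit used by icc_set_bw()), voted on
 * the crypto-engine-to-memory interconnect path at probe time.
 */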
#define QCE_DEFAULT_MEM_BANDWIDTH	393600

static const struct qce_algo_ops *qce_ops[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	&skcipher_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	&ahash_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
	&aead_ops,
#endif
};

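/* Unregister the algorithms of every configured sub-driver. */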
static void qce_unregister_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}

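/*
 * Register the algorithms of all configured sub-drivers. On the first
 * failure, registration stops and the error is returned to the caller.
 */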
static int qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret)
			break;
	}

	return ret;
}

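/* Dispatch a request to the sub-driver that handles its algorithm type. */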
static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}

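/*
 * Enqueue a new request (if any) and, when the engine is idle, dequeue
 * the next request and hand it to the matching sub-driver. Only one
 * request is in flight at a time; a non-NULL qce->req marks the engine
 * as busy.
 */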
static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

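	/* A backlogged request is now being processed; notify its caller. */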
	if (backlog) {
		spin_lock_bh(&qce->lock);
		crypto_request_complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}

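/*
 * Completion tasklet: report the result of the finished request back to
 * the crypto API and kick the queue to start the next one.
 */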
static void qce_tasklet_req_done(unsigned long data)
{
	struct qce_device *qce = (struct qce_device *)data;
	struct crypto_async_request *req;
	unsigned long flags;

	spin_lock_irqsave(&qce->lock, flags);
	req = qce->req;
	qce->req = NULL;
	spin_unlock_irqrestore(&qce->lock, flags);

	if (req)
		crypto_request_complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}

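/* Entry point used by the sub-drivers to queue an asynchronous request. */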
static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}

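/*
 * Called by the sub-drivers when the hardware has finished a request;
 * completion is deferred to the done tasklet.
 */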
static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}

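/* Read the hardware version and derive version-dependent parameters. */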
static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * The driver does not support v5 with minor 0, because that version
	 * has special alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;

	/*
	 * Rx and tx pipes are treated as a pair inside CE.
	 * The pipe pair number depends on the actual BAM DMA pipe
	 * that is used for transfers. The BAM DMA pipes are passed
	 * from the device tree and used to derive the pipe pair
	 * id in the CE driver as follows.
	 *	BAM DMA pipes (rx, tx)		CE pipe pair id
	 *		0,1				0
	 *		2,3				1
	 *		4,5				2
	 *		6,7				3
	 *		...
	 */
	qce->pipe_pair_id = qce->dma.rxchan->chan_id >> 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}

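/*
 * Probe: map the registers, acquire the (optional) clocks and the memory
 * interconnect path, set up DMA, and register the crypto algorithms.
 */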
static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	qce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get_optional(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get_optional(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get_optional(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	qce->mem_path = devm_of_icc_get(qce->dev, "memory");
	if (IS_ERR(qce->mem_path))
		return PTR_ERR(qce->mem_path);

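	/* Vote for the default bandwidth on the memory interconnect path. */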
	ret = icc_set_bw(qce->mem_path, QCE_DEFAULT_MEM_BANDWIDTH, QCE_DEFAULT_MEM_BANDWIDTH);
	if (ret)
		return ret;

	ret = clk_prepare_enable(qce->core);
	if (ret)
		goto err_mem_path_disable;

	ret = clk_prepare_enable(qce->iface);
	if (ret)
		goto err_clks_core;

	ret = clk_prepare_enable(qce->bus);
	if (ret)
		goto err_clks_iface;

	ret = qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		goto err_clks;

	ret = qce_check_version(qce);
	if (ret)
		goto err_dma;

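	/* Software queue, completion tasklet and dispatch hooks. */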
	spin_lock_init(&qce->lock);
	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
		     (unsigned long)qce);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	ret = qce_register_algs(qce);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	qce_dma_release(&qce->dma);
err_clks:
	clk_disable_unprepare(qce->bus);
err_clks_iface:
	clk_disable_unprepare(qce->iface);
err_clks_core:
	clk_disable_unprepare(qce->core);
err_mem_path_disable:
	icc_set_bw(qce->mem_path, 0, 0);

	return ret;
}

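/* Quiesce the engine, unregister the algorithms and release DMA and clocks. */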
static void qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
}

static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{ .compatible = "qcom,crypto-v5.4", },
	{ .compatible = "qcom,qce", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);

static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove_new = qce_crypto_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");