// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"
#include "aead.h"

#define QCE_MAJOR_VERSION5	0x05
#define QCE_QUEUE_LENGTH	1

#define QCE_DEFAULT_MEM_BANDWIDTH	393600

static const struct qce_algo_ops *qce_ops[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	&skcipher_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	&ahash_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
	&aead_ops,
#endif
};

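/*
 * Each qce_algo_ops entry is provided by the matching algorithm
 * implementation (declared in the cipher/sha/aead headers included
 * above), so only the algorithm families enabled in Kconfig end up
 * in this dispatch table.
 */
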
static void qce_unregister_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}

static int qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret)
			break;
	}

	return ret;
}

static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}

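/*
 * The engine processes one request at a time: qce->req acts as the busy
 * flag, and QCE_QUEUE_LENGTH is 1, so at most one further request waits
 * in the crypto queue (plus backlogged ones). Everything beyond that is
 * handled through the normal -EBUSY/backlog protocol of
 * crypto_enqueue_request().
 */
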
static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

	if (backlog) {
		spin_lock_bh(&qce->lock);
		crypto_request_complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}

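/*
 * Completion runs in tasklet context: when the hardware finishes, the
 * algorithm implementation calls qce_async_request_done(), which records
 * the result and schedules this tasklet. The tasklet completes the
 * request outside the hard-IRQ path and then kicks the queue so the next
 * pending request can start.
 */
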
static void qce_tasklet_req_done(unsigned long data)
{
	struct qce_device *qce = (struct qce_device *)data;
	struct crypto_async_request *req;
	unsigned long flags;

	spin_lock_irqsave(&qce->lock, flags);
	req = qce->req;
	qce->req = NULL;
	spin_unlock_irqrestore(&qce->lock, flags);

	if (req)
		crypto_request_complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}

static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}

static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}

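/*
 * Rough usage sketch (for illustration only; exact template/field names
 * follow the algorithm files of this driver): a request handler submits
 * work through the hooks installed in probe, e.g.
 *
 *	struct qce_device *qce = tmpl->qce;
 *
 *	return qce->async_req_enqueue(qce, &req->base);
 *
 * and later reports the hardware result from its DMA done callback via
 * qce->async_req_done(qce, error).
 */
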
static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * the driver does not support v5 with minor 0 because it has special
	 * alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;

	/*
	 * Rx and tx pipes are treated as a pair inside CE.
	 * Pipe pair number depends on the actual BAM dma pipe
	 * that is used for transfers. The BAM dma pipes are passed
	 * from the device tree and used to derive the pipe pair
	 * id in the CE driver as follows.
	 * BAM dma pipes(rx, tx)		CE pipe pair id
	 *	0,1				0
	 *	2,3				1
	 *	4,5				2
	 *	6,7				3
	 */
	qce->pipe_pair_id = qce->dma.rxchan->chan_id >> 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}

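/*
 * Probe order matters: iomem and clocks come up before the BAM DMA
 * channels are requested, and qce_check_version() can only run once the
 * rx channel is known, since the pipe pair id is derived from it.
 * Failures unwind in reverse through the labels at the bottom; devm
 * takes care of the ioremap and the device allocation itself.
 */
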
static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	qce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get_optional(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get_optional(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get_optional(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	qce->mem_path = devm_of_icc_get(qce->dev, "memory");
	if (IS_ERR(qce->mem_path))
		return PTR_ERR(qce->mem_path);

	ret = icc_set_bw(qce->mem_path, QCE_DEFAULT_MEM_BANDWIDTH, QCE_DEFAULT_MEM_BANDWIDTH);
	if (ret)
		return ret;

	ret = clk_prepare_enable(qce->core);
	if (ret)
		goto err_mem_path_disable;

	ret = clk_prepare_enable(qce->iface);
	if (ret)
		goto err_clks_core;

	ret = clk_prepare_enable(qce->bus);
	if (ret)
		goto err_clks_iface;

	ret = qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		goto err_clks;

	ret = qce_check_version(qce);
	if (ret)
		goto err_dma;

	spin_lock_init(&qce->lock);
	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
		     (unsigned long)qce);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	ret = qce_register_algs(qce);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	qce_dma_release(&qce->dma);
err_clks:
	clk_disable_unprepare(qce->bus);
err_clks_iface:
	clk_disable_unprepare(qce->iface);
err_clks_core:
	clk_disable_unprepare(qce->core);
err_mem_path_disable:
	icc_set_bw(qce->mem_path, 0, 0);

	return ret;
}

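/*
 * Teardown mirrors probe in reverse: kill the completion tasklet first
 * so nothing completes against a half-torn-down device, then unregister
 * the algorithms, release the DMA channels and gate the clocks. The
 * devm-managed resources (iomem, interconnect path, the qce allocation)
 * are released by the driver core afterwards.
 */
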
static void qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
}

static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{ .compatible = "qcom,crypto-v5.4", },
	{ .compatible = "qcom,qce", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);

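/*
 * Example device tree node (a sketch only; addresses and phandles are
 * made up, and the dt-bindings are authoritative for the resource names
 * on a given SoC):
 *
 *	crypto@1de0000 {
 *		compatible = "qcom,qce";
 *		reg = <0x01de0000 0x20000>;
 *		clocks = <&gcc ...>, <&gcc ...>, <&gcc ...>;
 *		clock-names = "core", "iface", "bus";
 *		dmas = <&cryptobam 4>, <&cryptobam 5>;
 *		dma-names = "rx", "tx";
 *		interconnects = <&noc ...>;
 *		interconnect-names = "memory";
 *	};
 */
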
static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove_new = qce_crypto_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");