// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"
#define QCE_MAJOR_VERSION5	0x05
#define QCE_QUEUE_LENGTH	1
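
/*
 * Table of the algorithm implementations compiled into the driver. Each
 * entry provides (un)registration hooks plus an async request handler for
 * one crypto_tfm type.
 */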
static const struct qce_algo_ops *qce_ops[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	&skcipher_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	&ahash_ops,
#endif
};
static void qce_unregister_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}
static int qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret)
			break;
	}

	return ret;
}
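
/*
 * Dispatch an asynchronous request to the ops entry whose type matches the
 * transform type of the request; -EINVAL is returned if no entry matches.
 */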
static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}
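
/*
 * Enqueue a new request (if any) and, when the engine is idle, pull the
 * next request off the queue and hand it to the matching algorithm ops.
 * The engine processes one request at a time; qce->req marks it busy.
 */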
static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

	if (backlog) {
		spin_lock_bh(&qce->lock);
		backlog->complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}
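
/*
 * Completion tasklet: report the result of the finished request to its
 * originator, then restart the queue to process any pending request.
 */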
static void qce_tasklet_req_done(unsigned long data)
{
	struct qce_device *qce = (struct qce_device *)data;
	struct crypto_async_request *req;
	unsigned long flags;

	spin_lock_irqsave(&qce->lock, flags);
	req = qce->req;
	qce->req = NULL;
	spin_unlock_irqrestore(&qce->lock, flags);

	if (req)
		req->complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}
static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}
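
/*
 * Called by the algorithm implementations once the hardware has finished a
 * request; the completion callback itself is deferred to the tasklet.
 */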
static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}
static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * The driver does not support v5 with minor 0 because it has special
	 * alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;
	qce->pipe_pair_id = 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}
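
/*
 * Probe: map registers, set a 32-bit DMA mask, bring up the core/iface/bus
 * clocks, request the DMA channels, verify the engine revision and finally
 * register the crypto algorithms. Error paths unwind in reverse order.
 */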
static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	qce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	ret = clk_prepare_enable(qce->core);
	if (ret)
		return ret;

	ret = clk_prepare_enable(qce->iface);
	if (ret)
		goto err_clks_core;

	ret = clk_prepare_enable(qce->bus);
	if (ret)
		goto err_clks_iface;

	ret = qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		goto err_clks;

	ret = qce_check_version(qce);
	if (ret)
		goto err_clks;

	spin_lock_init(&qce->lock);
	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
		     (unsigned long)qce);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	ret = qce_register_algs(qce);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	qce_dma_release(&qce->dma);
err_clks:
	clk_disable_unprepare(qce->bus);
err_clks_iface:
	clk_disable_unprepare(qce->iface);
err_clks_core:
	clk_disable_unprepare(qce->core);
	return ret;
}
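
/* Undo probe in reverse order: quiesce, unregister, release DMA and clocks. */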
static int qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
	return 0;
}
static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove = qce_crypto_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");