// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"
#include "aead.h"

#define QCE_MAJOR_VERSION5	0x05
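/*
 * The engine processes a single request at a time, so the software
 * queue only needs to hold the one that comes next; anything beyond
 * that is handled through the crypto API backlog mechanism.
 */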
#define QCE_QUEUE_LENGTH	1

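/* Algorithm families supported by the engine, selected at build time */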
static const struct qce_algo_ops *qce_ops[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	&skcipher_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	&ahash_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
	&aead_ops,
#endif
};

static void qce_unregister_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}

static int qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, j, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret) {
			/* Unwind the families that registered successfully */
			for (j = i - 1; j >= 0; j--)
				qce_ops[j]->unregister_algs(qce);
			break;
		}
	}

	return ret;
}

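/* Hand the request to the algorithm family matching its crypto type */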
static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}

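/*
 * Enqueue a new request (if any) and, when the engine is idle, dequeue
 * the next request and start it. A backlogged request is notified with
 * -EINPROGRESS before the dequeued one is handed to the hardware.
 */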
static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

	if (backlog) {
		spin_lock_bh(&qce->lock);
		backlog->complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}

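/* Completion tasklet: finish the current request and restart the queue */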
static void qce_tasklet_req_done(unsigned long data)
{
	struct qce_device *qce = (struct qce_device *)data;
	struct crypto_async_request *req;
	unsigned long flags;

	spin_lock_irqsave(&qce->lock, flags);
	req = qce->req;
	qce->req = NULL;
	spin_unlock_irqrestore(&qce->lock, flags);

	if (req)
		req->complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}

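/* Callbacks exposed to the algorithm implementations via struct qce_device */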
static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}

static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}

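/* Check the engine revision and derive the DMA pipe pair parameters */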
static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * the driver does not support v5 with minor 0 because it has special
	 * alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;

	/*
	 * Rx and tx pipes are treated as a pair inside CE.
	 * Pipe pair number depends on the actual BAM dma pipe
	 * that is used for transfers. The BAM dma pipes are passed
	 * from the device tree and used to derive the pipe pair
	 * id in the CE driver as follows.
	 *	BAM dma pipes(rx, tx)		CE pipe pair id
	 *		0,1				0
	 *		2,3				1
	 *		4,5				2
	 *		6,7				3
	 *		...
	 */
	qce->pipe_pair_id = qce->dma.rxchan->chan_id >> 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}

static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	qce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	ret = clk_prepare_enable(qce->core);
	if (ret)
		return ret;

	ret = clk_prepare_enable(qce->iface);
	if (ret)
		goto err_clks_core;

	ret = clk_prepare_enable(qce->bus);
	if (ret)
		goto err_clks_iface;

	ret = qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		goto err_clks;

	ret = qce_check_version(qce);
	if (ret)
		goto err_dma;

	spin_lock_init(&qce->lock);
	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
		     (unsigned long)qce);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	ret = qce_register_algs(qce);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	qce_dma_release(&qce->dma);
err_clks:
	clk_disable_unprepare(qce->bus);
err_clks_iface:
	clk_disable_unprepare(qce->iface);
err_clks_core:
	clk_disable_unprepare(qce->core);
	return ret;
}

static int qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
	return 0;
}

static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{ .compatible = "qcom,crypto-v5.4", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);

static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove = qce_crypto_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");