// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/akcipher.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");

static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

static unsigned int des3_disable;
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");

static unsigned int rsa_disable;
module_param(rsa_disable, uint, 0444);
MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(skcipher_algs);
static LIST_HEAD(aead_algs);
static LIST_HEAD(akcipher_algs);

/* For any tfm, requests for that tfm must be returned in the order
 * received.  With multiple queues available, the CCP can process more
 * than one cmd at a time.  Therefore we must maintain a cmd list to ensure
 * the proper ordering of requests on a given tfm.
 */
struct ccp_crypto_queue {
        struct list_head cmds;
        struct list_head *backlog;
        unsigned int cmd_count;
};

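/* Maximum number of cmds held in the request queue before new cmds are
 * backlogged or rejected
 */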
#define CCP_CRYPTO_MAX_QLEN     100

static struct ccp_crypto_queue req_queue;
static DEFINE_SPINLOCK(req_queue_lock);

struct ccp_crypto_cmd {
        struct list_head entry;

        struct ccp_cmd *cmd;

        /* Save the crypto_tfm and crypto_async_request addresses
         * separately to avoid any reference to a possibly invalid
         * crypto_async_request structure after invoking the request
         * callback
         */
        struct crypto_async_request *req;
        struct crypto_tfm *tfm;

        /* Used for held command processing to determine state */
        int ret;
};

static inline bool ccp_crypto_success(int err)
{
        if (err && (err != -EINPROGRESS) && (err != -EBUSY))
                return false;

        return true;
}

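/* Remove a completed cmd from the request queue and return the next held
 * cmd (one using the same tfm) that can now be submitted.  The next
 * backlogged cmd, if any, is returned through @backlog.
 */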
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
        struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
        struct ccp_crypto_cmd *held = NULL, *tmp;
        unsigned long flags;

        *backlog = NULL;

        spin_lock_irqsave(&req_queue_lock, flags);

        /* Held cmds will be after the current cmd in the queue so start
         * searching for a cmd with a matching tfm for submission.
         */
        tmp = crypto_cmd;
        list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
                if (crypto_cmd->tfm != tmp->tfm)
                        continue;
                held = tmp;
                break;
        }

        /* Process the backlog:
         *   Because cmds can be executed from any point in the cmd list
         *   special precautions have to be taken when handling the backlog.
         */
        if (req_queue.backlog != &req_queue.cmds) {
                /* Skip over this cmd if it is the next backlog cmd */
                if (req_queue.backlog == &crypto_cmd->entry)
                        req_queue.backlog = crypto_cmd->entry.next;

                *backlog = container_of(req_queue.backlog,
                                        struct ccp_crypto_cmd, entry);
                req_queue.backlog = req_queue.backlog->next;

                /* Skip over this cmd if it is now the next backlog cmd */
                if (req_queue.backlog == &crypto_cmd->entry)
                        req_queue.backlog = crypto_cmd->entry.next;
        }

        /* Remove the cmd entry from the list of cmds */
        req_queue.cmd_count--;
        list_del(&crypto_cmd->entry);

        spin_unlock_irqrestore(&req_queue_lock, flags);

        return held;
}

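/* Completion callback invoked by the CCP driver when a cmd finishes or
 * transitions from backlogged to in-progress.  Completes the crypto
 * request, releases the next backlogged request (if any) and submits
 * the next held cmd for the same tfm.
 */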
static void ccp_crypto_complete(void *data, int err)
{
        struct ccp_crypto_cmd *crypto_cmd = data;
        struct ccp_crypto_cmd *held, *next, *backlog;
        struct crypto_async_request *req = crypto_cmd->req;
        struct ccp_ctx *ctx = crypto_tfm_ctx_dma(req->tfm);
        int ret;

        if (err == -EINPROGRESS) {
                /* Only propagate the -EINPROGRESS if necessary */
                if (crypto_cmd->ret == -EBUSY) {
                        crypto_cmd->ret = -EINPROGRESS;
                        crypto_request_complete(req, -EINPROGRESS);
                }

                return;
        }

        /* Operation has completed - update the queue before invoking
         * the completion callbacks and retrieve the next cmd (cmd with
         * a matching tfm) that can be submitted to the CCP.
         */
        held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
        if (backlog) {
                backlog->ret = -EINPROGRESS;
                crypto_request_complete(backlog->req, -EINPROGRESS);
        }

        /* Transition the state from -EBUSY to -EINPROGRESS first */
        if (crypto_cmd->ret == -EBUSY)
                crypto_request_complete(req, -EINPROGRESS);

        /* Completion callbacks */
        ret = err;
        if (ctx->complete)
                ret = ctx->complete(req, ret);
        crypto_request_complete(req, ret);

        /* Submit the next cmd */
        while (held) {
                /* Since we have already queued the cmd, we must indicate that
                 * we can backlog so as not to "lose" this request.
                 */
                held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
                ret = ccp_enqueue_cmd(held->cmd);
                if (ccp_crypto_success(ret))
                        break;

                /* Error occurred, report it and get the next entry */
                ctx = crypto_tfm_ctx_dma(held->req->tfm);
                if (ctx->complete)
                        ret = ctx->complete(held->req, ret);
                crypto_request_complete(held->req, ret);

                next = ccp_crypto_cmd_complete(held, &backlog);
                if (backlog) {
                        backlog->ret = -EINPROGRESS;
                        crypto_request_complete(backlog->req, -EINPROGRESS);
                }

                kfree(held);
                held = next;
        }

        kfree(crypto_cmd);
}

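/* Add a cmd to the request queue and, if no other cmd for the same tfm
 * is already queued, submit it to the CCP.  Returns -EINPROGRESS, or
 * -EBUSY if the cmd was backlogged, on success.
 */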
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
        struct ccp_crypto_cmd *active = NULL, *tmp;
        unsigned long flags;
        bool free_cmd = true;
        int ret;

        spin_lock_irqsave(&req_queue_lock, flags);

        /* Check if the cmd can/should be queued */
        if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
                if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
                        ret = -ENOSPC;
                        goto e_lock;
                }
        }

        /* Look for an entry with the same tfm.  If there is a cmd
         * with the same tfm in the list then the current cmd cannot
         * be submitted to the CCP yet.
         */
        list_for_each_entry(tmp, &req_queue.cmds, entry) {
                if (crypto_cmd->tfm != tmp->tfm)
                        continue;
                active = tmp;
                break;
        }

        ret = -EINPROGRESS;
        if (!active) {
                ret = ccp_enqueue_cmd(crypto_cmd->cmd);
                if (!ccp_crypto_success(ret))
                        goto e_lock;    /* Error, don't queue it */
        }

        if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
                ret = -EBUSY;
                if (req_queue.backlog == &req_queue.cmds)
                        req_queue.backlog = &crypto_cmd->entry;
        }
        crypto_cmd->ret = ret;

        req_queue.cmd_count++;
        list_add_tail(&crypto_cmd->entry, &req_queue.cmds);

        free_cmd = false;

e_lock:
        spin_unlock_irqrestore(&req_queue_lock, flags);

        if (free_cmd)
                kfree(crypto_cmd);

        return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *                              by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
                               struct ccp_cmd *cmd)
{
        struct ccp_crypto_cmd *crypto_cmd;
        gfp_t gfp;

        gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

        crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
        if (!crypto_cmd)
                return -ENOMEM;

        /* The tfm pointer must be saved and not referenced through the
         * crypto_async_request (req) pointer because it is used after
         * the completion callback for the request, when the req pointer
         * might no longer be valid.
         */
        crypto_cmd->cmd = cmd;
        crypto_cmd->req = req;
        crypto_cmd->tfm = req->tfm;

        cmd->callback = ccp_crypto_complete;
        cmd->data = crypto_cmd;

        if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
                cmd->flags |= CCP_CMD_MAY_BACKLOG;
        else
                cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

        return ccp_crypto_enqueue_cmd(crypto_cmd);
}

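/**
 * ccp_crypto_sg_table_add - append a scatterlist to a pre-allocated sg table
 *
 * @table: sg_table with unused (pageless) entries available
 * @sg_add: scatterlist entries to copy into @table
 *
 * Returns the last table entry that was filled in, or NULL if @table
 * does not have enough free entries to hold all of @sg_add.
 */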
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
                                            struct scatterlist *sg_add)
{
        struct scatterlist *sg, *sg_last = NULL;

        for (sg = table->sgl; sg; sg = sg_next(sg))
                if (!sg_page(sg))
                        break;
        if (WARN_ON(!sg))
                return NULL;

        for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
                sg_set_page(sg, sg_page(sg_add), sg_add->length,
                            sg_add->offset);
                sg_last = sg;
        }
        if (WARN_ON(sg_add))
                return NULL;

        return sg_last;
}

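/* Register each supported algorithm family with the crypto API unless it
 * has been disabled via the corresponding module parameter.
 */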
static int ccp_register_algs(void)
{
        int ret;

        if (!aes_disable) {
                ret = ccp_register_aes_algs(&skcipher_algs);
                if (ret)
                        return ret;

                ret = ccp_register_aes_cmac_algs(&hash_algs);
                if (ret)
                        return ret;

                ret = ccp_register_aes_xts_algs(&skcipher_algs);
                if (ret)
                        return ret;

                ret = ccp_register_aes_aeads(&aead_algs);
                if (ret)
                        return ret;
        }

        if (!des3_disable) {
                ret = ccp_register_des3_algs(&skcipher_algs);
                if (ret)
                        return ret;
        }

        if (!sha_disable) {
                ret = ccp_register_sha_algs(&hash_algs);
                if (ret)
                        return ret;
        }

        if (!rsa_disable) {
                ret = ccp_register_rsa_algs(&akcipher_algs);
                if (ret)
                        return ret;
        }

        return 0;
}

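/* Unregister and free every algorithm that was successfully registered */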
static void ccp_unregister_algs(void)
{
        struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
        struct ccp_crypto_skcipher_alg *ablk_alg, *ablk_tmp;
        struct ccp_crypto_aead *aead_alg, *aead_tmp;
        struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;

        list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
                crypto_unregister_ahash(&ahash_alg->alg);
                list_del(&ahash_alg->entry);
                kfree(ahash_alg);
        }

        list_for_each_entry_safe(ablk_alg, ablk_tmp, &skcipher_algs, entry) {
                crypto_unregister_skcipher(&ablk_alg->alg);
                list_del(&ablk_alg->entry);
                kfree(ablk_alg);
        }

        list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
                crypto_unregister_aead(&aead_alg->alg);
                list_del(&aead_alg->entry);
                kfree(aead_alg);
        }

        list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
                crypto_unregister_akcipher(&akc_alg->alg);
                list_del(&akc_alg->entry);
                kfree(akc_alg);
        }
}

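/* Module init: verify that at least one CCP device is present, set up
 * the request queue, and register the supported algorithms.
 */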
static int __init ccp_crypto_init(void)
{
        int ret;

        ret = ccp_present();
        if (ret) {
                pr_err("Cannot load: there are no available CCPs\n");
                return ret;
        }

        INIT_LIST_HEAD(&req_queue.cmds);
        req_queue.backlog = &req_queue.cmds;
        req_queue.cmd_count = 0;

        ret = ccp_register_algs();
        if (ret)
                ccp_unregister_algs();

        return ret;
}

static void __exit ccp_crypto_exit(void)
{
        ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);