/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/akcipher.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");

static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

static unsigned int des3_disable;
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");

static unsigned int rsa_disable;
module_param(rsa_disable, uint, 0444);
MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);
static LIST_HEAD(aead_algs);
static LIST_HEAD(akcipher_algs);

/* For any tfm, requests for that tfm must be returned in the order
 * received.  With multiple queues available, the CCP can process more
 * than one cmd at a time.  Therefore we must maintain a cmd list to ensure
 * the proper ordering of requests on a given tfm.
 */
struct ccp_crypto_queue {
        struct list_head cmds;
        struct list_head *backlog;
        unsigned int cmd_count;
};

#define CCP_CRYPTO_MAX_QLEN     100

static struct ccp_crypto_queue req_queue;
static spinlock_t req_queue_lock;
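/* All cmds flow through the single req_queue above, protected by
 * req_queue_lock.  req_queue.backlog normally points at req_queue.cmds,
 * meaning nothing is backlogged; once cmd_count reaches CCP_CRYPTO_MAX_QLEN,
 * cmds that may backlog are still linked onto the list and backlog is set
 * to the first such entry, so the completion path knows which request to
 * move to -EINPROGRESS next.
 */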

struct ccp_crypto_cmd {
        struct list_head entry;

        struct ccp_cmd *cmd;

        /* Save the crypto_tfm and crypto_async_request addresses
         * separately to avoid any reference to a possibly invalid
         * crypto_async_request structure after invoking the request
         * callback
         */
        struct crypto_async_request *req;
        struct crypto_tfm *tfm;

        /* Used for held command processing to determine state */
        int ret;
};

struct ccp_crypto_cpu {
        struct work_struct work;
        struct completion completion;
        struct ccp_crypto_cmd *crypto_cmd;
        int err;
};

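/* Submission to the CCP is treated as successful if the return code is 0,
 * -EINPROGRESS or -EBUSY; anything else is a hard error.
 */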
static inline bool ccp_crypto_success(int err)
{
        if (err && (err != -EINPROGRESS) && (err != -EBUSY))
                return false;

        return true;
}

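/* Remove a completed cmd from req_queue.  Returns the next queued cmd that
 * shares the completed cmd's tfm (if any) so the caller can submit it to
 * the CCP, and reports through @backlog the backlogged cmd (if any) whose
 * request should now be notified with -EINPROGRESS.
 */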
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
        struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
        struct ccp_crypto_cmd *held = NULL, *tmp;
        unsigned long flags;

        *backlog = NULL;

        spin_lock_irqsave(&req_queue_lock, flags);

        /* Held cmds will be after the current cmd in the queue so start
         * searching for a cmd with a matching tfm for submission.
         */
        tmp = crypto_cmd;
        list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
                if (crypto_cmd->tfm != tmp->tfm)
                        continue;
                held = tmp;
                break;
        }

        /* Process the backlog:
         *   Because cmds can complete from any point in the cmd list, the
         *   completing cmd may be the entry the backlog pointer is at, or
         *   the one it advances to.  Skip over it in both cases so that
         *   backlog is never left pointing at the entry removed below.
         */
        if (req_queue.backlog != &req_queue.cmds) {
                /* Skip over this cmd if it is the next backlog cmd */
                if (req_queue.backlog == &crypto_cmd->entry)
                        req_queue.backlog = crypto_cmd->entry.next;

                *backlog = container_of(req_queue.backlog,
                                        struct ccp_crypto_cmd, entry);
                req_queue.backlog = req_queue.backlog->next;

                /* Skip over this cmd if it is now the next backlog cmd */
                if (req_queue.backlog == &crypto_cmd->entry)
                        req_queue.backlog = crypto_cmd->entry.next;
        }

        /* Remove the cmd entry from the list of cmds */
        req_queue.cmd_count--;
        list_del(&crypto_cmd->entry);

        spin_unlock_irqrestore(&req_queue_lock, flags);

        return held;
}

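/* Callback run by the CCP driver for every cmd submitted through this layer.
 * A bare -EINPROGRESS notification is only forwarded to the request if the
 * cmd had been accepted with -EBUSY (backlogged).  On completion the cmd is
 * removed from req_queue, any newly unblocked backlog request is notified,
 * the ctx completion hook and the request's own callback are invoked, and
 * the next queued cmd for the same tfm (if any) is submitted to the CCP.
 */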
static void ccp_crypto_complete(void *data, int err)
{
        struct ccp_crypto_cmd *crypto_cmd = data;
        struct ccp_crypto_cmd *held, *next, *backlog;
        struct crypto_async_request *req = crypto_cmd->req;
        struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
        int ret;

        if (err == -EINPROGRESS) {
                /* Only propagate the -EINPROGRESS if necessary */
                if (crypto_cmd->ret == -EBUSY) {
                        crypto_cmd->ret = -EINPROGRESS;
                        req->complete(req, -EINPROGRESS);
                }

                return;
        }

        /* Operation has completed - update the queue before invoking
         * the completion callbacks and retrieve the next cmd (cmd with
         * a matching tfm) that can be submitted to the CCP.
         */
        held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
        if (backlog) {
                backlog->ret = -EINPROGRESS;
                backlog->req->complete(backlog->req, -EINPROGRESS);
        }

        /* If the cmd was accepted with -EBUSY (backlogged), the request was
         * never told it moved to -EINPROGRESS, so report that transition
         * before the final completion.
         */
        if (crypto_cmd->ret == -EBUSY)
                req->complete(req, -EINPROGRESS);

        /* Completion callbacks */
        ret = err;
        if (ctx->complete)
                ret = ctx->complete(req, ret);
        req->complete(req, ret);

        /* Submit the next cmd */
        while (held) {
                /* Since we have already queued the cmd, we must indicate that
                 * we can backlog so as not to "lose" this request.
                 */
                held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
                ret = ccp_enqueue_cmd(held->cmd);
                if (ccp_crypto_success(ret))
                        break;

                /* Error occurred, report it and get the next entry */
                ctx = crypto_tfm_ctx(held->req->tfm);
                if (ctx->complete)
                        ret = ctx->complete(held->req, ret);
                held->req->complete(held->req, ret);

                next = ccp_crypto_cmd_complete(held, &backlog);
                if (backlog) {
                        backlog->ret = -EINPROGRESS;
                        backlog->req->complete(backlog->req, -EINPROGRESS);
                }

                kfree(held);
                held = next;
        }

        kfree(crypto_cmd);
}

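/* Add a cmd to req_queue and, unless another cmd with the same tfm is
 * already queued, submit it to the CCP.  Cmds that must wait (for an earlier
 * cmd on the same tfm, or because the queue is at CCP_CRYPTO_MAX_QLEN) stay
 * on the list and are pushed to the CCP later by the completion path.
 * Returns -EINPROGRESS when the cmd has been queued, -EBUSY when it has been
 * queued on the backlog, or an error (including -EBUSY when backlogging is
 * not allowed) if it could not be accepted, in which case it has been freed.
 */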
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
        struct ccp_crypto_cmd *active = NULL, *tmp;
        unsigned long flags;
        bool free_cmd = true;
        int ret;

        spin_lock_irqsave(&req_queue_lock, flags);

        /* Check if the cmd can/should be queued */
        if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
                ret = -EBUSY;
                if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
                        goto e_lock;
        }

        /* Look for an entry with the same tfm.  If there is a cmd
         * with the same tfm in the list then the current cmd cannot
         * be submitted to the CCP yet.
         */
        list_for_each_entry(tmp, &req_queue.cmds, entry) {
                if (crypto_cmd->tfm != tmp->tfm)
                        continue;
                active = tmp;
                break;
        }

        ret = -EINPROGRESS;
        if (!active) {
                ret = ccp_enqueue_cmd(crypto_cmd->cmd);
                if (!ccp_crypto_success(ret))
                        goto e_lock;    /* Error, don't queue it */
                if ((ret == -EBUSY) &&
                    !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
                        goto e_lock;    /* Not backlogging, don't queue it */
        }

        if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
                ret = -EBUSY;
                if (req_queue.backlog == &req_queue.cmds)
                        req_queue.backlog = &crypto_cmd->entry;
        }
        crypto_cmd->ret = ret;

        req_queue.cmd_count++;
        list_add_tail(&crypto_cmd->entry, &req_queue.cmds);

        free_cmd = false;

e_lock:
        spin_unlock_irqrestore(&req_queue_lock, flags);

        if (free_cmd)
                kfree(crypto_cmd);

        return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *                              by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
                               struct ccp_cmd *cmd)
{
        struct ccp_crypto_cmd *crypto_cmd;
        gfp_t gfp;

        gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

        crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
        if (!crypto_cmd)
                return -ENOMEM;

        /* The tfm pointer must be saved and not referenced from the
         * crypto_async_request (req) pointer because it is used after the
         * completion callback for the request, when the req pointer might
         * no longer be valid.
         */
        crypto_cmd->cmd = cmd;
        crypto_cmd->req = req;
        crypto_cmd->tfm = req->tfm;

        cmd->callback = ccp_crypto_complete;
        cmd->data = crypto_cmd;

        if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
                cmd->flags |= CCP_CMD_MAY_BACKLOG;
        else
                cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

        return ccp_crypto_enqueue_cmd(crypto_cmd);
}

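/* A rough usage sketch for ccp_crypto_enqueue_request(): an algorithm
 * implementation (see the sibling ccp-crypto-*.c files) fills in a ccp_cmd
 * held in its request context and hands the base crypto request to this
 * layer.  Field and variable names below are illustrative only:
 *
 *      rctx->cmd.engine = CCP_ENGINE_SHA;
 *      ...
 *      return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
 */

/* Fill the first unused (pageless) entries of @table with the entries of
 * @sg_add.  Returns the last entry written, or NULL if @table does not have
 * enough free entries to hold all of @sg_add.
 */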
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
                                            struct scatterlist *sg_add)
{
        struct scatterlist *sg, *sg_last = NULL;

        for (sg = table->sgl; sg; sg = sg_next(sg))
                if (!sg_page(sg))
                        break;
        if (WARN_ON(!sg))
                return NULL;

        for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
                sg_set_page(sg, sg_page(sg_add), sg_add->length,
                            sg_add->offset);
                sg_last = sg;
        }
        if (WARN_ON(sg_add))
                return NULL;

        return sg_last;
}

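/* Register the supported algorithm families with the crypto API, honouring
 * the *_disable module parameters.  Each ccp_register_*() helper adds its
 * registered algorithms to the corresponding list head above so that
 * ccp_unregister_algs() can later tear them down.
 */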
static int ccp_register_algs(void)
{
        int ret;

        if (!aes_disable) {
                ret = ccp_register_aes_algs(&cipher_algs);
                if (ret)
                        return ret;

                ret = ccp_register_aes_cmac_algs(&hash_algs);
                if (ret)
                        return ret;

                ret = ccp_register_aes_xts_algs(&cipher_algs);
                if (ret)
                        return ret;

                ret = ccp_register_aes_aeads(&aead_algs);
                if (ret)
                        return ret;
        }

        if (!des3_disable) {
                ret = ccp_register_des3_algs(&cipher_algs);
                if (ret)
                        return ret;
        }

        if (!sha_disable) {
                ret = ccp_register_sha_algs(&hash_algs);
                if (ret)
                        return ret;
        }

        if (!rsa_disable) {
                ret = ccp_register_rsa_algs(&akcipher_algs);
                if (ret)
                        return ret;
        }

        return 0;
}

static void ccp_unregister_algs(void)
{
        struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
        struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
        struct ccp_crypto_aead *aead_alg, *aead_tmp;
        struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;

        list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
                crypto_unregister_ahash(&ahash_alg->alg);
                list_del(&ahash_alg->entry);
                kfree(ahash_alg);
        }

        list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
                crypto_unregister_alg(&ablk_alg->alg);
                list_del(&ablk_alg->entry);
                kfree(ablk_alg);
        }

        list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
                crypto_unregister_aead(&aead_alg->alg);
                list_del(&aead_alg->entry);
                kfree(aead_alg);
        }

        list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
                crypto_unregister_akcipher(&akc_alg->alg);
                list_del(&akc_alg->entry);
                kfree(akc_alg);
        }
}

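/* Module init: bail out unless a CCP device is present, initialize the
 * shared request queue, then register the algorithms, unwinding any partial
 * registration on failure.
 */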
static int ccp_crypto_init(void)
{
        int ret;

        ret = ccp_present();
        if (ret)
                return ret;

        spin_lock_init(&req_queue_lock);
        INIT_LIST_HEAD(&req_queue.cmds);
        req_queue.backlog = &req_queue.cmds;
        req_queue.cmd_count = 0;

        ret = ccp_register_algs();
        if (ret)
                ccp_unregister_algs();

        return ret;
}

static void ccp_crypto_exit(void)
{
        ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);