/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>

#include <asm/cpu_device_id.h>

#include <linux/ccp.h>

#include "ccp-dev.h"
struct ccp_tasklet_data {
        struct completion completion;
        struct ccp_cmd *cmd;
};
/* Human-readable error strings */
#define CCP_MAX_ERROR_CODE      64
static char *ccp_error_codes[] = {
        "",
        "ILLEGAL_ENGINE",
        "ILLEGAL_KEY_ID",
        "ILLEGAL_FUNCTION_TYPE",
        "ILLEGAL_FUNCTION_MODE",
        "ILLEGAL_FUNCTION_ENCRYPT",
        "ILLEGAL_FUNCTION_SIZE",
        "Zlib_MISSING_INIT_EOM",
        "ILLEGAL_FUNCTION_RSVD",
        "ILLEGAL_BUFFER_LENGTH",
        "VLSB_FAULT",
        "ILLEGAL_MEM_ADDR",
        "ILLEGAL_MEM_SEL",
        "ILLEGAL_CONTEXT_ID",
        "ILLEGAL_KEY_ADDR",
        "0xF Reserved",
        "Zlib_ILLEGAL_MULTI_QUEUE",
        "Zlib_ILLEGAL_JOBID_CHANGE",
        "CMD_TIMEOUT",
        "IDMA0_AXI_SLVERR",
        "IDMA0_AXI_DECERR",
        "0x15 Reserved",
        "IDMA1_AXI_SLAVE_FAULT",
        "IDMA1_AXI_DECERR",
        "0x18 Reserved",
        "ZLIBVHB_AXI_SLVERR",
        "ZLIBVHB_AXI_DECERR",
        "0x1B Reserved",
        "ZLIB_UNEXPECTED_EOM",
        "ZLIB_EXTRA_DATA",
        "ZLIB_BTYPE",
        "ZLIB_UNDEFINED_SYMBOL",
        "ZLIB_UNDEFINED_DISTANCE_S",
        "ZLIB_CODE_LENGTH_SYMBOL",
        "ZLIB_VHB_ILLEGAL_FETCH",
        "ZLIB_UNCOMPRESSED_LEN",
        "ZLIB_LIMIT_REACHED",
        "ZLIB_CHECKSUM_MISMATCH0",
        "ODMA0_AXI_SLVERR",
        "ODMA0_AXI_DECERR",
        "0x28 Reserved",
        "ODMA1_AXI_SLVERR",
        "ODMA1_AXI_DECERR",
};
void ccp_log_error(struct ccp_device *d, unsigned int e)
{
        if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
                return;

        if (e < ARRAY_SIZE(ccp_error_codes))
                dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
        else
                dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
}
/* List of CCPs, CCP count, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit-lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);

/* Round-robin counter */
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;
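/* ccp_rr always points at the unit that the next ccp_get_device() call
 * will hand out. It is advanced under ccp_rr_lock, and it is initialized,
 * re-aimed, or cleared under the write side of ccp_unit_lock by
 * ccp_add_device() and ccp_del_device() below.
 */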
/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available
 * for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
        unsigned long flags;

        write_lock_irqsave(&ccp_unit_lock, flags);
        list_add_tail(&ccp->entry, &ccp_units);
        if (!ccp_rr)
                /* We already have the list lock (we're first) so this
                 * pointer can't change on us. Set its initial value.
                 */
                ccp_rr = ccp;
        write_unlock_irqrestore(&ccp_unit_lock, flags);
}
/**
 * ccp_del_device - remove a CCP device from the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Remove this unit from the list of devices. If the next device
 * up for use is this one, adjust the pointer. If this is the last
 * device, NULL the pointer.
 */
void ccp_del_device(struct ccp_device *ccp)
{
        unsigned long flags;

        write_lock_irqsave(&ccp_unit_lock, flags);
        if (ccp_rr == ccp) {
                /* ccp_unit_lock is read/write; any read access
                 * will be suspended while we make changes to the
                 * list and RR pointer.
                 */
                if (list_is_last(&ccp_rr->entry, &ccp_units))
                        ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
                                                  entry);
                else
                        ccp_rr = list_next_entry(ccp_rr, entry);
        }
        list_del(&ccp->entry);
        if (list_empty(&ccp_units))
                ccp_rr = NULL;
        write_unlock_irqrestore(&ccp_unit_lock, flags);
}
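/* Note the ordering in ccp_del_device(): the RR pointer is moved off the
 * departing unit before list_del() so that it never dangles, and it is
 * only set back to NULL once the unit list is empty.
 */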
int ccp_register_rng(struct ccp_device *ccp)
{
        int ret = 0;

        dev_dbg(ccp->dev, "Registering RNG...\n");
        /* Register an RNG */
        ccp->hwrng.name = ccp->rngname;
        ccp->hwrng.read = ccp_trng_read;
        ret = hwrng_register(&ccp->hwrng);
        if (ret)
                dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);

        return ret;
}
void ccp_unregister_rng(struct ccp_device *ccp)
{
        if (ccp->hwrng.name)
                hwrng_unregister(&ccp->hwrng);
}
static struct ccp_device *ccp_get_device(void)
{
        unsigned long flags;
        struct ccp_device *dp = NULL;

        /* We round-robin through the unit list.
         * The (ccp_rr) pointer refers to the next unit to use.
         */
        read_lock_irqsave(&ccp_unit_lock, flags);
        if (!list_empty(&ccp_units)) {
                spin_lock(&ccp_rr_lock);
                dp = ccp_rr;
                if (list_is_last(&ccp_rr->entry, &ccp_units))
                        ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
                                                  entry);
                else
                        ccp_rr = list_next_entry(ccp_rr, entry);
                spin_unlock(&ccp_rr_lock);
        }
        read_unlock_irqrestore(&ccp_unit_lock, flags);

        return dp;
}
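/* ccp_get_device() spreads unbound commands across all registered units
 * in round-robin order. A caller that must target a specific unit instead
 * sets cmd->ccp before calling ccp_enqueue_cmd() (see below).
 */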
/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
        unsigned long flags;
        int ret;

        read_lock_irqsave(&ccp_unit_lock, flags);
        ret = list_empty(&ccp_units);
        read_unlock_irqrestore(&ccp_unit_lock, flags);

        return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);
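/* Illustrative caller sketch (not code from this file): because a zero
 * return means at least one unit is registered, a consumer can gate its
 * own setup on it, e.g.
 *
 *      if (ccp_present())
 *              return -ENODEV;
 */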
/**
 * ccp_version - get the version of the CCP device
 *
 * Returns the version from the first unit on the list;
 * otherwise zero if no CCP device is present
 */
unsigned int ccp_version(void)
{
        struct ccp_device *dp;
        unsigned long flags;
        int ret = 0;

        read_lock_irqsave(&ccp_unit_lock, flags);
        if (!list_empty(&ccp_units)) {
                dp = list_first_entry(&ccp_units, struct ccp_device, entry);
                ret = dp->vdata->version;
        }
        read_unlock_irqrestore(&ccp_unit_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(ccp_version);
/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd is
 * only queued if the CCP_CMD_MAY_BACKLOG flag is set, and queueing
 * it results in a return code of -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
        struct ccp_device *ccp;
        unsigned long flags;
        unsigned int i;
        int ret;

        /* Some commands might need to be sent to a specific device */
        ccp = cmd->ccp ? cmd->ccp : ccp_get_device();

        if (!ccp)
                return -ENODEV;

        /* Caller must supply a callback routine */
        if (!cmd->callback)
                return -EINVAL;

        cmd->ccp = ccp;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        i = ccp->cmd_q_count;

        if (ccp->cmd_count >= MAX_CMD_QLEN) {
                ret = -EBUSY;
                if (cmd->flags & CCP_CMD_MAY_BACKLOG)
                        list_add_tail(&cmd->entry, &ccp->backlog);
        } else {
                ret = -EINPROGRESS;
                ccp->cmd_count++;
                list_add_tail(&cmd->entry, &ccp->cmd);

                /* Find an idle queue */
                if (!ccp->suspending) {
                        for (i = 0; i < ccp->cmd_q_count; i++) {
                                if (ccp->cmd_q[i].active)
                                        continue;

                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);

        return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
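/* Illustrative caller pattern (a sketch, not code from this file; the
 * names my_callback and my_ctx are hypothetical): since processing is
 * asynchronous, the only "queued" results are -EINPROGRESS and, with
 * CCP_CMD_MAY_BACKLOG set, -EBUSY; any other return means the callback
 * will never be invoked:
 *
 *      cmd->flags = CCP_CMD_MAY_BACKLOG;
 *      cmd->callback = my_callback;
 *      cmd->data = my_ctx;
 *      ret = ccp_enqueue_cmd(cmd);
 *      if (ret != -EINPROGRESS && ret != -EBUSY)
 *              return ret;
 */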
static void ccp_do_cmd_backlog(struct work_struct *work)
{
        struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
        struct ccp_device *ccp = cmd->ccp;
        unsigned long flags;
        unsigned int i;

        cmd->callback(cmd->data, -EINPROGRESS);

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        ccp->cmd_count++;
        list_add_tail(&cmd->entry, &ccp->cmd);

        /* Find an idle queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                if (ccp->cmd_q[i].active)
                        continue;

                break;
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);
}
static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
        struct ccp_device *ccp = cmd_q->ccp;
        struct ccp_cmd *cmd = NULL;
        struct ccp_cmd *backlog = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        cmd_q->active = 0;

        if (ccp->suspending) {
                cmd_q->suspended = 1;

                spin_unlock_irqrestore(&ccp->cmd_lock, flags);
                wake_up_interruptible(&ccp->suspend_queue);

                return NULL;
        }

        if (ccp->cmd_count) {
                cmd_q->active = 1;

                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);

                ccp->cmd_count--;
        }

        if (!list_empty(&ccp->backlog)) {
                backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
                                           entry);
                list_del(&backlog->entry);
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        if (backlog) {
                INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
                schedule_work(&backlog->work);
        }

        return cmd;
}
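/* At most one backlogged cmd is promoted per dequeue, and the promotion
 * is deferred to a workqueue so the -EINPROGRESS notification runs in
 * process context without cmd_lock held.
 */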
static void ccp_do_cmd_complete(unsigned long data)
{
        struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
        struct ccp_cmd *cmd = tdata->cmd;

        cmd->callback(cmd->data, cmd->ret);

        complete(&tdata->completion);
}
/**
 * ccp_cmd_queue_thread - create a kernel thread to manage a CCP queue
 *
 * @data: thread-specific data
 */
int ccp_cmd_queue_thread(void *data)
{
        struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
        struct ccp_cmd *cmd;
        struct ccp_tasklet_data tdata;
        struct tasklet_struct tasklet;

        tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();

                set_current_state(TASK_INTERRUPTIBLE);

                cmd = ccp_dequeue_cmd(cmd_q);
                if (!cmd)
                        continue;

                __set_current_state(TASK_RUNNING);

                /* Execute the command */
                cmd->ret = ccp_run_cmd(cmd_q, cmd);

                /* Schedule the completion callback */
                tdata.cmd = cmd;
                init_completion(&tdata.completion);
                tasklet_schedule(&tasklet);
                wait_for_completion(&tdata.completion);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}
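/* Completion callbacks are bounced through the tasklet, so they run in
 * softirq (atomic) context rather than directly in the kthread, and the
 * wait_for_completion() above ensures at most one callback per queue is
 * in flight at a time.
 */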
/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @sp: sp_device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct sp_device *sp)
{
        struct device *dev = sp->dev;
        struct ccp_device *ccp;

        ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
        if (!ccp)
                return NULL;
        ccp->dev = dev;
        ccp->sp = sp;
        ccp->axcache = sp->axcache;

        INIT_LIST_HEAD(&ccp->cmd);
        INIT_LIST_HEAD(&ccp->backlog);

        spin_lock_init(&ccp->cmd_lock);
        mutex_init(&ccp->req_mutex);
        mutex_init(&ccp->sb_mutex);
        ccp->sb_count = KSB_COUNT;
        ccp->sb_start = 0;

        /* Initialize the wait queues */
        init_waitqueue_head(&ccp->sb_queue);
        init_waitqueue_head(&ccp->suspend_queue);

        snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord);
        snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord);

        return ccp;
}
int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
        struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
        u32 trng_value;
        int len = min_t(int, sizeof(trng_value), max);

        /* Locking is provided by the caller so we can update device
         * hwrng-related fields safely
         */
        trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
        if (!trng_value) {
                /* Zero is returned if no data is available or if a
                 * bad-entropy error is present. Assume an error if
                 * we exceed TRNG_RETRIES reads of zero.
                 */
                if (ccp->hwrng_retries++ > TRNG_RETRIES)
                        return -EIO;

                return 0;
        }

        /* Reset the counter and save the rng value */
        ccp->hwrng_retries = 0;
        memcpy(data, &trng_value, len);

        return len;
}
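/* The hw_random core serializes calls into ccp_trng_read(), which is why
 * hwrng_retries needs no locking here. Once ccp_register_rng() has run,
 * the TRNG is available through the hw_random framework (e.g. /dev/hwrng
 * when this device is the currently selected rng).
 */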
bool ccp_queues_suspended(struct ccp_device *ccp)
{
        unsigned int suspended = 0;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].suspended)
                        suspended++;

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        return ccp->cmd_q_count == suspended;
}
int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
{
        struct ccp_device *ccp = sp->ccp_data;
        unsigned long flags;
        unsigned int i;

        /* If there's no device there's nothing to do */
        if (!ccp)
                return 0;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        ccp->suspending = 1;

        /* Wake all the queue kthreads to prepare for suspend */
        for (i = 0; i < ccp->cmd_q_count; i++)
                wake_up_process(ccp->cmd_q[i].kthread);

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* Wait for all queue kthreads to say they're done */
        while (!ccp_queues_suspended(ccp))
                wait_event_interruptible(ccp->suspend_queue,
                                         ccp_queues_suspended(ccp));

        return 0;
}
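/* Suspend handshake: setting ccp->suspending and waking every queue
 * kthread makes each thread fall into ccp_dequeue_cmd(), mark itself
 * suspended, and signal suspend_queue; ccp_dev_suspend() then waits until
 * ccp_queues_suspended() reports all queues parked.
 */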
int ccp_dev_resume(struct sp_device *sp)
{
        struct ccp_device *ccp = sp->ccp_data;
        unsigned long flags;
        unsigned int i;

        /* If there's no device there's nothing to do */
        if (!ccp)
                return 0;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        ccp->suspending = 0;

        /* Wake up all the kthreads */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                ccp->cmd_q[i].suspended = 0;
                wake_up_process(ccp->cmd_q[i].kthread);
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        return 0;
}
int ccp_dev_init(struct sp_device *sp)
{
        struct device *dev = sp->dev;
        struct ccp_device *ccp;
        int ret;

        ret = -ENOMEM;
        ccp = ccp_alloc_struct(sp);
        if (!ccp)
                goto e_err;
        sp->ccp_data = ccp;

        ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata;
        if (!ccp->vdata || !ccp->vdata->version) {
                ret = -ENODEV;
                dev_err(dev, "missing driver data\n");
                goto e_err;
        }

        ccp->use_tasklet = sp->use_tasklet;

        ccp->io_regs = sp->io_map + ccp->vdata->offset;
        if (ccp->vdata->setup)
                ccp->vdata->setup(ccp);

        ret = ccp->vdata->perform->init(ccp);
        if (ret)
                goto e_err;

        dev_notice(dev, "ccp enabled\n");

        return 0;

e_err:
        sp->ccp_data = NULL;

        dev_notice(dev, "ccp initialization failed\n");

        return ret;
}
void ccp_dev_destroy(struct sp_device *sp)
{
        struct ccp_device *ccp = sp->ccp_data;

        if (!ccp)
                return;

        ccp->vdata->perform->destroy(ccp);
}