/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"
MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");
struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};
/* Human-readable error strings */
#define CCP_MAX_ERROR_CODE	64
static char *ccp_error_codes[] = {
	"ILLEGAL_FUNCTION_TYPE",
	"ILLEGAL_FUNCTION_MODE",
	"ILLEGAL_FUNCTION_ENCRYPT",
	"ILLEGAL_FUNCTION_SIZE",
	"Zlib_MISSING_INIT_EOM",
	"ILLEGAL_FUNCTION_RSVD",
	"ILLEGAL_BUFFER_LENGTH",
	"Zlib_ILLEGAL_MULTI_QUEUE",
	"Zlib_ILLEGAL_JOBID_CHANGE",
	"IDMA1_AXI_SLAVE_FAULT",
	"ZLIB_UNEXPECTED_EOM",
	"ZLIB_UNDEFINED_SYMBOL",
	"ZLIB_UNDEFINED_DISTANCE_S",
	"ZLIB_CODE_LENGTH_SYMBOL",
	"ZLIB_VHB_ILLEGAL_FETCH",
	"ZLIB_UNCOMPRESSED_LEN",
	"ZLIB_CHECKSUM_MISMATCH0",
};
void ccp_log_error(struct ccp_device *d, unsigned int e)
{
	if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
		return;

	if (e < ARRAY_SIZE(ccp_error_codes))
		dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
	else
		dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
}
/* List of CCPs, CCP count, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);

/* Round-robin next-CCP pointer */
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;
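
/* A minimal illustration (not part of the driver) of the lock ordering
 * documented above: take ccp_unit_lock for reading first, and only then
 * ccp_rr_lock to look at the round-robin pointer. ccp_peek_next_unit()
 * is an invented name; ccp_get_device() below follows the same pattern
 * while also advancing the pointer.
 */
static struct ccp_device * __maybe_unused ccp_peek_next_unit(void)
{
	struct ccp_device *dp = NULL;
	unsigned long flags;

	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		spin_lock(&ccp_rr_lock);
		dp = ccp_rr;
		spin_unlock(&ccp_rr_lock);
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return dp;
}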
/* Ever-increasing value to produce unique unit numbers */
static atomic_t ccp_unit_ordinal;

static unsigned int ccp_increment_unit_ordinal(void)
{
	return atomic_inc_return(&ccp_unit_ordinal);
}
/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	list_add_tail(&ccp->entry, &ccp_units);
	if (!ccp_rr)
		/* We already have the list lock (we're first) so this
		 * pointer can't change on us. Set its initial value.
		 */
		ccp_rr = ccp;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}
/**
 * ccp_del_device - remove a CCP device from the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Remove this unit from the list of devices. If the next device
 * up for use is this one, adjust the pointer. If this is the last
 * device, NULL the pointer.
 */
void ccp_del_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	if (ccp_rr == ccp) {
		/* ccp_unit_lock is read/write; any read access
		 * will be suspended while we make changes to the
		 * list and RR pointer.
		 */
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units,
						  struct ccp_device, entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
	}
	list_del(&ccp->entry);
	if (list_empty(&ccp_units))
		ccp_rr = NULL;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}
int ccp_register_rng(struct ccp_device *ccp)
{
	int ret = 0;

	dev_dbg(ccp->dev, "Registering RNG...\n");
	/* Register an RNG */
	ccp->hwrng.name = ccp->rngname;
	ccp->hwrng.read = ccp_trng_read;
	ret = hwrng_register(&ccp->hwrng);
	if (ret)
		dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);

	return ret;
}

void ccp_unregister_rng(struct ccp_device *ccp)
{
	if (ccp->hwrng.name)
		hwrng_unregister(&ccp->hwrng);
}
static struct ccp_device *ccp_get_device(void)
{
	unsigned long flags;
	struct ccp_device *dp = NULL;

	/* We round-robin through the unit list.
	 * The (ccp_rr) pointer refers to the next unit to use.
	 */
	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		spin_lock(&ccp_rr_lock);
		dp = ccp_rr;
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units,
						  struct ccp_device, entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
		spin_unlock(&ccp_rr_lock);
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return dp;
}
/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&ccp_unit_lock, flags);
	ret = list_empty(&ccp_units);
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);
/**
 * ccp_version - get the version of the CCP device
 *
 * Returns the version from the first unit on the list;
 * otherwise zero if no CCP device is present
 */
unsigned int ccp_version(void)
{
	struct ccp_device *dp;
	unsigned long flags;
	int ret = 0;

	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		dp = list_first_entry(&ccp_units, struct ccp_device, entry);
		ret = dp->vdata->version;
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_version);
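
/* Usage sketch (illustrative only): how a client might gate its use of
 * the CCP on the two exported helpers above. ccp_check_support() is an
 * invented name for this example, not a driver API.
 */
static int __maybe_unused ccp_check_support(void)
{
	if (ccp_present() != 0)
		return -ENODEV;		/* no CCP units registered */

	pr_debug("CCP version %u available\n", ccp_version());
	return 0;
}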
/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd would
 * exceed the defined length of the cmd queue, -EBUSY is returned and
 * the cmd is placed on the backlog only if the CCP_CMD_MAY_BACKLOG
 * flag is set; otherwise it is not queued at all.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS, or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp;
	unsigned long flags;
	unsigned int i;
	int ret;

	/* Some commands might need to be sent to a specific device */
	ccp = cmd->ccp ? cmd->ccp : ccp_get_device();
	if (!ccp)
		return -ENODEV;

	/* Caller must supply a callback routine */
	if (!cmd->callback)
		return -EINVAL;
	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);
	i = ccp->cmd_q_count;
	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		ret = -EBUSY;
		if (cmd->flags & CCP_CMD_MAY_BACKLOG)
			list_add_tail(&cmd->entry, &ccp->backlog);
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);
		/* Find an idle queue */
		if (!ccp->suspending)
			for (i = 0; i < ccp->cmd_q_count; i++)
				if (!ccp->cmd_q[i].active)
					break;
	}
	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
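
/* Usage sketch (illustrative only): a synchronous wrapper around
 * ccp_enqueue_cmd() demonstrating the return-code/callback contract
 * described above. The ccp_sync_* names are invented for this example;
 * real callers (e.g. the ccp-crypto layer) use their own async paths.
 */
struct ccp_sync_data {
	struct completion completion;
	int err;
};

static void ccp_sync_callback(void *data, int err)
{
	struct ccp_sync_data *sdata = data;

	/* -EINPROGRESS only signals advancement out of the backlog */
	if (err == -EINPROGRESS)
		return;

	sdata->err = err;
	complete(&sdata->completion);
}

static int __maybe_unused ccp_sync_enqueue(struct ccp_cmd *cmd)
{
	struct ccp_sync_data sdata;
	int ret;

	init_completion(&sdata.completion);
	cmd->callback = ccp_sync_callback;
	cmd->data = &sdata;

	ret = ccp_enqueue_cmd(cmd);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && (cmd->flags & CCP_CMD_MAY_BACKLOG)))
		return ret;	/* the cmd was never queued */

	wait_for_completion(&sdata.completion);
	return sdata.err;
}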
static void ccp_do_cmd_backlog(struct work_struct *work)
{
	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
	struct ccp_device *ccp = cmd->ccp;
	unsigned long flags;
	unsigned int i;

	/* Tell the caller the cmd has advanced out of the backlog */
	cmd->callback(cmd->data, -EINPROGRESS);

	spin_lock_irqsave(&ccp->cmd_lock, flags);
	ccp->cmd_count++;
	list_add_tail(&cmd->entry, &ccp->cmd);

	/* Find an idle queue */
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (!ccp->cmd_q[i].active)
			break;
	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);
}
static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);
	cmd_q->active = 0;
	if (ccp->suspending) {
		cmd_q->suspended = 1;
		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);
		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;
		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}
	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}
static void ccp_do_cmd_complete(unsigned long data)
{
	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
	struct ccp_cmd *cmd = tdata->cmd;

	cmd->callback(cmd->data, cmd->ret);
	complete(&tdata->completion);
}
/**
 * ccp_cmd_queue_thread - the kernel thread that services a CCP queue
 *
 * @data: thread-specific data
 */
int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;
		__set_current_state(TASK_RUNNING);

		/* Execute the command */
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

		/* Schedule the completion callback and wait for it */
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);
	return 0;
}
/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
	struct ccp_device *ccp;

	ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
	if (!ccp)
		return NULL;
	ccp->dev = dev;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->sb_mutex);
	ccp->sb_count = KSB_COUNT;

	/* Assign a unique, human-readable name to each unit */
	ccp->ord = ccp_increment_unit_ordinal();
	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);

	return ccp;
}
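
/* Illustrative probe-time flow (a sketch; the real wiring happens in the
 * bus-specific PCI/platform code and device init paths). It shows how the
 * helpers in this file fit together. ccp_example_probe() is invented for
 * this example.
 */
static int __maybe_unused ccp_example_probe(struct device *dev)
{
	struct ccp_device *ccp;
	int ret;

	ccp = ccp_alloc_struct(dev);	/* unique name/ordinal assigned */
	if (!ccp)
		return -ENOMEM;

	/* ... bus-specific setup: map registers, request IRQs ... */

	ccp_add_device(ccp);		/* now visible to ccp_get_device() */

	ret = ccp_register_rng(ccp);	/* expose the TRNG via hwrng */
	if (ret)
		ccp_del_device(ccp);

	return ret;
}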
int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
	u32 trng_value;
	int len = min_t(int, sizeof(trng_value), max);

	/* Locking is provided by the caller so we can update device
	 * hwrng-related fields safely
	 */
	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
	if (!trng_value) {
		/* Zero is returned if no data is available or if a
		 * bad-entropy error is present. Assume an error if
		 * we exceed TRNG_RETRIES reads of zero.
		 */
		if (ccp->hwrng_retries++ > TRNG_RETRIES)
			return -EIO;
		return 0;
	}

	/* Reset the counter and save the rng value */
	ccp->hwrng_retries = 0;
	memcpy(data, &trng_value, len);

	return len;
}
bool ccp_queues_suspended(struct ccp_device *ccp)
{
	unsigned int suspended = 0;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].suspended)
			suspended++;
	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return ccp->cmd_q_count == suspended;
}
static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86
	int ret;

	ret = ccp_pci_init();
	if (ret)
		return ret;
	/* Don't leave the driver loaded if init failed */
	if (ccp_present() != 0) {
		ccp_pci_exit();
		return -ENODEV;
	}
	return 0;
#endif

#ifdef CONFIG_ARM64
	int ret;

	ret = ccp_platform_init();
	if (ret)
		return ret;
	/* Don't leave the driver loaded if init failed */
	if (ccp_present() != 0) {
		ccp_platform_exit();
		return -ENODEV;
	}
	return 0;
#endif

	return -ENODEV;
}

static void __exit ccp_mod_exit(void)
{
#ifdef CONFIG_X86
	ccp_pci_exit();
#endif
#ifdef CONFIG_ARM64
	ccp_platform_exit();
#endif
}
module_init(ccp_mod_init);
module_exit(ccp_mod_exit);