// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>

#include "cc_driver.h"
#include "cc_request_mgr.h"
#include "cc_buffer_mgr.h"
#include "cc_debugfs.h"
#include "cc_cipher.h"
#include "cc_aead.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"
#include "cc_pm.h"
#include "cc_fips.h"

bool cc_dump_desc;
module_param_named(dump_desc, cc_dump_desc, bool, 0600);
MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid");

bool cc_dump_bytes;
module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");

static bool cc_sec_disable;
module_param_named(sec_disable, cc_sec_disable, bool, 0600);
MODULE_PARM_DESC(cc_sec_disable, "Disable security functions");

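/*
 * Usage sketch (assuming the built module is named ccree.ko, per the
 * driver name below): the knobs may be given at load time or toggled
 * later through sysfs, e.g.:
 *
 *	modprobe ccree dump_desc=1 dump_bytes=1
 *	echo 1 > /sys/module/ccree/parameters/sec_disable
 */
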
/*
 * Hardware identification data, selected via the devicetree compatible
 * string: pre-713 parts are identified by a signature register, the
 * 713/703 by their component/peripheral ID registers.
 */
struct cc_hw_data {
	char *name;
	enum cc_hw_rev rev;
	u32 sig;
	u32 cidr_0123;
	u32 pidr_0124;
	int std_bodies;
};

#define CC_NUM_IDRS 4
#define CC_HW_RESET_LOOP_COUNT 10

/* Note: PIDR3 holds CMOD/Rev so it is ignored for HW identification purposes */
static const u32 pidr_0124_offsets[CC_NUM_IDRS] = {
	CC_REG(PERIPHERAL_ID_0), CC_REG(PERIPHERAL_ID_1),
	CC_REG(PERIPHERAL_ID_2), CC_REG(PERIPHERAL_ID_4)
};

static const u32 cidr_0123_offsets[CC_NUM_IDRS] = {
	CC_REG(COMPONENT_ID_0), CC_REG(COMPONENT_ID_1),
	CC_REG(COMPONENT_ID_2), CC_REG(COMPONENT_ID_3)
};

/* Hardware revisions defs. */

/* The 703 is an OSCCA-only variant of the 713 */
static const struct cc_hw_data cc703_hw = {
	.name = "703", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
	.pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_OSCCA
};

static const struct cc_hw_data cc713_hw = {
	.name = "713", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
	.pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_ALL
};

static const struct cc_hw_data cc712_hw = {
	.name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U,
	.std_bodies = CC_STD_ALL
};

static const struct cc_hw_data cc710_hw = {
	.name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U,
	.std_bodies = CC_STD_ALL
};

static const struct cc_hw_data cc630p_hw = {
	.name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U,
	.std_bodies = CC_STD_ALL
};

static const struct of_device_id arm_ccree_dev_of_match[] = {
	{ .compatible = "arm,cryptocell-703-ree", .data = &cc703_hw },
	{ .compatible = "arm,cryptocell-713-ree", .data = &cc713_hw },
	{ .compatible = "arm,cryptocell-712-ree", .data = &cc712_hw },
	{ .compatible = "arm,cryptocell-710-ree", .data = &cc710_hw },
	{ .compatible = "arm,cryptocell-630p-ree", .data = &cc630p_hw },
	{}
};
MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);

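/*
 * For illustration, a minimal devicetree node this match table would bind
 * against. The unit address, register window size and interrupt specifier
 * are made-up example values; only the compatible string comes from the
 * table above.
 *
 *	crypto@60000000 {
 *		compatible = "arm,cryptocell-712-ree";
 *		reg = <0x60000000 0x10000>;
 *		interrupts = <0 67 4>;
 *	};
 */
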
static void init_cc_cache_params(struct cc_drvdata *drvdata)
{
	struct device *dev = drvdata_to_dev(drvdata);
	u32 cache_params, ace_const, val, mask;

	/* compute CC_AXIM_CACHE_PARAMS */
	cache_params = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
	dev_dbg(dev, "Cache params previous: 0x%08X\n", cache_params);

	/* non-cached or write-back, write-allocate */
	val = drvdata->coherent ? 0xb : 0x2;

	mask = CC_GENMASK(CC_AXIM_CACHE_PARAMS_AWCACHE);
	cache_params &= ~mask;
	cache_params |= FIELD_PREP(mask, val);

	mask = CC_GENMASK(CC_AXIM_CACHE_PARAMS_AWCACHE_LAST);
	cache_params &= ~mask;
	cache_params |= FIELD_PREP(mask, val);

	mask = CC_GENMASK(CC_AXIM_CACHE_PARAMS_ARCACHE);
	cache_params &= ~mask;
	cache_params |= FIELD_PREP(mask, val);

	drvdata->cache_params = cache_params;

	dev_dbg(dev, "Cache params current: 0x%08X\n", cache_params);

	/* The ACE parameters only exist on CC712 and later */
	if (drvdata->hw_rev <= CC_HW_REV_710)
		return;

	/* compute CC_AXIM_ACE_CONST */
	ace_const = cc_ioread(drvdata, CC_REG(AXIM_ACE_CONST));
	dev_dbg(dev, "ACE-const previous: 0x%08X\n", ace_const);

	/* system or outer-shareable */
	val = drvdata->coherent ? 0x2 : 0x3;

	mask = CC_GENMASK(CC_AXIM_ACE_CONST_ARDOMAIN);
	ace_const &= ~mask;
	ace_const |= FIELD_PREP(mask, val);

	mask = CC_GENMASK(CC_AXIM_ACE_CONST_AWDOMAIN);
	ace_const &= ~mask;
	ace_const |= FIELD_PREP(mask, val);

	dev_dbg(dev, "ACE-const current: 0x%08X\n", ace_const);

	drvdata->ace_const = ace_const;
}

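/*
 * cc_read_idr() - Assemble one 32-bit ID word from four ID registers.
 *
 * Each CIDR/PIDR register holds a single significant byte. The bytes are
 * collected in register order and reinterpreted through the union as a
 * little-endian 32-bit value, so the result can be compared directly
 * against the cidr_0123/pidr_0124 constants above.
 */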
static u32 cc_read_idr(struct cc_drvdata *drvdata, const u32 *idr_offsets)
{
	int i;
	union {
		u8 regs[CC_NUM_IDRS];
		__le32 val;
	} idr;

	for (i = 0; i < CC_NUM_IDRS; ++i)
		idr.regs[i] = cc_ioread(drvdata, idr_offsets[i]);

	return le32_to_cpu(idr.val);
}

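/*
 * __dump_byte_array() - Debug helper backing the dump_bytes knob: hex-dump
 * a buffer to the kernel log with a "name[len]: " prefix on each line.
 */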
void __dump_byte_array(const char *name, const u8 *buf, size_t len)
{
	char prefix[64];

	if (!buf)
		return;

	snprintf(prefix, sizeof(prefix), "%s[%zu]: ", name, len);

	print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, buf,
		       len, false);
}

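/*
 * cc_isr() - Main interrupt handler. The IRQ line may be shared, so a
 * suspended device or a zero IRR means the interrupt is not ours. Pending
 * causes are acknowledged first, then dispatched: completion and TEE FIPS
 * interrupts are masked here and unmasked again by their deferred
 * handlers, while AXI errors are only logged.
 */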
static irqreturn_t cc_isr(int irq, void *dev_id)
{
	struct cc_drvdata *drvdata = (struct cc_drvdata *)dev_id;
	struct device *dev = drvdata_to_dev(drvdata);
	u32 irr;
	u32 imr;

	/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
	/* if the driver is suspended return; probably a shared interrupt */
	if (pm_runtime_suspended(dev))
		return IRQ_NONE;

	/* read the interrupt status */
	irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
	dev_dbg(dev, "Got IRR=0x%08X\n", irr);

	if (irr == 0) /* Probably shared interrupt line */
		return IRQ_NONE;

	imr = cc_ioread(drvdata, CC_REG(HOST_IMR));

	/* clear interrupt - must be before processing events */
	cc_iowrite(drvdata, CC_REG(HOST_ICR), irr);

	drvdata->irq = irr;
	/* Completion interrupt - most probable */
	if (irr & drvdata->comp_mask) {
		/* Mask all completion interrupts - will be unmasked in
		 * deferred service handler
		 */
		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | drvdata->comp_mask);
		irr &= ~drvdata->comp_mask;
		complete_request(drvdata);
	}
#ifdef CONFIG_CRYPTO_FIPS
	/* TEE FIPS interrupt */
	if (irr & CC_GPR0_IRQ_MASK) {
		/* Mask interrupt - will be unmasked in deferred service
		 * handler
		 */
		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_GPR0_IRQ_MASK);
		irr &= ~CC_GPR0_IRQ_MASK;
		fips_handler(drvdata);
	}
#endif
	/* AXI error interrupt */
	if (irr & CC_AXI_ERR_IRQ_MASK) {
		u32 axi_err;

		/* Read the AXI error ID */
		axi_err = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR));
		dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n",
			axi_err);

		irr &= ~CC_AXI_ERR_IRQ_MASK;
	}

	if (irr) {
		dev_dbg_ratelimited(dev, "IRR includes unknown cause bits (0x%08X)\n",
				    irr);
		/* Just warning */
	}

	return IRQ_HANDLED;
}

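/*
 * cc_wait_for_reset_completion() - Poll for HW reset completion.
 *
 * Only the cc7x3 parts expose an indication (NVM_IS_IDLE); the poll is
 * bounded to CC_HW_RESET_LOOP_COUNT iterations and yields the CPU
 * between reads.
 */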
bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata)
{
	unsigned int val;
	unsigned int i;

	/* 712/710/630 have no reset completion indication, always return true */
	if (drvdata->hw_rev <= CC_HW_REV_712)
		return true;

	for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
		/* in cc7x3 NVM_IS_IDLE indicates that CC reset is
		 * completed and the device is fully functional
		 */
		val = cc_ioread(drvdata, CC_REG(NVM_IS_IDLE));
		if (val & CC_NVM_IS_IDLE_MASK) {
			/* HW indicates reset completed */
			return true;
		}
		/* allow other processes to be scheduled on this processor */
		schedule();
	}
	/* reset not completed */
	return false;
}

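/*
 * init_cc_regs() - Bring the host interface into a known state: clear any
 * pending interrupts, unmask only the causes handled by cc_isr() and
 * program the AXI cache/ACE parameters computed by init_cc_cache_params().
 * Also used on the resume-from-suspend path.
 */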
int init_cc_regs(struct cc_drvdata *drvdata)
{
	unsigned int val;
	struct device *dev = drvdata_to_dev(drvdata);

	/* Unmask all AXI interrupt sources in the AXIM_CFG register */
	/* AXI interrupt config is obsolete starting with cc7x3 */
	if (drvdata->hw_rev <= CC_HW_REV_712) {
		val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
		cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
		dev_dbg(dev, "AXIM_CFG=0x%08X\n",
			cc_ioread(drvdata, CC_REG(AXIM_CFG)));
	}

	/* Clear all pending interrupts */
	val = cc_ioread(drvdata, CC_REG(HOST_IRR));
	dev_dbg(dev, "IRR=0x%08X\n", val);
	cc_iowrite(drvdata, CC_REG(HOST_ICR), val);

	/* Unmask relevant interrupt causes */
	val = drvdata->comp_mask | CC_AXI_ERR_IRQ_MASK;

	if (drvdata->hw_rev >= CC_HW_REV_712)
		val |= CC_GPR0_IRQ_MASK;

	cc_iowrite(drvdata, CC_REG(HOST_IMR), ~val);

	cc_iowrite(drvdata, CC_REG(AXIM_CACHE_PARAMS), drvdata->cache_params);
	if (drvdata->hw_rev >= CC_HW_REV_712)
		cc_iowrite(drvdata, CC_REG(AXIM_ACE_CONST), drvdata->ace_const);

	return 0;
}

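/*
 * init_cc_resources() - Probe-time bring-up: map registers, negotiate the
 * DMA mask, enable the clock and runtime PM, verify the silicon matches
 * the devicetree-selected HW data, then initialize the internal managers
 * and register the crypto algorithms. Failures unwind through the
 * post_*_err labels at the bottom.
 */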
static int init_cc_resources(struct platform_device *plat_dev)
{
	struct resource *req_mem_cc_regs = NULL;
	struct cc_drvdata *new_drvdata;
	struct device *dev = &plat_dev->dev;
	struct device_node *np = dev->of_node;
	u32 val, hw_rev_pidr, sig_cidr;
	u64 dma_mask;
	const struct cc_hw_data *hw_rev;
	struct clk *clk;
	int irq;
	int rc = 0;

	new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
	if (!new_drvdata)
		return -ENOMEM;

	hw_rev = of_device_get_match_data(dev);
	new_drvdata->hw_rev_name = hw_rev->name;
	new_drvdata->hw_rev = hw_rev->rev;
	new_drvdata->std_bodies = hw_rev->std_bodies;

	if (hw_rev->rev >= CC_HW_REV_712) {
		new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP);
		new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_712);
		new_drvdata->ver_offset = CC_REG(HOST_VERSION_712);
	} else {
		new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8);
		new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_630);
		new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
	}

	new_drvdata->comp_mask = CC_COMP_IRQ_MASK;

	platform_set_drvdata(plat_dev, new_drvdata);
	new_drvdata->plat_dev = plat_dev;

	clk = devm_clk_get_optional(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "Error getting clock\n");
	new_drvdata->clk = clk;

	new_drvdata->coherent = of_dma_is_coherent(np);

	/* Get device resources */
	/* First CC registers space */
	req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	/* Map registers space */
	new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
	if (IS_ERR(new_drvdata->cc_base))
		return PTR_ERR(new_drvdata->cc_base);

	dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
		req_mem_cc_regs);
	dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
		&req_mem_cc_regs->start, new_drvdata->cc_base);

	/* Then IRQ */
	irq = platform_get_irq(plat_dev, 0);
	if (irq < 0)
		return irq;

	init_completion(&new_drvdata->hw_queue_avail);

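	/*
	 * Negotiate the widest coherent DMA mask the platform supports,
	 * starting from the HW limit (DMA_BIT_MASK_LEN bits) and dropping
	 * one address bit at a time until dma_set_coherent_mask() succeeds.
	 */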
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

	dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
	while (dma_mask > 0x7fffffffUL) {
		if (dma_supported(dev, dma_mask)) {
			rc = dma_set_coherent_mask(dev, dma_mask);
			if (!rc)
				break;
		}
		dma_mask >>= 1;
	}

	if (rc) {
		dev_err(dev, "Failed in dma_set_mask, mask=%llx\n", dma_mask);
		return rc;
	}

	rc = clk_prepare_enable(new_drvdata->clk);
	if (rc) {
		dev_err(dev, "Failed to enable clock\n");
		return rc;
	}

	new_drvdata->sec_disabled = cc_sec_disable;

	pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	rc = pm_runtime_get_sync(dev);
	if (rc < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed: %d\n", rc);
		goto post_pm_err;
	}

	/* Wait for Cryptocell reset completion */
	if (!cc_wait_for_reset_completion(new_drvdata)) {
		dev_err(dev, "Cryptocell reset not completed\n");
	}

	if (hw_rev->rev <= CC_HW_REV_712) {
		/* Verify correct mapping */
		val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
		if (val != hw_rev->sig) {
			dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
				val, hw_rev->sig);
			rc = -EINVAL;
			goto post_pm_err;
		}
		sig_cidr = val;
		hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset);
	} else {
		/* Verify correct mapping */
		val = cc_read_idr(new_drvdata, pidr_0124_offsets);
		if (val != hw_rev->pidr_0124) {
			dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n",
				val, hw_rev->pidr_0124);
			rc = -EINVAL;
			goto post_pm_err;
		}
		hw_rev_pidr = val;

		val = cc_read_idr(new_drvdata, cidr_0123_offsets);
		if (val != hw_rev->cidr_0123) {
			dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n",
				val, hw_rev->cidr_0123);
			rc = -EINVAL;
			goto post_pm_err;
		}
		sig_cidr = val;

		/* Check HW engine configuration */
		val = cc_ioread(new_drvdata, CC_REG(HOST_REMOVE_INPUT_PINS));
		switch (val) {
		case CC_PINS_FULL:
			/* This is fine */
			break;
		case CC_PINS_SLIM:
			if (new_drvdata->std_bodies & CC_STD_NIST) {
				dev_warn(dev, "703 mode forced due to HW configuration.\n");
				new_drvdata->std_bodies = CC_STD_OSCCA;
			}
			break;
		default:
			dev_err(dev, "Unsupported engines configuration.\n");
			rc = -EINVAL;
			goto post_pm_err;
		}

		/* Check security disable state */
		val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
		val &= CC_SECURITY_DISABLED_MASK;
		new_drvdata->sec_disabled |= !!val;

		if (!new_drvdata->sec_disabled) {
			new_drvdata->comp_mask |= CC_CPP_SM4_ABORT_MASK;
			if (new_drvdata->std_bodies & CC_STD_NIST)
				new_drvdata->comp_mask |= CC_CPP_AES_ABORT_MASK;
		}
	}

	if (new_drvdata->sec_disabled)
		dev_info(dev, "Security Disabled mode is in effect. Security functions disabled.\n");

	/* Display HW versions */
	dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%08X, Driver version %s\n",
		 hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
	/* register the driver isr function */
	rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "ccree",
			      new_drvdata);
	if (rc) {
		dev_err(dev, "Could not register to interrupt %d\n", irq);
		goto post_pm_err;
	}
	dev_dbg(dev, "Registered to IRQ: %d\n", irq);

	init_cc_cache_params(new_drvdata);

	rc = init_cc_regs(new_drvdata);
	if (rc) {
		dev_err(dev, "init_cc_regs failed\n");
		goto post_pm_err;
	}

	rc = cc_debugfs_init(new_drvdata);
	if (rc) {
		dev_err(dev, "Failed registering debugfs interface\n");
		goto post_regs_err;
	}

	rc = cc_fips_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_fips_init failed 0x%x\n", rc);
		goto post_debugfs_err;
	}

	rc = cc_sram_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_sram_mgr_init failed\n");
		goto post_fips_init_err;
	}

	new_drvdata->mlli_sram_addr =
		cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
	if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
		rc = -ENOMEM;
		goto post_fips_init_err;
	}

	rc = cc_req_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_req_mgr_init failed\n");
		goto post_fips_init_err;
	}

	rc = cc_buffer_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_buffer_mgr_init failed\n");
		goto post_req_mgr_err;
	}

	/* Allocate crypto algs */
	rc = cc_cipher_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_cipher_alloc failed\n");
		goto post_buf_mgr_err;
	}

	/* hash must be allocated before aead since hash exports APIs */
	rc = cc_hash_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_hash_alloc failed\n");
		goto post_cipher_err;
	}

	rc = cc_aead_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_aead_alloc failed\n");
		goto post_hash_err;
	}

	/* If we got here and FIPS mode is enabled
	 * it means all FIPS tests passed, so let
	 * the TEE know we're good.
	 */
	cc_set_ree_fips_status(new_drvdata, true);

	pm_runtime_put(dev);
	return 0;

post_hash_err:
	cc_hash_free(new_drvdata);
post_cipher_err:
	cc_cipher_free(new_drvdata);
post_buf_mgr_err:
	cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:
	cc_req_mgr_fini(new_drvdata);
post_fips_init_err:
	cc_fips_fini(new_drvdata);
post_debugfs_err:
	cc_debugfs_fini(new_drvdata);
post_regs_err:
	fini_cc_regs(new_drvdata);
post_pm_err:
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	clk_disable_unprepare(new_drvdata->clk);
	return rc;
}

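/*
 * fini_cc_regs() - Counterpart of init_cc_regs(): mask every interrupt
 * source so the HW stays quiet across teardown and suspend.
 */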
void fini_cc_regs(struct cc_drvdata *drvdata)
{
	/* Mask all interrupts */
	cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
}

static void cleanup_cc_resources(struct platform_device *plat_dev)
{
	struct device *dev = &plat_dev->dev;
	struct cc_drvdata *drvdata =
		(struct cc_drvdata *)platform_get_drvdata(plat_dev);

	cc_aead_free(drvdata);
	cc_hash_free(drvdata);
	cc_cipher_free(drvdata);
	cc_buffer_mgr_fini(drvdata);
	cc_req_mgr_fini(drvdata);
	cc_fips_fini(drvdata);
	cc_debugfs_fini(drvdata);
	fini_cc_regs(drvdata);
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	clk_disable_unprepare(drvdata->clk);
}

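/*
 * cc_get_default_hash_len() - The hash length-counter field grew between
 * HW generations; pre-712 parts use the smaller 630-era size.
 */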
unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata)
{
	if (drvdata->hw_rev >= CC_HW_REV_712)
		return HASH_LEN_SIZE_712;

	return HASH_LEN_SIZE_630;
}

static int ccree_probe(struct platform_device *plat_dev)
{
	int rc;
	struct device *dev = &plat_dev->dev;

	/* Map registers space */
	rc = init_cc_resources(plat_dev);
	if (rc)
		return rc;

	dev_info(dev, "ARM ccree device initialized\n");

	return 0;
}

static int ccree_remove(struct platform_device *plat_dev)
{
	struct device *dev = &plat_dev->dev;

	dev_dbg(dev, "Releasing ccree resources...\n");

	cleanup_cc_resources(plat_dev);

	dev_info(dev, "ARM ccree device terminated\n");

	return 0;
}

static struct platform_driver ccree_driver = {
	.driver = {
		.name = "ccree",
		.of_match_table = arm_ccree_dev_of_match,
#ifdef CONFIG_PM
		.pm = &ccree_pm,
#endif
	},
	.probe = ccree_probe,
	.remove = ccree_remove,
};

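/*
 * The global debugfs root must exist before the platform driver is
 * registered, since probe can run (and create per-device debugfs entries)
 * from within platform_driver_register(). ccree_exit() reverses the order.
 */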
static int __init ccree_init(void)
{
	cc_debugfs_global_init();

	return platform_driver_register(&ccree_driver);
}
module_init(ccree_init);

static void __exit ccree_exit(void)
{
	platform_driver_unregister(&ccree_driver);
	cc_debugfs_global_fini();
}
module_exit(ccree_exit);

/* Module description */
MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_AUTHOR("ARM");
MODULE_LICENSE("GPL v2");