/**
 * AMCC SoC PPC4xx Crypto Driver
 *
 * Copyright (c) 2008 Applied Micro Circuits Corporation.
 * All rights reserved. James Hsiao <jhsiao@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This file implements the AMCC crypto offload Linux device driver for use
 * with the Linux CryptoAPI.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>

#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
#include "crypto4xx_trng.h"

#define PPC4XX_SEC_VERSION_STR			"0.5"

/**
 * PPC4xx Crypto Engine Initialization Routine
 */
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
{
	union ce_ring_size ring_size;
	union ce_ring_contol ring_ctrl;
	union ce_part_ring_size part_ring_size;
	union ce_io_threshold io_threshold;
	u32 rand_num;
	union ce_pe_dma_cfg pe_dma_cfg;
	u32 device_ctrl;

	writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
	/* set up PE DMA: assert reset of SG, PDR and PE, then release reset */
	pe_dma_cfg.w = 0;
	pe_dma_cfg.bf.bo_sgpd_en = 1;
	pe_dma_cfg.bf.bo_data_en = 0;
	pe_dma_cfg.bf.bo_sa_en = 1;
	pe_dma_cfg.bf.bo_pd_en = 1;
	pe_dma_cfg.bf.dynamic_sa_en = 1;
	pe_dma_cfg.bf.reset_sg = 1;
	pe_dma_cfg.bf.reset_pdr = 1;
	pe_dma_cfg.bf.reset_pe = 1;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* release reset of PE, SG and PDR */
	pe_dma_cfg.bf.pe_mode = 0;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
	writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
	ring_size.w = 0;
	ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
	ring_size.bf.ring_size = PPC4XX_NUM_PD;
	writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
	ring_ctrl.w = 0;
	writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
	device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	device_ctrl |= PPC4XX_DC_3DES_EN;
	writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
	writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
	part_ring_size.w = 0;
	part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
	part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
	writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
	writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
	io_threshold.w = 0;
	io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
	io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
	writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
	writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
	/* now enable the packet engine */
	pe_dma_cfg.bf.pe_mode = 1;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* clear all pending interrupts */
	writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
	writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
}

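/*
 * Security association (SA) management. Each tfm context keeps two SA
 * copies in DMA-coherent memory: sa_in for the inbound direction and
 * sa_out for the outbound direction. The size argument is in 32-bit
 * words, hence the "size * 4" byte counts below.
 */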
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
	ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
					&ctx->sa_in_dma_addr, GFP_ATOMIC);
	if (ctx->sa_in == NULL)
		return -ENOMEM;

	ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
					 &ctx->sa_out_dma_addr, GFP_ATOMIC);
	if (ctx->sa_out == NULL) {
		dma_free_coherent(ctx->dev->core_dev->device,
				  size * 4,
				  ctx->sa_in, ctx->sa_in_dma_addr);
		return -ENOMEM;
	}

	memset(ctx->sa_in, 0, size * 4);
	memset(ctx->sa_out, 0, size * 4);
	ctx->sa_len = size;

	return 0;
}

void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
	if (ctx->sa_in != NULL)
		dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
				  ctx->sa_in, ctx->sa_in_dma_addr);
	if (ctx->sa_out != NULL)
		dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
				  ctx->sa_out, ctx->sa_out_dma_addr);

	ctx->sa_in_dma_addr = 0;
	ctx->sa_out_dma_addr = 0;
	ctx->sa_len = 0;
}

u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
{
	ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
				sizeof(struct sa_state_record),
				&ctx->state_record_dma_addr, GFP_ATOMIC);
	if (!ctx->state_record_dma_addr)
		return -ENOMEM;
	memset(ctx->state_record, 0, sizeof(struct sa_state_record));

	return 0;
}

void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
{
	if (ctx->state_record != NULL)
		dma_free_coherent(ctx->dev->core_dev->device,
				  sizeof(struct sa_state_record),
				  ctx->state_record,
				  ctx->state_record_dma_addr);
	ctx->state_record_dma_addr = 0;
}

/**
 * alloc memory for the packet descriptor ring
 * no need to alloc buf for the ring
 * pdr_tail and pdr_head are initialized by this function
 */
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
	int i;
	struct pd_uinfo *pd_uinfo;
	dev->pdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				      &dev->pdr_pa, GFP_ATOMIC);
	if (!dev->pdr)
		return -ENOMEM;

	dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
				 GFP_KERNEL);
	if (!dev->pdr_uinfo) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr,
				  dev->pdr_pa);
		return -ENOMEM;
	}
	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
				   256 * PPC4XX_NUM_PD,
				   &dev->shadow_sa_pool_pa,
				   GFP_ATOMIC);
	if (!dev->shadow_sa_pool)
		return -ENOMEM;

	dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
			 sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
			 &dev->shadow_sr_pool_pa, GFP_ATOMIC);
	if (!dev->shadow_sr_pool)
		return -ENOMEM;
	for (i = 0; i < PPC4XX_NUM_PD; i++) {
		pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
						sizeof(struct pd_uinfo) * i);

		/* alloc 256 bytes which is enough for any kind of dynamic sa */
		pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
		pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;

		/* alloc state record */
		pd_uinfo->sr_va = dev->shadow_sr_pool +
		    sizeof(struct sa_state_record) * i;
		pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
		    sizeof(struct sa_state_record) * i;
	}

	return 0;
}

static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
	if (dev->pdr)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr, dev->pdr_pa);

	if (dev->shadow_sa_pool)
		dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);

	if (dev->shadow_sr_pool)
		dma_free_coherent(dev->core_dev->device,
			sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
			dev->shadow_sr_pool, dev->shadow_sr_pool_pa);

	kfree(dev->pdr_uinfo);
}

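/*
 * Allocate one packet descriptor slot from the PDR. The _nolock suffix
 * means the caller is expected to hold core_dev->lock; the ring is full
 * when advancing pdr_head would collide with pdr_tail.
 */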
static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
{
	u32 retval;
	u32 tmp;

	retval = dev->pdr_head;
	tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;

	if (tmp == dev->pdr_tail)
		return ERING_WAS_FULL;

	dev->pdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
	struct pd_uinfo *pd_uinfo;
	unsigned long flags;

	pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
				       sizeof(struct pd_uinfo) * idx);
	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->pdr_tail != PPC4XX_LAST_PD)
		dev->pdr_tail++;
	else
		dev->pdr_tail = 0;
	pd_uinfo->state = PD_ENTRY_FREE;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
				       dma_addr_t *pd_dma, u32 idx)
{
	*pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;

	return dev->pdr + sizeof(struct ce_pd) * idx;
}

/**
 * alloc memory for the gather ring
 * no need to alloc buf for the ring
 * gdr_tail and gdr_head are initialized by this function
 */
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
	dev->gdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_gd) * PPC4XX_NUM_GD,
				      &dev->gdr_pa, GFP_ATOMIC);
	if (!dev->gdr)
		return -ENOMEM;

	memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);

	return 0;
}

static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
	dma_free_coherent(dev->core_dev->device,
			  sizeof(struct ce_gd) * PPC4XX_NUM_GD,
			  dev->gdr, dev->gdr_pa);
}

/*
 * Reserve n consecutive gather descriptors. The caller must disable
 * preemption or interrupts when calling this function.
 */
u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_GD)
		return ERING_WAS_FULL;

	retval = dev->gdr_head;
	tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
	if (dev->gdr_head > dev->gdr_tail) {
		if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->gdr_head < dev->gdr_tail) {
		if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	}
	dev->gdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->gdr_tail == dev->gdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}

	if (dev->gdr_tail != PPC4XX_LAST_GD)
		dev->gdr_tail++;
	else
		dev->gdr_tail = 0;

	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
					      dma_addr_t *gd_dma, u32 idx)
{
	*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;

	return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
}

/**
 * alloc memory for the scatter ring
 * need to alloc buf for the ring
 * sdr_tail and sdr_head are initialized by this function
 */
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
	int i;
	struct ce_sd *sd_array;

	/* alloc memory for scatter descriptor ring */
	dev->sdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				      &dev->sdr_pa, GFP_ATOMIC);
	if (!dev->sdr)
		return -ENOMEM;

	dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
	dev->scatter_buffer_va =
		dma_alloc_coherent(dev->core_dev->device,
			dev->scatter_buffer_size * PPC4XX_NUM_SD,
			&dev->scatter_buffer_pa, GFP_ATOMIC);
	if (!dev->scatter_buffer_va)
		return -ENOMEM;

	sd_array = dev->sdr;

	for (i = 0; i < PPC4XX_NUM_SD; i++) {
		sd_array[i].ptr = dev->scatter_buffer_pa +
				  dev->scatter_buffer_size * i;
	}

	return 0;
}

static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
	if (dev->sdr)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);

	if (dev->scatter_buffer_va)
		dma_free_coherent(dev->core_dev->device,
				  dev->scatter_buffer_size * PPC4XX_NUM_SD,
				  dev->scatter_buffer_va,
				  dev->scatter_buffer_pa);
}

/*
 * Reserve n consecutive scatter descriptors. The caller must disable
 * preemption or interrupts when calling this function.
 */
static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_SD)
		return ERING_WAS_FULL;

	retval = dev->sdr_head;
	tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
	if (dev->sdr_head > dev->sdr_tail) {
		if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->sdr_head < dev->sdr_tail) {
		if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} /* the head == tail (empty) case is already taken care of */
	dev->sdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->sdr_tail == dev->sdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}
	if (dev->sdr_tail != PPC4XX_LAST_SD)
		dev->sdr_tail++;
	else
		dev->sdr_tail = 0;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
					      dma_addr_t *sd_dma, u32 idx)
{
	*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;

	return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
}

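/*
 * Copy data for one destination page out of the scatter ring buffers.
 * The helper consumes up to one scatter buffer per call, updating the
 * caller's buffer index, offset and remaining byte counts; a non-zero
 * return value tells the caller to call it again for the same page.
 */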
static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
				   dma_addr_t *addr, u32 *length,
				   u32 *idx, u32 *offset, u32 *nbytes)
{
	u32 len;

	if (*length > dev->scatter_buffer_size) {
		memcpy(phys_to_virt(*addr),
			dev->scatter_buffer_va +
			*idx * dev->scatter_buffer_size + *offset,
			dev->scatter_buffer_size);
		*offset = 0;
		*length -= dev->scatter_buffer_size;
		*nbytes -= dev->scatter_buffer_size;
		if (*idx == PPC4XX_LAST_SD)
			*idx = 0;
		else
			(*idx)++;
		*addr = *addr + dev->scatter_buffer_size;
		return 1;
	} else if (*length < dev->scatter_buffer_size) {
		memcpy(phys_to_virt(*addr),
			dev->scatter_buffer_va +
			*idx * dev->scatter_buffer_size + *offset, *length);
		if ((*offset + *length) == dev->scatter_buffer_size) {
			if (*idx == PPC4XX_LAST_SD)
				*idx = 0;
			else
				(*idx)++;
			*nbytes -= *length;
			*offset = 0;
		} else {
			*nbytes -= *length;
			*offset += *length;
		}

		return 0;
	} else {
		len = (*nbytes <= dev->scatter_buffer_size) ?
				(*nbytes) : dev->scatter_buffer_size;
		memcpy(phys_to_virt(*addr),
			dev->scatter_buffer_va +
			*idx * dev->scatter_buffer_size + *offset,
			len);
		*offset = 0;
		*length -= len;
		*nbytes -= len;
		if (*idx == PPC4XX_LAST_SD)
			*idx = 0;
		else
			(*idx)++;

		return 0;
	}
}

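/*
 * Walk the destination scatterlist and copy the processed packet out of
 * the scatter ring into it, one sg entry at a time, starting at the
 * first scatter descriptor recorded in pd_uinfo.
 */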
static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
				      struct ce_pd *pd,
				      struct pd_uinfo *pd_uinfo,
				      u32 nbytes,
				      struct scatterlist *dst)
{
	dma_addr_t addr;
	u32 this_sd;
	u32 offset;
	u32 len;
	u32 i;
	u32 sg_len;
	struct scatterlist *sg;

	this_sd = pd_uinfo->first_sd;
	offset = 0;
	i = 0;

	while (nbytes) {
		sg = &dst[i];
		sg_len = sg->length;
		addr = dma_map_page(dev->core_dev->device, sg_page(sg),
				sg->offset, sg->length, DMA_TO_DEVICE);

		if (offset == 0) {
			len = (nbytes <= sg->length) ? nbytes : sg->length;
			while (crypto4xx_fill_one_page(dev, &addr, &len,
				&this_sd, &offset, &nbytes))
				;
			if (!nbytes)
				return;
			i++;
		} else {
			len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
				nbytes : (dev->scatter_buffer_size - offset);
			len = (sg->length < len) ? sg->length : len;
			while (crypto4xx_fill_one_page(dev, &addr, &len,
					&this_sd, &offset, &nbytes))
				;
			if (!nbytes)
				return;
			sg_len -= len;
			if (sg_len) {
				addr += len;
				while (crypto4xx_fill_one_page(dev, &addr,
					&sg_len, &this_sd, &offset, &nbytes))
					;
			}
			i++;
		}
	}
}

static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
					struct crypto4xx_ctx *ctx)
{
	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
	struct sa_state_record *state_record =
	    (struct sa_state_record *) pd_uinfo->sr_va;

	if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
		memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
		       SA_HASH_ALG_SHA1_DIGEST_SIZE);
	}

	return 0;
}

static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
				  struct pd_uinfo *pd_uinfo)
{
	int i;

	if (pd_uinfo->num_gd) {
		for (i = 0; i < pd_uinfo->num_gd; i++)
			crypto4xx_put_gd_to_gdr(dev);
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (pd_uinfo->num_sd) {
		for (i = 0; i < pd_uinfo->num_sd; i++)
			crypto4xx_put_sd_to_sdr(dev);

		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
	}
}

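/*
 * Completion handling for cipher requests: copy the result out of the
 * scatter ring if one was used, restore the IV when the SA asked for it
 * to be saved, return the descriptors and invoke the request callback.
 */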
static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
				     struct pd_uinfo *pd_uinfo,
				     struct ce_pd *pd)
{
	struct crypto4xx_ctx *ctx;
	struct ablkcipher_request *ablk_req;
	struct scatterlist *dst;
	dma_addr_t addr;

	ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
	ctx = crypto_tfm_ctx(ablk_req->base.tfm);

	if (pd_uinfo->using_sd) {
		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
					  ablk_req->dst);
	} else {
		dst = pd_uinfo->dest_va;
		addr = dma_map_page(dev->core_dev->device, sg_page(dst),
				    dst->offset, dst->length, DMA_FROM_DEVICE);
	}

	if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
		struct crypto_ablkcipher *cipher =
			crypto_ablkcipher_reqtfm(ablk_req);

		crypto4xx_memcpy_from_le32((u32 *)ablk_req->info,
					   pd_uinfo->sr_va->save_iv,
					   crypto_ablkcipher_ivsize(cipher));
	}

	crypto4xx_ret_sg_desc(dev, pd_uinfo);
	if (ablk_req->base.complete != NULL)
		ablk_req->base.complete(&ablk_req->base, 0);

	return 0;
}

static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
				struct pd_uinfo *pd_uinfo)
{
	struct crypto4xx_ctx *ctx;
	struct ahash_request *ahash_req;

	ahash_req = ahash_request_cast(pd_uinfo->async_req);
	ctx = crypto_tfm_ctx(ahash_req->base.tfm);

	crypto4xx_copy_digest_to_dst(pd_uinfo,
				     crypto_tfm_ctx(ahash_req->base.tfm));
	crypto4xx_ret_sg_desc(dev, pd_uinfo);
	/* call the user-provided callback function */
	if (ahash_req->base.complete != NULL)
		ahash_req->base.complete(&ahash_req->base, 0);

	return 0;
}

static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
	struct ce_pd *pd;
	struct pd_uinfo *pd_uinfo;

	pd = dev->pdr + sizeof(struct ce_pd) * idx;
	pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo) * idx;
	if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
			CRYPTO_ALG_TYPE_ABLKCIPHER)
		return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
	else
		return crypto4xx_ahash_done(dev, pd_uinfo);
}

/**
 * Note: Only use this function to copy items that are word aligned.
 */
void crypto4xx_memcpy_le(unsigned int *dst,
			 const unsigned char *buf,
			 int len)
{
	for (; len >= 4; buf += 4, len -= 4)
		*dst++ = cpu_to_le32(*(unsigned int *) buf);
}

static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
{
	crypto4xx_destroy_pdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
	crypto4xx_destroy_sdr(core_dev->dev);
	iounmap(core_dev->dev->ce_base);
	kfree(core_dev->dev);
	kfree(core_dev);
}

void crypto4xx_return_pd(struct crypto4xx_device *dev,
			 u32 pd_entry, struct ce_pd *pd,
			 struct pd_uinfo *pd_uinfo)
{
	/* irq should be already disabled */
	dev->pdr_head = pd_entry;
	pd->pd_ctl.w = 0;
	pd->pd_ctl_len.w = 0;
	pd_uinfo->state = PD_ENTRY_FREE;
}

static u32 get_next_gd(u32 current)
{
	if (current != PPC4XX_LAST_GD)
		return current + 1;
	else
		return 0;
}

static u32 get_next_sd(u32 current)
{
	if (current != PPC4XX_LAST_SD)
		return current + 1;
	else
		return 0;
}

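/*
 * Build and kick off one packet descriptor. Gather descriptors are used
 * when the source scatterlist has more than one entry, and scatter
 * descriptors when the destination is not a single contiguous buffer;
 * the PD, GD and SD slots are reserved together under core_dev->lock so
 * that a partially allocated request can be rolled back.
 */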
u32 crypto4xx_build_pd(struct crypto_async_request *req,
		       struct crypto4xx_ctx *ctx,
		       struct scatterlist *src,
		       struct scatterlist *dst,
		       unsigned int datalen,
		       void *iv, u32 iv_len)
{
	struct crypto4xx_device *dev = ctx->dev;
	dma_addr_t addr, pd_dma, sd_dma, gd_dma;
	struct dynamic_sa_ctl *sa;
	struct scatterlist *sg;
	struct ce_gd *gd;
	struct ce_pd *pd;
	u32 num_gd, num_sd;
	u32 fst_gd = 0xffffffff;
	u32 fst_sd = 0xffffffff;
	u32 pd_entry;
	unsigned long flags;
	struct pd_uinfo *pd_uinfo = NULL;
	unsigned int nbytes = datalen, idx;
	unsigned int ivlen = 0;
	u32 gd_idx = 0;

	/* figure out how many gather descriptors are needed */
	num_gd = sg_nents_for_len(src, datalen);
	if ((int)num_gd < 0) {
		dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
		return -EINVAL;
	}
	if (num_gd == 1)
		num_gd = 0;

	/* figure out how many scatter descriptors are needed */
	if (sg_is_last(dst) || ctx->is_hash) {
		num_sd = 0;
	} else {
		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
			num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
			if (datalen % PPC4XX_SD_BUFFER_SIZE)
				num_sd++;
		} else {
			num_sd = 1;
		}
	}

	/*
	 * The following section of code needs to be protected.
	 * The gather ring and scatter ring entries need to be consecutive.
	 * If we run out of any kind of descriptor, the descriptors
	 * already obtained must be returned to their original place.
	 */
	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (num_gd) {
		fst_gd = crypto4xx_get_n_gd(dev, num_gd);
		if (fst_gd == ERING_WAS_FULL) {
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	if (num_sd) {
		fst_sd = crypto4xx_get_n_sd(dev, num_sd);
		if (fst_sd == ERING_WAS_FULL) {
			if (num_gd)
				dev->gdr_head = fst_gd;
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
	if (pd_entry == ERING_WAS_FULL) {
		if (num_gd)
			dev->gdr_head = fst_gd;
		if (num_sd)
			dev->sdr_head = fst_sd;
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
				       sizeof(struct pd_uinfo) * pd_entry);
	pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
	pd_uinfo->async_req = req;
	pd_uinfo->num_gd = num_gd;
	pd_uinfo->num_sd = num_sd;

	if (iv_len || ctx->is_hash) {
		ivlen = iv_len;
		pd->sa = pd_uinfo->sa_pa;
		sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
		if (ctx->direction == DIR_INBOUND)
			memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
		else
			memcpy(sa, ctx->sa_out, ctx->sa_len * 4);

		memcpy((void *) sa + ctx->offset_to_sr_ptr,
			&pd_uinfo->sr_pa, 4);

		if (iv_len)
			crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
	} else {
		if (ctx->direction == DIR_INBOUND) {
			pd->sa = ctx->sa_in_dma_addr;
			sa = (struct dynamic_sa_ctl *) ctx->sa_in;
		} else {
			pd->sa = ctx->sa_out_dma_addr;
			sa = (struct dynamic_sa_ctl *) ctx->sa_out;
		}
	}
	pd->sa_len = ctx->sa_len;
	if (num_gd) {
		/* get the first gd we are going to use */
		gd_idx = fst_gd;
		pd_uinfo->first_gd = fst_gd;
		pd_uinfo->num_gd = num_gd;
		gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
		pd->src = gd_dma;
		/* enable gather */
		sa->sa_command_0.bf.gather = 1;
		idx = 0;
		src = &src[0];
		/* walk the sg, and set up the gather array */
		while (nbytes) {
			sg = &src[idx];
			addr = dma_map_page(dev->core_dev->device, sg_page(sg),
				    sg->offset, sg->length, DMA_TO_DEVICE);
			gd->ptr = addr;
			gd->ctl_len.len = sg->length;
			gd->ctl_len.done = 0;
			gd->ctl_len.ready = 1;
			if (sg->length >= nbytes)
				break;
			nbytes -= sg->length;
			gd_idx = get_next_gd(gd_idx);
			gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
			idx++;
		}
	} else {
		pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
				src->offset, src->length, DMA_TO_DEVICE);
		/*
		 * Disable gather in sa command
		 */
		sa->sa_command_0.bf.gather = 0;
		/*
		 * Indicate gather array is not used
		 */
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (ctx->is_hash || sg_is_last(dst)) {
		/*
		 * We know the application gives us dst as one whole piece of
		 * memory, so there is no need to use the scatter ring.
		 * In the is_hash case, the ICV is always at the end of the
		 * src data.
		 */
		pd_uinfo->using_sd = 0;
		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
		pd_uinfo->dest_va = dst;
		sa->sa_command_0.bf.scatter = 0;
		if (ctx->is_hash)
			pd->dest = virt_to_phys((void *)dst);
		else
			pd->dest = (u32)dma_map_page(dev->core_dev->device,
					sg_page(dst), dst->offset,
					dst->length, DMA_TO_DEVICE);
	} else {
		struct ce_sd *sd = NULL;
		u32 sd_idx = fst_sd;
		nbytes = datalen;
		sa->sa_command_0.bf.scatter = 1;
		pd_uinfo->using_sd = 1;
		pd_uinfo->dest_va = dst;
		pd_uinfo->first_sd = fst_sd;
		pd_uinfo->num_sd = num_sd;
		sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
		pd->dest = sd_dma;
		/* setup scatter descriptor */
		sd->ctl.done = 0;
		sd->ctl.rdy = 1;
		/* sd->ptr was already set up when the ring was built */
		if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
			nbytes -= PPC4XX_SD_BUFFER_SIZE;
		else
			nbytes = 0;
		while (nbytes) {
			sd_idx = get_next_sd(sd_idx);
			sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
			/* setup scatter descriptor */
			sd->ctl.done = 0;
			sd->ctl.rdy = 1;
			if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
				nbytes -= PPC4XX_SD_BUFFER_SIZE;
			else
				/*
				 * An SD entry can hold PPC4XX_SD_BUFFER_SIZE,
				 * which is more than nbytes, so we are done.
				 */
				nbytes = 0;
		}
	}

	sa->sa_command_1.bf.hash_crypto_offset = 0;
	pd->pd_ctl.w = ctx->pd_ctl;
	pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
	pd_uinfo->state = PD_ENTRY_INUSE;
	wmb();
	/* write any value to push the engine to read a pd */
	writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);

	return -EINPROGRESS;
}

/**
 * Algorithm Registration Functions
 */
static int crypto4xx_alg_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->dev = amcc_alg->dev;
	ctx->sa_in = NULL;
	ctx->sa_out = NULL;
	ctx->sa_in_dma_addr = 0;
	ctx->sa_out_dma_addr = 0;
	ctx->sa_len = 0;

	switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	default:
		tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
					 sizeof(struct crypto4xx_ctx));
		break;
	}

	return 0;
}

static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
{
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto4xx_free_sa(ctx);
	crypto4xx_free_state_record(ctx);
}

int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
			   struct crypto4xx_alg_common *crypto_alg,
			   int array_size)
{
	struct crypto4xx_alg *alg;
	int i;
	int rc = 0;

	for (i = 0; i < array_size; i++) {
		alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
		if (!alg)
			return -ENOMEM;

		alg->alg = crypto_alg[i];
		alg->dev = sec_dev;

		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AHASH:
			rc = crypto_register_ahash(&alg->alg.u.hash);
			break;

		default:
			rc = crypto_register_alg(&alg->alg.u.cipher);
			break;
		}

		if (rc)
			kfree(alg);
		else
			list_add_tail(&alg->entry, &sec_dev->alg_list);
	}

	return 0;
}

static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
{
	struct crypto4xx_alg *alg, *tmp;

	list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
		list_del(&alg->entry);
		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&alg->alg.u.hash);
			break;

		default:
			crypto_unregister_alg(&alg->alg.u.cipher);
		}
		kfree(alg);
	}
}

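/*
 * Bottom half: walk the packet descriptor ring from the tail and
 * complete every descriptor the packet engine has marked done, stopping
 * at the first descriptor that is still in flight.
 */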
static void crypto4xx_bh_tasklet_cb(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
	struct pd_uinfo *pd_uinfo;
	struct ce_pd *pd;
	u32 tail;

	while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
		tail = core_dev->dev->pdr_tail;
		pd_uinfo = core_dev->dev->pdr_uinfo +
			sizeof(struct pd_uinfo) * tail;
		pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
		if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
				   pd->pd_ctl.bf.pe_done &&
				   !pd->pd_ctl.bf.host_ready) {
			pd->pd_ctl.bf.pe_done = 0;
			crypto4xx_pd_done(core_dev->dev, tail);
			crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
			pd_uinfo->state = PD_ENTRY_FREE;
		} else {
			/* if the tail is not done, stop here */
			break;
		}
	}
}

static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	if (!core_dev->dev->ce_base)
		return IRQ_NONE;

	writel(PPC4XX_INTERRUPT_CLR,
	       core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
	tasklet_schedule(&core_dev->tasklet);

	return IRQ_HANDLED;
}

/**
 * Supported Crypto Algorithms
 */
struct crypto4xx_alg_common crypto4xx_alg[] = {
	/* Crypto AES modes */
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-ppc4xx",
		.cra_priority		= CRYPTO4XX_CRYPTO_PRIORITY,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto4xx_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_init		= crypto4xx_alg_init,
		.cra_exit		= crypto4xx_alg_exit,
		.cra_module		= THIS_MODULE,
		.cra_u			= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.ivsize		= AES_IV_SIZE,
				.setkey		= crypto4xx_setkey_aes_cbc,
				.encrypt	= crypto4xx_encrypt,
				.decrypt	= crypto4xx_decrypt,
			}
		}
	}},
};

/**
 * Module Initialization Routine
 */
static int crypto4xx_probe(struct platform_device *ofdev)
{
	int rc;
	struct resource res;
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev;

	rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
	if (rc)
		return -ENODEV;

	if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc405ex-crypto")) {
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc460sx-crypto")) {
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
	} else {
		printk(KERN_ERR "Crypto Function Not supported!\n");
		return -EINVAL;
	}

	core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
	if (!core_dev)
		return -ENOMEM;

	dev_set_drvdata(dev, core_dev);
	core_dev->ofdev = ofdev;
	core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
	if (!core_dev->dev) {
		rc = -ENOMEM;
		goto err_alloc_dev;
	}

	core_dev->dev->core_dev = core_dev;
	core_dev->device = dev;
	spin_lock_init(&core_dev->lock);
	INIT_LIST_HEAD(&core_dev->dev->alg_list);
	rc = crypto4xx_build_pdr(core_dev->dev);
	if (rc)
		goto err_build_pdr;

	rc = crypto4xx_build_gdr(core_dev->dev);
	if (rc)
		goto err_build_pdr;

	rc = crypto4xx_build_sdr(core_dev->dev);
	if (rc)
		goto err_build_sdr;

	/* Init tasklet for bottom half processing */
	tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
		     (unsigned long) dev);

	/* Register for Crypto isr, Crypto Engine IRQ */
	core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
			 core_dev->dev->name, dev);
	if (rc)
		goto err_request_irq;

	core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
	if (!core_dev->dev->ce_base) {
		dev_err(dev, "failed to of_iomap\n");
		rc = -ENOMEM;
		goto err_iomap;
	}

	/* need to setup pdr, rdr, gdr and sdr before this */
	crypto4xx_hw_init(core_dev->dev);

	/* Register security algorithms with Linux CryptoAPI */
	rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
				    ARRAY_SIZE(crypto4xx_alg));
	if (rc)
		goto err_start_dev;

	ppc4xx_trng_probe(core_dev);
	return 0;

err_start_dev:
	iounmap(core_dev->dev->ce_base);
err_iomap:
	free_irq(core_dev->irq, dev);
err_request_irq:
	irq_dispose_mapping(core_dev->irq);
	tasklet_kill(&core_dev->tasklet);
err_build_sdr:
	crypto4xx_destroy_sdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
err_build_pdr:
	crypto4xx_destroy_pdr(core_dev->dev);
	kfree(core_dev->dev);
err_alloc_dev:
	kfree(core_dev);

	return rc;
}

static int crypto4xx_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	ppc4xx_trng_remove(core_dev);

	free_irq(core_dev->irq, dev);
	irq_dispose_mapping(core_dev->irq);

	tasklet_kill(&core_dev->tasklet);
	/* Un-register with Linux CryptoAPI */
	crypto4xx_unregister_alg(core_dev->dev);
	/* Free all allocated memory */
	crypto4xx_stop_all(core_dev);

	return 0;
}

static const struct of_device_id crypto4xx_match[] = {
	{ .compatible = "amcc,ppc4xx-crypto", },
	{ },
};
MODULE_DEVICE_TABLE(of, crypto4xx_match);

static struct platform_driver crypto4xx_driver = {
	.driver = {
		.name = MODULE_NAME,
		.of_match_table = crypto4xx_match,
	},
	.probe		= crypto4xx_probe,
	.remove		= crypto4xx_remove,
};

module_platform_driver(crypto4xx_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");