// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 */
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16
#define NPE_OP_HASH_VERIFY	0x01
#define NPE_OP_CCM_ENABLE	0x04
#define NPE_OP_CRYPT_ENABLE	0x08
#define NPE_OP_HASH_ENABLE	0x10
#define NPE_OP_NOT_IN_PLACE	0x20
#define NPE_OP_HMAC_DISABLE	0x40
#define NPE_OP_CRYPT_ENCRYPT	0x80

#define NPE_OP_CCM_GEN_MIC	0xcc
#define NPE_OP_HASH_GEN_ICV	0x50
#define NPE_OP_ENC_GEN_KEY	0xc9
#define MOD_ECB		0x0000
#define MOD_CTR		0x1000
#define MOD_CBC_ENC	0x2000
#define MOD_CBC_DEC	0x3000
#define MOD_CCM_ENC	0x4000
#define MOD_CCM_DEC	0x5000

/* key length in 32-bit words, encoded into the cfg word */
#define KEYLEN_128	4
#define KEYLEN_192	6
#define KEYLEN_256	8

#define CIPH_DECR	0x0000
#define CIPH_ENCR	0x0400

#define MOD_DES		0x0000
#define MOD_TDEA2	0x0100
#define MOD_3DES	0x0200
#define MOD_AES		0x0800
#define MOD_AES128	(0x0800 | KEYLEN_128)
#define MOD_AES192	(0x0900 | KEYLEN_192)
#define MOD_AES256	(0x0a00 | KEYLEN_256)
#define MAX_IVLEN	16
#define NPE_ID		2  /* NPE C */
#define NPE_QLEN	16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL	64

#define SEND_QID	29
#define RECV_QID	30
#define CTL_FLAG_UNUSED		0x0000
#define CTL_FLAG_USED		0x1000
#define CTL_FLAG_PERFORM_ABLK	0x0001
#define CTL_FLAG_GEN_ICV	0x0002
#define CTL_FLAG_GEN_REVAES	0x0004
#define CTL_FLAG_PERFORM_AEAD	0x0008
#define CTL_FLAG_MASK		0x000f

#define HMAC_PAD_BLOCKLEN	SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE		16
struct buffer_desc {
	u32 phys_next;
#ifdef __ARMEB__
	u16 buf_len;
	u16 pkt_len;
#else
	u16 pkt_len;
	u16 buf_len;
#endif
	dma_addr_t phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
	enum dma_data_direction dir;
};
struct crypt_ctl {
#ifdef __ARMEB__
	u8 mode;		/* NPE_OP_* operation mode */
	u8 init_len;
	u16 reserved;
#else
	u16 reserved;
	u8 init_len;
	u8 mode;		/* NPE_OP_* operation mode */
#endif
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	dma_addr_t icv_rev_aes;	/* icv or rev aes */
	u32 src_buf;
	u32 dst_buf;
#ifdef __ARMEB__
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
#else
	u16 auth_len;		/* Authentication data length */
	u16 auth_offs;		/* Authentication start offset */
	u16 crypt_len;		/* Cryption data length */
	u16 crypt_offs;		/* Cryption start offset */
#endif
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes */
	unsigned int ctl_flags;
	union {
		struct skcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};
struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	u8 iv[MAX_IVLEN];
	bool encrypt;
};

struct aead_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};
struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};
struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned int salted;
	atomic_t configuring;
	struct completion completion;
};
struct ixp_alg {
	struct skcipher_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

struct ixp_aead_alg {
	struct aead_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};
static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword	= 0xAA010004,
	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};

static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword	= 0x00000005,
	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};
static struct npe *npe_c;
static struct dma_pool *buffer_pool;
static struct dma_pool *ctx_pool;

static struct crypt_ctl *crypt_virt;
static dma_addr_t crypt_phys;

static int support_aes = 1;

#define DRIVER_NAME "ixp4xx_crypto"

static struct platform_device *pdev;
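
/*
 * The NPE passes crypt_ctl descriptors around by physical (DMA)
 * address. All descriptors live in a single coherent array
 * (crypt_virt/crypt_phys), so translating between the CPU and NPE
 * views is plain pointer arithmetic over that array.
 */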
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
}
static int setup_crypt_desc(void)
{
	struct device *dev = &pdev->dev;

	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	crypt_virt = dma_alloc_coherent(dev,
					NPE_QLEN * sizeof(struct crypt_ctl),
					&crypt_phys, GFP_ATOMIC);
	if (!crypt_virt)
		return -ENOMEM;
	return 0;
}
static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}
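
/*
 * Descriptors NPE_QLEN..NPE_QLEN_TOTAL-1 are kept in reserve for
 * one-shot configuration requests (HMAC pad hashing, reverse AES key
 * generation) issued from setkey, so key setup cannot be starved
 * when all regular descriptors are occupied by in-flight requests.
 */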
static spinlock_t emerg_lock;
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}
static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
			   dma_addr_t phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}
static struct tasklet_struct crypto_done_tasklet;
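
/*
 * If the ICV did not fit contiguously into the last buffer of the
 * chain, the NPE used a bounce buffer from buffer_pool instead. On
 * encryption, copy the digest from there to its real position in the
 * destination scatterlist; in either case free the bounce buffer.
 */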
static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->assoclen + req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
					 req->dst, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}
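
/*
 * Completion path, run from the tasklet for every descriptor address
 * popped off RECV_QID. Bit 0 of the returned physical address is the
 * NPE's status flag: set means authentication failed (-EBADMSG).
 */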
static void one_packet(dma_addr_t phys)
{
	struct device *dev = &pdev->dev;
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		if (req_ctx->hmac_virt)
			finish_scattered_hmac(crypt);

		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct skcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
		unsigned int ivsize = crypto_skcipher_ivsize(tfm);
		unsigned int offset;

		if (ivsize > 0) {
			offset = req->cryptlen - ivsize;
			if (req_ctx->encrypt) {
				scatterwalk_map_and_copy(req->iv, req->dst,
							 offset, ivsize, 0);
			} else {
				memcpy(req->iv, req_ctx->iv, ivsize);
				memzero_explicit(req_ctx->iv, ivsize);
			}
		}

		if (req_ctx->dst)
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
			      crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}
static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
	int i;

	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);

		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}
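
/*
 * Check the expansion-bus feature bits for crypto hardware, request
 * NPE-C and load its firmware if it is not yet running, verify via a
 * STATUS_MSG exchange that the firmware supports crypto, then create
 * the DMA pools and request the send/receive hardware queues.
 */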
static int init_ixp_crypto(struct device *dev)
{
	int ret = -ENODEV;
	u32 msg[2] = { 0, 0 };

	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
				     IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (ret)
			goto npe_release;
		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	} else {
		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;

		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	}

	switch ((msg[1] >> 16) & 0xff) {
	case 3:
		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
		       npe_name(npe_c));
		support_aes = 0;
		break;
	case 4:
	case 5:
		support_aes = 1;
		break;
	default:
		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
		       npe_name(npe_c));
		ret = -ENODEV;
		goto npe_release;
	}
	/* buffer_pool will also be used to sometimes store the hmac,
	 * so assure it is large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
				      sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool)
		goto err;

	ctx_pool = dma_pool_create("context", dev,
				   NPE_CTX_LEN, 16, 0);
	if (!ctx_pool)
		goto err;

	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;

npe_error:
	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
	ret = -EIO;
err:
	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);
npe_release:
	npe_release(npe_c);
	return ret;
}
static void release_ixp_crypto(struct device *dev)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
				  NPE_QLEN * sizeof(struct crypt_ctl),
				  crypt_virt, crypt_phys);
	}
}
static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx)
		return -ENOMEM;

	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}
static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret)
		free_sa_dir(&ctx->encrypt);

	return ret;
}

static int init_tfm_ablk(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx));
	return init_tfm(crypto_skcipher_tfm(tfm));
}

static int init_tfm_aead(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
	return init_tfm(crypto_aead_tfm(tfm));
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);

	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}

static void exit_tfm_ablk(struct crypto_skcipher *tfm)
{
	exit_tfm(crypto_skcipher_tfm(tfm));
}

static void exit_tfm_aead(struct crypto_aead *tfm)
{
	exit_tfm(crypto_aead_tfm(tfm));
}
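
/*
 * Precompute an HMAC ipad or opad hash state on the NPE itself: XOR
 * the key with the pad byte into a DMA-able buffer, let the NPE hash
 * one block of it (NPE_OP_HASH_GEN_ICV) and write the resulting
 * chaining variables back at 'target' inside the NPE context.
 */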
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
			      int init_len, u32 ctx_addr, const u8 *key,
			      int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	dma_addr_t pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
		pad[i] ^= xpad;

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = NULL;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}
static int setup_auth(struct crypto_tfm *tfm, int encrypt,
		      unsigned int authsize, const u8 *key, int key_len,
		      unsigned int digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
	*(u32 *)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
		+ sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
				 init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
				  init_len, npe_ctx_addr, key, key_len);
}
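
/*
 * Have the NPE derive the AES decryption (reverse) key schedule:
 * temporarily mark the decrypt context as encrypting, run a dummy
 * 16-byte operation with NPE_OP_ENC_GEN_KEY so the NPE deposits the
 * reverse key behind the cfg word, then clear CIPH_ENCR again in the
 * CTL_FLAG_GEN_REVAES completion handler.
 */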
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt)
		return -EAGAIN;

	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}
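
/*
 * Write the cipher configuration word and the key (zero-padded to
 * DES3_EDE_KEY_SIZE for single DES) into the per-direction NPE
 * context. For AES decryption this also kicks off reverse key
 * generation on the NPE.
 */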
static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
			const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		switch (key_len) {
		case 16: keylen_cfg = MOD_AES128; break;
		case 24: keylen_cfg = MOD_AES192; break;
		case 32: keylen_cfg = MOD_AES256; break;
		default:
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else {
		err = crypto_des_verify_key(tfm, key);
		if (err)
			return err;
	}
	/* write cfg word to cryptinfo */
	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;

	if ((cipher_cfg & MOD_AES) && !encrypt)
		return gen_rev_aes_key(tfm);

	return 0;
}
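
/*
 * Build the NPE's buffer descriptor chain for a scatterlist: one
 * buffer_desc from the DMA pool per sg entry, each carrying the
 * DMA-mapped address and length of that entry. Returns the last
 * descriptor of the chain, or NULL on allocation failure.
 */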
static struct buffer_desc *chainup_buffers(struct device *dev,
					   struct scatterlist *sg,
					   unsigned int nbytes,
					   struct buffer_desc *buf,
					   gfp_t flags,
					   enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = sg_next(sg)) {
		unsigned int len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		dma_addr_t next_buf_phys;
		void *ptr;

		nbytes -= len;
		ptr = sg_virt(sg);
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			buf = NULL;
			break;
		}
		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;
		buf = next_buf;

		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
		buf->dir = dir;
	}
	buf->next = NULL;
	buf->phys_next = 0;
	return buf;
}
static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
		       unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int key_len)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       ablk_setkey(tfm, key, key_len);
}
static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}
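
/*
 * Submit one skcipher request to the NPE: fill in a crypt_ctl
 * descriptor, chain up the source (and, for out-of-place requests,
 * destination) buffers and push the descriptor's physical address
 * onto SEND_QID. Completion is signalled asynchronously through
 * RECV_QID, so success here means -EINPROGRESS.
 */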
static int ablk_perform(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->cryptlen;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
	struct buffer_desc src_hook;
	struct device *dev = &pdev->dev;
	unsigned int offset;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	req_ctx->encrypt = encrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);
	if (ivsize > 0 && !encrypt) {
		offset = req->cryptlen - ivsize;
		scatterwalk_map_and_copy(req_ctx->iv, req->src, offset,
					 ivsize, 0);
	}
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		req_ctx->dst = NULL;
		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
				     flags, DMA_FROM_DEVICE))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;
	} else {
		req_ctx->dst = NULL;
	}
	req_ctx->src = NULL;
	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
			     flags, src_direction))
		goto free_buf_src;

	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
	if (req->src != req->dst)
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);

	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}
static int ablk_encrypt(struct skcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct skcipher_request *req)
{
	return ablk_perform(req, 0);
}

static int ablk_rfc3686_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->iv;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->iv = iv;
	ret = ablk_perform(req, 1);
	req->iv = info;
	return ret;
}
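
/*
 * Submit one AEAD request: the NPE authenticates assoclen + cryptlen
 * bytes and en/decrypts the cryptlen tail in a single pass. The ICV
 * is read from (or written to) the last authsize bytes of the chain,
 * falling back to a bounce buffer when those bytes are scattered
 * across buffer descriptors.
 */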
static int aead_perform(struct aead_request *req, int encrypt,
			int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	unsigned int lastlen;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	buf = chainup_buffers(dev, req->src, crypt->auth_len,
			      &src_hook, flags, src_direction);
	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto free_buf_src;

	lastlen = buf->buf_len;
	if (lastlen >= authsize)
		crypt->icv_rev_aes = buf->phys_addr +
				     buf->buf_len - authsize;

	req_ctx->dst = NULL;

	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		src_direction = DMA_TO_DEVICE;

		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
				      &dst_hook, flags, DMA_FROM_DEVICE);
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;

		if (!buf)
			goto free_buf_dst;

		if (encrypt) {
			lastlen = buf->buf_len;
			if (lastlen >= authsize)
				crypt->icv_rev_aes = buf->phys_addr +
						     buf->buf_len - authsize;
		}
	}

	if (unlikely(lastlen < authsize)) {
		/* The 12 hmac bytes are scattered,
		 * we need to copy them into a safe buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
						    &crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto free_buf_dst;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
						 req->src, cryptlen,
						 authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_dst:
	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int digest_len = crypto_aead_maxauthsize(tfm);
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			 ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			 ctx->authkey_len, digest_len);
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_maxauthsize(tfm) >> 2;

	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}
static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
		       unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	if (keys.enckeylen > sizeof(ctx->enckey))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
	if (err)
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}
static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto	= {
		.base.cra_name		= "cbc(des)",
		.base.cra_blocksize	= DES_BLOCK_SIZE,

		.min_keysize		= DES_KEY_SIZE,
		.max_keysize		= DES_KEY_SIZE,
		.ivsize			= DES_BLOCK_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base.cra_name		= "ecb(des)",
		.base.cra_blocksize	= DES_BLOCK_SIZE,
		.min_keysize		= DES_KEY_SIZE,
		.max_keysize		= DES_KEY_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.base.cra_name		= "cbc(des3_ede)",
		.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,

		.min_keysize		= DES3_EDE_KEY_SIZE,
		.max_keysize		= DES3_EDE_KEY_SIZE,
		.ivsize			= DES3_EDE_BLOCK_SIZE,
		.setkey			= ablk_des3_setkey,
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base.cra_name		= "ecb(des3_ede)",
		.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.min_keysize		= DES3_EDE_KEY_SIZE,
		.max_keysize		= DES3_EDE_KEY_SIZE,
		.setkey			= ablk_des3_setkey,
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.base.cra_name		= "cbc(aes)",
		.base.cra_blocksize	= AES_BLOCK_SIZE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.base.cra_name		= "ecb(aes)",
		.base.cra_blocksize	= AES_BLOCK_SIZE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto	= {
		.base.cra_name		= "ctr(aes)",
		.base.cra_blocksize	= 1,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	},
	/* CTR is its own inverse, so the NPE runs it in encrypt
	 * mode for both directions */
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.base.cra_name		= "rfc3686(ctr(aes))",
		.base.cra_blocksize	= 1,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
		.setkey			= ablk_rfc3686_setkey,
		.encrypt		= ablk_rfc3686_crypt,
		.decrypt		= ablk_rfc3686_crypt,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
} };
static struct ixp_aead_alg ixp4xx_aeads[] = {
{
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
		.setkey		= des3_aead_setkey,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
		.setkey		= des3_aead_setkey,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };
#define IXP_POSTFIX "-ixp4xx"

static const struct platform_device_info ixp_dev_info __initdata = {
	.name		= DRIVER_NAME,
	.id		= 0,
	.dma_mask	= DMA_BIT_MASK(32),
};
static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	pdev = platform_device_register_full(&ixp_dev_info);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto(&pdev->dev);
	if (err) {
		platform_device_unregister(pdev);
		return err;
	}
	for (i = 0; i < num; i++) {
		struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			continue;
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
			continue;

		/* block ciphers */
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC |
				      CRYPTO_ALG_ALLOCATES_MEMORY;
		if (!cra->setkey)
			cra->setkey = ablk_setkey;
		if (!cra->encrypt)
			cra->encrypt = ablk_encrypt;
		if (!cra->decrypt)
			cra->decrypt = ablk_decrypt;
		cra->init = init_tfm_ablk;
		cra->exit = exit_tfm_ablk;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;
		if (crypto_register_skcipher(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
			       cra->base.cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			continue;
		/* check the AEAD table here, not the skcipher one */
		if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
			continue;

		/* authenc */
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC |
				      CRYPTO_ALG_ALLOCATES_MEMORY;
		cra->setkey = cra->setkey ?: aead_setkey;
		cra->setauthsize = aead_setauthsize;
		cra->encrypt = aead_encrypt;
		cra->decrypt = aead_decrypt;
		cra->init = init_tfm_aead;
		cra->exit = exit_tfm_aead;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;

		if (crypto_register_aead(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
			       cra->base.cra_driver_name);
		else
			ixp4xx_aeads[i].registered = 1;
	}
	return 0;
}
static void __exit ixp_module_exit(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		if (ixp4xx_aeads[i].registered)
			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
	}

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto(&pdev->dev);
	platform_device_unregister(pdev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");