2 * talitos - Freescale Integrated Security Engine (SEC) device driver
4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/of_platform.h>
38 #include <linux/dma-mapping.h>
40 #include <linux/spinlock.h>
41 #include <linux/rtnetlink.h>
42 #include <linux/slab.h>
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/internal/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
/*
 * Write a bus (DMA) address into a hardware descriptor pointer: the low
 * 32 bits go big-endian into ->ptr, the upper bits into ->eptr.
 * NOTE(review): this extract is missing lines (margin numbers jump
 * 58 -> 61 -> 63); the ->eptr store is presumably gated on !is_sec1 —
 * confirm against the full file.
 */
58 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
61 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
63 ptr->eptr = upper_32_bits(dma_addr);
/*
 * Copy one h/w descriptor pointer to another (both the ->ptr word and the
 * extended ->eptr word). NOTE(review): interior lines are missing from this
 * extract; the ->eptr copy is presumably skipped when is_sec1 — confirm.
 */
66 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
67 struct talitos_ptr *src_ptr, bool is_sec1)
69 dst_ptr->ptr = src_ptr->ptr;
71 dst_ptr->eptr = src_ptr->eptr;
/*
 * Store a big-endian 16-bit length into a descriptor pointer: SEC1 parts
 * use the ->len1 field, later parts use ->len. The selecting conditional
 * is not visible in this extract — confirm against the full file.
 */
74 static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
79 ptr->len1 = cpu_to_be16(len);
81 ptr->len = cpu_to_be16(len);
/*
 * Read back the 16-bit length from a descriptor pointer, converting from
 * big-endian; reads ->len1 on SEC1, ->len otherwise (selecting conditional
 * not visible in this extract).
 */
85 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
89 return be16_to_cpu(ptr->len1);
91 return be16_to_cpu(ptr->len);
/*
 * Set (overwrite) the extent byte of a descriptor pointer.
 * NOTE(review): function body is not present in this extract.
 */
94 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
/*
 * OR @val into the j_extent byte of a descriptor pointer — used below to
 * set link-table flags such as DESC_PTR_LNKTBL_JUMP (see talitos_sg_map_ext).
 * Presumably a no-op on SEC1; the guard line is not in this extract.
 */
101 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
104 ptr->j_extent |= val;
108 * map virtual single (contiguous) pointer to h/w descriptor pointer
110 static void map_single_talitos_ptr(struct device *dev,
111 struct talitos_ptr *ptr,
112 unsigned int len, void *data,
113 enum dma_data_direction dir)
/* DMA-map @data and fill in the descriptor pointer's address, length and
 * (zeroed) extent. NOTE(review): the dma_map_single() result is not checked
 * with dma_mapping_error() — flag for review against current upstream. */
115 dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
116 struct talitos_private *priv = dev_get_drvdata(dev);
117 bool is_sec1 = has_ftr_sec1(priv);
119 to_talitos_ptr_len(ptr, len, is_sec1);
120 to_talitos_ptr(ptr, dma_addr, is_sec1);
121 to_talitos_ptr_ext_set(ptr, 0, is_sec1);
125 * unmap bus single (contiguous) h/w descriptor pointer
127 static void unmap_single_talitos_ptr(struct device *dev,
128 struct talitos_ptr *ptr,
129 enum dma_data_direction dir)
131 struct talitos_private *priv = dev_get_drvdata(dev);
132 bool is_sec1 = has_ftr_sec1(priv);
/* Address and length are recovered from the descriptor pointer itself,
 * so the inverse of map_single_talitos_ptr() needs no extra arguments. */
134 dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
135 from_talitos_ptr_len(ptr, is_sec1), dir);
/*
 * Reset channel @ch and re-enable its standard configuration:
 * SEC1 parts self-reset via CCCR_LO, SEC2+ via CCCR; each variant busy-waits
 * (bounded by TALITOS_TIMEOUT) for the reset bit to self-clear, then 36-bit
 * addressing, done-writeback and done-IRQ are enabled. Returns 0 on success;
 * the timeout error path/return lines are partially missing from this extract.
 */
138 static int reset_channel(struct device *dev, int ch)
140 struct talitos_private *priv = dev_get_drvdata(dev);
141 unsigned int timeout = TALITOS_TIMEOUT;
142 bool is_sec1 = has_ftr_sec1(priv);
145 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
146 TALITOS1_CCCR_LO_RESET);
148 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
149 TALITOS1_CCCR_LO_RESET) && --timeout)
152 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
153 TALITOS2_CCCR_RESET);
155 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
156 TALITOS2_CCCR_RESET) && --timeout)
161 dev_err(dev, "failed to reset channel %d\n", ch);
165 /* set 36-bit addressing, done writeback enable and done IRQ enable */
166 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
167 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
169 /* and ICCR writeback, if available */
170 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
171 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
172 TALITOS_CCCR_LO_IWSE);
177 static int reset_device(struct device *dev)
179 struct talitos_private *priv = dev_get_drvdata(dev);
180 unsigned int timeout = TALITOS_TIMEOUT;
181 bool is_sec1 = has_ftr_sec1(priv);
182 u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
184 setbits32(priv->reg + TALITOS_MCR, mcr);
186 while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
191 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
192 setbits32(priv->reg + TALITOS_MCR, mcr);
196 dev_err(dev, "failed to reset device\n");
204 * Reset and initialize the device
206 static int init_device(struct device *dev)
208 struct talitos_private *priv = dev_get_drvdata(dev);
210 bool is_sec1 = has_ftr_sec1(priv);
/* Full bring-up: double device reset (errata workaround below), per-channel
 * reset, then unmask the variant-specific done/error interrupts. Note SEC1
 * enables interrupts by CLEARING mask bits, SEC2 by SETTING them. */
214 * errata documentation: warning: certain SEC interrupts
215 * are not fully cleared by writing the MCR:SWR bit,
216 * set bit twice to completely reset
218 err = reset_device(dev);
222 err = reset_device(dev);
227 for (ch = 0; ch < priv->num_channels; ch++) {
228 err = reset_channel(dev, ch);
233 /* enable channel done and error interrupts */
235 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
236 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
237 /* disable parity error check in DEU (erroneous? test vect.) */
238 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
240 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
241 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
244 /* disable integrity check error interrupts (use writeback instead) */
245 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
246 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
247 TALITOS_MDEUICR_LO_ICE);
253 * talitos_submit - submits a descriptor to the device for processing
254 * @dev: the SEC device to be used
255 * @ch: the SEC device channel to be used
256 * @desc: the descriptor to be processed by the device
257 * @callback: whom to call when processing is complete
258 * @context: a handle for use by caller (optional)
260 * desc must contain valid dma-mapped (bus physical) address pointers.
261 * callback must check err and feedback in descriptor header
262 * for device processing status.
264 int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
265 void (*callback)(struct device *dev,
266 struct talitos_desc *desc,
267 void *context, int error),
/* Under the per-channel head lock: reserve a fifo slot via submit_count
 * (bail if the h/w fifo is full), DMA-map the descriptor (SEC1 maps from
 * hdr1, mirroring hdr — both branches of the sampled-out is_sec1 test are
 * visible at margin lines 291/295), record the caller's callback/context,
 * advance the head index (fifo_len is a power of two), and finally kick the
 * channel by writing the descriptor bus address to the fetch fifo registers.
 * Return value lines are missing from this extract; presumably -EINPROGRESS
 * on success and -EAGAIN when full — confirm against the full file. */
270 struct talitos_private *priv = dev_get_drvdata(dev);
271 struct talitos_request *request;
274 bool is_sec1 = has_ftr_sec1(priv);
276 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
278 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
279 /* h/w fifo is full */
280 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
284 head = priv->chan[ch].head;
285 request = &priv->chan[ch].fifo[head];
287 /* map descriptor and save caller data */
289 desc->hdr1 = desc->hdr;
291 request->dma_desc = dma_map_single(dev, &desc->hdr1,
295 request->dma_desc = dma_map_single(dev, desc,
299 request->callback = callback;
300 request->context = context;
302 /* increment fifo head */
303 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
306 request->desc = desc;
310 out_be32(priv->chan[ch].reg + TALITOS_FF,
311 upper_32_bits(request->dma_desc));
312 out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
313 lower_32_bits(request->dma_desc));
315 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
319 EXPORT_SYMBOL(talitos_submit);
322 * process what was done, notify callback of error if not
324 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
/* Drain completed (or, on @error, all) requests from channel @ch's s/w fifo.
 * Each iteration: under the tail lock, inspect the descriptor header's DONE
 * bits, unmap the descriptor, copy the request into saved_req, free the fifo
 * slot and advance tail; then drop the lock, release the submit_count slot
 * and invoke the callback outside the lock (callbacks may resubmit).
 * When @error is set without @reset_ch, processing may resume after a
 * single-descriptor error (see the status == error check near the end). */
326 struct talitos_private *priv = dev_get_drvdata(dev);
327 struct talitos_request *request, saved_req;
330 bool is_sec1 = has_ftr_sec1(priv);
332 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
334 tail = priv->chan[ch].tail;
335 while (priv->chan[ch].fifo[tail].desc) {
338 request = &priv->chan[ch].fifo[tail];
340 /* descriptors with their done bits set don't get the error */
342 hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;
344 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
352 dma_unmap_single(dev, request->dma_desc,
356 /* copy entries so we can call callback outside lock */
357 saved_req.desc = request->desc;
358 saved_req.callback = request->callback;
359 saved_req.context = request->context;
361 /* release request entry in fifo */
363 request->desc = NULL;
365 /* increment fifo tail */
366 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
368 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
370 atomic_dec(&priv->chan[ch].submit_count);
372 saved_req.callback(dev, saved_req.desc, saved_req.context,
374 /* channel may resume processing in single desc error case */
375 if (error && !reset_ch && status == error)
377 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
378 tail = priv->chan[ch].tail;
381 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
385 * process completed requests for channels that have done status
387 #define DEF_TALITOS1_DONE(name, ch_done_mask) \
388 static void talitos1_done_##name(unsigned long data) \
390 struct device *dev = (struct device *)data; \
391 struct talitos_private *priv = dev_get_drvdata(dev); \
392 unsigned long flags; \
394 if (ch_done_mask & 0x10000000) \
395 flush_channel(dev, 0, 0, 0); \
396 if (priv->num_channels == 1) \
398 if (ch_done_mask & 0x40000000) \
399 flush_channel(dev, 1, 0, 0); \
400 if (ch_done_mask & 0x00010000) \
401 flush_channel(dev, 2, 0, 0); \
402 if (ch_done_mask & 0x00040000) \
403 flush_channel(dev, 3, 0, 0); \
406 /* At this point, all completed channels have been processed */ \
407 /* Unmask done interrupts for channels completed later on. */ \
408 spin_lock_irqsave(&priv->reg_lock, flags); \
409 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
410 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
411 spin_unlock_irqrestore(&priv->reg_lock, flags); \
414 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
/* Tasklet body generator for SEC2+: same structure as DEF_TALITOS1_DONE but
 * with SEC2 done-bit layout (ch N done = bit 2N) and interrupts re-enabled
 * by SETTING IMR bits. Instantiated for all-4-channel and the two split
 * (even/odd channel) IRQ configurations. */
416 #define DEF_TALITOS2_DONE(name, ch_done_mask) \
417 static void talitos2_done_##name(unsigned long data) \
419 struct device *dev = (struct device *)data; \
420 struct talitos_private *priv = dev_get_drvdata(dev); \
421 unsigned long flags; \
423 if (ch_done_mask & 1) \
424 flush_channel(dev, 0, 0, 0); \
425 if (priv->num_channels == 1) \
427 if (ch_done_mask & (1 << 2)) \
428 flush_channel(dev, 1, 0, 0); \
429 if (ch_done_mask & (1 << 4)) \
430 flush_channel(dev, 2, 0, 0); \
431 if (ch_done_mask & (1 << 6)) \
432 flush_channel(dev, 3, 0, 0); \
435 /* At this point, all completed channels have been processed */ \
436 /* Unmask done interrupts for channels completed later on. */ \
437 spin_lock_irqsave(&priv->reg_lock, flags); \
438 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
439 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
440 spin_unlock_irqrestore(&priv->reg_lock, flags); \
443 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
444 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
445 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
448 * locate current (offending) descriptor
450 static __be32 current_desc_hdr(struct device *dev, int ch)
/* Read the 64-bit current-descriptor pointer (CDPR/CDPR_LO) for channel @ch
 * and walk the s/w fifo from tail until the entry whose dma_desc matches,
 * then return that descriptor's header. Returns an error/zero value when
 * CDPR is NULL or no fifo entry matches (early-return lines are sampled
 * out of this extract — confirm against the full file). */
452 struct talitos_private *priv = dev_get_drvdata(dev);
456 cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
457 cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
460 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
464 tail = priv->chan[ch].tail;
467 while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
468 iter = (iter + 1) & (priv->fifo_len - 1);
470 dev_err(dev, "couldn't locate current descriptor\n");
475 return priv->chan[ch].fifo[iter].desc->hdr;
479 * user diagnostics; report root cause of error based on execution unit status
481 static void report_eu_error(struct device *dev, int ch, __be32 desc_hdr)
/* Decode the primary (SEL0) and secondary (SEL1) execution-unit selection
 * from the offending descriptor header and dump that EU's interrupt status
 * register pair, then dump the channel's 8-word descriptor buffer. If no
 * header was recovered, it is re-read from the channel DESCBUF. Note the
 * KEU case reads reg_pkeu — presumably the KEU shares the PKEU register
 * window; confirm against the SEC reference manual. break statements are
 * sampled out of this extract. */
483 struct talitos_private *priv = dev_get_drvdata(dev);
487 desc_hdr = cpu_to_be32(in_be32(priv->chan[ch].reg + TALITOS_DESCBUF));
489 switch (desc_hdr & DESC_HDR_SEL0_MASK) {
490 case DESC_HDR_SEL0_AFEU:
491 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
492 in_be32(priv->reg_afeu + TALITOS_EUISR),
493 in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
495 case DESC_HDR_SEL0_DEU:
496 dev_err(dev, "DEUISR 0x%08x_%08x\n",
497 in_be32(priv->reg_deu + TALITOS_EUISR),
498 in_be32(priv->reg_deu + TALITOS_EUISR_LO));
500 case DESC_HDR_SEL0_MDEUA:
501 case DESC_HDR_SEL0_MDEUB:
502 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
503 in_be32(priv->reg_mdeu + TALITOS_EUISR),
504 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
506 case DESC_HDR_SEL0_RNG:
507 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
508 in_be32(priv->reg_rngu + TALITOS_ISR),
509 in_be32(priv->reg_rngu + TALITOS_ISR_LO));
511 case DESC_HDR_SEL0_PKEU:
512 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
513 in_be32(priv->reg_pkeu + TALITOS_EUISR),
514 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
516 case DESC_HDR_SEL0_AESU:
517 dev_err(dev, "AESUISR 0x%08x_%08x\n",
518 in_be32(priv->reg_aesu + TALITOS_EUISR),
519 in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
521 case DESC_HDR_SEL0_CRCU:
522 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
523 in_be32(priv->reg_crcu + TALITOS_EUISR),
524 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
526 case DESC_HDR_SEL0_KEU:
527 dev_err(dev, "KEUISR 0x%08x_%08x\n",
528 in_be32(priv->reg_pkeu + TALITOS_EUISR),
529 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
533 switch (desc_hdr & DESC_HDR_SEL1_MASK) {
534 case DESC_HDR_SEL1_MDEUA:
535 case DESC_HDR_SEL1_MDEUB:
536 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
537 in_be32(priv->reg_mdeu + TALITOS_EUISR),
538 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
540 case DESC_HDR_SEL1_CRCU:
541 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
542 in_be32(priv->reg_crcu + TALITOS_EUISR),
543 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
547 for (i = 0; i < 8; i++)
548 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
549 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
550 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
554 * recover from error interrupts
556 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
/* Error-interrupt service: for each channel whose error bit is set in @isr
 * (SEC1 uses bits 29/31/17/19, SEC2 uses odd bits 2*ch+1), decode and log
 * the channel pointer status (CCPSR_LO) causes, flush the channel with
 * -EIO semantics, and either reset it or — on SEC2 only — set CCCR CONT to
 * resume after a single-descriptor error. Global/unrecoverable conditions
 * (MDTE/SOF, TEA, done overflow, rngu error) escalate to a full device
 * purge-and-reset. Margin line numbers show this extract is sampled;
 * control-flow lines (continue/break, error assignment) are missing. */
558 struct talitos_private *priv = dev_get_drvdata(dev);
559 unsigned int timeout = TALITOS_TIMEOUT;
560 int ch, error, reset_dev = 0;
562 bool is_sec1 = has_ftr_sec1(priv);
563 int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
565 for (ch = 0; ch < priv->num_channels; ch++) {
566 /* skip channels without errors */
568 /* bits 29, 31, 17, 19 */
569 if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
572 if (!(isr & (1 << (ch * 2 + 1))))
578 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
580 if (v_lo & TALITOS_CCPSR_LO_DOF) {
581 dev_err(dev, "double fetch fifo overflow error\n");
585 if (v_lo & TALITOS_CCPSR_LO_SOF) {
586 /* h/w dropped descriptor */
587 dev_err(dev, "single fetch fifo overflow error\n");
590 if (v_lo & TALITOS_CCPSR_LO_MDTE)
591 dev_err(dev, "master data transfer error\n");
592 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
/* fix: error string said "pointeur" (typo) — SEC1 reports a pointer-not-
 * complete condition here, SEC2 an s/g zero-length condition */
593 dev_err(dev, is_sec1 ? "pointer not complete error\n"
594 : "s/g data length zero error\n");
595 if (v_lo & TALITOS_CCPSR_LO_FPZ)
596 dev_err(dev, is_sec1 ? "parity error\n"
597 : "fetch pointer zero error\n");
598 if (v_lo & TALITOS_CCPSR_LO_IDH)
599 dev_err(dev, "illegal descriptor header error\n");
600 if (v_lo & TALITOS_CCPSR_LO_IEU)
601 dev_err(dev, is_sec1 ? "static assignment error\n"
602 : "invalid exec unit error\n");
603 if (v_lo & TALITOS_CCPSR_LO_EU)
604 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
606 if (v_lo & TALITOS_CCPSR_LO_GB)
607 dev_err(dev, "gather boundary error\n");
608 if (v_lo & TALITOS_CCPSR_LO_GRL)
609 dev_err(dev, "gather return/length error\n");
610 if (v_lo & TALITOS_CCPSR_LO_SB)
611 dev_err(dev, "scatter boundary error\n");
612 if (v_lo & TALITOS_CCPSR_LO_SRL)
613 dev_err(dev, "scatter return/length error\n");
616 flush_channel(dev, ch, error, reset_ch);
619 reset_channel(dev, ch);
/* SEC2 continuation path: set CONT and wait for it to clear */
621 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
623 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
624 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
625 TALITOS2_CCCR_CONT) && --timeout)
628 dev_err(dev, "failed to restart channel %d\n",
634 if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
635 (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
636 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
637 dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
640 dev_err(dev, "done overflow, internal time out, or "
641 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
643 /* purge request queues */
644 for (ch = 0; ch < priv->num_channels; ch++)
645 flush_channel(dev, ch, -EIO, 1);
647 /* reset and reinitialize the device */
/* Hard-IRQ handler generator for SEC1: under reg_lock, read and acknowledge
 * ISR/ISR_LO, dispatch errors to talitos_error() (lock dropped first), and
 * on done bits mask further done interrupts and schedule the matching
 * done tasklet, which re-unmasks on exit. Returns IRQ_HANDLED when any
 * relevant bit was set. */
652 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
653 static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
655 struct device *dev = data; \
656 struct talitos_private *priv = dev_get_drvdata(dev); \
658 unsigned long flags; \
660 spin_lock_irqsave(&priv->reg_lock, flags); \
661 isr = in_be32(priv->reg + TALITOS_ISR); \
662 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
663 /* Acknowledge interrupt */ \
664 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
665 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
667 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
668 spin_unlock_irqrestore(&priv->reg_lock, flags); \
669 talitos_error(dev, isr & ch_err_mask, isr_lo); \
672 if (likely(isr & ch_done_mask)) { \
673 /* mask further done interrupts. */ \
674 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
675 /* done_task will unmask done interrupts at exit */ \
676 tasklet_schedule(&priv->done_task[tlet]); \
678 spin_unlock_irqrestore(&priv->reg_lock, flags); \
681 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
685 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
/* Hard-IRQ handler generator for SEC2+: mirrors the SEC1 variant above,
 * except any non-zero isr_lo is treated as an error and done interrupts
 * are masked by CLEARING IMR bits (SEC2 polarity). Instantiated once for
 * single-IRQ (4ch) and twice for split even/odd channel IRQ lines. */
687 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
688 static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
690 struct device *dev = data; \
691 struct talitos_private *priv = dev_get_drvdata(dev); \
693 unsigned long flags; \
695 spin_lock_irqsave(&priv->reg_lock, flags); \
696 isr = in_be32(priv->reg + TALITOS_ISR); \
697 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
698 /* Acknowledge interrupt */ \
699 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
700 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
702 if (unlikely(isr & ch_err_mask || isr_lo)) { \
703 spin_unlock_irqrestore(&priv->reg_lock, flags); \
704 talitos_error(dev, isr & ch_err_mask, isr_lo); \
707 if (likely(isr & ch_done_mask)) { \
708 /* mask further done interrupts. */ \
709 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
710 /* done_task will unmask done interrupts at exit */ \
711 tasklet_schedule(&priv->done_task[tlet]); \
713 spin_unlock_irqrestore(&priv->reg_lock, flags); \
716 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
720 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
721 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
723 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
/*
 * hwrng .data_present hook: poll the RNGU status register output-FIFO-level
 * field up to 20 times for data availability. The per-iteration wait/udelay
 * and return lines are sampled out of this extract.
 */
729 static int talitos_rng_data_present(struct hwrng *rng, int wait)
731 struct device *dev = (struct device *)rng->priv;
732 struct talitos_private *priv = dev_get_drvdata(dev);
736 for (i = 0; i < 20; i++) {
737 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
738 TALITOS_RNGUSR_LO_OFL;
/*
 * hwrng .data_read hook. The RNG fifo must be consumed in 64-bit units, so
 * both halves are read; the second read deliberately overwrites *data —
 * only the low word is returned to the hwrng core per call.
 */
747 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
749 struct device *dev = (struct device *)rng->priv;
750 struct talitos_private *priv = dev_get_drvdata(dev);
752 /* rng fifo requires 64-bit accesses */
753 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
754 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
/*
 * hwrng .init hook: software-reset the RNGU (EURCR_LO:SR), busy-wait for
 * reset-done (EUSR_LO:RD) bounded by TALITOS_TIMEOUT, then start generation
 * by writing the data-size register. Returns 0 on success.
 */
759 static int talitos_rng_init(struct hwrng *rng)
761 struct device *dev = (struct device *)rng->priv;
762 struct talitos_private *priv = dev_get_drvdata(dev);
763 unsigned int timeout = TALITOS_TIMEOUT;
765 setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
766 while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
767 & TALITOS_RNGUSR_LO_RD)
771 dev_err(dev, "failed to reset rng hw\n");
775 /* start generating */
776 setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
/*
 * Fill in the hwrng ops for this device and register with the hwrng core;
 * rng_registered is set on success so unregister can be conditional.
 * Note the trailing commas on the assignments are comma operators chaining
 * the expressions — legal, though unconventional.
 */
781 static int talitos_register_rng(struct device *dev)
783 struct talitos_private *priv = dev_get_drvdata(dev);
786 priv->rng.name = dev_driver_string(dev),
787 priv->rng.init = talitos_rng_init,
788 priv->rng.data_present = talitos_rng_data_present,
789 priv->rng.data_read = talitos_rng_data_read,
790 priv->rng.priv = (unsigned long)dev;
792 err = hwrng_register(&priv->rng);
794 priv->rng_registered = true;
/* Unregister the hwrng if (and only if) talitos_register_rng() succeeded. */
799 static void talitos_unregister_rng(struct device *dev)
801 struct talitos_private *priv = dev_get_drvdata(dev);
803 if (!priv->rng_registered)
806 hwrng_unregister(&priv->rng);
807 priv->rng_registered = false;
/* Crypto-API registration priority and size limits. The max key size bounds
 * concatenated auth+enc keys (see aead_setkey); it depends on the largest
 * supported digest block size, selected at build time. NOTE(review): the
 * #else line of the CONFIG_CRYPTO_DEV_TALITOS2 conditional is sampled out
 * of this extract. */
813 #define TALITOS_CRA_PRIORITY 3000
815 * Defines a priority for doing AEAD with descriptors type
816 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
818 #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
819 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
820 #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
822 #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
824 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
/* Per-transform (tfm) context fields — the struct's opening line is sampled
 * out of this extract. key[] holds authkey||enckey as laid out by
 * aead_setkey(); desc_hdr_template selects EUs/modes for the algorithm. */
829 __be32 desc_hdr_template;
830 u8 key[TALITOS_MAX_KEY_SIZE];
831 u8 iv[TALITOS_MAX_IV_LENGTH];
833 unsigned int enckeylen;
834 unsigned int authkeylen;
/* Per-request ahash state: saved hardware hash context plus buffering for
 * partial blocks (buf holds pending bytes, bufnext the carry-over for the
 * next update; to_hash_later counts bytes deferred). Some fields are
 * sampled out of this extract. */
837 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
838 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
840 struct talitos_ahash_req_ctx {
841 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
842 unsigned int hw_context_size;
843 u8 buf[HASH_MAX_BLOCK_SIZE];
844 u8 bufnext[HASH_MAX_BLOCK_SIZE];
848 unsigned int to_hash_later;
850 struct scatterlist bufsl[2];
851 struct scatterlist *psrc;
/* Serialized ahash state for crypto_ahash export/import — mirrors the
 * resumable subset of talitos_ahash_req_ctx (h/w context + pending buffer).
 * Some fields are sampled out of this extract. */
854 struct talitos_export_state {
855 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
856 u8 buf[HASH_MAX_BLOCK_SIZE];
860 unsigned int to_hash_later;
/*
 * AEAD (authenc) setkey: split the composite key with
 * crypto_authenc_extractkeys(), verify the combined length fits key[], and
 * store authkey||enckey contiguously in the tfm context. On bad input the
 * (sampled-out) badkey path sets CRYPTO_TFM_RES_BAD_KEY_LEN and returns
 * -EINVAL. NOTE(review): the extracted `keys` struct does not appear to be
 * zeroized before return — later upstream adds memzero_explicit; confirm.
 */
864 static int aead_setkey(struct crypto_aead *authenc,
865 const u8 *key, unsigned int keylen)
867 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
868 struct crypto_authenc_keys keys;
870 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
873 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
876 memcpy(ctx->key, keys.authkey, keys.authkeylen);
877 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
879 ctx->keylen = keys.authkeylen + keys.enckeylen;
880 ctx->enckeylen = keys.enckeylen;
881 ctx->authkeylen = keys.authkeylen;
886 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
891 * talitos_edesc - s/w-extended descriptor
892 * @src_nents: number of segments in input scatterlist
893 * @dst_nents: number of segments in output scatterlist
894 * @icv_ool: whether ICV is out-of-line
895 * @iv_dma: dma address of iv for checking continuity and link table
896 * @dma_len: length of dma mapped link_tbl space
897 * @dma_link_tbl: bus physical address of link_tbl/buf
898 * @desc: h/w descriptor
899 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
900 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
902 * if decrypting (with authcheck), or either one of src_nents or dst_nents
903 * is greater than 1, an integrity check value is concatenated to the end
/* See the kernel-doc block above for field meanings. link_tbl[] is a
 * flexible-array tail sharing storage with the SEC1 bounce buffer (the
 * union/`buf` member is sampled out of this extract). */
906 struct talitos_edesc {
912 dma_addr_t dma_link_tbl;
913 struct talitos_desc desc;
915 struct talitos_ptr link_tbl[0];
/*
 * Undo the src/dst scatterlist DMA mapping for a completed request.
 * On SEC1 with a multi-segment dst, results were produced into the bounce
 * buffer (edesc->buf), so they are synced and copied back to the dst
 * scatterlist first. The src != dst conditional is sampled out of this
 * extract; the final branch handles the bidirectional (in-place) mapping.
 */
920 static void talitos_sg_unmap(struct device *dev,
921 struct talitos_edesc *edesc,
922 struct scatterlist *src,
923 struct scatterlist *dst,
924 unsigned int len, unsigned int offset)
926 struct talitos_private *priv = dev_get_drvdata(dev);
927 bool is_sec1 = has_ftr_sec1(priv);
928 unsigned int src_nents = edesc->src_nents ? : 1;
929 unsigned int dst_nents = edesc->dst_nents ? : 1;
931 if (is_sec1 && dst && dst_nents > 1) {
932 dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
933 len, DMA_FROM_DEVICE);
934 sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
938 if (src_nents == 1 || !is_sec1)
939 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
941 if (dst && (dst_nents == 1 || !is_sec1))
942 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
943 } else if (src_nents == 1 || !is_sec1) {
944 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
/*
 * Unmap everything an ipsec_esp() descriptor mapped: single-mapped key/IV
 * pointers (ptr[6] only for true IPSEC_ESP descriptors), the src/dst
 * scatterlists, and the link-table DMA area. For the HSNA (non-IPSEC_ESP)
 * decrypt flavor, the last-block IV is recovered from the end of dst into
 * ctx->iv. Several guard lines are sampled out of this extract.
 */
948 static void ipsec_esp_unmap(struct device *dev,
949 struct talitos_edesc *edesc,
950 struct aead_request *areq, bool encrypt)
952 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
953 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
954 unsigned int ivsize = crypto_aead_ivsize(aead);
955 unsigned int authsize = crypto_aead_authsize(aead);
956 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
958 if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)
959 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
961 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
962 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
963 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
965 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, cryptlen,
969 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
972 if (!(edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
973 unsigned int dst_nents = edesc->dst_nents ? : 1;
975 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
976 areq->assoclen + cryptlen - ivsize);
981 * ipsec_esp descriptor callbacks
983 static void ipsec_esp_encrypt_done(struct device *dev,
984 struct talitos_desc *desc, void *context,
/* Completion callback for AEAD encrypt: unmap everything, then — when the
 * ICV was generated out-of-line (icv_ool) — copy it from its staging area
 * (SEC1: bounce buffer; SEC2: past the link tables) to the tail of dst,
 * and complete the aead request. The edesc is presumably freed on a line
 * sampled out of this extract — confirm. */
987 struct talitos_private *priv = dev_get_drvdata(dev);
988 bool is_sec1 = has_ftr_sec1(priv);
989 struct aead_request *areq = context;
990 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
991 unsigned int authsize = crypto_aead_authsize(authenc);
992 struct talitos_edesc *edesc;
995 edesc = container_of(desc, struct talitos_edesc, desc);
997 ipsec_esp_unmap(dev, edesc, areq, true);
999 /* copy the generated ICV to dst */
1000 if (edesc->icv_ool) {
1002 icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
1004 icvdata = &edesc->link_tbl[edesc->src_nents +
1005 edesc->dst_nents + 2];
1006 sg_pcopy_from_buffer(areq->dst, edesc->dst_nents ? : 1, icvdata,
1007 authsize, areq->assoclen + areq->cryptlen);
1012 aead_request_complete(areq, err);
/*
 * Completion callback for AEAD decrypt with software ICV check: after
 * unmapping, copy the received ICV from the tail of dst and compare it
 * (constant-time, crypto_memneq) with the ICV the hardware computed into
 * the link-table area; mismatch yields -EBADMSG. The is_sec1 branches that
 * select icvdata/oicv locations are partially sampled out of this extract.
 */
1015 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1016 struct talitos_desc *desc,
1017 void *context, int err)
1019 struct aead_request *req = context;
1020 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1021 unsigned int authsize = crypto_aead_authsize(authenc);
1022 struct talitos_edesc *edesc;
1024 struct talitos_private *priv = dev_get_drvdata(dev);
1025 bool is_sec1 = has_ftr_sec1(priv);
1027 edesc = container_of(desc, struct talitos_edesc, desc);
1029 ipsec_esp_unmap(dev, edesc, req, false);
1032 char icvdata[SHA512_DIGEST_SIZE];
1033 int nents = edesc->dst_nents ? : 1;
1034 unsigned int len = req->assoclen + req->cryptlen;
1038 sg_pcopy_to_buffer(req->dst, nents, icvdata, authsize,
1042 icv = (char *)sg_virt(req->dst) + len - authsize;
1045 if (edesc->dma_len) {
1047 oicv = (char *)&edesc->dma_link_tbl +
1048 req->assoclen + req->cryptlen;
1051 &edesc->link_tbl[edesc->src_nents +
1052 edesc->dst_nents + 2];
1054 icv = oicv + authsize;
1056 oicv = (char *)&edesc->link_tbl[0];
1058 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1063 aead_request_complete(req, err);
/*
 * Completion callback for AEAD decrypt when the hardware performed the ICV
 * comparison (TALITOS_FTR_HW_AUTH_CHECK): translate the ICCR1 status in the
 * low header word into -EBADMSG on mismatch, then complete the request.
 */
1066 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1067 struct talitos_desc *desc,
1068 void *context, int err)
1070 struct aead_request *req = context;
1071 struct talitos_edesc *edesc;
1073 edesc = container_of(desc, struct talitos_edesc, desc);
1075 ipsec_esp_unmap(dev, edesc, req, false);
1077 /* check ICV auth status */
1078 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1079 DESC_HDR_LO_ICCR1_PASS))
1084 aead_request_complete(req, err);
1088 * convert scatterlist to SEC h/w link table format
1089 * stop at cryptlen bytes
1091 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1092 unsigned int offset, int cryptlen,
1093 struct talitos_ptr *link_tbl_ptr)
/* Walk up to @sg_count entries, skipping @offset bytes, emitting one link
 * table entry per (clamped) segment until @cryptlen bytes are covered, then
 * tag the final entry with DESC_PTR_LNKTBL_RETURN. Returns the number of
 * entries written (count). The len-clamping, count/cryptlen bookkeeping and
 * sg_next() lines are sampled out of this extract. */
1095 int n_sg = sg_count;
1098 while (cryptlen && sg && n_sg--) {
1099 unsigned int len = sg_dma_len(sg);
1101 if (offset >= len) {
1111 to_talitos_ptr(link_tbl_ptr + count,
1112 sg_dma_address(sg) + offset, 0);
1113 to_talitos_ptr_len(link_tbl_ptr + count, len, 0);
1114 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1123 /* tag end of link table */
1125 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1126 DESC_PTR_LNKTBL_RETURN, 0);
/*
 * Point a descriptor pointer at @len bytes of @src (+@elen extent bytes,
 * e.g. trailing ICV): single-segment sources are referenced directly; on
 * SEC1 multi-segment data goes through the bounce buffer; otherwise a link
 * table is built at @tbl_off and the pointer gets DESC_PTR_LNKTBL_JUMP.
 * Returns the updated segment/entry count used by callers to advance
 * tbl_off. Early-return and SEC1-buffer lines are sampled out here.
 */
1131 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1132 unsigned int len, struct talitos_edesc *edesc,
1133 struct talitos_ptr *ptr, int sg_count,
1134 unsigned int offset, int tbl_off, int elen)
1136 struct talitos_private *priv = dev_get_drvdata(dev);
1137 bool is_sec1 = has_ftr_sec1(priv);
1144 to_talitos_ptr_len(ptr, len, is_sec1);
1145 to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1147 if (sg_count == 1) {
1148 to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
1152 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
1155 sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
1156 &edesc->link_tbl[tbl_off]);
1157 if (sg_count == 1) {
1158 /* Only one segment now, so no link tbl needed*/
1159 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1162 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1163 tbl_off * sizeof(struct talitos_ptr), is_sec1);
1164 to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
/* Convenience wrapper around talitos_sg_map_ext() with no extent bytes
 * (elen presumably 0 — the literal argument is on a sampled-out line). */
1169 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1170 unsigned int len, struct talitos_edesc *edesc,
1171 struct talitos_ptr *ptr, int sg_count,
1172 unsigned int offset, int tbl_off)
1174 return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
/*
 * ipsec_esp() - fill in the 7-pointer SEC descriptor for an AEAD request
 * and hand it to the hardware via talitos_submit(); on submit failure the
 * DMA mappings are undone with ipsec_esp_unmap().
 * NOTE(review): many interior lines (error paths, else-branches, closing
 * braces) are missing from this extract — the comments below describe only
 * the visible statements.
 */
1179 * fill in and submit ipsec_esp descriptor
1181 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1183 void (*callback)(struct device *dev,
1184 struct talitos_desc *desc,
1185 void *context, int error))
1187 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1188 unsigned int authsize = crypto_aead_authsize(aead);
1189 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1190 struct device *dev = ctx->dev;
1191 struct talitos_desc *desc = &edesc->desc;
/* on decrypt, the trailing ICV is not part of the cipher length */
1192 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1193 unsigned int ivsize = crypto_aead_ivsize(aead);
1197 bool sync_needed = false;
1198 struct talitos_private *priv = dev_get_drvdata(dev);
1199 bool is_sec1 = has_ftr_sec1(priv);
/* ptr[0]: authentication (HMAC) key, length ctx->authkeylen */
1202 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
/* SEC1 path: linearize a multi-entry source scatterlist into edesc->buf */
1205 sg_count = edesc->src_nents ?: 1;
1206 if (is_sec1 && sg_count > 1)
1207 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1208 areq->assoclen + cryptlen);
1210 sg_count = dma_map_sg(dev, areq->src, sg_count,
1211 (areq->src == areq->dst) ?
1212 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
/* ptr[1]: associated data (assoclen bytes of src) */
1215 ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1216 &desc->ptr[1], sg_count, 0, tbl_off);
/* ptr[2]/ptr[3]: IV and cipher key — slot assignment depends on whether
 * this is an IPSEC_ESP-type descriptor (IV in ptr[2]) or not (IV in ptr[3]) */
1224 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
1225 to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, is_sec1);
1226 to_talitos_ptr_len(&desc->ptr[2], ivsize, is_sec1);
1227 to_talitos_ptr_ext_set(&desc->ptr[2], 0, is_sec1);
1229 to_talitos_ptr(&desc->ptr[3], edesc->iv_dma, is_sec1);
1230 to_talitos_ptr_len(&desc->ptr[3], ivsize, is_sec1);
1231 to_talitos_ptr_ext_set(&desc->ptr[3], 0, is_sec1);
/* cipher key lives right after the auth key in ctx->key */
1235 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
1236 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
1237 (char *)&ctx->key + ctx->authkeylen,
1240 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->enckeylen,
1241 (char *)&ctx->key + ctx->authkeylen,
1246 * map and adjust cipher len to aead request cryptlen.
1247 * extent is bytes of HMAC postpended to ciphertext,
1248 * typically 12 for ipsec
1250 if ((desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1251 (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
/* ptr[4]: cipher input — cryptlen bytes of src past the assoc data */
1254 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1255 sg_count, areq->assoclen, tbl_off, elen);
/* ptr[5]: cipher output — map dst separately only when it differs from src */
1263 if (areq->src != areq->dst) {
1264 sg_count = edesc->dst_nents ? : 1;
1265 if (!is_sec1 || sg_count == 1)
1266 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1269 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1270 sg_count, areq->assoclen, tbl_off);
1272 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
1273 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
/* icv_ool marks that the ICV ends up out-of-line (in the link table area) */
1278 edesc->icv_ool = true;
/* ptr[6]: ICV — for ESP-type descriptors, append a link-table entry that
 * points at ICV space following the link tables */
1281 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
1282 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1283 int offset = (edesc->src_nents + edesc->dst_nents + 2) *
1284 sizeof(struct talitos_ptr) + authsize;
1286 /* Add an entry to the link table for ICV data */
1287 to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1288 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
1290 to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);
1292 /* icv data follows link tables */
1293 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
1296 dma_addr_t addr = edesc->dma_link_tbl;
1299 addr += areq->assoclen + cryptlen;
1301 addr += sizeof(struct talitos_ptr) * tbl_off;
1303 to_talitos_ptr(&desc->ptr[6], addr, is_sec1);
1304 to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
1306 } else if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
1307 ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
1308 &desc->ptr[6], sg_count, areq->assoclen +
1313 edesc->icv_ool = true;
1316 edesc->icv_ool = false;
1319 edesc->icv_ool = false;
/* non-ESP types carry the IV in ptr[6] instead — TODO confirm against full file */
1323 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
1324 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1328 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
/* hand the descriptor to the channel; unmap everything on immediate failure */
1332 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1333 if (ret != -EINPROGRESS) {
1334 ipsec_esp_unmap(dev, edesc, areq, encrypt);
/*
 * talitos_edesc_alloc() - allocate and DMA-map the extended descriptor for
 * a request: base edesc + link tables + ICV space + a copy of the IV.
 * Returns an ERR_PTR on over-limit lengths, bad scatterlists, or OOM.
 * NOTE(review): interior lines (some parameters, GFP_ATOMIC fallback,
 * closing braces) are missing from this extract.
 */
1341 * allocate and map the extended descriptor
1343 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1344 struct scatterlist *src,
1345 struct scatterlist *dst,
1347 unsigned int assoclen,
1348 unsigned int cryptlen,
1349 unsigned int authsize,
1350 unsigned int ivsize,
1355 struct talitos_edesc *edesc;
1356 int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1357 dma_addr_t iv_dma = 0;
1358 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1360 struct talitos_private *priv = dev_get_drvdata(dev);
1361 bool is_sec1 = has_ftr_sec1(priv);
/* SEC1 and SEC2+ have different hardware data-length ceilings */
1362 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1364 if (cryptlen + authsize > max_len) {
1365 dev_err(dev, "length exceeds h/w max limit\n");
1366 return ERR_PTR(-EINVAL);
/* in-place (or hash-only) case: src covers data + ICV */
1369 if (!dst || dst == src) {
1370 src_len = assoclen + cryptlen + authsize;
1371 src_nents = sg_nents_for_len(src, src_len);
1372 if (src_nents < 0) {
1373 dev_err(dev, "Invalid number of src SG.\n");
1374 return ERR_PTR(-EINVAL);
/* nents == 1 is recorded as 0: no link table needed for a flat buffer */
1376 src_nents = (src_nents == 1) ? 0 : src_nents;
1377 dst_nents = dst ? src_nents : 0;
1379 } else { /* dst && dst != src*/
/* out-of-place: ICV is read from src on decrypt, written to dst on encrypt */
1380 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1381 src_nents = sg_nents_for_len(src, src_len);
1382 if (src_nents < 0) {
1383 dev_err(dev, "Invalid number of src SG.\n");
1384 return ERR_PTR(-EINVAL);
1386 src_nents = (src_nents == 1) ? 0 : src_nents;
1387 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1388 dst_nents = sg_nents_for_len(dst, dst_len);
1389 if (dst_nents < 0) {
1390 dev_err(dev, "Invalid number of dst SG.\n");
1391 return ERR_PTR(-EINVAL);
1393 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1397 * allocate space for base edesc plus the link tables,
1398 * allowing for two separate entries for AD and generated ICV (+ 2),
1399 * and space for two sets of ICVs (stashed and generated)
1401 alloc_len = sizeof(struct talitos_edesc);
1402 if (src_nents || dst_nents) {
1404 dma_len = (src_nents ? src_len : 0) +
1405 (dst_nents ? dst_len : 0);
1407 dma_len = (src_nents + dst_nents + 2) *
1408 sizeof(struct talitos_ptr) + authsize * 2;
1409 alloc_len += dma_len;
1412 alloc_len += icv_stashing ? authsize : 0;
/* IV copy is stored at the very end of the allocation */
1414 alloc_len += ivsize;
1416 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1418 return ERR_PTR(-ENOMEM);
1420 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1421 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1424 edesc->src_nents = src_nents;
1425 edesc->dst_nents = dst_nents;
1426 edesc->iv_dma = iv_dma;
1427 edesc->dma_len = dma_len;
1429 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
/*
 * aead_edesc_alloc() - AEAD-flavored wrapper around talitos_edesc_alloc():
 * derives authsize/ivsize/cryptlen from the aead transform and request.
 */
1436 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1437 int icv_stashing, bool encrypt)
1439 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1440 unsigned int authsize = crypto_aead_authsize(authenc);
1441 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1442 unsigned int ivsize = crypto_aead_ivsize(authenc);
/* decrypt requests carry the ICV inside req->cryptlen; strip it here */
1443 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1445 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1446 iv, areq->assoclen, cryptlen,
1447 authsize, ivsize, icv_stashing,
1448 areq->base.flags, encrypt);
/*
 * aead_encrypt() - AEAD encrypt entry point: build an edesc, set the
 * encrypt direction in the descriptor header, and run ipsec_esp().
 */
1451 static int aead_encrypt(struct aead_request *req)
1453 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1454 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1455 struct talitos_edesc *edesc;
1457 /* allocate extended descriptor */
1458 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1460 return PTR_ERR(edesc);
/* set encrypt direction in the per-tfm header template */
1463 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1465 return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
/*
 * aead_decrypt() - AEAD decrypt entry point. Uses the hardware ICV check
 * (MDEU_CICV) when the device supports it for this layout; otherwise
 * stashes the incoming ICV and verifies it in software in the completion
 * callback (ipsec_esp_decrypt_swauth_done).
 */
1468 static int aead_decrypt(struct aead_request *req)
1470 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1471 unsigned int authsize = crypto_aead_authsize(authenc);
1472 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1473 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1474 struct talitos_edesc *edesc;
1477 /* allocate extended descriptor */
1478 edesc = aead_edesc_alloc(req, req->iv, 1, false);
1480 return PTR_ERR(edesc);
/* h/w auth check only usable for ESP-type descriptors on capable devices,
 * and only for flat buffers unless the extent-in-length feature is set */
1482 if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1483 (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1484 ((!edesc->src_nents && !edesc->dst_nents) ||
1485 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1487 /* decrypt and check the ICV */
1488 edesc->desc.hdr = ctx->desc_hdr_template |
1489 DESC_HDR_DIR_INBOUND |
1490 DESC_HDR_MODE1_MDEU_CICV;
1492 /* reset integrity check result bits */
1493 edesc->desc.hdr_lo = 0;
1495 return ipsec_esp(edesc, req, false,
1496 ipsec_esp_decrypt_hwauth_done);
1499 /* Have to check the ICV with software */
1500 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1502 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1504 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1505 edesc->dst_nents + 2];
1507 icvdata = &edesc->link_tbl[0];
/* copy the trailing authsize bytes of src (the received ICV) aside */
1509 sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1510 req->assoclen + req->cryptlen - authsize);
1512 return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
/*
 * ablkcipher_setkey() - store the cipher key in the tfm context after
 * bounds-checking it and optionally rejecting weak DES keys.
 * NOTE(review): error-return lines are missing from this extract.
 */
1515 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1516 const u8 *key, unsigned int keylen)
1518 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1520 u32 tmp[DES_EXPKEY_WORDS];
1521 if (keylen > TALITOS_MAX_KEY_SIZE) {
1522 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
/* reject weak DES keys only when the caller asked for that check */
1526 if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1527 CRYPTO_TFM_REQ_WEAK_KEY) &&
1528 !des_ekey(tmp, key)) {
1529 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1533 memcpy(&ctx->key, key, keylen);
1534 ctx->keylen = keylen;
/*
 * ablkcipher_aes_setkey() - AES variant of setkey: only accept the three
 * valid AES key sizes, otherwise flag CRYPTO_TFM_RES_BAD_KEY_LEN.
 */
1539 static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
1540 const u8 *key, unsigned int keylen)
1542 if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1543 keylen == AES_KEYSIZE_256)
1544 return ablkcipher_setkey(cipher, key, keylen);
1546 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
/*
 * common_nonsnoop_unmap() - undo the DMA mappings made by common_nonsnoop():
 * single-mapped ptrs 5/2/1, the src/dst scatterlists, and the link table.
 */
1551 static void common_nonsnoop_unmap(struct device *dev,
1552 struct talitos_edesc *edesc,
1553 struct ablkcipher_request *areq)
1555 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1557 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1558 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1559 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1562 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
/*
 * ablkcipher_done() - completion callback for ablkcipher requests: unmap
 * resources, copy the output IV back to the request, and complete it.
 */
1566 static void ablkcipher_done(struct device *dev,
1567 struct talitos_desc *desc, void *context,
1570 struct ablkcipher_request *areq = context;
1571 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1572 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1573 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1574 struct talitos_edesc *edesc;
/* recover the edesc from the embedded hardware descriptor */
1576 edesc = container_of(desc, struct talitos_edesc, desc);
1578 common_nonsnoop_unmap(dev, edesc, areq);
/* propagate the chained IV produced by the hardware back to the caller */
1579 memcpy(areq->info, ctx->iv, ivsize);
1583 areq->base.complete(&areq->base, err);
/*
 * common_nonsnoop() - build and submit the descriptor for a plain
 * (non-snooping) ablkcipher operation: ptr[1]=IV, ptr[2]=key, ptr[3]=src,
 * ptr[4]=dst, ptr[5]=IV out; ptrs 0 and 6 are unused.
 * NOTE(review): interior lines (error paths, sync_needed updates, braces)
 * are missing from this extract.
 */
1586 static int common_nonsnoop(struct talitos_edesc *edesc,
1587 struct ablkcipher_request *areq,
1588 void (*callback) (struct device *dev,
1589 struct talitos_desc *desc,
1590 void *context, int error))
1592 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1593 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1594 struct device *dev = ctx->dev;
1595 struct talitos_desc *desc = &edesc->desc;
1596 unsigned int cryptlen = areq->nbytes;
1597 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1599 bool sync_needed = false;
1600 struct talitos_private *priv = dev_get_drvdata(dev);
1601 bool is_sec1 = has_ftr_sec1(priv);
1603 /* first DWORD empty */
1604 desc->ptr[0] = zero_entry;
/* ptr[1]: cipher IV (pre-mapped at edesc_alloc time) */
1607 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
1608 to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
1609 to_talitos_ptr_ext_set(&desc->ptr[1], 0, is_sec1);
/* ptr[2]: cipher key */
1612 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1613 (char *)&ctx->key, DMA_TO_DEVICE);
/* ptr[3]: cipher input; SEC1 needs a linearized copy for multi-entry SGs */
1615 sg_count = edesc->src_nents ?: 1;
1616 if (is_sec1 && sg_count > 1)
1617 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1620 sg_count = dma_map_sg(dev, areq->src, sg_count,
1621 (areq->src == areq->dst) ?
1622 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1626 sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1627 &desc->ptr[3], sg_count, 0, 0);
/* ptr[4]: cipher output; dst mapped separately only when != src */
1632 if (areq->src != areq->dst) {
1633 sg_count = edesc->dst_nents ? : 1;
1634 if (!is_sec1 || sg_count == 1)
1635 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1638 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1639 sg_count, 0, (edesc->src_nents + 1));
/* ptr[5]: next IV out, written back into ctx->iv by the hardware —
 * TODO confirm against full file */
1644 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1647 /* last DWORD empty */
1648 desc->ptr[6] = zero_entry;
1651 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1652 edesc->dma_len, DMA_BIDIRECTIONAL);
1654 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1655 if (ret != -EINPROGRESS) {
1656 common_nonsnoop_unmap(dev, edesc, areq);
/*
 * ablkcipher_edesc_alloc() - ablkcipher wrapper around talitos_edesc_alloc():
 * no assoc data, no authsize, no ICV stashing.
 */
1662 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1665 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1666 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1667 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1669 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1670 areq->info, 0, areq->nbytes, 0, ivsize, 0,
1671 areq->base.flags, encrypt);
/*
 * ablkcipher_encrypt() - encrypt entry point: require whole-block input,
 * allocate an edesc, set the encrypt header bit, submit via common_nonsnoop().
 */
1674 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1676 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1677 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1678 struct talitos_edesc *edesc;
1679 unsigned int blocksize =
1680 crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
/* reject partial blocks (error-return line elided in this extract) */
1685 if (areq->nbytes % blocksize)
1688 /* allocate extended descriptor */
1689 edesc = ablkcipher_edesc_alloc(areq, true);
1691 return PTR_ERR(edesc);
/* set encrypt in descriptor */
1694 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1696 return common_nonsnoop(edesc, areq, ablkcipher_done);
/*
 * ablkcipher_decrypt() - decrypt entry point: mirror of ablkcipher_encrypt()
 * but with the inbound direction bit in the descriptor header.
 */
1699 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1701 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1702 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1703 struct talitos_edesc *edesc;
1704 unsigned int blocksize =
1705 crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
/* reject partial blocks (error-return line elided in this extract) */
1710 if (areq->nbytes % blocksize)
1713 /* allocate extended descriptor */
1714 edesc = ablkcipher_edesc_alloc(areq, false);
1716 return PTR_ERR(edesc);
1718 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1720 return common_nonsnoop(edesc, areq, ablkcipher_done);
/*
 * common_nonsnoop_hash_unmap() - undo the DMA mappings made by
 * common_nonsnoop_hash(): result ptr, source SG, and the optionally-mapped
 * hash-context (ptr[1]) and key (ptr[2]) entries.
 */
1723 static void common_nonsnoop_hash_unmap(struct device *dev,
1724 struct talitos_edesc *edesc,
1725 struct ahash_request *areq)
1727 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1728 struct talitos_private *priv = dev_get_drvdata(dev);
1729 bool is_sec1 = has_ftr_sec1(priv);
1731 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1733 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1735 /* When using hashctx-in, must unmap it. */
1736 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1737 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
/* HMAC key was mapped only if ptr[2] has a nonzero length */
1740 if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
1741 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1745 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
/*
 * ahash_done() - completion callback for hash requests: carry any partial
 * block over to the next update, unmap, and complete the request.
 */
1750 static void ahash_done(struct device *dev,
1751 struct talitos_desc *desc, void *context,
1754 struct ahash_request *areq = context;
1755 struct talitos_edesc *edesc =
1756 container_of(desc, struct talitos_edesc, desc);
1757 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1759 if (!req_ctx->last && req_ctx->to_hash_later) {
1760 /* Position any partial block for next update/final/finup */
1761 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1762 req_ctx->nbuf = req_ctx->to_hash_later;
1764 common_nonsnoop_hash_unmap(dev, edesc, areq);
1768 areq->base.complete(&areq->base, err);
/*
 * talitos_handle_buggy_hash() - SEC1 workaround for zero-length hashing:
 * clear the hardware-pad bit and feed a pre-padded empty-message block
 * (0x80 then zeros) instead of asking the engine to pad.
 * NOTE(review): the array terminator/braces are elided in this extract.
 */
1772 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1773 * ourself and submit a padded block
1775 void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1776 struct talitos_edesc *edesc,
1777 struct talitos_ptr *ptr)
1779 static u8 padded_hash[64] = {
1780 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1781 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1782 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1783 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1786 pr_err_once("Bug in SEC1, padding ourself\n");
1787 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1788 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1789 (char *)padded_hash, DMA_TO_DEVICE);
/*
 * common_nonsnoop_hash() - build and submit the descriptor for a hash
 * operation: ptr[1]=hash context in (unless first), ptr[2]=HMAC key (if
 * keyed), ptr[3]=data, ptr[5]=digest or context out; ptrs 0/4/6 unused.
 * NOTE(review): interior lines (error handling, sync_needed updates,
 * else-branches, braces) are missing from this extract.
 */
1792 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1793 struct ahash_request *areq, unsigned int length,
1794 void (*callback) (struct device *dev,
1795 struct talitos_desc *desc,
1796 void *context, int error))
1798 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1799 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1800 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1801 struct device *dev = ctx->dev;
1802 struct talitos_desc *desc = &edesc->desc;
1804 bool sync_needed = false;
1805 struct talitos_private *priv = dev_get_drvdata(dev);
1806 bool is_sec1 = has_ftr_sec1(priv);
1809 /* first DWORD empty */
1810 desc->ptr[0] = zero_entry;
1812 /* hash context in */
1813 if (!req_ctx->first || req_ctx->swinit) {
1814 map_single_talitos_ptr(dev, &desc->ptr[1],
1815 req_ctx->hw_context_size,
1816 (char *)req_ctx->hw_context,
1818 req_ctx->swinit = 0;
1820 desc->ptr[1] = zero_entry;
1822 /* Indicate next op is not the first. */
/* ptr[2]: HMAC key when the tfm is keyed, otherwise empty */
1827 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1828 (char *)&ctx->key, DMA_TO_DEVICE);
1830 desc->ptr[2] = zero_entry;
/* ptr[3]: data to hash; SEC1 gets a linearized copy for multi-entry SGs */
1832 sg_count = edesc->src_nents ?: 1;
1833 if (is_sec1 && sg_count > 1)
1834 sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1836 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1841 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1842 &desc->ptr[3], sg_count, 0, 0);
1846 /* fifth DWORD empty */
1847 desc->ptr[4] = zero_entry;
1849 /* hash/HMAC out -or- hash context out */
1851 map_single_talitos_ptr(dev, &desc->ptr[5],
1852 crypto_ahash_digestsize(tfm),
1853 areq->result, DMA_FROM_DEVICE);
1855 map_single_talitos_ptr(dev, &desc->ptr[5],
1856 req_ctx->hw_context_size,
1857 req_ctx->hw_context, DMA_FROM_DEVICE);
1859 /* last DWORD empty */
1860 desc->ptr[6] = zero_entry;
/* SEC1 zero-length workaround: substitute a pre-padded block */
1862 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1863 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1866 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1867 edesc->dma_len, DMA_BIDIRECTIONAL);
1869 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1870 if (ret != -EINPROGRESS) {
1871 common_nonsnoop_hash_unmap(dev, edesc, areq);
/*
 * ahash_edesc_alloc() - hash wrapper around talitos_edesc_alloc(): source
 * only (no dst, no IV, no authsize), from the request's staged psrc.
 */
1877 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1878 unsigned int nbytes)
1880 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1881 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1882 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1884 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1885 nbytes, 0, 0, 0, areq->base.flags, false);
/*
 * ahash_init() - reset the per-request hash state; the hardware will
 * initialize its own context (first=1, swinit=0). Context size is chosen
 * by digest size: MD5/SHA1/SHA256 class vs SHA384/SHA512 class.
 */
1888 static int ahash_init(struct ahash_request *areq)
1890 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1891 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1893 /* Initialize the context */
1895 req_ctx->first = 1; /* first indicates h/w must init its context */
1896 req_ctx->swinit = 0; /* assume h/w init of context */
1897 req_ctx->hw_context_size =
1898 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1899 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1900 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
/*
 * ahash_init_sha224_swinit() - software-init variant for SHA-224 on
 * hardware lacking native sha224: seed the context with the SHA-224
 * initial hash values and let the engine run its sha256 mode.
 */
1906 * on h/w without explicit sha224 support, we initialize h/w context
1907 * manually with sha224 constants, and tell it to run sha256.
1909 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1911 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1914 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1916 req_ctx->hw_context[0] = SHA224_H0;
1917 req_ctx->hw_context[1] = SHA224_H1;
1918 req_ctx->hw_context[2] = SHA224_H2;
1919 req_ctx->hw_context[3] = SHA224_H3;
1920 req_ctx->hw_context[4] = SHA224_H4;
1921 req_ctx->hw_context[5] = SHA224_H5;
1922 req_ctx->hw_context[6] = SHA224_H6;
1923 req_ctx->hw_context[7] = SHA224_H7;
1925 /* init 64-bit count */
1926 req_ctx->hw_context[8] = 0;
1927 req_ctx->hw_context[9] = 0;
/*
 * ahash_process_req() - core of update/final/finup/digest: buffer input
 * until at least one full block is available, chain any previously
 * buffered bytes in front of the request's scatterlist, stash the partial
 * trailing block for the next call, then build the descriptor and submit.
 * NOTE(review): interior lines (branch conditions, error returns, braces)
 * are missing from this extract.
 */
1932 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1934 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1935 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1936 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1937 struct talitos_edesc *edesc;
1938 unsigned int blocksize =
1939 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1940 unsigned int nbytes_to_hash;
1941 unsigned int to_hash_later;
/* not the final call and input fits in one block: just buffer it */
1945 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1946 /* Buffer up to one whole block */
1947 nents = sg_nents_for_len(areq->src, nbytes);
1949 dev_err(ctx->dev, "Invalid number of src SG.\n");
1952 sg_copy_to_buffer(areq->src, nents,
1953 req_ctx->buf + req_ctx->nbuf, nbytes);
1954 req_ctx->nbuf += nbytes;
1958 /* At least (blocksize + 1) bytes are available to hash */
1959 nbytes_to_hash = nbytes + req_ctx->nbuf;
1960 to_hash_later = nbytes_to_hash & (blocksize - 1);
1964 else if (to_hash_later)
1965 /* There is a partial block. Hash the full block(s) now */
1966 nbytes_to_hash -= to_hash_later;
1968 /* Keep one block buffered */
1969 nbytes_to_hash -= blocksize;
1970 to_hash_later = blocksize;
1973 /* Chain in any previously buffered data */
1974 if (req_ctx->nbuf) {
1975 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1976 sg_init_table(req_ctx->bufsl, nsg);
1977 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1979 sg_chain(req_ctx->bufsl, 2, areq->src);
1980 req_ctx->psrc = req_ctx->bufsl;
1982 req_ctx->psrc = areq->src;
/* stash the trailing partial block into bufnext for the next call */
1984 if (to_hash_later) {
1985 nents = sg_nents_for_len(areq->src, nbytes);
1987 dev_err(ctx->dev, "Invalid number of src SG.\n");
1990 sg_pcopy_to_buffer(areq->src, nents,
1993 nbytes - to_hash_later);
1995 req_ctx->to_hash_later = to_hash_later;
1997 /* Allocate extended descriptor */
1998 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2000 return PTR_ERR(edesc);
2002 edesc->desc.hdr = ctx->desc_hdr_template;
2004 /* On last one, request SEC to pad; otherwise continue */
2006 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2008 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2010 /* request SEC to INIT hash. */
2011 if (req_ctx->first && !req_ctx->swinit)
2012 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2014 /* When the tfm context has a keylen, it's an HMAC.
2015 * A first or last (ie. not middle) descriptor must request HMAC.
2017 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2018 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2020 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
/* ahash_update() - non-final processing of areq->nbytes input bytes. */
2024 static int ahash_update(struct ahash_request *areq)
2026 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2030 return ahash_process_req(areq, areq->nbytes);
/* ahash_final() - finish the hash with no new input (nbytes == 0). */
2033 static int ahash_final(struct ahash_request *areq)
2035 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2039 return ahash_process_req(areq, 0);
/* ahash_finup() - process the final areq->nbytes bytes and finish. */
2042 static int ahash_finup(struct ahash_request *areq)
2044 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2048 return ahash_process_req(areq, areq->nbytes);
/* ahash_digest() - one-shot hash: init (lines elided here) then process all. */
2051 static int ahash_digest(struct ahash_request *areq)
2053 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2054 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2059 return ahash_process_req(areq, areq->nbytes);
/*
 * ahash_export() - serialize the in-progress hash state (hardware context,
 * buffered bytes, and bookkeeping flags) into a talitos_export_state.
 */
2062 static int ahash_export(struct ahash_request *areq, void *out)
2064 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2065 struct talitos_export_state *export = out;
2067 memcpy(export->hw_context, req_ctx->hw_context,
2068 req_ctx->hw_context_size);
2069 memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
2070 export->swinit = req_ctx->swinit;
2071 export->first = req_ctx->first;
2072 export->last = req_ctx->last;
2073 export->to_hash_later = req_ctx->to_hash_later;
2074 export->nbuf = req_ctx->nbuf;
/*
 * ahash_import() - inverse of ahash_export(): rebuild the request context
 * from a previously exported state. The context size is re-derived from
 * the tfm's digest size rather than trusted from the import blob.
 */
2079 static int ahash_import(struct ahash_request *areq, const void *in)
2081 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2082 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2083 const struct talitos_export_state *export = in;
2085 memset(req_ctx, 0, sizeof(*req_ctx));
2086 req_ctx->hw_context_size =
2087 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2088 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2089 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2090 memcpy(req_ctx->hw_context, export->hw_context,
2091 req_ctx->hw_context_size);
2092 memcpy(req_ctx->buf, export->buf, export->nbuf);
2093 req_ctx->swinit = export->swinit;
2094 req_ctx->first = export->first;
2095 req_ctx->last = export->last;
2096 req_ctx->to_hash_later = export->to_hash_later;
2097 req_ctx->nbuf = export->nbuf;
/* Completion carrier for the synchronous keyhash() helper below.
 * NOTE(review): remaining members and closing brace elided in this extract. */
2102 struct keyhash_result {
2103 struct completion completion;
/*
 * keyhash_complete() - async callback for keyhash(): ignore -EINPROGRESS
 * notifications and signal the waiter on real completion.
 */
2107 static void keyhash_complete(struct crypto_async_request *req, int err)
2109 struct keyhash_result *res = req->data;
2111 if (err == -EINPROGRESS)
2115 complete(&res->completion);
/*
 * keyhash() - synchronously hash a long HMAC key down to digest size using
 * the same tfm (with keylen temporarily 0), waiting on a completion.
 * NOTE(review): backlog-retry and error-handling lines are elided in this
 * extract.
 */
2118 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2121 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2123 struct scatterlist sg[1];
2124 struct ahash_request *req;
2125 struct keyhash_result hresult;
2128 init_completion(&hresult.completion);
2130 req = ahash_request_alloc(tfm, GFP_KERNEL);
2134 /* Keep tfm keylen == 0 during hash of the long key */
2136 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2137 keyhash_complete, &hresult);
2139 sg_init_one(&sg[0], key, keylen);
2141 ahash_request_set_crypt(req, sg, hash, keylen);
2142 ret = crypto_ahash_digest(req);
2148 ret = wait_for_completion_interruptible(
2149 &hresult.completion);
2156 ahash_request_free(req);
/*
 * ahash_setkey() - HMAC setkey: keys up to one block are stored verbatim;
 * longer keys are first hashed down to digest size (standard HMAC rule).
 */
2161 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2162 unsigned int keylen)
2164 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2165 unsigned int blocksize =
2166 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2167 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2168 unsigned int keysize = keylen;
2169 u8 hash[SHA512_DIGEST_SIZE];
2172 if (keylen <= blocksize)
2173 memcpy(ctx->key, key, keysize);
2175 /* Must get the hash of the long key */
2176 ret = keyhash(tfm, key, keylen, hash);
2179 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2183 keysize = digestsize;
2184 memcpy(ctx->key, hash, digestsize);
2187 ctx->keylen = keysize;
/*
 * Template describing one algorithm to register: the crypto/hash/aead alg
 * definition plus the descriptor header bits it maps to.
 * NOTE(review): union keyword/other members and closing brace are elided
 * in this extract.
 */
2193 struct talitos_alg_template {
2197 struct crypto_alg crypto;
2198 struct ahash_alg hash;
2199 struct aead_alg aead;
2201 __be32 desc_hdr_template;
2204 static struct talitos_alg_template driver_algs[] = {
2205 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2206 { .type = CRYPTO_ALG_TYPE_AEAD,
2209 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2210 .cra_driver_name = "authenc-hmac-sha1-"
2212 .cra_blocksize = AES_BLOCK_SIZE,
2213 .cra_flags = CRYPTO_ALG_ASYNC,
2215 .ivsize = AES_BLOCK_SIZE,
2216 .maxauthsize = SHA1_DIGEST_SIZE,
2218 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2219 DESC_HDR_SEL0_AESU |
2220 DESC_HDR_MODE0_AESU_CBC |
2221 DESC_HDR_SEL1_MDEUA |
2222 DESC_HDR_MODE1_MDEU_INIT |
2223 DESC_HDR_MODE1_MDEU_PAD |
2224 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2226 { .type = CRYPTO_ALG_TYPE_AEAD,
2227 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2230 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2231 .cra_driver_name = "authenc-hmac-sha1-"
2232 "cbc-aes-talitos-hsna",
2233 .cra_blocksize = AES_BLOCK_SIZE,
2234 .cra_flags = CRYPTO_ALG_ASYNC,
2236 .ivsize = AES_BLOCK_SIZE,
2237 .maxauthsize = SHA1_DIGEST_SIZE,
2239 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2240 DESC_HDR_SEL0_AESU |
2241 DESC_HDR_MODE0_AESU_CBC |
2242 DESC_HDR_SEL1_MDEUA |
2243 DESC_HDR_MODE1_MDEU_INIT |
2244 DESC_HDR_MODE1_MDEU_PAD |
2245 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2247 { .type = CRYPTO_ALG_TYPE_AEAD,
2250 .cra_name = "authenc(hmac(sha1),"
2252 .cra_driver_name = "authenc-hmac-sha1-"
2254 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2255 .cra_flags = CRYPTO_ALG_ASYNC,
2257 .ivsize = DES3_EDE_BLOCK_SIZE,
2258 .maxauthsize = SHA1_DIGEST_SIZE,
2260 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2262 DESC_HDR_MODE0_DEU_CBC |
2263 DESC_HDR_MODE0_DEU_3DES |
2264 DESC_HDR_SEL1_MDEUA |
2265 DESC_HDR_MODE1_MDEU_INIT |
2266 DESC_HDR_MODE1_MDEU_PAD |
2267 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2269 { .type = CRYPTO_ALG_TYPE_AEAD,
2270 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2273 .cra_name = "authenc(hmac(sha1),"
2275 .cra_driver_name = "authenc-hmac-sha1-"
2276 "cbc-3des-talitos-hsna",
2277 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2278 .cra_flags = CRYPTO_ALG_ASYNC,
2280 .ivsize = DES3_EDE_BLOCK_SIZE,
2281 .maxauthsize = SHA1_DIGEST_SIZE,
2283 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2285 DESC_HDR_MODE0_DEU_CBC |
2286 DESC_HDR_MODE0_DEU_3DES |
2287 DESC_HDR_SEL1_MDEUA |
2288 DESC_HDR_MODE1_MDEU_INIT |
2289 DESC_HDR_MODE1_MDEU_PAD |
2290 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2292 { .type = CRYPTO_ALG_TYPE_AEAD,
2295 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2296 .cra_driver_name = "authenc-hmac-sha224-"
2298 .cra_blocksize = AES_BLOCK_SIZE,
2299 .cra_flags = CRYPTO_ALG_ASYNC,
2301 .ivsize = AES_BLOCK_SIZE,
2302 .maxauthsize = SHA224_DIGEST_SIZE,
2304 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2305 DESC_HDR_SEL0_AESU |
2306 DESC_HDR_MODE0_AESU_CBC |
2307 DESC_HDR_SEL1_MDEUA |
2308 DESC_HDR_MODE1_MDEU_INIT |
2309 DESC_HDR_MODE1_MDEU_PAD |
2310 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2312 { .type = CRYPTO_ALG_TYPE_AEAD,
2313 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2316 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2317 .cra_driver_name = "authenc-hmac-sha224-"
2318 "cbc-aes-talitos-hsna",
2319 .cra_blocksize = AES_BLOCK_SIZE,
2320 .cra_flags = CRYPTO_ALG_ASYNC,
2322 .ivsize = AES_BLOCK_SIZE,
2323 .maxauthsize = SHA224_DIGEST_SIZE,
2325 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2326 DESC_HDR_SEL0_AESU |
2327 DESC_HDR_MODE0_AESU_CBC |
2328 DESC_HDR_SEL1_MDEUA |
2329 DESC_HDR_MODE1_MDEU_INIT |
2330 DESC_HDR_MODE1_MDEU_PAD |
2331 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2333 { .type = CRYPTO_ALG_TYPE_AEAD,
2336 .cra_name = "authenc(hmac(sha224),"
2338 .cra_driver_name = "authenc-hmac-sha224-"
2340 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2341 .cra_flags = CRYPTO_ALG_ASYNC,
2343 .ivsize = DES3_EDE_BLOCK_SIZE,
2344 .maxauthsize = SHA224_DIGEST_SIZE,
2346 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2348 DESC_HDR_MODE0_DEU_CBC |
2349 DESC_HDR_MODE0_DEU_3DES |
2350 DESC_HDR_SEL1_MDEUA |
2351 DESC_HDR_MODE1_MDEU_INIT |
2352 DESC_HDR_MODE1_MDEU_PAD |
2353 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2355 { .type = CRYPTO_ALG_TYPE_AEAD,
2356 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2359 .cra_name = "authenc(hmac(sha224),"
2361 .cra_driver_name = "authenc-hmac-sha224-"
2362 "cbc-3des-talitos-hsna",
2363 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2364 .cra_flags = CRYPTO_ALG_ASYNC,
2366 .ivsize = DES3_EDE_BLOCK_SIZE,
2367 .maxauthsize = SHA224_DIGEST_SIZE,
2369 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2371 DESC_HDR_MODE0_DEU_CBC |
2372 DESC_HDR_MODE0_DEU_3DES |
2373 DESC_HDR_SEL1_MDEUA |
2374 DESC_HDR_MODE1_MDEU_INIT |
2375 DESC_HDR_MODE1_MDEU_PAD |
2376 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2378 { .type = CRYPTO_ALG_TYPE_AEAD,
2381 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2382 .cra_driver_name = "authenc-hmac-sha256-"
2384 .cra_blocksize = AES_BLOCK_SIZE,
2385 .cra_flags = CRYPTO_ALG_ASYNC,
2387 .ivsize = AES_BLOCK_SIZE,
2388 .maxauthsize = SHA256_DIGEST_SIZE,
2390 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2391 DESC_HDR_SEL0_AESU |
2392 DESC_HDR_MODE0_AESU_CBC |
2393 DESC_HDR_SEL1_MDEUA |
2394 DESC_HDR_MODE1_MDEU_INIT |
2395 DESC_HDR_MODE1_MDEU_PAD |
2396 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2398 { .type = CRYPTO_ALG_TYPE_AEAD,
2399 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2402 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2403 .cra_driver_name = "authenc-hmac-sha256-"
2404 "cbc-aes-talitos-hsna",
2405 .cra_blocksize = AES_BLOCK_SIZE,
2406 .cra_flags = CRYPTO_ALG_ASYNC,
2408 .ivsize = AES_BLOCK_SIZE,
2409 .maxauthsize = SHA256_DIGEST_SIZE,
2411 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2412 DESC_HDR_SEL0_AESU |
2413 DESC_HDR_MODE0_AESU_CBC |
2414 DESC_HDR_SEL1_MDEUA |
2415 DESC_HDR_MODE1_MDEU_INIT |
2416 DESC_HDR_MODE1_MDEU_PAD |
2417 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2419 { .type = CRYPTO_ALG_TYPE_AEAD,
2422 .cra_name = "authenc(hmac(sha256),"
2424 .cra_driver_name = "authenc-hmac-sha256-"
2426 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2427 .cra_flags = CRYPTO_ALG_ASYNC,
2429 .ivsize = DES3_EDE_BLOCK_SIZE,
2430 .maxauthsize = SHA256_DIGEST_SIZE,
2432 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2434 DESC_HDR_MODE0_DEU_CBC |
2435 DESC_HDR_MODE0_DEU_3DES |
2436 DESC_HDR_SEL1_MDEUA |
2437 DESC_HDR_MODE1_MDEU_INIT |
2438 DESC_HDR_MODE1_MDEU_PAD |
2439 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2441 { .type = CRYPTO_ALG_TYPE_AEAD,
2442 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2445 .cra_name = "authenc(hmac(sha256),"
2447 .cra_driver_name = "authenc-hmac-sha256-"
2448 "cbc-3des-talitos-hsna",
2449 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2450 .cra_flags = CRYPTO_ALG_ASYNC,
2452 .ivsize = DES3_EDE_BLOCK_SIZE,
2453 .maxauthsize = SHA256_DIGEST_SIZE,
2455 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2457 DESC_HDR_MODE0_DEU_CBC |
2458 DESC_HDR_MODE0_DEU_3DES |
2459 DESC_HDR_SEL1_MDEUA |
2460 DESC_HDR_MODE1_MDEU_INIT |
2461 DESC_HDR_MODE1_MDEU_PAD |
2462 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2464 { .type = CRYPTO_ALG_TYPE_AEAD,
2467 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2468 .cra_driver_name = "authenc-hmac-sha384-"
2470 .cra_blocksize = AES_BLOCK_SIZE,
2471 .cra_flags = CRYPTO_ALG_ASYNC,
2473 .ivsize = AES_BLOCK_SIZE,
2474 .maxauthsize = SHA384_DIGEST_SIZE,
2476 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2477 DESC_HDR_SEL0_AESU |
2478 DESC_HDR_MODE0_AESU_CBC |
2479 DESC_HDR_SEL1_MDEUB |
2480 DESC_HDR_MODE1_MDEU_INIT |
2481 DESC_HDR_MODE1_MDEU_PAD |
2482 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2484 { .type = CRYPTO_ALG_TYPE_AEAD,
2487 .cra_name = "authenc(hmac(sha384),"
2489 .cra_driver_name = "authenc-hmac-sha384-"
2491 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2492 .cra_flags = CRYPTO_ALG_ASYNC,
2494 .ivsize = DES3_EDE_BLOCK_SIZE,
2495 .maxauthsize = SHA384_DIGEST_SIZE,
2497 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2499 DESC_HDR_MODE0_DEU_CBC |
2500 DESC_HDR_MODE0_DEU_3DES |
2501 DESC_HDR_SEL1_MDEUB |
2502 DESC_HDR_MODE1_MDEU_INIT |
2503 DESC_HDR_MODE1_MDEU_PAD |
2504 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2506 { .type = CRYPTO_ALG_TYPE_AEAD,
2509 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2510 .cra_driver_name = "authenc-hmac-sha512-"
2512 .cra_blocksize = AES_BLOCK_SIZE,
2513 .cra_flags = CRYPTO_ALG_ASYNC,
2515 .ivsize = AES_BLOCK_SIZE,
2516 .maxauthsize = SHA512_DIGEST_SIZE,
2518 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2519 DESC_HDR_SEL0_AESU |
2520 DESC_HDR_MODE0_AESU_CBC |
2521 DESC_HDR_SEL1_MDEUB |
2522 DESC_HDR_MODE1_MDEU_INIT |
2523 DESC_HDR_MODE1_MDEU_PAD |
2524 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2526 { .type = CRYPTO_ALG_TYPE_AEAD,
2529 .cra_name = "authenc(hmac(sha512),"
2531 .cra_driver_name = "authenc-hmac-sha512-"
2533 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2534 .cra_flags = CRYPTO_ALG_ASYNC,
2536 .ivsize = DES3_EDE_BLOCK_SIZE,
2537 .maxauthsize = SHA512_DIGEST_SIZE,
2539 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2541 DESC_HDR_MODE0_DEU_CBC |
2542 DESC_HDR_MODE0_DEU_3DES |
2543 DESC_HDR_SEL1_MDEUB |
2544 DESC_HDR_MODE1_MDEU_INIT |
2545 DESC_HDR_MODE1_MDEU_PAD |
2546 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2548 { .type = CRYPTO_ALG_TYPE_AEAD,
2551 .cra_name = "authenc(hmac(md5),cbc(aes))",
2552 .cra_driver_name = "authenc-hmac-md5-"
2554 .cra_blocksize = AES_BLOCK_SIZE,
2555 .cra_flags = CRYPTO_ALG_ASYNC,
2557 .ivsize = AES_BLOCK_SIZE,
2558 .maxauthsize = MD5_DIGEST_SIZE,
2560 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2561 DESC_HDR_SEL0_AESU |
2562 DESC_HDR_MODE0_AESU_CBC |
2563 DESC_HDR_SEL1_MDEUA |
2564 DESC_HDR_MODE1_MDEU_INIT |
2565 DESC_HDR_MODE1_MDEU_PAD |
2566 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2568 { .type = CRYPTO_ALG_TYPE_AEAD,
2569 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2572 .cra_name = "authenc(hmac(md5),cbc(aes))",
2573 .cra_driver_name = "authenc-hmac-md5-"
2574 "cbc-aes-talitos-hsna",
2575 .cra_blocksize = AES_BLOCK_SIZE,
2576 .cra_flags = CRYPTO_ALG_ASYNC,
2578 .ivsize = AES_BLOCK_SIZE,
2579 .maxauthsize = MD5_DIGEST_SIZE,
2581 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2582 DESC_HDR_SEL0_AESU |
2583 DESC_HDR_MODE0_AESU_CBC |
2584 DESC_HDR_SEL1_MDEUA |
2585 DESC_HDR_MODE1_MDEU_INIT |
2586 DESC_HDR_MODE1_MDEU_PAD |
2587 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2589 { .type = CRYPTO_ALG_TYPE_AEAD,
2592 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2593 .cra_driver_name = "authenc-hmac-md5-"
2595 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2596 .cra_flags = CRYPTO_ALG_ASYNC,
2598 .ivsize = DES3_EDE_BLOCK_SIZE,
2599 .maxauthsize = MD5_DIGEST_SIZE,
2601 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2603 DESC_HDR_MODE0_DEU_CBC |
2604 DESC_HDR_MODE0_DEU_3DES |
2605 DESC_HDR_SEL1_MDEUA |
2606 DESC_HDR_MODE1_MDEU_INIT |
2607 DESC_HDR_MODE1_MDEU_PAD |
2608 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2610 { .type = CRYPTO_ALG_TYPE_AEAD,
2611 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2614 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2615 .cra_driver_name = "authenc-hmac-md5-"
2616 "cbc-3des-talitos-hsna",
2617 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2618 .cra_flags = CRYPTO_ALG_ASYNC,
2620 .ivsize = DES3_EDE_BLOCK_SIZE,
2621 .maxauthsize = MD5_DIGEST_SIZE,
2623 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2625 DESC_HDR_MODE0_DEU_CBC |
2626 DESC_HDR_MODE0_DEU_3DES |
2627 DESC_HDR_SEL1_MDEUA |
2628 DESC_HDR_MODE1_MDEU_INIT |
2629 DESC_HDR_MODE1_MDEU_PAD |
2630 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2632 /* ABLKCIPHER algorithms. */
2633 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2635 .cra_name = "ecb(aes)",
2636 .cra_driver_name = "ecb-aes-talitos",
2637 .cra_blocksize = AES_BLOCK_SIZE,
2638 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2641 .min_keysize = AES_MIN_KEY_SIZE,
2642 .max_keysize = AES_MAX_KEY_SIZE,
2645 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2648 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2650 .cra_name = "cbc(aes)",
2651 .cra_driver_name = "cbc-aes-talitos",
2652 .cra_blocksize = AES_BLOCK_SIZE,
2653 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2656 .min_keysize = AES_MIN_KEY_SIZE,
2657 .max_keysize = AES_MAX_KEY_SIZE,
2658 .ivsize = AES_BLOCK_SIZE,
2659 .setkey = ablkcipher_aes_setkey,
2662 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2663 DESC_HDR_SEL0_AESU |
2664 DESC_HDR_MODE0_AESU_CBC,
2666 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2668 .cra_name = "ctr(aes)",
2669 .cra_driver_name = "ctr-aes-talitos",
2671 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2674 .min_keysize = AES_MIN_KEY_SIZE,
2675 .max_keysize = AES_MAX_KEY_SIZE,
2676 .ivsize = AES_BLOCK_SIZE,
2677 .setkey = ablkcipher_aes_setkey,
2680 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2681 DESC_HDR_SEL0_AESU |
2682 DESC_HDR_MODE0_AESU_CTR,
2684 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2686 .cra_name = "ecb(des)",
2687 .cra_driver_name = "ecb-des-talitos",
2688 .cra_blocksize = DES_BLOCK_SIZE,
2689 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2692 .min_keysize = DES_KEY_SIZE,
2693 .max_keysize = DES_KEY_SIZE,
2694 .ivsize = DES_BLOCK_SIZE,
2697 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2700 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2702 .cra_name = "cbc(des)",
2703 .cra_driver_name = "cbc-des-talitos",
2704 .cra_blocksize = DES_BLOCK_SIZE,
2705 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2708 .min_keysize = DES_KEY_SIZE,
2709 .max_keysize = DES_KEY_SIZE,
2710 .ivsize = DES_BLOCK_SIZE,
2713 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2715 DESC_HDR_MODE0_DEU_CBC,
2717 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2719 .cra_name = "ecb(des3_ede)",
2720 .cra_driver_name = "ecb-3des-talitos",
2721 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2722 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2725 .min_keysize = DES3_EDE_KEY_SIZE,
2726 .max_keysize = DES3_EDE_KEY_SIZE,
2727 .ivsize = DES3_EDE_BLOCK_SIZE,
2730 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2732 DESC_HDR_MODE0_DEU_3DES,
2734 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2736 .cra_name = "cbc(des3_ede)",
2737 .cra_driver_name = "cbc-3des-talitos",
2738 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2739 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2742 .min_keysize = DES3_EDE_KEY_SIZE,
2743 .max_keysize = DES3_EDE_KEY_SIZE,
2744 .ivsize = DES3_EDE_BLOCK_SIZE,
2747 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2749 DESC_HDR_MODE0_DEU_CBC |
2750 DESC_HDR_MODE0_DEU_3DES,
2752 /* AHASH algorithms. */
2753 { .type = CRYPTO_ALG_TYPE_AHASH,
2755 .halg.digestsize = MD5_DIGEST_SIZE,
2756 .halg.statesize = sizeof(struct talitos_export_state),
2759 .cra_driver_name = "md5-talitos",
2760 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2761 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2765 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2766 DESC_HDR_SEL0_MDEUA |
2767 DESC_HDR_MODE0_MDEU_MD5,
2769 { .type = CRYPTO_ALG_TYPE_AHASH,
2771 .halg.digestsize = SHA1_DIGEST_SIZE,
2772 .halg.statesize = sizeof(struct talitos_export_state),
2775 .cra_driver_name = "sha1-talitos",
2776 .cra_blocksize = SHA1_BLOCK_SIZE,
2777 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2781 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2782 DESC_HDR_SEL0_MDEUA |
2783 DESC_HDR_MODE0_MDEU_SHA1,
2785 { .type = CRYPTO_ALG_TYPE_AHASH,
2787 .halg.digestsize = SHA224_DIGEST_SIZE,
2788 .halg.statesize = sizeof(struct talitos_export_state),
2790 .cra_name = "sha224",
2791 .cra_driver_name = "sha224-talitos",
2792 .cra_blocksize = SHA224_BLOCK_SIZE,
2793 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2797 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2798 DESC_HDR_SEL0_MDEUA |
2799 DESC_HDR_MODE0_MDEU_SHA224,
2801 { .type = CRYPTO_ALG_TYPE_AHASH,
2803 .halg.digestsize = SHA256_DIGEST_SIZE,
2804 .halg.statesize = sizeof(struct talitos_export_state),
2806 .cra_name = "sha256",
2807 .cra_driver_name = "sha256-talitos",
2808 .cra_blocksize = SHA256_BLOCK_SIZE,
2809 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2813 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2814 DESC_HDR_SEL0_MDEUA |
2815 DESC_HDR_MODE0_MDEU_SHA256,
2817 { .type = CRYPTO_ALG_TYPE_AHASH,
2819 .halg.digestsize = SHA384_DIGEST_SIZE,
2820 .halg.statesize = sizeof(struct talitos_export_state),
2822 .cra_name = "sha384",
2823 .cra_driver_name = "sha384-talitos",
2824 .cra_blocksize = SHA384_BLOCK_SIZE,
2825 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2829 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2830 DESC_HDR_SEL0_MDEUB |
2831 DESC_HDR_MODE0_MDEUB_SHA384,
2833 { .type = CRYPTO_ALG_TYPE_AHASH,
2835 .halg.digestsize = SHA512_DIGEST_SIZE,
2836 .halg.statesize = sizeof(struct talitos_export_state),
2838 .cra_name = "sha512",
2839 .cra_driver_name = "sha512-talitos",
2840 .cra_blocksize = SHA512_BLOCK_SIZE,
2841 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2845 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2846 DESC_HDR_SEL0_MDEUB |
2847 DESC_HDR_MODE0_MDEUB_SHA512,
2849 { .type = CRYPTO_ALG_TYPE_AHASH,
2851 .halg.digestsize = MD5_DIGEST_SIZE,
2852 .halg.statesize = sizeof(struct talitos_export_state),
2854 .cra_name = "hmac(md5)",
2855 .cra_driver_name = "hmac-md5-talitos",
2856 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2857 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2861 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2862 DESC_HDR_SEL0_MDEUA |
2863 DESC_HDR_MODE0_MDEU_MD5,
2865 { .type = CRYPTO_ALG_TYPE_AHASH,
2867 .halg.digestsize = SHA1_DIGEST_SIZE,
2868 .halg.statesize = sizeof(struct talitos_export_state),
2870 .cra_name = "hmac(sha1)",
2871 .cra_driver_name = "hmac-sha1-talitos",
2872 .cra_blocksize = SHA1_BLOCK_SIZE,
2873 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2877 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2878 DESC_HDR_SEL0_MDEUA |
2879 DESC_HDR_MODE0_MDEU_SHA1,
2881 { .type = CRYPTO_ALG_TYPE_AHASH,
2883 .halg.digestsize = SHA224_DIGEST_SIZE,
2884 .halg.statesize = sizeof(struct talitos_export_state),
2886 .cra_name = "hmac(sha224)",
2887 .cra_driver_name = "hmac-sha224-talitos",
2888 .cra_blocksize = SHA224_BLOCK_SIZE,
2889 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2893 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2894 DESC_HDR_SEL0_MDEUA |
2895 DESC_HDR_MODE0_MDEU_SHA224,
2897 { .type = CRYPTO_ALG_TYPE_AHASH,
2899 .halg.digestsize = SHA256_DIGEST_SIZE,
2900 .halg.statesize = sizeof(struct talitos_export_state),
2902 .cra_name = "hmac(sha256)",
2903 .cra_driver_name = "hmac-sha256-talitos",
2904 .cra_blocksize = SHA256_BLOCK_SIZE,
2905 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2909 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2910 DESC_HDR_SEL0_MDEUA |
2911 DESC_HDR_MODE0_MDEU_SHA256,
2913 { .type = CRYPTO_ALG_TYPE_AHASH,
2915 .halg.digestsize = SHA384_DIGEST_SIZE,
2916 .halg.statesize = sizeof(struct talitos_export_state),
2918 .cra_name = "hmac(sha384)",
2919 .cra_driver_name = "hmac-sha384-talitos",
2920 .cra_blocksize = SHA384_BLOCK_SIZE,
2921 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2925 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2926 DESC_HDR_SEL0_MDEUB |
2927 DESC_HDR_MODE0_MDEUB_SHA384,
2929 { .type = CRYPTO_ALG_TYPE_AHASH,
2931 .halg.digestsize = SHA512_DIGEST_SIZE,
2932 .halg.statesize = sizeof(struct talitos_export_state),
2934 .cra_name = "hmac(sha512)",
2935 .cra_driver_name = "hmac-sha512-talitos",
2936 .cra_blocksize = SHA512_BLOCK_SIZE,
2937 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2941 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2942 DESC_HDR_SEL0_MDEUB |
2943 DESC_HDR_MODE0_MDEUB_SHA512,
2947 struct talitos_crypto_alg {
2948 struct list_head entry;
2950 struct talitos_alg_template algt;
2953 static int talitos_init_common(struct talitos_ctx *ctx,
2954 struct talitos_crypto_alg *talitos_alg)
2956 struct talitos_private *priv;
2958 /* update context with ptr to dev */
2959 ctx->dev = talitos_alg->dev;
2961 /* assign SEC channel to tfm in round-robin fashion */
2962 priv = dev_get_drvdata(ctx->dev);
2963 ctx->ch = atomic_inc_return(&priv->last_chan) &
2964 (priv->num_channels - 1);
2966 /* copy descriptor header template value */
2967 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2969 /* select done notification */
2970 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2975 static int talitos_cra_init(struct crypto_tfm *tfm)
2977 struct crypto_alg *alg = tfm->__crt_alg;
2978 struct talitos_crypto_alg *talitos_alg;
2979 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2981 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2982 talitos_alg = container_of(__crypto_ahash_alg(alg),
2983 struct talitos_crypto_alg,
2986 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2989 return talitos_init_common(ctx, talitos_alg);
2992 static int talitos_cra_init_aead(struct crypto_aead *tfm)
2994 struct aead_alg *alg = crypto_aead_alg(tfm);
2995 struct talitos_crypto_alg *talitos_alg;
2996 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
2998 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3001 return talitos_init_common(ctx, talitos_alg);
3004 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3006 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3008 talitos_cra_init(tfm);
3011 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3012 sizeof(struct talitos_ahash_req_ctx));
3018 * given the alg's descriptor header template, determine whether descriptor
3019 * type and primary/secondary execution units required match the hw
3020 * capabilities description provided in the device tree node.
3022 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3024 struct talitos_private *priv = dev_get_drvdata(dev);
3027 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3028 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3030 if (SECONDARY_EU(desc_hdr_template))
3031 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3032 & priv->exec_units);
3037 static int talitos_remove(struct platform_device *ofdev)
3039 struct device *dev = &ofdev->dev;
3040 struct talitos_private *priv = dev_get_drvdata(dev);
3041 struct talitos_crypto_alg *t_alg, *n;
3044 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3045 switch (t_alg->algt.type) {
3046 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3048 case CRYPTO_ALG_TYPE_AEAD:
3049 crypto_unregister_aead(&t_alg->algt.alg.aead);
3051 case CRYPTO_ALG_TYPE_AHASH:
3052 crypto_unregister_ahash(&t_alg->algt.alg.hash);
3055 list_del(&t_alg->entry);
3059 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3060 talitos_unregister_rng(dev);
3062 for (i = 0; priv->chan && i < priv->num_channels; i++)
3063 kfree(priv->chan[i].fifo);
3067 for (i = 0; i < 2; i++)
3069 free_irq(priv->irq[i], dev);
3070 irq_dispose_mapping(priv->irq[i]);
3073 tasklet_kill(&priv->done_task[0]);
3075 tasklet_kill(&priv->done_task[1]);
3084 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3085 struct talitos_alg_template
3088 struct talitos_private *priv = dev_get_drvdata(dev);
3089 struct talitos_crypto_alg *t_alg;
3090 struct crypto_alg *alg;
3092 t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
3094 return ERR_PTR(-ENOMEM);
3096 t_alg->algt = *template;
3098 switch (t_alg->algt.type) {
3099 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3100 alg = &t_alg->algt.alg.crypto;
3101 alg->cra_init = talitos_cra_init;
3102 alg->cra_type = &crypto_ablkcipher_type;
3103 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
3104 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3105 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3106 alg->cra_ablkcipher.geniv = "eseqiv";
3108 case CRYPTO_ALG_TYPE_AEAD:
3109 alg = &t_alg->algt.alg.aead.base;
3110 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3111 t_alg->algt.alg.aead.setkey = aead_setkey;
3112 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3113 t_alg->algt.alg.aead.decrypt = aead_decrypt;
3114 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3115 !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3117 return ERR_PTR(-ENOTSUPP);
3120 case CRYPTO_ALG_TYPE_AHASH:
3121 alg = &t_alg->algt.alg.hash.halg.base;
3122 alg->cra_init = talitos_cra_init_ahash;
3123 alg->cra_type = &crypto_ahash_type;
3124 t_alg->algt.alg.hash.init = ahash_init;
3125 t_alg->algt.alg.hash.update = ahash_update;
3126 t_alg->algt.alg.hash.final = ahash_final;
3127 t_alg->algt.alg.hash.finup = ahash_finup;
3128 t_alg->algt.alg.hash.digest = ahash_digest;
3129 if (!strncmp(alg->cra_name, "hmac", 4))
3130 t_alg->algt.alg.hash.setkey = ahash_setkey;
3131 t_alg->algt.alg.hash.import = ahash_import;
3132 t_alg->algt.alg.hash.export = ahash_export;
3134 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3135 !strncmp(alg->cra_name, "hmac", 4)) {
3137 return ERR_PTR(-ENOTSUPP);
3139 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3140 (!strcmp(alg->cra_name, "sha224") ||
3141 !strcmp(alg->cra_name, "hmac(sha224)"))) {
3142 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3143 t_alg->algt.desc_hdr_template =
3144 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3145 DESC_HDR_SEL0_MDEUA |
3146 DESC_HDR_MODE0_MDEU_SHA256;
3150 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3152 return ERR_PTR(-EINVAL);
3155 alg->cra_module = THIS_MODULE;
3156 if (t_alg->algt.priority)
3157 alg->cra_priority = t_alg->algt.priority;
3159 alg->cra_priority = TALITOS_CRA_PRIORITY;
3160 if (has_ftr_sec1(priv))
3161 alg->cra_alignmask = 3;
3163 alg->cra_alignmask = 0;
3164 alg->cra_ctxsize = sizeof(struct talitos_ctx);
3165 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3172 static int talitos_probe_irq(struct platform_device *ofdev)
3174 struct device *dev = &ofdev->dev;
3175 struct device_node *np = ofdev->dev.of_node;
3176 struct talitos_private *priv = dev_get_drvdata(dev);
3178 bool is_sec1 = has_ftr_sec1(priv);
3180 priv->irq[0] = irq_of_parse_and_map(np, 0);
3181 if (!priv->irq[0]) {
3182 dev_err(dev, "failed to map irq\n");
3186 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3187 dev_driver_string(dev), dev);
3191 priv->irq[1] = irq_of_parse_and_map(np, 1);
3193 /* get the primary irq line */
3194 if (!priv->irq[1]) {
3195 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3196 dev_driver_string(dev), dev);
3200 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3201 dev_driver_string(dev), dev);
3205 /* get the secondary irq line */
3206 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3207 dev_driver_string(dev), dev);
3209 dev_err(dev, "failed to request secondary irq\n");
3210 irq_dispose_mapping(priv->irq[1]);
3218 dev_err(dev, "failed to request primary irq\n");
3219 irq_dispose_mapping(priv->irq[0]);
3226 static int talitos_probe(struct platform_device *ofdev)
3228 struct device *dev = &ofdev->dev;
3229 struct device_node *np = ofdev->dev.of_node;
3230 struct talitos_private *priv;
3231 const unsigned int *prop;
3235 priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
3239 INIT_LIST_HEAD(&priv->alg_list);
3241 dev_set_drvdata(dev, priv);
3243 priv->ofdev = ofdev;
3245 spin_lock_init(&priv->reg_lock);
3247 priv->reg = of_iomap(np, 0);
3249 dev_err(dev, "failed to of_iomap\n");
3254 /* get SEC version capabilities from device tree */
3255 prop = of_get_property(np, "fsl,num-channels", NULL);
3257 priv->num_channels = *prop;
3259 prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
3261 priv->chfifo_len = *prop;
3263 prop = of_get_property(np, "fsl,exec-units-mask", NULL);
3265 priv->exec_units = *prop;
3267 prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
3269 priv->desc_types = *prop;
3271 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3272 !priv->exec_units || !priv->desc_types) {
3273 dev_err(dev, "invalid property data in device tree node\n");
3278 if (of_device_is_compatible(np, "fsl,sec3.0"))
3279 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3281 if (of_device_is_compatible(np, "fsl,sec2.1"))
3282 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3283 TALITOS_FTR_SHA224_HWINIT |
3284 TALITOS_FTR_HMAC_OK;
3286 if (of_device_is_compatible(np, "fsl,sec1.0"))
3287 priv->features |= TALITOS_FTR_SEC1;
3289 if (of_device_is_compatible(np, "fsl,sec1.2")) {
3290 priv->reg_deu = priv->reg + TALITOS12_DEU;
3291 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3292 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3293 stride = TALITOS1_CH_STRIDE;
3294 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3295 priv->reg_deu = priv->reg + TALITOS10_DEU;
3296 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3297 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3298 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3299 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3300 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3301 stride = TALITOS1_CH_STRIDE;
3303 priv->reg_deu = priv->reg + TALITOS2_DEU;
3304 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3305 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3306 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3307 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3308 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3309 priv->reg_keu = priv->reg + TALITOS2_KEU;
3310 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3311 stride = TALITOS2_CH_STRIDE;
3314 err = talitos_probe_irq(ofdev);
3318 if (of_device_is_compatible(np, "fsl,sec1.0")) {
3319 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3320 (unsigned long)dev);
3322 if (!priv->irq[1]) {
3323 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3324 (unsigned long)dev);
3326 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3327 (unsigned long)dev);
3328 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3329 (unsigned long)dev);
3333 priv->chan = kzalloc(sizeof(struct talitos_channel) *
3334 priv->num_channels, GFP_KERNEL);
3336 dev_err(dev, "failed to allocate channel management space\n");
3341 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3343 for (i = 0; i < priv->num_channels; i++) {
3344 priv->chan[i].reg = priv->reg + stride * (i + 1);
3345 if (!priv->irq[1] || !(i & 1))
3346 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3348 spin_lock_init(&priv->chan[i].head_lock);
3349 spin_lock_init(&priv->chan[i].tail_lock);
3351 priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
3352 priv->fifo_len, GFP_KERNEL);
3353 if (!priv->chan[i].fifo) {
3354 dev_err(dev, "failed to allocate request fifo %d\n", i);
3359 atomic_set(&priv->chan[i].submit_count,
3360 -(priv->chfifo_len - 1));
3363 dma_set_mask(dev, DMA_BIT_MASK(36));
3365 /* reset and initialize the h/w */
3366 err = init_device(dev);
3368 dev_err(dev, "failed to initialize device\n");
3372 /* register the RNG, if available */
3373 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3374 err = talitos_register_rng(dev);
3376 dev_err(dev, "failed to register hwrng: %d\n", err);
3379 dev_info(dev, "hwrng\n");
3382 /* register crypto algorithms the device supports */
3383 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3384 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3385 struct talitos_crypto_alg *t_alg;
3386 struct crypto_alg *alg = NULL;
3388 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3389 if (IS_ERR(t_alg)) {
3390 err = PTR_ERR(t_alg);
3391 if (err == -ENOTSUPP)
3396 switch (t_alg->algt.type) {
3397 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3398 err = crypto_register_alg(
3399 &t_alg->algt.alg.crypto);
3400 alg = &t_alg->algt.alg.crypto;
3403 case CRYPTO_ALG_TYPE_AEAD:
3404 err = crypto_register_aead(
3405 &t_alg->algt.alg.aead);
3406 alg = &t_alg->algt.alg.aead.base;
3409 case CRYPTO_ALG_TYPE_AHASH:
3410 err = crypto_register_ahash(
3411 &t_alg->algt.alg.hash);
3412 alg = &t_alg->algt.alg.hash.halg.base;
3416 dev_err(dev, "%s alg registration failed\n",
3417 alg->cra_driver_name);
3420 list_add_tail(&t_alg->entry, &priv->alg_list);
3423 if (!list_empty(&priv->alg_list))
3424 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3425 (char *)of_get_property(np, "compatible", NULL));
3430 talitos_remove(ofdev);
3435 static const struct of_device_id talitos_match[] = {
3436 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3438 .compatible = "fsl,sec1.0",
3441 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3443 .compatible = "fsl,sec2.0",
3448 MODULE_DEVICE_TABLE(of, talitos_match);
3450 static struct platform_driver talitos_driver = {
3453 .of_match_table = talitos_match,
3455 .probe = talitos_probe,
3456 .remove = talitos_remove,
3459 module_platform_driver(talitos_driver);
3461 MODULE_LICENSE("GPL");
3462 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3463 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");