GNU Linux-libre 4.19.314-gnu1
drivers/crypto/amcc/crypto4xx_core.c
1 /**
2  * AMCC SoC PPC4xx Crypto Driver
3  *
4  * Copyright (c) 2008 Applied Micro Circuits Corporation.
5  * All rights reserved. James Hsiao <jhsiao@amcc.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
 * This file implements the AMCC crypto offload Linux device driver for
 * use with the Linux CryptoAPI.
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/interrupt.h>
23 #include <linux/spinlock_types.h>
24 #include <linux/random.h>
25 #include <linux/scatterlist.h>
26 #include <linux/crypto.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/platform_device.h>
29 #include <linux/init.h>
30 #include <linux/module.h>
31 #include <linux/of_address.h>
32 #include <linux/of_irq.h>
33 #include <linux/of_platform.h>
34 #include <linux/slab.h>
35 #include <asm/dcr.h>
36 #include <asm/dcr-regs.h>
37 #include <asm/cacheflush.h>
38 #include <crypto/aead.h>
39 #include <crypto/aes.h>
40 #include <crypto/ctr.h>
41 #include <crypto/gcm.h>
42 #include <crypto/sha.h>
43 #include <crypto/scatterwalk.h>
44 #include <crypto/skcipher.h>
45 #include <crypto/internal/aead.h>
46 #include <crypto/internal/skcipher.h>
47 #include "crypto4xx_reg_def.h"
48 #include "crypto4xx_core.h"
49 #include "crypto4xx_sa.h"
50 #include "crypto4xx_trng.h"
51
52 #define PPC4XX_SEC_VERSION_STR                  "0.5"
53
54 /**
55  * PPC4xx Crypto Engine Initialization Routine
56  */
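/*
 * Program the byte order and PE DMA configuration (asserting and then
 * releasing the sg/pdr/pe resets), seed the PRNG, point the engine at
 * the PDR/RDR, gather and scatter rings, zero the upper address
 * registers, then clear and enable the "packet done" interrupt (plus
 * the timeout interrupt on rev B parts).
 */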
57 static void crypto4xx_hw_init(struct crypto4xx_device *dev)
58 {
59         union ce_ring_size ring_size;
60         union ce_ring_control ring_ctrl;
61         union ce_part_ring_size part_ring_size;
62         union ce_io_threshold io_threshold;
63         u32 rand_num;
64         union ce_pe_dma_cfg pe_dma_cfg;
65         u32 device_ctrl;
66
67         writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
        /* set up PE DMA: reset sg, pdr and pe, then release the resets */
69         pe_dma_cfg.w = 0;
70         pe_dma_cfg.bf.bo_sgpd_en = 1;
71         pe_dma_cfg.bf.bo_data_en = 0;
72         pe_dma_cfg.bf.bo_sa_en = 1;
73         pe_dma_cfg.bf.bo_pd_en = 1;
74         pe_dma_cfg.bf.dynamic_sa_en = 1;
75         pe_dma_cfg.bf.reset_sg = 1;
76         pe_dma_cfg.bf.reset_pdr = 1;
77         pe_dma_cfg.bf.reset_pe = 1;
78         writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
        /* release the reset of pe, sg and pdr */
80         pe_dma_cfg.bf.pe_mode = 0;
81         pe_dma_cfg.bf.reset_sg = 0;
82         pe_dma_cfg.bf.reset_pdr = 0;
83         pe_dma_cfg.bf.reset_pe = 0;
84         pe_dma_cfg.bf.bo_td_en = 0;
85         writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
86         writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
87         writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
88         writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
89         get_random_bytes(&rand_num, sizeof(rand_num));
90         writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
91         get_random_bytes(&rand_num, sizeof(rand_num));
92         writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
93         ring_size.w = 0;
94         ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
95         ring_size.bf.ring_size   = PPC4XX_NUM_PD;
96         writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
97         ring_ctrl.w = 0;
98         writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
99         device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
100         device_ctrl |= PPC4XX_DC_3DES_EN;
101         writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
102         writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
103         writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
104         part_ring_size.w = 0;
105         part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
106         part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
107         writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
108         writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
109         io_threshold.w = 0;
110         io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
111         io_threshold.bf.input_threshold  = PPC4XX_INPUT_THRESHOLD;
112         writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
113         writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
114         writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
115         writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
116         writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
117         writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
118         writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
119         writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
        /* enable the packet engine, keeping pe, sg and pdr out of reset */
121         pe_dma_cfg.bf.pe_mode = 1;
122         pe_dma_cfg.bf.reset_sg = 0;
123         pe_dma_cfg.bf.reset_pdr = 0;
124         pe_dma_cfg.bf.reset_pe = 0;
125         pe_dma_cfg.bf.bo_td_en = 0;
126         writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
        /* clear all pending interrupts */
128         writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
129         writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
130         writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
131         writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
132         if (dev->is_revb) {
133                 writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
134                        dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
135                 writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
136                        dev->ce_base + CRYPTO4XX_INT_EN);
137         } else {
138                 writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
139         }
140 }
141
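/*
 * Allocate the inbound and outbound SA buffers for a tfm context.
 * Note that "size" is given in 32-bit words, so each buffer is
 * size * 4 bytes long.
 */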
142 int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
143 {
144         ctx->sa_in = kcalloc(size, 4, GFP_ATOMIC);
145         if (ctx->sa_in == NULL)
146                 return -ENOMEM;
147
148         ctx->sa_out = kcalloc(size, 4, GFP_ATOMIC);
149         if (ctx->sa_out == NULL) {
150                 kfree(ctx->sa_in);
151                 ctx->sa_in = NULL;
152                 return -ENOMEM;
153         }
154
155         ctx->sa_len = size;
156
157         return 0;
158 }
159
160 void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
161 {
162         kfree(ctx->sa_in);
163         ctx->sa_in = NULL;
164         kfree(ctx->sa_out);
165         ctx->sa_out = NULL;
166         ctx->sa_len = 0;
167 }
168
/**
 * alloc memory for the packet descriptor ring
 * and the per-descriptor shadow SA and state record pools
 */
174 static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
175 {
176         int i;
177         dev->pdr = dma_alloc_coherent(dev->core_dev->device,
178                                       sizeof(struct ce_pd) * PPC4XX_NUM_PD,
179                                       &dev->pdr_pa, GFP_ATOMIC);
180         if (!dev->pdr)
181                 return -ENOMEM;
182
183         dev->pdr_uinfo = kcalloc(PPC4XX_NUM_PD, sizeof(struct pd_uinfo),
184                                  GFP_KERNEL);
185         if (!dev->pdr_uinfo) {
186                 dma_free_coherent(dev->core_dev->device,
187                                   sizeof(struct ce_pd) * PPC4XX_NUM_PD,
188                                   dev->pdr,
189                                   dev->pdr_pa);
190                 return -ENOMEM;
191         }
192         memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
193         dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
194                                    sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
195                                    &dev->shadow_sa_pool_pa,
196                                    GFP_ATOMIC);
197         if (!dev->shadow_sa_pool)
198                 return -ENOMEM;
199
200         dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
201                          sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
202                          &dev->shadow_sr_pool_pa, GFP_ATOMIC);
203         if (!dev->shadow_sr_pool)
204                 return -ENOMEM;
205         for (i = 0; i < PPC4XX_NUM_PD; i++) {
206                 struct ce_pd *pd = &dev->pdr[i];
207                 struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];
208
209                 pd->sa = dev->shadow_sa_pool_pa +
210                         sizeof(union shadow_sa_buf) * i;
211
212                 /* alloc 256 bytes which is enough for any kind of dynamic sa */
213                 pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;
214
215                 /* alloc state record */
216                 pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
217                 pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
218                     sizeof(struct sa_state_record) * i;
219         }
220
221         return 0;
222 }
223
224 static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
225 {
226         if (dev->pdr)
227                 dma_free_coherent(dev->core_dev->device,
228                                   sizeof(struct ce_pd) * PPC4XX_NUM_PD,
229                                   dev->pdr, dev->pdr_pa);
230
231         if (dev->shadow_sa_pool)
232                 dma_free_coherent(dev->core_dev->device,
233                         sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
234                         dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
235
236         if (dev->shadow_sr_pool)
237                 dma_free_coherent(dev->core_dev->device,
238                         sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
239                         dev->shadow_sr_pool, dev->shadow_sr_pool_pa);
240
241         kfree(dev->pdr_uinfo);
242 }
243
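/*
 * Reserve the next free packet descriptor slot. The ring counts as
 * full once advancing the head would make it catch up with the tail,
 * so one of the PPC4XX_NUM_PD entries always stays unused.
 */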
244 static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
245 {
246         u32 retval;
247         u32 tmp;
248
249         retval = dev->pdr_head;
250         tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;
251
252         if (tmp == dev->pdr_tail)
253                 return ERING_WAS_FULL;
254
255         dev->pdr_head = tmp;
256
257         return retval;
258 }
259
260 static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
261 {
262         struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
263         u32 tail;
264         unsigned long flags;
265
266         spin_lock_irqsave(&dev->core_dev->lock, flags);
267         pd_uinfo->state = PD_ENTRY_FREE;
268
269         if (dev->pdr_tail != PPC4XX_LAST_PD)
270                 dev->pdr_tail++;
271         else
272                 dev->pdr_tail = 0;
273         tail = dev->pdr_tail;
274         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
275
276         return tail;
277 }
278
279 /**
280  * alloc memory for the gather ring
281  * no need to alloc buf for the ring
282  * gdr_tail, gdr_head and gdr_count are initialized by this function
283  */
284 static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
285 {
286         dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
287                                        sizeof(struct ce_gd) * PPC4XX_NUM_GD,
288                                        &dev->gdr_pa, GFP_ATOMIC);
289         if (!dev->gdr)
290                 return -ENOMEM;
291
292         return 0;
293 }
294
295 static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
296 {
297         dma_free_coherent(dev->core_dev->device,
298                           sizeof(struct ce_gd) * PPC4XX_NUM_GD,
299                           dev->gdr, dev->gdr_pa);
300 }
301
/*
 * When this function is called, preemption or interrupts
 * must be disabled.
 */
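/*
 * Reserve n consecutive gather descriptors starting at gdr_head and
 * return the index of the first one, or ERING_WAS_FULL if the new
 * head would run into gdr_tail. Illustrative example, assuming a
 * 256-entry ring: head = 250, tail = 10 and n = 8 give
 * tmp = (250 + 8) % 256 = 2, which lies outside [tail, head), so the
 * head wraps around to 2 and 250 is returned.
 */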
306 static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
307 {
308         u32 retval;
309         u32 tmp;
310
311         if (n >= PPC4XX_NUM_GD)
312                 return ERING_WAS_FULL;
313
314         retval = dev->gdr_head;
315         tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
316         if (dev->gdr_head > dev->gdr_tail) {
317                 if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
318                         return ERING_WAS_FULL;
319         } else if (dev->gdr_head < dev->gdr_tail) {
320                 if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
321                         return ERING_WAS_FULL;
322         }
323         dev->gdr_head = tmp;
324
325         return retval;
326 }
327
328 static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
329 {
330         unsigned long flags;
331
332         spin_lock_irqsave(&dev->core_dev->lock, flags);
333         if (dev->gdr_tail == dev->gdr_head) {
334                 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
335                 return 0;
336         }
337
338         if (dev->gdr_tail != PPC4XX_LAST_GD)
339                 dev->gdr_tail++;
340         else
341                 dev->gdr_tail = 0;
342
343         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
344
345         return 0;
346 }
347
348 static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
349                                               dma_addr_t *gd_dma, u32 idx)
350 {
351         *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
352
353         return &dev->gdr[idx];
354 }
355
356 /**
357  * alloc memory for the scatter ring
358  * need to alloc buf for the ring
359  * sdr_tail, sdr_head and sdr_count are initialized by this function
360  */
361 static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
362 {
363         int i;
364
365         /* alloc memory for scatter descriptor ring */
366         dev->sdr = dma_alloc_coherent(dev->core_dev->device,
367                                       sizeof(struct ce_sd) * PPC4XX_NUM_SD,
368                                       &dev->sdr_pa, GFP_ATOMIC);
369         if (!dev->sdr)
370                 return -ENOMEM;
371
372         dev->scatter_buffer_va =
373                 dma_alloc_coherent(dev->core_dev->device,
374                         PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
375                         &dev->scatter_buffer_pa, GFP_ATOMIC);
376         if (!dev->scatter_buffer_va)
377                 return -ENOMEM;
378
379         for (i = 0; i < PPC4XX_NUM_SD; i++) {
380                 dev->sdr[i].ptr = dev->scatter_buffer_pa +
381                                   PPC4XX_SD_BUFFER_SIZE * i;
382         }
383
384         return 0;
385 }
386
387 static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
388 {
389         if (dev->sdr)
390                 dma_free_coherent(dev->core_dev->device,
391                                   sizeof(struct ce_sd) * PPC4XX_NUM_SD,
392                                   dev->sdr, dev->sdr_pa);
393
394         if (dev->scatter_buffer_va)
395                 dma_free_coherent(dev->core_dev->device,
396                                   PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
397                                   dev->scatter_buffer_va,
398                                   dev->scatter_buffer_pa);
399 }
400
/*
 * When this function is called, preemption or interrupts
 * must be disabled.
 */
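/*
 * Scatter-ring counterpart of crypto4xx_get_n_gd(): reserve n
 * consecutive scatter descriptors starting at sdr_head.
 */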
405 static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
406 {
407         u32 retval;
408         u32 tmp;
409
410         if (n >= PPC4XX_NUM_SD)
411                 return ERING_WAS_FULL;
412
413         retval = dev->sdr_head;
414         tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
        if (dev->sdr_head > dev->sdr_tail) {
416                 if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
417                         return ERING_WAS_FULL;
418         } else if (dev->sdr_head < dev->sdr_tail) {
419                 if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
420                         return ERING_WAS_FULL;
        } /* the head == tail (empty ring) case is already taken care of */
422         dev->sdr_head = tmp;
423
424         return retval;
425 }
426
427 static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
428 {
429         unsigned long flags;
430
431         spin_lock_irqsave(&dev->core_dev->lock, flags);
432         if (dev->sdr_tail == dev->sdr_head) {
433                 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
434                 return 0;
435         }
436         if (dev->sdr_tail != PPC4XX_LAST_SD)
437                 dev->sdr_tail++;
438         else
439                 dev->sdr_tail = 0;
440         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
441
442         return 0;
443 }
444
445 static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
446                                               dma_addr_t *sd_dma, u32 idx)
447 {
448         *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
449
450         return &dev->sdr[idx];
451 }
452
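/*
 * Copy processed data out of the scatter ring buffers back into the
 * caller's destination scatterlist, splitting the copy in two when
 * the used scatter descriptors wrap around the end of the ring.
 */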
453 static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
454                                       struct ce_pd *pd,
455                                       struct pd_uinfo *pd_uinfo,
456                                       u32 nbytes,
457                                       struct scatterlist *dst)
458 {
459         unsigned int first_sd = pd_uinfo->first_sd;
460         unsigned int last_sd;
461         unsigned int overflow = 0;
462         unsigned int to_copy;
463         unsigned int dst_start = 0;
464
465         /*
466          * Because the scatter buffers are all neatly organized in one
467          * big continuous ringbuffer; scatterwalk_map_and_copy() can
468          * be instructed to copy a range of buffers in one go.
469          */
470
471         last_sd = (first_sd + pd_uinfo->num_sd);
472         if (last_sd > PPC4XX_LAST_SD) {
473                 last_sd = PPC4XX_LAST_SD;
474                 overflow = last_sd % PPC4XX_NUM_SD;
475         }
476
477         while (nbytes) {
478                 void *buf = dev->scatter_buffer_va +
479                         first_sd * PPC4XX_SD_BUFFER_SIZE;
480
481                 to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
482                                       (1 + last_sd - first_sd));
483                 scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
484                 nbytes -= to_copy;
485
486                 if (overflow) {
487                         first_sd = 0;
488                         last_sd = overflow;
489                         dst_start += to_copy;
490                         overflow = 0;
491                 }
492         }
493 }
494
495 static void crypto4xx_copy_digest_to_dst(void *dst,
496                                         struct pd_uinfo *pd_uinfo,
497                                         struct crypto4xx_ctx *ctx)
498 {
499         struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
500
501         if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
502                 memcpy(dst, pd_uinfo->sr_va->save_digest,
503                        SA_HASH_ALG_SHA1_DIGEST_SIZE);
504         }
505 }
506
507 static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
508                                   struct pd_uinfo *pd_uinfo)
509 {
510         int i;
511         if (pd_uinfo->num_gd) {
512                 for (i = 0; i < pd_uinfo->num_gd; i++)
513                         crypto4xx_put_gd_to_gdr(dev);
514                 pd_uinfo->first_gd = 0xffffffff;
515                 pd_uinfo->num_gd = 0;
516         }
517         if (pd_uinfo->num_sd) {
518                 for (i = 0; i < pd_uinfo->num_sd; i++)
519                         crypto4xx_put_sd_to_sdr(dev);
520
521                 pd_uinfo->first_sd = 0xffffffff;
522                 pd_uinfo->num_sd = 0;
523         }
524 }
525
526 static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
527                                      struct pd_uinfo *pd_uinfo,
528                                      struct ce_pd *pd)
529 {
530         struct skcipher_request *req;
531         struct scatterlist *dst;
532
533         req = skcipher_request_cast(pd_uinfo->async_req);
534
535         if (pd_uinfo->using_sd) {
536                 crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
537                                           req->cryptlen, req->dst);
538         } else {
539                 dst = pd_uinfo->dest_va;
540                 dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
541                                DMA_FROM_DEVICE);
542         }
543
544         if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
545                 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
546
547                 crypto4xx_memcpy_from_le32((u32 *)req->iv,
548                         pd_uinfo->sr_va->save_iv,
549                         crypto_skcipher_ivsize(skcipher));
550         }
551
552         crypto4xx_ret_sg_desc(dev, pd_uinfo);
553
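        /*
         * A request that was accepted with -EBUSY was backlogged by the
         * caller; report it as in progress before completing it.
         */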
554         if (pd_uinfo->state & PD_ENTRY_BUSY)
555                 skcipher_request_complete(req, -EINPROGRESS);
556         skcipher_request_complete(req, 0);
557 }
558
559 static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
560                                 struct pd_uinfo *pd_uinfo)
561 {
562         struct crypto4xx_ctx *ctx;
563         struct ahash_request *ahash_req;
564
565         ahash_req = ahash_request_cast(pd_uinfo->async_req);
566         ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req));
567
568         crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
569         crypto4xx_ret_sg_desc(dev, pd_uinfo);
570
571         if (pd_uinfo->state & PD_ENTRY_BUSY)
572                 ahash_request_complete(ahash_req, -EINPROGRESS);
573         ahash_request_complete(ahash_req, 0);
574 }
575
576 static void crypto4xx_aead_done(struct crypto4xx_device *dev,
577                                 struct pd_uinfo *pd_uinfo,
578                                 struct ce_pd *pd)
579 {
580         struct aead_request *aead_req = container_of(pd_uinfo->async_req,
581                 struct aead_request, base);
582         struct scatterlist *dst = pd_uinfo->dest_va;
583         size_t cp_len = crypto_aead_authsize(
584                 crypto_aead_reqtfm(aead_req));
585         u32 icv[AES_BLOCK_SIZE];
586         int err = 0;
587
588         if (pd_uinfo->using_sd) {
589                 crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
590                                           pd->pd_ctl_len.bf.pkt_len,
591                                           dst);
592         } else {
593                 __dma_sync_page(sg_page(dst), dst->offset, dst->length,
594                                 DMA_FROM_DEVICE);
595         }
596
597         if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
598                 /* append icv at the end */
599                 crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
600                                            sizeof(icv));
601
602                 scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
603                                          cp_len, 1);
604         } else {
605                 /* check icv at the end */
606                 scatterwalk_map_and_copy(icv, aead_req->src,
607                         aead_req->assoclen + aead_req->cryptlen -
608                         cp_len, cp_len, 0);
609
610                 crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv));
611
612                 if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
613                         err = -EBADMSG;
614         }
615
616         crypto4xx_ret_sg_desc(dev, pd_uinfo);
617
618         if (pd->pd_ctl.bf.status & 0xff) {
619                 if (!__ratelimit(&dev->aead_ratelimit)) {
620                         if (pd->pd_ctl.bf.status & 2)
621                                 pr_err("pad fail error\n");
622                         if (pd->pd_ctl.bf.status & 4)
623                                 pr_err("seqnum fail\n");
624                         if (pd->pd_ctl.bf.status & 8)
625                                 pr_err("error _notify\n");
626                         pr_err("aead return err status = 0x%02x\n",
627                                 pd->pd_ctl.bf.status & 0xff);
628                         pr_err("pd pad_ctl = 0x%08x\n",
629                                 pd->pd_ctl.bf.pd_pad_ctl);
630                 }
631                 err = -EINVAL;
632         }
633
634         if (pd_uinfo->state & PD_ENTRY_BUSY)
635                 aead_request_complete(aead_req, -EINPROGRESS);
636
637         aead_request_complete(aead_req, err);
638 }
639
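/*
 * Dispatch a completed packet descriptor to the completion handler
 * that matches the request type (skcipher, AEAD or ahash).
 */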
640 static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
641 {
642         struct ce_pd *pd = &dev->pdr[idx];
643         struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
644
645         switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
646         case CRYPTO_ALG_TYPE_SKCIPHER:
647                 crypto4xx_cipher_done(dev, pd_uinfo, pd);
648                 break;
649         case CRYPTO_ALG_TYPE_AEAD:
650                 crypto4xx_aead_done(dev, pd_uinfo, pd);
651                 break;
652         case CRYPTO_ALG_TYPE_AHASH:
653                 crypto4xx_ahash_done(dev, pd_uinfo);
654                 break;
655         }
656 }
657
658 static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
659 {
660         crypto4xx_destroy_pdr(core_dev->dev);
661         crypto4xx_destroy_gdr(core_dev->dev);
662         crypto4xx_destroy_sdr(core_dev->dev);
663         iounmap(core_dev->dev->ce_base);
664         kfree(core_dev->dev);
665         kfree(core_dev);
666 }
667
668 static u32 get_next_gd(u32 current)
669 {
670         if (current != PPC4XX_LAST_GD)
671                 return current + 1;
672         else
673                 return 0;
674 }
675
676 static u32 get_next_sd(u32 current)
677 {
678         if (current != PPC4XX_LAST_SD)
679                 return current + 1;
680         else
681                 return 0;
682 }
683
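/*
 * Build and submit one packet descriptor for the given request:
 * reserve gather/scatter/packet descriptors as needed, map src and
 * dst for DMA, copy in the SA and kick the packet engine. Returns
 * -EINPROGRESS on success (or -EBUSY when a backlog-capable caller
 * should throttle), -EAGAIN when a descriptor ring is full, or
 * another negative errno on invalid input.
 */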
684 int crypto4xx_build_pd(struct crypto_async_request *req,
685                        struct crypto4xx_ctx *ctx,
686                        struct scatterlist *src,
687                        struct scatterlist *dst,
688                        const unsigned int datalen,
689                        const __le32 *iv, const u32 iv_len,
690                        const struct dynamic_sa_ctl *req_sa,
691                        const unsigned int sa_len,
692                        const unsigned int assoclen,
693                        struct scatterlist *_dst)
694 {
695         struct crypto4xx_device *dev = ctx->dev;
696         struct dynamic_sa_ctl *sa;
697         struct ce_gd *gd;
698         struct ce_pd *pd;
699         u32 num_gd, num_sd;
700         u32 fst_gd = 0xffffffff;
701         u32 fst_sd = 0xffffffff;
702         u32 pd_entry;
703         unsigned long flags;
704         struct pd_uinfo *pd_uinfo;
705         unsigned int nbytes = datalen;
706         size_t offset_to_sr_ptr;
707         u32 gd_idx = 0;
708         int tmp;
709         bool is_busy, force_sd;
710
        /*
         * There's a very subtle/disguised "bug" in the hardware that
         * gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
         * of the hardware spec:
         * *drum roll* the AES/(T)DES OFB and CFB modes are listed as
         * operation modes for >>> "Block ciphers" <<<.
         *
         * To work around this issue and stop the hardware from causing
         * "overran dst buffer" on ciphertexts that are not a multiple
         * of 16 (AES_BLOCK_SIZE), we force the driver to use the
         * scatter buffers.
         */
723         force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
724                 || req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
725                 && (datalen % AES_BLOCK_SIZE);
726
727         /* figure how many gd are needed */
728         tmp = sg_nents_for_len(src, assoclen + datalen);
729         if (tmp < 0) {
730                 dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
731                 return tmp;
732         }
733         if (tmp == 1)
734                 tmp = 0;
735         num_gd = tmp;
736
737         if (assoclen) {
738                 nbytes += assoclen;
739                 dst = scatterwalk_ffwd(_dst, dst, assoclen);
740         }
741
742         /* figure how many sd are needed */
743         if (sg_is_last(dst) && force_sd == false) {
744                 num_sd = 0;
745         } else {
746                 if (datalen > PPC4XX_SD_BUFFER_SIZE) {
747                         num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
748                         if (datalen % PPC4XX_SD_BUFFER_SIZE)
749                                 num_sd++;
750                 } else {
751                         num_sd = 1;
752                 }
753         }
754
        /*
         * The following section of code needs to be protected by the lock:
         * the gather and scatter ring entries must be consecutive, and if
         * we run out of any kind of descriptor, the descriptors already
         * taken must be returned to their original place.
         */
761         spin_lock_irqsave(&dev->core_dev->lock, flags);
        /*
         * Let the caller know to slow down, once more than 13/16ths = 81%
         * of the available data contexts are being used simultaneously.
         *
         * With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" for
         * 31 more contexts before new requests have to be rejected.
         */
769         if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
770                 is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
771                         ((PPC4XX_NUM_PD * 13) / 16);
772         } else {
                /*
                 * To fix contention issues between ipsec (no backlog) and
                 * dm-crypt (backlog), reserve 32 entries for "no backlog"
                 * data contexts.
                 */
778                 is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
779                         ((PPC4XX_NUM_PD * 15) / 16);
780
781                 if (is_busy) {
782                         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
783                         return -EBUSY;
784                 }
785         }
786
787         if (num_gd) {
788                 fst_gd = crypto4xx_get_n_gd(dev, num_gd);
789                 if (fst_gd == ERING_WAS_FULL) {
790                         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
791                         return -EAGAIN;
792                 }
793         }
794         if (num_sd) {
795                 fst_sd = crypto4xx_get_n_sd(dev, num_sd);
796                 if (fst_sd == ERING_WAS_FULL) {
797                         if (num_gd)
798                                 dev->gdr_head = fst_gd;
799                         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
800                         return -EAGAIN;
801                 }
802         }
803         pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
804         if (pd_entry == ERING_WAS_FULL) {
805                 if (num_gd)
806                         dev->gdr_head = fst_gd;
807                 if (num_sd)
808                         dev->sdr_head = fst_sd;
809                 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
810                 return -EAGAIN;
811         }
812         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
813
814         pd = &dev->pdr[pd_entry];
815         pd->sa_len = sa_len;
816
817         pd_uinfo = &dev->pdr_uinfo[pd_entry];
818         pd_uinfo->num_gd = num_gd;
819         pd_uinfo->num_sd = num_sd;
820         pd_uinfo->dest_va = dst;
821         pd_uinfo->async_req = req;
822
823         if (iv_len)
824                 memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
825
826         sa = pd_uinfo->sa_va;
827         memcpy(sa, req_sa, sa_len * 4);
828
829         sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
830         offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
831         *(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;
832
833         if (num_gd) {
834                 dma_addr_t gd_dma;
835                 struct scatterlist *sg;
836
837                 /* get first gd we are going to use */
838                 gd_idx = fst_gd;
839                 pd_uinfo->first_gd = fst_gd;
840                 gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
841                 pd->src = gd_dma;
842                 /* enable gather */
843                 sa->sa_command_0.bf.gather = 1;
844                 /* walk the sg, and setup gather array */
845
846                 sg = src;
847                 while (nbytes) {
848                         size_t len;
849
850                         len = min(sg->length, nbytes);
851                         gd->ptr = dma_map_page(dev->core_dev->device,
852                                 sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
853                         gd->ctl_len.len = len;
854                         gd->ctl_len.done = 0;
855                         gd->ctl_len.ready = 1;
856                         if (len >= nbytes)
857                                 break;
858
859                         nbytes -= sg->length;
860                         gd_idx = get_next_gd(gd_idx);
861                         gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
862                         sg = sg_next(sg);
863                 }
864         } else {
865                 pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
866                                 src->offset, min(nbytes, src->length),
867                                 DMA_TO_DEVICE);
868                 /*
869                  * Disable gather in sa command
870                  */
871                 sa->sa_command_0.bf.gather = 0;
872                 /*
873                  * Indicate gather array is not used
874                  */
875                 pd_uinfo->first_gd = 0xffffffff;
876         }
877         if (!num_sd) {
                /*
                 * We know the application gave us dst as one whole piece of
                 * memory, so there is no need to use the scatter ring.
                 */
882                 pd_uinfo->using_sd = 0;
883                 pd_uinfo->first_sd = 0xffffffff;
884                 sa->sa_command_0.bf.scatter = 0;
885                 pd->dest = (u32)dma_map_page(dev->core_dev->device,
886                                              sg_page(dst), dst->offset,
887                                              min(datalen, dst->length),
888                                              DMA_TO_DEVICE);
889         } else {
890                 dma_addr_t sd_dma;
891                 struct ce_sd *sd = NULL;
892
893                 u32 sd_idx = fst_sd;
894                 nbytes = datalen;
895                 sa->sa_command_0.bf.scatter = 1;
896                 pd_uinfo->using_sd = 1;
897                 pd_uinfo->first_sd = fst_sd;
898                 sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
899                 pd->dest = sd_dma;
900                 /* setup scatter descriptor */
901                 sd->ctl.done = 0;
902                 sd->ctl.rdy = 1;
                /* sd->ptr was already set up by crypto4xx_build_sdr() */
904                 if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
905                         nbytes -= PPC4XX_SD_BUFFER_SIZE;
906                 else
907                         nbytes = 0;
908                 while (nbytes) {
909                         sd_idx = get_next_sd(sd_idx);
910                         sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
911                         /* setup scatter descriptor */
912                         sd->ctl.done = 0;
913                         sd->ctl.rdy = 1;
914                         if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
915                                 nbytes -= PPC4XX_SD_BUFFER_SIZE;
916                         } else {
917                                 /*
918                                  * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
919                                  * which is more than nbytes, so done.
920                                  */
921                                 nbytes = 0;
922                         }
923                 }
924         }
925
926         pd->pd_ctl.w = PD_CTL_HOST_READY |
927                 ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) ||
928                  (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
929                         PD_CTL_HASH_FINAL : 0);
930         pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
931         pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
932
933         wmb();
934         /* write any value to push engine to read a pd */
935         writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
936         writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
937         return is_busy ? -EBUSY : -EINPROGRESS;
938 }
939
940 /**
941  * Algorithm Registration Functions
942  */
943 static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
944                                struct crypto4xx_ctx *ctx)
945 {
946         ctx->dev = amcc_alg->dev;
947         ctx->sa_in = NULL;
948         ctx->sa_out = NULL;
949         ctx->sa_len = 0;
950 }
951
952 static int crypto4xx_sk_init(struct crypto_skcipher *sk)
953 {
954         struct skcipher_alg *alg = crypto_skcipher_alg(sk);
955         struct crypto4xx_alg *amcc_alg;
956         struct crypto4xx_ctx *ctx =  crypto_skcipher_ctx(sk);
957
958         if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
959                 ctx->sw_cipher.cipher =
960                         crypto_alloc_skcipher(alg->base.cra_name, 0,
961                                               CRYPTO_ALG_NEED_FALLBACK |
962                                               CRYPTO_ALG_ASYNC);
963                 if (IS_ERR(ctx->sw_cipher.cipher))
964                         return PTR_ERR(ctx->sw_cipher.cipher);
965
966                 crypto_skcipher_set_reqsize(sk,
967                         sizeof(struct skcipher_request) + 32 +
968                         crypto_skcipher_reqsize(ctx->sw_cipher.cipher));
969         }
970
971         amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
972         crypto4xx_ctx_init(amcc_alg, ctx);
973         return 0;
974 }
975
976 static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
977 {
978         crypto4xx_free_sa(ctx);
979 }
980
981 static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
982 {
983         struct crypto4xx_ctx *ctx =  crypto_skcipher_ctx(sk);
984
985         crypto4xx_common_exit(ctx);
986         if (ctx->sw_cipher.cipher)
987                 crypto_free_skcipher(ctx->sw_cipher.cipher);
988 }
989
990 static int crypto4xx_aead_init(struct crypto_aead *tfm)
991 {
992         struct aead_alg *alg = crypto_aead_alg(tfm);
993         struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
994         struct crypto4xx_alg *amcc_alg;
995
996         ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
997                                                 CRYPTO_ALG_NEED_FALLBACK |
998                                                 CRYPTO_ALG_ASYNC);
999         if (IS_ERR(ctx->sw_cipher.aead))
1000                 return PTR_ERR(ctx->sw_cipher.aead);
1001
1002         amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
1003         crypto4xx_ctx_init(amcc_alg, ctx);
1004         crypto_aead_set_reqsize(tfm, max(sizeof(struct aead_request) + 32 +
1005                                 crypto_aead_reqsize(ctx->sw_cipher.aead),
1006                                 sizeof(struct crypto4xx_aead_reqctx)));
1007         return 0;
1008 }
1009
1010 static void crypto4xx_aead_exit(struct crypto_aead *tfm)
1011 {
1012         struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
1013
1014         crypto4xx_common_exit(ctx);
1015         crypto_free_aead(ctx->sw_cipher.aead);
1016 }
1017
1018 static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
1019                                   struct crypto4xx_alg_common *crypto_alg,
1020                                   int array_size)
1021 {
1022         struct crypto4xx_alg *alg;
1023         int i;
1024         int rc = 0;
1025
1026         for (i = 0; i < array_size; i++) {
1027                 alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
1028                 if (!alg)
1029                         return -ENOMEM;
1030
1031                 alg->alg = crypto_alg[i];
1032                 alg->dev = sec_dev;
1033
1034                 switch (alg->alg.type) {
1035                 case CRYPTO_ALG_TYPE_AEAD:
1036                         rc = crypto_register_aead(&alg->alg.u.aead);
1037                         break;
1038
1039                 case CRYPTO_ALG_TYPE_AHASH:
1040                         rc = crypto_register_ahash(&alg->alg.u.hash);
1041                         break;
1042
1043                 default:
1044                         rc = crypto_register_skcipher(&alg->alg.u.cipher);
1045                         break;
1046                 }
1047
1048                 if (rc)
1049                         kfree(alg);
1050                 else
1051                         list_add_tail(&alg->entry, &sec_dev->alg_list);
1052         }
1053
1054         return 0;
1055 }
1056
1057 static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
1058 {
1059         struct crypto4xx_alg *alg, *tmp;
1060
1061         list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
1062                 list_del(&alg->entry);
1063                 switch (alg->alg.type) {
1064                 case CRYPTO_ALG_TYPE_AHASH:
1065                         crypto_unregister_ahash(&alg->alg.u.hash);
1066                         break;
1067
1068                 case CRYPTO_ALG_TYPE_AEAD:
1069                         crypto_unregister_aead(&alg->alg.u.aead);
1070                         break;
1071
1072                 default:
1073                         crypto_unregister_skcipher(&alg->alg.u.cipher);
1074                 }
1075                 kfree(alg);
1076         }
1077 }
1078
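/*
 * Bottom half: walk the packet descriptor ring from the tail and
 * complete every descriptor the packet engine has marked done,
 * stopping at the first descriptor that is still in flight.
 */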
1079 static void crypto4xx_bh_tasklet_cb(unsigned long data)
1080 {
1081         struct device *dev = (struct device *)data;
1082         struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1083         struct pd_uinfo *pd_uinfo;
1084         struct ce_pd *pd;
1085         u32 tail = core_dev->dev->pdr_tail;
1086         u32 head = core_dev->dev->pdr_head;
1087
1088         do {
1089                 pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
1090                 pd = &core_dev->dev->pdr[tail];
1091                 if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
1092                      ((READ_ONCE(pd->pd_ctl.w) &
1093                        (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
1094                        PD_CTL_PE_DONE)) {
1095                         crypto4xx_pd_done(core_dev->dev, tail);
1096                         tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
1097                 } else {
1098                         /* if tail not done, break */
1099                         break;
1100                 }
1101         } while (head != tail);
1102 }
1103
/**
 * Top half of the interrupt service routine.
 */
1107 static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
1108                                                       u32 clr_val)
1109 {
1110         struct device *dev = (struct device *)data;
1111         struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1112
1113         writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
1114         tasklet_schedule(&core_dev->tasklet);
1115
1116         return IRQ_HANDLED;
1117 }
1118
1119 static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
1120 {
1121         return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
1122 }
1123
1124 static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
1125 {
1126         return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
1127                 PPC4XX_TMO_ERR_INT);
1128 }
1129
1130 /**
1131  * Supported Crypto Algorithms
1132  */
1133 static struct crypto4xx_alg_common crypto4xx_alg[] = {
1134         /* Crypto AES modes */
1135         { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1136                 .base = {
1137                         .cra_name = "cbc(aes)",
1138                         .cra_driver_name = "cbc-aes-ppc4xx",
1139                         .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1140                         .cra_flags = CRYPTO_ALG_ASYNC |
1141                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1142                         .cra_blocksize = AES_BLOCK_SIZE,
1143                         .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1144                         .cra_module = THIS_MODULE,
1145                 },
1146                 .min_keysize = AES_MIN_KEY_SIZE,
1147                 .max_keysize = AES_MAX_KEY_SIZE,
1148                 .ivsize = AES_IV_SIZE,
1149                 .setkey = crypto4xx_setkey_aes_cbc,
1150                 .encrypt = crypto4xx_encrypt_iv_block,
1151                 .decrypt = crypto4xx_decrypt_iv_block,
1152                 .init = crypto4xx_sk_init,
1153                 .exit = crypto4xx_sk_exit,
1154         } },
1155         { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1156                 .base = {
1157                         .cra_name = "cfb(aes)",
1158                         .cra_driver_name = "cfb-aes-ppc4xx",
1159                         .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1160                         .cra_flags = CRYPTO_ALG_ASYNC |
1161                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1162                         .cra_blocksize = AES_BLOCK_SIZE,
1163                         .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1164                         .cra_module = THIS_MODULE,
1165                 },
1166                 .min_keysize = AES_MIN_KEY_SIZE,
1167                 .max_keysize = AES_MAX_KEY_SIZE,
1168                 .ivsize = AES_IV_SIZE,
1169                 .setkey = crypto4xx_setkey_aes_cfb,
1170                 .encrypt = crypto4xx_encrypt_iv_stream,
1171                 .decrypt = crypto4xx_decrypt_iv_stream,
1172                 .init = crypto4xx_sk_init,
1173                 .exit = crypto4xx_sk_exit,
1174         } },
1175         { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1176                 .base = {
1177                         .cra_name = "ctr(aes)",
1178                         .cra_driver_name = "ctr-aes-ppc4xx",
1179                         .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1180                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
1181                                 CRYPTO_ALG_ASYNC |
1182                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1183                         .cra_blocksize = 1,
1184                         .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1185                         .cra_module = THIS_MODULE,
1186                 },
1187                 .min_keysize = AES_MIN_KEY_SIZE,
1188                 .max_keysize = AES_MAX_KEY_SIZE,
1189                 .ivsize = AES_IV_SIZE,
1190                 .setkey = crypto4xx_setkey_aes_ctr,
1191                 .encrypt = crypto4xx_encrypt_ctr,
1192                 .decrypt = crypto4xx_decrypt_ctr,
1193                 .init = crypto4xx_sk_init,
1194                 .exit = crypto4xx_sk_exit,
1195         } },
1196         { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1197                 .base = {
1198                         .cra_name = "rfc3686(ctr(aes))",
1199                         .cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
1200                         .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1201                         .cra_flags = CRYPTO_ALG_ASYNC |
1202                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1203                         .cra_blocksize = 1,
1204                         .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1205                         .cra_module = THIS_MODULE,
1206                 },
1207                 .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
1208                 .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
1209                 .ivsize = CTR_RFC3686_IV_SIZE,
1210                 .setkey = crypto4xx_setkey_rfc3686,
1211                 .encrypt = crypto4xx_rfc3686_encrypt,
1212                 .decrypt = crypto4xx_rfc3686_decrypt,
1213                 .init = crypto4xx_sk_init,
1214                 .exit = crypto4xx_sk_exit,
1215         } },
1216         { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1217                 .base = {
1218                         .cra_name = "ecb(aes)",
1219                         .cra_driver_name = "ecb-aes-ppc4xx",
1220                         .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1221                         .cra_flags = CRYPTO_ALG_ASYNC |
1222                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1223                         .cra_blocksize = 1,
1224                         .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1225                         .cra_module = THIS_MODULE,
1226                 },
1227                 .min_keysize = AES_MIN_KEY_SIZE,
1228                 .max_keysize = AES_MAX_KEY_SIZE,
1229                 .setkey = crypto4xx_setkey_aes_ecb,
1230                 .encrypt = crypto4xx_encrypt_noiv_block,
1231                 .decrypt = crypto4xx_decrypt_noiv_block,
1232                 .init = crypto4xx_sk_init,
1233                 .exit = crypto4xx_sk_exit,
1234         } },
1235         { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1236                 .base = {
1237                         .cra_name = "ofb(aes)",
1238                         .cra_driver_name = "ofb-aes-ppc4xx",
1239                         .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1240                         .cra_flags = CRYPTO_ALG_ASYNC |
1241                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1242                         .cra_blocksize = 1,
1243                         .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1244                         .cra_module = THIS_MODULE,
1245                 },
1246                 .min_keysize = AES_MIN_KEY_SIZE,
1247                 .max_keysize = AES_MAX_KEY_SIZE,
1248                 .ivsize = AES_IV_SIZE,
1249                 .setkey = crypto4xx_setkey_aes_ofb,
1250                 .encrypt = crypto4xx_encrypt_iv_stream,
1251                 .decrypt = crypto4xx_decrypt_iv_stream,
1252                 .init = crypto4xx_sk_init,
1253                 .exit = crypto4xx_sk_exit,
1254         } },
1255
1256         /* AEAD */
1257         { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
1258                 .setkey         = crypto4xx_setkey_aes_ccm,
1259                 .setauthsize    = crypto4xx_setauthsize_aead,
1260                 .encrypt        = crypto4xx_encrypt_aes_ccm,
1261                 .decrypt        = crypto4xx_decrypt_aes_ccm,
1262                 .init           = crypto4xx_aead_init,
1263                 .exit           = crypto4xx_aead_exit,
1264                 .ivsize         = AES_BLOCK_SIZE,
1265                 .maxauthsize    = 16,
1266                 .base = {
1267                         .cra_name       = "ccm(aes)",
1268                         .cra_driver_name = "ccm-aes-ppc4xx",
1269                         .cra_priority   = CRYPTO4XX_CRYPTO_PRIORITY,
1270                         .cra_flags      = CRYPTO_ALG_ASYNC |
1271                                           CRYPTO_ALG_NEED_FALLBACK |
1272                                           CRYPTO_ALG_KERN_DRIVER_ONLY,
1273                         .cra_blocksize  = 1,
1274                         .cra_ctxsize    = sizeof(struct crypto4xx_ctx),
1275                         .cra_module     = THIS_MODULE,
1276                 },
1277         } },
1278         { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
1279                 .setkey         = crypto4xx_setkey_aes_gcm,
1280                 .setauthsize    = crypto4xx_setauthsize_aead,
1281                 .encrypt        = crypto4xx_encrypt_aes_gcm,
1282                 .decrypt        = crypto4xx_decrypt_aes_gcm,
1283                 .init           = crypto4xx_aead_init,
1284                 .exit           = crypto4xx_aead_exit,
1285                 .ivsize         = GCM_AES_IV_SIZE,
1286                 .maxauthsize    = 16,
1287                 .base = {
1288                         .cra_name       = "gcm(aes)",
1289                         .cra_driver_name = "gcm-aes-ppc4xx",
1290                         .cra_priority   = CRYPTO4XX_CRYPTO_PRIORITY,
1291                         .cra_flags      = CRYPTO_ALG_ASYNC |
1292                                           CRYPTO_ALG_NEED_FALLBACK |
1293                                           CRYPTO_ALG_KERN_DRIVER_ONLY,
1294                         .cra_blocksize  = 1,
1295                         .cra_ctxsize    = sizeof(struct crypto4xx_ctx),
1296                         .cra_module     = THIS_MODULE,
1297                 },
1298         } },
1299 };
1300
1301 /**
1302  * Module Initialization Routine
1303  */
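/*
 * Probe: reset the crypto core through the SDR0 registers, allocate
 * the core/device structures and descriptor rings, map the register
 * space, hook up the interrupt, program the hardware and finally
 * register the algorithms and the TRNG.
 */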
1304 static int crypto4xx_probe(struct platform_device *ofdev)
1305 {
1306         int rc;
1307         struct resource res;
1308         struct device *dev = &ofdev->dev;
1309         struct crypto4xx_core_device *core_dev;
1310         u32 pvr;
1311         bool is_revb = true;
1312
1313         rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
1314         if (rc)
1315                 return -ENODEV;
1316
1317         if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
1318                 mtdcri(SDR0, PPC460EX_SDR0_SRST,
1319                        mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
1320                 mtdcri(SDR0, PPC460EX_SDR0_SRST,
1321                        mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
1322         } else if (of_find_compatible_node(NULL, NULL,
1323                         "amcc,ppc405ex-crypto")) {
1324                 mtdcri(SDR0, PPC405EX_SDR0_SRST,
1325                        mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
1326                 mtdcri(SDR0, PPC405EX_SDR0_SRST,
1327                        mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
1328                 is_revb = false;
1329         } else if (of_find_compatible_node(NULL, NULL,
1330                         "amcc,ppc460sx-crypto")) {
1331                 mtdcri(SDR0, PPC460SX_SDR0_SRST,
1332                        mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
1333                 mtdcri(SDR0, PPC460SX_SDR0_SRST,
1334                        mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
1335         } else {
1336                 printk(KERN_ERR "Crypto Function Not supported!\n");
1337                 return -EINVAL;
1338         }
1339
1340         core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
1341         if (!core_dev)
1342                 return -ENOMEM;
1343
1344         dev_set_drvdata(dev, core_dev);
1345         core_dev->ofdev = ofdev;
1346         core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
1347         rc = -ENOMEM;
1348         if (!core_dev->dev)
1349                 goto err_alloc_dev;
1350
1351         /*
         * Older versions of the 460EX/GT have a hardware bug and
         * hence do not support H/W based security interrupt coalescing.
1354          */
1355         pvr = mfspr(SPRN_PVR);
1356         if (is_revb && ((pvr >> 4) == 0x130218A)) {
1357                 u32 min = PVR_MIN(pvr);
1358
1359                 if (min < 4) {
1360                         dev_info(dev, "RevA detected - disable interrupt coalescing\n");
1361                         is_revb = false;
1362                 }
1363         }
1364
1365         core_dev->dev->core_dev = core_dev;
1366         core_dev->dev->is_revb = is_revb;
1367         core_dev->device = dev;
1368         spin_lock_init(&core_dev->lock);
1369         INIT_LIST_HEAD(&core_dev->dev->alg_list);
1370         ratelimit_default_init(&core_dev->dev->aead_ratelimit);
1371         rc = crypto4xx_build_pdr(core_dev->dev);
1372         if (rc)
1373                 goto err_build_pdr;
1374
1375         rc = crypto4xx_build_gdr(core_dev->dev);
1376         if (rc)
1377                 goto err_build_pdr;
1378
1379         rc = crypto4xx_build_sdr(core_dev->dev);
1380         if (rc)
1381                 goto err_build_sdr;
1382
1383         /* Init tasklet for bottom half processing */
1384         tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
1385                      (unsigned long) dev);
1386
1387         core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
1388         if (!core_dev->dev->ce_base) {
1389                 dev_err(dev, "failed to of_iomap\n");
1390                 rc = -ENOMEM;
1391                 goto err_iomap;
1392         }
1393
1394         /* Register for Crypto isr, Crypto Engine IRQ */
1395         core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
1396         rc = request_irq(core_dev->irq, is_revb ?
1397                          crypto4xx_ce_interrupt_handler_revb :
1398                          crypto4xx_ce_interrupt_handler, 0,
1399                          KBUILD_MODNAME, dev);
1400         if (rc)
1401                 goto err_request_irq;
1402
1403         /* need to setup pdr, rdr, gdr and sdr before this */
1404         crypto4xx_hw_init(core_dev->dev);
1405
1406         /* Register security algorithms with Linux CryptoAPI */
1407         rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
1408                                ARRAY_SIZE(crypto4xx_alg));
1409         if (rc)
1410                 goto err_start_dev;
1411
1412         ppc4xx_trng_probe(core_dev);
1413         return 0;
1414
1415 err_start_dev:
1416         free_irq(core_dev->irq, dev);
1417 err_request_irq:
1418         irq_dispose_mapping(core_dev->irq);
1419         iounmap(core_dev->dev->ce_base);
1420 err_iomap:
1421         tasklet_kill(&core_dev->tasklet);
1422 err_build_sdr:
1423         crypto4xx_destroy_sdr(core_dev->dev);
1424         crypto4xx_destroy_gdr(core_dev->dev);
1425 err_build_pdr:
1426         crypto4xx_destroy_pdr(core_dev->dev);
1427         kfree(core_dev->dev);
1428 err_alloc_dev:
1429         kfree(core_dev);
1430
1431         return rc;
1432 }
1433
1434 static int crypto4xx_remove(struct platform_device *ofdev)
1435 {
1436         struct device *dev = &ofdev->dev;
1437         struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1438
1439         ppc4xx_trng_remove(core_dev);
1440
1441         free_irq(core_dev->irq, dev);
1442         irq_dispose_mapping(core_dev->irq);
1443
1444         tasklet_kill(&core_dev->tasklet);
1445         /* Un-register with Linux CryptoAPI */
1446         crypto4xx_unregister_alg(core_dev->dev);
1447         /* Free all allocated memory */
1448         crypto4xx_stop_all(core_dev);
1449
1450         return 0;
1451 }
1452
1453 static const struct of_device_id crypto4xx_match[] = {
1454         { .compatible      = "amcc,ppc4xx-crypto",},
1455         { },
1456 };
1457 MODULE_DEVICE_TABLE(of, crypto4xx_match);
1458
1459 static struct platform_driver crypto4xx_driver = {
1460         .driver = {
1461                 .name = KBUILD_MODNAME,
1462                 .of_match_table = crypto4xx_match,
1463         },
1464         .probe          = crypto4xx_probe,
1465         .remove         = crypto4xx_remove,
1466 };
1467
1468 module_platform_driver(crypto4xx_driver);
1469
1470 MODULE_LICENSE("GPL");
1471 MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
1472 MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");