GNU Linux-libre 4.14.262-gnu1
releases.git: drivers/crypto/chelsio/chcr_algo.c
1 /*
2  * This file is part of the Chelsio T6 Crypto driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * Written and Maintained by:
35  *      Manoj Malviya (manojmalviya@chelsio.com)
36  *      Atul Gupta (atul.gupta@chelsio.com)
37  *      Jitendra Lulla (jlulla@chelsio.com)
38  *      Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39  *      Harsh Jain (harsh@chelsio.com)
40  */
41
42 #define pr_fmt(fmt) "chcr:" fmt
43
44 #include <linux/kernel.h>
45 #include <linux/module.h>
46 #include <linux/crypto.h>
47 #include <linux/cryptohash.h>
48 #include <linux/skbuff.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/highmem.h>
51 #include <linux/scatterlist.h>
52
53 #include <crypto/aes.h>
54 #include <crypto/algapi.h>
55 #include <crypto/hash.h>
56 #include <crypto/sha.h>
57 #include <crypto/authenc.h>
58 #include <crypto/ctr.h>
59 #include <crypto/gf128mul.h>
60 #include <crypto/internal/aead.h>
61 #include <crypto/null.h>
62 #include <crypto/internal/skcipher.h>
63 #include <crypto/aead.h>
64 #include <crypto/scatterwalk.h>
65 #include <crypto/internal/hash.h>
66
67 #include "t4fw_api.h"
68 #include "t4_msg.h"
69 #include "chcr_core.h"
70 #include "chcr_algo.h"
71 #include "chcr_crypto.h"
72
73 static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
74 {
75         return ctx->crypto_ctx->aeadctx;
76 }
77
78 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
79 {
80         return ctx->crypto_ctx->ablkctx;
81 }
82
83 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
84 {
85         return ctx->crypto_ctx->hmacctx;
86 }
87
88 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
89 {
90         return gctx->ctx->gcm;
91 }
92
93 static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
94 {
95         return gctx->ctx->authenc;
96 }
97
98 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
99 {
100         return ctx->dev->u_ctx;
101 }
102
103 static inline int is_ofld_imm(const struct sk_buff *skb)
104 {
105         return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
106 }
107
108 /*
109  *      sgl_len - calculates the size of an SGL of the given capacity
110  *      @n: the number of SGL entries
111  *      Calculates the number of flits needed for a scatter/gather list that
112  *      can hold the given number of entries.
113  */
114 static inline unsigned int sgl_len(unsigned int n)
115 {
116         n--;
117         return (3 * n) / 2 + (n & 1) + 2;
118 }
119
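/*
 *      chcr_verify_tag - verify the authentication tag in software
 *      Compares the tag that follows the cpl_fw6_pld header against
 *      fw6_pld->data[2] (GCM/RFC4106) or against the tag bytes at the end
 *      of the source scatterlist, and sets *err to -EBADMSG on mismatch.
 */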
120 static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
121 {
122         u8 temp[SHA512_DIGEST_SIZE];
123         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
124         int authsize = crypto_aead_authsize(tfm);
125         struct cpl_fw6_pld *fw6_pld;
126         int cmp = 0;
127
128         fw6_pld = (struct cpl_fw6_pld *)input;
129         if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
130             (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
131                 cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
132         } else {
133
134                 sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
135                                 authsize, req->assoclen +
136                                 req->cryptlen - authsize);
137                 cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
138         }
139         if (cmp)
140                 *err = -EBADMSG;
141         else
142                 *err = 0;
143 }
144
145 /*
146  *      chcr_handle_resp - unmap the DMA buffers and complete the crypto request
147  *      @req: crypto request
148  */
149 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
150                          int err)
151 {
152         struct crypto_tfm *tfm = req->tfm;
153         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
154         struct uld_ctx *u_ctx = ULD_CTX(ctx);
155         struct chcr_req_ctx ctx_req;
156         unsigned int digestsize, updated_digestsize;
157         struct adapter *adap = padap(ctx->dev);
158
159         switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
160         case CRYPTO_ALG_TYPE_AEAD:
161                 ctx_req.req.aead_req = aead_request_cast(req);
162                 ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
163                 dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
164                              ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
165                 if (ctx_req.ctx.reqctx->skb) {
166                         kfree_skb(ctx_req.ctx.reqctx->skb);
167                         ctx_req.ctx.reqctx->skb = NULL;
168                 }
169                 free_new_sg(ctx_req.ctx.reqctx->newdstsg);
170                 ctx_req.ctx.reqctx->newdstsg = NULL;
171                 if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
172                         chcr_verify_tag(ctx_req.req.aead_req, input,
173                                         &err);
174                         ctx_req.ctx.reqctx->verify = VERIFY_HW;
175                 }
176                 ctx_req.req.aead_req->base.complete(req, err);
177                 break;
178
179         case CRYPTO_ALG_TYPE_ABLKCIPHER:
180                  err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
181                                                input, err);
182                 break;
183
184         case CRYPTO_ALG_TYPE_AHASH:
185                 ctx_req.req.ahash_req = ahash_request_cast(req);
186                 ctx_req.ctx.ahash_ctx =
187                         ahash_request_ctx(ctx_req.req.ahash_req);
188                 digestsize =
189                         crypto_ahash_digestsize(crypto_ahash_reqtfm(
190                                                         ctx_req.req.ahash_req));
191                 updated_digestsize = digestsize;
192                 if (digestsize == SHA224_DIGEST_SIZE)
193                         updated_digestsize = SHA256_DIGEST_SIZE;
194                 else if (digestsize == SHA384_DIGEST_SIZE)
195                         updated_digestsize = SHA512_DIGEST_SIZE;
196                 if (ctx_req.ctx.ahash_ctx->skb) {
197                         kfree_skb(ctx_req.ctx.ahash_ctx->skb);
198                         ctx_req.ctx.ahash_ctx->skb = NULL;
199                 }
200                 if (ctx_req.ctx.ahash_ctx->result == 1) {
201                         ctx_req.ctx.ahash_ctx->result = 0;
202                         memcpy(ctx_req.req.ahash_req->result, input +
203                                sizeof(struct cpl_fw6_pld),
204                                digestsize);
205                 } else {
206                         memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
207                                sizeof(struct cpl_fw6_pld),
208                                updated_digestsize);
209                 }
210                 ctx_req.req.ahash_req->base.complete(req, err);
211                 break;
212         }
213         atomic_inc(&adap->chcr_stats.complete);
214         return err;
215 }
216
217 /*
218  *      calc_tx_flits_ofld - calculate # of flits for an offload packet
219  *      @skb: the packet
220  *      Returns the number of flits needed for the given offload packet.
221  *      These packets are already fully constructed and no additional headers
222  *      will be added.
223  */
224 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
225 {
226         unsigned int flits, cnt;
227
228         if (is_ofld_imm(skb))
229                 return DIV_ROUND_UP(skb->len, 8);
230
231         flits = skb_transport_offset(skb) / 8;   /* headers */
232         cnt = skb_shinfo(skb)->nr_frags;
233         if (skb_tail_pointer(skb) != skb_transport_header(skb))
234                 cnt++;
235         return flits + sgl_len(cnt);
236 }
237
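/*
 *      get_aes_decrypt_key - derive the reverse-round key for AES decryption
 *      @dec_key: output buffer for the derived key words
 *      @key: AES encryption key
 *      @keylength: key length in bits (128/192/256)
 *      Expands the AES key schedule and writes out the last Nk round-key
 *      words in reverse order, for use as the decrypt-side key copy
 *      (ablkctx->rrkey).
 */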
238 static inline void get_aes_decrypt_key(unsigned char *dec_key,
239                                        const unsigned char *key,
240                                        unsigned int keylength)
241 {
242         u32 temp;
243         u32 w_ring[MAX_NK];
244         int i, j, k;
245         u8  nr, nk;
246
247         switch (keylength) {
248         case AES_KEYLENGTH_128BIT:
249                 nk = KEYLENGTH_4BYTES;
250                 nr = NUMBER_OF_ROUNDS_10;
251                 break;
252         case AES_KEYLENGTH_192BIT:
253                 nk = KEYLENGTH_6BYTES;
254                 nr = NUMBER_OF_ROUNDS_12;
255                 break;
256         case AES_KEYLENGTH_256BIT:
257                 nk = KEYLENGTH_8BYTES;
258                 nr = NUMBER_OF_ROUNDS_14;
259                 break;
260         default:
261                 return;
262         }
263         for (i = 0; i < nk; i++)
264                 w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
265
266         i = 0;
267         temp = w_ring[nk - 1];
268         while (i + nk < (nr + 1) * 4) {
269                 if (!(i % nk)) {
270                         /* RotWord(temp) */
271                         temp = (temp << 8) | (temp >> 24);
272                         temp = aes_ks_subword(temp);
273                         temp ^= round_constant[i / nk];
274                 } else if (nk == 8 && (i % 4 == 0)) {
275                         temp = aes_ks_subword(temp);
276                 }
277                 w_ring[i % nk] ^= temp;
278                 temp = w_ring[i % nk];
279                 i++;
280         }
281         i--;
282         for (k = 0, j = i % nk; k < nk; k++) {
283                 *((u32 *)dec_key + k) = htonl(w_ring[j]);
284                 j--;
285                 if (j < 0)
286                         j += nk;
287         }
288 }
289
290 static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
291 {
292         struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
293
294         switch (ds) {
295         case SHA1_DIGEST_SIZE:
296                 base_hash = crypto_alloc_shash("sha1", 0, 0);
297                 break;
298         case SHA224_DIGEST_SIZE:
299                 base_hash = crypto_alloc_shash("sha224", 0, 0);
300                 break;
301         case SHA256_DIGEST_SIZE:
302                 base_hash = crypto_alloc_shash("sha256", 0, 0);
303                 break;
304         case SHA384_DIGEST_SIZE:
305                 base_hash = crypto_alloc_shash("sha384", 0, 0);
306                 break;
307         case SHA512_DIGEST_SIZE:
308                 base_hash = crypto_alloc_shash("sha512", 0, 0);
309                 break;
310         }
311
312         return base_hash;
313 }
314
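/*
 *      chcr_compute_partial_hash - hash a single ipad/opad block and export
 *      the intermediate state words into @result_hash; used to precompute
 *      the HMAC inner and outer partial hashes.
 */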
315 static int chcr_compute_partial_hash(struct shash_desc *desc,
316                                      char *iopad, char *result_hash,
317                                      int digest_size)
318 {
319         struct sha1_state sha1_st;
320         struct sha256_state sha256_st;
321         struct sha512_state sha512_st;
322         int error;
323
324         if (digest_size == SHA1_DIGEST_SIZE) {
325                 error = crypto_shash_init(desc) ?:
326                         crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
327                         crypto_shash_export(desc, (void *)&sha1_st);
328                 memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
329         } else if (digest_size == SHA224_DIGEST_SIZE) {
330                 error = crypto_shash_init(desc) ?:
331                         crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
332                         crypto_shash_export(desc, (void *)&sha256_st);
333                 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
334
335         } else if (digest_size == SHA256_DIGEST_SIZE) {
336                 error = crypto_shash_init(desc) ?:
337                         crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
338                         crypto_shash_export(desc, (void *)&sha256_st);
339                 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
340
341         } else if (digest_size == SHA384_DIGEST_SIZE) {
342                 error = crypto_shash_init(desc) ?:
343                         crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
344                         crypto_shash_export(desc, (void *)&sha512_st);
345                 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
346
347         } else if (digest_size == SHA512_DIGEST_SIZE) {
348                 error = crypto_shash_init(desc) ?:
349                         crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
350                         crypto_shash_export(desc, (void *)&sha512_st);
351                 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
352         } else {
353                 error = -EINVAL;
354                 pr_err("Unknown digest size %d\n", digest_size);
355         }
356         return error;
357 }
358
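/*
 *      chcr_change_order - convert the digest words to big-endian byte order
 *      (64-bit words for SHA-512 sized digests, 32-bit words otherwise).
 */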
359 static void chcr_change_order(char *buf, int ds)
360 {
361         int i;
362
363         if (ds == SHA512_DIGEST_SIZE) {
364                 for (i = 0; i < (ds / sizeof(u64)); i++)
365                         *((__be64 *)buf + i) =
366                                 cpu_to_be64(*((u64 *)buf + i));
367         } else {
368                 for (i = 0; i < (ds / sizeof(u32)); i++)
369                         *((__be32 *)buf + i) =
370                                 cpu_to_be32(*((u32 *)buf + i));
371         }
372 }
373
374 static inline int is_hmac(struct crypto_tfm *tfm)
375 {
376         struct crypto_alg *alg = tfm->__crt_alg;
377         struct chcr_alg_template *chcr_crypto_alg =
378                 container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
379                              alg.hash);
380         if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
381                 return 1;
382         return 0;
383 }
384
385 static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
386                            struct scatterlist *sg,
387                            struct phys_sge_parm *sg_param,
388                            int pci_chan_id)
389 {
390         struct phys_sge_pairs *to;
391         unsigned int len = 0, left_size = sg_param->obsize;
392         unsigned int nents = sg_param->nents, i, j = 0;
393
394         phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
395                                     | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
396         phys_cpl->pcirlxorder_to_noofsgentr =
397                 htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
398                       CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
399                       CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
400                       CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
401                       CPL_RX_PHYS_DSGL_DCAID_V(0) |
402                       CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
403         phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
404         phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
405         phys_cpl->rss_hdr_int.hash_val = 0;
406         phys_cpl->rss_hdr_int.channel = pci_chan_id;
407         to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
408                                        sizeof(struct cpl_rx_phys_dsgl));
409         for (i = 0; nents && left_size; to++) {
410                 for (j = 0; j < 8 && nents && left_size; j++, nents--) {
411                         len = min(left_size, sg_dma_len(sg));
412                         to->len[j] = htons(len);
413                         to->addr[j] = cpu_to_be64(sg_dma_address(sg));
414                         left_size -= len;
415                         sg = sg_next(sg);
416                 }
417         }
418 }
419
420 static inline int map_writesg_phys_cpl(struct device *dev,
421                                         struct cpl_rx_phys_dsgl *phys_cpl,
422                                         struct scatterlist *sg,
423                                         struct phys_sge_parm *sg_param,
424                                         int pci_chan_id)
425 {
426         if (!sg || !sg_param->nents)
427                 return -EINVAL;
428
429         sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
430         if (sg_param->nents == 0) {
431                 pr_err("CHCR : DMA mapping failed\n");
432                 return -EINVAL;
433         }
434         write_phys_cpl(phys_cpl, sg, sg_param, pci_chan_id);
435         return 0;
436 }
437
438 static inline int get_aead_subtype(struct crypto_aead *aead)
439 {
440         struct aead_alg *alg = crypto_aead_alg(aead);
441         struct chcr_alg_template *chcr_crypto_alg =
442                 container_of(alg, struct chcr_alg_template, alg.aead);
443         return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
444 }
445
446 static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
447 {
448         struct crypto_alg *alg = tfm->__crt_alg;
449         struct chcr_alg_template *chcr_crypto_alg =
450                 container_of(alg, struct chcr_alg_template, alg.crypto);
451
452         return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
453 }
454
455 static inline void write_buffer_to_skb(struct sk_buff *skb,
456                                         unsigned int *frags,
457                                         char *bfr,
458                                         u8 bfr_len)
459 {
460         skb->len += bfr_len;
461         skb->data_len += bfr_len;
462         skb->truesize += bfr_len;
463         get_page(virt_to_page(bfr));
464         skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
465                            offset_in_page(bfr), bfr_len);
466         (*frags)++;
467 }
468
469
470 static inline void
471 write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
472                         struct scatterlist *sg, unsigned int count)
473 {
474         struct page *spage;
475         unsigned int page_len;
476
477         skb->len += count;
478         skb->data_len += count;
479         skb->truesize += count;
480
481         while (count > 0) {
482                 if (!sg || (!(sg->length)))
483                         break;
484                 spage = sg_page(sg);
485                 get_page(spage);
486                 page_len = min(sg->length, count);
487                 skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
488                 (*frags)++;
489                 count -= page_len;
490                 sg = sg_next(sg);
491         }
492 }
493
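/*
 *      cxgb4_is_crypto_q_full - check whether crypto ULD txq @idx is full
 *      Returns -1 if the queue is marked full, 0 otherwise; the check is
 *      done under the queue's sendq lock with bottom halves disabled.
 */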
494 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
495 {
496         struct adapter *adap = netdev2adap(dev);
497         struct sge_uld_txq_info *txq_info =
498                 adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
499         struct sge_uld_txq *txq;
500         int ret = 0;
501
502         local_bh_disable();
503         txq = &txq_info->uldtxq[idx];
504         spin_lock(&txq->sendq.lock);
505         if (txq->full)
506                 ret = -1;
507         spin_unlock(&txq->sendq.lock);
508         local_bh_enable();
509         return ret;
510 }
511
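/*
 *      generate_copy_rrkey - copy the decrypt key material into @key_ctx
 *      For CBC the precomputed reverse-round key is copied as-is; otherwise
 *      the second half of the cipher key is placed first, followed by the
 *      precomputed reverse-round key.
 */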
512 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
513                                struct _key_ctx *key_ctx)
514 {
515         if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
516                 memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
517         } else {
518                 memcpy(key_ctx->key,
519                        ablkctx->key + (ablkctx->enckey_len >> 1),
520                        ablkctx->enckey_len >> 1);
521                 memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
522                        ablkctx->rrkey, ablkctx->enckey_len >> 1);
523         }
524         return 0;
525 }
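/*
 *      chcr_sg_ent_in_wr - find how much data fits into one work request
 *      Walks @src and @dst and counts how many source and destination SG
 *      entries (and bytes) can be carried within @space, subject to
 *      MAX_SKB_FRAGS and MAX_DSGL_ENT. Returns the usable byte count and
 *      stores the entry counts in @sent and @dent.
 */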
526 static int chcr_sg_ent_in_wr(struct scatterlist *src,
527                              struct scatterlist *dst,
528                              unsigned int minsg,
529                              unsigned int space,
530                              short int *sent,
531                              short int *dent)
532 {
533         int srclen = 0, dstlen = 0;
534         int srcsg = minsg, dstsg = 0;
535
536         *sent = 0;
537         *dent = 0;
538         while (src && dst && ((srcsg + 1) <= MAX_SKB_FRAGS) &&
539                space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
540                 srclen += src->length;
541                 srcsg++;
542                 while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
543                        space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
544                         if (srclen <= dstlen)
545                                 break;
546                         dstlen += dst->length;
547                         dst = sg_next(dst);
548                         dstsg++;
549                 }
550                 src = sg_next(src);
551         }
552         *sent = srcsg - minsg;
553         *dent = dstsg;
554         return min(srclen, dstlen);
555 }
556
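/*
 *      chcr_cipher_fallback - run a cipher request through the software
 *      fallback skcipher on the stack, used when the hardware path cannot
 *      make progress (e.g. no full block fits in the work request).
 */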
557 static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
558                                 u32 flags,
559                                 struct scatterlist *src,
560                                 struct scatterlist *dst,
561                                 unsigned int nbytes,
562                                 u8 *iv,
563                                 unsigned short op_type)
564 {
565         int err;
566
567         SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
568         skcipher_request_set_tfm(subreq, cipher);
569         skcipher_request_set_callback(subreq, flags, NULL, NULL);
570         skcipher_request_set_crypt(subreq, src, dst,
571                                    nbytes, iv);
572
573         err = op_type ? crypto_skcipher_decrypt(subreq) :
574                 crypto_skcipher_encrypt(subreq);
575         skcipher_request_zero(subreq);
576
577         return err;
578
579 }
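/*
 *      create_wreq - fill the common FW_CRYPTO_LOOKASIDE_WR and ULPTX fields
 *      Sets the immediate data length, key context size, WR length in
 *      16-byte units, cookie, response queue id and IV location used when
 *      building the crypto work requests.
 */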
580 static inline void create_wreq(struct chcr_context *ctx,
581                                struct chcr_wr *chcr_req,
582                                void *req, struct sk_buff *skb,
583                                int kctx_len, int hash_sz,
584                                int is_iv,
585                                unsigned int sc_len,
586                                unsigned int lcb)
587 {
588         struct uld_ctx *u_ctx = ULD_CTX(ctx);
589         int iv_loc = IV_DSGL;
590         int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
591         unsigned int immdatalen = 0, nr_frags = 0;
592
593         if (is_ofld_imm(skb)) {
594                 immdatalen = skb->data_len;
595                 iv_loc = IV_IMMEDIATE;
596         } else {
597                 nr_frags = skb_shinfo(skb)->nr_frags;
598         }
599
600         chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
601                                 ((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
602         chcr_req->wreq.pld_size_hash_size =
603                 htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
604                       FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
605         chcr_req->wreq.len16_pkd =
606                 htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
607                                     (calc_tx_flits_ofld(skb) * 8), 16)));
608         chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
609         chcr_req->wreq.rx_chid_to_rx_q_id =
610                 FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
611                                 is_iv ? iv_loc : IV_NOP, !!lcb,
612                                 ctx->tx_qidx);
613
614         chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
615                                                        qid);
616         chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
617                                         16) - ((sizeof(chcr_req->wreq)) >> 4)));
618
619         chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
620         chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
621                                    sizeof(chcr_req->key_ctx) +
622                                    kctx_len + sc_len + immdatalen);
623 }
624
625 /**
626  *      create_cipher_wr - form the WR for cipher operations
627  *      @wrparam: work request parameters: the cipher request, the source
628  *                scatterlist, the number of bytes covered by this WR and
629  *                the ingress qid where the response of this WR should be
630  *                received.
631  */
632 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
633 {
634         struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
635         struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
636         struct uld_ctx *u_ctx = ULD_CTX(ctx);
637         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
638         struct sk_buff *skb = NULL;
639         struct chcr_wr *chcr_req;
640         struct cpl_rx_phys_dsgl *phys_cpl;
641         struct chcr_blkcipher_req_ctx *reqctx =
642                 ablkcipher_request_ctx(wrparam->req);
643         struct phys_sge_parm sg_param;
644         unsigned int frags = 0, transhdr_len, phys_dsgl;
645         int error;
646         unsigned int ivsize = AES_BLOCK_SIZE, kctx_len;
647         gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
648                         GFP_KERNEL : GFP_ATOMIC;
649         struct adapter *adap = padap(ctx->dev);
650
651         phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
652
653         kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
654         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
655         skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
656         if (!skb) {
657                 error = -ENOMEM;
658                 goto err;
659         }
660         skb_reserve(skb, sizeof(struct sge_opaque_hdr));
661         chcr_req = __skb_put_zero(skb, transhdr_len);
662         chcr_req->sec_cpl.op_ivinsrtofst =
663                 FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1);
664
665         chcr_req->sec_cpl.pldlen = htonl(ivsize + wrparam->bytes);
666         chcr_req->sec_cpl.aadstart_cipherstop_hi =
667                         FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
668
669         chcr_req->sec_cpl.cipherstop_lo_authinsert =
670                         FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
671         chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
672                                                          ablkctx->ciph_mode,
673                                                          0, 0, ivsize >> 1);
674         chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
675                                                           0, 1, phys_dsgl);
676
677         chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
678         if ((reqctx->op == CHCR_DECRYPT_OP) &&
679             (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
680                CRYPTO_ALG_SUB_TYPE_CTR)) &&
681             (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
682                CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
683                 generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
684         } else {
685                 if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
686                     (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
687                         memcpy(chcr_req->key_ctx.key, ablkctx->key,
688                                ablkctx->enckey_len);
689                 } else {
690                         memcpy(chcr_req->key_ctx.key, ablkctx->key +
691                                (ablkctx->enckey_len >> 1),
692                                ablkctx->enckey_len >> 1);
693                         memcpy(chcr_req->key_ctx.key +
694                                (ablkctx->enckey_len >> 1),
695                                ablkctx->key,
696                                ablkctx->enckey_len >> 1);
697                 }
698         }
699         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
700         sg_param.nents = reqctx->dst_nents;
701         sg_param.obsize =  wrparam->bytes;
702         sg_param.qid = wrparam->qid;
703         error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
704                                        reqctx->dst, &sg_param,
705                                        ctx->pci_chan_id);
706         if (error)
707                 goto map_fail1;
708
709         skb_set_transport_header(skb, transhdr_len);
710         write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
711         write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes);
712         atomic_inc(&adap->chcr_stats.cipher_rqst);
713         create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, kctx_len, 0, 1,
714                         sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl,
715                         ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
716         reqctx->skb = skb;
717         skb_get(skb);
718         return skb;
719 map_fail1:
720         kfree_skb(skb);
721 err:
722         return ERR_PTR(error);
723 }
724
725 static inline int chcr_keyctx_ck_size(unsigned int keylen)
726 {
727         int ck_size = 0;
728
729         if (keylen == AES_KEYSIZE_128)
730                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
731         else if (keylen == AES_KEYSIZE_192)
732                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
733         else if (keylen == AES_KEYSIZE_256)
734                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
735         else
736                 ck_size = 0;
737
738         return ck_size;
739 }
740 static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
741                                        const u8 *key,
742                                        unsigned int keylen)
743 {
744         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
745         struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
746         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
747         int err = 0;
748
749         crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
750         crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
751                                   CRYPTO_TFM_REQ_MASK);
752         err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
753         tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
754         tfm->crt_flags |=
755                 crypto_skcipher_get_flags(ablkctx->sw_cipher) &
756                 CRYPTO_TFM_RES_MASK;
757         return err;
758 }
759
760 static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
761                                const u8 *key,
762                                unsigned int keylen)
763 {
764         struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
765         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
766         unsigned int ck_size, context_size;
767         u16 alignment = 0;
768         int err;
769
770         err = chcr_cipher_fallback_setkey(cipher, key, keylen);
771         if (err)
772                 goto badkey_err;
773
774         ck_size = chcr_keyctx_ck_size(keylen);
775         alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
776         memcpy(ablkctx->key, key, keylen);
777         ablkctx->enckey_len = keylen;
778         get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
779         context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
780                         keylen + alignment) >> 4;
781
782         ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
783                                                 0, 0, context_size);
784         ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
785         return 0;
786 badkey_err:
787         crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
788         ablkctx->enckey_len = 0;
789
790         return err;
791 }
792
793 static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
794                                    const u8 *key,
795                                    unsigned int keylen)
796 {
797         struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
798         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
799         unsigned int ck_size, context_size;
800         u16 alignment = 0;
801         int err;
802
803         err = chcr_cipher_fallback_setkey(cipher, key, keylen);
804         if (err)
805                 goto badkey_err;
806         ck_size = chcr_keyctx_ck_size(keylen);
807         alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
808         memcpy(ablkctx->key, key, keylen);
809         ablkctx->enckey_len = keylen;
810         context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
811                         keylen + alignment) >> 4;
812
813         ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
814                                                 0, 0, context_size);
815         ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
816
817         return 0;
818 badkey_err:
819         crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
820         ablkctx->enckey_len = 0;
821
822         return err;
823 }
824
825 static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
826                                    const u8 *key,
827                                    unsigned int keylen)
828 {
829         struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
830         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
831         unsigned int ck_size, context_size;
832         u16 alignment = 0;
833         int err;
834
835         if (keylen < CTR_RFC3686_NONCE_SIZE)
836                 return -EINVAL;
837         memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
838                CTR_RFC3686_NONCE_SIZE);
839
840         keylen -= CTR_RFC3686_NONCE_SIZE;
841         err = chcr_cipher_fallback_setkey(cipher, key, keylen);
842         if (err)
843                 goto badkey_err;
844
845         ck_size = chcr_keyctx_ck_size(keylen);
846         alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
847         memcpy(ablkctx->key, key, keylen);
848         ablkctx->enckey_len = keylen;
849         context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
850                         keylen + alignment) >> 4;
851
852         ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
853                                                 0, 0, context_size);
854         ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
855
856         return 0;
857 badkey_err:
858         crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
859         ablkctx->enckey_len = 0;
860
861         return err;
862 }
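/*
 *      ctr_add_iv - copy @srciv into @dstiv and add @add to the counter,
 *      treating the 16-byte IV as a big-endian integer and propagating the
 *      carry through the higher 32-bit words.
 */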
863 static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
864 {
865         unsigned int size = AES_BLOCK_SIZE;
866         __be32 *b = (__be32 *)(dstiv + size);
867         u32 c, prev;
868
869         memcpy(dstiv, srciv, AES_BLOCK_SIZE);
870         for (; size >= 4; size -= 4) {
871                 prev = be32_to_cpu(*--b);
872                 c = prev + add;
873                 *b = cpu_to_be32(c);
874                 if (prev < c)
875                         break;
876                 add = 1;
877         }
878
879 }
880
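/*
 *      adjust_ctr_overflow - clamp @bytes so that the 32-bit counter in the
 *      last word of @iv does not wrap within this work request.
 */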
881 static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
882 {
883         __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
884         u64 c;
885         u32 temp = be32_to_cpu(*--b);
886
887         temp = ~temp;
888         c = (u64)temp + 1; // number of blocks that can be processed without overflow
889         if ((bytes / AES_BLOCK_SIZE) > c)
890                 bytes = c * AES_BLOCK_SIZE;
891         return bytes;
892 }
893
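/*
 *      chcr_update_tweak - recompute the XTS tweak after partial processing
 *      Encrypts the original IV with the second half of the key, multiplies
 *      it by x in GF(2^128) once per processed AES block, then decrypts it
 *      back so it can be handed to the hardware as the IV for the remaining
 *      data.
 */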
894 static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
895 {
896         struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
897         struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
898         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
899         struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
900         struct crypto_cipher *cipher;
901         int ret, i;
902         u8 *key;
903         unsigned int keylen;
904
905         cipher = ablkctx->aes_generic;
906         memcpy(iv, req->info, AES_BLOCK_SIZE);
907
908         keylen = ablkctx->enckey_len / 2;
909         key = ablkctx->key + keylen;
910         ret = crypto_cipher_setkey(cipher, key, keylen);
911         if (ret)
912                 goto out;
913
914         crypto_cipher_encrypt_one(cipher, iv, iv);
915         for (i = 0; i < (reqctx->processed / AES_BLOCK_SIZE); i++)
916                 gf128mul_x_ble((le128 *)iv, (le128 *)iv);
917
918         crypto_cipher_decrypt_one(cipher, iv, iv);
919 out:
920         return ret;
921 }
922
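/*
 *      chcr_update_cipher_iv - compute the IV for the next chunk of a
 *      partially processed request: advance the CTR/RFC3686 counter by the
 *      number of blocks already processed, recompute the XTS tweak, or pick
 *      up the last ciphertext block for CBC.
 */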
923 static int chcr_update_cipher_iv(struct ablkcipher_request *req,
924                                    struct cpl_fw6_pld *fw6_pld, u8 *iv)
925 {
926         struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
927         struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
928         int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
929         int ret = 0;
930
931         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
932                 ctr_add_iv(iv, req->info, (reqctx->processed /
933                            AES_BLOCK_SIZE));
934         else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
935                 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
936                         CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
937                                                 AES_BLOCK_SIZE) + 1);
938         else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
939                 ret = chcr_update_tweak(req, iv);
940         else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
941                 if (reqctx->op)
942                         sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
943                                            16,
944                                            reqctx->processed - AES_BLOCK_SIZE);
945                 else
946                         memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
947         }
948
949         return ret;
950
951 }
952
953 /* We need a separate function for the final IV because in RFC 3686 the
954  * initial counter starts from 1 and the IV buffer is only 8 bytes, which
955  * remains constant for subsequent update requests
956  */
957
958 static int chcr_final_cipher_iv(struct ablkcipher_request *req,
959                                    struct cpl_fw6_pld *fw6_pld, u8 *iv)
960 {
961         struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
962         struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
963         int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
964         int ret = 0;
965
966         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
967                 ctr_add_iv(iv, req->info, (reqctx->processed /
968                            AES_BLOCK_SIZE));
969         else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
970                 ret = chcr_update_tweak(req, iv);
971         else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
972                 if (reqctx->op)
973                         sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
974                                            16,
975                                            reqctx->processed - AES_BLOCK_SIZE);
976                 else
977                         memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
978
979         }
980         return ret;
981
982 }
983
984
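/*
 *      chcr_handle_cipher_resp - completion handler for a cipher work request
 *      Unmaps the destination SG list and either completes the request (all
 *      bytes processed, an error, or a software fallback for the remainder)
 *      or updates the IV and sends a new work request for the remaining
 *      bytes.
 */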
985 static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
986                                    unsigned char *input, int err)
987 {
988         struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
989         struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
990         struct uld_ctx *u_ctx = ULD_CTX(ctx);
991         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
992         struct sk_buff *skb;
993         struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
994         struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
995         struct  cipher_wr_param wrparam;
996         int bytes;
997
998         dma_unmap_sg(&u_ctx->lldi.pdev->dev, reqctx->dst, reqctx->dst_nents,
999                      DMA_FROM_DEVICE);
1000
1001         if (reqctx->skb) {
1002                 kfree_skb(reqctx->skb);
1003                 reqctx->skb = NULL;
1004         }
1005         if (err)
1006                 goto complete;
1007
1008         if (req->nbytes == reqctx->processed) {
1009                 err = chcr_final_cipher_iv(req, fw6_pld, req->info);
1010                 goto complete;
1011         }
1012
1013         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1014                                             ctx->tx_qidx))) {
1015                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1016                         err = -EBUSY;
1017                         goto complete;
1018                 }
1019
1020         }
1021         wrparam.srcsg = scatterwalk_ffwd(reqctx->srcffwd, req->src,
1022                                        reqctx->processed);
1023         reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, reqctx->dstsg,
1024                                          reqctx->processed);
1025         if (!wrparam.srcsg || !reqctx->dst) {
1026                 pr_err("Input sg list length less than nbytes\n");
1027                 err = -EINVAL;
1028                 goto complete;
1029         }
1030         bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dst, 1,
1031                                  SPACE_LEFT(ablkctx->enckey_len),
1032                                  &wrparam.snent, &reqctx->dst_nents);
1033         if ((bytes + reqctx->processed) >= req->nbytes)
1034                 bytes  = req->nbytes - reqctx->processed;
1035         else
1036                 bytes = ROUND_16(bytes);
1037         err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1038         if (err)
1039                 goto complete;
1040
1041         if (unlikely(bytes == 0)) {
1042                 err = chcr_cipher_fallback(ablkctx->sw_cipher,
1043                                      req->base.flags,
1044                                      wrparam.srcsg,
1045                                      reqctx->dst,
1046                                      req->nbytes - reqctx->processed,
1047                                      reqctx->iv,
1048                                      reqctx->op);
1049                 goto complete;
1050         }
1051
1052         if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1053             CRYPTO_ALG_SUB_TYPE_CTR)
1054                 bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1055         reqctx->processed += bytes;
1056         wrparam.qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
1057         wrparam.req = req;
1058         wrparam.bytes = bytes;
1059         skb = create_cipher_wr(&wrparam);
1060         if (IS_ERR(skb)) {
1061                 pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
1062                 err = PTR_ERR(skb);
1063                 goto complete;
1064         }
1065         skb->dev = u_ctx->lldi.ports[0];
1066         set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1067         chcr_send_wr(skb);
1068         return 0;
1069 complete:
1070         free_new_sg(reqctx->newdstsg);
1071         reqctx->newdstsg = NULL;
1072         req->base.complete(&req->base, err);
1073         return err;
1074 }
1075
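/*
 *      process_cipher - validate a cipher request and build its first WR
 *      Checks the key, IV and length, sets up the destination SG list and
 *      the per-mode IV in the request context, determines how many bytes
 *      fit into one work request, and falls back to the software cipher
 *      when nothing fits.
 */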
1076 static int process_cipher(struct ablkcipher_request *req,
1077                                   unsigned short qid,
1078                                   struct sk_buff **skb,
1079                                   unsigned short op_type)
1080 {
1081         struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1082         unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
1083         struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1084         struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1085         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1086         struct  cipher_wr_param wrparam;
1087         int bytes, nents, err = -EINVAL;
1088
1089         reqctx->newdstsg = NULL;
1090         reqctx->processed = 0;
1091         if (!req->info)
1092                 goto error;
1093         if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1094             (req->nbytes == 0) ||
1095             (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
1096                 pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1097                        ablkctx->enckey_len, req->nbytes, ivsize);
1098                 goto error;
1099         }
1100         wrparam.srcsg = req->src;
1101         if (is_newsg(req->dst, &nents)) {
1102                 reqctx->newdstsg = alloc_new_sg(req->dst, nents);
1103                 if (IS_ERR(reqctx->newdstsg))
1104                         return PTR_ERR(reqctx->newdstsg);
1105                 reqctx->dstsg = reqctx->newdstsg;
1106         } else {
1107                 reqctx->dstsg = req->dst;
1108         }
1109         bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG,
1110                                  SPACE_LEFT(ablkctx->enckey_len),
1111                                  &wrparam.snent,
1112                                  &reqctx->dst_nents);
1113         if ((bytes + reqctx->processed) >= req->nbytes)
1114                 bytes  = req->nbytes - reqctx->processed;
1115         else
1116                 bytes = ROUND_16(bytes);
1117         if (unlikely(bytes > req->nbytes))
1118                 bytes = req->nbytes;
1119         if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1120                                   CRYPTO_ALG_SUB_TYPE_CTR) {
1121                 bytes = adjust_ctr_overflow(req->info, bytes);
1122         }
1123         if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1124             CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1125                 memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1126                 memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
1127                                 CTR_RFC3686_IV_SIZE);
1128
1129                 /* initialize counter portion of counter block */
1130                 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1131                         CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1132
1133         } else {
1134
1135                 memcpy(reqctx->iv, req->info, ivsize);
1136         }
1137         if (unlikely(bytes == 0)) {
1138                 err = chcr_cipher_fallback(ablkctx->sw_cipher,
1139                                            req->base.flags,
1140                                            req->src,
1141                                            req->dst,
1142                                            req->nbytes,
1143                                            req->info,
1144                                            op_type);
1145                 goto error;
1146         }
1147         reqctx->processed = bytes;
1148         reqctx->dst = reqctx->dstsg;
1149         reqctx->op = op_type;
1150         wrparam.qid = qid;
1151         wrparam.req = req;
1152         wrparam.bytes = bytes;
1153         *skb = create_cipher_wr(&wrparam);
1154         if (IS_ERR(*skb)) {
1155                 err = PTR_ERR(*skb);
1156                 goto error;
1157         }
1158
1159         return 0;
1160 error:
1161         free_new_sg(reqctx->newdstsg);
1162         reqctx->newdstsg = NULL;
1163         return err;
1164 }
1165
1166 static int chcr_aes_encrypt(struct ablkcipher_request *req)
1167 {
1168         struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1169         struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1170         struct sk_buff *skb = NULL;
1171         int err;
1172         struct uld_ctx *u_ctx = ULD_CTX(ctx);
1173
1174         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1175                                             ctx->tx_qidx))) {
1176                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1177                         return -EBUSY;
1178         }
1179
1180         err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
1181                                CHCR_ENCRYPT_OP);
1182         if (err || !skb)
1183                 return  err;
1184         skb->dev = u_ctx->lldi.ports[0];
1185         set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1186         chcr_send_wr(skb);
1187         return -EINPROGRESS;
1188 }
1189
1190 static int chcr_aes_decrypt(struct ablkcipher_request *req)
1191 {
1192         struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1193         struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1194         struct uld_ctx *u_ctx = ULD_CTX(ctx);
1195         struct sk_buff *skb = NULL;
1196         int err;
1197
1198         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1199                                             ctx->tx_qidx))) {
1200                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1201                         return -EBUSY;
1202         }
1203
1204          err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
1205                                CHCR_DECRYPT_OP);
1206         if (err || !skb)
1207                 return err;
1208         skb->dev = u_ctx->lldi.ports[0];
1209         set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1210         chcr_send_wr(skb);
1211         return -EINPROGRESS;
1212 }
1213
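/*
 *      chcr_device_init - bind the tfm context to a chcr device
 *      Assigns a device on first use and derives the rx/tx queue indices
 *      and PCI channel id for this context from the channel and the current
 *      CPU.
 */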
1214 static int chcr_device_init(struct chcr_context *ctx)
1215 {
1216         struct uld_ctx *u_ctx = NULL;
1217         struct adapter *adap;
1218         unsigned int id;
1219         int txq_perchan, txq_idx, ntxq;
1220         int err = 0, rxq_perchan, rxq_idx;
1221
1222         id = smp_processor_id();
1223         if (!ctx->dev) {
1224                 u_ctx = assign_chcr_device();
1225                 if (!u_ctx) {
1226                         pr_err("chcr device assignment fails\n");
1227                         goto out;
1228                 }
1229                 ctx->dev = u_ctx->dev;
1230                 adap = padap(ctx->dev);
1231                 ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
1232                                     adap->vres.ncrypto_fc);
1233                 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1234                 txq_perchan = ntxq / u_ctx->lldi.nchan;
1235                 spin_lock(&ctx->dev->lock_chcr_dev);
1236                 ctx->tx_chan_id = ctx->dev->tx_channel_id;
1237                 ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
1238                 ctx->dev->rx_channel_id = 0;
1239                 spin_unlock(&ctx->dev->lock_chcr_dev);
1240                 rxq_idx = ctx->tx_chan_id * rxq_perchan;
1241                 rxq_idx += id % rxq_perchan;
1242                 txq_idx = ctx->tx_chan_id * txq_perchan;
1243                 txq_idx += id % txq_perchan;
1244                 ctx->rx_qidx = rxq_idx;
1245                 ctx->tx_qidx = txq_idx;
1246                 /* Channel ID used by the SGE to forward the packet to the
1247                  * host. FW should use the same value in the cpl_fw6_pld
1248                  * RSS_CH field. The driver programs the PCI channel ID to
1249                  * be used by FW at queue allocation with value "pi->tx_chan".
1250                  */
1251                 ctx->pci_chan_id = txq_idx / txq_perchan;
1252         }
1253 out:
1254         return err;
1255 }
1256
1257 static int chcr_cra_init(struct crypto_tfm *tfm)
1258 {
1259         struct crypto_alg *alg = tfm->__crt_alg;
1260         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1261         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1262
1263         ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
1264                                 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1265         if (IS_ERR(ablkctx->sw_cipher)) {
1266                 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1267                 return PTR_ERR(ablkctx->sw_cipher);
1268         }
1269
1270         if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
1271                 /* To update tweak*/
1272                 ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
1273                 if (IS_ERR(ablkctx->aes_generic)) {
1274                         pr_err("failed to allocate aes cipher for tweak\n");
1275                         return PTR_ERR(ablkctx->aes_generic);
1276                 }
1277         } else
1278                 ablkctx->aes_generic = NULL;
1279
1280         tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
1281         return chcr_device_init(crypto_tfm_ctx(tfm));
1282 }
1283
1284 static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1285 {
1286         struct crypto_alg *alg = tfm->__crt_alg;
1287         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1288         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1289
1290         /* RFC 3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1291          * cannot be used as the fallback in chcr_handle_cipher_resp()
1292          */
1293         ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1294                                 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1295         if (IS_ERR(ablkctx->sw_cipher)) {
1296                 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1297                 return PTR_ERR(ablkctx->sw_cipher);
1298         }
1299         tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
1300         return chcr_device_init(crypto_tfm_ctx(tfm));
1301 }
1302
1303
1304 static void chcr_cra_exit(struct crypto_tfm *tfm)
1305 {
1306         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1307         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1308
1309         crypto_free_skcipher(ablkctx->sw_cipher);
1310         if (ablkctx->aes_generic)
1311                 crypto_free_cipher(ablkctx->aes_generic);
1312 }
1313
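/*
 *      get_alg_config - map a digest size to the hardware algorithm params
 *      Fills @params with the key context MAC key size, SCMD auth mode and
 *      result size for the given SHA digest size.
 */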
1314 static int get_alg_config(struct algo_param *params,
1315                           unsigned int auth_size)
1316 {
1317         switch (auth_size) {
1318         case SHA1_DIGEST_SIZE:
1319                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1320                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1321                 params->result_size = SHA1_DIGEST_SIZE;
1322                 break;
1323         case SHA224_DIGEST_SIZE:
1324                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1325                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1326                 params->result_size = SHA256_DIGEST_SIZE;
1327                 break;
1328         case SHA256_DIGEST_SIZE:
1329                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1330                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1331                 params->result_size = SHA256_DIGEST_SIZE;
1332                 break;
1333         case SHA384_DIGEST_SIZE:
1334                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1335                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1336                 params->result_size = SHA512_DIGEST_SIZE;
1337                 break;
1338         case SHA512_DIGEST_SIZE:
1339                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1340                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1341                 params->result_size = SHA512_DIGEST_SIZE;
1342                 break;
1343         default:
1344                 pr_err("chcr : ERROR, unsupported digest size\n");
1345                 return -EINVAL;
1346         }
1347         return 0;
1348 }
1349
1350 static inline void chcr_free_shash(struct crypto_shash *base_hash)
1351 {
1352         crypto_free_shash(base_hash);
1353 }
1354
1355 /**
1356  *      create_hash_wr - Create hash work request
1357  *      @req: ahash request
1358  */
1359 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1360                                       struct hash_wr_param *param)
1361 {
1362         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1363         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1364         struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1365         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1366         struct sk_buff *skb = NULL;
1367         struct chcr_wr *chcr_req;
1368         unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
1369         unsigned int digestsize = crypto_ahash_digestsize(tfm);
1370         unsigned int kctx_len = 0;
1371         u8 hash_size_in_response = 0;
1372         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1373                 GFP_ATOMIC;
1374         struct adapter *adap = padap(ctx->dev);
1375
1376         iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
1377         kctx_len = param->alg_prm.result_size + iopad_alignment;
1378         if (param->opad_needed)
1379                 kctx_len += param->alg_prm.result_size + iopad_alignment;
1380
1381         if (req_ctx->result)
1382                 hash_size_in_response = digestsize;
1383         else
1384                 hash_size_in_response = param->alg_prm.result_size;
1385         transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
1386         skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
1387         if (!skb)
1388                 return skb;
1389
1390         skb_reserve(skb, sizeof(struct sge_opaque_hdr));
1391         chcr_req = __skb_put_zero(skb, transhdr_len);
1392
1393         chcr_req->sec_cpl.op_ivinsrtofst =
1394                 FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0);
1395         chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1396
1397         chcr_req->sec_cpl.aadstart_cipherstop_hi =
1398                 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1399         chcr_req->sec_cpl.cipherstop_lo_authinsert =
1400                 FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1401         chcr_req->sec_cpl.seqno_numivs =
1402                 FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1403                                          param->opad_needed, 0);
1404
1405         chcr_req->sec_cpl.ivgen_hdrlen =
1406                 FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1407
1408         memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1409                param->alg_prm.result_size);
1410
1411         if (param->opad_needed)
1412                 memcpy(chcr_req->key_ctx.key +
1413                        ((param->alg_prm.result_size <= 32) ? 32 :
1414                         CHCR_HASH_MAX_DIGEST_SIZE),
1415                        hmacctx->opad, param->alg_prm.result_size);
1416
1417         chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1418                                             param->alg_prm.mk_size, 0,
1419                                             param->opad_needed,
1420                                             ((kctx_len +
1421                                              sizeof(chcr_req->key_ctx)) >> 4));
1422         chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1423
1424         skb_set_transport_header(skb, transhdr_len);
1425         if (param->bfr_len != 0)
1426                 write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
1427                                     param->bfr_len);
1428         if (param->sg_len != 0)
1429                 write_sg_to_skb(skb, &frags, req->src, param->sg_len);
1430         atomic_inc(&adap->chcr_stats.digest_rqst);
1431         create_wreq(ctx, chcr_req, &req->base, skb, kctx_len,
1432                     hash_size_in_response, 0, DUMMY_BYTES, 0);
1433         req_ctx->skb = skb;
1434         skb_get(skb);
1435         return skb;
1436 }
1437
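/*
 * Queue an update: data is sent to the hardware only in whole multiples of
 * the block size.  Anything smaller than one block is accumulated in
 * req_ctx->reqbfr, and the sub-block remainder of a larger update is copied
 * into the spare buffer (the two buffers are swapped) for the next request.
 */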
1438 static int chcr_ahash_update(struct ahash_request *req)
1439 {
1440         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1441         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1442         struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1443         struct uld_ctx *u_ctx = NULL;
1444         struct sk_buff *skb;
1445         u8 remainder = 0, bs;
1446         unsigned int nbytes = req->nbytes;
1447         struct hash_wr_param params;
1448
1449         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1450
1451         u_ctx = ULD_CTX(ctx);
1452         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1453                                             ctx->tx_qidx))) {
1454                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1455                         return -EBUSY;
1456         }
1457
1458         if (nbytes + req_ctx->reqlen >= bs) {
1459                 remainder = (nbytes + req_ctx->reqlen) % bs;
1460                 nbytes = nbytes + req_ctx->reqlen - remainder;
1461         } else {
1462                 sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1463                                    + req_ctx->reqlen, nbytes, 0);
1464                 req_ctx->reqlen += nbytes;
1465                 return 0;
1466         }
1467
1468         params.opad_needed = 0;
1469         params.more = 1;
1470         params.last = 0;
1471         params.sg_len = nbytes - req_ctx->reqlen;
1472         params.bfr_len = req_ctx->reqlen;
1473         params.scmd1 = 0;
1474         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1475         req_ctx->result = 0;
1476         req_ctx->data_len += params.sg_len + params.bfr_len;
1477         skb = create_hash_wr(req, &params);
1478         if (!skb)
1479                 return -ENOMEM;
1480
1481         if (remainder) {
1482                 u8 *temp;
1483                 /* Swap buffers */
1484                 temp = req_ctx->reqbfr;
1485                 req_ctx->reqbfr = req_ctx->skbfr;
1486                 req_ctx->skbfr = temp;
1487                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1488                                    req_ctx->reqbfr, remainder, req->nbytes -
1489                                    remainder);
1490         }
1491         req_ctx->reqlen = remainder;
1492         skb->dev = u_ctx->lldi.ports[0];
1493         set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1494         chcr_send_wr(skb);
1495
1496         return -EINPROGRESS;
1497 }
1498
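/*
 * Build a final padding block in software: a 0x80 byte followed by zeroes,
 * with the total message length in bits stored big-endian at the end of the
 * block (offset 56 for 64-byte blocks, offset 120 for 128-byte blocks).
 * This matches SHA-1/SHA-2 message padding and is used when final/finup/
 * digest is called with no buffered data.
 */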
1499 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1500 {
1501         memset(bfr_ptr, 0, bs);
1502         *bfr_ptr = 0x80;
1503         if (bs == 64)
1504                 *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
1505         else
1506                 *(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
1507 }
1508
1509 static int chcr_ahash_final(struct ahash_request *req)
1510 {
1511         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1512         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1513         struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1514         struct hash_wr_param params;
1515         struct sk_buff *skb;
1516         struct uld_ctx *u_ctx = NULL;
1517         u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1518
1519         u_ctx = ULD_CTX(ctx);
1520         if (is_hmac(crypto_ahash_tfm(rtfm)))
1521                 params.opad_needed = 1;
1522         else
1523                 params.opad_needed = 0;
1524         params.sg_len = 0;
1525         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1526         req_ctx->result = 1;
1527         params.bfr_len = req_ctx->reqlen;
1528         req_ctx->data_len += params.bfr_len + params.sg_len;
1529         if (req_ctx->reqlen == 0) {
1530                 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1531                 params.last = 0;
1532                 params.more = 1;
1533                 params.scmd1 = 0;
1534                 params.bfr_len = bs;
1535
1536         } else {
1537                 params.scmd1 = req_ctx->data_len;
1538                 params.last = 1;
1539                 params.more = 0;
1540         }
1541         skb = create_hash_wr(req, &params);
1542         if (!skb)
1543                 return -ENOMEM;
1544
1545         skb->dev = u_ctx->lldi.ports[0];
1546         set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1547         chcr_send_wr(skb);
1548         return -EINPROGRESS;
1549 }
1550
1551 static int chcr_ahash_finup(struct ahash_request *req)
1552 {
1553         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1554         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1555         struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1556         struct uld_ctx *u_ctx = NULL;
1557         struct sk_buff *skb;
1558         struct hash_wr_param params;
1559         u8  bs;
1560
1561         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1562         u_ctx = ULD_CTX(ctx);
1563
1564         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1565                                             ctx->tx_qidx))) {
1566                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1567                         return -EBUSY;
1568         }
1569
1570         if (is_hmac(crypto_ahash_tfm(rtfm)))
1571                 params.opad_needed = 1;
1572         else
1573                 params.opad_needed = 0;
1574
1575         params.sg_len = req->nbytes;
1576         params.bfr_len = req_ctx->reqlen;
1577         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1578         req_ctx->data_len += params.bfr_len + params.sg_len;
1579         req_ctx->result = 1;
1580         if ((req_ctx->reqlen + req->nbytes) == 0) {
1581                 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1582                 params.last = 0;
1583                 params.more = 1;
1584                 params.scmd1 = 0;
1585                 params.bfr_len = bs;
1586         } else {
1587                 params.scmd1 = req_ctx->data_len;
1588                 params.last = 1;
1589                 params.more = 0;
1590         }
1591
1592         skb = create_hash_wr(req, &params);
1593         if (!skb)
1594                 return -ENOMEM;
1595
1596         skb->dev = u_ctx->lldi.ports[0];
1597         set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1598         chcr_send_wr(skb);
1599
1600         return -EINPROGRESS;
1601 }
1602
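/*
 * These ahash entry points are reached through the generic crypto API.  An
 * illustrative sketch of a caller (not part of this driver; key, buf, len,
 * digest and cb are placeholders) would look roughly like:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, cb, NULL);
 *	sg_init_one(&sg, buf, len);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	crypto_ahash_digest(req);	typically returns -EINPROGRESS;
 *					the result is delivered through cb
 */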
1603 static int chcr_ahash_digest(struct ahash_request *req)
1604 {
1605         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1606         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1607         struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1608         struct uld_ctx *u_ctx = NULL;
1609         struct sk_buff *skb;
1610         struct hash_wr_param params;
1611         u8  bs;
1612
1613         rtfm->init(req);
1614         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1615
1616         u_ctx = ULD_CTX(ctx);
1617         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1618                                             ctx->tx_qidx))) {
1619                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1620                         return -EBUSY;
1621         }
1622
1623         if (is_hmac(crypto_ahash_tfm(rtfm)))
1624                 params.opad_needed = 1;
1625         else
1626                 params.opad_needed = 0;
1627
1628         params.last = 0;
1629         params.more = 0;
1630         params.sg_len = req->nbytes;
1631         params.bfr_len = 0;
1632         params.scmd1 = 0;
1633         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1634         req_ctx->result = 1;
1635         req_ctx->data_len += params.bfr_len + params.sg_len;
1636
1637         if (req->nbytes == 0) {
1638                 create_last_hash_block(req_ctx->reqbfr, bs, 0);
1639                 params.more = 1;
1640                 params.bfr_len = bs;
1641         }
1642
1643         skb = create_hash_wr(req, &params);
1644         if (!skb)
1645                 return -ENOMEM;
1646
1647         skb->dev = u_ctx->lldi.ports[0];
1648         set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1649         chcr_send_wr(skb);
1650         return -EINPROGRESS;
1651 }
1652
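/*
 * Export/import copy the buffered partial block, the running byte count and
 * the partial hardware hash state, so a hash transform can be suspended and
 * resumed on another request.
 */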
1653 static int chcr_ahash_export(struct ahash_request *areq, void *out)
1654 {
1655         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1656         struct chcr_ahash_req_ctx *state = out;
1657
1658         state->reqlen = req_ctx->reqlen;
1659         state->data_len = req_ctx->data_len;
1660         memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
1661         memcpy(state->partial_hash, req_ctx->partial_hash,
1662                CHCR_HASH_MAX_DIGEST_SIZE);
1663         return 0;
1664 }
1665
1666 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
1667 {
1668         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1669         struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
1670
1671         req_ctx->reqlen = state->reqlen;
1672         req_ctx->data_len = state->data_len;
1673         req_ctx->reqbfr = req_ctx->bfr1;
1674         req_ctx->skbfr = req_ctx->bfr2;
1675         memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
1676         memcpy(req_ctx->partial_hash, state->partial_hash,
1677                CHCR_HASH_MAX_DIGEST_SIZE);
1678         return 0;
1679 }
1680
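/*
 * HMAC setkey: keys longer than the block size are first digested with the
 * software shash; the key is then padded and XORed with the ipad/opad
 * constants, and partial hashes of the ipad and opad blocks are precomputed
 * so the hardware can continue from those intermediate states.
 */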
1681 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1682                              unsigned int keylen)
1683 {
1684         struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1685         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1686         unsigned int digestsize = crypto_ahash_digestsize(tfm);
1687         unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1688         unsigned int i, err = 0, updated_digestsize;
1689
1690         SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
1691
1692         /* Use the key to calculate the ipad and opad. The ipad will be sent
1693          * with the first request's data; the opad will be sent with the final
1694          * hash result. They are kept in hmacctx->ipad and hmacctx->opad.
1695          */
1696         shash->tfm = hmacctx->base_hash;
1697         shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
1698         if (keylen > bs) {
1699                 err = crypto_shash_digest(shash, key, keylen,
1700                                           hmacctx->ipad);
1701                 if (err)
1702                         goto out;
1703                 keylen = digestsize;
1704         } else {
1705                 memcpy(hmacctx->ipad, key, keylen);
1706         }
1707         memset(hmacctx->ipad + keylen, 0, bs - keylen);
1708         memcpy(hmacctx->opad, hmacctx->ipad, bs);
1709
1710         for (i = 0; i < bs / sizeof(int); i++) {
1711                 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
1712                 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
1713         }
1714
1715         updated_digestsize = digestsize;
1716         if (digestsize == SHA224_DIGEST_SIZE)
1717                 updated_digestsize = SHA256_DIGEST_SIZE;
1718         else if (digestsize == SHA384_DIGEST_SIZE)
1719                 updated_digestsize = SHA512_DIGEST_SIZE;
1720         err = chcr_compute_partial_hash(shash, hmacctx->ipad,
1721                                         hmacctx->ipad, digestsize);
1722         if (err)
1723                 goto out;
1724         chcr_change_order(hmacctx->ipad, updated_digestsize);
1725
1726         err = chcr_compute_partial_hash(shash, hmacctx->opad,
1727                                         hmacctx->opad, digestsize);
1728         if (err)
1729                 goto out;
1730         chcr_change_order(hmacctx->opad, updated_digestsize);
1731 out:
1732         return err;
1733 }
1734
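/*
 * XTS setkey: key_len covers both XTS key halves.  The fallback cipher is
 * keyed first so invalid keys are rejected consistently, the raw key is kept
 * for encryption, the reversed round key is derived for decryption, and the
 * key-context header is built (key_len == 32 bytes means AES-128-XTS).
 */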
1735 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1736                                unsigned int key_len)
1737 {
1738         struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
1739         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1740         unsigned short context_size = 0;
1741         int err;
1742
1743         err = chcr_cipher_fallback_setkey(cipher, key, key_len);
1744         if (err)
1745                 goto badkey_err;
1746
1747         memcpy(ablkctx->key, key, key_len);
1748         ablkctx->enckey_len = key_len;
1749         get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
1750         context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
1751         ablkctx->key_ctx_hdr =
1752                 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
1753                                  CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
1754                                  CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
1755                                  CHCR_KEYCTX_NO_KEY, 1,
1756                                  0, context_size);
1757         ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
1758         return 0;
1759 badkey_err:
1760         crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1761         ablkctx->enckey_len = 0;
1762
1763         return err;
1764 }
1765
1766 static int chcr_sha_init(struct ahash_request *areq)
1767 {
1768         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1769         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1770         int digestsize =  crypto_ahash_digestsize(tfm);
1771
1772         req_ctx->data_len = 0;
1773         req_ctx->reqlen = 0;
1774         req_ctx->reqbfr = req_ctx->bfr1;
1775         req_ctx->skbfr = req_ctx->bfr2;
1776         req_ctx->skb = NULL;
1777         req_ctx->result = 0;
1778         copy_hash_init_values(req_ctx->partial_hash, digestsize);
1779         return 0;
1780 }
1781
1782 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
1783 {
1784         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1785                                  sizeof(struct chcr_ahash_req_ctx));
1786         return chcr_device_init(crypto_tfm_ctx(tfm));
1787 }
1788
1789 static int chcr_hmac_init(struct ahash_request *areq)
1790 {
1791         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1792         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
1793         struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1794         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1795         unsigned int digestsize = crypto_ahash_digestsize(rtfm);
1796         unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1797
1798         chcr_sha_init(areq);
1799         req_ctx->data_len = bs;
1800         if (is_hmac(crypto_ahash_tfm(rtfm))) {
1801                 if (digestsize == SHA224_DIGEST_SIZE)
1802                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
1803                                SHA256_DIGEST_SIZE);
1804                 else if (digestsize == SHA384_DIGEST_SIZE)
1805                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
1806                                SHA512_DIGEST_SIZE);
1807                 else
1808                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
1809                                digestsize);
1810         }
1811         return 0;
1812 }
1813
1814 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
1815 {
1816         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1817         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1818         unsigned int digestsize =
1819                 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
1820
1821         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1822                                  sizeof(struct chcr_ahash_req_ctx));
1823         hmacctx->base_hash = chcr_alloc_shash(digestsize);
1824         if (IS_ERR(hmacctx->base_hash))
1825                 return PTR_ERR(hmacctx->base_hash);
1826         return chcr_device_init(crypto_tfm_ctx(tfm));
1827 }
1828
1829 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
1830 {
1831         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1832         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1833
1834         if (hmacctx->base_hash) {
1835                 chcr_free_shash(hmacctx->base_hash);
1836                 hmacctx->base_hash = NULL;
1837         }
1838 }
1839
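/*
 * Scatterlist helpers: entries larger than CHCR_SG_SIZE cannot be handed to
 * the hardware directly.  is_newsg() reports whether a list needs splitting
 * (and how many entries the split list needs), and alloc_new_sg() builds the
 * split copy with each entry capped at CHCR_SG_SIZE.
 */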
1840 static int is_newsg(struct scatterlist *sgl, unsigned int *newents)
1841 {
1842         int nents = 0;
1843         int ret = 0;
1844
1845         while (sgl) {
1846                 if (sgl->length > CHCR_SG_SIZE)
1847                         ret = 1;
1848                 nents += DIV_ROUND_UP(sgl->length, CHCR_SG_SIZE);
1849                 sgl = sg_next(sgl);
1850         }
1851         *newents = nents;
1852         return ret;
1853 }
1854
1855 static inline void free_new_sg(struct scatterlist *sgl)
1856 {
1857         kfree(sgl);
1858 }
1859
1860 static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
1861                                        unsigned int nents)
1862 {
1863         struct scatterlist *newsg, *sg;
1864         int i, len, processed = 0;
1865         struct page *spage;
1866         int offset;
1867
1868         newsg = kmalloc_array(nents, sizeof(struct scatterlist), GFP_KERNEL);
1869         if (!newsg)
1870                 return ERR_PTR(-ENOMEM);
1871         sg = newsg;
1872         sg_init_table(sg, nents);
1873         offset = sgl->offset;
1874         spage = sg_page(sgl);
1875         for (i = 0; i < nents; i++) {
1876                 len = min_t(u32, sgl->length - processed, CHCR_SG_SIZE);
1877                 sg_set_page(sg, spage, len, offset);
1878                 processed += len;
1879                 offset += len;
1880                 if (offset >= PAGE_SIZE) {
1881                         offset = offset % PAGE_SIZE;
1882                         spage++;
1883                 }
1884                 if (processed == sgl->length) {
1885                         processed = 0;
1886                         sgl = sg_next(sgl);
1887                         if (!sgl)
1888                                 break;
1889                         spage = sg_page(sgl);
1890                         offset = sgl->offset;
1891                 }
1892                 sg = sg_next(sg);
1893         }
1894         return newsg;
1895 }
1896
1897 static int chcr_copy_assoc(struct aead_request *req,
1898                                 struct chcr_aead_ctx *ctx)
1899 {
1900         SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
1901
1902         skcipher_request_set_tfm(skreq, ctx->null);
1903         skcipher_request_set_callback(skreq, aead_request_flags(req),
1904                         NULL, NULL);
1905         skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
1906                         NULL);
1907
1908         return crypto_skcipher_encrypt(skreq);
1909 }
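
/*
 * Decide whether an AEAD request must be punted to the software
 * implementation: zero-length payloads, AAD larger than the hardware limit,
 * too many scatterlist fragments, or a work request that would exceed
 * MAX_WR_SIZE are all handled by chcr_aead_fallback() below.
 */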
1910 static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
1911                                    int aadmax, int wrlen,
1912                                    unsigned short op_type)
1913 {
1914         unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
1915
1916         if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
1917             (req->assoclen > aadmax) ||
1918             (src_nent > MAX_SKB_FRAGS) ||
1919             (wrlen > MAX_WR_SIZE))
1920                 return 1;
1921         return 0;
1922 }
1923
1924 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
1925 {
1926         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1927         struct chcr_context *ctx = crypto_aead_ctx(tfm);
1928         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1929         struct aead_request *subreq = aead_request_ctx(req);
1930
1931         aead_request_set_tfm(subreq, aeadctx->sw_cipher);
1932         aead_request_set_callback(subreq, req->base.flags,
1933                                   req->base.complete, req->base.data);
1934         aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
1935                                 req->iv);
1936         aead_request_set_ad(subreq, req->assoclen);
1937         return op_type ? crypto_aead_decrypt(subreq) :
1938                 crypto_aead_encrypt(subreq);
1939 }
1940
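/*
 * Build the work request for authenc (AES-CBC + HMAC) and aead-null
 * transforms: the CPL describes the AAD, IV and payload offsets, the key
 * context carries the cipher key (or decrypt round key) followed by the
 * precomputed ipad/opad, and the destination scatterlist is mapped as a
 * physical DSGL.
 */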
1941 static struct sk_buff *create_authenc_wr(struct aead_request *req,
1942                                          unsigned short qid,
1943                                          int size,
1944                                          unsigned short op_type)
1945 {
1946         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1947         struct chcr_context *ctx = crypto_aead_ctx(tfm);
1948         struct uld_ctx *u_ctx = ULD_CTX(ctx);
1949         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1950         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
1951         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1952         struct sk_buff *skb = NULL;
1953         struct chcr_wr *chcr_req;
1954         struct cpl_rx_phys_dsgl *phys_cpl;
1955         struct phys_sge_parm sg_param;
1956         struct scatterlist *src;
1957         unsigned int frags = 0, transhdr_len;
1958         unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
1959         unsigned int   kctx_len = 0, nents;
1960         unsigned short stop_offset = 0;
1961         unsigned int  assoclen = req->assoclen;
1962         unsigned int  authsize = crypto_aead_authsize(tfm);
1963         int error = -EINVAL, src_nent;
1964         int null = 0;
1965         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1966                 GFP_ATOMIC;
1967         struct adapter *adap = padap(ctx->dev);
1968
1969         reqctx->newdstsg = NULL;
1970         dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
1971                                                    authsize);
1972         if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0))
1973                 goto err;
1974
1975         if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1976                 goto err;
1977         src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
1978         if (src_nent < 0)
1979                 goto err;
1980         src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1981
1982         if (req->src != req->dst) {
1983                 error = chcr_copy_assoc(req, aeadctx);
1984                 if (error)
1985                         return ERR_PTR(error);
1986         }
1987         if (dst_size && is_newsg(req->dst, &nents)) {
1988                 reqctx->newdstsg = alloc_new_sg(req->dst, nents);
1989                 if (IS_ERR(reqctx->newdstsg))
1990                         return ERR_CAST(reqctx->newdstsg);
1991                 reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
1992                                                reqctx->newdstsg, req->assoclen);
1993         } else {
1994                 if (req->src == req->dst)
1995                         reqctx->dst = src;
1996                 else
1997                         reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
1998                                                        req->dst, req->assoclen);
1999         }
2000         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
2001                 null = 1;
2002                 assoclen = 0;
2003         }
2004         reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
2005                                              (op_type ? -authsize : authsize));
2006         if (reqctx->dst_nents < 0) {
2007                 pr_err("AUTHENC:Invalid Destination sg entries\n");
2008                 error = -EINVAL;
2009                 goto err;
2010         }
2011         dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
2012         kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2013                 - sizeof(chcr_req->key_ctx);
2014         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2015         if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG,
2016                         T6_MAX_AAD_SIZE,
2017                         transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
2018                                 op_type)) {
2019                 atomic_inc(&adap->chcr_stats.fallback);
2020                 free_new_sg(reqctx->newdstsg);
2021                 reqctx->newdstsg = NULL;
2022                 return ERR_PTR(chcr_aead_fallback(req, op_type));
2023         }
2024         skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
2025         if (!skb) {
2026                 error = -ENOMEM;
2027                 goto err;
2028         }
2029
2030         /* LLD is going to write the sge hdr. */
2031         skb_reserve(skb, sizeof(struct sge_opaque_hdr));
2032
2033         /* Write WR */
2034         chcr_req = __skb_put_zero(skb, transhdr_len);
2035
2036         stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2037
2038         /*
2039          * Input order is AAD, IV and payload, where the IV is included as
2040          * part of the authdata. All other fields are filled according
2041          * to the hardware spec.
2042          */
2043         chcr_req->sec_cpl.op_ivinsrtofst =
2044                 FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2,
2045                                        (ivsize ? (assoclen + 1) : 0));
2046         chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
2047         chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2048                                         assoclen ? 1 : 0, assoclen,
2049                                         assoclen + ivsize + 1,
2050                                         (stop_offset & 0x1F0) >> 4);
2051         chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2052                                         stop_offset & 0xF,
2053                                         null ? 0 : assoclen + ivsize + 1,
2054                                         stop_offset, stop_offset);
2055         chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2056                                         (op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
2057                                         CHCR_SCMD_CIPHER_MODE_AES_CBC,
2058                                         actx->auth_mode, aeadctx->hmac_ctrl,
2059                                         ivsize >> 1);
2060         chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2061                                          0, 1, dst_size);
2062
2063         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2064         if (op_type == CHCR_ENCRYPT_OP)
2065                 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2066                        aeadctx->enckey_len);
2067         else
2068                 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2069                        aeadctx->enckey_len);
2070
2071         memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
2072                                         4), actx->h_iopad, kctx_len -
2073                                 (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
2074
2075         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2076         sg_param.nents = reqctx->dst_nents;
2077         sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
2078         sg_param.qid = qid;
2079         error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
2080                                         reqctx->dst, &sg_param,
2081                                         ctx->pci_chan_id);
2082         if (error)
2083                 goto dstmap_fail;
2084
2085         skb_set_transport_header(skb, transhdr_len);
2086
2087         if (assoclen) {
2088                 /* AAD buffer in */
2089                 write_sg_to_skb(skb, &frags, req->src, assoclen);
2090
2091         }
2092         write_buffer_to_skb(skb, &frags, req->iv, ivsize);
2093         write_sg_to_skb(skb, &frags, src, req->cryptlen);
2094         atomic_inc(&adap->chcr_stats.cipher_rqst);
2095         create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
2096                    sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
2097         reqctx->skb = skb;
2098         skb_get(skb);
2099
2100         return skb;
2101 dstmap_fail:
2102         /* ivmap_fail: */
2103         kfree_skb(skb);
2104 err:
2105         free_new_sg(reqctx->newdstsg);
2106         reqctx->newdstsg = NULL;
2107         return ERR_PTR(error);
2108 }
2109
2110 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2111 {
2112         __be32 data;
2113
2114         memset(block, 0, csize);
2115         block += csize;
2116
2117         if (csize >= 4)
2118                 csize = 4;
2119         else if (msglen > (unsigned int)(1 << (8 * csize)))
2120                 return -EOVERFLOW;
2121
2122         data = cpu_to_be32(msglen);
2123         memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2124
2125         return 0;
2126 }
2127
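/*
 * Build the CCM B0 block (RFC 3610) in the scratch pad: byte 0 holds the
 * flags (Adata bit, (M - 2) / 2 in bits 3-5, and L' = L - 1 in bits 0-2,
 * taken from the IV), followed by the nonce, with the message length in the
 * last L bytes as written by set_msg_len() above.
 */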
2128 static void generate_b0(struct aead_request *req,
2129                         struct chcr_aead_ctx *aeadctx,
2130                         unsigned short op_type)
2131 {
2132         unsigned int l, lp, m;
2133         int rc;
2134         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2135         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2136         u8 *b0 = reqctx->scratch_pad;
2137
2138         m = crypto_aead_authsize(aead);
2139
2140         memcpy(b0, reqctx->iv, 16);
2141
2142         lp = b0[0];
2143         l = lp + 1;
2144
2145         /* set m, bits 3-5 */
2146         *b0 |= (8 * ((m - 2) / 2));
2147
2148         /* set adata, bit 6, if associated data is used */
2149         if (req->assoclen)
2150                 *b0 |= 64;
2151         rc = set_msg_len(b0 + 16 - l,
2152                          (op_type == CHCR_DECRYPT_OP) ?
2153                          req->cryptlen - m : req->cryptlen, l);
2154 }
2155
2156 static inline int crypto_ccm_check_iv(const u8 *iv)
2157 {
2158         /* 2 <= L <= 8, so 1 <= L' <= 7. */
2159         if (iv[0] < 1 || iv[0] > 7)
2160                 return -EINVAL;
2161
2162         return 0;
2163 }
2164
2165 static int ccm_format_packet(struct aead_request *req,
2166                              struct chcr_aead_ctx *aeadctx,
2167                              unsigned int sub_type,
2168                              unsigned short op_type)
2169 {
2170         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2171         int rc = 0;
2172
2173         if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2174                 reqctx->iv[0] = 3;
2175                 memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
2176                 memcpy(reqctx->iv + 4, req->iv, 8);
2177                 memset(reqctx->iv + 12, 0, 4);
2178                 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2179                         htons(req->assoclen - 8);
2180         } else {
2181                 memcpy(reqctx->iv, req->iv, 16);
2182                 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2183                         htons(req->assoclen);
2184         }
2185         generate_b0(req, aeadctx, op_type);
2186         /* zero the ctr value */
2187         memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
2188         return rc;
2189 }
2190
2191 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2192                                   unsigned int dst_size,
2193                                   struct aead_request *req,
2194                                   unsigned short op_type,
2195                                           struct chcr_context *chcrctx)
2196 {
2197         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2198         struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2199         unsigned int ivsize = AES_BLOCK_SIZE;
2200         unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2201         unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2202         unsigned int c_id = chcrctx->dev->rx_channel_id;
2203         unsigned int ccm_xtra;
2204         unsigned int tag_offset = 0, auth_offset = 0;
2205         unsigned int assoclen;
2206
2207         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2208                 assoclen = req->assoclen - 8;
2209         else
2210                 assoclen = req->assoclen;
2211         ccm_xtra = CCM_B0_SIZE +
2212                 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2213
2214         auth_offset = req->cryptlen ?
2215                 (assoclen + ivsize + 1 + ccm_xtra) : 0;
2216         if (op_type == CHCR_DECRYPT_OP) {
2217                 if (crypto_aead_authsize(tfm) != req->cryptlen)
2218                         tag_offset = crypto_aead_authsize(tfm);
2219                 else
2220                         auth_offset = 0;
2221         }
2222
2223
2224         sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2225                                          2, (ivsize ?  (assoclen + 1) :  0) +
2226                                          ccm_xtra);
2227         sec_cpl->pldlen =
2228                 htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
2229         /* For CCM there will always be a B0 block, so AAD start is always 1 */
2230         sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2231                                         1, assoclen + ccm_xtra, assoclen
2232                                         + ivsize + 1 + ccm_xtra, 0);
2233
2234         sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2235                                         auth_offset, tag_offset,
2236                                         (op_type == CHCR_ENCRYPT_OP) ? 0 :
2237                                         crypto_aead_authsize(tfm));
2238         sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2239                                         (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2240                                         cipher_mode, mac_mode,
2241                                         aeadctx->hmac_ctrl, ivsize >> 1);
2242
2243         sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2244                                         1, dst_size);
2245 }
2246
2247 int aead_ccm_validate_input(unsigned short op_type,
2248                             struct aead_request *req,
2249                             struct chcr_aead_ctx *aeadctx,
2250                             unsigned int sub_type)
2251 {
2252         if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2253                 if (crypto_ccm_check_iv(req->iv)) {
2254                         pr_err("CCM: IV check fails\n");
2255                         return -EINVAL;
2256                 }
2257         } else {
2258                 if (req->assoclen != 16 && req->assoclen != 20) {
2259                         pr_err("RFC4309: Invalid AAD length %d\n",
2260                                req->assoclen);
2261                         return -EINVAL;
2262                 }
2263         }
2264         if (aeadctx->enckey_len == 0) {
2265                 pr_err("CCM: Encryption key not set\n");
2266                 return -EINVAL;
2267         }
2268         return 0;
2269 }
2270
2271 unsigned int fill_aead_req_fields(struct sk_buff *skb,
2272                                   struct aead_request *req,
2273                                   struct scatterlist *src,
2274                                   unsigned int ivsize,
2275                                   struct chcr_aead_ctx *aeadctx)
2276 {
2277         unsigned int frags = 0;
2278         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2279         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2280         /* B0 block and AAD length field (if AAD is present) */
2281
2282         write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
2283                                 (req->assoclen ?  CCM_AAD_FIELD_SIZE : 0));
2284         if (req->assoclen) {
2285                 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2286                         write_sg_to_skb(skb, &frags, req->src,
2287                                         req->assoclen - 8);
2288                 else
2289                         write_sg_to_skb(skb, &frags, req->src, req->assoclen);
2290         }
2291         write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
2292         if (req->cryptlen)
2293                 write_sg_to_skb(skb, &frags, src, req->cryptlen);
2294
2295         return frags;
2296 }
2297
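/*
 * Build the work request for CCM and RFC4309 (ESP) CCM: the key context
 * holds the AES key twice (the cipher and CBC-MAC use the same key), the B0
 * block and AAD length field are generated into the scratch pad, and the IV
 * counter portion is zeroed before the request is sent.
 */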
2298 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2299                                           unsigned short qid,
2300                                           int size,
2301                                           unsigned short op_type)
2302 {
2303         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2304         struct chcr_context *ctx = crypto_aead_ctx(tfm);
2305         struct uld_ctx *u_ctx = ULD_CTX(ctx);
2306         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2307         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2308         struct sk_buff *skb = NULL;
2309         struct chcr_wr *chcr_req;
2310         struct cpl_rx_phys_dsgl *phys_cpl;
2311         struct phys_sge_parm sg_param;
2312         struct scatterlist *src;
2313         unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
2314         unsigned int dst_size = 0, kctx_len, nents;
2315         unsigned int sub_type;
2316         unsigned int authsize = crypto_aead_authsize(tfm);
2317         int error = -EINVAL, src_nent;
2318         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2319                 GFP_ATOMIC;
2320         struct adapter *adap = padap(ctx->dev);
2321
2322         dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
2323                                                    authsize);
2324         reqctx->newdstsg = NULL;
2325         if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
2326                 goto err;
2327         src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
2328         if (src_nent < 0)
2329                 goto err;
2330
2331         sub_type = get_aead_subtype(tfm);
2332         src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
2333         if (req->src != req->dst) {
2334                 error = chcr_copy_assoc(req, aeadctx);
2335                 if (error) {
2336                         pr_err("AAD copy to destination buffer fails\n");
2337                         return ERR_PTR(error);
2338                 }
2339         }
2340         if (dst_size && is_newsg(req->dst, &nents)) {
2341                 reqctx->newdstsg = alloc_new_sg(req->dst, nents);
2342                 if (IS_ERR(reqctx->newdstsg))
2343                         return ERR_CAST(reqctx->newdstsg);
2344                 reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
2345                                                reqctx->newdstsg, req->assoclen);
2346         } else {
2347                 if (req->src == req->dst)
2348                         reqctx->dst = src;
2349                 else
2350                         reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
2351                                                        req->dst, req->assoclen);
2352         }
2353         reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
2354                                              (op_type ? -authsize : authsize));
2355         if (reqctx->dst_nents < 0) {
2356                 pr_err("CCM:Invalid Destination sg entries\n");
2357                 error = -EINVAL;
2358                 goto err;
2359         }
2360         error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
2361         if (error)
2362                 goto err;
2363
2364         dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
2365         kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
2366         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2367         if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG,
2368                             T6_MAX_AAD_SIZE - 18,
2369                             transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
2370                             op_type)) {
2371                 atomic_inc(&adap->chcr_stats.fallback);
2372                 free_new_sg(reqctx->newdstsg);
2373                 reqctx->newdstsg = NULL;
2374                 return ERR_PTR(chcr_aead_fallback(req, op_type));
2375         }
2376
2377         skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),  flags);
2378
2379         if (!skb) {
2380                 error = -ENOMEM;
2381                 goto err;
2382         }
2383
2384         skb_reserve(skb, sizeof(struct sge_opaque_hdr));
2385
2386         chcr_req = __skb_put_zero(skb, transhdr_len);
2387
2388         fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
2389
2390         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2391         memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2392         memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
2393                                         16), aeadctx->key, aeadctx->enckey_len);
2394
2395         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2396         error = ccm_format_packet(req, aeadctx, sub_type, op_type);
2397         if (error)
2398                 goto dstmap_fail;
2399
2400         sg_param.nents = reqctx->dst_nents;
2401         sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
2402         sg_param.qid = qid;
2403         error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
2404                                  reqctx->dst, &sg_param, ctx->pci_chan_id);
2405         if (error)
2406                 goto dstmap_fail;
2407
2408         skb_set_transport_header(skb, transhdr_len);
2409         frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
2410         atomic_inc(&adap->chcr_stats.aead_rqst);
2411         create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 0, 1,
2412                     sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
2413         reqctx->skb = skb;
2414         skb_get(skb);
2415         return skb;
2416 dstmap_fail:
2417         kfree_skb(skb);
2418 err:
2419         free_new_sg(reqctx->newdstsg);
2420         reqctx->newdstsg = NULL;
2421         return ERR_PTR(error);
2422 }
2423
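/*
 * Build the work request for GCM and RFC4106 (ESP) GCM: the key context
 * holds the AES key followed by the GHASH subkey H, and the 16-byte IV is
 * assembled as salt | explicit IV | 0x00000001 for RFC4106, or as the
 * caller's 12-byte IV with the 32-bit counter value 1 appended for plain
 * GCM.
 */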
2424 static struct sk_buff *create_gcm_wr(struct aead_request *req,
2425                                      unsigned short qid,
2426                                      int size,
2427                                      unsigned short op_type)
2428 {
2429         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2430         struct chcr_context *ctx = crypto_aead_ctx(tfm);
2431         struct uld_ctx *u_ctx = ULD_CTX(ctx);
2432         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2433         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2434         struct sk_buff *skb = NULL;
2435         struct chcr_wr *chcr_req;
2436         struct cpl_rx_phys_dsgl *phys_cpl;
2437         struct phys_sge_parm sg_param;
2438         struct scatterlist *src;
2439         unsigned int frags = 0, transhdr_len;
2440         unsigned int ivsize = AES_BLOCK_SIZE;
2441         unsigned int dst_size = 0, kctx_len, nents, assoclen = req->assoclen;
2442         unsigned char tag_offset = 0;
2443         unsigned int authsize = crypto_aead_authsize(tfm);
2444         int error = -EINVAL, src_nent;
2445         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2446                 GFP_ATOMIC;
2447         struct adapter *adap = padap(ctx->dev);
2448
2449         reqctx->newdstsg = NULL;
2450         dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
2451                                                     authsize);
2452         /* validate key size */
2453         if (aeadctx->enckey_len == 0)
2454                 goto err;
2455
2456         if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
2457                 goto err;
2458         src_nent = sg_nents_for_len(req->src, assoclen + req->cryptlen);
2459         if (src_nent < 0)
2460                 goto err;
2461
2462         src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen);
2463         if (req->src != req->dst) {
2464                 error = chcr_copy_assoc(req, aeadctx);
2465                 if (error)
2466                         return  ERR_PTR(error);
2467         }
2468
2469         if (dst_size && is_newsg(req->dst, &nents)) {
2470                 reqctx->newdstsg = alloc_new_sg(req->dst, nents);
2471                 if (IS_ERR(reqctx->newdstsg))
2472                         return ERR_CAST(reqctx->newdstsg);
2473                 reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
2474                                                reqctx->newdstsg, assoclen);
2475         } else {
2476                 if (req->src == req->dst)
2477                         reqctx->dst = src;
2478                 else
2479                         reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
2480                                                        req->dst, assoclen);
2481         }
2482
2483         reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
2484                                              (op_type ? -authsize : authsize));
2485         if (reqctx->dst_nents < 0) {
2486                 pr_err("GCM:Invalid Destination sg entries\n");
2487                 error = -EINVAL;
2488                 goto err;
2489         }
2490
2491
2492         dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
2493         kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
2494                 AEAD_H_SIZE;
2495         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2496         if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG,
2497                             T6_MAX_AAD_SIZE,
2498                             transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
2499                             op_type)) {
2500                 atomic_inc(&adap->chcr_stats.fallback);
2501                 free_new_sg(reqctx->newdstsg);
2502                 reqctx->newdstsg = NULL;
2503                 return ERR_PTR(chcr_aead_fallback(req, op_type));
2504         }
2505         skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
2506         if (!skb) {
2507                 error = -ENOMEM;
2508                 goto err;
2509         }
2510
2511         /* NIC driver is going to write the sge hdr. */
2512         skb_reserve(skb, sizeof(struct sge_opaque_hdr));
2513
2514         chcr_req = __skb_put_zero(skb, transhdr_len);
2515
2516         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
2517                 assoclen = req->assoclen - 8;
2518
2519         tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2520         chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
2521                                         ctx->dev->rx_channel_id, 2, (ivsize ?
2522                                         (assoclen + 1) : 0));
2523         chcr_req->sec_cpl.pldlen =
2524                 htonl(assoclen + ivsize + req->cryptlen);
2525         chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2526                                         assoclen ? 1 : 0, assoclen,
2527                                         assoclen + ivsize + 1, 0);
2528         chcr_req->sec_cpl.cipherstop_lo_authinsert =
2529                 FILL_SEC_CPL_AUTHINSERT(0, assoclen + ivsize + 1,
2530                                         tag_offset, tag_offset);
2531         chcr_req->sec_cpl.seqno_numivs =
2532                 FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
2533                                 CHCR_ENCRYPT_OP) ? 1 : 0,
2534                                 CHCR_SCMD_CIPHER_MODE_AES_GCM,
2535                                 CHCR_SCMD_AUTH_MODE_GHASH,
2536                                 aeadctx->hmac_ctrl, ivsize >> 1);
2537         chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2538                                         0, 1, dst_size);
2539         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2540         memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2541         memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
2542                                 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
2543
2544         /* prepare a 16 byte iv */
2545         /* S   A   L  T |  IV | 0x00000001 */
2546         if (get_aead_subtype(tfm) ==
2547             CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
2548                 memcpy(reqctx->iv, aeadctx->salt, 4);
2549                 memcpy(reqctx->iv + 4, req->iv, 8);
2550         } else {
2551                 memcpy(reqctx->iv, req->iv, 12);
2552         }
2553         *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
2554
2555         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2556         sg_param.nents = reqctx->dst_nents;
2557         sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
2558         sg_param.qid = qid;
2559         error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
2560                                           reqctx->dst, &sg_param,
2561                                           ctx->pci_chan_id);
2562         if (error)
2563                 goto dstmap_fail;
2564
2565         skb_set_transport_header(skb, transhdr_len);
2566         write_sg_to_skb(skb, &frags, req->src, assoclen);
2567         write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
2568         write_sg_to_skb(skb, &frags, src, req->cryptlen);
2569         atomic_inc(&adap->chcr_stats.aead_rqst);
2570         create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
2571                         sizeof(struct cpl_rx_phys_dsgl) + dst_size,
2572                         reqctx->verify);
2573         reqctx->skb = skb;
2574         skb_get(skb);
2575         return skb;
2576
2577 dstmap_fail:
2578         /* ivmap_fail: */
2579         kfree_skb(skb);
2580 err:
2581         free_new_sg(reqctx->newdstsg);
2582         reqctx->newdstsg = NULL;
2583         return ERR_PTR(error);
2584 }
2585
2586
2587
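/*
 * AEAD transforms allocate a software fallback at init time and a null
 * skcipher used to copy the AAD when src != dst.  An illustrative caller
 * (not part of this driver; key, iv, src_sg, dst_sg and cb are
 * placeholders) would look roughly like:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, 16);
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, cb, NULL);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	crypto_aead_encrypt(req);	typically returns -EINPROGRESS
 */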
2588 static int chcr_aead_cra_init(struct crypto_aead *tfm)
2589 {
2590         struct chcr_context *ctx = crypto_aead_ctx(tfm);
2591         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2592         struct aead_alg *alg = crypto_aead_alg(tfm);
2593
2594         aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
2595                                                CRYPTO_ALG_NEED_FALLBACK |
2596                                                CRYPTO_ALG_ASYNC);
2597         if  (IS_ERR(aeadctx->sw_cipher))
2598                 return PTR_ERR(aeadctx->sw_cipher);
2599         crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
2600                                  sizeof(struct aead_request) +
2601                                  crypto_aead_reqsize(aeadctx->sw_cipher)));
2602         aeadctx->null = crypto_get_default_null_skcipher();
2603         if (IS_ERR(aeadctx->null))
2604                 return PTR_ERR(aeadctx->null);
2605         return chcr_device_init(ctx);
2606 }
2607
2608 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
2609 {
2610         struct chcr_context *ctx = crypto_aead_ctx(tfm);
2611         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2612
2613         crypto_put_default_null_skcipher();
2614         crypto_free_aead(aeadctx->sw_cipher);
2615 }
2616
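/*
 * The setauthsize handlers below map the requested ICV length to a hardware
 * HMAC_CTRL truncation mode.  Lengths with no direct hardware mode fall back
 * to an untruncated tag that is verified in software (VERIFY_SW).
 */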
2617 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
2618                                         unsigned int authsize)
2619 {
2620         struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2621
2622         aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
2623         aeadctx->mayverify = VERIFY_HW;
2624         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2625 }
2626 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
2627                                     unsigned int authsize)
2628 {
2629         struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2630         u32 maxauth = crypto_aead_maxauthsize(tfm);
2631
2632         /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
2633          * does not hold for SHA1. The authsize == 12 check must therefore
2634          * come before the authsize == (maxauth >> 1) check.
2635          */
2636         if (authsize == ICV_4) {
2637                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2638                 aeadctx->mayverify = VERIFY_HW;
2639         } else if (authsize == ICV_6) {
2640                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2641                 aeadctx->mayverify = VERIFY_HW;
2642         } else if (authsize == ICV_10) {
2643                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2644                 aeadctx->mayverify = VERIFY_HW;
2645         } else if (authsize == ICV_12) {
2646                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2647                 aeadctx->mayverify = VERIFY_HW;
2648         } else if (authsize == ICV_14) {
2649                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2650                 aeadctx->mayverify = VERIFY_HW;
2651         } else if (authsize == (maxauth >> 1)) {
2652                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2653                 aeadctx->mayverify = VERIFY_HW;
2654         } else if (authsize == maxauth) {
2655                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2656                 aeadctx->mayverify = VERIFY_HW;
2657         } else {
2658                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2659                 aeadctx->mayverify = VERIFY_SW;
2660         }
2661         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2662 }
2663 
2665 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
2666 {
2667         struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2668
2669         switch (authsize) {
2670         case ICV_4:
2671                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2672                 aeadctx->mayverify = VERIFY_HW;
2673                 break;
2674         case ICV_8:
2675                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2676                 aeadctx->mayverify = VERIFY_HW;
2677                 break;
2678         case ICV_12:
2679                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2680                 aeadctx->mayverify = VERIFY_HW;
2681                 break;
2682         case ICV_14:
2683                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2684                 aeadctx->mayverify = VERIFY_HW;
2685                 break;
2686         case ICV_16:
2687                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2688                 aeadctx->mayverify = VERIFY_HW;
2689                 break;
2690         case ICV_13:
2691         case ICV_15:
2692                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2693                 aeadctx->mayverify = VERIFY_SW;
2694                 break;
2695         default:
2696                 return -EINVAL;
2697         }
2698         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2699 }
2700
2701 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
2702                                           unsigned int authsize)
2703 {
2704         struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2705
2706         switch (authsize) {
2707         case ICV_8:
2708                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2709                 aeadctx->mayverify = VERIFY_HW;
2710                 break;
2711         case ICV_12:
2712                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2713                 aeadctx->mayverify = VERIFY_HW;
2714                 break;
2715         case ICV_16:
2716                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2717                 aeadctx->mayverify = VERIFY_HW;
2718                 break;
2719         default:
2720                 return -EINVAL;
2721         }
2722         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2723 }
2724
2725 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
2726                                 unsigned int authsize)
2727 {
2728         struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2729
2730         switch (authsize) {
2731         case ICV_4:
2732                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2733                 aeadctx->mayverify = VERIFY_HW;
2734                 break;
2735         case ICV_6:
2736                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2737                 aeadctx->mayverify = VERIFY_HW;
2738                 break;
2739         case ICV_8:
2740                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2741                 aeadctx->mayverify = VERIFY_HW;
2742                 break;
2743         case ICV_10:
2744                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2745                 aeadctx->mayverify = VERIFY_HW;
2746                 break;
2747         case ICV_12:
2748                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2749                 aeadctx->mayverify = VERIFY_HW;
2750                 break;
2751         case ICV_14:
2752                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2753                 aeadctx->mayverify = VERIFY_HW;
2754                 break;
2755         case ICV_16:
2756                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2757                 aeadctx->mayverify = VERIFY_HW;
2758                 break;
2759         default:
2760                 return -EINVAL;
2761         }
2762         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2763 }
2764
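/*
 * chcr_ccm_common_setkey - Build the key-context header and cache the AES
 * key for the CCM variants.  Only 128/192/256-bit AES keys are accepted.
 */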
2765 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
2766                                 const u8 *key,
2767                                 unsigned int keylen)
2768 {
2769         struct chcr_context *ctx = crypto_aead_ctx(aead);
2770         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2771         unsigned char ck_size, mk_size;
2772         int key_ctx_size = 0;
2773
2774         key_ctx_size = sizeof(struct _key_ctx) +
2775                 ((DIV_ROUND_UP(keylen, 16)) << 4)  * 2;
2776         if (keylen == AES_KEYSIZE_128) {
2777                 mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2778                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2779         } else if (keylen == AES_KEYSIZE_192) {
2780                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2781                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
2782         } else if (keylen == AES_KEYSIZE_256) {
2783                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2784                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2785         } else {
2786                 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
2787                 aeadctx->enckey_len = 0;
2788                 return  -EINVAL;
2789         }
2790         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
2791                                                 key_ctx_size >> 4);
2792         memcpy(aeadctx->key, key, keylen);
2793         aeadctx->enckey_len = keylen;
2794
2795         return 0;
2796 }
2797
2798 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
2799                                 const u8 *key,
2800                                 unsigned int keylen)
2801 {
2802         struct chcr_context *ctx = crypto_aead_ctx(aead);
2803         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2804         int error;
2805
2806         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2807         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
2808                               CRYPTO_TFM_REQ_MASK);
2809         error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2810         crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2811         crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2812                               CRYPTO_TFM_RES_MASK);
2813         if (error)
2814                 return error;
2815         return chcr_ccm_common_setkey(aead, key, keylen);
2816 }
2817
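/*
 * RFC 4309 keys carry a 3-byte nonce/salt appended to the AES key: strip it,
 * save it for IV construction and program the remaining AES key as usual.
 */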
2818 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
2819                                     unsigned int keylen)
2820 {
2821         struct chcr_context *ctx = crypto_aead_ctx(aead);
2822         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2823         int error;
2824
2825         if (keylen < 3) {
2826                 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
2827                 aeadctx->enckey_len = 0;
2828                 return  -EINVAL;
2829         }
2830         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2831         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
2832                               CRYPTO_TFM_REQ_MASK);
2833         error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2834         crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2835         crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2836                               CRYPTO_TFM_RES_MASK);
2837         if (error)
2838                 return error;
2839         keylen -= 3;
2840         memcpy(aeadctx->salt, key + keylen, 3);
2841         return chcr_ccm_common_setkey(aead, key, keylen);
2842 }
2843
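/*
 * chcr_gcm_setkey - Program a GCM key.  For rfc4106(gcm(aes)) the last four
 * key bytes are the nonce/salt.  The hardware key context holds the AES key
 * followed by the GHASH subkey H = AES_K(0^128), computed below with a
 * software AES cipher.
 */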
2844 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
2845                            unsigned int keylen)
2846 {
2847         struct chcr_context *ctx = crypto_aead_ctx(aead);
2848         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2849         struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
2850         struct crypto_cipher *cipher;
2851         unsigned int ck_size;
2852         int ret = 0, key_ctx_size = 0;
2853
2854         aeadctx->enckey_len = 0;
2855         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2856         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
2857                               & CRYPTO_TFM_REQ_MASK);
2858         ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2859         crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2860         crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2861                               CRYPTO_TFM_RES_MASK);
2862         if (ret)
2863                 goto out;
2864
2865         if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
2866             keylen > 3) {
2867                 keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
2868                 memcpy(aeadctx->salt, key + keylen, 4);
2869         }
2870         if (keylen == AES_KEYSIZE_128) {
2871                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2872         } else if (keylen == AES_KEYSIZE_192) {
2873                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2874         } else if (keylen == AES_KEYSIZE_256) {
2875                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2876         } else {
2877                 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
2878                 pr_err("GCM: Invalid key length %d\n", keylen);
2879                 ret = -EINVAL;
2880                 goto out;
2881         }
2882
2883         memcpy(aeadctx->key, key, keylen);
2884         aeadctx->enckey_len = keylen;
2885         key_ctx_size = sizeof(struct _key_ctx) +
2886                 ((DIV_ROUND_UP(keylen, 16)) << 4) +
2887                 AEAD_H_SIZE;
2888         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
2889                                                 CHCR_KEYCTX_MAC_KEY_SIZE_128,
2890                                                 0, 0,
2891                                                 key_ctx_size >> 4);
2892         /* Calculate the GHASH subkey H = CIPH(K, 0 repeated 16 times).
2893          * It is placed in the key context after the AES key.
2894          */
2895         cipher = crypto_alloc_cipher("aes-generic", 0, 0);
2896         if (IS_ERR(cipher)) {
2897                 aeadctx->enckey_len = 0;
2898                 ret = -ENOMEM;
2899                 goto out;
2900         }
2901
2902         ret = crypto_cipher_setkey(cipher, key, keylen);
2903         if (ret) {
2904                 aeadctx->enckey_len = 0;
2905                 goto out1;
2906         }
2907         memset(gctx->ghash_h, 0, AEAD_H_SIZE);
2908         crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
2909
2910 out1:
2911         crypto_free_cipher(cipher);
2912 out:
2913         return ret;
2914 }
2915
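/*
 * chcr_authenc_setkey - Split an authenc() key into its cipher and MAC
 * parts, program the AES key, and precompute H(ipad)/H(opad) partial hashes
 * so the hardware can complete the HMAC without the raw authentication key.
 */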
2916 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
2917                                    unsigned int keylen)
2918 {
2919         struct chcr_context *ctx = crypto_aead_ctx(authenc);
2920         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2921         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2922         /* holds both the authentication key and the cipher key */
2923         struct crypto_authenc_keys keys;
2924         unsigned int bs;
2925         unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
2926         int err = 0, i, key_ctx_len = 0;
2927         unsigned char ck_size = 0;
2928         unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
2929         struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
2930         struct algo_param param;
2931         int align;
2932         u8 *o_ptr = NULL;
2933
2934         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2935         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
2936                               & CRYPTO_TFM_REQ_MASK);
2937         err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2938         crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
2939         crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
2940                               & CRYPTO_TFM_RES_MASK);
2941         if (err)
2942                 goto out;
2943
2944         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
2945                 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
2946                 goto out;
2947         }
2948
2949         if (get_alg_config(&param, max_authsize)) {
2950                 pr_err("chcr : Unsupported digest size\n");
2951                 goto out;
2952         }
2953         if (keys.enckeylen == AES_KEYSIZE_128) {
2954                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2955         } else if (keys.enckeylen == AES_KEYSIZE_192) {
2956                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2957         } else if (keys.enckeylen == AES_KEYSIZE_256) {
2958                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2959         } else {
2960                 pr_err("chcr : Unsupported cipher key\n");
2961                 goto out;
2962         }
2963
2964         /* Copy only the encryption key. The authentication key is used here
2965          * to generate h(ipad) and h(opad), so it is not needed again.
2966          * authkeylen equals the hash digest size.
2967          */
2968         memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
2969         aeadctx->enckey_len = keys.enckeylen;
2970         get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
2971                             aeadctx->enckey_len << 3);
2972
2973         base_hash  = chcr_alloc_shash(max_authsize);
2974         if (IS_ERR(base_hash)) {
2975                 pr_err("chcr : Base driver cannot be loaded\n");
2976                 aeadctx->enckey_len = 0;
2977                 return -EINVAL;
2978         }
2979         {
2980                 SHASH_DESC_ON_STACK(shash, base_hash);
2981                 shash->tfm = base_hash;
2982                 shash->flags = crypto_shash_get_flags(base_hash);
2983                 bs = crypto_shash_blocksize(base_hash);
2984                 align = KEYCTX_ALIGN_PAD(max_authsize);
2985                 o_ptr =  actx->h_iopad + param.result_size + align;
2986
2987                 if (keys.authkeylen > bs) {
2988                         err = crypto_shash_digest(shash, keys.authkey,
2989                                                   keys.authkeylen,
2990                                                   o_ptr);
2991                         if (err) {
2992                                 pr_err("chcr : Digest of the authentication key failed\n");
2993                                 goto out;
2994                         }
2995                         keys.authkeylen = max_authsize;
2996                 } else
2997                         memcpy(o_ptr, keys.authkey, keys.authkeylen);
2998
2999                 /* Compute the ipad-digest */
3000                 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3001                 memcpy(pad, o_ptr, keys.authkeylen);
3002                 for (i = 0; i < bs >> 2; i++)
3003                         *((unsigned int *)pad + i) ^= IPAD_DATA;
3004
3005                 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3006                                               max_authsize))
3007                         goto out;
3008                 /* Compute the opad-digest */
3009                 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3010                 memcpy(pad, o_ptr, keys.authkeylen);
3011                 for (i = 0; i < bs >> 2; i++)
3012                         *((unsigned int *)pad + i) ^= OPAD_DATA;
3013
3014                 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3015                         goto out;
3016
3017                 /* convert the ipad and opad digest to network order */
3018                 chcr_change_order(actx->h_iopad, param.result_size);
3019                 chcr_change_order(o_ptr, param.result_size);
3020                 key_ctx_len = sizeof(struct _key_ctx) +
3021                         ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
3022                         (param.result_size + align) * 2;
3023                 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3024                                                 0, 1, key_ctx_len >> 4);
3025                 actx->auth_mode = param.auth_mode;
3026                 chcr_free_shash(base_hash);
3027
3028                 return 0;
3029         }
3030 out:
3031         aeadctx->enckey_len = 0;
3032         if (!IS_ERR(base_hash))
3033                 chcr_free_shash(base_hash);
3034         return -EINVAL;
3035 }
3036
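/*
 * chcr_aead_digest_null_setkey - Setkey for authenc(digest_null,cbc(aes)):
 * only the cipher key is programmed and the authentication mode is NOP.
 */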
3037 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3038                                         const u8 *key, unsigned int keylen)
3039 {
3040         struct chcr_context *ctx = crypto_aead_ctx(authenc);
3041         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3042         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3043         /* holds both the authentication key and the cipher key */
3044         struct crypto_authenc_keys keys;
3045         int err;
3046         int key_ctx_len = 0;
3047         unsigned char ck_size = 0;
3048
3049         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3050         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3051                               & CRYPTO_TFM_REQ_MASK);
3052         err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3053         crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3054         crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3055                               & CRYPTO_TFM_RES_MASK);
3056         if (err)
3057                 goto out;
3058
3059         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3060                 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3061                 goto out;
3062         }
3063         if (keys.enckeylen == AES_KEYSIZE_128) {
3064                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3065         } else if (keys.enckeylen == AES_KEYSIZE_192) {
3066                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3067         } else if (keys.enckeylen == AES_KEYSIZE_256) {
3068                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3069         } else {
3070                 pr_err("chcr : Unsupported cipher key\n");
3071                 goto out;
3072         }
3073         memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3074         aeadctx->enckey_len = keys.enckeylen;
3075         get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3076                                     aeadctx->enckey_len << 3);
3077         key_ctx_len =  sizeof(struct _key_ctx)
3078                 + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
3079
3080         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3081                                                 0, key_ctx_len >> 4);
3082         actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3083         return 0;
3084 out:
3085         aeadctx->enckey_len = 0;
3086         return -EINVAL;
3087 }
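
/*
 * chcr_aead_encrypt/chcr_aead_decrypt - Crypto API entry points: pick the
 * work-request constructor that matches the AEAD subtype and hand off to
 * chcr_aead_op().  Decrypt may request software tag verification.
 */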
3088 static int chcr_aead_encrypt(struct aead_request *req)
3089 {
3090         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3091         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3092
3093         reqctx->verify = VERIFY_HW;
3094
3095         switch (get_aead_subtype(tfm)) {
3096         case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
3097         case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
3098                 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3099                                     create_authenc_wr);
3100         case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3101         case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3102                 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3103                                     create_aead_ccm_wr);
3104         default:
3105                 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3106                                     create_gcm_wr);
3107         }
3108 }
3109
3110 static int chcr_aead_decrypt(struct aead_request *req)
3111 {
3112         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3113         struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
3114         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3115         int size;
3116
3117         if (aeadctx->mayverify == VERIFY_SW) {
3118                 size = crypto_aead_maxauthsize(tfm);
3119                 reqctx->verify = VERIFY_SW;
3120         } else {
3121                 size = 0;
3122                 reqctx->verify = VERIFY_HW;
3123         }
3124
3125         switch (get_aead_subtype(tfm)) {
3126         case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
3127         case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
3128                 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3129                                     create_authenc_wr);
3130         case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3131         case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3132                 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3133                                     create_aead_ccm_wr);
3134         default:
3135                 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3136                                     create_gcm_wr);
3137         }
3138 }
3139
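/*
 * chcr_aead_op - Common AEAD submission path: build a work request with the
 * supplied constructor and post it on the transform's transmit queue.
 * Completion is reported asynchronously, so -EINPROGRESS is the normal
 * return value.
 */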
3140 static int chcr_aead_op(struct aead_request *req,
3141                           unsigned short op_type,
3142                           int size,
3143                           create_wr_t create_wr_fn)
3144 {
3145         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3146         struct chcr_context *ctx = crypto_aead_ctx(tfm);
3147         struct uld_ctx *u_ctx;
3148         struct sk_buff *skb;
3149
3150         if (!ctx->dev) {
3151                 pr_err("chcr : %s : No crypto device.\n", __func__);
3152                 return -ENXIO;
3153         }
3154         u_ctx = ULD_CTX(ctx);
3155         if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3156                                    ctx->tx_qidx)) {
3157                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3158                         return -EBUSY;
3159         }
3160
3161         /* Form a WR from req */
3162         skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size,
3163                            op_type);
3164
3165         if (IS_ERR(skb) || !skb)
3166                 return PTR_ERR(skb);
3167
3168         skb->dev = u_ctx->lldi.ports[0];
3169         set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
3170         chcr_send_wr(skb);
3171         return -EINPROGRESS;
3172 }
3173 static struct chcr_alg_template driver_algs[] = {
3174         /* AES-CBC */
3175         {
3176                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3177                 .is_registered = 0,
3178                 .alg.crypto = {
3179                         .cra_name               = "cbc(aes)",
3180                         .cra_driver_name        = "cbc-aes-chcr",
3181                         .cra_blocksize          = AES_BLOCK_SIZE,
3182                         .cra_init               = chcr_cra_init,
3183                         .cra_exit               = chcr_cra_exit,
3184                         .cra_u.ablkcipher       = {
3185                                 .min_keysize    = AES_MIN_KEY_SIZE,
3186                                 .max_keysize    = AES_MAX_KEY_SIZE,
3187                                 .ivsize         = AES_BLOCK_SIZE,
3188                                 .setkey                 = chcr_aes_cbc_setkey,
3189                                 .encrypt                = chcr_aes_encrypt,
3190                                 .decrypt                = chcr_aes_decrypt,
3191                         }
3192                 }
3193         },
3194         {
3195                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3196                 .is_registered = 0,
3197                 .alg.crypto = {
3198                         .cra_name               = "xts(aes)",
3199                         .cra_driver_name        = "xts-aes-chcr",
3200                         .cra_blocksize          = AES_BLOCK_SIZE,
3201                         .cra_init               = chcr_cra_init,
3202                         .cra_exit               = NULL,
3203                         .cra_u.ablkcipher       = {
3204                                 .min_keysize    = 2 * AES_MIN_KEY_SIZE,
3205                                 .max_keysize    = 2 * AES_MAX_KEY_SIZE,
3206                                 .ivsize         = AES_BLOCK_SIZE,
3207                                 .setkey         = chcr_aes_xts_setkey,
3208                                 .encrypt        = chcr_aes_encrypt,
3209                                 .decrypt        = chcr_aes_decrypt,
3210                         }
3211                 }
3212         },
3213         {
3214                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3215                 .is_registered = 0,
3216                 .alg.crypto = {
3217                         .cra_name               = "ctr(aes)",
3218                         .cra_driver_name        = "ctr-aes-chcr",
3219                         .cra_blocksize          = 1,
3220                         .cra_init               = chcr_cra_init,
3221                         .cra_exit               = chcr_cra_exit,
3222                         .cra_u.ablkcipher       = {
3223                                 .min_keysize    = AES_MIN_KEY_SIZE,
3224                                 .max_keysize    = AES_MAX_KEY_SIZE,
3225                                 .ivsize         = AES_BLOCK_SIZE,
3226                                 .setkey         = chcr_aes_ctr_setkey,
3227                                 .encrypt        = chcr_aes_encrypt,
3228                                 .decrypt        = chcr_aes_decrypt,
3229                         }
3230                 }
3231         },
3232         {
3233                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3234                         CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3235                 .is_registered = 0,
3236                 .alg.crypto = {
3237                         .cra_name               = "rfc3686(ctr(aes))",
3238                         .cra_driver_name        = "rfc3686-ctr-aes-chcr",
3239                         .cra_blocksize          = 1,
3240                         .cra_init               = chcr_rfc3686_init,
3241                         .cra_exit               = chcr_cra_exit,
3242                         .cra_u.ablkcipher       = {
3243                                 .min_keysize    = AES_MIN_KEY_SIZE +
3244                                         CTR_RFC3686_NONCE_SIZE,
3245                                 .max_keysize    = AES_MAX_KEY_SIZE +
3246                                         CTR_RFC3686_NONCE_SIZE,
3247                                 .ivsize         = CTR_RFC3686_IV_SIZE,
3248                                 .setkey         = chcr_aes_rfc3686_setkey,
3249                                 .encrypt        = chcr_aes_encrypt,
3250                                 .decrypt        = chcr_aes_decrypt,
3251                                 .geniv          = "seqiv",
3252                         }
3253                 }
3254         },
3255         /* SHA */
3256         {
3257                 .type = CRYPTO_ALG_TYPE_AHASH,
3258                 .is_registered = 0,
3259                 .alg.hash = {
3260                         .halg.digestsize = SHA1_DIGEST_SIZE,
3261                         .halg.base = {
3262                                 .cra_name = "sha1",
3263                                 .cra_driver_name = "sha1-chcr",
3264                                 .cra_blocksize = SHA1_BLOCK_SIZE,
3265                         }
3266                 }
3267         },
3268         {
3269                 .type = CRYPTO_ALG_TYPE_AHASH,
3270                 .is_registered = 0,
3271                 .alg.hash = {
3272                         .halg.digestsize = SHA256_DIGEST_SIZE,
3273                         .halg.base = {
3274                                 .cra_name = "sha256",
3275                                 .cra_driver_name = "sha256-chcr",
3276                                 .cra_blocksize = SHA256_BLOCK_SIZE,
3277                         }
3278                 }
3279         },
3280         {
3281                 .type = CRYPTO_ALG_TYPE_AHASH,
3282                 .is_registered = 0,
3283                 .alg.hash = {
3284                         .halg.digestsize = SHA224_DIGEST_SIZE,
3285                         .halg.base = {
3286                                 .cra_name = "sha224",
3287                                 .cra_driver_name = "sha224-chcr",
3288                                 .cra_blocksize = SHA224_BLOCK_SIZE,
3289                         }
3290                 }
3291         },
3292         {
3293                 .type = CRYPTO_ALG_TYPE_AHASH,
3294                 .is_registered = 0,
3295                 .alg.hash = {
3296                         .halg.digestsize = SHA384_DIGEST_SIZE,
3297                         .halg.base = {
3298                                 .cra_name = "sha384",
3299                                 .cra_driver_name = "sha384-chcr",
3300                                 .cra_blocksize = SHA384_BLOCK_SIZE,
3301                         }
3302                 }
3303         },
3304         {
3305                 .type = CRYPTO_ALG_TYPE_AHASH,
3306                 .is_registered = 0,
3307                 .alg.hash = {
3308                         .halg.digestsize = SHA512_DIGEST_SIZE,
3309                         .halg.base = {
3310                                 .cra_name = "sha512",
3311                                 .cra_driver_name = "sha512-chcr",
3312                                 .cra_blocksize = SHA512_BLOCK_SIZE,
3313                         }
3314                 }
3315         },
3316         /* HMAC */
3317         {
3318                 .type = CRYPTO_ALG_TYPE_HMAC,
3319                 .is_registered = 0,
3320                 .alg.hash = {
3321                         .halg.digestsize = SHA1_DIGEST_SIZE,
3322                         .halg.base = {
3323                                 .cra_name = "hmac(sha1)",
3324                                 .cra_driver_name = "hmac-sha1-chcr",
3325                                 .cra_blocksize = SHA1_BLOCK_SIZE,
3326                         }
3327                 }
3328         },
3329         {
3330                 .type = CRYPTO_ALG_TYPE_HMAC,
3331                 .is_registered = 0,
3332                 .alg.hash = {
3333                         .halg.digestsize = SHA224_DIGEST_SIZE,
3334                         .halg.base = {
3335                                 .cra_name = "hmac(sha224)",
3336                                 .cra_driver_name = "hmac-sha224-chcr",
3337                                 .cra_blocksize = SHA224_BLOCK_SIZE,
3338                         }
3339                 }
3340         },
3341         {
3342                 .type = CRYPTO_ALG_TYPE_HMAC,
3343                 .is_registered = 0,
3344                 .alg.hash = {
3345                         .halg.digestsize = SHA256_DIGEST_SIZE,
3346                         .halg.base = {
3347                                 .cra_name = "hmac(sha256)",
3348                                 .cra_driver_name = "hmac-sha256-chcr",
3349                                 .cra_blocksize = SHA256_BLOCK_SIZE,
3350                         }
3351                 }
3352         },
3353         {
3354                 .type = CRYPTO_ALG_TYPE_HMAC,
3355                 .is_registered = 0,
3356                 .alg.hash = {
3357                         .halg.digestsize = SHA384_DIGEST_SIZE,
3358                         .halg.base = {
3359                                 .cra_name = "hmac(sha384)",
3360                                 .cra_driver_name = "hmac-sha384-chcr",
3361                                 .cra_blocksize = SHA384_BLOCK_SIZE,
3362                         }
3363                 }
3364         },
3365         {
3366                 .type = CRYPTO_ALG_TYPE_HMAC,
3367                 .is_registered = 0,
3368                 .alg.hash = {
3369                         .halg.digestsize = SHA512_DIGEST_SIZE,
3370                         .halg.base = {
3371                                 .cra_name = "hmac(sha512)",
3372                                 .cra_driver_name = "hmac-sha512-chcr",
3373                                 .cra_blocksize = SHA512_BLOCK_SIZE,
3374                         }
3375                 }
3376         },
3377         /* Add AEAD Algorithms */
3378         {
3379                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3380                 .is_registered = 0,
3381                 .alg.aead = {
3382                         .base = {
3383                                 .cra_name = "gcm(aes)",
3384                                 .cra_driver_name = "gcm-aes-chcr",
3385                                 .cra_blocksize  = 1,
3386                                 .cra_priority = CHCR_AEAD_PRIORITY,
3387                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3388                                                 sizeof(struct chcr_aead_ctx) +
3389                                                 sizeof(struct chcr_gcm_ctx),
3390                         },
3391                         .ivsize = 12,
3392                         .maxauthsize = GHASH_DIGEST_SIZE,
3393                         .setkey = chcr_gcm_setkey,
3394                         .setauthsize = chcr_gcm_setauthsize,
3395                 }
3396         },
3397         {
3398                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3399                 .is_registered = 0,
3400                 .alg.aead = {
3401                         .base = {
3402                                 .cra_name = "rfc4106(gcm(aes))",
3403                                 .cra_driver_name = "rfc4106-gcm-aes-chcr",
3404                                 .cra_blocksize   = 1,
3405                                 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3406                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3407                                                 sizeof(struct chcr_aead_ctx) +
3408                                                 sizeof(struct chcr_gcm_ctx),
3409
3410                         },
3411                         .ivsize = 8,
3412                         .maxauthsize    = GHASH_DIGEST_SIZE,
3413                         .setkey = chcr_gcm_setkey,
3414                         .setauthsize    = chcr_4106_4309_setauthsize,
3415                 }
3416         },
3417         {
3418                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3419                 .is_registered = 0,
3420                 .alg.aead = {
3421                         .base = {
3422                                 .cra_name = "ccm(aes)",
3423                                 .cra_driver_name = "ccm-aes-chcr",
3424                                 .cra_blocksize   = 1,
3425                                 .cra_priority = CHCR_AEAD_PRIORITY,
3426                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3427                                                 sizeof(struct chcr_aead_ctx),
3428
3429                         },
3430                         .ivsize = AES_BLOCK_SIZE,
3431                         .maxauthsize    = GHASH_DIGEST_SIZE,
3432                         .setkey = chcr_aead_ccm_setkey,
3433                         .setauthsize    = chcr_ccm_setauthsize,
3434                 }
3435         },
3436         {
3437                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
3438                 .is_registered = 0,
3439                 .alg.aead = {
3440                         .base = {
3441                                 .cra_name = "rfc4309(ccm(aes))",
3442                                 .cra_driver_name = "rfc4309-ccm-aes-chcr",
3443                                 .cra_blocksize   = 1,
3444                                 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3445                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3446                                                 sizeof(struct chcr_aead_ctx),
3447
3448                         },
3449                         .ivsize = 8,
3450                         .maxauthsize    = GHASH_DIGEST_SIZE,
3451                         .setkey = chcr_aead_rfc4309_setkey,
3452                         .setauthsize = chcr_4106_4309_setauthsize,
3453                 }
3454         },
3455         {
3456                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
3457                 .is_registered = 0,
3458                 .alg.aead = {
3459                         .base = {
3460                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3461                                 .cra_driver_name =
3462                                         "authenc-hmac-sha1-cbc-aes-chcr",
3463                                 .cra_blocksize   = AES_BLOCK_SIZE,
3464                                 .cra_priority = CHCR_AEAD_PRIORITY,
3465                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3466                                                 sizeof(struct chcr_aead_ctx) +
3467                                                 sizeof(struct chcr_authenc_ctx),
3468
3469                         },
3470                         .ivsize = AES_BLOCK_SIZE,
3471                         .maxauthsize = SHA1_DIGEST_SIZE,
3472                         .setkey = chcr_authenc_setkey,
3473                         .setauthsize = chcr_authenc_setauthsize,
3474                 }
3475         },
3476         {
3477                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
3478                 .is_registered = 0,
3479                 .alg.aead = {
3480                         .base = {
3481
3482                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3483                                 .cra_driver_name =
3484                                         "authenc-hmac-sha256-cbc-aes-chcr",
3485                                 .cra_blocksize   = AES_BLOCK_SIZE,
3486                                 .cra_priority = CHCR_AEAD_PRIORITY,
3487                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3488                                                 sizeof(struct chcr_aead_ctx) +
3489                                                 sizeof(struct chcr_authenc_ctx),
3490
3491                         },
3492                         .ivsize = AES_BLOCK_SIZE,
3493                         .maxauthsize    = SHA256_DIGEST_SIZE,
3494                         .setkey = chcr_authenc_setkey,
3495                         .setauthsize = chcr_authenc_setauthsize,
3496                 }
3497         },
3498         {
3499                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
3500                 .is_registered = 0,
3501                 .alg.aead = {
3502                         .base = {
3503                                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
3504                                 .cra_driver_name =
3505                                         "authenc-hmac-sha224-cbc-aes-chcr",
3506                                 .cra_blocksize   = AES_BLOCK_SIZE,
3507                                 .cra_priority = CHCR_AEAD_PRIORITY,
3508                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3509                                                 sizeof(struct chcr_aead_ctx) +
3510                                                 sizeof(struct chcr_authenc_ctx),
3511                         },
3512                         .ivsize = AES_BLOCK_SIZE,
3513                         .maxauthsize = SHA224_DIGEST_SIZE,
3514                         .setkey = chcr_authenc_setkey,
3515                         .setauthsize = chcr_authenc_setauthsize,
3516                 }
3517         },
3518         {
3519                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
3520                 .is_registered = 0,
3521                 .alg.aead = {
3522                         .base = {
3523                                 .cra_name = "authenc(hmac(sha384),cbc(aes))",
3524                                 .cra_driver_name =
3525                                         "authenc-hmac-sha384-cbc-aes-chcr",
3526                                 .cra_blocksize   = AES_BLOCK_SIZE,
3527                                 .cra_priority = CHCR_AEAD_PRIORITY,
3528                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3529                                                 sizeof(struct chcr_aead_ctx) +
3530                                                 sizeof(struct chcr_authenc_ctx),
3531
3532                         },
3533                         .ivsize = AES_BLOCK_SIZE,
3534                         .maxauthsize = SHA384_DIGEST_SIZE,
3535                         .setkey = chcr_authenc_setkey,
3536                         .setauthsize = chcr_authenc_setauthsize,
3537                 }
3538         },
3539         {
3540                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
3541                 .is_registered = 0,
3542                 .alg.aead = {
3543                         .base = {
3544                                 .cra_name = "authenc(hmac(sha512),cbc(aes))",
3545                                 .cra_driver_name =
3546                                         "authenc-hmac-sha512-cbc-aes-chcr",
3547                                 .cra_blocksize   = AES_BLOCK_SIZE,
3548                                 .cra_priority = CHCR_AEAD_PRIORITY,
3549                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3550                                                 sizeof(struct chcr_aead_ctx) +
3551                                                 sizeof(struct chcr_authenc_ctx),
3552
3553                         },
3554                         .ivsize = AES_BLOCK_SIZE,
3555                         .maxauthsize = SHA512_DIGEST_SIZE,
3556                         .setkey = chcr_authenc_setkey,
3557                         .setauthsize = chcr_authenc_setauthsize,
3558                 }
3559         },
3560         {
3561                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
3562                 .is_registered = 0,
3563                 .alg.aead = {
3564                         .base = {
3565                                 .cra_name = "authenc(digest_null,cbc(aes))",
3566                                 .cra_driver_name =
3567                                         "authenc-digest_null-cbc-aes-chcr",
3568                                 .cra_blocksize   = AES_BLOCK_SIZE,
3569                                 .cra_priority = CHCR_AEAD_PRIORITY,
3570                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3571                                                 sizeof(struct chcr_aead_ctx) +
3572                                                 sizeof(struct chcr_authenc_ctx),
3573
3574                         },
3575                         .ivsize  = AES_BLOCK_SIZE,
3576                         .maxauthsize = 0,
3577                         .setkey  = chcr_aead_digest_null_setkey,
3578                         .setauthsize = chcr_authenc_null_setauthsize,
3579                 }
3580         },
3581 };
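
/*
 * Usage sketch (illustrative, not part of the driver): once registered,
 * these algorithms are reached through the normal kernel crypto API, e.g.
 *
 *      struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 * "gcm-aes-chcr" is selected when its cra_priority wins over other
 * implementations of the same algorithm.
 */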
3582
3583 /*
3584  *      chcr_unregister_alg - Deregister the crypto algorithms from the
3585  *      kernel framework.
3586  */
3587 static int chcr_unregister_alg(void)
3588 {
3589         int i;
3590
3591         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3592                 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
3593                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3594                         if (driver_algs[i].is_registered)
3595                                 crypto_unregister_alg(
3596                                                 &driver_algs[i].alg.crypto);
3597                         break;
3598                 case CRYPTO_ALG_TYPE_AEAD:
3599                         if (driver_algs[i].is_registered)
3600                                 crypto_unregister_aead(
3601                                                 &driver_algs[i].alg.aead);
3602                         break;
3603                 case CRYPTO_ALG_TYPE_AHASH:
3604                         if (driver_algs[i].is_registered)
3605                                 crypto_unregister_ahash(
3606                                                 &driver_algs[i].alg.hash);
3607                         break;
3608                 }
3609                 driver_algs[i].is_registered = 0;
3610         }
3611         return 0;
3612 }
3613
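/* Context and state sizes used when registering the ahash algorithms */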
3614 #define SZ_AHASH_CTX sizeof(struct chcr_context)
3615 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
3616 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
3617 #define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
3618
3619 /*
3620  *      chcr_register_alg - Register crypto algorithms with kernel framework.
3621  */
3622 static int chcr_register_alg(void)
3623 {
3624         struct crypto_alg ai;
3625         struct ahash_alg *a_hash;
3626         int err = 0, i;
3627         char *name = NULL;
3628
3629         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3630                 if (driver_algs[i].is_registered)
3631                         continue;
3632                 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
3633                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3634                         driver_algs[i].alg.crypto.cra_priority =
3635                                 CHCR_CRA_PRIORITY;
3636                         driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
3637                         driver_algs[i].alg.crypto.cra_flags =
3638                                 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
3639                                 CRYPTO_ALG_NEED_FALLBACK;
3640                         driver_algs[i].alg.crypto.cra_ctxsize =
3641                                 sizeof(struct chcr_context) +
3642                                 sizeof(struct ablk_ctx);
3643                         driver_algs[i].alg.crypto.cra_alignmask = 0;
3644                         driver_algs[i].alg.crypto.cra_type =
3645                                 &crypto_ablkcipher_type;
3646                         err = crypto_register_alg(&driver_algs[i].alg.crypto);
3647                         name = driver_algs[i].alg.crypto.cra_driver_name;
3648                         break;
3649                 case CRYPTO_ALG_TYPE_AEAD:
3650                         driver_algs[i].alg.aead.base.cra_flags =
3651                                 CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
3652                                 CRYPTO_ALG_NEED_FALLBACK;
3653                         driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
3654                         driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
3655                         driver_algs[i].alg.aead.init = chcr_aead_cra_init;
3656                         driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
3657                         driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
3658                         err = crypto_register_aead(&driver_algs[i].alg.aead);
3659                         name = driver_algs[i].alg.aead.base.cra_driver_name;
3660                         break;
3661                 case CRYPTO_ALG_TYPE_AHASH:
3662                         a_hash = &driver_algs[i].alg.hash;
3663                         a_hash->update = chcr_ahash_update;
3664                         a_hash->final = chcr_ahash_final;
3665                         a_hash->finup = chcr_ahash_finup;
3666                         a_hash->digest = chcr_ahash_digest;
3667                         a_hash->export = chcr_ahash_export;
3668                         a_hash->import = chcr_ahash_import;
3669                         a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
3670                         a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
3671                         a_hash->halg.base.cra_module = THIS_MODULE;
3672                         a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
3673                         a_hash->halg.base.cra_alignmask = 0;
3674                         a_hash->halg.base.cra_exit = NULL;
3675                         a_hash->halg.base.cra_type = &crypto_ahash_type;
3676
3677                         if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
3678                                 a_hash->halg.base.cra_init = chcr_hmac_cra_init;
3679                                 a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
3680                                 a_hash->init = chcr_hmac_init;
3681                                 a_hash->setkey = chcr_ahash_setkey;
3682                                 a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
3683                         } else {
3684                                 a_hash->init = chcr_sha_init;
3685                                 a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
3686                                 a_hash->halg.base.cra_init = chcr_sha_cra_init;
3687                         }
3688                         err = crypto_register_ahash(&driver_algs[i].alg.hash);
3689                         ai = driver_algs[i].alg.hash.halg.base;
3690                         name = ai.cra_driver_name;
3691                         break;
3692                 }
3693                 if (err) {
3694                         pr_err("chcr : %s : Algorithm registration failed\n",
3695                                name);
3696                         goto register_err;
3697                 } else {
3698                         driver_algs[i].is_registered = 1;
3699                 }
3700         }
3701         return 0;
3702
3703 register_err:
3704         chcr_unregister_alg();
3705         return err;
3706 }
3707
3708 /*
3709  *      start_crypto - Register the crypto algorithms.
3710  *      This should be called once when the first device comes up. After this
3711  *      the kernel will start calling the driver APIs for crypto operations.
3712  */
3713 int start_crypto(void)
3714 {
3715         return chcr_register_alg();
3716 }
3717
3718 /*
3719  *      stop_crypto - Deregister all the crypto algorithms with kernel.
3720  *      This should be called once when the last device goes down. After this
3721  *      the kernel will not call the driver APIs for crypto operations.
3722  */
3723 int stop_crypto(void)
3724 {
3725         chcr_unregister_alg();
3726         return 0;
3727 }