/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson   <andros@umich.edu>
 *  Bruce Fields   <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  FundsXpress makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_AUTH
#endif
u32
krb5_encrypt(
	struct crypto_sync_skcipher *tfm,
	void * iv,
	void * in,
	void * out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
			crypto_sync_skcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));

	/* encrypt in place: copy the cleartext into the output buffer
	 * first, then run the cipher over that single sg entry */
	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC:       krb5_encrypt returns %d\n", ret);
	return ret;
}
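/*
 * Illustrative usage sketch (not part of this file): callers such as the
 * sequence-number code pass a per-context cipher and encrypt one block in
 * place.  The variable names below are hypothetical.
 *
 *	u8 block[8];	// one DES block, already block-aligned
 *	u32 err = krb5_encrypt(kctx->seq, NULL, block, block, sizeof(block));
 *	// iv == NULL selects an all-zero IV; in == out is safe because the
 *	// data is copied into the output buffer before encryption.
 */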
u32
krb5_decrypt(
	struct crypto_sync_skcipher *tfm,
	void * iv,
	void * in,
	void * out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC:       gss_k5decrypt: tfm iv size too large %d\n",
			crypto_sync_skcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC:       gss_k5decrypt returns %d\n", ret);
	return ret;
}
static int
checksummer(struct scatterlist *sg, void *data)
{
	struct ahash_request *req = data;

	ahash_request_set_crypt(req, sg, NULL, sg->length);

	return crypto_ahash_update(req);
}
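/*
 * checksummer() is the per-fragment callback handed to xdr_process_buf(),
 * which walks an xdr_buf as head iovec, then page fragments, then tail
 * iovec, invoking the callback on a scatterlist entry for each piece.
 * A sketch of the calling pattern, for illustration only:
 *
 *	err = xdr_process_buf(buf, 0, buf->len, checksummer, req);
 *	// equivalent to crypto_ahash_update() over head, pages, and tail
 */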
/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * The checksum is performed over the first 8 bytes of the
 * gss token header and then over the data body
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
	      struct xdr_buf *body, int body_offset, u8 *cksumkey,
	      unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;
	unsigned int checksumlen;

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_KERNEL);
	if (checksumdata == NULL)
		return GSS_S_FAILURE;

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto out_free_cksum;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ahash;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	checksumlen = crypto_ahash_digestsize(tfm);

	if (cksumkey != NULL) {
		err = crypto_ahash_setkey(tfm, cksumkey,
					  kctx->gk5e->keylength);
		if (err)
			goto out;
	}

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	ahash_request_set_crypt(req, sg, NULL, hdrlen);
	err = crypto_ahash_update(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_RSA_MD5:
		err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
					  checksumdata, checksumlen);
		if (err)
			goto out;
		memcpy(cksumout->data,
		       checksumdata + checksumlen - kctx->gk5e->cksumlength,
		       kctx->gk5e->cksumlength);
		break;
	case CKSUMTYPE_HMAC_SHA1_DES3:
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
	cksumout->len = kctx->gk5e->cksumlength;
out:
	ahash_request_free(req);
out_free_ahash:
	crypto_free_ahash(tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}
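/*
 * Hypothetical call, for illustration only: computing a v1 MIC checksum
 * over an 8-byte token header "hdr" and a message body "buf":
 *
 *	struct xdr_netobj md5cksum = { .len = sizeof(cksumdata),
 *				       .data = cksumdata };
 *	err = make_checksum(kctx, hdr, 8, buf, 0, NULL, usage, &md5cksum);
 *	// cksumkey == NULL: unkeyed digest (RSA_MD5 case above), which is
 *	// then DES-encrypted with kctx->seq before truncation
 */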
/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * Per rfc4121, sec. 4.2.4, the checksum is performed over the data
 * body first, then over the first 16 octets of the MIC token
 * Inclusion of the header data in the calculation of the
 * checksum is optional.
 */
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
		 struct xdr_buf *body, int body_offset, u8 *cksumkey,
		 unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;

	if (kctx->gk5e->keyed_cksum == 0) {
		dprintk("%s: expected keyed hash for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}
	if (cksumkey == NULL) {
		dprintk("%s: no key supplied for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_KERNEL);
	if (!checksumdata)
		return GSS_S_FAILURE;

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto out_free_cksum;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ahash;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_setkey(tfm, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	if (header != NULL) {
		sg_init_one(sg, header, hdrlen);
		ahash_request_set_crypt(req, sg, NULL, hdrlen);
		err = crypto_ahash_update(req);
		if (err)
			goto out;
	}
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	cksumout->len = kctx->gk5e->cksumlength;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_HMAC_SHA1_96_AES128:
	case CKSUMTYPE_HMAC_SHA1_96_AES256:
		/* note that this truncates the hash */
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
out:
	ahash_request_free(req);
out_free_ahash:
	crypto_free_ahash(tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}
struct encryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	int pos;
	struct xdr_buf *outbuf;
	struct page **pages;
	struct scatterlist infrags[4];
	struct scatterlist outfrags[4];
	int fragno;
	int fraglen;
};
static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct crypto_sync_skcipher *tfm =
		crypto_sync_skcipher_reqtfm(desc->req);
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);

	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	/* only encrypt up to a cipher-block boundary; any remainder is
	 * carried over into the next invocation */
	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags,
				   thislen, desc->iv);

	ret = crypto_skcipher_encrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}
int
gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
		    int offset, struct page **pages)
{
	int ret;
	struct encryptor_desc desc;
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.pos = offset;
	desc.outbuf = buf;
	desc.pages = pages;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.infrags, 4);
	sg_init_table(desc.outfrags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}
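/*
 * Illustrative only: the v1 (DES/DES3) wrap path encrypts everything
 * from the confounder to the end of the buffer in one pass, so a
 * hypothetical caller looks like:
 *
 *	// buf->len - offset must already be block-aligned (see BUG_ON)
 *	err = gss_encrypt_xdr_buf(kctx->enc, buf, offset, pages);
 */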
struct decryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	struct scatterlist frags[4];
	int fragno;
	int fraglen;
};
static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	struct crypto_sync_skcipher *tfm =
		crypto_sync_skcipher_reqtfm(desc->req);
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	/* as in encryptor(): stop at a block boundary, carry the rest */
	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,
				   thislen, desc->iv);

	ret = crypto_skcipher_decrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}
int
gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
		    int offset)
{
	int ret;
	struct decryptor_desc desc;
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.frags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}
/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap().  The best we can do is
 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run-time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */
static int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
	u8 *p;

	if (shiftlen == 0)
		return 0;

	BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
	BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

	p = buf->head[0].iov_base + base;

	memmove(p + shiftlen, p, buf->head[0].iov_len - base);

	buf->head[0].iov_len += shiftlen;
	buf->len += shiftlen;

	return 0;
}
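/*
 * Worked example (illustrative): with base == GSS_KRB5_TOK_HDR_LEN and
 * shiftlen == conflen, the head data beyond the token header slides
 * right by conflen bytes, opening a hole for the confounder:
 *
 *	before:  | tok hdr | rpc data ............ |
 *	after:   | tok hdr | <conflen hole> | rpc data ............ |
 *
 * The slack the callers guarantee (RPC_MAX_AUTH_SIZE) absorbs the
 * growth of head[0].iov_len.
 */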
static u32
gss_krb5_cts_crypt(struct crypto_sync_skcipher *cipher, struct xdr_buf *buf,
		   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
	u32 ret;
	struct scatterlist sg[1];
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, cipher);
	u8 *data;
	struct page **save_pages;
	u32 len = buf->len - offset;

	if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
		WARN_ON(0);
		return -ENOMEM;
	}
	data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/*
	 * For encryption, we want to read from the cleartext
	 * page cache pages, and write the encrypted data to
	 * the supplied xdr_buf pages.
	 */
	save_pages = buf->pages;
	if (encrypt)
		buf->pages = pages;

	ret = read_bytes_from_xdr_buf(buf, offset, data, len);
	buf->pages = save_pages;
	if (ret)
		goto out;

	sg_init_one(sg, data, len);

	skcipher_request_set_sync_tfm(req, cipher);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, iv);

	if (encrypt)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);

	if (ret)
		goto out;

	ret = write_bytes_to_xdr_buf(buf, offset, data, len);

out:
	kfree(data);
	return ret;
}
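/*
 * Why ciphertext stealing (CTS) here: RFC 3962 AES encryption uses
 * CBC-CTS, which produces ciphertext exactly as long as the plaintext,
 * with no padding.  Only the final one or two blocks need the CTS swap,
 * so the callers below run plain CBC (the "aux" cipher) over everything
 * up to the last two blocks and hand just the remainder, with the
 * chained IV, to gss_krb5_cts_crypt().  The main cipher is expected to
 * implement the CTS mode itself (e.g. "cts(cbc(aes))").
 */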
u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	u32 err;
	struct xdr_netobj hmac;
	u8 *cksumkey;
	u8 *ecptr;
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	int blocksize;
	struct page **save_pages;
	int nblocks, nbytes;
	struct encryptor_desc desc;
	u32 cbcbytes;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksumkey = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	} else {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksumkey = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	}
	blocksize = crypto_sync_skcipher_blocksize(cipher);

	/* hide the gss token header and insert the confounder */
	offset += GSS_KRB5_TOK_HDR_LEN;
	if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
		return GSS_S_FAILURE;
	gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
	offset -= GSS_KRB5_TOK_HDR_LEN;

	if (buf->tail[0].iov_base != NULL) {
		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
	} else {
		buf->tail[0].iov_base = buf->head[0].iov_base
							+ buf->head[0].iov_len;
		buf->tail[0].iov_len = 0;
		ecptr = buf->tail[0].iov_base;
	}

	/* copy plaintext gss token header after filler (if any) */
	memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
	buf->len += GSS_KRB5_TOK_HDR_LEN;

	/* Do the HMAC */
	hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

	/*
	 * When we are called, pages points to the real page cache
	 * data -- which we can't go and encrypt!  buf->pages points
	 * to scratch pages which we are going to send off to the
	 * client/server.  Swap in the plaintext pages to calculate
	 * the hmac.
	 */
	save_pages = buf->pages;
	buf->pages = pages;

	err = make_checksum_v2(kctx, NULL, 0, buf,
			       offset + GSS_KRB5_TOK_HDR_LEN,
			       cksumkey, usage, &hmac);
	buf->pages = save_pages;
	if (err)
		return GSS_S_FAILURE;

	nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
	nblocks = (nbytes + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

		desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.pages = pages;
		desc.outbuf = buf;
		desc.req = req;

		skcipher_request_set_sync_tfm(req, aux_cipher);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.infrags, 4);
		sg_init_table(desc.outfrags, 4);

		err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
				      cbcbytes, encryptor, &desc);
		skcipher_request_zero(req);
		if (err)
			return GSS_S_FAILURE;
	}

	/* Make sure IV carries forward from any CBC results. */
	err = gss_krb5_cts_crypt(cipher, buf,
				 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
				 desc.iv, pages, 1);
	if (err)
		return GSS_S_FAILURE;

	/* Now update buf to account for HMAC */
	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
	buf->len += kctx->gk5e->cksumlength;

	return GSS_S_COMPLETE;
}
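/*
 * Resulting v2 wrap token layout on the wire (per RFC 4121, sec. 4.2.4,
 * sketched from the code above):
 *
 *	| 16-byte token hdr | encrypt(confounder | rpc data |
 *	|                   |         copy of token hdr)    | HMAC |
 *
 * The trailing HMAC is computed over the plaintext (confounder, data,
 * header copy) and is not itself encrypted.
 */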
u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
		     struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
{
	struct xdr_buf subbuf;
	u32 ret = 0;
	u8 *cksum_key;
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	struct xdr_netobj our_hmac_obj;
	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	int nblocks, blocksize, cbcbytes;
	struct decryptor_desc desc;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksum_key = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	} else {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksum_key = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	}
	blocksize = crypto_sync_skcipher_blocksize(cipher);

	/* create a segment skipping the header and leaving out the checksum */
	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
				    (len - offset - GSS_KRB5_TOK_HDR_LEN -
				     kctx->gk5e->cksumlength));

	nblocks = (subbuf.len + blocksize - 1) / blocksize;

	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

		desc.fragno = 0;
		desc.fraglen = 0;
		desc.req = req;

		skcipher_request_set_sync_tfm(req, aux_cipher);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.frags, 4);

		ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
		skcipher_request_zero(req);
		if (ret)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
	if (ret)
		goto out_err;

	/* Calculate our hmac over the plaintext data */
	our_hmac_obj.len = sizeof(our_hmac);
	our_hmac_obj.data = our_hmac;

	ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
			       cksum_key, usage, &our_hmac_obj);
	if (ret)
		goto out_err;

	/* Get the packet's hmac value */
	ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;

	/* compare in constant time to avoid leaking a mismatch via timing */
	if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}
	*headskip = kctx->gk5e->conflen;
	*tailskip = kctx->gk5e->cksumlength;
out_err:
	if (ret && ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}