1 // SPDX-License-Identifier: GPL-2.0
8 #include <linux/crc32c.h>
9 #include <linux/crypto.h>
10 #include <linux/xxhash.h>
11 #include <linux/key.h>
12 #include <linux/random.h>
13 #include <linux/scatterlist.h>
14 #include <crypto/algapi.h>
15 #include <crypto/chacha.h>
16 #include <crypto/hash.h>
17 #include <crypto/poly1305.h>
18 #include <crypto/skcipher.h>
19 #include <keys/user-type.h>
 * struct bch2_checksum_state is an abstraction of the checksum state
 * calculated over different pages. It allows a checksum to be built up
 * incrementally, page by page, without the algorithm losing its state.
 * For native checksum algorithms (like CRC), a default seed value will do;
 * for hash-like algorithms, a full state needs to be stored.
28 struct bch2_checksum_state {
31 struct xxh64_state h64state;
36 static void bch2_checksum_init(struct bch2_checksum_state *state)
38 switch (state->type) {
44 case BCH_CSUM_crc32c_nonzero:
45 state->seed = U32_MAX;
47 case BCH_CSUM_crc64_nonzero:
48 state->seed = U64_MAX;
51 xxh64_reset(&state->h64state, 0);
58 static u64 bch2_checksum_final(const struct bch2_checksum_state *state)
60 switch (state->type) {
65 case BCH_CSUM_crc32c_nonzero:
66 return state->seed ^ U32_MAX;
67 case BCH_CSUM_crc64_nonzero:
68 return state->seed ^ U64_MAX;
70 return xxh64_digest(&state->h64state);
76 static void bch2_checksum_update(struct bch2_checksum_state *state, const void *data, size_t len)
78 switch (state->type) {
81 case BCH_CSUM_crc32c_nonzero:
83 state->seed = crc32c(state->seed, data, len);
85 case BCH_CSUM_crc64_nonzero:
87 state->seed = crc64_be(state->seed, data, len);
90 xxh64_update(&state->h64state, data, len);
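/*
 * Usage sketch (illustrative only, not called anywhere): the three helpers
 * above compose into an incremental checksum over multiple buffers. Note
 * that ->type must be set before bch2_checksum_init() is called, since
 * init switches on it.
 *
 *	struct bch2_checksum_state state;
 *
 *	state.type = BCH_CSUM_crc32c;
 *	bch2_checksum_init(&state);
 *	bch2_checksum_update(&state, buf_a, len_a);
 *	bch2_checksum_update(&state, buf_b, len_b);
 *	csum = bch2_checksum_final(&state);
 */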
97 static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
99 struct scatterlist *sg, size_t len)
101 SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
104 skcipher_request_set_sync_tfm(req, tfm);
105 skcipher_request_set_callback(req, 0, NULL, NULL);
106 skcipher_request_set_crypt(req, sg, sg, len, nonce.d);
108 ret = crypto_skcipher_encrypt(req);
110 pr_err("got error %i from crypto_skcipher_encrypt()", ret);
115 static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
117 void *buf, size_t len)
119 if (!is_vmalloc_addr(buf)) {
120 struct scatterlist sg;
122 sg_init_table(&sg, 1);
125 ? vmalloc_to_page(buf)
127 len, offset_in_page(buf));
128 return do_encrypt_sg(tfm, nonce, &sg, len);
130 unsigned pages = buf_pages(buf, len);
131 struct scatterlist *sg;
132 size_t orig_len = len;
135 sg = kmalloc_array(pages, sizeof(*sg), GFP_KERNEL);
137 return -BCH_ERR_ENOMEM_do_encrypt;
139 sg_init_table(sg, pages);
141 for (i = 0; i < pages; i++) {
142 unsigned offset = offset_in_page(buf);
143 unsigned pg_len = min_t(size_t, len, PAGE_SIZE - offset);
145 sg_set_page(sg + i, vmalloc_to_page(buf), pg_len, offset);
150 ret = do_encrypt_sg(tfm, nonce, sg, orig_len);
156 int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
157 void *buf, size_t len)
159 struct crypto_sync_skcipher *chacha20 =
160 crypto_alloc_sync_skcipher("chacha20", 0, 0);
163 ret = PTR_ERR_OR_ZERO(chacha20);
165 pr_err("error requesting chacha20 cipher: %s", bch2_err_str(ret));
169 ret = crypto_skcipher_setkey(&chacha20->base,
170 (void *) key, sizeof(*key));
172 pr_err("error from crypto_skcipher_setkey(): %s", bch2_err_str(ret));
176 ret = do_encrypt(chacha20, nonce, buf, len);
178 crypto_free_sync_skcipher(chacha20);
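/*
 * Note that ChaCha20 is a stream cipher (data is XORed with a keystream),
 * so encryption and decryption are the same operation: bch2_decrypt_sb_key()
 * below reuses bch2_chacha_encrypt_key() to unwrap the superblock key.
 */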
182 static int gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
185 u8 key[POLY1305_KEY_SIZE];
188 nonce.d[3] ^= BCH_NONCE_POLY;
190 memset(key, 0, sizeof(key));
191 ret = do_encrypt(c->chacha20, nonce, key, sizeof(key));
195 desc->tfm = c->poly1305;
196 crypto_shash_init(desc);
197 crypto_shash_update(desc, key, sizeof(key));
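/*
 * The key derivation above follows the usual ChaCha20-Poly1305 construction:
 * encrypting 32 zero bytes yields the first 32 bytes of the ChaCha20
 * keystream, which become the one-time Poly1305 key. XORing BCH_NONCE_POLY
 * into the nonce domain-separates this keystream from the one used for
 * data encryption.
 */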
201 struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
202 struct nonce nonce, const void *data, size_t len)
206 case BCH_CSUM_crc32c_nonzero:
207 case BCH_CSUM_crc64_nonzero:
208 case BCH_CSUM_crc32c:
209 case BCH_CSUM_xxhash:
210 case BCH_CSUM_crc64: {
211 struct bch2_checksum_state state;
215 bch2_checksum_init(&state);
216 bch2_checksum_update(&state, data, len);
218 return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
221 case BCH_CSUM_chacha20_poly1305_80:
222 case BCH_CSUM_chacha20_poly1305_128: {
223 SHASH_DESC_ON_STACK(desc, c->poly1305);
224 u8 digest[POLY1305_DIGEST_SIZE];
225 struct bch_csum ret = { 0 };
227 gen_poly_key(c, desc, nonce);
229 crypto_shash_update(desc, data, len);
230 crypto_shash_final(desc, digest);
232 memcpy(&ret, digest, bch_crc_bytes[type]);
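/*
 * Usage sketch (illustrative): for the plain CRC/xxhash types the nonce is
 * unused, so callers can pass a zeroed nonce; null_nonce() here is assumed
 * to be the zero-nonce helper from checksum.h.
 *
 *	struct bch_csum csum = bch2_checksum(c, BCH_CSUM_crc32c,
 *					     null_nonce(), buf, len);
 */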
240 int bch2_encrypt(struct bch_fs *c, unsigned type,
241 struct nonce nonce, void *data, size_t len)
243 if (!bch2_csum_type_is_encryption(type))
246 return do_encrypt(c->chacha20, nonce, data, len);
249 static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
250 struct nonce nonce, struct bio *bio,
251 struct bvec_iter *iter)
257 return (struct bch_csum) { 0 };
258 case BCH_CSUM_crc32c_nonzero:
259 case BCH_CSUM_crc64_nonzero:
260 case BCH_CSUM_crc32c:
261 case BCH_CSUM_xxhash:
262 case BCH_CSUM_crc64: {
263 struct bch2_checksum_state state;
266 bch2_checksum_init(&state);
268 #ifdef CONFIG_HIGHMEM
269 __bio_for_each_segment(bv, bio, *iter, *iter) {
270 void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;
272 bch2_checksum_update(&state, p, bv.bv_len);
276 __bio_for_each_bvec(bv, bio, *iter, *iter)
277 bch2_checksum_update(&state, page_address(bv.bv_page) + bv.bv_offset,
280 return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
283 case BCH_CSUM_chacha20_poly1305_80:
284 case BCH_CSUM_chacha20_poly1305_128: {
285 SHASH_DESC_ON_STACK(desc, c->poly1305);
286 u8 digest[POLY1305_DIGEST_SIZE];
287 struct bch_csum ret = { 0 };
289 gen_poly_key(c, desc, nonce);
291 #ifdef CONFIG_HIGHMEM
292 __bio_for_each_segment(bv, bio, *iter, *iter) {
293 void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;
295 crypto_shash_update(desc, p, bv.bv_len);
299 __bio_for_each_bvec(bv, bio, *iter, *iter)
300 crypto_shash_update(desc,
301 page_address(bv.bv_page) + bv.bv_offset,
304 crypto_shash_final(desc, digest);
306 memcpy(&ret, digest, bch_crc_bytes[type]);
314 struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type,
315 struct nonce nonce, struct bio *bio)
317 struct bvec_iter iter = bio->bi_iter;
319 return __bch2_checksum_bio(c, type, nonce, bio, &iter);
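/*
 * __bch2_encrypt_bio() encrypts a bio in place, batching bvecs through a
 * fixed 16-entry scatterlist: each time the scatterlist fills up, the batch
 * is encrypted and the nonce is advanced by the bytes consumed, so the next
 * batch continues the keystream where the previous one stopped.
 */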
322 int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
323 struct nonce nonce, struct bio *bio)
326 struct bvec_iter iter;
327 struct scatterlist sgl[16], *sg = sgl;
331 if (!bch2_csum_type_is_encryption(type))
334 sg_init_table(sgl, ARRAY_SIZE(sgl));
336 bio_for_each_segment(bv, bio, iter) {
337 if (sg == sgl + ARRAY_SIZE(sgl)) {
340 ret = do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
344 nonce = nonce_add(nonce, bytes);
347 sg_init_table(sgl, ARRAY_SIZE(sgl));
351 sg_set_page(sg++, bv.bv_page, bv.bv_len, bv.bv_offset);
356 return do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
359 struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
360 struct bch_csum b, size_t b_len)
362 struct bch2_checksum_state state;
365 bch2_checksum_init(&state);
366 state.seed = le64_to_cpu(a.lo);
368 BUG_ON(!bch2_checksum_mergeable(type));
371 unsigned page_len = min_t(unsigned, b_len, PAGE_SIZE);
373 bch2_checksum_update(&state,
374 page_address(ZERO_PAGE(0)), page_len);
377 a.lo = cpu_to_le64(bch2_checksum_final(&state));
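/*
 * Merging relies on the linearity of CRC-style checksums: csum(A || B) can
 * be computed from csum(A), csum(B) and the length of B alone, by extending
 * csum(A) over b_len zero bytes (the ZERO_PAGE loop above) and folding in
 * csum(B). Usage sketch (illustrative):
 *
 *	struct bch_csum whole = bch2_checksum_merge(BCH_CSUM_crc64,
 *						    csum_a, csum_b, b_len);
 */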
383 int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
384 struct bversion version,
385 struct bch_extent_crc_unpacked crc_old,
386 struct bch_extent_crc_unpacked *crc_a,
387 struct bch_extent_crc_unpacked *crc_b,
388 unsigned len_a, unsigned len_b,
389 unsigned new_csum_type)
391 struct bvec_iter iter = bio->bi_iter;
392 struct nonce nonce = extent_nonce(version, crc_old);
393 struct bch_csum merged = { 0 };
395 struct bch_extent_crc_unpacked *crc;
398 struct bch_csum csum;
400 { crc_a, len_a, new_csum_type, { 0 }},
401 { crc_b, len_b, new_csum_type, { 0 } },
402 { NULL, bio_sectors(bio) - len_a - len_b, new_csum_type, { 0 } },
404 bool mergeable = crc_old.csum_type == new_csum_type &&
405 bch2_checksum_mergeable(new_csum_type);
406 unsigned crc_nonce = crc_old.nonce;
408 BUG_ON(len_a + len_b > bio_sectors(bio));
409 BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));
410 BUG_ON(crc_is_compressed(crc_old));
411 BUG_ON(bch2_csum_type_is_encryption(crc_old.csum_type) !=
412 bch2_csum_type_is_encryption(new_csum_type));
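	/*
	 * Checksum each of the three ranges (len_a, len_b, and whatever
	 * remains) with the new checksum type; if the old and new types
	 * match and the type is mergeable, the per-range checksums can be
	 * merged and verified against crc_old.csum without reading the data
	 * a second time.
	 */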
414 for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
415 iter.bi_size = i->len << 9;
416 if (mergeable || i->crc)
417 i->csum = __bch2_checksum_bio(c, i->csum_type,
420 bio_advance_iter(bio, &iter, i->len << 9);
421 nonce = nonce_add(nonce, i->len << 9);
425 for (i = splits; i < splits + ARRAY_SIZE(splits); i++)
426 merged = bch2_checksum_merge(new_csum_type, merged,
427 i->csum, i->len << 9);
429 merged = bch2_checksum_bio(c, crc_old.csum_type,
430 extent_nonce(version, crc_old), bio);
432 if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) {
433 struct printbuf buf = PRINTBUF;
434 prt_printf(&buf, "checksum error in %s() (memory corruption or bug?)\n"
435 "expected %0llx:%0llx got %0llx:%0llx (old type ",
441 bch2_prt_csum_type(&buf, crc_old.csum_type);
442 prt_str(&buf, " new type ");
443 bch2_prt_csum_type(&buf, new_csum_type);
445 bch_err(c, "%s", buf.buf);
450 for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
452 *i->crc = (struct bch_extent_crc_unpacked) {
453 .csum_type = i->csum_type,
454 .compression_type = crc_old.compression_type,
455 .compressed_size = i->len,
456 .uncompressed_size = i->len,
463 if (bch2_csum_type_is_encryption(new_csum_type))
470 /* BCH_SB_FIELD_crypt: */
472 static int bch2_sb_crypt_validate(struct bch_sb *sb,
473 struct bch_sb_field *f,
474 struct printbuf *err)
476 struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);
478 if (vstruct_bytes(&crypt->field) < sizeof(*crypt)) {
479 prt_printf(err, "wrong size (got %zu should be %zu)",
480 vstruct_bytes(&crypt->field), sizeof(*crypt));
481 return -BCH_ERR_invalid_sb_crypt;
484 if (BCH_CRYPT_KDF_TYPE(crypt)) {
485 prt_printf(err, "bad kdf type %llu", BCH_CRYPT_KDF_TYPE(crypt));
486 return -BCH_ERR_invalid_sb_crypt;
492 static void bch2_sb_crypt_to_text(struct printbuf *out, struct bch_sb *sb,
493 struct bch_sb_field *f)
495 struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);
497 prt_printf(out, "KFD: %llu", BCH_CRYPT_KDF_TYPE(crypt));
499 prt_printf(out, "scrypt n: %llu", BCH_KDF_SCRYPT_N(crypt));
501 prt_printf(out, "scrypt r: %llu", BCH_KDF_SCRYPT_R(crypt));
503 prt_printf(out, "scrypt p: %llu", BCH_KDF_SCRYPT_P(crypt));
507 const struct bch_sb_field_ops bch_sb_field_ops_crypt = {
508 .validate = bch2_sb_crypt_validate,
509 .to_text = bch2_sb_crypt_to_text,
513 static int __bch2_request_key(char *key_description, struct bch_key *key)
515 struct key *keyring_key;
516 const struct user_key_payload *ukp;
519 keyring_key = request_key(&key_type_user, key_description, NULL);
520 if (IS_ERR(keyring_key))
521 return PTR_ERR(keyring_key);
523 down_read(&keyring_key->sem);
524 ukp = dereference_key_locked(keyring_key);
525 if (ukp->datalen == sizeof(*key)) {
526 memcpy(key, ukp->data, ukp->datalen);
531 up_read(&keyring_key->sem);
532 key_put(keyring_key);
537 #include <keyutils.h>
539 static int __bch2_request_key(char *key_description, struct bch_key *key)
543 key_id = request_key("user", key_description, NULL,
544 KEY_SPEC_SESSION_KEYRING);
548 key_id = request_key("user", key_description, NULL,
549 KEY_SPEC_USER_KEYRING);
553 key_id = request_key("user", key_description, NULL,
554 KEY_SPEC_USER_SESSION_KEYRING);
561 if (keyctl_read(key_id, (void *) key, sizeof(*key)) != sizeof(*key))
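/*
 * In both the kernel and userspace versions the key is looked up in the
 * "user" key type, under a description of the form "bcachefs:<uuid>" (built
 * by bch2_request_key() below); e.g. a key loaded from userspace via
 * add_key(2) or keyctl(1) under that description will be found here.
 */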
570 int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
572 struct printbuf key_description = PRINTBUF;
575 prt_printf(&key_description, "bcachefs:");
576 pr_uuid(&key_description, sb->user_uuid.b);
578 ret = __bch2_request_key(key_description.buf, key);
579 printbuf_exit(&key_description);
583 char *passphrase = read_passphrase("Enter passphrase: ");
584 struct bch_encrypted_key sb_key;
586 bch2_passphrase_check(sb, passphrase,
592 /* stash with memfd, pass memfd fd to mount */
598 int bch2_revoke_key(struct bch_sb *sb)
601 struct printbuf key_description = PRINTBUF;
603 prt_printf(&key_description, "bcachefs:");
604 pr_uuid(&key_description, sb->user_uuid.b);
606 key_id = request_key("user", key_description.buf, NULL, KEY_SPEC_USER_KEYRING);
607 printbuf_exit(&key_description);
611 keyctl_revoke(key_id);
617 int bch2_decrypt_sb_key(struct bch_fs *c,
618 struct bch_sb_field_crypt *crypt,
621 struct bch_encrypted_key sb_key = crypt->key;
622 struct bch_key user_key;
625 /* is key encrypted? */
626 if (!bch2_key_is_encrypted(&sb_key))
629 ret = bch2_request_key(c->disk_sb.sb, &user_key);
631 bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
635 /* decrypt real key: */
636 ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
637 &sb_key, sizeof(sb_key));
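	/*
	 * Decrypting with the wrong key produces garbage rather than an
	 * error; the check below (via bch2_key_is_encrypted(), i.e. the key
	 * magic) is what actually detects an incorrect passphrase/key.
	 */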
641 if (bch2_key_is_encrypted(&sb_key)) {
642 bch_err(c, "incorrect encryption key");
649 memzero_explicit(&sb_key, sizeof(sb_key));
650 memzero_explicit(&user_key, sizeof(user_key));
654 static int bch2_alloc_ciphers(struct bch_fs *c)
659 c->chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
660 ret = PTR_ERR_OR_ZERO(c->chacha20);
663 bch_err(c, "error requesting chacha20 module: %s", bch2_err_str(ret));
668 c->poly1305 = crypto_alloc_shash("poly1305", 0, 0);
669 ret = PTR_ERR_OR_ZERO(c->poly1305);
672 bch_err(c, "error requesting poly1305 module: %s", bch2_err_str(ret));
679 int bch2_disable_encryption(struct bch_fs *c)
681 struct bch_sb_field_crypt *crypt;
685 mutex_lock(&c->sb_lock);
687 crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
691 /* is key encrypted? */
693 if (bch2_key_is_encrypted(&crypt->key))
696 ret = bch2_decrypt_sb_key(c, crypt, &key);
700 crypt->key.magic = cpu_to_le64(BCH_KEY_MAGIC);
701 crypt->key.key = key;
703 SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
706 mutex_unlock(&c->sb_lock);
711 int bch2_enable_encryption(struct bch_fs *c, bool keyed)
713 struct bch_encrypted_key key;
714 struct bch_key user_key;
715 struct bch_sb_field_crypt *crypt;
718 mutex_lock(&c->sb_lock);
720 /* Do we already have an encryption key? */
721 if (bch2_sb_field_get(c->disk_sb.sb, crypt))
724 ret = bch2_alloc_ciphers(c);
728 key.magic = cpu_to_le64(BCH_KEY_MAGIC);
729 get_random_bytes(&key.key, sizeof(key.key));
732 ret = bch2_request_key(c->disk_sb.sb, &user_key);
734 bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
738 ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
744 ret = crypto_skcipher_setkey(&c->chacha20->base,
745 (void *) &key.key, sizeof(key.key));
749 crypt = bch2_sb_field_resize(&c->disk_sb, crypt,
750 sizeof(*crypt) / sizeof(u64));
752 ret = -BCH_ERR_ENOSPC_sb_crypt;
758 /* write superblock */
759 SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 1);
762 mutex_unlock(&c->sb_lock);
763 memzero_explicit(&user_key, sizeof(user_key));
764 memzero_explicit(&key, sizeof(key));
768 void bch2_fs_encryption_exit(struct bch_fs *c)
770 if (!IS_ERR_OR_NULL(c->poly1305))
771 crypto_free_shash(c->poly1305);
772 if (!IS_ERR_OR_NULL(c->chacha20))
773 crypto_free_sync_skcipher(c->chacha20);
774 if (!IS_ERR_OR_NULL(c->sha256))
775 crypto_free_shash(c->sha256);
778 int bch2_fs_encryption_init(struct bch_fs *c)
780 struct bch_sb_field_crypt *crypt;
784 c->sha256 = crypto_alloc_shash("sha256", 0, 0);
785 ret = PTR_ERR_OR_ZERO(c->sha256);
787 bch_err(c, "error requesting sha256 module: %s", bch2_err_str(ret));
791 crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
795 ret = bch2_alloc_ciphers(c);
799 ret = bch2_decrypt_sb_key(c, crypt, &key);
803 ret = crypto_skcipher_setkey(&c->chacha20->base,
804 (void *) &key.key, sizeof(key.key));
808 memzero_explicit(&key, sizeof(key));