/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
	const char *cipher_str;	/* crypto API name (for fallback case) */
	unsigned int keysize;	/* key size in bytes */
	unsigned int ivsize;	/* iv size in bytes */
};

extern const struct blk_crypto_mode blk_crypto_modes[];

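/*
 * For illustration only (a sketch of how blk-crypto.c populates this table,
 * not a definition in this header): each blk_crypto_modes[] entry pairs a
 * crypto API transform name with its key/IV geometry, e.g.
 *
 *	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
 *		.cipher_str = "xts(aes)",
 *		.keysize = 64,	// covers both 32-byte XTS key halves
 *		.ivsize = 16,
 *	},
 *
 * cipher_str is only used when the crypto API fallback must allocate a
 * software skcipher for a device without inline encryption hardware.
 */
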
#ifdef CONFIG_BLK_INLINE_ENCRYPTION

void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc);

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2);

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       bio->bi_crypt_context);
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
				       bio->bi_iter.bi_size, req->crypt_ctx);
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       next->crypt_ctx);
}

static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
	rq->crypt_ctx = NULL;
	rq->crypt_keyslot = NULL;
}

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return rq->crypt_ctx;
}

static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
{
	return rq->crypt_keyslot;
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
					       struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return false;
}

static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
{
	return false;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

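/*
 * Note on the pattern used below: each out-of-line __foo() helper does the
 * real work, while the inline foo() wrapper first checks for an encryption
 * context, keeping the common unencrypted case a cheap inline test with no
 * function call.
 */
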
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_advance(bio, bytes);
}

void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_free_ctx(bio);
}

static inline void bio_crypt_do_front_merge(struct request *rq,
					    struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (bio_has_crypt_ctx(bio))
		memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
		       sizeof(rq->crypt_ctx->bc_dun));
#endif
}

bool __blk_crypto_bio_prep(struct bio **bio_ptr);
static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
{
	if (bio_has_crypt_ctx(*bio_ptr))
		return __blk_crypto_bio_prep(bio_ptr);
	return true;
}

blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return __blk_crypto_rq_get_keyslot(rq);
	return BLK_STS_OK;
}

void __blk_crypto_rq_put_keyslot(struct request *rq);
static inline void blk_crypto_rq_put_keyslot(struct request *rq)
{
	if (blk_crypto_rq_has_keyslot(rq))
		__blk_crypto_rq_put_keyslot(rq);
}

void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		__blk_crypto_free_request(rq);
}

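/*
 * Lifecycle note (descriptive, based on how the block layer core uses these
 * helpers): a keyslot is acquired with blk_crypto_rq_get_keyslot() before the
 * request is dispatched, released with blk_crypto_rq_put_keyslot() when the
 * request is done with the hardware, and the crypt_ctx itself is freed via
 * blk_crypto_free_request() when the request is torn down.
 */
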
int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask);
/**
 * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
 *			    is inserted
 * @rq: The request to prepare
 * @bio: The first bio being inserted into the request
 * @gfp_mask: Memory allocation flags
 *
 * Return: 0 on success, -ENOMEM if out of memory.  -ENOMEM is only possible if
 *	   @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
 */
static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
					 gfp_t gfp_mask)
{
	if (bio_has_crypt_ctx(bio))
		return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
	return 0;
}

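/*
 * A minimal usage sketch (hypothetical caller; the real call sites are in the
 * blk-mq request-construction path). With a @gfp_mask lacking
 * __GFP_DIRECT_RECLAIM, the -ENOMEM case must be handled:
 *
 *	static blk_status_t example_setup_rq(struct request *rq,
 *					     struct bio *bio)
 *	{
 *		if (blk_crypto_rq_bio_prep(rq, bio, GFP_NOWAIT) != 0)
 *			return BLK_STS_RESOURCE;	// retry later
 *		return BLK_STS_OK;
 *	}
 *
 * "example_setup_rq" is an illustrative name, not a kernel function.
 */
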
/**
 * blk_crypto_insert_cloned_request - Prepare a cloned request to be inserted
 *				      into a request queue.
 * @rq: the request being queued
 *
 * Return: BLK_STS_OK on success, nonzero on error.
 */
static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return blk_crypto_rq_get_keyslot(rq);
	return BLK_STS_OK;
}

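/*
 * Sketch of the expected caller (assuming a request-based stacking driver
 * path such as device-mapper): the clone inherits the original request's
 * crypt_ctx, so only a keyslot reference must be acquired before dispatch.
 *
 *	blk_status_t status = blk_crypto_insert_cloned_request(clone);
 *	if (status != BLK_STS_OK)
 *		return status;
 *	// ...dispatch the clone to the underlying queue...
 */
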
#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK

int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);

bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	pr_warn_once("crypto API fallback is disabled\n");
	return -ENOPKG;
}

static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	pr_warn_once("crypto API fallback disabled; failing request.\n");
	(*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
	return false;
}

static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return 0;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */