// SPDX-License-Identifier: GPL-2.0-or-later

#include <crypto/hash.h>
#include <linux/cpu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>
#include <net/tcp.h>

static size_t __scratch_size;
/* Per-CPU scratch/temp buffer, RCU-protected so that readers in softirq
 * context can keep using it while the slow-path reallocates it.
 */
static DEFINE_PER_CPU(void __rcu *, sigpool_scratch);
struct sigpool_entry {
	struct crypto_ahash	*hash;
	const char		*alg;
	struct kref		kref;
	uint16_t		needs_key:1,
				reserved:15;
};

/* The pool of entries is sized to fit into a single page */
#define CPOOL_SIZE (PAGE_SIZE / sizeof(struct sigpool_entry))
static struct sigpool_entry cpool[CPOOL_SIZE];
static unsigned int cpool_populated;
static DEFINE_MUTEX(cpool_mutex);
struct scratches_to_free {
	struct rcu_head rcu;
	unsigned int cnt;
	void *scratches[];
};

static void free_old_scratches(struct rcu_head *head)
{
	struct scratches_to_free *stf;

	stf = container_of(head, struct scratches_to_free, rcu);
	while (stf->cnt--)
		kfree(stf->scratches[stf->cnt]);
	kfree(stf);
}
/**
 * sigpool_reserve_scratch - re-allocates the per-CPU scratch buffers, slow-path
 * @size: request size for the scratch/temp buffer
 */
static int sigpool_reserve_scratch(size_t size)
{
	struct scratches_to_free *stf;
	size_t stf_sz = struct_size(stf, scratches, num_possible_cpus());
	int cpu, err = 0;

	lockdep_assert_held(&cpool_mutex);
	if (__scratch_size >= size)
		return 0;

	stf = kmalloc(stf_sz, GFP_KERNEL);
	if (!stf)
		return -ENOMEM;
	stf->cnt = 0;

	size = max(size, __scratch_size);
	cpus_read_lock();
	for_each_possible_cpu(cpu) {
		void *scratch, *old_scratch;

		scratch = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
		if (!scratch) {
			err = -ENOMEM;
			break;
		}

		old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch, cpu),
					scratch, lockdep_is_held(&cpool_mutex));
		if (!cpu_online(cpu) || !old_scratch) {
			kfree(old_scratch);
			continue;
		}
		/* Defer freeing the old buffer until after an RCU grace
		 * period: readers may still be using it under
		 * rcu_read_lock_bh().
		 */
		stf->scratches[stf->cnt++] = old_scratch;
	}
	cpus_read_unlock();
	if (!err)
		__scratch_size = size;

	call_rcu(&stf->rcu, free_old_scratches);
	return err;
}
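/* Scratch lifecycle in short: the slow-path swaps in a new buffer per CPU
 * with rcu_replace_pointer() under cpool_mutex and frees the old buffers
 * via call_rcu(); readers fetch the pointer with rcu_dereference_bh() in
 * tcp_sigpool_start() and may use it until tcp_sigpool_end() drops the
 * rcu_read_lock_bh().
 */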
static void sigpool_scratch_free(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		kfree(rcu_replace_pointer(per_cpu(sigpool_scratch, cpu),
					  NULL, lockdep_is_held(&cpool_mutex)));
	__scratch_size = 0;
}
/* Verify that the transform can be cloned before committing it to the pool */
static int __cpool_try_clone(struct crypto_ahash *hash)
{
	struct crypto_ahash *tmp;

	tmp = crypto_clone_ahash(hash);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	crypto_free_ahash(tmp);
	return 0;
}
static int __cpool_alloc_ahash(struct sigpool_entry *e, const char *alg)
{
	struct crypto_ahash *cpu0_hash;
	int ret;

	e->alg = kstrdup(alg, GFP_KERNEL);
	if (!e->alg)
		return -ENOMEM;

	cpu0_hash = crypto_alloc_ahash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(cpu0_hash)) {
		ret = PTR_ERR(cpu0_hash);
		goto out_free_alg;
	}

	e->needs_key = crypto_ahash_get_flags(cpu0_hash) & CRYPTO_TFM_NEED_KEY;

	ret = __cpool_try_clone(cpu0_hash);
	if (ret)
		goto out_free_cpu0_hash;
	e->hash = cpu0_hash;
	kref_init(&e->kref);
	return 0;

out_free_cpu0_hash:
	crypto_free_ahash(cpu0_hash);
out_free_alg:
	kfree(e->alg);
	e->alg = NULL;
	return ret;
}
/**
 * tcp_sigpool_alloc_ahash - allocates pool for ahash requests
 * @alg: name of async hash algorithm
 * @scratch_size: reserve a tcp_sigpool::scratch buffer of this size
 *
 * Return: id of the pool (>= 0) on success, a negative error code otherwise.
 */
int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size)
{
	int i, ret;

	/* slow-path */
	mutex_lock(&cpool_mutex);
	ret = sigpool_reserve_scratch(scratch_size);
	if (ret)
		goto out;
	for (i = 0; i < cpool_populated; i++) {
		if (!cpool[i].alg)
			continue;
		if (strcmp(cpool[i].alg, alg))
			continue;

		/* pairs with tcp_sigpool_release() */
		if (!kref_get_unless_zero(&cpool[i].kref))
			kref_init(&cpool[i].kref);
		ret = i;
		goto out;
	}

	for (i = 0; i < cpool_populated; i++) {
		if (!cpool[i].alg)
			break;
	}
	if (i >= CPOOL_SIZE) {
		ret = -ENOSPC;
		goto out;
	}

	ret = __cpool_alloc_ahash(&cpool[i], alg);
	if (!ret) {
		ret = i;
		if (i == cpool_populated)
			cpool_populated++;
	}
out:
	mutex_unlock(&cpool_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tcp_sigpool_alloc_ahash);
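/* A minimal usage sketch (illustrative only, not part of the original file):
 * a user allocates a pool once and remembers the returned id. The guard
 * macro, the function names and the "md5"/128 values below are hypothetical.
 */
#ifdef TCP_SIGPOOL_USAGE_EXAMPLE
static int example_pool_id __read_mostly = -1;

static int example_sigpool_init(void)
{
	/* Reserve an "md5" pool; the scratch size is an illustrative value */
	int id = tcp_sigpool_alloc_ahash("md5", 128);

	if (id < 0)
		return id;
	example_pool_id = id;
	return 0;
}

static void example_sigpool_exit(void)
{
	/* Drops the reference taken by tcp_sigpool_alloc_ahash() */
	tcp_sigpool_release(example_pool_id);
}
#endif /* TCP_SIGPOOL_USAGE_EXAMPLE */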
static void __cpool_free_entry(struct sigpool_entry *e)
{
	crypto_free_ahash(e->hash);
	kfree(e->alg);
	memset(e, 0, sizeof(*e));
}
static void cpool_cleanup_work_cb(struct work_struct *work)
{
	bool free_scratch = true;
	unsigned int i;

	mutex_lock(&cpool_mutex);
	for (i = 0; i < cpool_populated; i++) {
		if (kref_read(&cpool[i].kref) > 0) {
			free_scratch = false;
			continue;
		}
		if (!cpool[i].alg)
			continue;
		__cpool_free_entry(&cpool[i]);
	}
	/* Only drop the scratch buffers once no pool is in use */
	if (free_scratch)
		sigpool_scratch_free();
	mutex_unlock(&cpool_mutex);
}

static DECLARE_WORK(cpool_cleanup_work, cpool_cleanup_work_cb);
/* Freeing is deferred to a workqueue: the final kref_put() may happen in a
 * context where cpool_mutex cannot be taken.
 */
static void cpool_schedule_cleanup(struct kref *kref)
{
	schedule_work(&cpool_cleanup_work);
}
/**
 * tcp_sigpool_release - decreases number of users for a pool. If it was
 * the last user of the pool, releases any memory that was consumed.
 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
 */
void tcp_sigpool_release(unsigned int id)
{
	if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg))
		return;

	/* slow-path */
	kref_put(&cpool[id].kref, cpool_schedule_cleanup);
}
EXPORT_SYMBOL_GPL(tcp_sigpool_release);
/**
 * tcp_sigpool_get - increases number of users (refcounter) for a pool
 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
 */
void tcp_sigpool_get(unsigned int id)
{
	if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg))
		return;
	kref_get(&cpool[id].kref);
}
EXPORT_SYMBOL_GPL(tcp_sigpool_get);
int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c) __cond_acquires(RCU_BH)
{
	struct crypto_ahash *hash;

	rcu_read_lock_bh();
	if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg)) {
		rcu_read_unlock_bh();
		return -EINVAL;
	}

	hash = crypto_clone_ahash(cpool[id].hash);
	if (IS_ERR(hash)) {
		rcu_read_unlock_bh();
		return PTR_ERR(hash);
	}

	c->req = ahash_request_alloc(hash, GFP_ATOMIC);
	if (!c->req) {
		crypto_free_ahash(hash);
		rcu_read_unlock_bh();
		return -ENOMEM;
	}
	ahash_request_set_callback(c->req, 0, NULL, NULL);

	/* Pairs with sigpool_reserve_scratch(): the scratch area is
	 * valid (allocated) until tcp_sigpool_end().
	 */
	c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch));
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_sigpool_start);
void tcp_sigpool_end(struct tcp_sigpool *c) __releases(RCU_BH)
{
	struct crypto_ahash *hash = crypto_ahash_reqtfm(c->req);

	rcu_read_unlock_bh();
	ahash_request_free(c->req);
	crypto_free_ahash(hash);
}
EXPORT_SYMBOL_GPL(tcp_sigpool_end);
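/* A minimal sketch of the start()/end() cycle (illustrative only): hash a
 * flat buffer on the local CPU. example_hash_buf() and its parameters are
 * hypothetical; the tcp_sigpool field used (req) is the real one.
 */
#ifdef TCP_SIGPOOL_USAGE_EXAMPLE
static int example_hash_buf(unsigned int id, const u8 *buf,
			    unsigned int len, u8 *digest_out)
{
	struct tcp_sigpool hp;
	struct scatterlist sg;
	int ret;

	/* Takes rcu_read_lock_bh() and sets up hp.req on success */
	ret = tcp_sigpool_start(id, &hp);
	if (ret)
		return ret;

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(hp.req, &sg, digest_out, len);
	ret = crypto_ahash_digest(hp.req);

	/* Drops the lock and frees the per-call request */
	tcp_sigpool_end(&hp);
	return ret;
}
#endif /* TCP_SIGPOOL_USAGE_EXAMPLE */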
/**
 * tcp_sigpool_algo - return algorithm of tcp_sigpool
 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
 * @buf: buffer to return name of algorithm
 * @buf_len: size of @buf
 */
size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len)
{
	if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg))
		return -EINVAL;

	return strscpy(buf, cpool[id].alg, buf_len);
}
EXPORT_SYMBOL_GPL(tcp_sigpool_algo);
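/* Illustrative only: reading back the algorithm name of a pool id.
 * example_print_algo() is hypothetical; CRYPTO_MAX_ALG_NAME bounds any
 * algorithm name in the crypto API, so it is a safe buffer size here.
 */
#ifdef TCP_SIGPOOL_USAGE_EXAMPLE
static void example_print_algo(unsigned int id)
{
	char name[CRYPTO_MAX_ALG_NAME] = {};

	/* strscpy() NUL-terminates even on truncation */
	tcp_sigpool_algo(id, name, sizeof(name));
	pr_info("sigpool %u uses %s\n", id, name);
}
#endif /* TCP_SIGPOOL_USAGE_EXAMPLE */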
/**
 * tcp_sigpool_hash_skb_data - hash data in skb with initialized tcp_sigpool
 * @hp: tcp_sigpool pointer
 * @skb: socket buffer whose payload is to be hashed
 * @header_len: TCP header length for this segment
 */
int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
			      const struct sk_buff *skb,
			      unsigned int header_len)
{
	const unsigned int head_data_len = skb_headlen(skb) > header_len ?
					   skb_headlen(skb) - header_len : 0;
	const struct skb_shared_info *shi = skb_shinfo(skb);
	const struct tcphdr *tp = tcp_hdr(skb);
	struct ahash_request *req = hp->req;
	struct sk_buff *frag_iter;
	struct scatterlist sg;
	unsigned int i;

	sg_init_table(&sg, 1);

	/* Linear part of the skb, past the TCP header */
	sg_set_buf(&sg, ((u8 *)tp) + header_len, head_data_len);
	ahash_request_set_crypt(req, &sg, NULL, head_data_len);
	if (crypto_ahash_update(req))
		return 1;

	/* Paged fragments */
	for (i = 0; i < shi->nr_frags; ++i) {
		const skb_frag_t *f = &shi->frags[i];
		unsigned int offset = skb_frag_off(f);
		struct page *page;

		page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
		sg_set_page(&sg, page, skb_frag_size(f), offset_in_page(offset));
		ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f));
		if (crypto_ahash_update(req))
			return 1;
	}

	/* Chained skbs: recurse with no header bytes to skip */
	skb_walk_frags(skb, frag_iter)
		if (tcp_sigpool_hash_skb_data(hp, frag_iter, 0))
			return 1;

	return 0;
}
EXPORT_SYMBOL(tcp_sigpool_hash_skb_data);
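/* Illustrative only: how a caller such as a TCP-MD5 hash routine might
 * combine the pieces. example_hash_skb() is hypothetical; the
 * init/update/final split around tcp_sigpool_hash_skb_data() matches its
 * contract that the request is already initialized.
 */
#ifdef TCP_SIGPOOL_USAGE_EXAMPLE
static int example_hash_skb(unsigned int id, const struct sk_buff *skb,
			    u8 *digest_out)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcp_sigpool hp;
	int ret;

	ret = tcp_sigpool_start(id, &hp);
	if (ret)
		return ret;

	ret = crypto_ahash_init(hp.req);
	if (ret)
		goto out;
	/* Hash everything after the TCP header (doff is in 32-bit words) */
	ret = tcp_sigpool_hash_skb_data(&hp, skb, th->doff << 2);
	if (ret)
		goto out;

	ahash_request_set_crypt(hp.req, NULL, digest_out, 0);
	ret = crypto_ahash_final(hp.req);
out:
	tcp_sigpool_end(&hp);
	return ret;
}
#endif /* TCP_SIGPOOL_USAGE_EXAMPLE */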
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Per-CPU pool of crypto requests");