GNU Linux-libre 5.10.215-gnu1, crypto/scompress.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */
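/*
 * This file implements the scomp ("synchronous compression") algorithm
 * type and the glue that exposes scomp algorithms through the acomp
 * (asynchronous compression) API: the scatterlist parameters of an
 * acomp_req are linearized into per-CPU scratch buffers, the scomp
 * callbacks operate on those flat buffers, and the result is copied back
 * into the destination scatterlist.
 */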
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include "internal.h"
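/*
 * Each possible CPU gets a pair of SCOMP_SCRATCH_SIZE scratch buffers
 * (defined in crypto/internal/scompress.h): one to hold the linearized
 * source data and one to receive the (de)compressed output. The spinlock
 * serializes users of a given CPU's scratch area.
 */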
struct scomp_scratch {
        spinlock_t      lock;
        void            *src;
        void            *dst;
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
        .lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

#ifdef CONFIG_NET
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_comp rscomp;

        memset(&rscomp, 0, sizeof(rscomp));

        strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

        return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
                       sizeof(rscomp), &rscomp);
}
#else
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_puts(m, "type         : scomp\n");
}

static void crypto_scomp_free_scratches(void)
{
        struct scomp_scratch *scratch;
        int i;

        for_each_possible_cpu(i) {
                scratch = per_cpu_ptr(&scomp_scratch, i);

                vfree(scratch->src);
                vfree(scratch->dst);
                scratch->src = NULL;
                scratch->dst = NULL;
        }
}
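/*
 * Allocate the per-CPU scratch buffers, placing each pair on the node of
 * the CPU that will use it. On failure, everything allocated so far is
 * released again.
 */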
static int crypto_scomp_alloc_scratches(void)
{
        struct scomp_scratch *scratch;
        int i;

        for_each_possible_cpu(i) {
                void *mem;

                scratch = per_cpu_ptr(&scomp_scratch, i);

                mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
                if (!mem)
                        goto error;
                scratch->src = mem;
                mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
                if (!mem)
                        goto error;
                scratch->dst = mem;
        }
        return 0;
error:
        crypto_scomp_free_scratches();
        return -ENOMEM;
}
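/*
 * The scratch buffers are shared by all scomp transforms: the first tfm
 * to be initialized allocates them and the last one to go away frees them
 * again (see crypto_exit_scomp_ops_async()), with scomp_scratch_users
 * acting as a reference count under scomp_lock.
 */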
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
        int ret = 0;

        mutex_lock(&scomp_lock);
        if (!scomp_scratch_users++)
                ret = crypto_scomp_alloc_scratches();
        mutex_unlock(&scomp_lock);

        return ret;
}
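/*
 * Service an acomp request with a synchronous scomp algorithm: copy the
 * source scatterlist into this CPU's scratch buffer, run the scomp
 * (de)compression callback on the flat buffers, then copy the result into
 * the destination scatterlist. If the caller did not supply a destination
 * (req->dst == NULL), one is allocated here and later freed through
 * crt->dst_free (sgl_free). A supplied destination that turns out to be
 * too small yields -ENOSPC. dir != 0 means compress, dir == 0 decompress.
 */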
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
        struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
        void **tfm_ctx = acomp_tfm_ctx(tfm);
        struct crypto_scomp *scomp = *tfm_ctx;
        void **ctx = acomp_request_ctx(req);
        struct scomp_scratch *scratch;
        unsigned int dlen;
        int ret;

        if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
                return -EINVAL;

        if (req->dst && !req->dlen)
                return -EINVAL;

        if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
                req->dlen = SCOMP_SCRATCH_SIZE;

        dlen = req->dlen;

        scratch = raw_cpu_ptr(&scomp_scratch);
        spin_lock(&scratch->lock);

        scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
        if (dir)
                ret = crypto_scomp_compress(scomp, scratch->src, req->slen,
                                            scratch->dst, &req->dlen, *ctx);
        else
                ret = crypto_scomp_decompress(scomp, scratch->src, req->slen,
                                              scratch->dst, &req->dlen, *ctx);
        if (!ret) {
                if (!req->dst) {
                        req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
                        if (!req->dst) {
                                ret = -ENOMEM;
                                goto out;
                        }
                } else if (req->dlen > dlen) {
                        ret = -ENOSPC;
                        goto out;
                }
                scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
                                         1);
        }
out:
        spin_unlock(&scratch->lock);
        return ret;
}
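/*
 * Caller-side sketch (illustrative only, not part of this file; error
 * handling omitted): an scomp algorithm such as "lzo" is consumed through
 * the generic acomp interface, which lands in scomp_acomp_comp_decomp()
 * above and completes synchronously. sg_in/sg_out and the lengths are
 * placeholders supplied by the caller.
 *
 *	struct crypto_acomp *tfm = crypto_alloc_acomp("lzo", 0, 0);
 *	struct acomp_req *req = acomp_request_alloc(tfm);
 *
 *	acomp_request_set_params(req, sg_in, sg_out, in_len, out_len);
 *	err = crypto_acomp_compress(req);
 *
 *	acomp_request_free(req);
 *	crypto_free_acomp(tfm);
 */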
static int scomp_acomp_compress(struct acomp_req *req)
{
        return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
        return scomp_acomp_comp_decomp(req, 0);
}
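/*
 * Tear down the acomp-over-scomp wrapping: free the underlying scomp
 * transform and drop our reference on the shared scratch buffers.
 */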
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
        struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

        crypto_free_scomp(*ctx);

        mutex_lock(&scomp_lock);
        if (!--scomp_scratch_users)
                crypto_scomp_free_scratches();
        mutex_unlock(&scomp_lock);
}
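/*
 * Called by the acomp layer when the algorithm behind an acomp handle is
 * really an scomp: allocate the underlying scomp transform, stash it in
 * the acomp tfm context and point the acomp entry points at the
 * synchronous wrappers above.
 */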
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
        struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
        struct crypto_scomp *scomp;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        scomp = crypto_create_tfm(calg, &crypto_scomp_type);
        if (IS_ERR(scomp)) {
                crypto_mod_put(calg);
                return PTR_ERR(scomp);
        }

        *ctx = scomp;
        tfm->exit = crypto_exit_scomp_ops_async;

        crt->compress = scomp_acomp_compress;
        crt->decompress = scomp_acomp_decompress;
        crt->dst_free = sgl_free;
        crt->reqsize = sizeof(void *);

        return 0;
}
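/*
 * Allocate (and, below, free) the algorithm-private context that scomp
 * implementations keep per request, storing it in the acomp request's
 * context area. On allocation failure the request itself is freed and
 * NULL is returned to the caller.
 */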
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
        struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
        struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
        struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
        struct crypto_scomp *scomp = *tfm_ctx;
        void *ctx;

        ctx = crypto_scomp_alloc_ctx(scomp);
        if (IS_ERR(ctx)) {
                kfree(req);
                return NULL;
        }

        *req->__ctx = ctx;

        return req;
}

void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
        struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
        struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
        struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
        struct crypto_scomp *scomp = *tfm_ctx;
        void *ctx = *req->__ctx;

        if (ctx)
                crypto_scomp_free_ctx(scomp, ctx);
}
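/*
 * The crypto_type instance that ties scomp algorithms into the generic
 * crypto API and into the netlink/procfs reporting hooks above.
 */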
static const struct crypto_type crypto_scomp_type = {
        .extsize = crypto_alg_extsize,
        .init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
        .show = crypto_scomp_show,
#endif
        .report = crypto_scomp_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_MASK,
        .type = CRYPTO_ALG_TYPE_SCOMPRESS,
        .tfmsize = offsetof(struct crypto_scomp, base),
};
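/*
 * Registration sketch (illustrative only, not part of this file): a
 * driver that provides a synchronous compression algorithm fills in a
 * struct scomp_alg and registers it here. The "foo" names below are
 * hypothetical placeholders for the driver's own callbacks.
 *
 *	static struct scomp_alg foo_scomp = {
 *		.alloc_ctx	= foo_alloc_ctx,
 *		.free_ctx	= foo_free_ctx,
 *		.compress	= foo_compress,
 *		.decompress	= foo_decompress,
 *		.base		= {
 *			.cra_name	 = "foo",
 *			.cra_driver_name = "foo-generic",
 *			.cra_module	 = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init foo_mod_init(void)
 *	{
 *		return crypto_register_scomp(&foo_scomp);
 *	}
 */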
int crypto_register_scomp(struct scomp_alg *alg)
{
        struct crypto_alg *base = &alg->base;

        base->cra_type = &crypto_scomp_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

void crypto_unregister_scomp(struct scomp_alg *alg)
{
        crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_scomp(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (--i; i >= 0; --i)
                crypto_unregister_scomp(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");