// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>

#include "compress.h"

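/*
 * Per-CPU scratch memory for the copy fall-back paths below: a single
 * page backs the source (addressable either as a pointer or, via the
 * union, as an unsigned long for free_page()) and an SCOMP_SCRATCH_SIZE
 * vmalloc area backs the destination.  The size, just under 64 KiB,
 * caps how much output the scratch path can return.
 */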
#define SCOMP_SCRATCH_SIZE	65400

struct scomp_scratch {
	spinlock_t lock;
	union {
		void *src;
		unsigned long saddr;
	};
	void *dst;
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

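/*
 * Reporting hooks for the crypto_user netlink interface and
 * /proc/crypto; each is referenced only when the corresponding config
 * option is enabled, hence __maybe_unused.
 */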
static int __maybe_unused crypto_scomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

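/*
 * The scratch buffers are shared by all scomp tfms and reference
 * counted through scomp_scratch_users under scomp_lock.  free_page(0)
 * and vfree(NULL) are no-ops, so a partially allocated set can be
 * torn down unconditionally.
 */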
static void crypto_scomp_free_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		free_page(scratch->saddr);
		vfree(scratch->dst);
		scratch->src = NULL;
		scratch->dst = NULL;
	}
}

static int crypto_scomp_alloc_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		struct page *page;
		void *mem;

		scratch = per_cpu_ptr(&scomp_scratch, i);

		page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, 0);
		if (!page)
			goto error;
		scratch->src = page_address(page);
		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->dst = mem;
	}
	return 0;
error:
	crypto_scomp_free_scratches();
	return -ENOMEM;
}

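/*
 * Each algorithm gets a lazily allocated per-CPU array of
 * crypto_acomp_stream objects holding the driver context created by
 * alg->alloc_ctx().  If allocation fails part-way through,
 * scomp_free_streams() stops at the first context that was never
 * successfully allocated.
 */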
static void scomp_free_streams(struct scomp_alg *alg)
{
	struct crypto_acomp_stream __percpu *stream = alg->stream;
	int i;

	alg->stream = NULL;
	if (!stream)
		return;

	for_each_possible_cpu(i) {
		struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);

		if (IS_ERR_OR_NULL(ps->ctx))
			break;

		alg->free_ctx(ps->ctx);
	}

	free_percpu(stream);
}

static int scomp_alloc_streams(struct scomp_alg *alg)
{
	struct crypto_acomp_stream __percpu *stream;
	int i;

	stream = alloc_percpu(struct crypto_acomp_stream);
	if (!stream)
		return -ENOMEM;

	alg->stream = stream;

	for_each_possible_cpu(i) {
		struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);

		ps->ctx = alg->alloc_ctx();
		if (IS_ERR(ps->ctx)) {
			scomp_free_streams(alg);
			return PTR_ERR(ps->ctx);
		}

		spin_lock_init(&ps->lock);
	}
	return 0;
}

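/*
 * First-use initialisation: per-algorithm streams are allocated once,
 * the global scratch buffers on the first user, both under scomp_lock.
 */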
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm));
	int ret = 0;

	mutex_lock(&scomp_lock);
	if (!alg->stream) {
		ret = scomp_alloc_streams(alg);
		if (ret)
			goto unlock;
	}
	if (!scomp_scratch_users++) {
		ret = crypto_scomp_alloc_scratches();
		if (ret)
			scomp_scratch_users--;
	}
unlock:
	mutex_unlock(&scomp_lock);

	return ret;
}

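/*
 * Core (de)compression path.  scomp algorithms operate on linear
 * buffers, so source and destination are mapped directly whenever
 * possible: a caller-supplied virtual address is used as-is, and a
 * folio or a single scatterlist entry is kmapped unless it spans a
 * page boundary in highmem.  Anything else falls back to the per-CPU
 * scratch buffers, copying the source in beforehand and the
 * destination out afterwards.  The scratch lock is taken with BH
 * disabled, presumably so the buffers remain usable from softirq
 * context without risk of self-deadlock.
 */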
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct scomp_scratch *scratch = raw_cpu_ptr(&scomp_scratch);
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	struct crypto_acomp_stream *stream;
	unsigned int slen = req->slen;
	unsigned int dlen = req->dlen;
	struct page *spage, *dpage;
	unsigned int n;
	const u8 *src;
	size_t soff;
	size_t doff;
	u8 *dst;
	int ret;

	if (!req->src || !slen)
		return -EINVAL;

	if (!req->dst || !dlen)
		return -EINVAL;

	if (acomp_request_src_isvirt(req))
		src = req->svirt;
	else {
		src = scratch->src;
		do {
			if (acomp_request_src_isfolio(req)) {
				spage = folio_page(req->sfolio, 0);
				soff = req->soff;
			} else if (slen <= req->src->length) {
				spage = sg_page(req->src);
				soff = req->src->offset;
			} else
				break;

			spage = nth_page(spage, soff / PAGE_SIZE);
			soff = offset_in_page(soff);

			/* Index of the page holding the last source byte. */
			n = (slen - 1) / PAGE_SIZE;
			n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE;
			if (PageHighMem(nth_page(spage, n)) &&
			    size_add(soff, slen) > PAGE_SIZE)
				break;
			src = kmap_local_page(spage) + soff;
		} while (0);
	}

	if (acomp_request_dst_isvirt(req))
		dst = req->dvirt;
	else {
		unsigned int max = SCOMP_SCRATCH_SIZE;

		dst = scratch->dst;
		do {
			if (acomp_request_dst_isfolio(req)) {
				dpage = folio_page(req->dfolio, 0);
				doff = req->doff;
			} else if (dlen <= req->dst->length) {
				dpage = sg_page(req->dst);
				doff = req->dst->offset;
			} else
				break;

			dpage = nth_page(dpage, doff / PAGE_SIZE);
			doff = offset_in_page(doff);

			/* Index of the page holding the last destination byte. */
			n = (dlen - 1) / PAGE_SIZE;
			n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE;
			if (PageHighMem(nth_page(dpage, n)) &&
			    size_add(doff, dlen) > PAGE_SIZE)
				break;
			dst = kmap_local_page(dpage) + doff;
			max = dlen;
		} while (0);
		dlen = min(dlen, max);
	}

	spin_lock_bh(&scratch->lock);

	if (src == scratch->src)
		memcpy_from_sglist(scratch->src, req->src, 0, slen);

	stream = raw_cpu_ptr(crypto_scomp_alg(scomp)->stream);
	spin_lock(&stream->lock);
	if (dir)
		ret = crypto_scomp_compress(scomp, src, slen,
					    dst, &dlen, stream->ctx);
	else
		ret = crypto_scomp_decompress(scomp, src, slen,
					      dst, &dlen, stream->ctx);

	if (dst == scratch->dst)
		memcpy_to_sglist(req->dst, 0, dst, dlen);

	spin_unlock(&stream->lock);
	spin_unlock_bh(&scratch->lock);

	req->dlen = dlen;

	if (!acomp_request_dst_isvirt(req) && dst != scratch->dst) {
		kunmap_local(dst);
		/* Output went through a kernel mapping: flush every page touched. */
		dlen += doff;
		for (;;) {
			flush_dcache_page(dpage);
			if (dlen <= PAGE_SIZE)
				break;
			dlen -= PAGE_SIZE;
			dpage = nth_page(dpage, 1);
		}
	}
	if (!acomp_request_src_isvirt(req) && src != scratch->src)
		kunmap_local(src);

	return ret;
}

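/*
 * Request chaining: process the head request, then every request
 * linked on its list, synchronously.  Each request records its own
 * result in base.err; the head's result is also the return value.
 */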
static int scomp_acomp_chain(struct acomp_req *req, int dir)
{
	struct acomp_req *r2;
	int err;

	err = scomp_acomp_comp_decomp(req, dir);
	req->base.err = err;

	list_for_each_entry(r2, &req->base.list, base.list)
		r2->base.err = scomp_acomp_comp_decomp(r2, dir);

	return err;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_chain(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_chain(req, 0);
}

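/*
 * Glue for exposing scomp algorithms through the acomp interface: the
 * acomp tfm context holds a crypto_scomp handle, and the synchronous
 * entry points above serve as the acomp compress/decompress callbacks.
 */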
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}

int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;

	return 0;
}

static void crypto_scomp_destroy(struct crypto_alg *alg)
{
	scomp_free_streams(__crypto_scomp_alg(alg));
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
	.destroy = crypto_scomp_destroy,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_scomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

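/*
 * Set up the fields shared with other compression types and advertise
 * request chaining, which scomp_acomp_chain() implements above.
 */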
static void scomp_prepare_alg(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_flags |= CRYPTO_ALG_REQ_CHAIN;
}

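/**
 * crypto_register_scomp() - register a synchronous compression algorithm
 * @alg: the algorithm to register
 *
 * A minimal registration sketch, with a hypothetical "mycomp" driver
 * purely for illustration; the callbacks shown are the ones this file
 * invokes (alloc_ctx/free_ctx from the stream helpers, compress and
 * decompress from scomp_acomp_comp_decomp()):
 *
 *	static struct scomp_alg mycomp_alg = {
 *		.alloc_ctx	= mycomp_alloc_ctx,
 *		.free_ctx	= mycomp_free_ctx,
 *		.compress	= mycomp_compress,
 *		.decompress	= mycomp_decompress,
 *		.base		= {
 *			.cra_name	 = "mycomp",
 *			.cra_driver_name = "mycomp-generic",
 *			.cra_module	 = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init mycomp_init(void)
 *	{
 *		return crypto_register_scomp(&mycomp_alg);
 *	}
 */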
int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	scomp_prepare_alg(alg);

	base->cra_type = &crypto_scomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

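/*
 * Batch registration helpers: on failure, algorithms registered so far
 * are unwound in reverse order.
 */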
int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");