1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * Asynchronous Compression operations
4  *
5  * Copyright (c) 2016, Intel Corporation
6  * Authors: Weigang Li <weigang.li@intel.com>
7  *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
8  */
9 #ifndef _CRYPTO_ACOMP_H
10 #define _CRYPTO_ACOMP_H
11 
12 #include <linux/atomic.h>
13 #include <linux/args.h>
14 #include <linux/compiler_types.h>
15 #include <linux/container_of.h>
16 #include <linux/crypto.h>
17 #include <linux/err.h>
18 #include <linux/scatterlist.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock_types.h>
21 #include <linux/types.h>
22 
23 /* Set this bit if source is virtual address instead of SG list. */
24 #define CRYPTO_ACOMP_REQ_SRC_VIRT	0x00000002
25 
/* Set this bit if the virtual address source cannot be used for DMA. */
27 #define CRYPTO_ACOMP_REQ_SRC_NONDMA	0x00000004
28 
29 /* Set this bit if destination is virtual address instead of SG list. */
30 #define CRYPTO_ACOMP_REQ_DST_VIRT	0x00000008
31 
/* Set this bit if the virtual address destination cannot be used for DMA. */
33 #define CRYPTO_ACOMP_REQ_DST_NONDMA	0x00000010
34 
35 /* Set this bit if source is a folio. */
36 #define CRYPTO_ACOMP_REQ_SRC_FOLIO	0x00000020
37 
38 /* Set this bit if destination is a folio. */
39 #define CRYPTO_ACOMP_REQ_DST_FOLIO	0x00000040
40 
41 #define CRYPTO_ACOMP_DST_MAX		131072
42 
43 #define	MAX_SYNC_COMP_REQSIZE		0
44 
/*
 * Declare an acomp request named @name without a separate allocation:
 * a heap request is attempted first and, failing that, the on-stack
 * buffer below is used together with the tfm's synchronous fallback
 * (see acomp_request_on_stack_init()).
 */
#define ACOMP_REQUEST_ALLOC(name, tfm, gfp) \
        char __##name##_req[sizeof(struct acomp_req) + \
                            MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
        struct acomp_req *name = acomp_request_on_stack_init( \
                __##name##_req, (tfm), (gfp), false)
50 
51 struct acomp_req;
52 struct folio;
53 
/*
 * Private request-chaining state embedded in struct acomp_req (see the
 * @chain member there: "Private API code data, do not use").  Drivers
 * and API users must not touch these fields.
 */
struct acomp_req_chain {
	struct list_head head;
	struct acomp_req *req0;		/* first request of the chain */
	struct acomp_req *cur;		/* request currently being processed */
	int (*op)(struct acomp_req *req);
	crypto_completion_t compl;
	void *data;
	struct scatterlist ssg;		/* scratch source SG entry */
	struct scatterlist dsg;		/* scratch destination SG entry */
	/* Saved source: virtual address or folio, per the request flags. */
	union {
		const u8 *src;
		struct folio *sfolio;
	};
	/* Saved destination: virtual address or folio, per the request flags. */
	union {
		u8 *dst;
		struct folio *dfolio;
	};
	size_t soff;
	size_t doff;
	u32 flags;
};
75 
/**
 * struct acomp_req - asynchronous (de)compression request
 *
 * @base:	Common attributes for asynchronous crypto requests
 * @src:	Source scatterlist
 * @dst:	Destination scatterlist
 * @svirt:	Source virtual address
 * @dvirt:	Destination virtual address
 * @sfolio:	Source folio
 * @soff:	Source folio offset
 * @dfolio:	Destination folio
 * @doff:	Destination folio offset
 * @slen:	Size of the input buffer
 * @dlen:	Size of the output buffer and number of bytes produced
 * @chain:	Private API code data, do not use
 * @__ctx:	Start of private context data
 */
struct acomp_req {
	struct crypto_async_request base;
	/*
	 * Active member is selected by the CRYPTO_ACOMP_REQ_SRC_VIRT /
	 * CRYPTO_ACOMP_REQ_SRC_FOLIO flags in base.flags (set by the
	 * acomp_request_set_src_* helpers); default is the scatterlist.
	 */
	union {
		struct scatterlist *src;
		const u8 *svirt;
		struct folio *sfolio;
	};
	/* Likewise selected by the CRYPTO_ACOMP_REQ_DST_* flags. */
	union {
		struct scatterlist *dst;
		u8 *dvirt;
		struct folio *dfolio;
	};
	size_t soff;
	size_t doff;
	unsigned int slen;
	unsigned int dlen;

	struct acomp_req_chain chain;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
114 
/**
 * struct crypto_acomp - user-instantiated objects which encapsulate
 * algorithms and core processing logic
 *
 * @compress:		Function performs a compress operation
 * @decompress:		Function performs a de-compress operation
 * @reqsize:		Context size for (de)compression requests
 * @fb:			Synchronous fallback tfm
 * @base:		Common crypto API algorithm data structure
 */
struct crypto_acomp {
	int (*compress)(struct acomp_req *req);
	int (*decompress)(struct acomp_req *req);
	unsigned int reqsize;
	/* Used for on-stack requests; see acomp_request_on_stack_init(). */
	struct crypto_acomp *fb;
	struct crypto_tfm base;
};
132 
/*
 * Per-stream (de)compression state.  @ctx is the algorithm's stream
 * context; @lock presumably serializes access to it by the acomp core
 * (usage is not visible in this header).
 */
struct crypto_acomp_stream {
	spinlock_t lock;
	void *ctx;
};
137 
/*
 * Member list shared by compression algorithm types; kept as a macro so
 * the same fields can be embedded elsewhere.  Expanded directly below
 * into struct comp_alg_common.
 */
#define COMP_ALG_COMMON {			\
	struct crypto_alg base;			\
	struct crypto_acomp_stream __percpu *stream;	\
}
struct comp_alg_common COMP_ALG_COMMON;
143 
144 /**
145  * DOC: Asynchronous Compression API
146  *
147  * The Asynchronous Compression API is used with the algorithms of type
148  * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
149  */
150 
151 /**
152  * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
153  * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
154  *		compression algorithm e.g. "deflate"
155  * @type:	specifies the type of the algorithm
156  * @mask:	specifies the mask for the algorithm
157  *
158  * Allocate a handle for a compression algorithm. The returned struct
159  * crypto_acomp is the handle that is required for any subsequent
160  * API invocation for the compression operations.
161  *
162  * Return:	allocated handle in case of success; IS_ERR() is true in case
163  *		of an error, PTR_ERR() returns the error code.
164  */
165 struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
166 					u32 mask);
167 /**
168  * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node
169  * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
170  *		compression algorithm e.g. "deflate"
171  * @type:	specifies the type of the algorithm
172  * @mask:	specifies the mask for the algorithm
173  * @node:	specifies the NUMA node the ZIP hardware belongs to
174  *
175  * Allocate a handle for a compression algorithm. Drivers should try to use
176  * (de)compressors on the specified NUMA node.
177  * The returned struct crypto_acomp is the handle that is required for any
178  * subsequent API invocation for the compression operations.
179  *
180  * Return:	allocated handle in case of success; IS_ERR() is true in case
181  *		of an error, PTR_ERR() returns the error code.
182  */
183 struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
184 					u32 mask, int node);
185 
/* Return the generic crypto_tfm embedded in an acomp handle. */
static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
{
	return &tfm->base;
}
190 
/* Map a generic crypto_alg back to its containing comp_alg_common. */
static inline struct comp_alg_common *__crypto_comp_alg_common(
	struct crypto_alg *alg)
{
	return container_of(alg, struct comp_alg_common, base);
}
196 
/* Map a generic crypto_tfm back to its containing crypto_acomp handle. */
static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
{
	return container_of(tfm, struct crypto_acomp, base);
}
201 
/* Return the comp_alg_common backing an acomp tfm handle. */
static inline struct comp_alg_common *crypto_comp_alg_common(
	struct crypto_acomp *tfm)
{
	return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg);
}
207 
/* Per-request private context size required by this tfm's algorithm. */
static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
{
	return tfm->reqsize;
}
212 
/* Bind @req to @tfm so subsequent operations use that transform. */
static inline void acomp_request_set_tfm(struct acomp_req *req,
					 struct crypto_acomp *tfm)
{
	req->base.tfm = crypto_acomp_tfm(tfm);
}
218 
acomp_is_async(struct crypto_acomp * tfm)219 static inline bool acomp_is_async(struct crypto_acomp *tfm)
220 {
221 	return crypto_comp_alg_common(tfm)->base.cra_flags &
222 	       CRYPTO_ALG_ASYNC;
223 }
224 
/* Return the acomp tfm that @req was bound to via acomp_request_set_tfm(). */
static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
{
	return __crypto_acomp_tfm(req->base.tfm);
}
229 
/**
 * crypto_free_acomp() -- free ACOMPRESS tfm handle
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 *
 * If @tfm is a NULL or error pointer, this function does nothing.
 */
static inline void crypto_free_acomp(struct crypto_acomp *tfm)
{
	crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
}
241 
/*
 * Check whether an acomp algorithm named @alg_name is available, forcing
 * the ACOMPRESS type bits regardless of what the caller passed in.
 */
static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
{
	u32 acomp_type = (type & ~CRYPTO_ALG_TYPE_MASK) |
			 CRYPTO_ALG_TYPE_ACOMPRESS;
	u32 acomp_mask = mask | CRYPTO_ALG_TYPE_ACOMPRESS_MASK;

	return crypto_has_alg(alg_name, acomp_type, acomp_mask);
}
250 
/* cra_name (generic algorithm name, e.g. "deflate") of @tfm. */
static inline const char *crypto_acomp_alg_name(struct crypto_acomp *tfm)
{
	return crypto_tfm_alg_name(crypto_acomp_tfm(tfm));
}
255 
/* cra_driver_name (implementation-specific name) of @tfm. */
static inline const char *crypto_acomp_driver_name(struct crypto_acomp *tfm)
{
	return crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
}
260 
261 /**
262  * acomp_request_alloc() -- allocates asynchronous (de)compression request
263  *
264  * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
265  * @gfp:	gfp to pass to kzalloc (defaults to GFP_KERNEL)
266  *
267  * Return:	allocated handle in case of success or NULL in case of an error
268  */
/*
 * Allocate a zeroed request sized for @tfm plus @extra trailing bytes
 * (retrievable with acomp_request_extra()).  Returns NULL on overflow
 * or allocation failure.
 */
static inline struct acomp_req *acomp_request_alloc_extra_noprof(
	struct crypto_acomp *tfm, size_t extra, gfp_t gfp)
{
	struct acomp_req *req;
	size_t total = ALIGN(sizeof(struct acomp_req) +
			     crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);

	/* Reject a size that would wrap when the extra area is appended. */
	if (check_add_overflow(total, extra, &total))
		return NULL;

	req = kzalloc_noprof(total, gfp);
	if (!req)
		return NULL;

	acomp_request_set_tfm(req, tfm);
	return req;
}
/*
 * acomp_request_alloc(tfm[, gfp]) -- the gfp argument is optional.
 * COUNT_ARGS() dispatches to the _0 or _1 helper, so a missing gfp
 * defaults to GFP_KERNEL; both funnel into
 * acomp_request_alloc_extra_noprof() with extra == 0.
 */
#define acomp_request_alloc_noprof(tfm, ...) \
	CONCATENATE(acomp_request_alloc_noprof_, COUNT_ARGS(__VA_ARGS__))( \
		tfm, ##__VA_ARGS__)
#define acomp_request_alloc_noprof_0(tfm) \
	acomp_request_alloc_noprof_1(tfm, GFP_KERNEL)
#define acomp_request_alloc_noprof_1(tfm, gfp) \
	acomp_request_alloc_extra_noprof(tfm, 0, gfp)
#define acomp_request_alloc(...)	alloc_hooks(acomp_request_alloc_noprof(__VA_ARGS__))

/**
 * acomp_request_alloc_extra() -- allocate acomp request with extra memory
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 * @extra:	amount of extra memory
 * @gfp:	gfp to pass to kzalloc
 *
 * Return:	allocated handle in case of success or NULL in case of an error
 */
#define acomp_request_alloc_extra(...)	alloc_hooks(acomp_request_alloc_extra_noprof(__VA_ARGS__))
303 
acomp_request_extra(struct acomp_req * req)304 static inline void *acomp_request_extra(struct acomp_req *req)
305 {
306 	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
307 	size_t len;
308 
309 	len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);
310 	return (void *)((char *)req + len);
311 }
312 
313 /**
314  * acomp_request_free() -- zeroize and free asynchronous (de)compression
315  *			   request as well as the output buffer if allocated
316  *			   inside the algorithm
317  *
318  * @req:	request to free
319  */
acomp_request_free(struct acomp_req * req)320 static inline void acomp_request_free(struct acomp_req *req)
321 {
322 	if (!req || (req->base.flags & CRYPTO_TFM_REQ_ON_STACK))
323 		return;
324 	kfree_sensitive(req);
325 }
326 
/**
 * acomp_request_set_callback() -- Sets an asynchronous callback
 *
 * Callback will be called when an asynchronous operation on a given
 * request is finished.
 *
 * @req:	request that the callback will be set for
 * @flgs:	specify for instance if the operation may backlog
 * @cmpl:	callback which will be called
 * @data:	private data used by the caller
 */
static inline void acomp_request_set_callback(struct acomp_req *req,
					      u32 flgs,
					      crypto_completion_t cmpl,
					      void *data)
{
	/* Addressing and on-stack flags are owned by the API: preserve
	 * them and take only the remaining bits from the caller. */
	u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA |
		   CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA |
		   CRYPTO_ACOMP_REQ_SRC_FOLIO | CRYPTO_ACOMP_REQ_DST_FOLIO |
		   CRYPTO_TFM_REQ_ON_STACK;

	req->base.complete = cmpl;
	req->base.data = data;
	req->base.flags &= keep;
	req->base.flags |= flgs & ~keep;

	crypto_reqchain_init(&req->base);
}
355 
356 /**
357  * acomp_request_set_params() -- Sets request parameters
358  *
359  * Sets parameters required by an acomp operation
360  *
361  * @req:	asynchronous compress request
362  * @src:	pointer to input buffer scatterlist
363  * @dst:	pointer to output buffer scatterlist. If this is NULL, the
364  *		acomp layer will allocate the output memory
365  * @slen:	size of the input buffer
366  * @dlen:	size of the output buffer. If dst is NULL, this can be used by
367  *		the user to specify the maximum amount of memory to allocate
368  */
acomp_request_set_params(struct acomp_req * req,struct scatterlist * src,struct scatterlist * dst,unsigned int slen,unsigned int dlen)369 static inline void acomp_request_set_params(struct acomp_req *req,
370 					    struct scatterlist *src,
371 					    struct scatterlist *dst,
372 					    unsigned int slen,
373 					    unsigned int dlen)
374 {
375 	req->src = src;
376 	req->dst = dst;
377 	req->slen = slen;
378 	req->dlen = dlen;
379 
380 	req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT |
381 			     CRYPTO_ACOMP_REQ_SRC_NONDMA |
382 			     CRYPTO_ACOMP_REQ_SRC_FOLIO |
383 			     CRYPTO_ACOMP_REQ_DST_FOLIO |
384 			     CRYPTO_ACOMP_REQ_DST_VIRT |
385 			     CRYPTO_ACOMP_REQ_DST_NONDMA);
386 }
387 
388 /**
389  * acomp_request_set_src_sg() -- Sets source scatterlist
390  *
391  * Sets source scatterlist required by an acomp operation.
392  *
393  * @req:	asynchronous compress request
394  * @src:	pointer to input buffer scatterlist
395  * @slen:	size of the input buffer
396  */
acomp_request_set_src_sg(struct acomp_req * req,struct scatterlist * src,unsigned int slen)397 static inline void acomp_request_set_src_sg(struct acomp_req *req,
398 					    struct scatterlist *src,
399 					    unsigned int slen)
400 {
401 	req->src = src;
402 	req->slen = slen;
403 
404 	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
405 	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
406 	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO;
407 }
408 
409 /**
410  * acomp_request_set_src_dma() -- Sets DMA source virtual address
411  *
412  * Sets source virtual address required by an acomp operation.
413  * The address must be usable for DMA.
414  *
415  * @req:	asynchronous compress request
416  * @src:	virtual address pointer to input buffer
417  * @slen:	size of the input buffer
418  */
acomp_request_set_src_dma(struct acomp_req * req,const u8 * src,unsigned int slen)419 static inline void acomp_request_set_src_dma(struct acomp_req *req,
420 					     const u8 *src, unsigned int slen)
421 {
422 	req->svirt = src;
423 	req->slen = slen;
424 
425 	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
426 	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO;
427 	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
428 }
429 
430 /**
431  * acomp_request_set_src_nondma() -- Sets non-DMA source virtual address
432  *
433  * Sets source virtual address required by an acomp operation.
434  * The address can not be used for DMA.
435  *
436  * @req:	asynchronous compress request
437  * @src:	virtual address pointer to input buffer
438  * @slen:	size of the input buffer
439  */
acomp_request_set_src_nondma(struct acomp_req * req,const u8 * src,unsigned int slen)440 static inline void acomp_request_set_src_nondma(struct acomp_req *req,
441 						const u8 *src,
442 						unsigned int slen)
443 {
444 	req->svirt = src;
445 	req->slen = slen;
446 
447 	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO;
448 	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA;
449 	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
450 }
451 
452 /**
453  * acomp_request_set_src_folio() -- Sets source folio
454  *
455  * Sets source folio required by an acomp operation.
456  *
457  * @req:	asynchronous compress request
458  * @folio:	pointer to input folio
459  * @off:	input folio offset
460  * @len:	size of the input buffer
461  */
acomp_request_set_src_folio(struct acomp_req * req,struct folio * folio,size_t off,unsigned int len)462 static inline void acomp_request_set_src_folio(struct acomp_req *req,
463 					       struct folio *folio, size_t off,
464 					       unsigned int len)
465 {
466 	req->sfolio = folio;
467 	req->soff = off;
468 	req->slen = len;
469 
470 	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
471 	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
472 	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_FOLIO;
473 }
474 
475 /**
476  * acomp_request_set_dst_sg() -- Sets destination scatterlist
477  *
478  * Sets destination scatterlist required by an acomp operation.
479  *
480  * @req:	asynchronous compress request
481  * @dst:	pointer to output buffer scatterlist
482  * @dlen:	size of the output buffer
483  */
acomp_request_set_dst_sg(struct acomp_req * req,struct scatterlist * dst,unsigned int dlen)484 static inline void acomp_request_set_dst_sg(struct acomp_req *req,
485 					    struct scatterlist *dst,
486 					    unsigned int dlen)
487 {
488 	req->dst = dst;
489 	req->dlen = dlen;
490 
491 	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
492 	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
493 	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO;
494 }
495 
496 /**
497  * acomp_request_set_dst_dma() -- Sets DMA destination virtual address
498  *
499  * Sets destination virtual address required by an acomp operation.
500  * The address must be usable for DMA.
501  *
502  * @req:	asynchronous compress request
503  * @dst:	virtual address pointer to output buffer
504  * @dlen:	size of the output buffer
505  */
acomp_request_set_dst_dma(struct acomp_req * req,u8 * dst,unsigned int dlen)506 static inline void acomp_request_set_dst_dma(struct acomp_req *req,
507 					     u8 *dst, unsigned int dlen)
508 {
509 	req->dvirt = dst;
510 	req->dlen = dlen;
511 
512 	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
513 	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO;
514 	req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
515 }
516 
517 /**
518  * acomp_request_set_dst_nondma() -- Sets non-DMA destination virtual address
519  *
520  * Sets destination virtual address required by an acomp operation.
521  * The address can not be used for DMA.
522  *
523  * @req:	asynchronous compress request
524  * @dst:	virtual address pointer to output buffer
525  * @dlen:	size of the output buffer
526  */
acomp_request_set_dst_nondma(struct acomp_req * req,u8 * dst,unsigned int dlen)527 static inline void acomp_request_set_dst_nondma(struct acomp_req *req,
528 						u8 *dst, unsigned int dlen)
529 {
530 	req->dvirt = dst;
531 	req->dlen = dlen;
532 
533 	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO;
534 	req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA;
535 	req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
536 }
537 
538 /**
539  * acomp_request_set_dst_folio() -- Sets destination folio
540  *
541  * Sets destination folio required by an acomp operation.
542  *
543  * @req:	asynchronous compress request
544  * @folio:	pointer to input folio
545  * @off:	input folio offset
546  * @len:	size of the input buffer
547  */
acomp_request_set_dst_folio(struct acomp_req * req,struct folio * folio,size_t off,unsigned int len)548 static inline void acomp_request_set_dst_folio(struct acomp_req *req,
549 					       struct folio *folio, size_t off,
550 					       unsigned int len)
551 {
552 	req->dfolio = folio;
553 	req->doff = off;
554 	req->dlen = len;
555 
556 	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
557 	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
558 	req->base.flags |= CRYPTO_ACOMP_REQ_DST_FOLIO;
559 }
560 
/* Link @req onto the request chain headed by @head. */
static inline void acomp_request_chain(struct acomp_req *req,
				       struct acomp_req *head)
{
	crypto_request_chain(&req->base, &head->base);
}
566 
567 /**
568  * crypto_acomp_compress() -- Invoke asynchronous compress operation
569  *
570  * Function invokes the asynchronous compress operation
571  *
572  * @req:	asynchronous compress request
573  *
574  * Return:	zero on success; error code in case of error
575  */
576 int crypto_acomp_compress(struct acomp_req *req);
577 
578 /**
579  * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
580  *
581  * Function invokes the asynchronous decompress operation
582  *
 * @req:	asynchronous decompress request
584  *
585  * Return:	zero on success; error code in case of error
586  */
587 int crypto_acomp_decompress(struct acomp_req *req);
588 
/*
 * Back-end for ACOMP_REQUEST_ALLOC(): try a heap allocation first
 * (unless @stackonly), and otherwise initialize the caller-provided
 * buffer @buf as an on-stack request bound to the tfm's synchronous
 * fallback.
 */
static inline struct acomp_req *acomp_request_on_stack_init(
	char *buf, struct crypto_acomp *tfm, gfp_t gfp, bool stackonly)
{
	struct acomp_req *req;

	if (!stackonly) {
		req = acomp_request_alloc(tfm, gfp);
		if (req)
			return req;
	}

	/* Heap path unavailable: use the stack buffer with the fb tfm. */
	req = (void *)buf;
	acomp_request_set_tfm(req, tfm->fb);
	req->base.flags = CRYPTO_TFM_REQ_ON_STACK;

	return req;
}
603 
604 #endif
605