/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */
#ifndef _CRYPTO_ACOMP_INT_H
#define _CRYPTO_ACOMP_INT_H

#include <crypto/acompress.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/compiler_types.h>
#include <linux/cpumask_types.h>
#include <linux/spinlock.h>
#include <linux/workqueue_types.h>

#define ACOMP_FBREQ_ON_STACK(name, req) \
        char __##name##_req[sizeof(struct acomp_req) + \
                            MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
        struct acomp_req *name = acomp_fbreq_on_stack_init( \
                __##name##_req, (req))
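
/*
 * Example (illustrative sketch only, not part of this header): a driver that
 * cannot service a particular request in hardware may build an on-stack
 * request for the software fallback transform and submit it synchronously.
 * The helpers my_hw_can_handle(), my_hw_compress() and the function name
 * my_compress() below are hypothetical.
 *
 *	static int my_compress(struct acomp_req *req)
 *	{
 *		ACOMP_FBREQ_ON_STACK(fbreq, req);
 *
 *		if (!my_hw_can_handle(req))
 *			return crypto_acomp_compress(fbreq);
 *
 *		return my_hw_compress(req);
 *	}
 */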

/**
 * struct acomp_alg - asynchronous compression algorithm
 *
 * @compress:   Function performs a compress operation
 * @decompress: Function performs a de-compress operation
 * @init:       Initialize the cryptographic transformation object.
 *              This function is called only once, at instantiation time,
 *              right after the transformation context was allocated. In
 *              case the cryptographic hardware has some special
 *              requirements which need to be handled by software, this
 *              function shall check for the precise requirement of the
 *              transformation and put any software fallbacks in place.
 * @exit:       Deinitialize the cryptographic transformation object. This is a
 *              counterpart to @init, used to remove various changes set in
 *              @init.
 *
 * @base:       Common crypto API algorithm data structure
 * @calg:       Common algorithm data structure shared with scomp
 */
struct acomp_alg {
        int (*compress)(struct acomp_req *req);
        int (*decompress)(struct acomp_req *req);
        int (*init)(struct crypto_acomp *tfm);
        void (*exit)(struct crypto_acomp *tfm);

        union {
                struct COMP_ALG_COMMON;
                struct comp_alg_common calg;
        };
};
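
/*
 * Example (illustrative sketch only): how a hypothetical driver might fill
 * in struct acomp_alg.  The callbacks my_acomp_compress(),
 * my_acomp_decompress(), my_acomp_init(), my_acomp_exit() and the driver
 * name "deflate-my-hw" are made-up names used for illustration.
 *
 *	static struct acomp_alg my_acomp = {
 *		.compress	= my_acomp_compress,
 *		.decompress	= my_acomp_decompress,
 *		.init		= my_acomp_init,
 *		.exit		= my_acomp_exit,
 *		.base = {
 *			.cra_name		= "deflate",
 *			.cra_driver_name	= "deflate-my-hw",
 *			.cra_priority		= 400,
 *			.cra_flags		= CRYPTO_ALG_ASYNC,
 *			.cra_module		= THIS_MODULE,
 *		},
 *	};
 */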

struct crypto_acomp_stream {
        spinlock_t lock;
        void *ctx;
};

struct crypto_acomp_streams {
        /* These must come first because of struct scomp_alg. */
        void *(*alloc_ctx)(void);
        void (*free_ctx)(void *);

        struct crypto_acomp_stream __percpu *streams;
        struct work_struct stream_work;
        cpumask_t stream_want;
};

struct acomp_walk {
        union {
                /* Virtual address of the source. */
                struct {
                        struct {
                                const void *const addr;
                        } virt;
                } src;

                /* Private field for the API, do not use. */
                struct scatter_walk in;
        };

        union {
                /* Virtual address of the destination. */
                struct {
                        struct {
                                void *const addr;
                        } virt;
                } dst;

                /* Private field for the API, do not use. */
                struct scatter_walk out;
        };

        unsigned int slen;
        unsigned int dlen;

        int flags;
};

/*
 * Transform internal helpers.
 */
static inline void *acomp_request_ctx(struct acomp_req *req)
{
        return req->__ctx;
}

static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
{
        return tfm->base.__crt_ctx;
}

static inline void acomp_request_complete(struct acomp_req *req,
                                          int err)
{
        crypto_request_complete(&req->base, err);
}
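
/*
 * Example (illustrative sketch only): an asynchronous driver typically keeps
 * per-request state in the request context (sized via the transform's
 * reqsize) and completes the request from its hardware done path.  The type
 * struct my_req_ctx, its "produced" field and my_hw_done() are hypothetical.
 *
 *	static void my_hw_done(void *data, int err)
 *	{
 *		struct acomp_req *req = data;
 *		struct my_req_ctx *rctx = acomp_request_ctx(req);
 *
 *		req->dlen = rctx->produced;
 *		acomp_request_complete(req, err);
 *	}
 */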

/**
 * crypto_register_acomp() -- Register asynchronous compression algorithm
 *
 * Function registers an implementation of an asynchronous
 * compression algorithm
 *
 * @alg: algorithm definition
 *
 * Return: zero on success; error code in case of error
 */
int crypto_register_acomp(struct acomp_alg *alg);

/**
 * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
 *
 * Function unregisters an implementation of an asynchronous
 * compression algorithm
 *
 * @alg: algorithm definition
 */
void crypto_unregister_acomp(struct acomp_alg *alg);

int crypto_register_acomps(struct acomp_alg *algs, int count);
void crypto_unregister_acomps(struct acomp_alg *algs, int count);
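
/*
 * Example (illustrative sketch only): drivers usually register their
 * algorithms from module init and unregister them on exit.  The array name
 * my_acomp_algs is hypothetical.
 *
 *	static int __init my_driver_init(void)
 *	{
 *		return crypto_register_acomps(my_acomp_algs,
 *					      ARRAY_SIZE(my_acomp_algs));
 *	}
 *
 *	static void __exit my_driver_exit(void)
 *	{
 *		crypto_unregister_acomps(my_acomp_algs,
 *					 ARRAY_SIZE(my_acomp_algs));
 *	}
 */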

static inline bool acomp_request_issg(struct acomp_req *req)
{
        return !(req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
                                    CRYPTO_ACOMP_REQ_DST_VIRT));
}

static inline bool acomp_request_src_isvirt(struct acomp_req *req)
{
        return req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT;
}

static inline bool acomp_request_dst_isvirt(struct acomp_req *req)
{
        return req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT;
}

static inline bool acomp_request_isvirt(struct acomp_req *req)
{
        return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
                                  CRYPTO_ACOMP_REQ_DST_VIRT);
}

static inline bool acomp_request_src_isnondma(struct acomp_req *req)
{
        return req->base.flags & CRYPTO_ACOMP_REQ_SRC_NONDMA;
}

static inline bool acomp_request_dst_isnondma(struct acomp_req *req)
{
        return req->base.flags & CRYPTO_ACOMP_REQ_DST_NONDMA;
}

static inline bool acomp_request_isnondma(struct acomp_req *req)
{
        return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_NONDMA |
                                  CRYPTO_ACOMP_REQ_DST_NONDMA);
}

static inline bool crypto_acomp_req_virt(struct crypto_acomp *tfm)
{
        return crypto_tfm_req_virt(&tfm->base);
}

void crypto_acomp_free_streams(struct crypto_acomp_streams *s);
int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s);

struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
        struct crypto_acomp_streams *s) __acquires(stream);

static inline void crypto_acomp_unlock_stream_bh(
        struct crypto_acomp_stream *stream) __releases(stream)
{
        spin_unlock_bh(&stream->lock);
}
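
/*
 * Example (illustrative sketch only): a per-CPU stream context is taken and
 * released around a single (de)compression operation.  The variable
 * my_streams (a struct crypto_acomp_streams) and the helper
 * my_do_decompress() operating on the stream's ctx are hypothetical.
 *
 *	struct crypto_acomp_stream *s;
 *	int ret;
 *
 *	s = crypto_acomp_lock_stream_bh(&my_streams);
 *	ret = my_do_decompress(s->ctx, req);
 *	crypto_acomp_unlock_stream_bh(s);
 */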

void acomp_walk_done_src(struct acomp_walk *walk, int used);
void acomp_walk_done_dst(struct acomp_walk *walk, int used);
int acomp_walk_next_src(struct acomp_walk *walk);
int acomp_walk_next_dst(struct acomp_walk *walk);
int acomp_walk_virt(struct acomp_walk *__restrict walk,
                    struct acomp_req *__restrict req, bool atomic);
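
/*
 * Example (simplified, illustrative sketch only): a walk over the request's
 * source and destination.  The hypothetical my_consume() is assumed to
 * process each mapped chunk in full; real users call the _done helpers with
 * the number of bytes actually consumed/produced and also loop over the
 * destination.
 *
 *	struct acomp_walk walk;
 *	bool more;
 *	int ret;
 *
 *	ret = acomp_walk_virt(&walk, req, true);
 *	if (ret)
 *		return ret;
 *
 *	do {
 *		int scur = acomp_walk_next_src(&walk);
 *		int dcur = acomp_walk_next_dst(&walk);
 *
 *		more = acomp_walk_more_src(&walk, scur);
 *		my_consume(walk.src.virt.addr, scur,
 *			   walk.dst.virt.addr, dcur);
 *		acomp_walk_done_src(&walk, scur);
 *		acomp_walk_done_dst(&walk, dcur);
 *	} while (more);
 */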

static inline bool acomp_walk_more_src(const struct acomp_walk *walk, int cur)
{
        return walk->slen != cur;
}

static inline u32 acomp_request_flags(struct acomp_req *req)
{
        return crypto_request_flags(&req->base) & ~CRYPTO_ACOMP_REQ_PRIVATE;
}

static inline struct crypto_acomp *crypto_acomp_fb(struct crypto_acomp *tfm)
{
        return __crypto_acomp_tfm(crypto_acomp_tfm(tfm)->fb);
}

static inline struct acomp_req *acomp_fbreq_on_stack_init(
        char *buf, struct acomp_req *old)
{
        struct crypto_acomp *tfm = crypto_acomp_reqtfm(old);
        struct acomp_req *req = (void *)buf;

        crypto_stack_request_init(&req->base,
                                  crypto_acomp_tfm(crypto_acomp_fb(tfm)));
        acomp_request_set_callback(req, acomp_request_flags(old), NULL, NULL);
        req->base.flags &= ~CRYPTO_ACOMP_REQ_PRIVATE;
        req->base.flags |= old->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
        req->src = old->src;
        req->dst = old->dst;
        req->slen = old->slen;
        req->dlen = old->dlen;

        return req;
}

#endif