/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic scatter and gather helpers.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 Adam J. Richter <adam@yggdrasil.com>
 * Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#ifndef _CRYPTO_SCATTERWALK_H
#define _CRYPTO_SCATTERWALK_H

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

struct scatter_walk {
	/* Must be the first member, see struct skcipher_walk. */
	union {
		void *const addr;

		/* Private API field, do not touch. */
		union crypto_no_such_thing *__addr;
	};
	struct scatterlist *sg;
	unsigned int offset;
};

struct skcipher_walk {
	union {
		/* Virtual address of the source. */
		struct {
			struct {
				const void *const addr;
			} virt;
		} src;

		/* Private field for the API, do not use. */
		struct scatter_walk in;
	};

	union {
		/* Virtual address of the destination. */
		struct {
			struct {
				void *const addr;
			} virt;
		} dst;

		/* Private field for the API, do not use. */
		struct scatter_walk out;
	};

	unsigned int nbytes;
	unsigned int total;

	u8 *page;
	u8 *buffer;
	u8 *oiv;
	void *iv;

	unsigned int ivsize;

	int flags;
	unsigned int blocksize;
	unsigned int stride;
	unsigned int alignmask;
};
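
/*
 * Typical use of struct skcipher_walk (an illustrative sketch, not code from
 * this file): each iteration, a cipher implementation processes walk.nbytes
 * bytes from walk.src.virt.addr to walk.dst.virt.addr, then passes the number
 * of bytes left unprocessed to skcipher_walk_done().  Note that
 * skcipher_walk_virt() is declared separately in crypto/internal/skcipher.h,
 * and my_crypt() is a hypothetical cipher routine; real implementations often
 * also round walk.nbytes down to a multiple of the block size and report the
 * remainder to skcipher_walk_done().
 *
 *	struct skcipher_walk walk;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		my_crypt(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 *			 walk.nbytes, walk.iv);
 *		err = skcipher_walk_done(&walk, 0);
 *	}
 *	return err;
 */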

static inline void scatterwalk_crypto_chain(struct scatterlist *head,
					    struct scatterlist *sg, int num)
{
	if (sg)
		sg_chain(head, num, sg);
	else
		sg_mark_end(head);
}

static inline void scatterwalk_start(struct scatter_walk *walk,
				     struct scatterlist *sg)
{
	walk->sg = sg;
	walk->offset = sg->offset;
}

/*
 * This is equivalent to scatterwalk_start(walk, sg) followed by
 * scatterwalk_skip(walk, pos).
 */
static inline void scatterwalk_start_at_pos(struct scatter_walk *walk,
					    struct scatterlist *sg,
					    unsigned int pos)
{
	while (pos > sg->length) {
		pos -= sg->length;
		sg = sg_next(sg);
	}
	walk->sg = sg;
	walk->offset = sg->offset + pos;
}
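
/*
 * Example (a hypothetical AEAD-style caller, not code from this file): begin
 * walking req->src at the start of the ciphertext by skipping over the
 * associated data, whose length is req->assoclen.
 *
 *	struct scatter_walk walk;
 *
 *	scatterwalk_start_at_pos(&walk, req->src, req->assoclen);
 */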

static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
					     unsigned int nbytes)
{
	unsigned int len_this_sg;
	unsigned int limit;

	if (walk->offset >= walk->sg->offset + walk->sg->length)
		scatterwalk_start(walk, sg_next(walk->sg));
	len_this_sg = walk->sg->offset + walk->sg->length - walk->offset;

	/*
	 * HIGHMEM case: the page may have to be mapped into memory.  To avoid
	 * the complexity of having to map multiple pages at once per sg entry,
	 * clamp the returned length to not cross a page boundary.
	 *
	 * !HIGHMEM case: no mapping is needed; all pages of the sg entry are
	 * already mapped contiguously in the kernel's direct map.  For
	 * improved performance, allow the walker to return data segments that
	 * cross a page boundary.  Do still cap the length to PAGE_SIZE, since
	 * some users rely on that to avoid disabling preemption for too long
	 * when using SIMD.  It's also needed for when skcipher_walk uses a
	 * bounce page due to the data not being aligned to the algorithm's
	 * alignmask.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM))
		limit = PAGE_SIZE - offset_in_page(walk->offset);
	else
		limit = PAGE_SIZE;

	return min3(nbytes, len_this_sg, limit);
}

/*
 * Create a scatterlist that represents the remaining data in a walk.  Uses
 * chaining to reference the original scatterlist, so this uses at most two
 * entries in @sg_out regardless of the number of entries in the original list.
 * Assumes that sg_init_table() was already done.
 */
static inline void scatterwalk_get_sglist(struct scatter_walk *walk,
					  struct scatterlist sg_out[2])
{
	if (walk->offset >= walk->sg->offset + walk->sg->length)
		scatterwalk_start(walk, sg_next(walk->sg));
	sg_set_page(sg_out, sg_page(walk->sg),
		    walk->sg->offset + walk->sg->length - walk->offset,
		    walk->offset);
	scatterwalk_crypto_chain(sg_out, sg_next(walk->sg), 2);
}
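
/*
 * Example (illustrative, not code from this file): hand the data remaining in
 * a partially-advanced walk to code that needs a scatterlist, e.g. a fallback
 * implementation.  @sg must stay in scope for as long as the resulting list
 * is in use, since it chains into the walk's original scatterlist.
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	scatterwalk_get_sglist(&walk, sg);
 *	(sg now describes everything from the walk's current position onward.)
 */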

static inline void scatterwalk_map(struct scatter_walk *walk)
{
	struct page *base_page = sg_page(walk->sg);
	unsigned int offset = walk->offset;
	void *addr;

	if (IS_ENABLED(CONFIG_HIGHMEM)) {
		struct page *page;

		page = nth_page(base_page, offset >> PAGE_SHIFT);
		offset = offset_in_page(offset);
		addr = kmap_local_page(page) + offset;
	} else {
		/*
		 * When !HIGHMEM we allow the walker to return segments that
		 * span a page boundary; see scatterwalk_clamp().  To make it
		 * clear that in this case we're working in the linear buffer
		 * of the whole sg entry in the kernel's direct map rather
		 * than within the mapped buffer of a single page, compute the
		 * address as an offset from the page_address() of the first
		 * page of the sg entry.  Either way the result is the address
		 * in the direct map, but this makes it clearer what is really
		 * going on.
		 */
		addr = page_address(base_page) + offset;
	}

	walk->__addr = addr;
}

/**
 * scatterwalk_next() - Get the next data buffer in a scatterlist walk
 * @walk: the scatter_walk
 * @total: the total number of bytes remaining, > 0
 *
 * A virtual address for the next segment of data from the scatterlist will
 * be placed into @walk->addr.  The caller must call scatterwalk_done_src()
 * or scatterwalk_done_dst() when it is done using this virtual address.
 *
 * Returns: the next number of bytes available, <= @total
 */
static inline unsigned int scatterwalk_next(struct scatter_walk *walk,
					    unsigned int total)
{
	unsigned int nbytes = scatterwalk_clamp(walk, total);

	scatterwalk_map(walk);
	return nbytes;
}
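
/*
 * Example (a minimal sketch of the next/done pattern, not code from this
 * file): read a whole region into a linear buffer, which is essentially what
 * memcpy_from_scatterwalk() does.  scatterwalk_done_src() is defined below;
 * the memcpy() destination buf is assumed to be at least total bytes.
 *
 *	while (total) {
 *		unsigned int nbytes = scatterwalk_next(&walk, total);
 *
 *		memcpy(buf, walk.addr, nbytes);
 *		scatterwalk_done_src(&walk, nbytes);
 *		buf += nbytes;
 *		total -= nbytes;
 *	}
 */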

static inline void scatterwalk_unmap(struct scatter_walk *walk)
{
	if (IS_ENABLED(CONFIG_HIGHMEM))
		kunmap_local(walk->__addr);
}

static inline void scatterwalk_advance(struct scatter_walk *walk,
				       unsigned int nbytes)
{
	walk->offset += nbytes;
}

/**
 * scatterwalk_done_src() - Finish one step of a walk of source scatterlist
 * @walk: the scatter_walk
 * @nbytes: the number of bytes processed this step, less than or equal to the
 *	    number of bytes that scatterwalk_next() returned.
 *
 * Use this if the mapped address was not written to, i.e. it is source data.
 */
static inline void scatterwalk_done_src(struct scatter_walk *walk,
					unsigned int nbytes)
{
	scatterwalk_unmap(walk);
	scatterwalk_advance(walk, nbytes);
}

/**
 * scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist
 * @walk: the scatter_walk
 * @nbytes: the number of bytes processed this step, less than or equal to the
 *	    number of bytes that scatterwalk_next() returned.
 *
 * Use this if the mapped address may have been written to, i.e. it is
 * destination data.
 */
static inline void scatterwalk_done_dst(struct scatter_walk *walk,
					unsigned int nbytes)
{
	scatterwalk_unmap(walk);
	/*
	 * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just
	 * relying on flush_dcache_page() being a no-op when not implemented,
	 * since otherwise the BUG_ON in sg_page() does not get optimized out.
	 * This also avoids having to consider whether the loop would get
	 * reliably optimized out or not.
	 */
	if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) {
		struct page *base_page;
		unsigned int offset;
		int start, end, i;

		base_page = sg_page(walk->sg);
		offset = walk->offset;
		start = offset >> PAGE_SHIFT;
		end = start + (nbytes >> PAGE_SHIFT);
		end += (offset_in_page(offset) + offset_in_page(nbytes) +
			PAGE_SIZE - 1) >> PAGE_SHIFT;
		for (i = start; i < end; i++)
			flush_dcache_page(nth_page(base_page, i));
	}
	scatterwalk_advance(walk, nbytes);
}
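
/*
 * Example (illustrative counterpart to the source loop shown above, not code
 * from this file): write a linear buffer out to the scatterlist, which is
 * essentially what memcpy_to_scatterwalk() does.
 *
 *	while (total) {
 *		unsigned int nbytes = scatterwalk_next(&walk, total);
 *
 *		memcpy(walk.addr, buf, nbytes);
 *		scatterwalk_done_dst(&walk, nbytes);
 *		buf += nbytes;
 *		total -= nbytes;
 *	}
 */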

void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes);

void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk,
			     unsigned int nbytes);

void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf,
			   unsigned int nbytes);

void memcpy_from_sglist(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes);

void memcpy_to_sglist(struct scatterlist *sg, unsigned int start,
		      const void *buf, unsigned int nbytes);

void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
		   unsigned int nbytes);
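
/*
 * Example (a hypothetical AEAD-style caller, not code from this file): copy a
 * 16-byte authentication tag, stored at the end of the ciphertext in
 * req->src, into a stack buffer; the offset arithmetic assumes the usual
 * AEAD decryption layout of assoclen bytes of associated data followed by
 * cryptlen bytes of ciphertext including the tag.
 *
 *	u8 tag[16];
 *
 *	memcpy_from_sglist(tag, req->src,
 *			   req->assoclen + req->cryptlen - sizeof(tag),
 *			   sizeof(tag));
 */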

/* In new code, please use memcpy_{from,to}_sglist() directly instead. */
static inline void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
					    unsigned int start,
					    unsigned int nbytes, int out)
{
	if (out)
		memcpy_to_sglist(sg, start, buf, nbytes);
	else
		memcpy_from_sglist(buf, sg, start, nbytes);
}

struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
				     struct scatterlist *src,
				     unsigned int len);
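
/*
 * Example (a hypothetical AEAD-style caller, not code from this file): get a
 * scatterlist that starts at the ciphertext by fast-forwarding past the
 * associated data.  @sg must stay in scope for as long as the returned list
 * is in use, since the result may chain into it.
 *
 *	struct scatterlist sg[2];
 *	struct scatterlist *src;
 *
 *	src = scatterwalk_ffwd(sg, req->src, req->assoclen);
 */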

int skcipher_walk_first(struct skcipher_walk *walk, bool atomic);
int skcipher_walk_done(struct skcipher_walk *walk, int res);

static inline void skcipher_walk_abort(struct skcipher_walk *walk)
{
	skcipher_walk_done(walk, -ECANCELED);
}

#endif	/* _CRYPTO_SCATTERWALK_H */