// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-present, Facebook, Inc.
 * All rights reserved.
 *
 */

#include <linux/bio.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/pagemap.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/zstd.h>
#include "misc.h"
#include "fs.h"
#include "btrfs_inode.h"
#include "compression.h"
#include "super.h"

#define ZSTD_BTRFS_MAX_WINDOWLOG 17
#define ZSTD_BTRFS_MAX_INPUT (1U << ZSTD_BTRFS_MAX_WINDOWLOG)
#define ZSTD_BTRFS_DEFAULT_LEVEL 3
#define ZSTD_BTRFS_MIN_LEVEL -15
#define ZSTD_BTRFS_MAX_LEVEL 15
/* 307s to avoid pathologically clashing with transaction commit */
#define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)

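/*
 * Derive the zstd parameters for the given level and input size, clamping
 * the window log to ZSTD_BTRFS_MAX_WINDOWLOG so the decompression window
 * never exceeds ZSTD_BTRFS_MAX_INPUT regardless of the requested level.
 */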
static zstd_parameters zstd_get_btrfs_parameters(int level,
						 size_t src_len)
{
	zstd_parameters params = zstd_get_params(level, src_len);

	if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
		params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
	WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT);
	return params;
}

struct workspace {
	void *mem;
	size_t size;
	char *buf;
	int level;
	int req_level;
	unsigned long last_used; /* jiffies */
	struct list_head list;
	struct list_head lru_list;
	zstd_in_buffer in_buf;
	zstd_out_buffer out_buf;
	zstd_parameters params;
};

/*
 * Zstd Workspace Management
 *
 * Zstd workspaces have different memory requirements depending on the level.
 * The zstd workspaces are managed by having individual lists for each level
 * and a global lru. Forward progress is maintained by protecting a max level
 * workspace.
 *
 * Getting a workspace is done by using the bitmap to identify the levels that
 * have available workspaces and scanning upward from the requested level.
 * This lets us recycle higher level workspaces because of the monotonic
 * memory guarantee. A workspace's last_used is only updated if it is being
 * used at its own level. Putting a workspace involves adding it back to the
 * appropriate places and adding it back to the lru if necessary.
 *
 * A timer is used to reclaim workspaces if they have not been used for
 * ZSTD_BTRFS_RECLAIM_JIFFIES. This helps keep only active workspaces around.
 * The upper bound is provided by the workqueue limit which is 2 (percpu limit).
 */
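/*
 * For example, a request for level 3 first checks idle_ws[2]; if that list is
 * empty, any populated list for a higher level (up to idle_ws[14]) can serve
 * the request, since a higher level workspace is never smaller.
 */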

struct zstd_workspace_manager {
	spinlock_t lock;
	struct list_head lru_list;
	struct list_head idle_ws[ZSTD_BTRFS_MAX_LEVEL];
	unsigned long active_map;
	wait_queue_head_t wait;
	struct timer_list timer;
};

static size_t zstd_ws_mem_sizes[ZSTD_BTRFS_MAX_LEVEL];

static inline struct workspace *list_to_workspace(struct list_head *list)
{
	return container_of(list, struct workspace, list);
}

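/*
 * Map a compression level to its index in the idle_ws and zstd_ws_mem_sizes
 * arrays: levels 1..15 map to indices 0..14, and the negative (fast mode)
 * levels all share index 0 with level 1.
 */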
static inline int clip_level(int level)
{
	return max(0, level - 1);
}

/*
 * Timer callback to free unused workspaces.
 *
 * @timer: timer
 *
 * This scans the lru_list and attempts to reclaim any workspace that hasn't
 * been used for ZSTD_BTRFS_RECLAIM_JIFFIES.
 *
 * The context is softirq and does not need the _bh locking primitives.
 */
static void zstd_reclaim_timer_fn(struct timer_list *timer)
{
	struct zstd_workspace_manager *zwsm =
		container_of(timer, struct zstd_workspace_manager, timer);
	unsigned long reclaim_threshold = jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES;
	struct list_head *pos, *next;

	spin_lock(&zwsm->lock);

	if (list_empty(&zwsm->lru_list)) {
		spin_unlock(&zwsm->lock);
		return;
	}

	list_for_each_prev_safe(pos, next, &zwsm->lru_list) {
		struct workspace *victim = container_of(pos, struct workspace,
							lru_list);
		int level;

		if (time_after(victim->last_used, reclaim_threshold))
			break;

		/* workspace is in use */
		if (victim->req_level)
			continue;

		level = victim->level;
		list_del(&victim->lru_list);
		list_del(&victim->list);
		zstd_free_workspace(&victim->list);

		if (list_empty(&zwsm->idle_ws[level]))
			clear_bit(level, &zwsm->active_map);
	}

	if (!list_empty(&zwsm->lru_list))
		mod_timer(&zwsm->timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);

	spin_unlock(&zwsm->lock);
}

/*
 * Calculate monotonic memory bounds.
 *
 * It is possible based on the level configurations that a higher level
 * workspace uses less memory than a lower level workspace. In order to reuse
 * workspaces, this must be made a monotonic relationship. This precomputes
 * the required memory for each level and enforces the monotonicity between
 * level and memory required.
 */
static void zstd_calc_ws_mem_sizes(void)
{
	size_t max_size = 0;
	int level;

	for (level = ZSTD_BTRFS_MIN_LEVEL; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
		if (level == 0)
			continue;
		zstd_parameters params =
			zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
		size_t level_size =
			max_t(size_t,
			      zstd_cstream_workspace_bound(&params.cParams),
			      zstd_dstream_workspace_bound(ZSTD_BTRFS_MAX_INPUT));

		max_size = max_t(size_t, max_size, level_size);
		/* Use level 1 workspace size for all the fast mode negative levels. */
		zstd_ws_mem_sizes[clip_level(level)] = max_size;
	}
}

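/*
 * Set up the zstd workspace manager for @fs_info and preallocate one max
 * level workspace so that forward progress is always possible.
 */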
int zstd_alloc_workspace_manager(struct btrfs_fs_info *fs_info)
{
	struct zstd_workspace_manager *zwsm;
	struct list_head *ws;

	ASSERT(fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD] == NULL);
	zwsm = kzalloc_obj(*zwsm);
	if (!zwsm)
		return -ENOMEM;
	zstd_calc_ws_mem_sizes();
	spin_lock_init(&zwsm->lock);
	init_waitqueue_head(&zwsm->wait);
	timer_setup(&zwsm->timer, zstd_reclaim_timer_fn, 0);

	INIT_LIST_HEAD(&zwsm->lru_list);
	for (int i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&zwsm->idle_ws[i]);
	fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD] = zwsm;

	ws = zstd_alloc_workspace(fs_info, ZSTD_BTRFS_MAX_LEVEL);
	if (IS_ERR(ws)) {
		btrfs_warn(NULL, "cannot preallocate zstd compression workspace");
	} else {
		set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &zwsm->active_map);
		list_add(ws, &zwsm->idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
	}
	return 0;
}

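/*
 * Tear down the zstd workspace manager for @fs_info, freeing all idle
 * workspaces and stopping the reclaim timer.
 */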
void zstd_free_workspace_manager(struct btrfs_fs_info *fs_info)
{
	struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
	struct workspace *workspace;

	if (!zwsm)
		return;
	fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD] = NULL;
	spin_lock_bh(&zwsm->lock);
	for (int i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) {
		while (!list_empty(&zwsm->idle_ws[i])) {
			workspace = container_of(zwsm->idle_ws[i].next,
						 struct workspace, list);
			list_del(&workspace->list);
			list_del(&workspace->lru_list);
			zstd_free_workspace(&workspace->list);
		}
	}
	spin_unlock_bh(&zwsm->lock);
	timer_delete_sync(&zwsm->timer);
	kfree(zwsm);
}

/*
 * Find workspace for given level.
 *
 * @level: compression level
 *
 * This iterates over the set bits in the active_map beginning at the requested
 * compression level. This lets us utilize already allocated workspaces before
 * allocating a new one. If the workspace is of a larger size, it is used, but
 * the place in the lru_list and last_used times are not updated. This is to
 * offer the opportunity to reclaim the workspace in favor of allocating an
 * appropriately sized one in the future.
 */
static struct list_head *zstd_find_workspace(struct btrfs_fs_info *fs_info, int level)
{
	struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
	struct list_head *ws;
	struct workspace *workspace;
	int i = clip_level(level);

	ASSERT(zwsm);
	spin_lock_bh(&zwsm->lock);
	for_each_set_bit_from(i, &zwsm->active_map, ZSTD_BTRFS_MAX_LEVEL) {
		if (!list_empty(&zwsm->idle_ws[i])) {
			ws = zwsm->idle_ws[i].next;
			workspace = list_to_workspace(ws);
			list_del_init(ws);
			/* keep its place if it's a lower level using this */
			workspace->req_level = level;
			if (clip_level(level) == workspace->level)
				list_del(&workspace->lru_list);
			if (list_empty(&zwsm->idle_ws[i]))
				clear_bit(i, &zwsm->active_map);
			spin_unlock_bh(&zwsm->lock);
			return ws;
		}
	}
	spin_unlock_bh(&zwsm->lock);

	return NULL;
}

/*
 * Zstd get_workspace for level.
 *
 * @level: compression level
 *
 * If @level is 0, then any compression level can be used. Therefore, we begin
 * scanning from 1. We first scan through possible workspaces and only then
 * attempt to allocate a new workspace. If we fail to allocate one due to
 * memory pressure, go to sleep waiting for the max level workspace to free up.
 */
struct list_head *zstd_get_workspace(struct btrfs_fs_info *fs_info, int level)
{
	struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
	struct list_head *ws;
	unsigned int nofs_flag;

	ASSERT(zwsm);

	/* level == 0 means we can use any workspace */
	if (!level)
		level = 1;

again:
	ws = zstd_find_workspace(fs_info, level);
	if (ws)
		return ws;

	nofs_flag = memalloc_nofs_save();
	ws = zstd_alloc_workspace(fs_info, level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(ws)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&zwsm->wait, &wait, TASK_UNINTERRUPTIBLE);
		schedule();
		finish_wait(&zwsm->wait, &wait);

		goto again;
	}

	return ws;
}

/*
 * Zstd put_workspace.
 *
 * @ws: list_head for the workspace
 *
 * When putting back a workspace, we only need to update the LRU if we are of
 * the requested compression level. Here is where we continue to protect the
 * max level workspace or update last_used accordingly. If the reclaim timer
 * isn't set, it is also set here. Only putting back a max level workspace
 * wakes up waiters.
 */
void zstd_put_workspace(struct btrfs_fs_info *fs_info, struct list_head *ws)
{
	struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
	struct workspace *workspace = list_to_workspace(ws);

	ASSERT(zwsm);
	spin_lock_bh(&zwsm->lock);

	/* A node is only taken off the lru if we are the corresponding level */
	if (clip_level(workspace->req_level) == workspace->level) {
		/* Hide a max level workspace from reclaim */
		if (list_empty(&zwsm->idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
			INIT_LIST_HEAD(&workspace->lru_list);
		} else {
			workspace->last_used = jiffies;
			list_add(&workspace->lru_list, &zwsm->lru_list);
			if (!timer_pending(&zwsm->timer))
				mod_timer(&zwsm->timer,
					  jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
		}
	}

	set_bit(workspace->level, &zwsm->active_map);
	list_add(&workspace->list, &zwsm->idle_ws[workspace->level]);
	workspace->req_level = 0;

	spin_unlock_bh(&zwsm->lock);

	if (workspace->level == clip_level(ZSTD_BTRFS_MAX_LEVEL))
		cond_wake_up(&zwsm->wait);
}

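/* Free a workspace's buffers and the workspace itself. */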
void zstd_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->mem);
	kfree(workspace->buf);
	kfree(workspace);
}

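/*
 * Allocate a workspace sized for the clipped @level. The same workspace is
 * reused for both compression and decompression, so its size is the larger
 * of the two bounds computed in zstd_calc_ws_mem_sizes().
 */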
struct list_head *zstd_alloc_workspace(struct btrfs_fs_info *fs_info, int level)
{
	struct workspace *workspace;

	workspace = kzalloc_obj(*workspace);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	/* Use level 1 workspace size for all the fast mode negative levels. */
	workspace->size = zstd_ws_mem_sizes[clip_level(level)];
	workspace->level = clip_level(level);
	workspace->req_level = level;
	workspace->last_used = jiffies;
	workspace->mem = kvmalloc(workspace->size, GFP_KERNEL | __GFP_NOWARN);
	workspace->buf = kmalloc(fs_info->sectorsize, GFP_KERNEL);
	if (!workspace->mem || !workspace->buf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);
	INIT_LIST_HEAD(&workspace->lru_list);

	return &workspace->list;
fail:
	zstd_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

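/*
 * Stream-compress the range described by @cb into freshly allocated output
 * folios that are queued directly into the bio. Returns -E2BIG when the data
 * does not shrink, signalling the caller to fall back to storing the range
 * uncompressed.
 */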
int zstd_compress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct btrfs_inode *inode = cb->bbio.inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	struct bio *bio = &cb->bbio.bio;
	zstd_cstream *stream;
	int ret = 0;
	/* The current folio to read. */
	struct folio *in_folio = NULL;
	/* The current folio to write to. */
	struct folio *out_folio = NULL;
	unsigned long tot_in = 0;
	unsigned long tot_out = 0;
	const u64 start = cb->start;
	const u32 len = cb->len;
	const u64 end = start + len;
	const u32 min_folio_size = btrfs_min_folio_size(fs_info);

	workspace->params = zstd_get_btrfs_parameters(workspace->req_level, len);

	/* Initialize the stream. */
	stream = zstd_init_cstream(&workspace->params, len, workspace->mem,
				   workspace->size);
	if (unlikely(!stream)) {
		btrfs_err(fs_info,
			  "zstd compression init level %d failed, root %llu inode %llu offset %llu",
			  workspace->req_level, btrfs_root_id(inode->root),
			  btrfs_ino(inode), start);
		ret = -EIO;
		goto out;
	}

	/* Map in the first page of input data. */
	ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
	if (ret < 0)
		goto out;
	workspace->in_buf.src = kmap_local_folio(in_folio,
						 offset_in_folio(in_folio, start));
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = btrfs_calc_input_length(in_folio, end, start);

	/* Allocate and map in the output buffer. */
	out_folio = btrfs_alloc_compr_folio(fs_info, GFP_NOFS);
	if (out_folio == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	workspace->out_buf.dst = folio_address(out_folio);
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = min_folio_size;

	while (1) {
		size_t ret2;

		ret2 = zstd_compress_stream(stream, &workspace->out_buf,
					    &workspace->in_buf);
		if (unlikely(zstd_is_error(ret2))) {
			btrfs_warn(fs_info,
				   "zstd compression level %d failed, error %d root %llu inode %llu offset %llu",
				   workspace->req_level, zstd_get_error_code(ret2),
				   btrfs_root_id(inode->root), btrfs_ino(inode),
				   start + tot_in);
			ret = -EIO;
			goto out;
		}

		/* Check to see if we are making it bigger. */
		if (tot_in + workspace->in_buf.pos > fs_info->sectorsize * 2 &&
		    tot_in + workspace->in_buf.pos <
		    tot_out + workspace->out_buf.pos) {
			ret = -E2BIG;
			goto out;
		}

		/* Check if we need more output space. */
		if (workspace->out_buf.pos >= workspace->out_buf.size) {
			tot_out += min_folio_size;
			if (tot_out >= len) {
				ret = -E2BIG;
				goto out;
			}
			/* Queue the current folio into the bio. */
			if (!bio_add_folio(bio, out_folio, folio_size(out_folio), 0)) {
				ret = -E2BIG;
				goto out;
			}

			out_folio = btrfs_alloc_compr_folio(fs_info, GFP_NOFS);
			if (out_folio == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			workspace->out_buf.dst = folio_address(out_folio);
			workspace->out_buf.pos = 0;
			workspace->out_buf.size = min_folio_size;
		}

		/* We've reached the end of the input. */
		if (tot_in + workspace->in_buf.pos >= len) {
			tot_in += workspace->in_buf.pos;
			break;
		}

		/* Check if we need more input. */
		if (workspace->in_buf.pos >= workspace->in_buf.size) {
			u64 cur;

			tot_in += workspace->in_buf.size;
			cur = start + tot_in;

			kunmap_local(workspace->in_buf.src);
			workspace->in_buf.src = NULL;
			folio_put(in_folio);

			ret = btrfs_compress_filemap_get_folio(mapping, cur, &in_folio);
			if (ret < 0)
				goto out;
			workspace->in_buf.src = kmap_local_folio(in_folio,
							offset_in_folio(in_folio, cur));
			workspace->in_buf.pos = 0;
			workspace->in_buf.size = btrfs_calc_input_length(in_folio, end, cur);
		}
	}

	while (1) {
		size_t ret2;

		ret2 = zstd_end_stream(stream, &workspace->out_buf);
		if (unlikely(zstd_is_error(ret2))) {
			btrfs_err(fs_info,
				  "zstd compression end level %d failed, error %d root %llu inode %llu offset %llu",
				  workspace->req_level, zstd_get_error_code(ret2),
				  btrfs_root_id(inode->root), btrfs_ino(inode),
				  start + tot_in);
			ret = -EIO;
			goto out;
		}
		/* Queue the remaining part of the output folio into the bio. */
		if (ret2 == 0) {
			tot_out += workspace->out_buf.pos;
			if (tot_out >= len) {
				ret = -E2BIG;
				goto out;
			}
			if (!bio_add_folio(bio, out_folio, workspace->out_buf.pos, 0)) {
				ret = -E2BIG;
				goto out;
			}
			out_folio = NULL;
			break;
		}
		tot_out += min_folio_size;
		if (tot_out >= len) {
			ret = -E2BIG;
			goto out;
		}
		if (!bio_add_folio(bio, out_folio, folio_size(out_folio), 0)) {
			ret = -E2BIG;
			goto out;
		}
		out_folio = btrfs_alloc_compr_folio(fs_info, GFP_NOFS);
		if (out_folio == NULL) {
			ret = -ENOMEM;
			goto out;
		}
		workspace->out_buf.dst = folio_address(out_folio);
		workspace->out_buf.pos = 0;
		workspace->out_buf.size = min_folio_size;
	}

	if (tot_out >= tot_in) {
		ret = -E2BIG;
		goto out;
	}

	ret = 0;
	ASSERT(tot_out == bio->bi_iter.bi_size);
out:
	if (out_folio)
		btrfs_free_compr_folio(out_folio);
	if (workspace->in_buf.src) {
		kunmap_local(workspace->in_buf.src);
		folio_put(in_folio);
	}
	return ret;
}

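/*
 * Decompress a whole compressed bio: walk the input folios, feeding each one
 * through the zstd stream and copying the output into the destination pages
 * of @cb one sector-sized buffer at a time.
 */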
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	struct folio_iter fi;
	size_t srclen = bio_get_size(&cb->bbio.bio);
	zstd_dstream *stream;
	int ret = 0;
	const unsigned int min_folio_size = btrfs_min_folio_size(fs_info);
	unsigned long folio_in_index = 0;
	unsigned long total_folios_in = DIV_ROUND_UP(srclen, min_folio_size);
	unsigned long buf_start;
	unsigned long total_out = 0;

	bio_first_folio(&fi, &cb->bbio.bio, 0);
	if (unlikely(!fi.folio))
		return -EINVAL;
	ASSERT(folio_size(fi.folio) == min_folio_size);

	stream = zstd_init_dstream(ZSTD_BTRFS_MAX_INPUT, workspace->mem,
				   workspace->size);
	if (unlikely(!stream)) {
		struct btrfs_inode *inode = cb->bbio.inode;

		btrfs_err(inode->root->fs_info,
			  "zstd decompression init failed, root %llu inode %llu offset %llu",
			  btrfs_root_id(inode->root), btrfs_ino(inode), cb->start);
		ret = -EIO;
		goto done;
	}

	workspace->in_buf.src = kmap_local_folio(fi.folio, 0);
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = min_t(size_t, srclen, min_folio_size);

	workspace->out_buf.dst = workspace->buf;
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = fs_info->sectorsize;

	while (1) {
		size_t ret2;

		ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
					      &workspace->in_buf);
		if (unlikely(zstd_is_error(ret2))) {
			struct btrfs_inode *inode = cb->bbio.inode;

			btrfs_err(inode->root->fs_info,
				  "zstd decompression failed, error %d root %llu inode %llu offset %llu",
				  zstd_get_error_code(ret2), btrfs_root_id(inode->root),
				  btrfs_ino(inode), cb->start);
			ret = -EIO;
			goto done;
		}
		buf_start = total_out;
		total_out += workspace->out_buf.pos;
		workspace->out_buf.pos = 0;

		ret = btrfs_decompress_buf2page(workspace->out_buf.dst,
						total_out - buf_start, cb, buf_start);
		if (ret == 0)
			break;

		if (workspace->in_buf.pos >= srclen)
			break;

		/* Check if we've hit the end of a frame */
		if (ret2 == 0)
			break;

		if (workspace->in_buf.pos == workspace->in_buf.size) {
			kunmap_local(workspace->in_buf.src);
			folio_in_index++;
			if (unlikely(folio_in_index >= total_folios_in)) {
				workspace->in_buf.src = NULL;
				ret = -EIO;
				goto done;
			}
			srclen -= min_folio_size;
			bio_next_folio(&fi, &cb->bbio.bio);
			ASSERT(fi.folio);
			workspace->in_buf.src = kmap_local_folio(fi.folio, 0);
			workspace->in_buf.pos = 0;
			workspace->in_buf.size = min_t(size_t, srclen, min_folio_size);
		}
	}
	ret = 0;
done:
	if (workspace->in_buf.src)
		kunmap_local(workspace->in_buf.src);
	return ret;
}

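/*
 * Decompress a single buffer (at most one sector of output) into
 * @dest_folio. Any shortfall against @destlen is zero filled so the
 * destination range is always fully initialized.
 */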
int zstd_decompress(struct list_head *ws, const u8 *data_in,
		struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
		size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	struct btrfs_fs_info *fs_info = btrfs_sb(folio_inode(dest_folio)->i_sb);
	zstd_dstream *stream;
	int ret = 0;
	unsigned long to_copy = 0;

	stream = zstd_init_dstream(ZSTD_BTRFS_MAX_INPUT, workspace->mem,
				   workspace->size);
	if (unlikely(!stream)) {
		struct btrfs_inode *inode = folio_to_inode(dest_folio);

		btrfs_err(inode->root->fs_info,
			  "zstd decompression init failed, root %llu inode %llu offset %llu",
			  btrfs_root_id(inode->root), btrfs_ino(inode),
			  folio_pos(dest_folio));
		ret = -EIO;
		goto finish;
	}

	workspace->in_buf.src = data_in;
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = srclen;

	workspace->out_buf.dst = workspace->buf;
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = fs_info->sectorsize;

	/*
	 * Since both input and output buffers should not exceed one sector,
	 * one call should end the decompression.
	 */
	ret = zstd_decompress_stream(stream, &workspace->out_buf, &workspace->in_buf);
	if (unlikely(zstd_is_error(ret))) {
		struct btrfs_inode *inode = folio_to_inode(dest_folio);

		btrfs_err(inode->root->fs_info,
			  "zstd decompression failed, error %d root %llu inode %llu offset %llu",
			  zstd_get_error_code(ret), btrfs_root_id(inode->root),
			  btrfs_ino(inode), folio_pos(dest_folio));
		goto finish;
	}
	to_copy = workspace->out_buf.pos;
	memcpy_to_folio(dest_folio, dest_pgoff, workspace->out_buf.dst, to_copy);
finish:
	/* Error or early end. */
	if (unlikely(to_copy < destlen)) {
		ret = -EIO;
		folio_zero_range(dest_folio, dest_pgoff + to_copy, destlen - to_copy);
	}
	return ret;
}

const struct btrfs_compress_levels btrfs_zstd_compress = {
	.min_level = ZSTD_BTRFS_MIN_LEVEL,
	.max_level = ZSTD_BTRFS_MAX_LEVEL,
	.default_level = ZSTD_BTRFS_DEFAULT_LEVEL,
};