// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-present, Facebook, Inc.
 * All rights reserved.
 *
 */

#include <linux/bio.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/pagemap.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/zstd.h>
#include "misc.h"
#include "fs.h"
#include "btrfs_inode.h"
#include "compression.h"
#include "super.h"

#define ZSTD_BTRFS_MAX_WINDOWLOG 17
#define ZSTD_BTRFS_MAX_INPUT (1U << ZSTD_BTRFS_MAX_WINDOWLOG)
#define ZSTD_BTRFS_DEFAULT_LEVEL 3
#define ZSTD_BTRFS_MIN_LEVEL -15
#define ZSTD_BTRFS_MAX_LEVEL 15
/* 307s to avoid pathologically clashing with transaction commit */
#define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)

static zstd_parameters zstd_get_btrfs_parameters(int level,
                                                 size_t src_len)
{
        zstd_parameters params = zstd_get_params(level, src_len);

        if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
                params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
        WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT);
        return params;
}

struct workspace {
        void *mem;
        size_t size;
        char *buf;
        int level;
        int req_level;
        unsigned long last_used; /* jiffies */
        struct list_head list;
        struct list_head lru_list;
        zstd_in_buffer in_buf;
        zstd_out_buffer out_buf;
        zstd_parameters params;
};

/*
 * Zstd Workspace Management
 *
 * Zstd workspaces have different memory requirements depending on the level.
 * The zstd workspaces are managed by having individual lists for each level
 * and a global lru. Forward progress is maintained by protecting a max level
 * workspace.
 *
 * Getting a workspace is done by using the bitmap to identify the levels that
 * have available workspaces, scanning upwards. This lets us recycle higher
 * level workspaces because of the monotonic memory guarantee. A workspace's
 * last_used is only updated if it is being used by the corresponding memory
 * level. Putting a workspace involves adding it back to the appropriate places
 * and adding it back to the lru if necessary.
 *
 * A timer is used to reclaim workspaces if they have not been used for
 * ZSTD_BTRFS_RECLAIM_JIFFIES. This helps keep only active workspaces around.
 * The upper bound is provided by the workqueue limit which is 2 (percpu limit).
 */

struct zstd_workspace_manager {
        const struct btrfs_compress_op *ops;
        spinlock_t lock;
        struct list_head lru_list;
        struct list_head idle_ws[ZSTD_BTRFS_MAX_LEVEL];
        unsigned long active_map;
        wait_queue_head_t wait;
        struct timer_list timer;
};

static struct zstd_workspace_manager wsm;

static size_t zstd_ws_mem_sizes[ZSTD_BTRFS_MAX_LEVEL];

static inline struct workspace *list_to_workspace(struct list_head *list)
{
        return container_of(list, struct workspace, list);
}
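
/*
 * Map a zstd level onto an index into idle_ws[], active_map and
 * zstd_ws_mem_sizes[]. Level 1 and all the fast (negative) levels share
 * index 0: levels -15..1 map to 0, level 2 to 1, and so on up to level 15
 * mapping to ZSTD_BTRFS_MAX_LEVEL - 1.
 */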
static inline int clip_level(int level)
{
        return max(0, level - 1);
}

/*
 * Timer callback to free unused workspaces.
 *
 * @timer: timer
 *
 * This scans the lru_list and attempts to reclaim any workspace that hasn't
 * been used for ZSTD_BTRFS_RECLAIM_JIFFIES.
 *
 * The context is softirq and does not need the _bh locking primitives.
 */
static void zstd_reclaim_timer_fn(struct timer_list *timer)
{
        unsigned long reclaim_threshold = jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES;
        struct list_head *pos, *next;

        ASSERT(timer == &wsm.timer);

        spin_lock(&wsm.lock);

        if (list_empty(&wsm.lru_list)) {
                spin_unlock(&wsm.lock);
                return;
        }

        list_for_each_prev_safe(pos, next, &wsm.lru_list) {
                struct workspace *victim = container_of(pos, struct workspace,
                                                        lru_list);
                int level;

                if (time_after(victim->last_used, reclaim_threshold))
                        break;

                /* workspace is in use */
                if (victim->req_level)
                        continue;

                level = victim->level;
                list_del(&victim->lru_list);
                list_del(&victim->list);
                zstd_free_workspace(&victim->list);

                if (list_empty(&wsm.idle_ws[level]))
                        clear_bit(level, &wsm.active_map);
        }

        if (!list_empty(&wsm.lru_list))
                mod_timer(&wsm.timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);

        spin_unlock(&wsm.lock);
}

/*
 * Calculate monotonic memory bounds.
 *
 * It is possible based on the level configurations that a higher level
 * workspace uses less memory than a lower level workspace. In order to reuse
 * workspaces, this must be made a monotonic relationship. This precomputes
 * the required memory for each level and enforces the monotonicity between
 * level and memory required.
 */
static void zstd_calc_ws_mem_sizes(void)
{
        size_t max_size = 0;
        int level;

        for (level = ZSTD_BTRFS_MIN_LEVEL; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
                if (level == 0)
                        continue;
                zstd_parameters params =
                        zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
                size_t level_size =
                        max_t(size_t,
                              zstd_cstream_workspace_bound(&params.cParams),
                              zstd_dstream_workspace_bound(ZSTD_BTRFS_MAX_INPUT));

                max_size = max_t(size_t, max_size, level_size);
                /* Use level 1 workspace size for all the fast mode negative levels. */
                zstd_ws_mem_sizes[clip_level(level)] = max_size;
        }
}
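
/*
 * Initialize the workspace manager. This is expected to run once, from
 * btrfs_init_compress() at module init: it precomputes the per level
 * workspace sizes and preallocates one max level workspace so that
 * compression can always make forward progress even if later allocations
 * fail under memory pressure.
 */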
void zstd_init_workspace_manager(void)
{
        struct list_head *ws;
        int i;

        zstd_calc_ws_mem_sizes();

        wsm.ops = &btrfs_zstd_compress;
        spin_lock_init(&wsm.lock);
        init_waitqueue_head(&wsm.wait);
        timer_setup(&wsm.timer, zstd_reclaim_timer_fn, 0);

        INIT_LIST_HEAD(&wsm.lru_list);
        for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++)
                INIT_LIST_HEAD(&wsm.idle_ws[i]);

        ws = zstd_alloc_workspace(ZSTD_BTRFS_MAX_LEVEL);
        if (IS_ERR(ws)) {
                pr_warn("BTRFS: cannot preallocate zstd compression workspace\n");
        } else {
                set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &wsm.active_map);
                list_add(ws, &wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]);
        }
}

void zstd_cleanup_workspace_manager(void)
{
        struct workspace *workspace;
        int i;

        spin_lock_bh(&wsm.lock);
        for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) {
                while (!list_empty(&wsm.idle_ws[i])) {
                        workspace = container_of(wsm.idle_ws[i].next,
                                                 struct workspace, list);
                        list_del(&workspace->list);
                        list_del(&workspace->lru_list);
                        zstd_free_workspace(&workspace->list);
                }
        }
        spin_unlock_bh(&wsm.lock);

        timer_delete_sync(&wsm.timer);
}

/*
 * Find workspace for given level.
 *
 * @level: compression level
 *
 * This iterates over the set bits in the active_map beginning at the requested
 * compression level. This lets us utilize already allocated workspaces before
 * allocating a new one. If the workspace is of a larger size, it is used, but
 * the place in the lru_list and last_used times are not updated. This is to
 * offer the opportunity to reclaim the workspace in favor of allocating an
 * appropriately sized one in the future.
 */
static struct list_head *zstd_find_workspace(int level)
{
        struct list_head *ws;
        struct workspace *workspace;
        int i = clip_level(level);

        spin_lock_bh(&wsm.lock);
        for_each_set_bit_from(i, &wsm.active_map, ZSTD_BTRFS_MAX_LEVEL) {
                if (!list_empty(&wsm.idle_ws[i])) {
                        ws = wsm.idle_ws[i].next;
                        workspace = list_to_workspace(ws);
                        list_del_init(ws);
                        /* keep its place if it's a lower level using this */
                        workspace->req_level = level;
                        if (clip_level(level) == workspace->level)
                                list_del(&workspace->lru_list);
                        if (list_empty(&wsm.idle_ws[i]))
                                clear_bit(i, &wsm.active_map);
                        spin_unlock_bh(&wsm.lock);
                        return ws;
                }
        }
        spin_unlock_bh(&wsm.lock);

        return NULL;
}

/*
 * Zstd get_workspace for level.
 *
 * @level: compression level
 *
 * If @level is 0, then any compression level can be used. Therefore, we begin
 * scanning from 1. We first scan through possible workspaces and only then
 * attempt to allocate a new workspace. If we fail to allocate one due to
 * memory pressure, go to sleep waiting for the max level workspace to free up.
 */
struct list_head *zstd_get_workspace(int level)
{
        struct list_head *ws;
        unsigned int nofs_flag;

        /* level == 0 means we can use any workspace */
        if (!level)
                level = 1;

again:
        ws = zstd_find_workspace(level);
        if (ws)
                return ws;

        nofs_flag = memalloc_nofs_save();
        ws = zstd_alloc_workspace(level);
        memalloc_nofs_restore(nofs_flag);

        if (IS_ERR(ws)) {
                DEFINE_WAIT(wait);

                prepare_to_wait(&wsm.wait, &wait, TASK_UNINTERRUPTIBLE);
                schedule();
                finish_wait(&wsm.wait, &wait);

                goto again;
        }

        return ws;
}
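
/*
 * Typical call pattern (sketch only): callers in compression.c are expected
 * to bracket each request with a get/put pair, e.g.
 *
 *	ws = zstd_get_workspace(level);
 *	ret = zstd_compress_folios(ws, mapping, start, folios, &nr_folios,
 *				   &total_in, &total_out);
 *	zstd_put_workspace(ws);
 *
 * zstd_get_workspace() never fails: if no workspace is free and a new one
 * cannot be allocated, it sleeps until one is put back, relying on the
 * preallocated max level workspace for forward progress.
 */
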
/*
 * Zstd put_workspace.
 *
 * @ws: list_head for the workspace
 *
 * When putting back a workspace, we only need to update the LRU if we are of
 * the requested compression level. Here is where we continue to protect the
 * max level workspace or update last_used accordingly. If the reclaim timer
 * isn't set, it is also set here. Only the max level workspace wakes up
 * tasks waiting for a workspace.
 */
void zstd_put_workspace(struct list_head *ws)
{
        struct workspace *workspace = list_to_workspace(ws);

        spin_lock_bh(&wsm.lock);

        /* A node is only taken off the lru if we are the corresponding level */
        if (clip_level(workspace->req_level) == workspace->level) {
                /* Hide a max level workspace from reclaim */
                if (list_empty(&wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
                        INIT_LIST_HEAD(&workspace->lru_list);
                } else {
                        workspace->last_used = jiffies;
                        list_add(&workspace->lru_list, &wsm.lru_list);
                        if (!timer_pending(&wsm.timer))
                                mod_timer(&wsm.timer,
                                          jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
                }
        }

        set_bit(workspace->level, &wsm.active_map);
        list_add(&workspace->list, &wsm.idle_ws[workspace->level]);
        workspace->req_level = 0;

        spin_unlock_bh(&wsm.lock);

        if (workspace->level == clip_level(ZSTD_BTRFS_MAX_LEVEL))
                cond_wake_up(&wsm.wait);
}

void zstd_free_workspace(struct list_head *ws)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);

        kvfree(workspace->mem);
        kfree(workspace->buf);
        kfree(workspace);
}

struct list_head *zstd_alloc_workspace(int level)
{
        struct workspace *workspace;

        workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
        if (!workspace)
                return ERR_PTR(-ENOMEM);

        /* Use level 1 workspace size for all the fast mode negative levels. */
        workspace->size = zstd_ws_mem_sizes[clip_level(level)];
        workspace->level = clip_level(level);
        workspace->req_level = level;
        workspace->last_used = jiffies;
        workspace->mem = kvmalloc(workspace->size, GFP_KERNEL | __GFP_NOWARN);
        workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!workspace->mem || !workspace->buf)
                goto fail;

        INIT_LIST_HEAD(&workspace->list);
        INIT_LIST_HEAD(&workspace->lru_list);

        return &workspace->list;
fail:
        zstd_free_workspace(&workspace->list);
        return ERR_PTR(-ENOMEM);
}
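
/*
 * Compress the range starting at @start in @mapping into the preallocated
 * @folios array. On entry *total_out holds the length of the range and
 * *out_folios the number of destination folios; on return they hold the
 * compressed length and the number of folios actually used. A -E2BIG
 * return means the data did not shrink enough to be worth storing
 * compressed; callers are expected to fall back to writing the range
 * uncompressed.
 */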
int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
                u64 start, struct folio **folios, unsigned long *out_folios,
                unsigned long *total_in, unsigned long *total_out)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        zstd_cstream *stream;
        int ret = 0;
        int nr_folios = 0;
        struct folio *in_folio = NULL;  /* The current folio to read. */
        struct folio *out_folio = NULL; /* The current folio to write to. */
        unsigned long tot_in = 0;
        unsigned long tot_out = 0;
        unsigned long len = *total_out;
        const unsigned long nr_dest_folios = *out_folios;
        const u64 orig_end = start + len;
        unsigned long max_out = nr_dest_folios * PAGE_SIZE;
        unsigned int cur_len;

        workspace->params = zstd_get_btrfs_parameters(workspace->req_level, len);
        *out_folios = 0;
        *total_out = 0;
        *total_in = 0;

        /* Initialize the stream */
        stream = zstd_init_cstream(&workspace->params, len, workspace->mem,
                                   workspace->size);
        if (unlikely(!stream)) {
                struct btrfs_inode *inode = BTRFS_I(mapping->host);

                btrfs_err(inode->root->fs_info,
        "zstd compression init level %d failed, root %llu inode %llu offset %llu",
                          workspace->req_level, btrfs_root_id(inode->root),
                          btrfs_ino(inode), start);
                ret = -EIO;
                goto out;
        }

        /* map in the first page of input data */
        ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
        if (ret < 0)
                goto out;
        cur_len = btrfs_calc_input_length(in_folio, orig_end, start);
        workspace->in_buf.src = kmap_local_folio(in_folio, offset_in_folio(in_folio, start));
        workspace->in_buf.pos = 0;
        workspace->in_buf.size = cur_len;

        /* Allocate and map in the output buffer */
        out_folio = btrfs_alloc_compr_folio();
        if (out_folio == NULL) {
                ret = -ENOMEM;
                goto out;
        }
        folios[nr_folios++] = out_folio;
        workspace->out_buf.dst = folio_address(out_folio);
        workspace->out_buf.pos = 0;
        workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);

        while (1) {
                size_t ret2;

                ret2 = zstd_compress_stream(stream, &workspace->out_buf,
                                            &workspace->in_buf);
                if (unlikely(zstd_is_error(ret2))) {
                        struct btrfs_inode *inode = BTRFS_I(mapping->host);

                        btrfs_warn(inode->root->fs_info,
        "zstd compression level %d failed, error %d root %llu inode %llu offset %llu",
                                   workspace->req_level, zstd_get_error_code(ret2),
                                   btrfs_root_id(inode->root), btrfs_ino(inode),
                                   start);
                        ret = -EIO;
                        goto out;
                }

                /* Check to see if we are making it bigger */
                if (tot_in + workspace->in_buf.pos > 8192 &&
                    tot_in + workspace->in_buf.pos <
                    tot_out + workspace->out_buf.pos) {
                        ret = -E2BIG;
                        goto out;
                }

                /* We've reached the end of our output range */
                if (workspace->out_buf.pos >= max_out) {
                        tot_out += workspace->out_buf.pos;
                        ret = -E2BIG;
                        goto out;
                }

                /* Check if we need more output space */
                if (workspace->out_buf.pos == workspace->out_buf.size) {
                        tot_out += PAGE_SIZE;
                        max_out -= PAGE_SIZE;
                        if (nr_folios == nr_dest_folios) {
                                ret = -E2BIG;
                                goto out;
                        }
                        out_folio = btrfs_alloc_compr_folio();
                        if (out_folio == NULL) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        folios[nr_folios++] = out_folio;
                        workspace->out_buf.dst = folio_address(out_folio);
                        workspace->out_buf.pos = 0;
                        workspace->out_buf.size = min_t(size_t, max_out,
                                                        PAGE_SIZE);
                }

                /* We've reached the end of the input */
                if (workspace->in_buf.pos >= len) {
                        tot_in += workspace->in_buf.pos;
                        break;
                }

                /* Check if we need more input */
                if (workspace->in_buf.pos == workspace->in_buf.size) {
                        tot_in += workspace->in_buf.size;
                        kunmap_local(workspace->in_buf.src);
                        workspace->in_buf.src = NULL;
                        folio_put(in_folio);
                        start += cur_len;
                        len -= cur_len;
                        ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
                        if (ret < 0)
                                goto out;
                        cur_len = btrfs_calc_input_length(in_folio, orig_end, start);
                        workspace->in_buf.src = kmap_local_folio(in_folio,
                                                offset_in_folio(in_folio, start));
                        workspace->in_buf.pos = 0;
                        workspace->in_buf.size = cur_len;
                }
        }
        while (1) {
                size_t ret2;

                ret2 = zstd_end_stream(stream, &workspace->out_buf);
                if (unlikely(zstd_is_error(ret2))) {
                        struct btrfs_inode *inode = BTRFS_I(mapping->host);

                        btrfs_err(inode->root->fs_info,
        "zstd compression end level %d failed, error %d root %llu inode %llu offset %llu",
                                  workspace->req_level, zstd_get_error_code(ret2),
                                  btrfs_root_id(inode->root), btrfs_ino(inode),
                                  start);
                        ret = -EIO;
                        goto out;
                }
                if (ret2 == 0) {
                        tot_out += workspace->out_buf.pos;
                        break;
                }
                if (workspace->out_buf.pos >= max_out) {
                        tot_out += workspace->out_buf.pos;
                        ret = -E2BIG;
                        goto out;
                }

                tot_out += PAGE_SIZE;
                max_out -= PAGE_SIZE;
                if (nr_folios == nr_dest_folios) {
                        ret = -E2BIG;
                        goto out;
                }
                out_folio = btrfs_alloc_compr_folio();
                if (out_folio == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }
                folios[nr_folios++] = out_folio;
                workspace->out_buf.dst = folio_address(out_folio);
                workspace->out_buf.pos = 0;
                workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
        }

        if (tot_out >= tot_in) {
                ret = -E2BIG;
                goto out;
        }

        ret = 0;
        *total_in = tot_in;
        *total_out = tot_out;
out:
        *out_folios = nr_folios;
        if (workspace->in_buf.src) {
                kunmap_local(workspace->in_buf.src);
                folio_put(in_folio);
        }
        return ret;
}
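
/*
 * Decompress a whole compressed bio. The input folios attached to @cb are
 * streamed through the dstream one page at a time; decompressed data lands
 * in the per-workspace bounce buffer (workspace->buf) and is then copied
 * into the bio's destination pages by btrfs_decompress_buf2page().
 */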
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        struct folio **folios_in = cb->compressed_folios;
        size_t srclen = cb->compressed_len;
        zstd_dstream *stream;
        int ret = 0;
        unsigned long folio_in_index = 0;
        unsigned long total_folios_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
        unsigned long buf_start;
        unsigned long total_out = 0;

        stream = zstd_init_dstream(
                        ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
        if (unlikely(!stream)) {
                struct btrfs_inode *inode = cb->bbio.inode;

                btrfs_err(inode->root->fs_info,
                "zstd decompression init failed, root %llu inode %llu offset %llu",
                          btrfs_root_id(inode->root), btrfs_ino(inode), cb->start);
                ret = -EIO;
                goto done;
        }

        workspace->in_buf.src = kmap_local_folio(folios_in[folio_in_index], 0);
        workspace->in_buf.pos = 0;
        workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);

        workspace->out_buf.dst = workspace->buf;
        workspace->out_buf.pos = 0;
        workspace->out_buf.size = PAGE_SIZE;

        while (1) {
                size_t ret2;

                ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
                                              &workspace->in_buf);
                if (unlikely(zstd_is_error(ret2))) {
                        struct btrfs_inode *inode = cb->bbio.inode;

                        btrfs_err(inode->root->fs_info,
                "zstd decompression failed, error %d root %llu inode %llu offset %llu",
                                  zstd_get_error_code(ret2), btrfs_root_id(inode->root),
                                  btrfs_ino(inode), cb->start);
                        ret = -EIO;
                        goto done;
                }
                buf_start = total_out;
                total_out += workspace->out_buf.pos;
                workspace->out_buf.pos = 0;

                ret = btrfs_decompress_buf2page(workspace->out_buf.dst,
                                total_out - buf_start, cb, buf_start);
                if (ret == 0)
                        break;

                if (workspace->in_buf.pos >= srclen)
                        break;

                /* Check if we've hit the end of a frame */
                if (ret2 == 0)
                        break;

                if (workspace->in_buf.pos == workspace->in_buf.size) {
                        kunmap_local(workspace->in_buf.src);
                        folio_in_index++;
                        if (folio_in_index >= total_folios_in) {
                                workspace->in_buf.src = NULL;
                                ret = -EIO;
                                goto done;
                        }
                        srclen -= PAGE_SIZE;
                        workspace->in_buf.src =
                                kmap_local_folio(folios_in[folio_in_index], 0);
                        workspace->in_buf.pos = 0;
                        workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
                }
        }
        ret = 0;
done:
        if (workspace->in_buf.src)
                kunmap_local(workspace->in_buf.src);
        return ret;
}
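
/*
 * Decompress a single sector worth of data that is already mapped at
 * @data_in into @dest_folio at @dest_pgoff (used e.g. for inline extents).
 * Both input and output fit in one sector, so a single
 * zstd_decompress_stream() call is enough; any shortfall against @destlen
 * is zero filled and reported as -EIO.
 */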
int zstd_decompress(struct list_head *ws, const u8 *data_in,
                struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
                size_t destlen)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        struct btrfs_fs_info *fs_info = btrfs_sb(folio_inode(dest_folio)->i_sb);
        const u32 sectorsize = fs_info->sectorsize;
        zstd_dstream *stream;
        int ret = 0;
        unsigned long to_copy = 0;

        stream = zstd_init_dstream(
                        ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
        if (unlikely(!stream)) {
                struct btrfs_inode *inode = folio_to_inode(dest_folio);

                btrfs_err(inode->root->fs_info,
                "zstd decompression init failed, root %llu inode %llu offset %llu",
                          btrfs_root_id(inode->root), btrfs_ino(inode),
                          folio_pos(dest_folio));
                ret = -EIO;
                goto finish;
        }

        workspace->in_buf.src = data_in;
        workspace->in_buf.pos = 0;
        workspace->in_buf.size = srclen;

        workspace->out_buf.dst = workspace->buf;
        workspace->out_buf.pos = 0;
        workspace->out_buf.size = sectorsize;

        /*
         * Since both input and output buffers should not exceed one sector,
         * one call should end the decompression.
         */
        ret = zstd_decompress_stream(stream, &workspace->out_buf, &workspace->in_buf);
        if (unlikely(zstd_is_error(ret))) {
                struct btrfs_inode *inode = folio_to_inode(dest_folio);

                btrfs_err(inode->root->fs_info,
                "zstd decompression failed, error %d root %llu inode %llu offset %llu",
                          zstd_get_error_code(ret), btrfs_root_id(inode->root),
                          btrfs_ino(inode), folio_pos(dest_folio));
                goto finish;
        }
        to_copy = workspace->out_buf.pos;
        memcpy_to_folio(dest_folio, dest_pgoff, workspace->out_buf.dst, to_copy);
finish:
        /* Error or early end. */
        if (unlikely(to_copy < destlen)) {
                ret = -EIO;
                folio_zero_range(dest_folio, dest_pgoff + to_copy, destlen - to_copy);
        }
        return ret;
}

const struct btrfs_compress_op btrfs_zstd_compress = {
        /* ZSTD uses own workspace manager */
        .workspace_manager = NULL,
        .min_level = ZSTD_BTRFS_MIN_LEVEL,
        .max_level = ZSTD_BTRFS_MAX_LEVEL,
        .default_level = ZSTD_BTRFS_DEFAULT_LEVEL,
};