Lines matching full:restore, grouped by enclosing function:

In the struct ttm_pool_tt_restore kernel-doc and nearby comments:

 87   * struct ttm_pool_tt_restore - State representing restore from backup
 94   * page vector from this restore session.
442   * to be able to resume an interrupted restore, and that structure is freed once
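
For orientation, the fields dereferenced in the matches below suggest a
restore-state structure along these lines. This is a sketch reconstructed
from the usage sites in this listing, not a verbatim copy of the kernel
definition (whose kernel-doc sits around line 87); types are inferred:

    /* Sketch only: fields and types inferred from the matched lines below. */
    struct ttm_pool_tt_restore {
            struct ttm_pool *pool;              /* pool used while restoring (line 588) */
            struct ttm_pool_alloc_state snapshot_alloc; /* restart point (lines 573, 594) */
            struct page *alloced_page;          /* freshly allocated, possibly huge page */
            dma_addr_t first_dma;               /* its dma address, if mapped (line 560) */
            pgoff_t alloced_pages;              /* pages committed to the tt page vector */
            pgoff_t restored_pages;             /* 4K subpages restored so far (line 451) */
            enum ttm_caching page_caching;      /* caching mode of alloced_page */
            unsigned int order;                 /* allocation order of alloced_page */
    };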

In ttm_pool_restore_valid():

448  /* Is restore ongoing for the currently allocated page? */
449  static bool ttm_pool_restore_valid(const struct ttm_pool_tt_restore *restore)
451          return restore && restore->restored_pages < (1 << restore->order);
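
The predicate treats an order-N allocation as 1 << N 4K subpages and reports
the restore as ongoing while fewer than that many have been copied back. The
same comparison explains the sentinel stores seen later at lines 564 and 917.
A minimal userspace model (fake_restore is a hypothetical stand-in, not a
kernel type):

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_restore {               /* models the two fields the predicate reads */
            unsigned long restored_pages;
            unsigned int order;
    };

    static bool restore_valid(const struct fake_restore *r)
    {
            return r && r->restored_pages < (1UL << r->order);
    }

    int main(void)
    {
            struct fake_restore r = { .restored_pages = 3, .order = 2 };

            printf("%d\n", restore_valid(&r)); /* 1: only 3 of 4 subpages restored */
            r.restored_pages = 1UL << r.order; /* sentinel: page fully handled */
            printf("%d\n", restore_valid(&r)); /* 0: nothing left in flight */
            return 0;
    }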

In ttm_pool_restore_commit():

505   * When restoring, restore backed-up content to the newly allocated page and
508  static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
514          pgoff_t i, nr = 1UL << restore->order;
519          for (i = restore->restored_pages; i < nr; ++i) {
531                  restore->restored_pages++;
535                  ret = ttm_backup_copy_page(backup, restore->alloced_page + i,
549                  ttm_pool_split_for_swap(restore->pool, p);
550                  copy_highpage(restore->alloced_page + i, p);
554                  restore->restored_pages++;
559          if (!restore->restored_pages) {
560                  dma_addr_t *dma_addr = alloc->dma_addr ? &restore->first_dma : NULL;
562                  ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
563                                          dma_addr, restore->page_caching);
564                  restore->restored_pages = nr;
569          ttm_pool_allocated_page_commit(restore->alloced_page, restore->first_dma,
571          if (restore->page_caching == alloc->tt_caching || PageHighMem(restore->alloced_page))
573          restore->snapshot_alloc = *alloc;
574          restore->alloced_pages += nr;
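
Read together, these fragments show a resumable copy loop: iteration starts
at restore->restored_pages rather than zero (line 519), each restored 4K
subpage bumps the counter (lines 531, 554), and only a fully restored pool
page is committed to the page vector and counted (lines 569, 574). A
userspace model of that bookkeeping, with hypothetical names, since the full
kernel function is not reproduced in this listing:

    #include <stdio.h>

    struct model_restore {                  /* hypothetical; models the counters above */
            unsigned long restored_pages;   /* resume point within the pool page */
            unsigned long alloced_pages;    /* subpages committed so far */
            unsigned int order;
    };

    /* Stand-in for ttm_backup_copy_page(); fails once to simulate an interruption. */
    static int copy_subpage(unsigned long idx, unsigned long fail_at)
    {
            return idx == fail_at ? -1 : 0;
    }

    static int commit(struct model_restore *r, unsigned long fail_at)
    {
            unsigned long i, nr = 1UL << r->order;

            for (i = r->restored_pages; i < nr; ++i) {  /* resume, don't restart */
                    if (copy_subpage(i, fail_at))
                            return -1;                  /* progress survives for a retry */
                    r->restored_pages++;
            }
            r->alloced_pages += nr;                     /* whole pool page committed */
            return 0;
    }

    int main(void)
    {
            struct model_restore r = { .order = 2 };    /* one order-2 page = 4 subpages */

            if (commit(&r, 2))
                    printf("interrupted: %lu of 4 restored\n", r.restored_pages);
            commit(&r, -1UL);                           /* retry resumes at subpage 2 */
            printf("restored %lu, committed %lu\n", r.restored_pages, r.alloced_pages);
            return 0;
    }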

In ttm_pool_page_allocated_restore():

585                                  struct ttm_pool_tt_restore *restore,
588          restore->pool = pool;
589          restore->order = order;
590          restore->restored_pages = 0;
591          restore->page_caching = page_caching;
592          restore->first_dma = first_dma;
593          restore->alloced_page = p;
594          restore->snapshot_alloc = *alloc;
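
These assignments capture everything needed to retry the copy into a freshly
allocated page: its pool, order, caching and dma address, plus a snapshot of
the allocation state taken before any subpage is restored. With
restored_pages set to 0 (line 590), ttm_pool_restore_valid() will report
this page as in flight until the commit loop above finishes it.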

In ttm_pool_page_allocated():

607                                  struct ttm_pool_tt_restore *restore)
627          if (restore) {
629                          first_dma, restore, alloc);

In __ttm_pool_alloc():

702                           struct ttm_pool_tt_restore *restore)
761                                   restore);
765          if (ttm_pool_restore_valid(restore)) {
766                  r = ttm_pool_restore_commit(restore, tt->backup, ctx, alloc);
776          kfree(tt->restore);
777          tt->restore = NULL;
785          if (tt->restore)
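
Lines 776-777 match the comment at line 442: once the allocation completes,
the resume state is freed and tt->restore cleared. The check at line 785
appears to sit on an error path; when restore state still exists, the
partially populated page vector is seemingly left intact for a later resume
(or for ttm_pool_drop_backed_up() below) rather than being freed here.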

In ttm_pool_restore_and_alloc():

843          if (!tt->restore) {
850                  tt->restore = kzalloc(sizeof(*tt->restore), gfp);
851                  if (!tt->restore)
854                  tt->restore->snapshot_alloc = alloc;
855                  tt->restore->pool = pool;
856                  tt->restore->restored_pages = 1;
858                  struct ttm_pool_tt_restore *restore = tt->restore;
861                  alloc = restore->snapshot_alloc;
862                  if (ttm_pool_restore_valid(tt->restore)) {
863                          ret = ttm_pool_restore_commit(restore, tt->backup, ctx, &alloc);
871          return __ttm_pool_alloc(pool, tt, ctx, &alloc, tt->restore);
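
Note the initialization at line 856: the structure comes from kzalloc (line
850), so order is still 0, and restored_pages = 1 equals 1 << 0, which makes
ttm_pool_restore_valid() return false until a page is actually allocated.
The else branch (lines 858-863) instead rewinds alloc to the saved snapshot
and commits any half-restored page before handing off to __ttm_pool_alloc().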

In ttm_pool_drop_backed_up():

900          struct ttm_pool_tt_restore *restore;
905          restore = tt->restore;
908           * Unmap and free any uncommitted restore page.
912          if (ttm_pool_restore_valid(restore)) {
913                  dma_addr_t *dma_addr = tt->dma_address ? &restore->first_dma : NULL;
915                  ttm_pool_unmap_and_free(restore->pool, restore->alloced_page,
916                                          dma_addr, restore->page_caching);
917                  restore->restored_pages = 1UL << restore->order;
921           * If a restore is ongoing, part of the tt pages may have a
924          if (restore) {
925                  pgoff_t mid = restore->snapshot_alloc.caching_divide - tt->pages;
927                  start_page = restore->alloced_pages;
930                  ttm_pool_free_range(restore->pool, tt, tt->caching,
933                  ttm_pool_free_range(restore->pool, tt, ttm_cached,
934                                      mid, restore->alloced_pages);
935                  kfree(restore);
936                  tt->restore = NULL;
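
The teardown mirrors the mixed-caching commit: line 925 converts the
snapshot's caching divide from a pointer into the tt page array into a page
offset, after which pages below that offset are evidently freed with the
tt's own caching (line 930) and pages from there up to restore->alloced_pages
as ttm_cached (lines 933-934). Any page whose copy-back never completed is
unmapped and freed separately first (lines 912-917), with the sentinel store
at line 917 marking it handled.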