xref: /linux/drivers/mtd/ubi/fastmap-wl.c (revision 62b31a045757eac81fed94b19df47418a0818528)
150acfb2bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
278d6d497SRichard Weinberger /*
378d6d497SRichard Weinberger  * Copyright (c) 2012 Linutronix GmbH
478d6d497SRichard Weinberger  * Copyright (c) 2014 sigma star gmbh
578d6d497SRichard Weinberger  * Author: Richard Weinberger <richard@nod.at>
678d6d497SRichard Weinberger  */
778d6d497SRichard Weinberger 
878d6d497SRichard Weinberger /**
978d6d497SRichard Weinberger  * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
1078d6d497SRichard Weinberger  * @wrk: the work description object
1178d6d497SRichard Weinberger  */
1278d6d497SRichard Weinberger static void update_fastmap_work_fn(struct work_struct *wrk)
1378d6d497SRichard Weinberger {
1478d6d497SRichard Weinberger 	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
151841fcfdSRichard Weinberger 
1678d6d497SRichard Weinberger 	ubi_update_fastmap(ubi);
1778d6d497SRichard Weinberger 	spin_lock(&ubi->wl_lock);
1878d6d497SRichard Weinberger 	ubi->fm_work_scheduled = 0;
1978d6d497SRichard Weinberger 	spin_unlock(&ubi->wl_lock);
2078d6d497SRichard Weinberger }
2178d6d497SRichard Weinberger 
2278d6d497SRichard Weinberger /**
2378d6d497SRichard Weinberger  * find_anchor_wl_entry - find wear-leveling entry to used as anchor PEB.
2478d6d497SRichard Weinberger  * @root: the RB-tree where to look for
2578d6d497SRichard Weinberger  */
2678d6d497SRichard Weinberger static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
2778d6d497SRichard Weinberger {
2878d6d497SRichard Weinberger 	struct rb_node *p;
2978d6d497SRichard Weinberger 	struct ubi_wl_entry *e, *victim = NULL;
3078d6d497SRichard Weinberger 	int max_ec = UBI_MAX_ERASECOUNTER;
3178d6d497SRichard Weinberger 
3278d6d497SRichard Weinberger 	ubi_rb_for_each_entry(p, e, root, u.rb) {
3378d6d497SRichard Weinberger 		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
3478d6d497SRichard Weinberger 			victim = e;
3578d6d497SRichard Weinberger 			max_ec = e->ec;
3678d6d497SRichard Weinberger 		}
3778d6d497SRichard Weinberger 	}
3878d6d497SRichard Weinberger 
3978d6d497SRichard Weinberger 	return victim;
4078d6d497SRichard Weinberger }
4178d6d497SRichard Weinberger 
/**
 * return_unused_peb - give a PEB back to the wl subsystem's free tree.
 * @ubi: UBI device description object
 * @e: wear-leveling entry of the PEB to return
 *
 * NOTE(review): ubi->free and ubi->free_count are manipulated under
 * ubi->wl_lock elsewhere in this file (see ubi_refill_pools()), so callers
 * presumably must hold wl_lock or run in a context where no concurrent wl
 * activity is possible -- confirm for each caller.
 */
static inline void return_unused_peb(struct ubi_device *ubi,
				     struct ubi_wl_entry *e)
{
	wl_tree_add(e, &ubi->free);
	ubi->free_count++;
}
48c16f39d1SHou Tao 
4978d6d497SRichard Weinberger /**
5078d6d497SRichard Weinberger  * return_unused_pool_pebs - returns unused PEB to the free tree.
5178d6d497SRichard Weinberger  * @ubi: UBI device description object
5278d6d497SRichard Weinberger  * @pool: fastmap pool description object
5378d6d497SRichard Weinberger  */
5478d6d497SRichard Weinberger static void return_unused_pool_pebs(struct ubi_device *ubi,
5578d6d497SRichard Weinberger 				    struct ubi_fm_pool *pool)
5678d6d497SRichard Weinberger {
5778d6d497SRichard Weinberger 	int i;
5878d6d497SRichard Weinberger 	struct ubi_wl_entry *e;
5978d6d497SRichard Weinberger 
6078d6d497SRichard Weinberger 	for (i = pool->used; i < pool->size; i++) {
6178d6d497SRichard Weinberger 		e = ubi->lookuptbl[pool->pebs[i]];
62c16f39d1SHou Tao 		return_unused_peb(ubi, e);
6378d6d497SRichard Weinberger 	}
6478d6d497SRichard Weinberger }
6578d6d497SRichard Weinberger 
6678d6d497SRichard Weinberger /**
6778d6d497SRichard Weinberger  * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
6878d6d497SRichard Weinberger  * @ubi: UBI device description object
6978d6d497SRichard Weinberger  * @anchor: This PEB will be used as anchor PEB by fastmap
7078d6d497SRichard Weinberger  *
7178d6d497SRichard Weinberger  * The function returns a physical erase block with a given maximal number
7278d6d497SRichard Weinberger  * and removes it from the wl subsystem.
7378d6d497SRichard Weinberger  * Must be called with wl_lock held!
7478d6d497SRichard Weinberger  */
7578d6d497SRichard Weinberger struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
7678d6d497SRichard Weinberger {
7778d6d497SRichard Weinberger 	struct ubi_wl_entry *e = NULL;
7878d6d497SRichard Weinberger 
7978d6d497SRichard Weinberger 	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
8078d6d497SRichard Weinberger 		goto out;
8178d6d497SRichard Weinberger 
8278d6d497SRichard Weinberger 	if (anchor)
8378d6d497SRichard Weinberger 		e = find_anchor_wl_entry(&ubi->free);
8478d6d497SRichard Weinberger 	else
8578d6d497SRichard Weinberger 		e = find_mean_wl_entry(ubi, &ubi->free);
8678d6d497SRichard Weinberger 
8778d6d497SRichard Weinberger 	if (!e)
8878d6d497SRichard Weinberger 		goto out;
8978d6d497SRichard Weinberger 
9078d6d497SRichard Weinberger 	self_check_in_wl_tree(ubi, e, &ubi->free);
9178d6d497SRichard Weinberger 
9278d6d497SRichard Weinberger 	/* remove it from the free list,
9378d6d497SRichard Weinberger 	 * the wl subsystem does no longer know this erase block */
9478d6d497SRichard Weinberger 	rb_erase(&e->u.rb, &ubi->free);
9578d6d497SRichard Weinberger 	ubi->free_count--;
9678d6d497SRichard Weinberger out:
9778d6d497SRichard Weinberger 	return e;
9878d6d497SRichard Weinberger }
9978d6d497SRichard Weinberger 
/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 *
 * Returns all unused PEBs of both pools and the current anchor candidates
 * to the free tree, re-selects the anchor PEBs, and then refills both
 * pools from the free tree. Runs entirely under ubi->wl_lock.
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	/* Drain every not-yet-used pool entry back into ubi->free. */
	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

	/* Return the current anchor candidates as well, so the best
	 * suited PEBs can be picked again below. */
	if (ubi->fm_anchor) {
		wl_tree_add(ubi->fm_anchor, &ubi->free);
		ubi->free_count++;
	}
	if (ubi->fm_next_anchor) {
		wl_tree_add(ubi->fm_next_anchor, &ubi->free);
		ubi->free_count++;
	}

	/* All available PEBs are in ubi->free, now is the time to get
	 * the best anchor PEBs.
	 */
	ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
	ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);

	/* Fill both pools round-robin until each is full or the free
	 * tree can no longer supply PEBs. The wl pool additionally
	 * keeps 5 PEBs on top of the bad-PEB reservation untouched. */
	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!ubi->free.rb_node)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!ubi->free.rb_node ||
			   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		/* Both pools are full -- done. */
		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}
17378d6d497SRichard Weinberger 
17478d6d497SRichard Weinberger /**
1751cb8f977SRichard Weinberger  * produce_free_peb - produce a free physical eraseblock.
1761cb8f977SRichard Weinberger  * @ubi: UBI device description object
1771cb8f977SRichard Weinberger  *
1781cb8f977SRichard Weinberger  * This function tries to make a free PEB by means of synchronous execution of
1791cb8f977SRichard Weinberger  * pending works. This may be needed if, for example the background thread is
1801cb8f977SRichard Weinberger  * disabled. Returns zero in case of success and a negative error code in case
1811cb8f977SRichard Weinberger  * of failure.
1821cb8f977SRichard Weinberger  */
1831cb8f977SRichard Weinberger static int produce_free_peb(struct ubi_device *ubi)
1841cb8f977SRichard Weinberger {
1851cb8f977SRichard Weinberger 	int err;
1861cb8f977SRichard Weinberger 
1871cb8f977SRichard Weinberger 	while (!ubi->free.rb_node && ubi->works_count) {
1881cb8f977SRichard Weinberger 		dbg_wl("do one work synchronously");
1891cb8f977SRichard Weinberger 		err = do_work(ubi);
1901cb8f977SRichard Weinberger 
1911cb8f977SRichard Weinberger 		if (err)
1921cb8f977SRichard Weinberger 			return err;
1931cb8f977SRichard Weinberger 	}
1941cb8f977SRichard Weinberger 
1951cb8f977SRichard Weinberger 	return 0;
1961cb8f977SRichard Weinberger }
1971cb8f977SRichard Weinberger 
/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, attempts = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/* We check here also for the WL pool because at this point we can
	 * refill the WL pool synchronous. */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		/* Writing a new fastmap refills both pools (see
		 * ubi_refill_pools()). */
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			/* Honor the contract: fm_eba_sem is held on return,
			 * even on failure. */
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		/* Bound the retries so a device that cannot produce free
		 * PEBs does not loop here forever. */
		attempts++;
		if (attempts == 10) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		up_read(&ubi->fm_eba_sem);
		/* Execute pending works synchronously to free a PEB, then
		 * retry from the top. */
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	/* Shield the handed-out PEB from wear-leveling for a while. */
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}
25578d6d497SRichard Weinberger 
25678d6d497SRichard Weinberger /* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
25778d6d497SRichard Weinberger  *
25878d6d497SRichard Weinberger  * @ubi: UBI device description object
25978d6d497SRichard Weinberger  */
26078d6d497SRichard Weinberger static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
26178d6d497SRichard Weinberger {
26278d6d497SRichard Weinberger 	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
26378d6d497SRichard Weinberger 	int pnum;
26478d6d497SRichard Weinberger 
2652e8f08deSRichard Weinberger 	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
2662e8f08deSRichard Weinberger 
26778d6d497SRichard Weinberger 	if (pool->used == pool->size) {
26878d6d497SRichard Weinberger 		/* We cannot update the fastmap here because this
26978d6d497SRichard Weinberger 		 * function is called in atomic context.
27078d6d497SRichard Weinberger 		 * Let's fail here and refill/update it as soon as possible. */
27178d6d497SRichard Weinberger 		if (!ubi->fm_work_scheduled) {
27278d6d497SRichard Weinberger 			ubi->fm_work_scheduled = 1;
27378d6d497SRichard Weinberger 			schedule_work(&ubi->fm_work);
27478d6d497SRichard Weinberger 		}
27578d6d497SRichard Weinberger 		return NULL;
276e1bc37ceSRichard Weinberger 	}
277e1bc37ceSRichard Weinberger 
27878d6d497SRichard Weinberger 	pnum = pool->pebs[pool->used++];
27978d6d497SRichard Weinberger 	return ubi->lookuptbl[pnum];
28078d6d497SRichard Weinberger }
28178d6d497SRichard Weinberger 
/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 *
 * Makes sure an anchor candidate (ubi->fm_next_anchor) exists, taking one
 * from the free tree if possible, and schedules a wear-leveling work item
 * unless one is already scheduled. Returns zero on success and -ENOMEM if
 * the work item cannot be allocated.
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);

	/* Do we have a next anchor? */
	if (!ubi->fm_next_anchor) {
		ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
		if (!ubi->fm_next_anchor)
			/* Tell wear leveling to produce a new anchor PEB */
			ubi->fm_do_produce_anchor = 1;
	}

	/* Do wear leveling to get a new anchor PEB or check the
	 * existing next anchor candidate.
	 */
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		/* Clear the flag again so a later call may retry. */
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
}
32278d6d497SRichard Weinberger 
32378d6d497SRichard Weinberger /**
32478d6d497SRichard Weinberger  * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
32578d6d497SRichard Weinberger  * sub-system.
32678d6d497SRichard Weinberger  * see: ubi_wl_put_peb()
32778d6d497SRichard Weinberger  *
32878d6d497SRichard Weinberger  * @ubi: UBI device description object
32978d6d497SRichard Weinberger  * @fm_e: physical eraseblock to return
33078d6d497SRichard Weinberger  * @lnum: the last used logical eraseblock number for the PEB
33178d6d497SRichard Weinberger  * @torture: if this physical eraseblock has to be tortured
33278d6d497SRichard Weinberger  */
33378d6d497SRichard Weinberger int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
33478d6d497SRichard Weinberger 		      int lnum, int torture)
33578d6d497SRichard Weinberger {
33678d6d497SRichard Weinberger 	struct ubi_wl_entry *e;
33778d6d497SRichard Weinberger 	int vol_id, pnum = fm_e->pnum;
33878d6d497SRichard Weinberger 
33978d6d497SRichard Weinberger 	dbg_wl("PEB %d", pnum);
34078d6d497SRichard Weinberger 
34178d6d497SRichard Weinberger 	ubi_assert(pnum >= 0);
34278d6d497SRichard Weinberger 	ubi_assert(pnum < ubi->peb_count);
34378d6d497SRichard Weinberger 
34478d6d497SRichard Weinberger 	spin_lock(&ubi->wl_lock);
34578d6d497SRichard Weinberger 	e = ubi->lookuptbl[pnum];
34678d6d497SRichard Weinberger 
34778d6d497SRichard Weinberger 	/* This can happen if we recovered from a fastmap the very
34878d6d497SRichard Weinberger 	 * first time and writing now a new one. In this case the wl system
34978d6d497SRichard Weinberger 	 * has never seen any PEB used by the original fastmap.
35078d6d497SRichard Weinberger 	 */
35178d6d497SRichard Weinberger 	if (!e) {
35278d6d497SRichard Weinberger 		e = fm_e;
35378d6d497SRichard Weinberger 		ubi_assert(e->ec >= 0);
35478d6d497SRichard Weinberger 		ubi->lookuptbl[pnum] = e;
35578d6d497SRichard Weinberger 	}
35678d6d497SRichard Weinberger 
35778d6d497SRichard Weinberger 	spin_unlock(&ubi->wl_lock);
35878d6d497SRichard Weinberger 
35978d6d497SRichard Weinberger 	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
3602e8f08deSRichard Weinberger 	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
36178d6d497SRichard Weinberger }
36278d6d497SRichard Weinberger 
/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 *
 * Returns non-zero if @wrk's handler is the erase worker, zero otherwise.
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}
37178d6d497SRichard Weinberger 
37278d6d497SRichard Weinberger static void ubi_fastmap_close(struct ubi_device *ubi)
37378d6d497SRichard Weinberger {
37478d6d497SRichard Weinberger 	int i;
37578d6d497SRichard Weinberger 
37678d6d497SRichard Weinberger 	return_unused_pool_pebs(ubi, &ubi->fm_pool);
37778d6d497SRichard Weinberger 	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
37878d6d497SRichard Weinberger 
379c16f39d1SHou Tao 	if (ubi->fm_anchor) {
380c16f39d1SHou Tao 		return_unused_peb(ubi, ubi->fm_anchor);
381c16f39d1SHou Tao 		ubi->fm_anchor = NULL;
382c16f39d1SHou Tao 	}
383c16f39d1SHou Tao 
384*c3fc1a39SZhihao Cheng 	if (ubi->fm_next_anchor) {
385*c3fc1a39SZhihao Cheng 		return_unused_peb(ubi, ubi->fm_next_anchor);
386*c3fc1a39SZhihao Cheng 		ubi->fm_next_anchor = NULL;
387*c3fc1a39SZhihao Cheng 	}
388*c3fc1a39SZhihao Cheng 
38978d6d497SRichard Weinberger 	if (ubi->fm) {
39078d6d497SRichard Weinberger 		for (i = 0; i < ubi->fm->used_blocks; i++)
39178d6d497SRichard Weinberger 			kfree(ubi->fm->e[i]);
39278d6d497SRichard Weinberger 	}
39378d6d497SRichard Weinberger 	kfree(ubi->fm);
39478d6d497SRichard Weinberger }
3952f84c246SRichard Weinberger 
3962f84c246SRichard Weinberger /**
3972f84c246SRichard Weinberger  * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
3982f84c246SRichard Weinberger  * See find_mean_wl_entry()
3992f84c246SRichard Weinberger  *
4002f84c246SRichard Weinberger  * @ubi: UBI device description object
4012f84c246SRichard Weinberger  * @e: physical eraseblock to return
4022f84c246SRichard Weinberger  * @root: RB tree to test against.
4032f84c246SRichard Weinberger  */
4042f84c246SRichard Weinberger static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
4052f84c246SRichard Weinberger 					   struct ubi_wl_entry *e,
4062f84c246SRichard Weinberger 					   struct rb_root *root) {
4072f84c246SRichard Weinberger 	if (e && !ubi->fm_disabled && !ubi->fm &&
4082f84c246SRichard Weinberger 	    e->pnum < UBI_FM_MAX_START)
4092f84c246SRichard Weinberger 		e = rb_entry(rb_next(root->rb_node),
4102f84c246SRichard Weinberger 			     struct ubi_wl_entry, u.rb);
4112f84c246SRichard Weinberger 
4122f84c246SRichard Weinberger 	return e;
4132f84c246SRichard Weinberger }
414