Lines matching "cache-block-size"
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Direct MTD block device access
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2000-2003 Nicolas Pitre <nico@fluxnic.net>
 */
/*
 * Cache stuff...
 * Since typical flash erasable sectors are much larger than what the buffer
 * cache can handle, we must implement read-modify-write on flash sectors for
 * each block write request. To avoid over-erasing flash sectors and to speed
 * things up, we locally cache a whole flash sector while it is being written
 * to, until a different sector is required.
 */
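/*
 * The cache fields used throughout this excerpt (mbd, count, cache_mutex,
 * cache_data, cache_offset, cache_size, cache_state) live in the driver's
 * per-device descriptor, which is not among the matched lines. The sketch
 * below reconstructs it from those uses; the exact layout and the comments
 * are assumptions, not matched source.
 */
#include <linux/mtd/blktrans.h>
#include <linux/mutex.h>

struct mtdblk_dev {
    struct mtd_blktrans_dev mbd;    /* base blktrans device (mbd.mtd, mbd.size, ...) */
    int count;                      /* open reference count */
    struct mutex cache_mutex;       /* serialises access to the fields below */
    unsigned char *cache_data;      /* one erase-block-sized buffer, vmalloc()ed lazily */
    unsigned long cache_offset;     /* flash offset mirrored by the buffer */
    unsigned int cache_size;        /* normally mtd->erasesize; 0 means no caching */
    enum {
        STATE_EMPTY,                /* buffer contents undefined */
        STATE_CLEAN,                /* buffer matches the flash at cache_offset */
        STATE_DIRTY,                /* buffer modified, must be erased and written back */
    } cache_state;
};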
/* in erase_write() */
    /*
     * First, let's erase the flash block.
     */
            pos, len, mtd->name);
        return -EIO;
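/*
 * Only the warning's argument list and the -EIO return of erase_write() are
 * among the matched lines. As a sketch of what the helper does, assuming the
 * standard mtd_erase()/mtd_write() API: erase the region covering the sector,
 * rewrite it, and treat a short write as an I/O error.
 */
#include <linux/mtd/mtd.h>

static int erase_write(struct mtd_info *mtd, unsigned long pos,
                       unsigned int len, const char *buf)
{
    struct erase_info erase = { .addr = pos, .len = len };
    size_t retlen;
    int ret;

    ret = mtd_erase(mtd, &erase);               /* erase the whole flash sector */
    if (ret) {
        printk(KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
               "on \"%s\" failed\n", pos, len, mtd->name);
        return ret;
    }

    ret = mtd_write(mtd, pos, len, &retlen, buf);   /* then rewrite its contents */
    if (ret)
        return ret;
    if (retlen != len)                          /* short write: report I/O error */
        return -EIO;
    return 0;
}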
/* in write_cached_data() */
    struct mtd_info *mtd = mtdblk->mbd.mtd;

    if (mtdblk->cache_state != STATE_DIRTY)
        return 0;

    pr_debug("mtdblock: writing cached data for \"%s\" "
             "at 0x%lx, size 0x%x\n", mtd->name,
             mtdblk->cache_offset, mtdblk->cache_size);

    ret = erase_write(mtd, mtdblk->cache_offset,
                      mtdblk->cache_size, mtdblk->cache_data);

    /*
     * Here we could arguably set the cache state to STATE_CLEAN, but we would
     * not be notified if the flash contents change behind our back, so declare
     * it empty and leave buffering tasks to the buffer cache instead. If this
     * cache_offset points to a bad block, data cannot be written there either,
     * and marking the cache empty avoids rewriting the bad block over and over.
     */
    if (ret == 0 || ret == -EIO)
        mtdblk->cache_state = STATE_EMPTY;
/* in do_cached_write() */
    struct mtd_info *mtd = mtdblk->mbd.mtd;
    unsigned int sect_size = mtdblk->cache_size;

    pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
             mtd->name, pos, len);

    while (len > 0) {
        unsigned long sect_start = (pos / sect_size) * sect_size;
        unsigned int offset = pos - sect_start;
        unsigned int size = sect_size - offset;

        if (size > len)
            size = len;

        if (size == sect_size) {
            /*
             * We are covering a whole sector: no need to bother with the
             * cache while it may still be useful for other partial writes.
             */
            ret = erase_write(mtd, pos, size, buf);
            if (ret)
                return ret;
        } else {
            /* Partial sector: need to use the cache */

            if (mtdblk->cache_state == STATE_DIRTY &&
                mtdblk->cache_offset != sect_start) {
                ret = write_cached_data(mtdblk);
                if (ret)
                    return ret;
            }

            if (mtdblk->cache_state == STATE_EMPTY ||
                mtdblk->cache_offset != sect_start) {
                /* fill the cache with the current sector */
                mtdblk->cache_state = STATE_EMPTY;
                ret = mtd_read(mtd, sect_start, sect_size,
                               &retlen, mtdblk->cache_data);
                if (retlen != sect_size)
                    return -EIO;

                mtdblk->cache_offset = sect_start;
                mtdblk->cache_size = sect_size;
                mtdblk->cache_state = STATE_CLEAN;
            }

            /* write data to our local cache */
            memcpy(mtdblk->cache_data + offset, buf, size);
            mtdblk->cache_state = STATE_DIRTY;
        }

        buf += size;
        pos += size;
        len -= size;
    }
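/*
 * The loop above splits an arbitrary (pos, len) request into chunks that each
 * stay inside one cache-sized sector, and only falls back to the cache for
 * partial sectors. A standalone illustration of just that arithmetic, with
 * made-up request and sector sizes (not driver code):
 */
#include <stdio.h>

int main(void)
{
    unsigned long pos = 0xfe00;             /* request straddling three 64 KiB sectors */
    unsigned int len = 0x10400;
    const unsigned int sect_size = 0x10000;

    while (len > 0) {
        unsigned long sect_start = (pos / sect_size) * sect_size;
        unsigned int offset = pos - sect_start;     /* position inside the sector */
        unsigned int size = sect_size - offset;     /* room left in this sector */

        if (size > len)
            size = len;                             /* don't overshoot the request */

        printf("chunk: sector @0x%lx, offset 0x%x, size 0x%x%s\n",
               sect_start, offset, size,
               size == sect_size ? " (whole sector, cache bypassed)" : "");

        pos += size;
        len -= size;
    }
    return 0;
}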
/* in do_cached_read() */
    struct mtd_info *mtd = mtdblk->mbd.mtd;
    unsigned int sect_size = mtdblk->cache_size;

    pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
             mtd->name, pos, len);

    while (len > 0) {
        unsigned long sect_start = (pos / sect_size) * sect_size;
        unsigned int offset = pos - sect_start;
        unsigned int size = sect_size - offset;

        if (size > len)
            size = len;

        /*
         * Read the requested amount of data from our internal cache if it
         * contains what we want, otherwise read the data directly from flash.
         */
        if (mtdblk->cache_state != STATE_EMPTY &&
            mtdblk->cache_offset == sect_start) {
            memcpy(buf, mtdblk->cache_data + offset, size);
        } else {
            ret = mtd_read(mtd, pos, size, &retlen, buf);
            if (retlen != size)
                return -EIO;
        }

        buf += size;
        pos += size;
        len -= size;
    }
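/*
 * As in the write path, a short read is reported as -EIO: mtd_read() returns
 * the number of bytes actually transferred through retlen, and may return
 * -EUCLEAN when correctable bitflips were fixed up. A minimal illustration of
 * that contract; read_one_chunk() is a hypothetical helper, not part of the
 * driver.
 */
#include <linux/mtd/mtd.h>

static int read_one_chunk(struct mtd_info *mtd, loff_t from,
                          size_t len, unsigned char *buf)
{
    size_t retlen = 0;
    int err = mtd_read(mtd, from, len, &retlen, buf);

    /* -EUCLEAN only means bitflips were corrected; the data is still usable */
    if (err && !mtd_is_bitflip(err))
        return err;
    if (retlen != len)              /* anything short counts as an I/O error */
        return -EIO;
    return 0;
}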
static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
                             unsigned long block, char *buf)
{
    struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
    return do_cached_read(mtdblk, block << 9, 512, buf);
}

static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
                              unsigned long block, char *buf)
{
    struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
    if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
        mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
        if (!mtdblk->cache_data)
            return -EINTR;
        /* -EINTR is not really correct, but it is the best match
         * ...
         * return -EAGAIN sometimes, but why bother? */
    }
    return do_cached_write(mtdblk, block << 9, 512, buf);
}
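/*
 * mtd_blktrans works in 512-byte sectors: block << 9 turns a sector number
 * into a byte offset for the cached helpers above, and mtdblock_add_mtd()
 * below reports the device size as mtd->size >> 9 sectors. A trivial
 * standalone check of the two conversions, with made-up numbers:
 */
#include <stdio.h>

int main(void)
{
    unsigned long block = 35;                   /* 512-byte sector index */
    unsigned long long dev_bytes = 16ULL << 20; /* pretend 16 MiB MTD device */

    printf("sector %lu -> byte offset 0x%lx\n", block, block << 9);
    printf("%llu bytes -> %llu sectors of 512 bytes\n", dev_bytes, dev_bytes >> 9);
    return 0;
}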
/* in mtdblock_open() */
    if (mtdblk->count) {
        mtdblk->count++;
        return 0;
    }

    /* OK, it's not open. Create cache info for it */
    mtdblk->count = 1;
    mutex_init(&mtdblk->cache_mutex);
    mtdblk->cache_state = STATE_EMPTY;
    if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
        mtdblk->cache_size = mbd->mtd->erasesize;
        mtdblk->cache_data = NULL;
    }
/* in mtdblock_release() */
    mutex_lock(&mtdblk->cache_mutex);
    write_cached_data(mtdblk);
    mutex_unlock(&mtdblk->cache_mutex);

    if (!--mtdblk->count) {
        /*
         * It was the last usage. Free the cache, but only sync if
         * the device was opened for writing.
         */
        if (mbd->file_mode & FMODE_WRITE)
            mtd_sync(mbd->mtd);
        vfree(mtdblk->cache_data);
    }
/* in mtdblock_flush() */
    mutex_lock(&mtdblk->cache_mutex);
    write_cached_data(mtdblk);
    mutex_unlock(&mtdblk->cache_mutex);
    mtd_sync(dev->mtd);
/* in mtdblock_add_mtd() */
    dev->mbd.mtd = mtd;
    dev->mbd.devnum = mtd->index;

    dev->mbd.size = mtd->size >> 9;
    dev->mbd.tr = tr;

    if (!(mtd->flags & MTD_WRITEABLE))
        dev->mbd.readonly = 1;

    if (add_mtd_blktrans_dev(&dev->mbd))
        kfree(dev);
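/*
 * None of the matched lines show how these handlers reach the block layer.
 * In the driver they are registered through a struct mtd_blktrans_ops table,
 * roughly like the sketch below; the exact field values are assumptions,
 * apart from the 512-byte block size implied by the << 9 / >> 9 shifts above.
 */
static struct mtd_blktrans_ops mtdblock_tr = {
    .name       = "mtdblock",
    .major      = MTD_BLOCK_MAJOR,
    .part_bits  = 0,
    .blksize    = 512,              /* matches the block << 9 sector accessors */
    .open       = mtdblock_open,
    .release    = mtdblock_release,
    .readsect   = mtdblock_readsect,
    .writesect  = mtdblock_writesect,
    .flush      = mtdblock_flush,
    .add_mtd    = mtdblock_add_mtd,
    .owner      = THIS_MODULE,
};

module_mtd_blktrans(mtdblock_tr);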
MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");