Lines matching the identifier "part" in drivers/mtd/rfd_ftl.c (the Linux RFD flash translation layer). Each entry below gives the source line number, the matched line, and the function it belongs to.
93 static int build_block_map(struct partition *part, int block_no) in build_block_map() argument
95 struct block *block = &part->blocks[block_no]; in build_block_map()
98 block->offset = part->block_size * block_no; in build_block_map()
100 if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) { in build_block_map()
107 for (i=0; i<part->data_sectors_per_block; i++) { in build_block_map()
110 entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]); in build_block_map()
123 if (entry >= part->sector_count) { in build_block_map()
127 part->mbd.mtd->name, block_no, i, entry); in build_block_map()
131 if (part->sector_map[entry] != -1) { in build_block_map()
134 part->mbd.mtd->name, entry); in build_block_map()
135 part->errors = 1; in build_block_map()
139 part->sector_map[entry] = block->offset + in build_block_map()
140 (i + part->header_sectors_per_block) * SECTOR_SIZE; in build_block_map()
145 if (block->free_sectors == part->data_sectors_per_block) in build_block_map()
146 part->reserved_block = block_no; in build_block_map()
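
The build_block_map() lines above show how each block's on-flash header map is folded into the global sector map: every valid 16-bit entry names a logical sector, and that sector's data sits just past the header sectors of the owning erase block. Below is a minimal standalone sketch of that step (plain userspace C, not the driver; the SECTOR_* sentinel values and SECTOR_SIZE are assumed to match the driver's defines):

#include <stdint.h>

#define SECTOR_SIZE     512       /* assumed value of the driver's define */
#define SECTOR_FREE     0xffffu   /* assumed: slot not yet used */
#define SECTOR_DELETED  0x0000u   /* assumed: slot invalidated */
#define SECTOR_ZERO     0xfffeu   /* assumed: stored code for logical sector 0 */

/*
 * Mirror of the mapping step in build_block_map(): header_map[i] names a
 * logical sector, and its data lives i sectors after the block's header.
 */
static void fill_sector_map(long *sector_map, unsigned long sector_count,
                            const uint16_t *header_map,
                            int data_sectors_per_block,
                            int header_sectors_per_block, long block_offset)
{
        for (int i = 0; i < data_sectors_per_block; i++) {
                uint16_t entry = header_map[i];   /* le16_to_cpu() in the driver */

                if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
                        continue;
                if (entry == SECTOR_ZERO)
                        entry = 0;                /* sector 0 is stored specially */
                if (entry >= sector_count)
                        continue;                 /* out-of-range entry, skip it */

                sector_map[entry] = block_offset +
                        (long)(i + header_sectors_per_block) * SECTOR_SIZE;
        }
}

The driver additionally flags duplicate map entries (part->errors = 1) and remembers a fully free block as the reserve; both are omitted here for brevity.
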
151 static int scan_header(struct partition *part) in scan_header() argument
158 sectors_per_block = part->block_size / SECTOR_SIZE; in scan_header()
159 part->total_blocks = (u32)part->mbd.mtd->size / part->block_size; in scan_header()
161 if (part->total_blocks < 2) in scan_header()
165 part->header_sectors_per_block = in scan_header()
169 part->data_sectors_per_block = sectors_per_block - in scan_header()
170 part->header_sectors_per_block; in scan_header()
172 part->header_size = (HEADER_MAP_OFFSET + in scan_header()
173 part->data_sectors_per_block) * sizeof(u16); in scan_header()
175 part->cylinders = (part->data_sectors_per_block * in scan_header()
176 (part->total_blocks - 1) - 1) / SECTORS_PER_TRACK; in scan_header()
178 part->sector_count = part->cylinders * SECTORS_PER_TRACK; in scan_header()
180 part->current_block = -1; in scan_header()
181 part->reserved_block = -1; in scan_header()
182 part->is_reclaiming = 0; in scan_header()
184 part->header_cache = kmalloc(part->header_size, GFP_KERNEL); in scan_header()
185 if (!part->header_cache) in scan_header()
188 part->blocks = kcalloc(part->total_blocks, sizeof(struct block), in scan_header()
190 if (!part->blocks) in scan_header()
193 part->sector_map = vmalloc(array_size(sizeof(u_long), in scan_header()
194 part->sector_count)); in scan_header()
195 if (!part->sector_map) { in scan_header()
197 "sector map", part->mbd.mtd->name); in scan_header()
201 for (i=0; i<part->sector_count; i++) in scan_header()
202 part->sector_map[i] = -1; in scan_header()
204 for (i=0, blocks_found=0; i<part->total_blocks; i++) { in scan_header()
205 rc = mtd_read(part->mbd.mtd, i * part->block_size, in scan_header()
206 part->header_size, &retlen, in scan_header()
207 (u_char *)part->header_cache); in scan_header()
209 if (!rc && retlen != part->header_size) in scan_header()
215 if (!build_block_map(part, i)) in scan_header()
221 part->mbd.mtd->name); in scan_header()
226 if (part->reserved_block == -1) { in scan_header()
228 part->mbd.mtd->name); in scan_header()
230 part->errors = 1; in scan_header()
236 vfree(part->sector_map); in scan_header()
237 kfree(part->header_cache); in scan_header()
238 kfree(part->blocks); in scan_header()
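
scan_header() derives the whole disk geometry from the erase-block size and the device size before reading any block headers. The arithmetic visible in the lines above is collected in the self-contained sketch below; the exact rounding in the header_sectors_per_block formula is only partially matched and is an assumption here, as are the define values:

#include <stdint.h>

#define SECTOR_SIZE        512   /* assumed */
#define SECTORS_PER_TRACK  63    /* assumed */
#define HEADER_MAP_OFFSET  3     /* assumed */

struct geometry {
        uint32_t total_blocks;
        int header_sectors_per_block;
        int data_sectors_per_block;
        int header_size;
        int cylinders;
        int sector_count;
};

/* Collected form of the computations matched in scan_header(). */
static int compute_geometry(uint32_t device_size, int block_size,
                            struct geometry *g)
{
        int sectors_per_block = block_size / SECTOR_SIZE;

        g->total_blocks = device_size / block_size;
        if (g->total_blocks < 2)
                return -1;        /* need at least one data block plus a spare */

        /* header map rounded up to whole sectors (assumed form) */
        g->header_sectors_per_block =
                ((HEADER_MAP_OFFSET + sectors_per_block) * (int)sizeof(uint16_t)
                 + SECTOR_SIZE - 1) / SECTOR_SIZE;

        g->data_sectors_per_block = sectors_per_block -
                g->header_sectors_per_block;

        g->header_size = (HEADER_MAP_OFFSET + g->data_sectors_per_block)
                * (int)sizeof(uint16_t);

        /* one erase block is kept in reserve for reclaim, hence total_blocks - 1 */
        g->cylinders = (g->data_sectors_per_block *
                        ((int)g->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
        g->sector_count = g->cylinders * SECTORS_PER_TRACK;

        return 0;
}
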
245 struct partition *part = (struct partition*)dev; in rfd_ftl_readsect() local
250 if (sector >= part->sector_count) in rfd_ftl_readsect()
253 addr = part->sector_map[sector]; in rfd_ftl_readsect()
255 rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen, in rfd_ftl_readsect()
262 "0x%lx\n", part->mbd.mtd->name, addr); in rfd_ftl_readsect()
271 static int erase_block(struct partition *part, int block) in erase_block() argument
280 erase->addr = part->blocks[block].offset; in erase_block()
281 erase->len = part->block_size; in erase_block()
283 part->blocks[block].state = BLOCK_ERASING; in erase_block()
284 part->blocks[block].free_sectors = 0; in erase_block()
286 rc = mtd_erase(part->mbd.mtd, erase); in erase_block()
290 (unsigned long long)erase->len, part->mbd.mtd->name); in erase_block()
291 part->blocks[block].state = BLOCK_FAILED; in erase_block()
292 part->blocks[block].free_sectors = 0; in erase_block()
293 part->blocks[block].used_sectors = 0; in erase_block()
298 part->blocks[block].state = BLOCK_ERASED; in erase_block()
299 part->blocks[block].free_sectors = part->data_sectors_per_block; in erase_block()
300 part->blocks[block].used_sectors = 0; in erase_block()
301 part->blocks[block].erases++; in erase_block()
303 rc = mtd_write(part->mbd.mtd, part->blocks[block].offset, in erase_block()
310 part->mbd.mtd->name, part->blocks[block].offset); in erase_block()
311 part->blocks[block].state = BLOCK_FAILED; in erase_block()
313 part->blocks[block].state = BLOCK_OK; in erase_block()
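
erase_block() is a small state machine around the physical erase: the block is marked BLOCK_ERASING, erased, then the RFD magic is written back into its header so a later scan recognises it; a failure at either step parks the block in BLOCK_FAILED with no usable sectors. A sketch of just the bookkeeping, with the flash operations replaced by hypothetical callbacks (the state names appear in the matched lines, their enum values here are assumptions):

enum block_state { BLOCK_UNUSED, BLOCK_ERASING, BLOCK_ERASED, BLOCK_OK, BLOCK_FAILED };

struct block_info {
        enum block_state state;
        int free_sectors;
        int used_sectors;
        int erases;
};

/* do_erase/write_magic stand in for mtd_erase() and the header write;
 * both are hypothetical and return 0 on success. */
static int erase_block_sketch(struct block_info *b, int data_sectors_per_block,
                              int (*do_erase)(void), int (*write_magic)(void))
{
        b->state = BLOCK_ERASING;
        b->free_sectors = 0;

        if (do_erase()) {
                b->state = BLOCK_FAILED;
                b->used_sectors = 0;
                return -1;
        }

        b->state = BLOCK_ERASED;
        b->free_sectors = data_sectors_per_block;
        b->used_sectors = 0;
        b->erases++;

        /* stamp RFD_MAGIC back into the freshly erased header */
        if (write_magic())
                b->state = BLOCK_FAILED;
        else
                b->state = BLOCK_OK;

        return b->state == BLOCK_OK ? 0 : -1;
}
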
322 static int move_block_contents(struct partition *part, int block_no, u_long *old_sector) in move_block_contents() argument
329 part->is_reclaiming = 1; in move_block_contents()
335 map = kmalloc(part->header_size, GFP_KERNEL); in move_block_contents()
339 rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset, in move_block_contents()
340 part->header_size, &retlen, (u_char *)map); in move_block_contents()
342 if (!rc && retlen != part->header_size) in move_block_contents()
347 "0x%lx\n", part->mbd.mtd->name, in move_block_contents()
348 part->blocks[block_no].offset); in move_block_contents()
353 for (i=0; i<part->data_sectors_per_block; i++) { in move_block_contents()
365 if (entry >= part->sector_count) in move_block_contents()
368 addr = part->blocks[block_no].offset + in move_block_contents()
369 (i + part->header_sectors_per_block) * SECTOR_SIZE; in move_block_contents()
373 if (!part->blocks[block_no].used_sectors--) { in move_block_contents()
374 rc = erase_block(part, block_no); in move_block_contents()
379 rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen, in move_block_contents()
388 part->mbd.mtd->name); in move_block_contents()
393 rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part, in move_block_contents()
405 part->is_reclaiming = 0; in move_block_contents()
410 static int reclaim_block(struct partition *part, u_long *old_sector) in reclaim_block() argument
416 mtd_sync(part->mbd.mtd); in reclaim_block()
421 old_sector_block = *old_sector / part->block_size; in reclaim_block()
425 for (block=0; block<part->total_blocks; block++) { in reclaim_block()
428 if (block == part->reserved_block) in reclaim_block()
436 if (part->blocks[block].free_sectors) in reclaim_block()
439 this_score = part->blocks[block].used_sectors; in reclaim_block()
445 if (part->blocks[block].used_sectors == in reclaim_block()
446 part->data_sectors_per_block) in reclaim_block()
450 this_score += part->blocks[block].erases; in reclaim_block()
461 part->current_block = -1; in reclaim_block()
462 part->reserved_block = best_block; in reclaim_block()
466 part->blocks[best_block].used_sectors, in reclaim_block()
467 part->blocks[best_block].free_sectors); in reclaim_block()
469 if (part->blocks[best_block].used_sectors) in reclaim_block()
470 rc = move_block_contents(part, best_block, old_sector); in reclaim_block()
472 rc = erase_block(part, best_block); in reclaim_block()
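
reclaim_block() chooses its victim with a simple cost score, visible in the lines above: the reserved block is skipped, any block that still has free sectors postpones reclaim altogether, a completely full block is never worth moving, and otherwise the score is the number of live sectors to copy plus the block's erase count (a mild wear-levelling term). A standalone sketch of that loop (the small adjustment the driver makes for the block containing the caller's old sector is left out):

#include <limits.h>

struct blk {
        int free_sectors;
        int used_sectors;
        int erases;
};

/* Return the cheapest block to reclaim, or -1 if reclaim should wait. */
static int pick_reclaim_victim(const struct blk *blocks, int total_blocks,
                               int reserved_block, int data_sectors_per_block)
{
        int best_block = -1;
        int best_score = INT_MAX;

        for (int block = 0; block < total_blocks; block++) {
                int this_score;

                if (block == reserved_block)
                        continue;

                /* a free sector anywhere means writing can still proceed */
                if (blocks[block].free_sectors)
                        return -1;

                /* no point in moving a block with no dead sectors */
                if (blocks[block].used_sectors == data_sectors_per_block)
                        continue;

                this_score = blocks[block].used_sectors + blocks[block].erases;
                if (this_score < best_score) {
                        best_score = this_score;
                        best_block = block;
                }
        }
        return best_block;
}
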
482 static int find_free_block(struct partition *part) in find_free_block() argument
486 block = part->current_block == -1 ? in find_free_block()
487 jiffies % part->total_blocks : part->current_block; in find_free_block()
491 if (part->blocks[block].free_sectors && in find_free_block()
492 block != part->reserved_block) in find_free_block()
495 if (part->blocks[block].state == BLOCK_UNUSED) in find_free_block()
496 erase_block(part, block); in find_free_block()
498 if (++block >= part->total_blocks) in find_free_block()
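
find_free_block() walks the block array round-robin, starting at the current block or, when there is none, at a jiffies-derived pseudo-random index so writes do not always begin at block 0. A compact sketch of the scan, reusing struct blk from the sketch above (the driver also erases any block still in BLOCK_UNUSED as it passes it; that is reduced to a comment here):

static int find_free_block_sketch(const struct blk *blocks, int total_blocks,
                                  int reserved_block, int start)
{
        int block = start;

        do {
                if (blocks[block].free_sectors && block != reserved_block)
                        return block;

                /* the driver calls erase_block() here for BLOCK_UNUSED blocks */

                if (++block >= total_blocks)
                        block = 0;
        } while (block != start);

        return -1;
}
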
506 static int find_writable_block(struct partition *part, u_long *old_sector) in find_writable_block() argument
511 block = find_free_block(part); in find_writable_block()
514 if (!part->is_reclaiming) { in find_writable_block()
515 rc = reclaim_block(part, old_sector); in find_writable_block()
519 block = find_free_block(part); in find_writable_block()
528 rc = mtd_read(part->mbd.mtd, part->blocks[block].offset, in find_writable_block()
529 part->header_size, &retlen, in find_writable_block()
530 (u_char *)part->header_cache); in find_writable_block()
532 if (!rc && retlen != part->header_size) in find_writable_block()
537 "0x%lx\n", part->mbd.mtd->name, in find_writable_block()
538 part->blocks[block].offset); in find_writable_block()
542 part->current_block = block; in find_writable_block()
548 static int mark_sector_deleted(struct partition *part, u_long old_addr) in mark_sector_deleted() argument
555 block = old_addr / part->block_size; in mark_sector_deleted()
556 offset = (old_addr % part->block_size) / SECTOR_SIZE - in mark_sector_deleted()
557 part->header_sectors_per_block; in mark_sector_deleted()
559 addr = part->blocks[block].offset + in mark_sector_deleted()
561 rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen, in mark_sector_deleted()
569 "0x%lx\n", part->mbd.mtd->name, addr); in mark_sector_deleted()
572 if (block == part->current_block) in mark_sector_deleted()
573 part->header_cache[offset + HEADER_MAP_OFFSET] = del; in mark_sector_deleted()
575 part->blocks[block].used_sectors--; in mark_sector_deleted()
577 if (!part->blocks[block].used_sectors && in mark_sector_deleted()
578 !part->blocks[block].free_sectors) in mark_sector_deleted()
579 rc = erase_block(part, block); in mark_sector_deleted()
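
mark_sector_deleted() works backwards from an absolute data-sector address to the owning erase block and its slot in the on-flash map, then overwrites that 16-bit entry with the deleted marker; a block left with neither used nor free sectors is erased immediately. A sketch of the offset arithmetic only (the HEADER_MAP_OFFSET and SECTOR_SIZE values are assumptions):

#include <stdint.h>

#define SECTOR_SIZE       512   /* assumed */
#define HEADER_MAP_OFFSET 3     /* assumed */

/* Flash address of the 16-bit map entry describing the data sector at
 * old_addr, given the owning block's offset (block = old_addr / block_size
 * in the driver). */
static unsigned long map_entry_addr(unsigned long old_addr,
                                    unsigned long block_size,
                                    unsigned long block_offset,
                                    int header_sectors_per_block)
{
        unsigned long slot = (old_addr % block_size) / SECTOR_SIZE
                             - header_sectors_per_block;

        return block_offset + (HEADER_MAP_OFFSET + slot) * sizeof(uint16_t);
}
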
585 static int find_free_sector(const struct partition *part, const struct block *block) in find_free_sector() argument
589 i = stop = part->data_sectors_per_block - block->free_sectors; in find_free_sector()
592 if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]) in find_free_sector()
596 if (++i == part->data_sectors_per_block) in find_free_sector()
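
find_free_sector() does not scan the map from slot 0 each time: it starts at data_sectors_per_block - block->free_sectors, i.e. just past the entries already handed out, and wraps once around looking for a free slot. A sketch of that scan (SECTOR_FREE's value is an assumption, and the real header cache holds little-endian u16 values, modelled here in host order):

#define SECTOR_FREE 0xffffu   /* assumed "free map entry" marker */

static int find_free_sector_sketch(const unsigned short *map,
                                   int data_sectors_per_block, int free_sectors)
{
        int i, stop;

        i = stop = data_sectors_per_block - free_sectors;

        do {
                if (map[i] == SECTOR_FREE)
                        return i;
                if (++i == data_sectors_per_block)
                        i = 0;
        } while (i != stop);

        return -1;
}
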
606 struct partition *part = (struct partition*)dev; in do_writesect() local
614 if (part->current_block == -1 || in do_writesect()
615 !part->blocks[part->current_block].free_sectors) { in do_writesect()
617 rc = find_writable_block(part, old_addr); in do_writesect()
622 block = &part->blocks[part->current_block]; in do_writesect()
624 i = find_free_sector(part, block); in do_writesect()
631 addr = (i + part->header_sectors_per_block) * SECTOR_SIZE + in do_writesect()
633 rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen, in do_writesect()
641 part->mbd.mtd->name, addr); in do_writesect()
645 part->sector_map[sector] = addr; in do_writesect()
649 part->header_cache[i + HEADER_MAP_OFFSET] = entry; in do_writesect()
652 rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen, in do_writesect()
660 part->mbd.mtd->name, addr); in do_writesect()
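
do_writesect() writes the payload into the chosen free slot, updates the in-RAM sector_map, and only then records the logical sector number in the on-flash map. Logical sector 0 cannot be stored literally because 0x0000 doubles as the deleted marker, so it is encoded with a separate SECTOR_ZERO code. A sketch of the address and entry computations (sentinel values assumed; the cpu_to_le16() conversion is omitted):

#include <stdint.h>

#define SECTOR_SIZE  512       /* assumed */
#define SECTOR_ZERO  0xfffeu   /* assumed code standing in for logical sector 0 */

/* Flash address of map slot i's data sector within the chosen block. */
static unsigned long data_sector_addr(unsigned long block_offset,
                                      int header_sectors_per_block, int i)
{
        return block_offset +
               (unsigned long)(i + header_sectors_per_block) * SECTOR_SIZE;
}

/* Map entry stored for a logical sector number. */
static uint16_t map_entry_for(unsigned long sector)
{
        return sector ? (uint16_t)sector : (uint16_t)SECTOR_ZERO;
}
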
672 struct partition *part = (struct partition*)dev; in rfd_ftl_writesect() local
679 if (part->reserved_block == -1) { in rfd_ftl_writesect()
684 if (sector >= part->sector_count) { in rfd_ftl_writesect()
689 old_addr = part->sector_map[sector]; in rfd_ftl_writesect()
702 part->sector_map[sector] = -1; in rfd_ftl_writesect()
705 rc = mark_sector_deleted(part, old_addr); in rfd_ftl_writesect()
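
rfd_ftl_writesect() adds one shortcut on top of do_writesect(): a buffer that is entirely zero is never written to flash; its sector_map entry is simply set to -1 (so reads return zeroes, matching the read-path sketch earlier) and any previous on-flash copy is marked deleted. A sketch of that top-level decision, with the helpers reduced to comments (this behaviour is inferred from the surrounding driver; only parts of it appear in the matched lines):

#include <string.h>

#define SECTOR_SIZE 512   /* assumed */

static int sector_is_all_zero(const char *buf)
{
        static const char zero[SECTOR_SIZE];
        return memcmp(buf, zero, SECTOR_SIZE) == 0;
}

static int writesect_sketch(long *sector_map, unsigned long sector,
                            unsigned long sector_count, const char *buf,
                            int reserved_block)
{
        long old_addr;

        if (reserved_block == -1)
                return -1;            /* no spare block: -EACCES in the driver */
        if (sector >= sector_count)
                return -1;            /* -EIO in the driver */

        old_addr = sector_map[sector];

        if (sector_is_all_zero(buf))
                sector_map[sector] = -1;   /* represent the sector as "unmapped" */
        /* else: do_writesect() places the data and updates sector_map */

        if (old_addr != -1) {
                /* mark_sector_deleted(part, old_addr) in the driver */
        }
        return 0;
}
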
713 struct partition *part = (struct partition*)dev; in rfd_ftl_getgeo() local
717 geo->cylinders = part->cylinders; in rfd_ftl_getgeo()
724 struct partition *part; in rfd_ftl_add_mtd() local
729 part = kzalloc(sizeof(struct partition), GFP_KERNEL); in rfd_ftl_add_mtd()
730 if (!part) in rfd_ftl_add_mtd()
733 part->mbd.mtd = mtd; in rfd_ftl_add_mtd()
736 part->block_size = block_size; in rfd_ftl_add_mtd()
742 part->block_size = mtd->erasesize; in rfd_ftl_add_mtd()
745 if (scan_header(part) == 0) { in rfd_ftl_add_mtd()
746 part->mbd.size = part->sector_count; in rfd_ftl_add_mtd()
747 part->mbd.tr = tr; in rfd_ftl_add_mtd()
748 part->mbd.devnum = -1; in rfd_ftl_add_mtd()
750 part->mbd.readonly = 1; in rfd_ftl_add_mtd()
751 else if (part->errors) { in rfd_ftl_add_mtd()
754 part->mbd.readonly = 1; in rfd_ftl_add_mtd()
760 if (!add_mtd_blktrans_dev((void*)part)) in rfd_ftl_add_mtd()
764 kfree(part); in rfd_ftl_add_mtd()
769 struct partition *part = (struct partition*)dev; in rfd_ftl_remove_dev() local
772 for (i=0; i<part->total_blocks; i++) { in rfd_ftl_remove_dev()
774 part->mbd.mtd->name, i, part->blocks[i].erases); in rfd_ftl_remove_dev()
778 vfree(part->sector_map); in rfd_ftl_remove_dev()
779 kfree(part->header_cache); in rfd_ftl_remove_dev()
780 kfree(part->blocks); in rfd_ftl_remove_dev()