Lines Matching +full:two +full:- +full:lane (drivers/nvdimm/btt.c, Block Translation Table driver)
1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (c) 2014-2015, Intel Corporation.
18 #include <linux/backing-dev.h>
29 return &arena->nd_btt->dev; in to_dev()
34 return offset + nd_btt->initial_offset; in adjust_initial_offset()
40 struct nd_btt *nd_btt = arena->nd_btt; in arena_read_bytes()
41 struct nd_namespace_common *ndns = nd_btt->ndns; in arena_read_bytes()
51 struct nd_btt *nd_btt = arena->nd_btt; in arena_write_bytes()
52 struct nd_namespace_common *ndns = nd_btt->ndns; in arena_write_bytes()
68 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512), in btt_info_write()
69 "arena->infooff: %#llx is unaligned\n", arena->infooff); in btt_info_write()
70 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512), in btt_info_write()
71 "arena->info2off: %#llx is unaligned\n", arena->info2off); in btt_info_write()
73 ret = arena_write_bytes(arena, arena->info2off, super, in btt_info_write()
78 return arena_write_bytes(arena, arena->infooff, super, in btt_info_write()
84 return arena_read_bytes(arena, arena->infooff, super, in btt_info_read()
91 * mapping is in little-endian
97 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE); in __btt_map_write()
99 if (unlikely(lba >= arena->external_nlba)) in __btt_map_write()
102 __func__, lba, arena->external_nlba); in __btt_map_write()
143 return -EIO; in btt_map_write()
156 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE); in btt_map_read()
158 if (unlikely(lba >= arena->external_nlba)) in btt_map_read()
161 __func__, lba, arena->external_nlba); in btt_map_read()
195 return -EIO; in btt_map_read()
206 static int btt_log_group_read(struct arena_info *arena, u32 lane, in btt_log_group_read() argument
210 arena->logoff + (lane * LOG_GRP_SIZE), log, in btt_log_group_read()
230 a->debugfs_dir = d; in arena_debugfs_init()
232 debugfs_create_x64("size", S_IRUGO, d, &a->size); in arena_debugfs_init()
234 &a->external_lba_start); in arena_debugfs_init()
235 debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba); in arena_debugfs_init()
237 &a->internal_lbasize); in arena_debugfs_init()
238 debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba); in arena_debugfs_init()
240 &a->external_lbasize); in arena_debugfs_init()
241 debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree); in arena_debugfs_init()
242 debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major); in arena_debugfs_init()
243 debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor); in arena_debugfs_init()
244 debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff); in arena_debugfs_init()
245 debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff); in arena_debugfs_init()
246 debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff); in arena_debugfs_init()
247 debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff); in arena_debugfs_init()
248 debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff); in arena_debugfs_init()
249 debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off); in arena_debugfs_init()
250 debugfs_create_x32("flags", S_IRUGO, d, &a->flags); in arena_debugfs_init()
251 debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]); in arena_debugfs_init()
252 debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]); in arena_debugfs_init()
260 btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev), in btt_debugfs_init()
262 if (IS_ERR_OR_NULL(btt->debugfs_dir)) in btt_debugfs_init()
265 list_for_each_entry(arena, &btt->arena_list, list) { in btt_debugfs_init()
266 arena_debugfs_init(arena, btt->debugfs_dir, i); in btt_debugfs_init()
273 return le32_to_cpu(log->ent[log_idx].seq); in log_seq()
277 * This function accepts two log entries, and uses the
283 * TODO The logic feels a bit kludge-y. Make it better. in btt_log_get_old()
287 int idx0 = a->log_index[0]; in btt_log_get_old()
288 int idx1 = a->log_index[1]; in btt_log_get_old()
297 log->ent[idx0].seq = cpu_to_le32(1); in btt_log_get_old()
302 return -EINVAL; in btt_log_get_old()
304 return -EINVAL; in btt_log_get_old()
307 if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1) in btt_log_get_old()
312 if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1) in btt_log_get_old()
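The sequence numbers compared in the two lines above cycle 1 -> 2 -> 3 -> 1, with 0 reserved for an uninitialized slot: if the two entries differ by exactly 1 the lower sequence is the older entry, while a difference of 2 means the counter wrapped and the higher sequence is actually the older one. A minimal sketch of that wraparound increment, for reference (mirroring the driver's nd_inc_seq() helper):

	/* sketch: 2-bit sequence numbers wrap 1 -> 2 -> 3 -> 1; 0 stays 0 (uninitialized) */
	static u32 seq_next(u32 seq)
	{
		static const u32 next[] = { 0, 2, 3, 1 };

		return next[seq & 3];
	}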
323 * it is not NULL. It returns the sub-slot number (0 or 1)
327 static int btt_log_read(struct arena_info *arena, u32 lane, in btt_log_read() argument
334 ret = btt_log_group_read(arena, lane, &log); in btt_log_read()
336 return -EIO; in btt_log_read()
341 "log corruption (%d): lane %d seq [%d, %d]\n", in btt_log_read()
342 old_ent, lane, log.ent[arena->log_index[0]].seq, in btt_log_read()
343 log.ent[arena->log_index[1]].seq); in btt_log_read()
345 return -EIO; in btt_log_read()
348 ret_ent = (old_flag ? old_ent : (1 - old_ent)); in btt_log_read()
351 memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE); in btt_log_read()
361 static int __btt_log_write(struct arena_info *arena, u32 lane, in __btt_log_write() argument
365 u32 group_slot = arena->log_index[sub]; in __btt_log_write()
370 ns_off = arena->logoff + (lane * LOG_GRP_SIZE) + in __btt_log_write()
382 static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub, in btt_flog_write() argument
387 ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC); in btt_flog_write()
392 arena->freelist[lane].sub = 1 - arena->freelist[lane].sub; in btt_flog_write()
393 if (++(arena->freelist[lane].seq) == 4) in btt_flog_write()
394 arena->freelist[lane].seq = 1; in btt_flog_write()
395 if (ent_e_flag(le32_to_cpu(ent->old_map))) in btt_flog_write()
396 arena->freelist[lane].has_err = 1; in btt_flog_write()
397 arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map)); in btt_flog_write()
404 * all-zeroes, and indicates an identity mapping
408 int ret = -EINVAL; in btt_map_init()
412 size_t mapsize = arena->logoff - arena->mapoff; in btt_map_init()
416 return -ENOMEM; in btt_map_init()
423 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512), in btt_map_init()
424 "arena->mapoff: %#llx is unaligned\n", arena->mapoff); in btt_map_init()
431 ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf, in btt_map_init()
437 mapsize -= size; in btt_map_init()
452 size_t logsize = arena->info2off - arena->logoff; in btt_log_init()
461 return -ENOMEM; in btt_log_init()
467 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512), in btt_log_init()
468 "arena->logoff: %#llx is unaligned\n", arena->logoff); in btt_log_init()
475 ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf, in btt_log_init()
481 logsize -= size; in btt_log_init()
485 for (i = 0; i < arena->nfree; i++) { in btt_log_init()
487 ent.old_map = cpu_to_le32(arena->external_nlba + i); in btt_log_init()
488 ent.new_map = cpu_to_le32(arena->external_nlba + i); in btt_log_init()
502 return arena->dataoff + ((u64)lba * arena->internal_lbasize); in to_namespace_offset()
505 static int arena_clear_freelist_error(struct arena_info *arena, u32 lane) in arena_clear_freelist_error() argument
509 if (arena->freelist[lane].has_err) { in arena_clear_freelist_error()
511 u32 lba = arena->freelist[lane].block; in arena_clear_freelist_error()
513 unsigned long len = arena->sector_size; in arena_clear_freelist_error()
515 mutex_lock(&arena->err_lock); in arena_clear_freelist_error()
524 len -= chunk; in arena_clear_freelist_error()
527 arena->freelist[lane].has_err = 0; in arena_clear_freelist_error()
529 mutex_unlock(&arena->err_lock); in arena_clear_freelist_error()
540 arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry), in btt_freelist_init()
542 if (!arena->freelist) in btt_freelist_init()
543 return -ENOMEM; in btt_freelist_init()
545 for (i = 0; i < arena->nfree; i++) { in btt_freelist_init()
555 arena->freelist[i].sub = 1 - new; in btt_freelist_init()
556 arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq)); in btt_freelist_init()
557 arena->freelist[i].block = log_oldmap; in btt_freelist_init()
561 * the BTT read-only in btt_freelist_init()
565 arena->freelist[i].has_err = 1; in btt_freelist_init()
606 return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0) in ent_is_padding()
607 && (ent->seq == 0); in ent_is_padding()
613 * four slots. We expect that a padding slot will be all-zeroes, and use this
626 int ret, log_index[2] = {-1, -1}; in log_set_indices()
631 for (i = 0; i < arena->nfree; i++) { in log_set_indices()
651 /* two valid entries found */ in log_set_indices()
655 return -ENXIO; in log_set_indices()
667 return -ENXIO; in log_set_indices()
672 * lane never got used and it is still in log_set_indices()
679 return -ENXIO; in log_set_indices()
685 * non-padding entry, then we are no longer in the in log_set_indices()
694 return -ENXIO; in log_set_indices()
711 return -ENXIO; in log_set_indices()
714 arena->log_index[0] = log_index[0]; in log_set_indices()
715 arena->log_index[1] = log_index[1]; in log_set_indices()
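For context, each lane's log group (LOG_GRP_SIZE bytes, read by btt_log_group_read() above) holds four equally sized slots, of which only two contain live flog entries; log_set_indices() works out which pair is on media. Summarizing the two layouts it accepts, per the log_group comments in btt.h (the kernel-version boundary is stated here from memory):

	entries at slots {0, 1}, padding at {2, 3}: the corrected layout matching the BTT specification
	entries at slots {0, 2}, padding at {1, 3}: the layout written by older (reportedly pre-4.15) Linux BTT implementations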
723 arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL); in btt_rtt_init()
724 if (arena->rtt == NULL) in btt_rtt_init()
725 return -ENOMEM; in btt_rtt_init()
734 arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock), in btt_maplocks_init()
736 if (!arena->map_locks) in btt_maplocks_init()
737 return -ENOMEM; in btt_maplocks_init()
739 for (i = 0; i < arena->nfree; i++) in btt_maplocks_init()
740 spin_lock_init(&arena->map_locks[i].lock); in btt_maplocks_init()
755 arena->nd_btt = btt->nd_btt; in alloc_arena()
756 arena->sector_size = btt->sector_size; in alloc_arena()
757 mutex_init(&arena->err_lock); in alloc_arena()
762 arena->size = size; in alloc_arena()
763 arena->external_lba_start = start; in alloc_arena()
764 arena->external_lbasize = btt->lbasize; in alloc_arena()
765 arena->internal_lbasize = roundup(arena->external_lbasize, in alloc_arena()
767 arena->nfree = BTT_DEFAULT_NFREE; in alloc_arena()
768 arena->version_major = btt->nd_btt->version_major; in alloc_arena()
769 arena->version_minor = btt->nd_btt->version_minor; in alloc_arena()
772 available -= (available % BTT_PG_SIZE); in alloc_arena()
774 /* Two pages are reserved for the super block and its copy */ in alloc_arena()
775 available -= 2 * BTT_PG_SIZE; in alloc_arena()
778 logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE); in alloc_arena()
779 available -= logsize; in alloc_arena()
782 arena->internal_nlba = div_u64(available - BTT_PG_SIZE, in alloc_arena()
783 arena->internal_lbasize + MAP_ENT_SIZE); in alloc_arena()
784 arena->external_nlba = arena->internal_nlba - arena->nfree; in alloc_arena()
786 mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE); in alloc_arena()
787 datasize = available - mapsize; in alloc_arena()
790 arena->infooff = arena_off; in alloc_arena()
791 arena->dataoff = arena->infooff + BTT_PG_SIZE; in alloc_arena()
792 arena->mapoff = arena->dataoff + datasize; in alloc_arena()
793 arena->logoff = arena->mapoff + mapsize; in alloc_arena()
794 arena->info2off = arena->logoff + logsize; in alloc_arena()
797 arena->log_index[0] = 0; in alloc_arena()
798 arena->log_index[1] = 1; in alloc_arena()
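Taken together, the offset assignments above lay each arena out in this order on media, with the info block and its backup each occupying one BTT_PG_SIZE page:

	infooff    arena info block
	dataoff    data area          (infooff + BTT_PG_SIZE)
	mapoff     map                (dataoff + datasize)
	logoff     log                (mapoff + mapsize)
	info2off   backup info block  (logoff + logsize)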
806 list_for_each_entry_safe(arena, next, &btt->arena_list, list) { in free_arenas()
807 list_del(&arena->list); in free_arenas()
808 kfree(arena->rtt); in free_arenas()
809 kfree(arena->map_locks); in free_arenas()
810 kfree(arena->freelist); in free_arenas()
811 debugfs_remove_recursive(arena->debugfs_dir); in free_arenas()
823 arena->internal_nlba = le32_to_cpu(super->internal_nlba); in parse_arena_meta()
824 arena->internal_lbasize = le32_to_cpu(super->internal_lbasize); in parse_arena_meta()
825 arena->external_nlba = le32_to_cpu(super->external_nlba); in parse_arena_meta()
826 arena->external_lbasize = le32_to_cpu(super->external_lbasize); in parse_arena_meta()
827 arena->nfree = le32_to_cpu(super->nfree); in parse_arena_meta()
828 arena->version_major = le16_to_cpu(super->version_major); in parse_arena_meta()
829 arena->version_minor = le16_to_cpu(super->version_minor); in parse_arena_meta()
831 arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off + in parse_arena_meta()
832 le64_to_cpu(super->nextoff)); in parse_arena_meta()
833 arena->infooff = arena_off; in parse_arena_meta()
834 arena->dataoff = arena_off + le64_to_cpu(super->dataoff); in parse_arena_meta()
835 arena->mapoff = arena_off + le64_to_cpu(super->mapoff); in parse_arena_meta()
836 arena->logoff = arena_off + le64_to_cpu(super->logoff); in parse_arena_meta()
837 arena->info2off = arena_off + le64_to_cpu(super->info2off); in parse_arena_meta()
839 arena->size = (le64_to_cpu(super->nextoff) > 0) in parse_arena_meta()
840 ? (le64_to_cpu(super->nextoff)) in parse_arena_meta()
841 : (arena->info2off - arena->infooff + BTT_PG_SIZE); in parse_arena_meta()
843 arena->flags = le32_to_cpu(super->flags); in parse_arena_meta()
851 size_t remaining = btt->rawsize; in discover_arenas()
858 return -ENOMEM; in discover_arenas()
864 ret = -ENOMEM; in discover_arenas()
868 arena->infooff = cur_off; in discover_arenas()
873 if (!nd_btt_arena_is_valid(btt->nd_btt, super)) { in discover_arenas()
874 if (remaining == btt->rawsize) { in discover_arenas()
875 btt->init_state = INIT_NOTFOUND; in discover_arenas()
881 ret = -ENODEV; in discover_arenas()
886 arena->external_lba_start = cur_nlba; in discover_arenas()
908 list_add_tail(&arena->list, &btt->arena_list); in discover_arenas()
910 remaining -= arena->size; in discover_arenas()
911 cur_off += arena->size; in discover_arenas()
912 cur_nlba += arena->external_nlba; in discover_arenas()
915 if (arena->nextoff == 0) in discover_arenas()
918 btt->num_arenas = num_arenas; in discover_arenas()
919 btt->nlba = cur_nlba; in discover_arenas()
920 btt->init_state = INIT_READY; in discover_arenas()
935 size_t remaining = btt->rawsize; in create_arenas()
942 remaining -= arena_size; in create_arenas()
946 arena = alloc_arena(btt, arena_size, btt->nlba, cur_off); in create_arenas()
949 return -ENOMEM; in create_arenas()
951 btt->nlba += arena->external_nlba; in create_arenas()
953 arena->nextoff = arena->size; in create_arenas()
955 arena->nextoff = 0; in create_arenas()
957 list_add_tail(&arena->list, &btt->arena_list); in create_arenas()
974 struct nd_btt *nd_btt = arena->nd_btt; in btt_arena_write_layout()
975 const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev); in btt_arena_write_layout()
987 return -ENOMEM; in btt_arena_write_layout()
989 strncpy(super->signature, BTT_SIG, BTT_SIG_LEN); in btt_arena_write_layout()
990 memcpy(super->uuid, nd_btt->uuid, 16); in btt_arena_write_layout()
991 memcpy(super->parent_uuid, parent_uuid, 16); in btt_arena_write_layout()
992 super->flags = cpu_to_le32(arena->flags); in btt_arena_write_layout()
993 super->version_major = cpu_to_le16(arena->version_major); in btt_arena_write_layout()
994 super->version_minor = cpu_to_le16(arena->version_minor); in btt_arena_write_layout()
995 super->external_lbasize = cpu_to_le32(arena->external_lbasize); in btt_arena_write_layout()
996 super->external_nlba = cpu_to_le32(arena->external_nlba); in btt_arena_write_layout()
997 super->internal_lbasize = cpu_to_le32(arena->internal_lbasize); in btt_arena_write_layout()
998 super->internal_nlba = cpu_to_le32(arena->internal_nlba); in btt_arena_write_layout()
999 super->nfree = cpu_to_le32(arena->nfree); in btt_arena_write_layout()
1000 super->infosize = cpu_to_le32(sizeof(struct btt_sb)); in btt_arena_write_layout()
1001 super->nextoff = cpu_to_le64(arena->nextoff); in btt_arena_write_layout()
1003 * Subtract arena->infooff (arena start) so numbers are relative in btt_arena_write_layout()
1006 super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff); in btt_arena_write_layout()
1007 super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff); in btt_arena_write_layout()
1008 super->logoff = cpu_to_le64(arena->logoff - arena->infooff); in btt_arena_write_layout()
1009 super->info2off = cpu_to_le64(arena->info2off - arena->infooff); in btt_arena_write_layout()
1011 super->flags = 0; in btt_arena_write_layout()
1013 super->checksum = cpu_to_le64(sum); in btt_arena_write_layout()
1030 mutex_lock(&btt->init_lock); in btt_meta_init()
1031 list_for_each_entry(arena, &btt->arena_list, list) { in btt_meta_init()
1049 btt->init_state = INIT_READY; in btt_meta_init()
1052 mutex_unlock(&btt->init_lock); in btt_meta_init()
1058 return btt->lbasize - btt->sector_size; in btt_meta_size()
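For example, a BTT created with an lbasize of 4104 exposes 4096-byte sectors (see the sector_size selection in btt_init() further down) and therefore carries 4104 - 4096 = 8 bytes of integrity metadata per block; a 520-byte lbasize over 512-byte sectors likewise yields 8 bytes.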
1072 __u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size); in lba_to_arena()
1074 list_for_each_entry(arena_list, &btt->arena_list, list) { in lba_to_arena()
1075 if (lba < arena_list->external_nlba) { in lba_to_arena()
1080 lba -= arena_list->external_nlba; in lba_to_arena()
1083 return -EIO; in lba_to_arena()
1091 __acquires(&arena->map_locks[idx].lock) in lock_map()
1093 u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree; in lock_map()
1095 spin_lock(&arena->map_locks[idx].lock); in lock_map()
1099 __releases(&arena->map_locks[idx].lock) in unlock_map()
1101 u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree; in unlock_map()
1103 spin_unlock(&arena->map_locks[idx].lock); in unlock_map()
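The index computed above groups map entries by cache line: map entries are MAP_ENT_SIZE (4) bytes, so with a typical 64-byte L1_CACHE_BYTES sixteen consecutive premap blocks land on the same cache line and always hash to the same lock, while the modulo spreads distant entries across all nfree locks.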
1151 meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size; in btt_rw_integrity()
1158 bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter); in btt_rw_integrity()
1161 * .bv_offset already adjusted for iter->bi_bvec_done, and we in btt_rw_integrity()
1180 len -= cur_len; in btt_rw_integrity()
1182 if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len)) in btt_rw_integrity()
1183 return -EIO; in btt_rw_integrity()
1204 u32 lane = 0, premap, postmap; in btt_read_pg() local
1209 lane = nd_region_acquire_lane(btt->nd_region); in btt_read_pg()
1215 cur_len = min(btt->sector_size, len); in btt_read_pg()
1237 ret = -EIO; in btt_read_pg()
1241 arena->rtt[lane] = RTT_VALID | postmap; in btt_read_pg()
1264 /* Media error - set the e_flag */ in btt_read_pg()
1278 arena->rtt[lane] = RTT_INVALID; in btt_read_pg()
1279 nd_region_release_lane(btt->nd_region, lane); in btt_read_pg()
1281 len -= cur_len; in btt_read_pg()
1283 sector += btt->sector_size >> SECTOR_SHIFT; in btt_read_pg()
1289 arena->rtt[lane] = RTT_INVALID; in btt_read_pg()
1291 nd_region_release_lane(btt->nd_region, lane); in btt_read_pg()
1303 u64 nsoff = adjust_initial_offset(arena->nd_btt, in btt_is_badblock()
1307 return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize); in btt_is_badblock()
1316 u32 premap = 0, old_postmap, new_postmap, lane = 0, i; in btt_write_pg() local
1325 lane = nd_region_acquire_lane(btt->nd_region); in btt_write_pg()
1330 cur_len = min(btt->sector_size, len); in btt_write_pg()
1332 if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) { in btt_write_pg()
1333 ret = -EIO; in btt_write_pg()
1337 if (btt_is_badblock(btt, arena, arena->freelist[lane].block)) in btt_write_pg()
1338 arena->freelist[lane].has_err = 1; in btt_write_pg()
1340 if (mutex_is_locked(&arena->err_lock) in btt_write_pg()
1341 || arena->freelist[lane].has_err) { in btt_write_pg()
1342 nd_region_release_lane(btt->nd_region, lane); in btt_write_pg()
1344 ret = arena_clear_freelist_error(arena, lane); in btt_write_pg()
1348 /* OK to acquire a different lane/free block */ in btt_write_pg()
1352 new_postmap = arena->freelist[lane].block; in btt_write_pg()
1355 for (i = 0; i < arena->nfree; i++) in btt_write_pg()
1356 while (arena->rtt[i] == (RTT_VALID | new_postmap)) in btt_write_pg()
1360 if (new_postmap >= arena->internal_nlba) { in btt_write_pg()
1361 ret = -EIO; in btt_write_pg()
1381 if (old_postmap >= arena->internal_nlba) { in btt_write_pg()
1382 ret = -EIO; in btt_write_pg()
1391 log.seq = cpu_to_le32(arena->freelist[lane].seq); in btt_write_pg()
1392 sub = arena->freelist[lane].sub; in btt_write_pg()
1393 ret = btt_flog_write(arena, lane, sub, &log); in btt_write_pg()
1403 nd_region_release_lane(btt->nd_region, lane); in btt_write_pg()
1406 ret = arena_clear_freelist_error(arena, lane); in btt_write_pg()
1411 len -= cur_len; in btt_write_pg()
1413 sector += btt->sector_size >> SECTOR_SHIFT; in btt_write_pg()
1421 nd_region_release_lane(btt->nd_region, lane); in btt_write_pg()
1445 struct btt *btt = bio->bi_disk->private_data; in btt_submit_bio()
1455 do_acct = blk_queue_io_stat(bio->bi_disk->queue); in btt_submit_bio()
1461 if (len > PAGE_SIZE || len < btt->sector_size || in btt_submit_bio()
1462 len % btt->sector_size) { in btt_submit_bio()
1463 dev_err_ratelimited(&btt->nd_btt->dev, in btt_submit_bio()
1465 bio->bi_status = BLK_STS_IOERR; in btt_submit_bio()
1472 dev_err(&btt->nd_btt->dev, in btt_submit_bio()
1477 bio->bi_status = errno_to_blk_status(err); in btt_submit_bio()
1491 struct btt *btt = bdev->bd_disk->private_data; in btt_rw_page()
1505 geo->heads = 1 << 6; in btt_getgeo()
1506 geo->sectors = 1 << 5; in btt_getgeo()
1507 geo->cylinders = get_capacity(bd->bd_disk) >> 11; in btt_getgeo()
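The synthetic geometry above uses 64 heads and 32 sectors per track, so one cylinder covers 64 * 32 = 2048 = 2^11 sectors; shifting the capacity right by 11 therefore yields the cylinder count.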
1520 struct nd_btt *nd_btt = btt->nd_btt; in btt_blk_init()
1521 struct nd_namespace_common *ndns = nd_btt->ndns; in btt_blk_init()
1524 btt->btt_queue = blk_alloc_queue(NUMA_NO_NODE); in btt_blk_init()
1525 if (!btt->btt_queue) in btt_blk_init()
1526 return -ENOMEM; in btt_blk_init()
1528 btt->btt_disk = alloc_disk(0); in btt_blk_init()
1529 if (!btt->btt_disk) { in btt_blk_init()
1530 blk_cleanup_queue(btt->btt_queue); in btt_blk_init()
1531 return -ENOMEM; in btt_blk_init()
1534 nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name); in btt_blk_init()
1535 btt->btt_disk->first_minor = 0; in btt_blk_init()
1536 btt->btt_disk->fops = &btt_fops; in btt_blk_init()
1537 btt->btt_disk->private_data = btt; in btt_blk_init()
1538 btt->btt_disk->queue = btt->btt_queue; in btt_blk_init()
1539 btt->btt_disk->flags = GENHD_FL_EXT_DEVT; in btt_blk_init()
1541 blk_queue_logical_block_size(btt->btt_queue, btt->sector_size); in btt_blk_init()
1542 blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX); in btt_blk_init()
1543 blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue); in btt_blk_init()
1546 int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt)); in btt_blk_init()
1549 del_gendisk(btt->btt_disk); in btt_blk_init()
1550 put_disk(btt->btt_disk); in btt_blk_init()
1551 blk_cleanup_queue(btt->btt_queue); in btt_blk_init()
1555 set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); in btt_blk_init()
1556 device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL); in btt_blk_init()
1557 btt->nd_btt->size = btt->nlba * (u64)btt->sector_size; in btt_blk_init()
1558 nvdimm_check_and_set_ro(btt->btt_disk); in btt_blk_init()
1565 del_gendisk(btt->btt_disk); in btt_blk_cleanup()
1566 put_disk(btt->btt_disk); in btt_blk_cleanup()
1567 blk_cleanup_queue(btt->btt_queue); in btt_blk_cleanup()
1571 * btt_init - initialize a block translation table for the given device
1575 * @uuid: A uuid for the backing device - this is stored on media
1593 struct device *dev = &nd_btt->dev; in btt_init()
1599 btt->nd_btt = nd_btt; in btt_init()
1600 btt->rawsize = rawsize; in btt_init()
1601 btt->lbasize = lbasize; in btt_init()
1602 btt->sector_size = ((lbasize >= 4096) ? 4096 : 512); in btt_init()
1603 INIT_LIST_HEAD(&btt->arena_list); in btt_init()
1604 mutex_init(&btt->init_lock); in btt_init()
1605 btt->nd_region = nd_region; in btt_init()
1606 nsio = to_nd_namespace_io(&nd_btt->ndns->dev); in btt_init()
1607 btt->phys_bb = &nsio->bb; in btt_init()
1615 if (btt->init_state != INIT_READY && nd_region->ro) { in btt_init()
1616 dev_warn(dev, "%s is read-only, unable to init btt metadata\n", in btt_init()
1617 dev_name(&nd_region->dev)); in btt_init()
1619 } else if (btt->init_state != INIT_READY) { in btt_init()
1620 btt->num_arenas = (rawsize / ARENA_MAX_SIZE) + in btt_init()
1623 btt->num_arenas, rawsize); in btt_init()
1650 * btt_fini - de-initialize a BTT
1653 * De-initialize a Block Translation Table on device removal
1663 debugfs_remove_recursive(btt->debugfs_dir); in btt_fini()
1669 struct nd_btt *nd_btt = to_nd_btt(ndns->claim); in nvdimm_namespace_attach_btt()
1676 if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) { in nvdimm_namespace_attach_btt()
1677 dev_dbg(&nd_btt->dev, "incomplete btt configuration\n"); in nvdimm_namespace_attach_btt()
1678 return -ENODEV; in nvdimm_namespace_attach_btt()
1681 btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL); in nvdimm_namespace_attach_btt()
1683 return -ENOMEM; in nvdimm_namespace_attach_btt()
1686 rc = devm_namespace_enable(&nd_btt->dev, ndns, size); in nvdimm_namespace_attach_btt()
1698 rawsize = size - nd_btt->initial_offset; in nvdimm_namespace_attach_btt()
1700 dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n", in nvdimm_namespace_attach_btt()
1701 dev_name(&ndns->dev), in nvdimm_namespace_attach_btt()
1702 ARENA_MIN_SIZE + nd_btt->initial_offset); in nvdimm_namespace_attach_btt()
1703 return -ENXIO; in nvdimm_namespace_attach_btt()
1705 nd_region = to_nd_region(nd_btt->dev.parent); in nvdimm_namespace_attach_btt()
1706 btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid, in nvdimm_namespace_attach_btt()
1709 return -ENOMEM; in nvdimm_namespace_attach_btt()
1710 nd_btt->btt = btt; in nvdimm_namespace_attach_btt()
1718 struct btt *btt = nd_btt->btt; in nvdimm_namespace_detach_btt()
1721 nd_btt->btt = NULL; in nvdimm_namespace_detach_btt()
1733 rc = -ENXIO; in nd_btt_init()