// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS direct block pointer.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 */

#include <linux/errno.h>
#include "nilfs.h"
#include "page.h"
#include "direct.h"
#include "alloc.h"
#include "dat.h"

/*
 * nilfs_direct_dptrs - get the array of block pointers in a direct bmap
 * @direct: bmap structure holding a direct mapping
 *
 * The pointer array is stored immediately after the nilfs_direct_node
 * header embedded in @direct->b_u.u_data; "+ 1" skips over that header.
 */
static inline __le64 *nilfs_direct_dptrs(const struct nilfs_bmap *direct)
{
	return (__le64 *)
		((struct nilfs_direct_node *)direct->b_u.u_data + 1);
}

/*
 * nilfs_direct_get_ptr - read the block pointer stored at @key
 * (converted from on-disk little-endian to host byte order)
 */
static inline __u64
nilfs_direct_get_ptr(const struct nilfs_bmap *direct, __u64 key)
{
	return le64_to_cpu(*(nilfs_direct_dptrs(direct) + key));
}

/*
 * nilfs_direct_set_ptr - store block pointer @ptr at @key
 * (converted to on-disk little-endian byte order)
 */
static inline void nilfs_direct_set_ptr(struct nilfs_bmap *direct,
					__u64 key, __u64 ptr)
{
	*(nilfs_direct_dptrs(direct) + key) = cpu_to_le64(ptr);
}

/*
 * nilfs_direct_lookup - look up the block pointer for @key
 * @direct: bmap
 * @key:    block offset to look up
 * @level:  lookup level; only level 1 is valid for a direct mapping
 * @ptrp:   place to store the found pointer
 *
 * Return: 0 on success, or -ENOENT if @key is out of range, @level is
 * not 1, or no pointer is assigned at @key.
 */
static int nilfs_direct_lookup(const struct nilfs_bmap *direct,
			       __u64 key, int level, __u64 *ptrp)
{
	__u64 ptr;

	if (key > NILFS_DIRECT_KEY_MAX || level != 1)
		return -ENOENT;
	ptr = nilfs_direct_get_ptr(direct, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	*ptrp = ptr;
	return 0;
}

/*
 * nilfs_direct_lookup_contig - look up a run of contiguous blocks
 * @direct:    bmap
 * @key:       starting block offset
 * @ptrp:      place to store the (possibly DAT-translated) start pointer
 * @maxblocks: maximum number of blocks the caller wants
 *
 * Starting at @key, counts how many consecutive keys map to consecutive
 * disk block numbers.  When the bmap uses virtual block numbers
 * (NILFS_BMAP_USE_VBN), each pointer is first translated through the DAT
 * before the contiguity check.
 *
 * Return: the number of contiguous blocks found (>= 1) with *@ptrp set to
 * the first disk block number, -ENOENT if nothing is mapped at @key, or a
 * negative error code from DAT translation.  A -ENOENT from
 * nilfs_dat_translate() is converted to -EINVAL to tell the bmap layer
 * the metadata is corrupted (a mapped pointer must be translatable).
 */
static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
				      __u64 key, __u64 *ptrp,
				      unsigned int maxblocks)
{
	struct inode *dat = NULL;
	__u64 ptr, ptr2;
	sector_t blocknr;
	int ret, cnt;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	ptr = nilfs_direct_get_ptr(direct, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	if (NILFS_BMAP_USE_VBN(direct)) {
		dat = nilfs_bmap_get_dat(direct);
		ret = nilfs_dat_translate(dat, ptr, &blocknr);
		if (ret < 0)
			goto dat_error;
		ptr = blocknr;
	}

	/* Never scan past the end of the direct-pointer array. */
	maxblocks = min_t(unsigned int, maxblocks,
			  NILFS_DIRECT_KEY_MAX - key + 1);
	for (cnt = 1; cnt < maxblocks &&
		     (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) !=
		     NILFS_BMAP_INVALID_PTR;
	     cnt++) {
		if (dat) {
			ret = nilfs_dat_translate(dat, ptr2, &blocknr);
			if (ret < 0)
				goto dat_error;
			ptr2 = blocknr;
		}
		if (ptr2 != ptr + cnt)
			break;
	}
	*ptrp = ptr;
	return cnt;

 dat_error:
	if (ret == -ENOENT)
		ret = -EINVAL; /* Notify bmap layer of metadata corruption */
	return ret;
}

/*
 * nilfs_direct_find_target_v - pick a target virtual block for allocation
 * @direct: bmap
 * @key:    block offset about to be inserted
 *
 * First tries the sequential-access heuristic (pointer following the
 * previously written block); if that yields no valid pointer, falls back
 * to a target within the inode's block group.
 */
static __u64
nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key)
{
	__u64 ptr;

	ptr = nilfs_bmap_find_target_seq(direct, key);
	if (ptr != NILFS_BMAP_INVALID_PTR)
		/* sequential access */
		return ptr;

	/* block group */
	return nilfs_bmap_find_target_in_group(direct);
}

/*
 * nilfs_direct_insert - insert a new block pointer at @key
 * @bmap: bmap
 * @key:  block offset to insert at
 * @ptr:  opaque value; for this operation it actually carries a pointer
 *        to the buffer head of the new block (see cast below)
 *
 * Allocates a pointer (virtual block number via the DAT when
 * NILFS_BMAP_USE_VBN), stores it at @key, marks the buffer volatile,
 * dirties the bmap if needed, updates the sequential-allocation target,
 * and accounts one block to the inode.
 *
 * Return: 0 on success, -ENOENT if @key is out of range, -EEXIST if a
 * pointer is already assigned at @key, or a negative error code from
 * pointer allocation.
 */
static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat = NULL;
	struct buffer_head *bh;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	if (nilfs_direct_get_ptr(bmap, key) != NILFS_BMAP_INVALID_PTR)
		return -EEXIST;

	if (NILFS_BMAP_USE_VBN(bmap)) {
		req.bpr_ptr = nilfs_direct_find_target_v(bmap, key);
		dat = nilfs_bmap_get_dat(bmap);
	}
	ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
	if (!ret) {
		/* ptr must be a pointer to a buffer head. */
		bh = (struct buffer_head *)((unsigned long)ptr);
		set_buffer_nilfs_volatile(bh);

		nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, req.bpr_ptr);

		if (!nilfs_bmap_dirty(bmap))
			nilfs_bmap_set_dirty(bmap);

		if (NILFS_BMAP_USE_VBN(bmap))
			nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);

		nilfs_inode_add_blocks(bmap->b_inode, 1);
	}
	return ret;
}

/*
 * nilfs_direct_delete - delete the block pointer at @key
 * @bmap: bmap
 * @key:  block offset to delete
 *
 * Releases the pointer via the prepare/commit end-ptr protocol (through
 * the DAT when virtual block numbers are in use), invalidates the slot,
 * and subtracts one block from the inode's accounting.
 *
 * Return: 0 on success, -ENOENT if @key is out of range or unassigned,
 * or a negative error code from the prepare step.
 */
static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX ||
	    nilfs_direct_get_ptr(bmap, key) == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
	req.bpr_ptr = nilfs_direct_get_ptr(bmap, key);

	ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
	if (!ret) {
		nilfs_bmap_commit_end_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR);
		nilfs_inode_sub_blocks(bmap->b_inode, 1);
	}
	return ret;
}

/*
 * nilfs_direct_seek_key - find the first assigned key at or after @start
 * @direct: bmap
 * @start:  key to begin scanning from
 * @keyp:   place to store the key found
 *
 * Return: 0 with *@keyp set, or -ENOENT if no assigned key remains.
 */
static int nilfs_direct_seek_key(const struct nilfs_bmap *direct, __u64 start,
				 __u64 *keyp)
{
	__u64 key;

	for (key = start; key <= NILFS_DIRECT_KEY_MAX; key++) {
		if (nilfs_direct_get_ptr(direct, key) !=
		    NILFS_BMAP_INVALID_PTR) {
			*keyp = key;
			return 0;
		}
	}
	return -ENOENT;
}

/*
 * nilfs_direct_last_key - find the largest assigned key
 * @direct: bmap
 * @keyp:   place to store the last key
 *
 * Return: 0 with *@keyp set, or -ENOENT if the mapping is empty.
 */
static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp)
{
	__u64 key, lastkey;

	/* KEY_MAX + 1 is used as an "unset" sentinel; it is never a key. */
	lastkey = NILFS_DIRECT_KEY_MAX + 1;
	for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++)
		if (nilfs_direct_get_ptr(direct, key) !=
		    NILFS_BMAP_INVALID_PTR)
			lastkey = key;

	if (lastkey == NILFS_DIRECT_KEY_MAX + 1)
		return -ENOENT;

	*keyp = lastkey;

	return 0;
}

/*
 * nilfs_direct_check_insert - check whether inserting @key would require
 * converting the direct mapping to a B-tree
 *
 * Return: nonzero (true) if @key exceeds the direct-mapping capacity.
 */
static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key)
{
	return key > NILFS_DIRECT_KEY_MAX;
}

/*
 * nilfs_direct_gather_data - collect all assigned key/pointer pairs
 * @direct: bmap
 * @keys:   output array of keys (caller-provided, at least @nitems slots)
 * @ptrs:   output array of pointers (same size as @keys)
 * @nitems: capacity of the output arrays; clamped to NILFS_DIRECT_NBLOCKS
 *
 * Return: the number of pairs stored.
 */
static int nilfs_direct_gather_data(struct nilfs_bmap *direct,
				    __u64 *keys, __u64 *ptrs, int nitems)
{
	__u64 key;
	__u64 ptr;
	int n;

	if (nitems > NILFS_DIRECT_NBLOCKS)
		nitems = NILFS_DIRECT_NBLOCKS;
	n = 0;
	for (key = 0; key < nitems; key++) {
		ptr = nilfs_direct_get_ptr(direct, key);
		if (ptr != NILFS_BMAP_INVALID_PTR) {
			keys[n] = key;
			ptrs[n] = ptr;
			n++;
		}
	}
	return n;
}

/*
 * nilfs_direct_delete_and_convert - convert a bmap into a direct mapping
 * @bmap: bmap being converted (currently using another representation,
 *        e.g. a B-tree; its b_ops are replaced at the end)
 * @key:  key being deleted as part of the conversion
 * @keys: keys gathered from the old representation, in ascending order
 * @ptrs: pointers corresponding to @keys
 * @n:    number of entries in @keys/@ptrs
 *
 * Deletes @key through the old representation's operations, releases the
 * old representation's resources, then rebuilds the direct-pointer array
 * from @keys/@ptrs — skipping @key itself and invalidating every slot not
 * present in @keys — and finally installs the direct operations.
 *
 * Return: 0 on success or a negative error code from the delete step.
 */
int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
				    __u64 key, __u64 *keys, __u64 *ptrs, int n)
{
	__le64 *dptrs;
	int ret, i, j;

	/* no need to allocate any resource for conversion */

	/* delete */
	ret = bmap->b_ops->bop_delete(bmap, key);
	if (ret < 0)
		return ret;

	/* free resources */
	if (bmap->b_ops->bop_clear != NULL)
		bmap->b_ops->bop_clear(bmap);

	/* convert */
	dptrs = nilfs_direct_dptrs(bmap);
	for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) {
		if ((j < n) && (i == keys[j])) {
			dptrs[i] = (i != key) ?
				cpu_to_le64(ptrs[j]) :
				NILFS_BMAP_INVALID_PTR;
			j++;
		} else
			dptrs[i] = NILFS_BMAP_INVALID_PTR;
	}

	nilfs_direct_init(bmap);
	return 0;
}

/*
 * nilfs_direct_propagate - propagate dirtiness of a data buffer to the DAT
 * @bmap: bmap
 * @bh:   dirty buffer head whose block is mapped by @bmap
 *
 * No-op for bmaps using physical block numbers.  Otherwise, if the buffer
 * has not been marked volatile yet, the virtual block is reassigned via a
 * DAT prepare/commit update and the new entry number stored at the key;
 * if it is already volatile, the existing DAT entry is merely marked dirty.
 *
 * Return: 0 on success, -EINVAL if the buffer's key has no assigned
 * pointer, or a negative error code from the DAT operations.
 */
static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
				  struct buffer_head *bh)
{
	struct nilfs_palloc_req oldreq, newreq;
	struct inode *dat;
	__u64 key;
	__u64 ptr;
	int ret;

	if (!NILFS_BMAP_USE_VBN(bmap))
		return 0;

	dat = nilfs_bmap_get_dat(bmap);
	key = nilfs_bmap_data_get_key(bmap, bh);
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -EINVAL;

	if (!buffer_nilfs_volatile(bh)) {
		oldreq.pr_entry_nr = ptr;
		newreq.pr_entry_nr = ptr;
		ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
		if (ret < 0)
			return ret;
		nilfs_dat_commit_update(dat, &oldreq, &newreq,
					bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
		set_buffer_nilfs_volatile(bh);
		nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr);
	} else
		ret = nilfs_dat_mark_dirty(dat, ptr);

	return ret;
}

/*
 * nilfs_direct_assign_v - assign a disk block number (virtual-pointer case)
 * @direct:  bmap using virtual block numbers
 * @key:     block offset of the buffer
 * @ptr:     virtual block number currently stored at @key
 * @bh:      buffer head (unused here; part of the assign interface)
 * @blocknr: real disk block number being assigned
 * @binfo:   block info record filled in for the segment log
 *
 * Records the @ptr -> @blocknr binding in the DAT and fills @binfo with
 * the virtual block number and block offset.
 *
 * Return: 0 on success or a negative error code from the DAT prepare step.
 */
static int nilfs_direct_assign_v(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	struct inode *dat = nilfs_bmap_get_dat(direct);
	union nilfs_bmap_ptr_req req;
	int ret;

	req.bpr_ptr = ptr;
	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
	if (!ret) {
		nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
		binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
		binfo->bi_v.bi_blkoff = cpu_to_le64(key);
	}
	return ret;
}

/*
 * nilfs_direct_assign_p - assign a disk block number (physical-pointer case)
 * @direct:  bmap using physical block numbers
 * @key:     block offset of the buffer
 * @ptr:     pointer currently stored at @key (unused; replaced by @blocknr)
 * @bh:      buffer head (unused here; part of the assign interface)
 * @blocknr: real disk block number being assigned
 * @binfo:   block info record filled in for the segment log
 *
 * Stores @blocknr directly in the pointer slot and fills @binfo with the
 * block offset and level 0 (direct mapping has no tree levels).
 *
 * Return: always 0.
 */
static int nilfs_direct_assign_p(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	nilfs_direct_set_ptr(direct, key, blocknr);

	binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
	binfo->bi_dat.bi_level = 0;
	memset(binfo->bi_dat.bi_pad, 0, sizeof(binfo->bi_dat.bi_pad));

	return 0;
}

/*
 * nilfs_direct_assign - assign a real disk block number to a buffer
 * @bmap:    bmap
 * @bh:      buffer head being written out by the segment constructor
 * @blocknr: disk block number assigned to the buffer
 * @binfo:   block info record filled in for the segment log
 *
 * Validates the buffer's key and stored pointer (logging a critical
 * message and returning -EINVAL on corruption), then dispatches to the
 * virtual- or physical-pointer assignment helper.
 *
 * Return: 0 on success or a negative error code.
 */
static int nilfs_direct_assign(struct nilfs_bmap *bmap,
			       struct buffer_head **bh,
			       sector_t blocknr,
			       union nilfs_binfo *binfo)
{
	__u64 key;
	__u64 ptr;

	key = nilfs_bmap_data_get_key(bmap, *bh);
	if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
		nilfs_crit(bmap->b_inode->i_sb,
			   "%s (ino=%lu): invalid key: %llu",
			   __func__,
			   bmap->b_inode->i_ino, (unsigned long long)key);
		return -EINVAL;
	}
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
		nilfs_crit(bmap->b_inode->i_sb,
			   "%s (ino=%lu): invalid pointer: %llu",
			   __func__,
			   bmap->b_inode->i_ino, (unsigned long long)ptr);
		return -EINVAL;
	}

	return NILFS_BMAP_USE_VBN(bmap) ?
		nilfs_direct_assign_v(bmap, key, ptr, bh, blocknr, binfo) :
		nilfs_direct_assign_p(bmap, key, ptr, bh, blocknr, binfo);
}

/*
 * Operation vector for direct mappings; operations not meaningful for a
 * flat pointer array (clear, lookup_dirty_buffers, mark, check_delete)
 * are left NULL.
 */
static const struct nilfs_bmap_operations nilfs_direct_ops = {
	.bop_lookup		=	nilfs_direct_lookup,
	.bop_lookup_contig	=	nilfs_direct_lookup_contig,
	.bop_insert		=	nilfs_direct_insert,
	.bop_delete		=	nilfs_direct_delete,
	.bop_clear		=	NULL,

	.bop_propagate		=	nilfs_direct_propagate,

	.bop_lookup_dirty_buffers	=	NULL,

	.bop_assign		=	nilfs_direct_assign,
	.bop_mark		=	NULL,

	.bop_seek_key		=	nilfs_direct_seek_key,
	.bop_last_key		=	nilfs_direct_last_key,

	.bop_check_insert	=	nilfs_direct_check_insert,
	.bop_check_delete	=	NULL,
	.bop_gather_data	=	nilfs_direct_gather_data,
};

/*
 * nilfs_direct_init - install the direct-mapping operations on @bmap
 *
 * Return: always 0.
 */
int nilfs_direct_init(struct nilfs_bmap *bmap)
{
	bmap->b_ops = &nilfs_direct_ops;
	return 0;
}