Lines Matching defs:mg

483 struct dm_cache_migration *mg;
485 mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
487 memset(mg, 0, sizeof(*mg));
489 mg->cache = cache;
492 return mg;
495 static void free_migration(struct dm_cache_migration *mg)
497 struct cache *cache = mg->cache;
502 mempool_free(mg, &cache->migration_pool);
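
These first matches appear to come from the migration constructor and destructor in the Linux device-mapper cache target, drivers/md/dm-cache-target.c. A minimal sketch of how the pair plausibly reads with the elided lines filled in; the nr_allocated_migrations / migration_wait accounting does not appear in the matches and is an assumption reconstructed from context:

static struct dm_cache_migration *alloc_migration(struct cache *cache)
{
	struct dm_cache_migration *mg;

	/* A GFP_NOIO mempool allocation cannot fail, so there is no NULL check. */
	mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);

	memset(mg, 0, sizeof(*mg));

	mg->cache = cache;
	atomic_inc(&cache->nr_allocated_migrations);	/* assumed: elided by the match filter */

	return mg;
}

static void free_migration(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;

	/* assumed: wake anyone waiting for in-flight migrations to drain */
	if (atomic_dec_and_test(&cache->nr_allocated_migrations))
		wake_up(&cache->migration_wait);

	mempool_free(mg, &cache->migration_pool);
}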
1076 static void quiesce(struct dm_cache_migration *mg,
1079 init_continuation(&mg->k, continuation);
1080 dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws);
1092 struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);
1095 mg->k.input = BLK_STS_IOERR;
1097 queue_continuation(mg->cache->wq, &mg->k);
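
The next matches are the continuation plumbing: quiesce() arranges for a work function to run once all I/O held against a bio-prison cell has drained, and copy_complete() is the kcopyd completion callback. A sketch with the elided callback signature and error test filled in as assumptions:

static void quiesce(struct dm_cache_migration *mg,
		    void (*continuation)(struct work_struct *))
{
	init_continuation(&mg->k, continuation);
	dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws);
}

/* assumed signature: the standard dm_kcopyd_notify_fn callback */
static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);

	if (read_err || write_err)	/* assumed: elided error test */
		mg->k.input = BLK_STS_IOERR;

	queue_continuation(mg->cache->wq, &mg->k);
}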
1100 static void copy(struct dm_cache_migration *mg, bool promote)
1103 struct cache *cache = mg->cache;
1106 o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block;
1110 c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block;
1114 dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k);
1116 dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k);
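
copy() programs kcopyd with one block-sized region on each device; the direction depends on whether the block is being promoted into the cache or demoted back to the origin. A sketch; the bdev and count assignments are assumptions filled in around the sector arithmetic shown above:

static void copy(struct dm_cache_migration *mg, bool promote)
{
	struct dm_io_region o_region, c_region;
	struct cache *cache = mg->cache;

	o_region.bdev = cache->origin_dev->bdev;	/* assumed */
	o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block;
	o_region.count = cache->sectors_per_block;	/* assumed */

	c_region.bdev = cache->cache_dev->bdev;		/* assumed */
	c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block;
	c_region.count = cache->sectors_per_block;	/* assumed */

	if (promote)
		dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k);
	else
		dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k);
}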
1130 struct dm_cache_migration *mg = bio->bi_private;
1131 struct cache *cache = mg->cache;
1137 mg->k.input = bio->bi_status;
1139 queue_continuation(cache->wq, &mg->k);
1142 static void overwrite(struct dm_cache_migration *mg,
1145 struct bio *bio = mg->overwrite_bio;
1148 dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
1154 if (mg->op->op == POLICY_PROMOTE)
1155 remap_to_cache(mg->cache, bio, mg->op->cblock);
1157 remap_to_origin(mg->cache, bio);
1159 init_continuation(&mg->k, continuation);
1160 accounted_request(mg->cache, bio);
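
The overwrite path avoids a full block copy when an incoming write covers the whole block: the bio itself is remapped to the destination and its endio hooked so that completion resumes the state machine. A sketch of both functions; the per_bio_data and unhook lines are assumptions filled in around the matches:

static void overwrite_endio(struct bio *bio)
{
	struct dm_cache_migration *mg = bio->bi_private;
	struct cache *cache = mg->cache;
	struct per_bio_data *pb = get_per_bio_data(bio);	/* assumed */

	dm_unhook_bio(&pb->hook_info, bio);			/* assumed */

	if (bio->bi_status)
		mg->k.input = bio->bi_status;

	queue_continuation(cache->wq, &mg->k);
}

static void overwrite(struct dm_cache_migration *mg,
		      void (*continuation)(struct work_struct *))
{
	struct bio *bio = mg->overwrite_bio;
	struct per_bio_data *pb = get_per_bio_data(bio);	/* assumed */

	dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);

	/*
	 * The overwrite bio is part of the copy operation, so it does not
	 * itself set or clear the discard and dirty flags.
	 */
	if (mg->op->op == POLICY_PROMOTE)
		remap_to_cache(mg->cache, bio, mg->op->cblock);
	else
		remap_to_origin(mg->cache, bio);

	init_continuation(&mg->k, continuation);
	accounted_request(mg->cache, bio);
}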
1174 static void mg_complete(struct dm_cache_migration *mg, bool success)
1177 struct cache *cache = mg->cache;
1178 struct policy_work *op = mg->op;
1189 if (mg->overwrite_bio) {
1192 else if (mg->k.input)
1193 mg->overwrite_bio->bi_status = mg->k.input;
1195 mg->overwrite_bio->bi_status = BLK_STS_IOERR;
1196 bio_endio(mg->overwrite_bio);
1223 if (mg->cell) {
1224 if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
1225 free_prison_cell(cache, mg->cell);
1228 free_migration(mg);
1237 struct dm_cache_migration *mg = ws_to_mg(ws);
1239 mg_complete(mg, mg->k.input == 0);
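
mg_complete() is the single exit point for a migration: it propagates status into any overwrite bio, unlocks the prison cell, and frees the migration; mg_success() adapts it to the continuation interface. Only the POLICY_PROMOTE branch is visible in the matches, so the sketch below fills in the rest as assumptions and elides the demote and writeback cases:

static void mg_complete(struct dm_cache_migration *mg, bool success)
{
	struct bio_list bios;
	struct cache *cache = mg->cache;
	struct policy_work *op = mg->op;
	dm_cblock_t cblock = op->cblock;	/* assumed */

	if (success)				/* assumed: stats update */
		update_stats(&cache->stats, op->op);

	switch (op->op) {
	case POLICY_PROMOTE:
		clear_discard(cache, oblock_to_dblock(cache, op->oblock));	/* assumed */
		policy_complete_background_work(cache->policy, op, success);	/* assumed */

		if (mg->overwrite_bio) {
			if (success)
				force_set_dirty(cache, cblock);	/* assumed */
			else if (mg->k.input)
				mg->overwrite_bio->bi_status = mg->k.input;
			else
				mg->overwrite_bio->bi_status = BLK_STS_IOERR;
			bio_endio(mg->overwrite_bio);
		} else {
			if (success)
				force_clear_dirty(cache, cblock);	/* assumed */
			dec_io_migrations(cache);			/* assumed */
		}
		break;

	/* POLICY_DEMOTE and POLICY_WRITEBACK cases elided from the matches */
	default:
		break;
	}

	bio_list_init(&bios);			/* assumed */
	if (mg->cell) {
		if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
			free_prison_cell(cache, mg->cell);
	}

	free_migration(mg);
	defer_bios(cache, &bios);		/* assumed */
	background_work_end(cache);		/* assumed */
}

static void mg_success(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);

	mg_complete(mg, mg->k.input == 0);
}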
1245 struct dm_cache_migration *mg = ws_to_mg(ws);
1246 struct cache *cache = mg->cache;
1247 struct policy_work *op = mg->op;
1257 mg_complete(mg, false);
1260 mg_complete(mg, true);
1270 mg_complete(mg, false);
1293 init_continuation(&mg->k, mg_success);
1294 continue_after_commit(&cache->committer, &mg->k);
1299 mg_complete(mg, true);
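
mg_update_metadata() commits the mapping change once the data is safe. The matches at 1257/1260 belong to the promote case, 1270 and 1293-1294 to the demote case (which must not complete until the metadata commit has landed, hence the chained continuation), and 1299 to the writeback case. A sketch; the dm_cache_insert_mapping / dm_cache_remove_mapping calls and the error reporting are assumptions:

static void mg_update_metadata(struct work_struct *ws)
{
	int r;
	struct dm_cache_migration *mg = ws_to_mg(ws);
	struct cache *cache = mg->cache;
	struct policy_work *op = mg->op;

	switch (op->op) {
	case POLICY_PROMOTE:
		r = dm_cache_insert_mapping(cache->cmd, op->cblock, op->oblock);	/* assumed */
		if (r) {
			metadata_operation_failed(cache, "dm_cache_insert_mapping", r);	/* assumed */
			mg_complete(mg, false);
			return;
		}
		mg_complete(mg, true);
		break;

	case POLICY_DEMOTE:
		r = dm_cache_remove_mapping(cache->cmd, op->cblock);	/* assumed */
		if (r) {
			metadata_operation_failed(cache, "dm_cache_remove_mapping", r);	/* assumed */
			mg_complete(mg, false);
			return;
		}

		/*
		 * A crash before the commit could otherwise resurrect the
		 * stale mapping, so completion waits for the commit.
		 */
		init_continuation(&mg->k, mg_success);
		continue_after_commit(&cache->committer, &mg->k);
		schedule_commit(&cache->committer);	/* assumed */
		break;

	case POLICY_WRITEBACK:
		mg_complete(mg, true);
		break;
	}
}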
1306 struct dm_cache_migration *mg = ws_to_mg(ws);
1311 if (mg->k.input)
1312 mg_complete(mg, false);
1320 struct dm_cache_migration *mg = ws_to_mg(ws);
1325 if (mg->k.input)
1326 mg_complete(mg, false);
1332 r = dm_cell_lock_promote_v2(mg->cache->prison, mg->cell,
1335 mg_complete(mg, false);
1338 quiesce(mg, mg_update_metadata);
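
Two small steps sit between the copy and the metadata update. mg_update_metadata_after_copy() only checks the continuation's input for a copy error; mg_upgrade_lock() promotes the write lock to a full read/write lock and quiesces if readers are still in flight. A sketch with the elided else branches filled in as assumptions:

static void mg_update_metadata_after_copy(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);

	/* Did the copy succeed? */
	if (mg->k.input)
		mg_complete(mg, false);
	else
		mg_update_metadata(ws);	/* assumed */
}

static void mg_upgrade_lock(struct work_struct *ws)
{
	int r;
	struct dm_cache_migration *mg = ws_to_mg(ws);

	/* Did the copy succeed? */
	if (mg->k.input)
		mg_complete(mg, false);
	else {
		/* Upgrade to a lock that excludes both reads and writes. */
		r = dm_cell_lock_promote_v2(mg->cache->prison, mg->cell,
					    READ_WRITE_LOCK_LEVEL);	/* assumed argument */
		if (r < 0)
			mg_complete(mg, false);
		else if (r)
			quiesce(mg, mg_update_metadata);
		else
			mg_update_metadata(ws);	/* assumed */
	}
}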
1347 struct dm_cache_migration *mg = ws_to_mg(ws);
1348 struct cache *cache = mg->cache;
1349 struct policy_work *op = mg->op;
1358 init_continuation(&mg->k, mg_upgrade_lock);
1359 copy(mg, is_policy_promote);
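
mg_full_copy() decides whether any data actually needs moving: a clean demote, or a block whose origin side is discarded, can skip straight to the lock upgrade. A sketch; the short-circuit condition is an assumption reconstructed around the matches:

static void mg_full_copy(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);
	struct cache *cache = mg->cache;
	struct policy_work *op = mg->op;
	bool is_policy_promote = (op->op == POLICY_PROMOTE);	/* assumed */

	/* assumed: nothing to copy for a clean demote or a discarded block */
	if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
	    is_discarded_oblock(cache, op->oblock)) {
		mg_upgrade_lock(ws);
		return;
	}

	init_continuation(&mg->k, mg_upgrade_lock);
	copy(mg, is_policy_promote);
}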
1364 struct dm_cache_migration *mg = ws_to_mg(ws);
1366 if (mg->overwrite_bio) {
1372 if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
1376 bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
1379 mg->overwrite_bio = NULL;
1380 inc_io_migrations(mg->cache);
1392 overwrite(mg, mg_update_metadata_after_copy);
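
mg_copy() re-checks whether the overwrite optimisation still applies now that the cell lock is held; if not, it detains the block normally and falls back to a full copy. A sketch; the BUG_ON and the fall-through calls to mg_full_copy() are assumptions:

static void mg_copy(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);

	if (mg->overwrite_bio) {
		/*
		 * No exclusive lock was held when the bio was first judged
		 * optimisable, so check again in case things have changed.
		 */
		if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
			/* Fall back to a real full copy after tidying up. */
			bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);

			BUG_ON(rb); /* assumed: an exclusive lock must not be held here */
			mg->overwrite_bio = NULL;
			inc_io_migrations(mg->cache);
			mg_full_copy(ws);	/* assumed */
			return;
		}

		/*
		 * Safe even though this writes new data: all I/O to the block
		 * is locked out, and mg_lock_writes() already took the
		 * read/write level lock, so mg_upgrade_lock() is skipped.
		 */
		overwrite(mg, mg_update_metadata_after_copy);
	} else
		mg_full_copy(ws);	/* assumed */
}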
1398 static int mg_lock_writes(struct dm_cache_migration *mg)
1402 struct cache *cache = mg->cache;
1412 build_key(mg->op->oblock, oblock_succ(mg->op->oblock), &key);
1414 mg->overwrite_bio ? READ_WRITE_LOCK_LEVEL : WRITE_LOCK_LEVEL,
1415 prealloc, &mg->cell);
1418 mg_complete(mg, false);
1422 if (mg->cell != prealloc)
1426 mg_copy(&mg->k.ws);
1428 quiesce(mg, mg_copy);
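
mg_lock_writes() takes the bio-prison lock for the origin block: writes only for a plain migration, reads and writes when an overwrite bio will rewrite the block's contents. A sketch; the preallocated-cell handling and the r == 0 fast path are assumptions filled in around the matches:

static int mg_lock_writes(struct dm_cache_migration *mg)
{
	int r;
	struct dm_cell_key_v2 key;
	struct cache *cache = mg->cache;
	struct dm_bio_prison_cell_v2 *prealloc;

	prealloc = alloc_prison_cell(cache);	/* assumed */

	/*
	 * Prevent writes to the block while it is copied, but allow reads to
	 * continue; with an overwrite bio everything must be locked out.
	 */
	build_key(mg->op->oblock, oblock_succ(mg->op->oblock), &key);
	r = dm_cell_lock_v2(cache->prison, &key,	/* assumed call */
			    mg->overwrite_bio ? READ_WRITE_LOCK_LEVEL : WRITE_LOCK_LEVEL,
			    prealloc, &mg->cell);
	if (r < 0) {					/* assumed error path */
		free_prison_cell(cache, prealloc);
		mg_complete(mg, false);
		return r;
	}

	if (mg->cell != prealloc)
		free_prison_cell(cache, prealloc);	/* assumed */

	if (r == 0)					/* assumed: lock granted immediately */
		mg_copy(&mg->k.ws);
	else
		quiesce(mg, mg_copy);

	return 0;
}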
1435 struct dm_cache_migration *mg;
1442 mg = alloc_migration(cache);
1444 mg->op = op;
1445 mg->overwrite_bio = bio;
1450 return mg_lock_writes(mg);
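
mg_start() is the entry point used to launch a policy-requested migration. A sketch; the signature, the background_work_begin() gate, and the inc_io_migrations() accounting for the no-bio case are assumptions:

static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio)	/* assumed signature */
{
	struct dm_cache_migration *mg;

	if (!background_work_begin(cache)) {	/* assumed */
		policy_complete_background_work(cache->policy, op, false);
		return -EPERM;
	}

	mg = alloc_migration(cache);

	mg->op = op;
	mg->overwrite_bio = bio;

	if (!bio)				/* assumed */
		inc_io_migrations(cache);

	return mg_lock_writes(mg);
}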
1459 static void invalidate_complete(struct dm_cache_migration *mg, bool success)
1462 struct cache *cache = mg->cache;
1465 if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
1466 free_prison_cell(cache, mg->cell);
1468 if (!success && mg->overwrite_bio)
1469 bio_io_error(mg->overwrite_bio);
1471 free_migration(mg);
1479 struct dm_cache_migration *mg = ws_to_mg(ws);
1481 invalidate_complete(mg, !mg->k.input);
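
The invalidation path mirrors the migration path with a simpler state machine. invalidate_complete() is its single exit point, and invalidate_completed() adapts it to the continuation interface. A sketch; the bio_list handling and the final bookkeeping calls are assumptions:

static void invalidate_complete(struct dm_cache_migration *mg, bool success)
{
	struct bio_list bios;
	struct cache *cache = mg->cache;

	bio_list_init(&bios);			/* assumed */
	if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
		free_prison_cell(cache, mg->cell);

	if (!success && mg->overwrite_bio)
		bio_io_error(mg->overwrite_bio);

	free_migration(mg);
	defer_bios(cache, &bios);		/* assumed */
	background_work_end(cache);		/* assumed */
}

static void invalidate_completed(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);

	invalidate_complete(mg, !mg->k.input);
}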
1512 struct dm_cache_migration *mg = ws_to_mg(ws);
1513 struct cache *cache = mg->cache;
1515 r = invalidate_cblock(cache, mg->invalidate_cblock);
1517 invalidate_complete(mg, false);
1521 init_continuation(&mg->k, invalidate_completed);
1522 continue_after_commit(&cache->committer, &mg->k);
1523 remap_to_origin_clear_discard(cache, mg->overwrite_bio, mg->invalidate_oblock);
1524 mg->overwrite_bio = NULL;
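
invalidate_remove() drops the mapping from the metadata and, like the demote path, defers completion until after the commit; the triggering bio is remapped to the origin and released before the commit is scheduled. A sketch; the error path and the schedule_commit() call are assumptions:

static void invalidate_remove(struct work_struct *ws)
{
	int r;
	struct dm_cache_migration *mg = ws_to_mg(ws);
	struct cache *cache = mg->cache;

	r = invalidate_cblock(cache, mg->invalidate_cblock);
	if (r) {				/* assumed error path */
		invalidate_complete(mg, false);
		return;
	}

	init_continuation(&mg->k, invalidate_completed);
	continue_after_commit(&cache->committer, &mg->k);
	remap_to_origin_clear_discard(cache, mg->overwrite_bio, mg->invalidate_oblock);
	mg->overwrite_bio = NULL;
	schedule_commit(&cache->committer);	/* assumed */
}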
1528 static int invalidate_lock(struct dm_cache_migration *mg)
1532 struct cache *cache = mg->cache;
1537 build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
1539 READ_WRITE_LOCK_LEVEL, prealloc, &mg->cell);
1542 invalidate_complete(mg, false);
1546 if (mg->cell != prealloc)
1550 quiesce(mg, invalidate_remove);
1557 init_continuation(&mg->k, invalidate_remove);
1558 queue_work(cache->wq, &mg->k.ws);
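
invalidate_lock() takes a full read/write lock on the origin block; even when the lock is granted immediately, the removal is bounced through the workqueue because the caller may still be in request context. A sketch; the preallocated-cell handling mirrors mg_lock_writes() and is an assumption:

static int invalidate_lock(struct dm_cache_migration *mg)
{
	int r;
	struct dm_cell_key_v2 key;
	struct cache *cache = mg->cache;
	struct dm_bio_prison_cell_v2 *prealloc;

	prealloc = alloc_prison_cell(cache);	/* assumed */

	build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
	r = dm_cell_lock_v2(cache->prison, &key,	/* assumed call */
			    READ_WRITE_LOCK_LEVEL, prealloc, &mg->cell);
	if (r < 0) {					/* assumed error path */
		free_prison_cell(cache, prealloc);
		invalidate_complete(mg, false);
		return r;
	}

	if (mg->cell != prealloc)
		free_prison_cell(cache, prealloc);	/* assumed */

	if (r)
		quiesce(mg, invalidate_remove);
	else {
		/*
		 * invalidate_remove() cannot be called directly here because
		 * we might still be in request context.
		 */
		init_continuation(&mg->k, invalidate_remove);
		queue_work(cache->wq, &mg->k.ws);
	}

	return 0;
}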
1567 struct dm_cache_migration *mg;
1572 mg = alloc_migration(cache);
1574 mg->overwrite_bio = bio;
1575 mg->invalidate_cblock = cblock;
1576 mg->invalidate_oblock = oblock;
1578 return invalidate_lock(mg);
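
invalidate_start() is the entry point for a user-requested invalidation. A sketch; the signature and the background_work_begin() gate are assumptions reconstructed from the matches:

static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
			    dm_oblock_t oblock, struct bio *bio)	/* assumed signature */
{
	struct dm_cache_migration *mg;

	if (!background_work_begin(cache))	/* assumed */
		return -EPERM;

	mg = alloc_migration(cache);

	mg->overwrite_bio = bio;
	mg->invalidate_cblock = cblock;
	mg->invalidate_oblock = oblock;

	return invalidate_lock(mg);
}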