Lines matching defs:t, occurrences of the struct dm_table *t variable in drivers/md/dm-table.c (Linux device-mapper core); each entry carries its source line number.
58 * Return the n'th node of level l from table t.
60 static inline sector_t *get_node(struct dm_table *t,
63 return t->index[l] + (n * KEYS_PER_NODE);
70 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
72 for (; l < t->depth - 1; l++)
75 if (n >= t->counts[l])
78 return get_node(t, l, n)[KEYS_PER_NODE - 1];
85 static int setup_btree_index(unsigned int l, struct dm_table *t)
90 for (n = 0U; n < t->counts[l]; n++) {
91 node = get_node(t, l, n);
94 node[k] = high(t, l + 1, get_child(n, k));
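The matched lines above are the build side of dm-table's lookup btree: every level is one flat array of sector keys, a node is KEYS_PER_NODE consecutive keys, and each internal node stores the highest sector reachable under each of its children. Below is a compilable user-space model of that layout; the constants, the toy_table struct and the descent into the right-most child inside high() are assumptions filled in around the matched lines, not the kernel's definitions (the kernel sizes KEYS_PER_NODE from the cache line size).

typedef unsigned long long sector_t;        /* stand-in for the kernel type */

#define KEYS_PER_NODE      4                /* illustrative value only */
#define CHILDREN_PER_NODE  (KEYS_PER_NODE + 1)
#define MAX_DEPTH          16

struct toy_table {
        unsigned int depth;                 /* number of btree levels */
        unsigned int counts[MAX_DEPTH];     /* nodes per level */
        sector_t *index[MAX_DEPTH];         /* flat key array per level */
};

/* n'th node of level l: levels are contiguous arrays, so a node is just
 * an offset of n * KEYS_PER_NODE keys into index[l]. */
static inline sector_t *get_node(struct toy_table *t,
                                 unsigned int l, unsigned int n)
{
        return t->index[l] + (n * KEYS_PER_NODE);
}

/* Index of child k of node n on the level below. */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
        return (n * CHILDREN_PER_NODE) + k;
}

/* Highest sector reachable under node n of level l: follow the
 * right-most child down to the leaf level and take its last key;
 * children past the end of the table saturate to the maximum sector. */
static sector_t high(struct toy_table *t, unsigned int l, unsigned int n)
{
        for (; l < t->depth - 1; l++)
                n = get_child(n, CHILDREN_PER_NODE - 1);

        if (n >= t->counts[l])
                return (sector_t) -1;

        return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/* Fill an internal level: key k of node n becomes the high sector of
 * child subtree k, giving the separators that lookups compare against. */
static void setup_btree_index(unsigned int l, struct toy_table *t)
{
        unsigned int n, k;
        sector_t *node;

        for (n = 0; n < t->counts[l]; n++) {
                node = get_node(t, l, n);

                for (k = 0; k < KEYS_PER_NODE; k++)
                        node[k] = high(t, l + 1, get_child(n, k));
        }
}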
104 static int alloc_targets(struct dm_table *t, unsigned int num)
121 t->num_allocated = num;
122 t->highs = n_highs;
123 t->targets = n_targets;
131 struct dm_table *t;
136 t = kzalloc(sizeof(*t), GFP_KERNEL);
138 if (!t)
141 INIT_LIST_HEAD(&t->devices);
142 init_rwsem(&t->devices_lock);
150 kfree(t);
154 if (alloc_targets(t, num_targets)) {
155 kfree(t);
159 t->type = DM_TYPE_NONE;
160 t->mode = mode;
161 t->md = md;
162 t->flush_bypasses_map = true;
163 *result = t;
181 static void dm_table_destroy_crypto_profile(struct dm_table *t);
183 void dm_table_destroy(struct dm_table *t)
185 if (!t)
189 if (t->depth >= 2)
190 kvfree(t->index[t->depth - 2]);
193 for (unsigned int i = 0; i < t->num_targets; i++) {
194 struct dm_target *ti = dm_table_get_target(t, i);
202 kvfree(t->highs);
205 free_devices(&t->devices, t->md);
207 dm_free_md_mempools(t->mempools);
209 dm_table_destroy_crypto_profile(t);
211 kfree(t);
372 struct dm_table *t = ti->table;
374 BUG_ON(!t);
380 if (dev == disk_devt(t->md->disk))
383 down_write(&t->devices_lock);
385 dd = find_device(&t->devices, dev);
393 r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
400 list_add(&dd->list, &t->devices);
404 r = upgrade_mode(dd, mode, t->md);
410 up_write(&t->devices_lock);
415 up_write(&t->devices_lock);
452 * Only stack the integrity profile if the target doesn't have native
467 struct dm_table *t = ti->table;
468 struct list_head *devices = &t->devices;
471 down_write(&t->devices_lock);
481 dm_device_name(t->md), d->name);
485 dm_put_table_device(t->md, d);
491 up_write(&t->devices_lock);
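The dm_get_device()/dm_put_device() lines above are the table-side bookkeeping (devices_lock, find_device, mode upgrade); the usual caller is a target constructor. A kernel-style sketch modeled on dm-linear follows: it compiles only against kernel headers, the toy_* names are invented, and the mode argument's type (blk_mode_t here) has varied across kernel versions.

#include <linux/device-mapper.h>
#include <linux/slab.h>

/* Invented per-target state; a real target keeps whatever it needs. */
struct toy_target {
        struct dm_dev *dev;
};

static int toy_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct toy_target *tt;
        int r;

        if (argc != 1) {
                ti->error = "Invalid argument count";
                return -EINVAL;
        }

        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return -ENOMEM;

        /* Adds the device to ti->table->devices under devices_lock,
         * reusing or mode-upgrading an entry that is already listed. */
        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &tt->dev);
        if (r) {
                ti->error = "Device lookup failed";
                kfree(tt);
                return r;
        }

        ti->private = tt;
        return 0;
}

static void toy_dtr(struct dm_target *ti)
{
        struct toy_target *tt = ti->private;

        dm_put_device(ti, tt->dev);     /* drops the table's reference */
        kfree(tt);
}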
498 static int adjoin(struct dm_table *t, struct dm_target *ti)
502 if (!t->num_targets)
505 prev = &t->targets[t->num_targets - 1];
619 static int validate_hardware_logical_block_alignment(struct dm_table *t,
647 for (i = 0; i < t->num_targets; i++) {
648 ti = dm_table_get_target(t, i);
676 dm_device_name(t->md), i,
686 int dm_table_add_target(struct dm_table *t, const char *type,
693 if (t->singleton) {
695 dm_device_name(t->md), t->targets->type->name);
699 BUG_ON(t->num_targets >= t->num_allocated);
701 ti = t->targets + t->num_targets;
705 DMERR("%s: zero-length target", dm_device_name(t->md));
709 DMERR("%s: too large device", dm_device_name(t->md));
715 DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
720 if (t->num_targets) {
724 t->singleton = true;
728 !(t->mode & BLK_OPEN_WRITE)) {
733 if (t->immutable_target_type) {
734 if (t->immutable_target_type != ti->type) {
739 if (t->num_targets) {
743 t->immutable_target_type = ti->type;
746 ti->table = t;
754 if (!adjoin(t, ti)) {
761 ti->error = "couldn't split parameters";
770 t->highs[t->num_targets++] = ti->begin + ti->len - 1;
774 dm_device_name(t->md), type);
780 t->flush_bypasses_map = false;
785 DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, ti->error, ERR_PTR(r));
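dm_table_add_target() above also looks up the target type, enforces the singleton/immutable rules and runs the type's ctr; the adjoin() check and the highs[] assignment are the geometric part, which requires targets to tile the device contiguously from sector 0. A compilable toy model of just that bookkeeping follows (the fixed-size arrays and error codes are invented).

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

struct toy_target {
        sector_t begin;
        sector_t len;
};

struct toy_table {
        unsigned int num_targets;
        struct toy_target targets[8];
        sector_t highs[8];
};

/* Does the new target start exactly where the previous one ended? */
static bool adjoin(struct toy_table *t, struct toy_target *ti)
{
        struct toy_target *prev;

        if (!t->num_targets)
                return !ti->begin;      /* first target must start at 0 */

        prev = &t->targets[t->num_targets - 1];
        return (ti->begin == (prev->begin + prev->len));
}

static int add_target(struct toy_table *t, sector_t start, sector_t len)
{
        struct toy_target *ti = &t->targets[t->num_targets];

        ti->begin = start;
        ti->len = len;
        if (!adjoin(t, ti))
                return -1;              /* gap or overlap in the table */

        /* highs[] records the last sector covered by each target */
        t->highs[t->num_targets++] = ti->begin + ti->len - 1;
        return 0;
}

int main(void)
{
        struct toy_table t = { 0 };

        printf("%d\n", add_target(&t, 0, 1024));        /* 0: ok */
        printf("%d\n", add_target(&t, 1024, 2048));     /* 0: contiguous */
        printf("%d\n", add_target(&t, 4096, 512));      /* -1: gap */
        return 0;
}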
859 void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
861 t->type = type;
883 static bool dm_table_supports_dax(struct dm_table *t,
887 for (unsigned int i = 0; i < t->num_targets; i++) {
888 struct dm_target *ti = dm_table_get_target(t, i);
915 static int dm_table_determine_type(struct dm_table *t)
919 struct list_head *devices = dm_table_get_devices(t);
920 enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
922 if (t->type != DM_TYPE_NONE) {
924 if (t->type == DM_TYPE_BIO_BASED) {
928 BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
932 for (unsigned int i = 0; i < t->num_targets; i++) {
933 ti = dm_table_get_target(t, i);
942 DMERR("Inconsistent table: different target types can't be mixed up");
962 t->type = DM_TYPE_BIO_BASED;
963 if (dm_table_supports_dax(t, device_not_dax_capable) ||
965 t->type = DM_TYPE_DAX_BIO_BASED;
972 t->type = DM_TYPE_REQUEST_BASED;
981 if (t->num_targets > 1) {
982 DMERR("request-based DM doesn't support multiple targets");
988 struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
992 t->type = live_table->type;
993 dm_put_live_table(t->md, srcu_idx);
997 ti = dm_table_get_immutable_target(t);
1006 /* Non-request-stackable devices can't be used for request-based dm */
1016 enum dm_queue_mode dm_table_get_type(struct dm_table *t)
1018 return t->type;
1021 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
1023 return t->immutable_target_type;
1026 struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
1029 if (t->num_targets > 1 ||
1030 !dm_target_is_immutable(t->targets[0].type))
1033 return t->targets;
1036 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
1038 for (unsigned int i = 0; i < t->num_targets; i++) {
1039 struct dm_target *ti = dm_table_get_target(t, i);
1048 bool dm_table_request_based(struct dm_table *t)
1050 return __table_type_request_based(dm_table_get_type(t));
1053 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
1055 enum dm_queue_mode type = dm_table_get_type(t);
1062 DMERR("no table type is set, can't allocate mempools");
1079 for (unsigned int i = 0; i < t->num_targets; i++) {
1080 struct dm_target *ti = dm_table_get_target(t, i);
1097 t->mempools = pools;
1105 static int setup_indexes(struct dm_table *t)
1112 for (i = t->depth - 2; i >= 0; i--) {
1113 t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
1114 total += t->counts[i];
1122 for (i = t->depth - 2; i >= 0; i--) {
1123 t->index[i] = indexes;
1124 indexes += (KEYS_PER_NODE * t->counts[i]);
1125 setup_btree_index(i, t);
1134 static int dm_table_build_index(struct dm_table *t)
1140 leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
1141 t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
1144 t->counts[t->depth - 1] = leaf_nodes;
1145 t->index[t->depth - 1] = t->highs;
1147 if (t->depth >= 2)
1148 r = setup_indexes(t);
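dm_table_build_index()/setup_indexes() above size the btree bottom-up: the leaf level is t->highs itself, each higher level needs one node per CHILDREN_PER_NODE nodes below it, and everything above the leaves is carved out of one allocation. A compilable user-space illustration of that arithmetic follows; KEYS_PER_NODE, MAX_DEPTH and the example target count are made-up values.

#include <stdio.h>

#define KEYS_PER_NODE      4            /* illustrative value only */
#define CHILDREN_PER_NODE  (KEYS_PER_NODE + 1)
#define MAX_DEPTH          16

static unsigned int div_up(unsigned int n, unsigned int d)
{
        return (n + d - 1) / d;
}

/* Levels needed so one root covers everything: ceil(log_base(n)). */
static unsigned int int_log(unsigned int n, unsigned int base)
{
        unsigned int result = 0;

        while (n > 1) {
                n = div_up(n, base);
                result++;
        }
        return result;
}

int main(void)
{
        unsigned int num_targets = 1000;        /* example table size */
        unsigned int counts[MAX_DEPTH];
        unsigned int leaf_nodes = div_up(num_targets, KEYS_PER_NODE);
        unsigned int depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
        unsigned int total = 0;
        int i;

        counts[depth - 1] = leaf_nodes;         /* leaf level: highs[] itself */
        for (i = (int)depth - 2; i >= 0; i--) {
                counts[i] = div_up(counts[i + 1], CHILDREN_PER_NODE);
                total += counts[i];             /* internal nodes to allocate */
        }

        printf("depth=%u, internal keys=%u\n", depth, total * KEYS_PER_NODE);
        for (i = 0; i < (int)depth; i++)
                printf("level %d: %u nodes\n", i, counts[i]);
        return 0;
}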
1178 struct dm_table *t;
1181 t = dm_get_live_table(md, &srcu_idx);
1182 if (!t)
1185 for (unsigned int i = 0; i < t->num_targets; i++) {
1186 struct dm_target *ti = dm_table_get_target(t, i);
1280 struct dm_table *t;
1286 t = dm_get_live_table(md, &srcu_idx);
1287 if (!t)
1299 for (i = 0; i < t->num_targets; i++) {
1300 ti = dm_table_get_target(t, i);
1394 static void dm_table_destroy_crypto_profile(struct dm_table *t)
1396 dm_destroy_crypto_profile(t->crypto_profile);
1397 t->crypto_profile = NULL;
1401 * Constructs and initializes t->crypto_profile with a crypto profile that
1403 * the dm_table. However, if the constructed crypto profile doesn't support all
1405 * returns an error instead, since we don't support removing crypto capabilities
1407 * no crypto capabilities at all), it just sets t->crypto_profile to NULL.
1409 static int dm_table_construct_crypto_profile(struct dm_table *t)
1419 dmcp->md = t->md;
1429 for (i = 0; i < t->num_targets; i++) {
1430 struct dm_target *ti = dm_table_get_target(t, i);
1450 if (t->md->queue &&
1452 t->md->queue->crypto_profile)) {
1459 * If the new profile doesn't actually support any crypto capabilities,
1475 * t->crypto_profile is only set temporarily while the table is being
1479 t->crypto_profile = profile;
1485 struct dm_table *t)
1487 if (!t->crypto_profile)
1492 blk_crypto_register(t->crypto_profile, q);
1495 t->crypto_profile);
1496 dm_destroy_crypto_profile(t->crypto_profile);
1498 t->crypto_profile = NULL;
1503 static int dm_table_construct_crypto_profile(struct dm_table *t)
1512 static void dm_table_destroy_crypto_profile(struct dm_table *t)
1517 struct dm_table *t)
1527 int dm_table_complete(struct dm_table *t)
1531 r = dm_table_determine_type(t);
1537 r = dm_table_build_index(t);
1543 r = dm_table_construct_crypto_profile(t);
1549 r = dm_table_alloc_md_mempools(t, t->md);
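dm_table_complete() is the last step of the load sequence that also uses dm_table_create(), dm_table_add_target() and dm_table_destroy() above. A schematic kernel-style sketch of that sequence follows: toy_target_spec and toy_load_table are invented, the open flags differ across kernel versions, and the real caller (dm-ioctl's table load path) parses the specs out of the ioctl payload and does considerably more validation.

#include <linux/device-mapper.h>

/* Invented representation of one "start len type args" table line. */
struct toy_target_spec {
        sector_t start, len;
        const char *type;       /* e.g. "linear" */
        char *params;           /* e.g. "/dev/sdb 0" */
};

static int toy_load_table(struct mapped_device *md,
                          struct toy_target_spec *spec, unsigned int count,
                          struct dm_table **result)
{
        struct dm_table *t;
        unsigned int i;
        int r;

        r = dm_table_create(&t, BLK_OPEN_READ | BLK_OPEN_WRITE, count, md);
        if (r)
                return r;

        for (i = 0; i < count; i++) {
                r = dm_table_add_target(t, spec[i].type, spec[i].start,
                                        spec[i].len, spec[i].params);
                if (r)
                        goto bad;
        }

        /* determine type, build the btree index, mempools, crypto profile */
        r = dm_table_complete(t);
        if (r)
                goto bad;

        *result = t;
        return 0;

bad:
        dm_table_destroy(t);
        return r;
}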
1557 void dm_table_event_callback(struct dm_table *t,
1561 t->event_fn = fn;
1562 t->event_context = context;
1566 void dm_table_event(struct dm_table *t)
1569 if (t->event_fn)
1570 t->event_fn(t->event_context);
1575 inline sector_t dm_table_get_size(struct dm_table *t)
1577 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1587 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1592 if (unlikely(sector >= dm_table_get_size(t)))
1595 for (l = 0; l < t->depth; l++) {
1597 node = get_node(t, l, n);
1604 return &t->targets[(KEYS_PER_NODE * n) + k];
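dm_table_find_target() walks the index top-down: at each level it scans one node for the first separator key that is >= the requested sector, and that choice selects the child to descend into (or, on the leaf level, the target). The sketch below continues the toy_table model given after setup_btree_index earlier in this listing (same struct, get_node() and get_child()); the caller is assumed to have checked the sector against the table size, as the kernel does with dm_table_get_size().

static unsigned int find_target_index(struct toy_table *t, sector_t sector)
{
        unsigned int l, n = 0, k = 0;
        sector_t *node;

        for (l = 0; l < t->depth; l++) {
                /* descend into the child chosen on the previous level */
                n = get_child(n, k);
                node = get_node(t, l, n);

                /* first separator >= sector picks the child, or, on the
                 * leaf level, the slot within the node */
                for (k = 0; k < KEYS_PER_NODE; k++)
                        if (node[k] >= sector)
                                break;
        }

        /* leaf position maps directly onto the targets[]/highs[] arrays */
        return (KEYS_PER_NODE * n) + k;
}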
1628 * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
1630 static bool dm_table_any_dev_attr(struct dm_table *t,
1633 for (unsigned int i = 0; i < t->num_targets; i++) {
1634 struct dm_target *ti = dm_table_get_target(t, i);
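The comment above spells out the idiom the following helpers rely on: "does every underlying device support X" is answered by asking whether any device fails an anti-predicate. A compilable toy model of that double negation follows; the toy_dev/toy_table types are invented, while the kernel version walks each target's devices through the target type's iterate_devices callback.

#include <stdbool.h>
#include <stdio.h>

struct toy_dev {
        bool supports_discard;
};

struct toy_table {
        unsigned int num_devs;
        struct toy_dev devs[4];
};

typedef bool (*dev_attr_fn)(struct toy_dev *dev, void *data);

/* Generic walker: true if ANY device matches the given attribute. */
static bool any_dev_attr(struct toy_table *t, dev_attr_fn fn, void *data)
{
        for (unsigned int i = 0; i < t->num_devs; i++)
                if (fn(&t->devs[i], data))
                        return true;
        return false;
}

/* Anti-predicate: device does NOT support the capability in question. */
static bool device_not_discard_capable(struct toy_dev *dev, void *data)
{
        return !dev->supports_discard;
}

static bool table_supports_discards(struct toy_table *t)
{
        /* "every device supports it" == "no device lacks it" */
        return !any_dev_attr(t, device_not_discard_capable, NULL);
}

int main(void)
{
        struct toy_table t = {
                .num_devs = 2,
                .devs = { { .supports_discard = true },
                          { .supports_discard = false } },
        };

        printf("discards: %d\n", table_supports_discards(&t));  /* 0 */
        return 0;
}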
1657 * Returns false if the result is unknown because a target doesn't
1660 bool dm_table_has_no_data_devices(struct dm_table *t)
1662 for (unsigned int i = 0; i < t->num_targets; i++) {
1663 struct dm_target *ti = dm_table_get_target(t, i);
1677 bool dm_table_is_wildcard(struct dm_table *t)
1679 for (unsigned int i = 0; i < t->num_targets; i++) {
1680 struct dm_target *ti = dm_table_get_target(t, i);
1710 static bool dm_table_supports_zoned(struct dm_table *t, bool zoned)
1712 for (unsigned int i = 0; i < t->num_targets; i++) {
1713 struct dm_target *ti = dm_table_get_target(t, i);
1756 static int validate_hardware_zoned(struct dm_table *t, bool zoned,
1762 if (!dm_table_supports_zoned(t, zoned)) {
1764 dm_device_name(t->md));
1772 if (dm_table_any_dev_attr(t, device_not_matches_zone_sectors, &zone_sectors)) {
1774 dm_device_name(t->md));
1784 int dm_calculate_queue_limits(struct dm_table *t,
1793 t->integrity_supported = true;
1794 for (unsigned int i = 0; i < t->num_targets; i++) {
1795 struct dm_target *ti = dm_table_get_target(t, i);
1798 t->integrity_supported = false;
1801 for (unsigned int i = 0; i < t->num_targets; i++) {
1802 struct dm_target *ti = dm_table_get_target(t, i);
1848 dm_device_name(t->md),
1852 if (t->integrity_supported ||
1857 dm_device_name(t->md),
1860 t->integrity_supported = false;
1880 if (validate_hardware_zoned(t, zoned, zone_sectors))
1883 return validate_hardware_logical_block_alignment(t, limits);
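dm_calculate_queue_limits() folds the limits of every target's devices into one set for the whole table; restrictive caps shrink, granularity and alignment requirements grow, and validate_hardware_logical_block_alignment() then checks that target boundaries still land on the combined logical block size. A deliberately tiny user-space illustration of that folding direction follows; toy_limits and the two example devices are invented, and the kernel uses struct queue_limits plus blk_stack_limits(), which cover many more fields and alignment offsets.

#include <stdio.h>

struct toy_limits {
        unsigned int max_sectors;               /* cap: take the minimum */
        unsigned int logical_block_size;        /* granularity: take the maximum */
};

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

static void stack_limits(struct toy_limits *t, const struct toy_limits *b)
{
        t->max_sectors = min_u(t->max_sectors, b->max_sectors);
        t->logical_block_size = max_u(t->logical_block_size, b->logical_block_size);
}

int main(void)
{
        /* start from "no restrictions" so the first device wins outright */
        struct toy_limits combined = { .max_sectors = ~0U, .logical_block_size = 1 };
        struct toy_limits sda  = { .max_sectors = 2048, .logical_block_size = 512 };
        struct toy_limits nvme = { .max_sectors = 512,  .logical_block_size = 4096 };

        stack_limits(&combined, &sda);
        stack_limits(&combined, &nvme);
        printf("max_sectors=%u lbs=%u\n",
               combined.max_sectors, combined.logical_block_size);
        return 0;
}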
1890 static bool dm_table_supports_flush(struct dm_table *t)
1892 for (unsigned int i = 0; i < t->num_targets; i++) {
1893 struct dm_target *ti = dm_table_get_target(t, i);
1928 static bool dm_table_supports_write_zeroes(struct dm_table *t)
1930 for (unsigned int i = 0; i < t->num_targets; i++) {
1931 struct dm_target *ti = dm_table_get_target(t, i);
1944 static bool dm_table_supports_nowait(struct dm_table *t)
1946 for (unsigned int i = 0; i < t->num_targets; i++) {
1947 struct dm_target *ti = dm_table_get_target(t, i);
1962 static bool dm_table_supports_discards(struct dm_table *t)
1964 for (unsigned int i = 0; i < t->num_targets; i++) {
1965 struct dm_target *ti = dm_table_get_target(t, i);
1991 static bool dm_table_supports_secure_erase(struct dm_table *t)
1993 for (unsigned int i = 0; i < t->num_targets; i++) {
1994 struct dm_target *ti = dm_table_get_target(t, i);
2014 static bool dm_table_supports_atomic_writes(struct dm_table *t)
2016 for (unsigned int i = 0; i < t->num_targets; i++) {
2017 struct dm_target *ti = dm_table_get_target(t, i);
2033 bool dm_table_supports_size_change(struct dm_table *t, sector_t old_size,
2036 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && dm_has_zone_plugs(t->md) &&
2040 dm_device_name(t->md));
2046 int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
2052 if (!dm_table_supports_nowait(t))
2059 if (!__table_type_bio_based(t->type))
2062 if (!dm_table_supports_discards(t)) {
2068 if (!dm_table_supports_write_zeroes(t)) {
2073 if (!dm_table_supports_secure_erase(t))
2076 if (dm_table_supports_flush(t))
2079 if (dm_table_supports_dax(t, device_not_dax_capable))
2087 r = dm_set_zones_restrictions(t, q, limits);
2090 } else if (dm_has_zone_plugs(t->md)) {
2093 dm_device_name(t->md));
2098 if (dm_table_supports_atomic_writes(t))
2112 r = dm_revalidate_zones(t, q);
2120 dm_finalize_zone_settings(t, limits);
2122 if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
2123 set_dax_synchronous(t->md->dax_dev);
2125 if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
2126 dax_write_cache(t->md->dax_dev, true);
2128 dm_update_crypto_profile(q, t);
2132 struct list_head *dm_table_get_devices(struct dm_table *t)
2134 return &t->devices;
2137 blk_mode_t dm_table_get_mode(struct dm_table *t)
2139 return t->mode;
2149 static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
2151 lockdep_assert_held(&t->md->suspend_lock);
2153 for (unsigned int i = 0; i < t->num_targets; i++) {
2154 struct dm_target *ti = dm_table_get_target(t, i);
2173 void dm_table_presuspend_targets(struct dm_table *t)
2175 if (!t)
2178 suspend_targets(t, PRESUSPEND);
2181 void dm_table_presuspend_undo_targets(struct dm_table *t)
2183 if (!t)
2186 suspend_targets(t, PRESUSPEND_UNDO);
2189 void dm_table_postsuspend_targets(struct dm_table *t)
2191 if (!t)
2194 suspend_targets(t, POSTSUSPEND);
2197 int dm_table_resume_targets(struct dm_table *t)
2202 lockdep_assert_held(&t->md->suspend_lock);
2204 for (i = 0; i < t->num_targets; i++) {
2205 struct dm_target *ti = dm_table_get_target(t, i);
2213 dm_device_name(t->md), ti->type->name, r);
2218 for (i = 0; i < t->num_targets; i++) {
2219 struct dm_target *ti = dm_table_get_target(t, i);
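suspend_targets() and dm_table_resume_targets() above fan out to optional per-target hooks on struct target_type; a preresume failure is what produces the per-target error report seen in the resume loop. Below is a kernel-style sketch of a target type wiring up those hooks: the toy_* names are invented, and the mandatory ctr/dtr/map callbacks are omitted for brevity.

#include <linux/device-mapper.h>
#include <linux/module.h>

static void toy_presuspend(struct dm_target *ti)
{
        /* stop issuing new I/O / quiesce background work */
}

static void toy_presuspend_undo(struct dm_target *ti)
{
        /* undo toy_presuspend() if the suspend is aborted */
}

static void toy_postsuspend(struct dm_target *ti)
{
        /* persist any in-core state now that I/O has drained */
}

static int toy_preresume(struct dm_target *ti)
{
        /* may fail; dm_table_resume_targets() reports the error */
        return 0;
}

static void toy_resume(struct dm_target *ti)
{
        /* restart deferred work against the (possibly new) table */
}

static struct target_type toy_target = {
        .name            = "toy",
        .version         = {1, 0, 0},
        .module          = THIS_MODULE,
        /* .ctr, .dtr and .map omitted in this sketch */
        .presuspend      = toy_presuspend,
        .presuspend_undo = toy_presuspend_undo,
        .postsuspend     = toy_postsuspend,
        .preresume       = toy_preresume,
        .resume          = toy_resume,
};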
2228 struct mapped_device *dm_table_get_md(struct dm_table *t)
2230 return t->md;
2234 const char *dm_table_device_name(struct dm_table *t)
2236 return dm_device_name(t->md);
2240 void dm_table_run_md_queue_async(struct dm_table *t)
2242 if (!dm_table_request_based(t))
2245 if (t->md->queue)
2246 blk_mq_run_hw_queues(t->md->queue, true);