/linux/drivers/md/

dm.h
     47  struct dm_table;
     57  void dm_table_event_callback(struct dm_table *t, ...
     59  struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
     60  bool dm_table_has_no_data_devices(struct dm_table *table);
     61  bool dm_table_is_wildcard(struct dm_table *t);
     62  int dm_calculate_queue_limits(struct dm_table *table, ...
     64  int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, ...
     66  struct list_head *dm_table_get_devices(struct dm_table *t);
     67  void dm_table_presuspend_targets(struct dm_table *t);
     68  void dm_table_presuspend_undo_targets(struct dm_table *...
         ... further matches omitted

dm-table.c
     60  static inline sector_t *get_node(struct dm_table *t, ...   [in get_node()]
     70  static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)   [in high()]
     85  static int setup_btree_index(unsigned int l, struct dm_table *t)   [in setup_btree_index()]
    104  static int alloc_targets(struct dm_table *t, unsigned int num)   [in alloc_targets()]
    128  int dm_table_create(struct dm_table **result, blk_mode_t mode, ...   [in dm_table_create()]
    131  struct dm_table *t;   [in dm_table_create()]
    181  static void dm_table_destroy_crypto_profile(struct dm_table *t);
    183  void dm_table_destroy(struct dm_table *t)   [in dm_table_destroy()]
    372  struct dm_table *t = ti->table;   [in dm_get_device()]
    467  struct dm_table *...   [in dm_put_device()]
         ... further matches omitted
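The dm-table.c hits around dm_get_device()/dm_put_device() are where a table tracks the block devices its targets use. Below is a minimal sketch of the usual caller side of that pair; the example_* names and the single-argument constructor are hypothetical and not taken from any file in this listing.

#include <linux/device-mapper.h>

/* Hypothetical target constructor: the owning table (ti->table) keeps the
 * list of underlying devices, so targets obtain them via dm_get_device()
 * instead of opening block devices themselves. */
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_dev *dev;
	int r;

	if (argc != 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	/* Adds the device to the owning dm_table's device list. */
	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Device lookup failed";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void example_dtr(struct dm_target *ti)
{
	/* Drops the reference taken in example_ctr(). */
	dm_put_device(ti, ti->private);
}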
dm-zone.c
     19  static int dm_blk_do_report_zones(struct mapped_device *md, struct dm_table *t, ...   [in dm_blk_do_report_zones()]
     58  struct dm_table *map;   [in dm_blk_report_zones()]
     59  struct dm_table *zone_revalidate_map = md->zone_revalidate_map;   [in dm_blk_report_zones()]
    159  int dm_revalidate_zones(struct dm_table *t, struct request_queue *q)   [in dm_revalidate_zones()]
    208  static bool dm_table_supports_zone_append(struct dm_table *t)   [in dm_table_supports_zone_append()]
    338  int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q, ...   [in dm_set_zones_restrictions()]
    434  void dm_finalize_zone_settings(struct dm_table *t, struct queue_limits *lim)   [in dm_finalize_zone_settings()]
    491  int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t, ...   [in dm_zone_get_reset_bitmap()]

dm-ima.h
     60  void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_flags);
     69  static inline void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_flags) {}   [in dm_ima_measure_on_table_load()]
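The two dm-ima.h hits (a prototype at line 60, an empty inline at line 69) reflect the usual config-gated stub pattern, sketched below; the guard symbol is assumed to be CONFIG_IMA, which is not visible in the truncated listing.

#ifdef CONFIG_IMA
void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_flags);
#else
/* No-op stub so callers need no #ifdefs of their own. */
static inline void dm_ima_measure_on_table_load(struct dm_table *table,
						unsigned int status_flags) {}
#endif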
dm-ioctl.c
     53  struct dm_table *new_map;
    308  static struct dm_table *__hash_remove(struct hash_cell *hc)   [in __hash_remove()]
    310  struct dm_table *table;   [in __hash_remove()]
    342  struct dm_table *t;   [in dm_hash_remove_all()]
    431  struct dm_table *table;   [in dm_hash_rename()]
    789  static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *srcu_idx)   [in dm_get_inactive_table()]
    792  struct dm_table *table = NULL;   [in dm_get_inactive_table()]
    812  static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md, ...   [in dm_get_live_or_inactive_table()]
    827  struct dm_table *table;   [in __dev_status()]
    982  struct dm_table *...   [in dev_remove()]
         ... further matches omitted

dm.c
     84  struct dm_table *map;
    418  struct dm_table *map;   [in dm_prepare_ioctl()]
    688  struct dm_table *dm_get_live_table(struct mapped_device *md, ...   [in dm_get_live_table()]
    712  static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)   [in dm_get_live_table_fast()]
   1198  struct dm_table *map;   [in dm_dax_get_live_target()]
   1529  struct dm_table *t = ci->map;   [in __send_empty_flush()]
   1764  struct dm_table *map, struct bio *bio, bool is_abnormal)   [in init_clone_info()]
   1902  struct dm_table *t = ci->map;   [in __send_zone_reset_all()]
   1943  struct dm_table *map, struct bio *bio)   [in dm_split_and_process_bio()]
   2046  struct dm_table *ma...   [in dm_submit_bio()]
         ... further matches omitted
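dm_get_live_table() at dm.c line 688 returns the live map under SRCU protection. A minimal sketch of the caller pattern follows; live_table_size() is a made-up helper used only to show the acquire/use/release sequence.

#include <linux/device-mapper.h>

/* Hypothetical helper: the live map is published under SRCU and is only
 * guaranteed valid until the matching dm_put_live_table(). */
static sector_t live_table_size(struct mapped_device *md)
{
	struct dm_table *map;
	sector_t size = 0;
	int srcu_idx;

	map = dm_get_live_table(md, &srcu_idx);
	if (map)
		size = dm_table_get_size(map);
	dm_put_live_table(md, srcu_idx);

	return size;
}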
dm-core.h
     37  * DM targets must _not_ deference a mapped_device or dm_table to directly ...
     56  * The current mapping (struct dm_table *).
    189  struct dm_table {
    231  static inline struct dm_target *dm_table_get_target(struct dm_table *t, ...   [in dm_table_get_target()]
         struct dm_table members: md (190), type (191), depth (194), counts (195),
         index (196), num_targets (198), num_allocated (199), highs (200),
         targets (201), immutable_target_type (203), integrity_supported (205),
         singleton (206)
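The member hits at lines 190-206 outline the table's layout. Below is a rough, annotated reconstruction for orientation only; the field types, any bitfield widths, and the MAX_DEPTH bound are assumptions rather than a verbatim copy of dm-core.h.

struct dm_table {
	struct mapped_device *md;	/* owning device               (190) */
	enum dm_queue_mode type;	/* bio-based vs. request-based (191) */

	/* lookup index consulted by dm_table_find_target()     (194-196) */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* nodes per level */
	sector_t *index[MAX_DEPTH];	/* keys per level */

	unsigned int num_targets;	/*                             (198) */
	unsigned int num_allocated;	/*                             (199) */
	sector_t *highs;		/* last sector of each target  (200) */
	struct dm_target *targets;	/* one entry per table line    (201) */

	struct target_type *immutable_target_type;	/*           (203) */
	bool integrity_supported;	/*                             (205) */
	bool singleton;			/*                             (206) */

	/* ... remaining members not shown in the truncated listing ... */
};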
dm-rq.h
     34  int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);

dm-verity-loadpin.c
     56  struct dm_table *table;   [in dm_verity_loadpin_is_bdev_trusted()]

dm-rq.c
    495  struct dm_table *map;   [in dm_mq_queue_rq()]
    538  int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)   [in dm_mq_init_request_queue()]

dm-ima.c
    178  void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_flags)   [in dm_ima_measure_on_table_load()]
/linux/include/linux/

device-mapper.h
     20  struct dm_table;
    313  struct dm_table *table;
    415  * dm_table->devices and send flushes to the devices directly. This ...
    577  int dm_table_create(struct dm_table **result, blk_mode_t mode, ...
    583  int dm_table_add_target(struct dm_table *t, const char *type, ...
    592  void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);
    597  int dm_table_complete(struct dm_table *t);
    602  void dm_table_destroy(struct dm_table *t);
    612  struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
    619  sector_t dm_table_get_size(struct dm_table *...
         ... further matches omitted
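Taken together, these declarations describe the in-kernel table lifecycle: create, add targets line by line, complete, and eventually destroy, normally driven from the ioctl table-load path on behalf of userspace. The sketch below shows that call order under those assumptions; build_linear_table() and its hard-coded "linear" line are illustrative only.

#include <linux/device-mapper.h>
#include <linux/err.h>

/* Hypothetical helper: build and validate a one-target table for md.
 * Real construction happens in the ioctl table-load path; this only
 * illustrates the call order of the API declared above. */
static struct dm_table *build_linear_table(struct mapped_device *md, sector_t len)
{
	char params[] = "/dev/sdb 0";	/* "<dev> <offset>", illustrative */
	struct dm_table *t;
	int r;

	r = dm_table_create(&t, BLK_OPEN_READ | BLK_OPEN_WRITE, 1, md);
	if (r)
		return ERR_PTR(r);

	/* One call per table line: target type, start, length, params. */
	r = dm_table_add_target(t, "linear", 0, len, params);
	if (r)
		goto err;

	/* Resolves the queue type and builds the sector-lookup index. */
	r = dm_table_complete(t);
	if (r)
		goto err;

	/* The caller would normally install this as the live map next
	 * (an internal dm.c step, not shown here). */
	return t;

err:
	dm_table_destroy(t);
	return ERR_PTR(r);
}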