/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#ifndef DM_BUFIO_H
#define DM_BUFIO_H

#include <linux/blkdev.h>
#include <linux/types.h>

/*----------------------------------------------------------------*/

struct dm_bufio_client;
struct dm_buffer;

/*
 * Create a buffered IO cache on a given device.
 */
struct dm_bufio_client *
dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
		       unsigned reserved_buffers, unsigned aux_size,
		       void (*alloc_callback)(struct dm_buffer *),
		       void (*write_callback)(struct dm_buffer *));

/*
 * Release a buffered IO cache.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c);

/*
 * WARNING: to avoid deadlocks, these conditions must be observed:
 *
 * - At most one thread can hold at most "reserved_buffers" buffers
 *   simultaneously.
 * - Every other thread can hold at most one buffer.
 * - Threads which call only dm_bufio_get can hold an unlimited number of
 *   buffers.
 */

/*
 * Read a given block from disk. Returns a pointer to the data. Also returns,
 * via *bp, a pointer to the dm_buffer that can be used to release the buffer
 * or to mark it dirty.
 */
void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp);

/*
 * Like dm_bufio_read, but return the buffer only if it is already in the
 * cache; don't read it from disk. If the buffer is not in the cache,
 * return NULL.
 */
void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp);

/*
 * Like dm_bufio_read, but don't read anything from the disk. It is
 * expected that the caller initializes the buffer and marks it dirty.
 */
void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp);

/*
 * Release a reference obtained with dm_bufio_{read,get,new}. The data
 * pointer and the dm_buffer pointer are no longer valid after this call.
 */
void dm_bufio_release(struct dm_buffer *b);

/*
 * Mark a buffer dirty. It should be called after the buffer is modified.
 *
 * In case of memory pressure, the buffer may be written after
 * dm_bufio_mark_buffer_dirty, but before dm_bufio_write_dirty_buffers. So
 * dm_bufio_write_dirty_buffers guarantees that the buffer is on disk, but
 * the actual writing may occur earlier.
 */
void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);

/*
 * Initiate writing of dirty buffers, without waiting for completion.
 */
void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c);

/*
 * Write all dirty buffers. Guarantees that all dirty buffers created prior
 * to this call are on disk when this call exits.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c);

/*
 * Send an empty write barrier to the device to flush the hardware disk cache.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c);
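/*
 * Example (illustration only, not part of this interface): a minimal sketch
 * of the usual read-modify-write cycle with the functions above. The function
 * name is hypothetical, the IS_ERR/PTR_ERR checks assume the error-pointer
 * convention used by callers of this API, and <linux/err.h> and
 * <linux/string.h> are assumed to be included.
 *
 *	static int example_zero_block(struct dm_bufio_client *c, sector_t block)
 *	{
 *		struct dm_buffer *b;
 *		void *data;
 *
 *		data = dm_bufio_read(c, block, &b);	// read the block, take a reference
 *		if (IS_ERR(data))
 *			return PTR_ERR(data);
 *
 *		memset(data, 0, dm_bufio_get_block_size(c));	// modify the cached data
 *		dm_bufio_mark_buffer_dirty(b);		// queue the buffer for writeback
 *		dm_bufio_release(b);			// drop the reference
 *
 *		return dm_bufio_write_dirty_buffers(c);	// wait until it is on disk
 *	}
 */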
/*
 * Like dm_bufio_release, but also move the buffer to the new block.
 * dm_bufio_write_dirty_buffers is needed to commit the new block.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
sector_t dm_bufio_get_block_number(struct dm_buffer *b);
void *dm_bufio_get_block_data(struct dm_buffer *b);
void *dm_bufio_get_aux_data(struct dm_buffer *b);
struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b);

/*----------------------------------------------------------------*/

#endif
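/*
 * Example (illustration only, not part of this interface): a hedged sketch of
 * relocating a cached block with dm_bufio_release_move. The function name is
 * hypothetical and the IS_ERR/PTR_ERR check is an assumption about the error
 * convention; as documented above, dm_bufio_write_dirty_buffers is what
 * commits the data at the new location.
 *
 *	static int example_move_block(struct dm_bufio_client *c,
 *				      sector_t old_block, sector_t new_block)
 *	{
 *		struct dm_buffer *b;
 *		void *data;
 *
 *		data = dm_bufio_read(c, old_block, &b);	// read and pin the old block
 *		if (IS_ERR(data))
 *			return PTR_ERR(data);
 *
 *		dm_bufio_release_move(b, new_block);	// release and retarget to new_block
 *		return dm_bufio_write_dirty_buffers(c);	// commit the block at its new location
 *	}
 */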