/* SPDX-License-Identifier: GPL-2.0 */
#ifndef KUBLK_INTERNAL_H
#define KUBLK_INTERNAL_H

#include <unistd.h>
#include <stdlib.h>
#include <assert.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <pthread.h>
#include <getopt.h>
#include <limits.h>
#include <poll.h>
#include <fcntl.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/inotify.h>
#include <sys/wait.h>
#include <sys/eventfd.h>
#include <sys/uio.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <linux/io_uring.h>
#include <liburing.h>
#include <semaphore.h>

/* allow ublk_dep.h to override ublk_cmd.h */
#include "ublk_dep.h"
#include <linux/ublk_cmd.h>

#define __maybe_unused __attribute__((unused))
#define MAX_BACK_FILES 4
#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif

#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))

/****************** part 1: libublk ********************/

#define CTRL_DEV "/dev/ublk-control"
#define UBLKC_DEV "/dev/ublkc"
#define UBLKB_DEV "/dev/ublkb"
#define UBLK_CTRL_RING_DEPTH 32
#define ERROR_EVTFD_DEVID -2

/* queue idle timeout */
#define UBLKSRV_IO_IDLE_SECS 20

#define UBLK_IO_MAX_BYTES (1 << 20)
#define UBLK_MAX_QUEUES 32
#define UBLK_QUEUE_DEPTH 1024

#define UBLK_DBG_DEV (1U << 0)
#define UBLK_DBG_QUEUE (1U << 1)
#define UBLK_DBG_IO_CMD (1U << 2)
#define UBLK_DBG_IO (1U << 3)
#define UBLK_DBG_CTRL_CMD (1U << 4)
#define UBLK_LOG (1U << 5)

struct ublk_dev;
struct ublk_queue;

struct stripe_ctx {
	/* stripe */
	unsigned int chunk_size;
};

struct fault_inject_ctx {
	/* fault_inject */
	unsigned long delay_us;
};

struct dev_ctx {
	char tgt_type[16];
	unsigned long flags;
	unsigned nr_hw_queues;
	unsigned queue_depth;
	int dev_id;
	int nr_files;
	char *files[MAX_BACK_FILES];
	unsigned int logging:1;
	unsigned int all:1;
	unsigned int fg:1;
	unsigned int recovery:1;

	int _evtfd;
	int _shmid;

	/* built from shmem, only for ublk_dump_dev() */
	struct ublk_dev *shadow_dev;

	union {
		struct stripe_ctx stripe;
		struct fault_inject_ctx fault_inject;
	};
};

struct ublk_ctrl_cmd_data {
	__u32 cmd_op;
#define CTRL_CMD_HAS_DATA 1
#define CTRL_CMD_HAS_BUF 2
	__u32 flags;

	__u64 data[2];
	__u64 addr;
	__u32 len;
};

struct ublk_io {
	char *buf_addr;

#define UBLKSRV_NEED_FETCH_RQ (1UL << 0)
#define UBLKSRV_NEED_COMMIT_RQ_COMP (1UL << 1)
#define UBLKSRV_IO_FREE (1UL << 2)
#define UBLKSRV_NEED_GET_DATA (1UL << 3)
	unsigned short flags;
	unsigned short refs;	/* used by target code only */

	int result;

	unsigned short tgt_ios;
	void *private_data;
};

struct ublk_tgt_ops {
	const char *name;
	int (*init_tgt)(const struct dev_ctx *ctx, struct ublk_dev *);
	void (*deinit_tgt)(struct ublk_dev *);

	int (*queue_io)(struct ublk_queue *, int tag);
	void (*tgt_io_done)(struct ublk_queue *,
			int tag, const struct io_uring_cqe *);

	/*
	 * Target-specific command line handling.
	 *
	 * Each target option is expected to take an argument.
	 */
	void (*parse_cmd_line)(struct dev_ctx *ctx, int argc, char *argv[]);
	void (*usage)(const struct ublk_tgt_ops *ops);
};
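
/*
 * Illustrative sketch (a documentation aid, not a target shipped with
 * this code): the minimal shape of a target built on ublk_tgt_ops. A
 * real target would also set dev->tgt.dev_size and dev->tgt.params in
 * ->init_tgt(); completing each request with its byte count mirrors
 * what the null target does:
 *
 *	static int demo_queue_io(struct ublk_queue *q, int tag)
 *	{
 *		ublk_complete_io(q, tag, ublk_get_iod(q, tag)->nr_sectors << 9);
 *		return 0;
 *	}
 *
 *	static const struct ublk_tgt_ops demo_tgt_ops = {
 *		.name = "demo",
 *		.queue_io = demo_queue_io,
 *	};
 */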

struct ublk_tgt {
	unsigned long dev_size;
	unsigned int sq_depth;
	unsigned int cq_depth;
	const struct ublk_tgt_ops *ops;
	struct ublk_params params;

	int nr_backing_files;
	unsigned long backing_file_size[MAX_BACK_FILES];
	char backing_file[MAX_BACK_FILES][PATH_MAX];
};

struct ublk_queue {
	int q_id;
	int q_depth;
	unsigned int cmd_inflight;
	unsigned int io_inflight;
	struct ublk_dev *dev;
	const struct ublk_tgt_ops *tgt_ops;
	struct ublksrv_io_desc *io_cmd_buf;
	struct io_uring ring;
	struct ublk_io ios[UBLK_QUEUE_DEPTH];
#define UBLKSRV_QUEUE_STOPPING (1U << 0)
#define UBLKSRV_QUEUE_IDLE (1U << 1)
#define UBLKSRV_NO_BUF (1U << 2)
#define UBLKSRV_ZC (1U << 3)
	unsigned state;
	pid_t tid;
	pthread_t thread;
};

struct ublk_dev {
	struct ublk_tgt tgt;
	struct ublksrv_ctrl_dev_info dev_info;
	struct ublk_queue q[UBLK_MAX_QUEUES];

	int fds[MAX_BACK_FILES + 1];	/* fds[0] points to /dev/ublkcN */
	int nr_fds;
	int ctrl_fd;
	struct io_uring ring;

	void *private_data;
};

#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
#endif

#ifndef container_of
#define container_of(ptr, type, member) ({ \
	unsigned long __mptr = (unsigned long)(ptr); \
	((type *)(__mptr - offsetof(type, member))); })
#endif

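/*
 * Note: like the kernel's round_up(), this bit-mask form only works
 * when "rnd" is a power of two, e.g. round_up(10, 8) == 16.
 */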
#define round_up(val, rnd) \
	(((val) + ((rnd) - 1)) & ~((rnd) - 1))

extern unsigned int ublk_dbg_mask;
extern int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag);

static inline int is_target_io(__u64 user_data)
{
	return (user_data & (1ULL << 63)) != 0;
}

static inline __u64 build_user_data(unsigned tag, unsigned op,
		unsigned tgt_data, unsigned is_target_io)
{
	assert(!(tag >> 16) && !(op >> 8) && !(tgt_data >> 16));

	/* widen before shifting: tgt_data may occupy bits 24..39 of the result */
	return tag | (op << 16) | ((__u64)tgt_data << 24) | (__u64)is_target_io << 63;
}

static inline unsigned int user_data_to_tag(__u64 user_data)
{
	return user_data & 0xffff;
}

static inline unsigned int user_data_to_op(__u64 user_data)
{
	return (user_data >> 16) & 0xff;
}

static inline unsigned int user_data_to_tgt_data(__u64 user_data)
{
	return (user_data >> 24) & 0xffff;
}

static inline unsigned short ublk_cmd_op_nr(unsigned int op)
{
	return _IOC_NR(op);
}
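
/*
 * Illustrative sketch (not called anywhere): round-trip a user_data
 * value through the encode/decode helpers above. Bits 0..15 carry the
 * tag, bits 16..23 the opcode, bits 24..39 the target data, and bit 63
 * the target-io flag.
 */
static inline __maybe_unused void ublk_user_data_example(void)
{
	__u64 data = build_user_data(7, 0x3, 0x1234, 1);

	assert(user_data_to_tag(data) == 7);
	assert(user_data_to_op(data) == 0x3);
	assert(user_data_to_tgt_data(data) == 0x1234);
	assert(is_target_io(data));
}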

static inline void ublk_err(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

static inline void ublk_log(const char *fmt, ...)
{
	if (ublk_dbg_mask & UBLK_LOG) {
		va_list ap;

		va_start(ap, fmt);
		vfprintf(stdout, fmt, ap);
		va_end(ap);
	}
}

static inline void ublk_dbg(int level, const char *fmt, ...)
{
	if (level & ublk_dbg_mask) {
		va_list ap;

		va_start(ap, fmt);
		vfprintf(stdout, fmt, ap);
		va_end(ap);
	}
}

static inline int ublk_queue_alloc_sqes(struct ublk_queue *q,
		struct io_uring_sqe *sqes[], int nr_sqes)
{
	unsigned left = io_uring_sq_space_left(&q->ring);
	int i;

	if (left < nr_sqes)
		io_uring_submit(&q->ring);

	for (i = 0; i < nr_sqes; i++) {
		sqes[i] = io_uring_get_sqe(&q->ring);
		if (!sqes[i])
			return i;
	}

	return nr_sqes;
}
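
/*
 * Illustrative sketch (hypothetical helper, not part of this header's
 * API): allocate one SQE via the helper above and queue a no-op tagged
 * as target io. Returns -1 when the ring is still full after the
 * implicit submit inside ublk_queue_alloc_sqes().
 */
static inline __maybe_unused int ublk_queue_nop_example(struct ublk_queue *q,
		unsigned tag)
{
	struct io_uring_sqe *sqe[1];

	if (ublk_queue_alloc_sqes(q, sqe, 1) < 1)
		return -1;

	io_uring_prep_nop(sqe[0]);
	sqe[0]->user_data = build_user_data(tag, 0, 0, 1);
	return 0;
}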

static inline void io_uring_prep_buf_register(struct io_uring_sqe *sqe,
		int dev_fd, int tag, int q_id, __u64 index)
{
	struct ublksrv_io_cmd *cmd = (struct ublksrv_io_cmd *)sqe->cmd;

	io_uring_prep_read(sqe, dev_fd, 0, 0, 0);
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->flags |= IOSQE_FIXED_FILE;
	sqe->cmd_op = UBLK_U_IO_REGISTER_IO_BUF;

	cmd->tag = tag;
	cmd->addr = index;
	cmd->q_id = q_id;
}

static inline void io_uring_prep_buf_unregister(struct io_uring_sqe *sqe,
		int dev_fd, int tag, int q_id, __u64 index)
{
	struct ublksrv_io_cmd *cmd = (struct ublksrv_io_cmd *)sqe->cmd;

	io_uring_prep_read(sqe, dev_fd, 0, 0, 0);
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->flags |= IOSQE_FIXED_FILE;
	sqe->cmd_op = UBLK_U_IO_UNREGISTER_IO_BUF;

	cmd->tag = tag;
	cmd->addr = index;
	cmd->q_id = q_id;
}
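
/*
 * Illustrative sketch of the zero-copy buffer flow (assumptions: fixed
 * file index 0 is /dev/ublkcN, i.e. dev->fds[0], and three SQEs fit in
 * the ring): register the request buffer, link the target io against
 * it, then unregister. The target io itself is stubbed with a no-op
 * and error handling is elided.
 */
static inline __maybe_unused void ublk_zc_flow_example(struct ublk_queue *q,
		int tag, __u64 index)
{
	struct io_uring_sqe *sqe[3];

	if (ublk_queue_alloc_sqes(q, sqe, 3) < 3)
		return;

	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, index);
	sqe[0]->flags |= IOSQE_IO_LINK;
	sqe[0]->user_data = build_user_data(tag, 0xff, 0, 1);

	/* sqe[1] stands in for the real target io using fixed buffer "index" */
	io_uring_prep_nop(sqe[1]);
	sqe[1]->flags |= IOSQE_IO_LINK;
	sqe[1]->user_data = build_user_data(tag, 0, 0, 1);

	io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, index);
	sqe[2]->user_data = build_user_data(tag, 0xfe, 0, 1);
}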

static inline void *ublk_get_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return (void *)&sqe->cmd;
}

static inline void ublk_set_io_res(struct ublk_queue *q, int tag, int res)
{
	q->ios[tag].result = res;
}

static inline int ublk_get_io_res(const struct ublk_queue *q, unsigned tag)
{
	return q->ios[tag].result;
}

static inline void ublk_mark_io_done(struct ublk_io *io, int res)
{
	io->flags |= (UBLKSRV_NEED_COMMIT_RQ_COMP | UBLKSRV_IO_FREE);
	io->result = res;
}

static inline const struct ublksrv_io_desc *ublk_get_iod(const struct ublk_queue *q, int tag)
{
	return &q->io_cmd_buf[tag];
}

static inline void ublk_set_sqe_cmd_op(struct io_uring_sqe *sqe, __u32 cmd_op)
{
	__u32 *addr = (__u32 *)&sqe->off;

	addr[0] = cmd_op;
	addr[1] = 0;
}

static inline struct ublk_io *ublk_get_io(struct ublk_queue *q, unsigned tag)
{
	return &q->ios[tag];
}

static inline int ublk_complete_io(struct ublk_queue *q, unsigned tag, int res)
{
	struct ublk_io *io = &q->ios[tag];

	ublk_mark_io_done(io, res);

	return ublk_queue_io_cmd(q, io, tag);
}

static inline void ublk_queued_tgt_io(struct ublk_queue *q, unsigned tag, int queued)
{
	if (queued < 0)
		ublk_complete_io(q, tag, queued);
	else {
		struct ublk_io *io = ublk_get_io(q, tag);

		q->io_inflight += queued;
		io->tgt_ios = queued;
		io->result = 0;
	}
}

static inline int ublk_completed_tgt_io(struct ublk_queue *q, unsigned tag)
{
	struct ublk_io *io = ublk_get_io(q, tag);

	q->io_inflight--;

	return --io->tgt_ios == 0;
}
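
/*
 * Illustrative sketch of a target ->tgt_io_done() handler, modeled on
 * how the helpers above are meant to be combined (hypothetical, not
 * wired to any ops table): record a failure, then commit the result
 * once the last target io for this tag completes. A real target would
 * typically also accumulate bytes transferred into the io result.
 */
static inline __maybe_unused void ublk_tgt_io_done_example(struct ublk_queue *q,
		int tag, const struct io_uring_cqe *cqe)
{
	if (cqe->res < 0)
		ublk_set_io_res(q, tag, cqe->res);

	if (ublk_completed_tgt_io(q, tag))
		ublk_complete_io(q, tag, ublk_get_io_res(q, tag));
}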

static inline int ublk_queue_use_zc(const struct ublk_queue *q)
{
	return q->state & UBLKSRV_ZC;
}

extern const struct ublk_tgt_ops null_tgt_ops;
extern const struct ublk_tgt_ops loop_tgt_ops;
extern const struct ublk_tgt_ops stripe_tgt_ops;
extern const struct ublk_tgt_ops fault_inject_tgt_ops;

void backing_file_tgt_deinit(struct ublk_dev *dev);
int backing_file_tgt_init(struct ublk_dev *dev);

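/*
 * Integer base-2 logarithm via count-leading-zeros, e.g.
 * ilog2(4096) == 12. ilog2(0) is mathematically undefined; this
 * version returns 0 for it so callers need not special-case zero.
 */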
static inline unsigned int ilog2(unsigned int x)
{
	if (x == 0)
		return 0;
	return (sizeof(x) * 8 - 1) - __builtin_clz(x);
}
#endif /* KUBLK_INTERNAL_H */