// SPDX-License-Identifier: GPL-2.0

#include "kublk.h"

#define NR_STRIPE MAX_BACK_FILES

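/*
 * Stripe target: one ublk device is backed by conf->nr_files regular
 * files.  The LBA space is split into chunks of (1 << shift) bytes and
 * the chunks are laid out round-robin across the backing files, RAID0
 * style.  A struct stripe describes the contiguous region one request
 * touches on a single backing file, together with the iovecs covering
 * that file's slices of the request buffer.
 */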
struct stripe_conf {
	unsigned nr_files;
	unsigned shift;
};

struct stripe {
	loff_t start;
	unsigned nr_sects;
	int seq;

	struct iovec *vec;
	unsigned nr_vec;
	unsigned cap;
};

struct stripe_array {
	struct stripe s[NR_STRIPE];
	unsigned nr;
	struct iovec _vec[];
};

static inline const struct stripe_conf *get_chunk_shift(const struct ublk_queue *q)
{
	return (struct stripe_conf *)q->dev->private_data;
}

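/*
 * Upper bound on the number of iovecs any single backing file may need
 * for this request: one per stripe unit the request touches, where a
 * unit covers one chunk on every backing file.
 */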
static inline unsigned calculate_nr_vec(const struct stripe_conf *conf,
		const struct ublksrv_io_desc *iod)
{
	const unsigned shift = conf->shift - 9;
	const unsigned unit_sects = conf->nr_files << shift;
	loff_t start = iod->start_sector;
	loff_t end = start + iod->nr_sectors;

	return (end / unit_sects) - (start / unit_sects) + 1;
}

static struct stripe_array *alloc_stripe_array(const struct stripe_conf *conf,
		const struct ublksrv_io_desc *iod)
{
	unsigned nr_vecs = calculate_nr_vec(conf, iod);
	unsigned total = nr_vecs * conf->nr_files;
	struct stripe_array *s;
	int i;

	s = malloc(sizeof(*s) + total * sizeof(struct iovec));

	s->nr = 0;
	for (i = 0; i < conf->nr_files; i++) {
		struct stripe *t = &s->s[i];

		t->nr_vec = 0;
		t->vec = &s->_vec[i * nr_vecs];
		t->nr_sects = 0;
		t->cap = nr_vecs;
	}

	return s;
}

static void free_stripe_array(struct stripe_array *s)
{
	free(s);
}

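/*
 * Split the request [start_sector, start_sector + nr_sectors) into
 * chunk-aligned pieces and distribute them round-robin over the
 * backing files.  Pieces landing on the same file are merged into one
 * contiguous struct stripe, while each piece still gets its own iovec
 * pointing at its slice of the request buffer.
 */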
static void calculate_stripe_array(const struct stripe_conf *conf,
		const struct ublksrv_io_desc *iod, struct stripe_array *s)
{
	const unsigned shift = conf->shift - 9;
	const unsigned chunk_sects = 1 << shift;
	const unsigned unit_sects = conf->nr_files << shift;
	off64_t start = iod->start_sector;
	off64_t end = start + iod->nr_sectors;
	unsigned long done = 0;
	unsigned idx = 0;

	while (start < end) {
		unsigned nr_sects = chunk_sects - (start & (chunk_sects - 1));
		loff_t unit_off = (start / unit_sects) * unit_sects;
		unsigned seq = (start - unit_off) >> shift;
		struct stripe *this = &s->s[idx];
		loff_t stripe_off = (unit_off / conf->nr_files) +
			(start & (chunk_sects - 1));

		if (nr_sects > end - start)
			nr_sects = end - start;
		if (this->nr_sects == 0) {
			this->nr_sects = nr_sects;
			this->start = stripe_off;
			this->seq = seq;
			s->nr += 1;
		} else {
			assert(seq == this->seq);
			assert(this->start + this->nr_sects == stripe_off);
			this->nr_sects += nr_sects;
		}

		assert(this->nr_vec < this->cap);
		this->vec[this->nr_vec].iov_base = (void *)(iod->addr + done);
		this->vec[this->nr_vec++].iov_len = nr_sects << 9;

		start += nr_sects;
		done += nr_sects << 9;
		idx = (idx + 1) % conf->nr_files;
	}
}

static inline enum io_uring_op stripe_to_uring_op(
		const struct ublksrv_io_desc *iod, int zc)
{
	unsigned ublk_op = ublksrv_get_op(iod);

	if (ublk_op == UBLK_IO_OP_READ)
		return zc ? IORING_OP_READV_FIXED : IORING_OP_READV;
	else if (ublk_op == UBLK_IO_OP_WRITE)
		return zc ? IORING_OP_WRITEV_FIXED : IORING_OP_WRITEV;
	assert(0);
}

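/*
 * Queue one READ/WRITE as one readv/writev per involved backing file.
 * With zero copy the request buffer is registered first and
 * unregistered last, and the register, the per-file readv/writev SQEs
 * and the unregister are hard-linked so they execute in order.
 */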
static int stripe_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
{
	const struct stripe_conf *conf = get_chunk_shift(q);
	int zc = !!(ublk_queue_use_zc(q) != 0);
	enum io_uring_op op = stripe_to_uring_op(iod, zc);
	struct io_uring_sqe *sqe[NR_STRIPE];
	struct stripe_array *s = alloc_stripe_array(conf, iod);
	struct ublk_io *io = ublk_get_io(q, tag);
	int i, extra = zc ? 2 : 0;

	io->private_data = s;
	calculate_stripe_array(conf, iod, s);

	ublk_queue_alloc_sqes(q, sqe, s->nr + extra);

	if (zc) {
		io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
		sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
		sqe[0]->user_data = build_user_data(tag,
				ublk_cmd_op_nr(sqe[0]->cmd_op), 0, 1);
	}

	for (i = zc; i < s->nr + extra - zc; i++) {
		struct stripe *t = &s->s[i - zc];

		io_uring_prep_rw(op, sqe[i],
				t->seq + 1,
				(void *)t->vec,
				t->nr_vec,
				t->start << 9);
		if (zc) {
			sqe[i]->buf_index = tag;
			io_uring_sqe_set_flags(sqe[i],
					IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK);
		} else {
			io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE);
		}
		/* bit63 marks us as tgt io */
		sqe[i]->user_data = build_user_data(tag, ublksrv_get_op(iod), i - zc, 1);
	}
	if (zc) {
		struct io_uring_sqe *unreg = sqe[s->nr + 1];

		io_uring_prep_buf_unregister(unreg, 0, tag, q->q_id, tag);
		unreg->user_data = build_user_data(tag, ublk_cmd_op_nr(unreg->cmd_op), 0, 1);
	}

	/* register buffer is skip_success */
	return s->nr + zc;
}

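/* Flush fans out to one datasync fsync per backing file. */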
static int handle_flush(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
{
	const struct stripe_conf *conf = get_chunk_shift(q);
	struct io_uring_sqe *sqe[NR_STRIPE];
	int i;

	ublk_queue_alloc_sqes(q, sqe, conf->nr_files);
	for (i = 0; i < conf->nr_files; i++) {
		io_uring_prep_fsync(sqe[i], i + 1, IORING_FSYNC_DATASYNC);
		io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE);
		sqe[i]->user_data = build_user_data(tag, UBLK_IO_OP_FLUSH, 0, 1);
	}
	return conf->nr_files;
}

static int stripe_queue_tgt_io(struct ublk_queue *q, int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	unsigned ublk_op = ublksrv_get_op(iod);
	int ret = 0;

	switch (ublk_op) {
	case UBLK_IO_OP_FLUSH:
		ret = handle_flush(q, iod, tag);
		break;
	case UBLK_IO_OP_WRITE_ZEROES:
	case UBLK_IO_OP_DISCARD:
		ret = -ENOTSUP;
		break;
	case UBLK_IO_OP_READ:
	case UBLK_IO_OP_WRITE:
		ret = stripe_queue_tgt_rw_io(q, iod, tag);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	ublk_dbg(UBLK_DBG_IO, "%s: tag %d ublk io %x %llx %u ret %d\n", __func__, tag,
			iod->op_flags, iod->start_sector, iod->nr_sectors << 9, ret);
	return ret;
}

static int ublk_stripe_queue_io(struct ublk_queue *q, int tag)
{
	int queued = stripe_queue_tgt_io(q, tag);

	ublk_queued_tgt_io(q, tag, queued);
	return 0;
}

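/*
 * Per-CQE bookkeeping: remember a failure or short read/write in
 * io->result, and complete the ublk request once the last outstanding
 * target IO for this tag has finished.
 */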
static void ublk_stripe_io_done(struct ublk_queue *q, int tag,
		const struct io_uring_cqe *cqe)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	unsigned op = user_data_to_op(cqe->user_data);
	struct ublk_io *io = ublk_get_io(q, tag);
	int res = cqe->res;

	if (res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) {
		if (!io->result)
			io->result = res;
		if (res < 0)
			ublk_err("%s: io failure %d tag %u\n", __func__, res, tag);
	}

	/* buffer register op is IOSQE_CQE_SKIP_SUCCESS */
	if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
		io->tgt_ios += 1;

	/* fail short READ/WRITE simply */
	if (op == UBLK_IO_OP_READ || op == UBLK_IO_OP_WRITE) {
		unsigned seq = user_data_to_tgt_data(cqe->user_data);
		struct stripe_array *s = io->private_data;

		if (res < s->s[seq].nr_sects << 9) {
			io->result = -EIO;
			ublk_err("%s: short rw op %u res %d exp %u tag %u\n",
					__func__, op, res, s->s[seq].vec->iov_len, tag);
		}
	}

	if (ublk_completed_tgt_io(q, tag)) {
		int res = io->result;

		if (!res)
			res = iod->nr_sectors << 9;

		ublk_complete_io(q, tag, res);

		free_stripe_array(io->private_data);
		io->private_data = NULL;
	}
}

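/*
 * Validate the chunk size (power of two, 4k..512k), round every
 * backing file size down to a chunk multiple, require all files to be
 * the same size, and export the sum as the device capacity.  SQ/CQ
 * depth is scaled by the number of files (doubled for zero copy).
 */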
static int ublk_stripe_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
{
	struct ublk_params p = {
		.types = UBLK_PARAM_TYPE_BASIC,
		.basic = {
			.attrs = UBLK_ATTR_VOLATILE_CACHE,
			.logical_bs_shift = 9,
			.physical_bs_shift = 12,
			.io_opt_shift = 12,
			.io_min_shift = 9,
			.max_sectors = dev->dev_info.max_io_buf_bytes >> 9,
		},
	};
	unsigned chunk_size = ctx->stripe.chunk_size;
	struct stripe_conf *conf;
	unsigned chunk_shift;
	loff_t bytes = 0;
	int ret, i, mul = 1;

	if ((chunk_size & (chunk_size - 1)) || !chunk_size) {
		ublk_err("invalid chunk size %u\n", chunk_size);
		return -EINVAL;
	}

	if (chunk_size < 4096 || chunk_size > 512 * 1024) {
		ublk_err("invalid chunk size %u\n", chunk_size);
		return -EINVAL;
	}

	chunk_shift = ilog2(chunk_size);

	ret = backing_file_tgt_init(dev);
	if (ret)
		return ret;

	if (!dev->tgt.nr_backing_files || dev->tgt.nr_backing_files > NR_STRIPE)
		return -EINVAL;

	assert(dev->nr_fds == dev->tgt.nr_backing_files + 1);

	for (i = 0; i < dev->tgt.nr_backing_files; i++)
		dev->tgt.backing_file_size[i] &= ~((1 << chunk_shift) - 1);

	for (i = 0; i < dev->tgt.nr_backing_files; i++) {
		unsigned long size = dev->tgt.backing_file_size[i];

		if (size != dev->tgt.backing_file_size[0])
			return -EINVAL;
		bytes += size;
	}

	conf = malloc(sizeof(*conf));
	conf->shift = chunk_shift;
	conf->nr_files = dev->tgt.nr_backing_files;

	dev->private_data = conf;
	dev->tgt.dev_size = bytes;
	p.basic.dev_sectors = bytes >> 9;
	dev->tgt.params = p;

	if (dev->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY)
		mul = 2;
	dev->tgt.sq_depth = mul * dev->dev_info.queue_depth * conf->nr_files;
	dev->tgt.cq_depth = mul * dev->dev_info.queue_depth * conf->nr_files;

	printf("%s: shift %u files %u\n", __func__, conf->shift, conf->nr_files);

	return 0;
}

static void ublk_stripe_tgt_deinit(struct ublk_dev *dev)
{
	free(dev->private_data);
	backing_file_tgt_deinit(dev);
}

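/* Parse the stripe-specific --chunk_size option (bytes, default 65536). */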
static void ublk_stripe_cmd_line(struct dev_ctx *ctx, int argc, char *argv[])
{
	static const struct option longopts[] = {
		{ "chunk_size", 1, NULL, 0 },
		{ 0, 0, 0, 0 }
	};
	int option_idx, opt;

	ctx->stripe.chunk_size = 65536;
	while ((opt = getopt_long(argc, argv, "",
				longopts, &option_idx)) != -1) {
		switch (opt) {
		case 0:
			if (!strcmp(longopts[option_idx].name, "chunk_size"))
				ctx->stripe.chunk_size = strtol(optarg, NULL, 10);
		}
	}
}

static void ublk_stripe_usage(const struct ublk_tgt_ops *ops)
{
	printf("\tstripe: [--chunk_size chunk_size (default 65536)]\n");
}

const struct ublk_tgt_ops stripe_tgt_ops = {
	.name = "stripe",
	.init_tgt = ublk_stripe_tgt_init,
	.deinit_tgt = ublk_stripe_tgt_deinit,
	.queue_io = ublk_stripe_queue_io,
	.tgt_io_done = ublk_stripe_io_done,
	.parse_cmd_line = ublk_stripe_cmd_line,
	.usage = ublk_stripe_usage,
};