Lines matching defs:dio in fs/iomap/direct-io.c (each match is prefixed with its source line number)
60 struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
62 if (dio->dops && dio->dops->bio_set)
64 GFP_KERNEL, dio->dops->bio_set);
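
The matches above (lines 60-64) are from the bio allocation helper. A minimal reconstruction of the full body, assuming the shape it has in mainline: a filesystem that passed a private bio_set through its iomap_dio_ops gets its bios from that pool; everyone else falls back to bio_alloc().

    static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
                    struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
    {
            /* Allocate from the filesystem's private pool when one exists. */
            if (dio->dops && dio->dops->bio_set)
                    return bio_alloc_bioset(iter->iomap.bdev, nr_vecs, opf,
                                            GFP_KERNEL, dio->dops->bio_set);
            return bio_alloc(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL);
    }
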
69 struct iomap_dio *dio, struct bio *bio, loff_t pos)
71 struct kiocb *iocb = dio->iocb;
73 atomic_inc(&dio->ref);
75 /* Sync dio can't be polled reliably */
81 if (dio->dops && dio->dops->submit_io) {
82 dio->dops->submit_io(iter, bio, pos);
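
Filled out, the submit helper around lines 69-82 looks roughly like this (a sketch assembled from the fragments plus mainline context). The atomic_inc() at line 73 is what pairs with the atomic_dec_and_test() in the completion handlers further down:

    static void iomap_dio_submit_bio(const struct iomap_iter *iter,
                    struct iomap_dio *dio, struct bio *bio, loff_t pos)
    {
            struct kiocb *iocb = dio->iocb;

            /* Every in-flight bio pins the dio with its own reference. */
            atomic_inc(&dio->ref);

            /* Sync dio can't be polled reliably */
            if ((iocb->ki_flags & IOCB_HIPRI) && !is_sync_kiocb(iocb)) {
                    bio_set_polled(bio, iocb);
                    WRITE_ONCE(iocb->private, bio);
            }

            if (dio->dops && dio->dops->submit_io)
                    dio->dops->submit_io(iter, bio, pos);
            else
                    submit_bio(bio);
    }
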
89 ssize_t iomap_dio_complete(struct iomap_dio *dio)
91 const struct iomap_dio_ops *dops = dio->dops;
92 struct kiocb *iocb = dio->iocb;
94 ssize_t ret = dio->error;
97 ret = dops->end_io(iocb, dio->size, ret, dio->flags);
100 ret = dio->size;
102 if (offset + ret > dio->i_size &&
103 !(dio->flags & IOMAP_DIO_WRITE))
104 ret = dio->i_size - offset;
119 if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE) &&
120 !(dio->flags & IOMAP_DIO_NO_INVALIDATE))
121 kiocb_invalidate_post_direct_write(iocb, dio->size);
132 if (dio->flags & IOMAP_DIO_NEED_SYNC)
135 ret += dio->done_before;
137 trace_iomap_dio_complete(iocb, dio->error, ret);
138 kfree(dio);
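
The clamp at lines 102-104 only applies to reads: if the mapping walk ran past end-of-file, the byte count handed back to userspace is trimmed to what the file actually holds. With hypothetical numbers: for i_size = 6144, an 8192-byte read at offset 4096 may complete with dio->size = 8192, but iomap_dio_complete() returns 6144 - 4096 = 2048.
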
150 struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
151 struct kiocb *iocb = dio->iocb;
153 iocb->ki_complete(iocb, iomap_dio_complete(dio));
157 * Set an error in the dio if none is set yet. We have to use cmpxchg
161 static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
163 cmpxchg(&dio->error, 0, ret);
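
Lines 157-163 are the classic "first error wins" idiom. A plain store would let whichever completion happened to run last overwrite an earlier, usually more meaningful, errno; cmpxchg() only permits the 0 -> ret transition, so racing bio completions cannot clobber the error already recorded:

    static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
    {
            /* Only the first non-zero error sticks; later ones are dropped. */
            cmpxchg(&dio->error, 0, ret);
    }
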
167 * Called when dio->ref reaches zero from an I/O completion.
169 static void iomap_dio_done(struct iomap_dio *dio)
171 struct kiocb *iocb = dio->iocb;
173 if (dio->wait_for_completion) {
178 struct task_struct *waiter = dio->submit.waiter;
180 WRITE_ONCE(dio->submit.waiter, NULL);
182 } else if (dio->flags & IOMAP_DIO_INLINE_COMP) {
184 iomap_dio_complete_work(&dio->aio.work);
185 } else if (dio->flags & IOMAP_DIO_CALLER_COMP) {
187 * If this dio is flagged with IOMAP_DIO_CALLER_COMP, then
192 iocb->private = dio;
217 INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
218 queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
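
Condensed, the completion dispatcher spread over lines 169-218 picks one of four exits once dio->ref hits zero. The sketch below is assembled from the fragments plus mainline context (iomap_dio_deferred_complete() is the mainline callback name; details in your tree may differ):

    static void iomap_dio_done(struct iomap_dio *dio)
    {
            struct kiocb *iocb = dio->iocb;

            if (dio->wait_for_completion) {
                    /* Sync I/O: wake the submitter parked in __iomap_dio_rw(). */
                    struct task_struct *waiter = dio->submit.waiter;

                    WRITE_ONCE(dio->submit.waiter, NULL);
                    blk_wake_io_task(waiter);
            } else if (dio->flags & IOMAP_DIO_INLINE_COMP) {
                    /* Safe to complete right here, even in interrupt context. */
                    WRITE_ONCE(iocb->private, NULL);
                    iomap_dio_complete_work(&dio->aio.work);
            } else if (dio->flags & IOMAP_DIO_CALLER_COMP) {
                    /* Defer iomap_dio_complete() to the caller via the iocb. */
                    iocb->private = dio;
                    iocb->dio_complete = iomap_dio_deferred_complete;
                    iocb->ki_complete(iocb, 0);
            } else {
                    struct inode *inode = file_inode(iocb->ki_filp);

                    /* Everything else punts to the sb's dio completion workqueue. */
                    INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
                    queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
            }
    }
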
224 struct iomap_dio *dio = bio->bi_private;
225 bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
228 iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
230 if (atomic_dec_and_test(&dio->ref))
231 iomap_dio_done(dio);
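
The bio end_io handler (lines 224-231) is short enough to reconstruct in full. The ordering matters: should_dirty is sampled before the reference drop, because iomap_dio_done() can lead to kfree(dio) and the dio must not be touched afterwards:

    void iomap_dio_bio_end_io(struct bio *bio)
    {
            struct iomap_dio *dio = bio->bi_private;
            bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

            if (bio->bi_status)
                    iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

            /* Last reference out completes the dio; don't touch it after this. */
            if (atomic_dec_and_test(&dio->ref))
                    iomap_dio_done(dio);

            if (should_dirty) {
                    bio_check_pages_dirty(bio);
            } else {
                    bio_release_pages(bio, false);
                    bio_put(bio);
            }
    }
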
244 struct iomap_dio *dio = ioend->io_bio.bi_private;
245 bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
249 iomap_dio_set_error(dio, ioend->io_error);
251 if (atomic_dec_and_test(&dio->ref)) {
260 if (!dio->iocb->ki_filp->f_mapping->nrpages) {
261 dio->flags |= IOMAP_DIO_INLINE_COMP;
262 dio->flags |= IOMAP_DIO_NO_INVALIDATE;
264 dio->flags &= ~IOMAP_DIO_CALLER_COMP;
265 iomap_dio_done(dio);
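
As read from the fragments at lines 244-265, this ioend-based variant re-evaluates the completion strategy after the filesystem's I/O completion work has run: if the file has no pages in the page cache (line 260) there is nothing to invalidate, so the dio can be finished inline and invalidation skipped outright; otherwise the caller-completion shortcut is dropped (line 264) and the normal iomap_dio_done() paths apply.
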
283 static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
286 struct inode *inode = file_inode(dio->iocb->ki_filp);
297 bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
301 bio->bi_private = dio;
305 iomap_dio_submit_bio(iter, dio, bio, pos);
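
The zeroing helper at lines 283-305 builds a one-segment write aimed at pre-zeroed memory and reuses the common end_io, so the padding write is refcounted like any data bio. A sketch under the assumption that ZERO_PAGE(0) backs the payload (newer trees use a dedicated zero folio, and fscrypt setup is elided here):

    static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
                    loff_t pos, unsigned len)
    {
            struct bio *bio;

            if (!len)
                    return 0;

            bio = iomap_dio_alloc_bio(iter, dio, 1,
                            REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
            bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
            bio->bi_private = dio;
            bio->bi_end_io = iomap_dio_bio_end_io;

            /* Point the single vec at the zero page; nothing to copy. */
            __bio_add_page(bio, ZERO_PAGE(0), len, 0);
            iomap_dio_submit_bio(iter, dio, bio, pos);
            return 0;
    }
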
317 struct iomap_dio *dio)
321 if (!(dio->flags & IOMAP_DIO_WRITE_THROUGH))
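
Line 321 is the early exit of the FUA check: without write-through semantics there is no flush to elide, so FUA buys nothing. A plausible full predicate, matching the long-standing iomap rules (pure overwrite, no post-I/O metadata work, and a device that honors FUA or has no volatile cache):

    static bool iomap_dio_can_use_fua(const struct iomap *iomap,
                    struct iomap_dio *dio)
    {
            /* FUA only replaces the flush needed for write-through (O_DSYNC) I/O. */
            if (!(dio->flags & IOMAP_DIO_WRITE_THROUGH))
                    return false;
            /* Unwritten conversion or COW remapping still needs completion work. */
            if (dio->flags & (IOMAP_DIO_UNWRITTEN | IOMAP_DIO_COW))
                    return false;
            return bdev_fua(iomap->bdev) || !bdev_write_cache(iomap->bdev);
    }
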
326 static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
341 !bdev_iter_is_aligned(iomap->bdev, dio->submit.iter))
344 if (dio->flags & IOMAP_DIO_WRITE) {
359 dio->flags |= IOMAP_DIO_UNWRITTEN;
364 dio->flags |= IOMAP_DIO_COW;
369 if (iomap_dio_can_use_fua(iomap, dio))
372 dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
384 ((dio->flags & IOMAP_DIO_NEED_SYNC) &&
386 dio->flags &= ~IOMAP_DIO_CALLER_COMP;
396 orig_count = iov_iter_count(dio->submit.iter);
397 iov_iter_truncate(dio->submit.iter, length);
399 if (!iov_iter_count(dio->submit.iter))
407 if (!(dio->flags & (IOMAP_DIO_INLINE_COMP|IOMAP_DIO_CALLER_COMP)))
408 dio->iocb->ki_flags &= ~IOCB_HIPRI;
414 ret = iomap_dio_zero(iter, dio, pos - pad, pad);
419 nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
422 if (dio->error) {
423 iov_iter_revert(dio->submit.iter, copied);
428 bio = iomap_dio_alloc_bio(iter, dio, nr_pages, bio_opf);
433 bio->bi_ioprio = dio->iocb->ki_ioprio;
434 bio->bi_private = dio;
437 ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
461 if (dio->flags & IOMAP_DIO_WRITE)
463 else if (dio->flags & IOMAP_DIO_DIRTY)
466 dio->size += n;
469 nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
475 dio->iocb->ki_flags &= ~IOCB_HIPRI;
476 iomap_dio_submit_bio(iter, dio, bio, pos);
488 ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
492 ret = iomap_dio_zero(iter, dio, pos,
497 iov_iter_reexpand(dio->submit.iter, orig_count - copied);
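
The two iomap_dio_zero() call sites (lines 414 and 492) implement head and tail padding for writes that are not aligned to the filesystem block. The arithmetic, with hypothetical numbers and a 4096-byte block:

    /*
     * Head: write starts at pos = 4608.
     *   pad = pos & (fs_block_size - 1);              -> 512
     *   iomap_dio_zero(iter, dio, pos - pad, pad);    -> zeroes [4096, 4608)
     *
     * Tail: write ends at pos = 6656.
     *   pad = pos & (fs_block_size - 1);              -> 2560
     *   iomap_dio_zero(iter, dio, pos,
     *                  fs_block_size - pad);          -> zeroes [6656, 8192)
     */
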
503 static int iomap_dio_hole_iter(struct iomap_iter *iter, struct iomap_dio *dio)
505 loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);
507 dio->size += length;
513 static int iomap_dio_inline_iter(struct iomap_iter *iomi, struct iomap_dio *dio)
516 struct iov_iter *iter = dio->submit.iter;
525 if (dio->flags & IOMAP_DIO_WRITE) {
539 dio->size += copied;
545 static int iomap_dio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
549 if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
551 return iomap_dio_hole_iter(iter, dio);
553 if (!(dio->flags & IOMAP_DIO_WRITE))
554 return iomap_dio_hole_iter(iter, dio);
555 return iomap_dio_bio_iter(iter, dio);
557 return iomap_dio_bio_iter(iter, dio);
559 return iomap_dio_inline_iter(iter, dio);
568 dio->iocb->ki_filp, current->comm);
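
The dispatcher at lines 545-568 is a switch over the extent type the filesystem reported. A hedged reconstruction (the IOMAP_DELALLOC warning whose format arguments appear at line 568 is elided to a comment):

    static int iomap_dio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
    {
            switch (iter->iomap.type) {
            case IOMAP_HOLE:
                    /* A write mapped into a hole is a filesystem bug. */
                    if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
                            return -EIO;
                    return iomap_dio_hole_iter(iter, dio);
            case IOMAP_UNWRITTEN:
                    /* Reads of unwritten extents see zeroes, like a hole. */
                    if (!(dio->flags & IOMAP_DIO_WRITE))
                            return iomap_dio_hole_iter(iter, dio);
                    return iomap_dio_bio_iter(iter, dio);
            case IOMAP_MAPPED:
                    return iomap_dio_bio_iter(iter, dio);
            case IOMAP_INLINE:
                    return iomap_dio_inline_iter(iter, dio);
            case IOMAP_DELALLOC:
                    /* Collision with buffered writes; warn (line 568) and fail. */
                    return -EIO;
            default:
                    WARN_ON_ONCE(1);
                    return -EIO;
            }
    }
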
612 struct iomap_dio *dio;
620 dio = kmalloc(sizeof(*dio), GFP_KERNEL);
621 if (!dio)
624 dio->iocb = iocb;
625 atomic_set(&dio->ref, 1);
626 dio->size = 0;
627 dio->i_size = i_size_read(inode);
628 dio->dops = dops;
629 dio->error = 0;
630 dio->flags = 0;
631 dio->done_before = done_before;
633 dio->submit.iter = iter;
634 dio->submit.waiter = current;
641 dio->flags |= IOMAP_DIO_INLINE_COMP;
643 if (iomi.pos >= dio->i_size)
647 dio->flags |= IOMAP_DIO_DIRTY;
654 dio->flags |= IOMAP_DIO_WRITE;
663 dio->flags |= IOMAP_DIO_CALLER_COMP;
667 if (iomi.pos >= dio->i_size ||
668 iomi.pos + iomi.len > dio->i_size)
678 dio->flags |= IOMAP_DIO_NEED_SYNC;
690 dio->flags |= IOMAP_DIO_WRITE_THROUGH;
729 iomi.status = iomap_dio_iter(&iomi, dio);
744 if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
745 iov_iter_revert(iter, iomi.pos - dio->i_size);
747 if (ret == -EFAULT && dio->size && (dio_flags & IOMAP_DIO_PARTIAL)) {
759 iomap_dio_set_error(dio, ret);
766 if (dio->flags & IOMAP_DIO_WRITE_THROUGH)
767 dio->flags &= ~IOMAP_DIO_NEED_SYNC;
771 * might be the last reference to the dio. There are three different
775 * the dio ourselves.
777 * iocb, we must never touch the dio after the decrement, the
784 dio->wait_for_completion = wait_for_completion;
785 if (!atomic_dec_and_test(&dio->ref)) {
793 if (!READ_ONCE(dio->submit.waiter))
801 return dio;
804 kfree(dio);
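
The tail of __iomap_dio_rw() (lines 784-804) is where the initial reference taken at line 625 is finally dropped. A sketch of the handoff, minus tracing: if other references remain, async callers get -EIOCBQUEUED and must not touch the dio again, while sync callers sleep until iomap_dio_done() clears submit.waiter:

            dio->wait_for_completion = wait_for_completion;
            if (!atomic_dec_and_test(&dio->ref)) {
                    if (!wait_for_completion)
                            return ERR_PTR(-EIOCBQUEUED);

                    /* Sync I/O: park until the last bio completion wakes us. */
                    for (;;) {
                            set_current_state(TASK_UNINTERRUPTIBLE);
                            if (!READ_ONCE(dio->submit.waiter))
                                    break;
                            blk_io_schedule();
                    }
                    __set_current_state(TASK_RUNNING);
            }
            return dio;
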
816 struct iomap_dio *dio;
818 dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, private,
820 if (IS_ERR_OR_NULL(dio))
821 return PTR_ERR_OR_ZERO(dio);
822 return iomap_dio_complete(dio);
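
For context, this is how a filesystem typically drives the wrapper at lines 816-822. myfs_file_read_iter and myfs_iomap_ops are made-up names for illustration; the argument list follows the seven-parameter form matched above (ops, dops, flags, private, done_before), and real callers also take the appropriate inode locks around the call:

    static ssize_t myfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
    {
            if (iocb->ki_flags & IOCB_DIRECT)
                    /* No dops, no flags, no private data, nothing done before. */
                    return iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL,
                                        0, NULL, 0);
            return generic_file_read_iter(iocb, to);
    }
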