Lines Matching defs:dio
60 * down by dio->blkfactor. Similarly, fs-blocksize quantities are converted
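
The blkfactor conversion this comment refers to is a pure shift: a filesystem block is (1 << i_blkbits) bytes, a device block is (1 << blkbits) bytes, and quantities move between the two units via blkfactor = i_blkbits - blkbits. A minimal standalone sketch of that arithmetic (variable names are illustrative, not the kernel's):

#include <stdio.h>

/*
 * Illustrative only: a filesystem block is (1 << i_blkbits) bytes and a
 * device block is (1 << blkbits) bytes, so quantities are converted
 * between the two units by shifting with blkfactor.
 */
int main(void)
{
	unsigned int i_blkbits = 12;	/* 4096-byte fs blocks */
	unsigned int blkbits = 9;	/* 512-byte device blocks */
	unsigned int blkfactor = i_blkbits - blkbits;
	unsigned long fs_blocks = 3;

	/* 3 fs blocks = 24 device blocks */
	printf("%lu fs blocks = %lu device blocks\n",
	       fs_blocks, fs_blocks << blkfactor);
	return 0;
}
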
95 * Deferred addition of a page to the dio. These variables are
116 struct dio {
166 static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
168 struct page **pages = dio->pages;
169 const enum req_op dio_op = dio->opf & REQ_OP_MASK;
181 if (dio->page_errors == 0)
182 dio->page_errors = ret;
183 dio->pages[0] = ZERO_PAGE(0);
203 * buffered inside the dio so that we can call iov_iter_extract_pages()
207 static inline struct page *dio_get_page(struct dio *dio,
213 ret = dio_refill_pages(dio, sdio);
218 return dio->pages[sdio->head];
221 static void dio_pin_page(struct dio *dio, struct page *page)
223 if (dio->is_pinned)
227 static void dio_unpin_page(struct dio *dio, struct page *page)
229 if (dio->is_pinned)
244 static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
246 const enum req_op dio_op = dio->opf & REQ_OP_MASK;
247 loff_t offset = dio->iocb->ki_pos;
260 if (dio->result) {
261 transferred = dio->result;
265 ((offset + transferred) > dio->i_size))
266 transferred = dio->i_size - offset;
273 ret = dio->page_errors;
275 ret = dio->io_error;
279 if (dio->end_io) {
281 err = dio->end_io(dio->iocb, offset, ret, dio->private);
293 * And this page cache invalidation has to be after dio->end_io(), as
300 kiocb_invalidate_post_direct_write(dio->iocb, ret);
302 inode_dio_end(dio->inode);
310 dio->iocb->ki_pos += transferred;
313 ret = generic_write_sync(dio->iocb, ret);
314 dio->iocb->ki_complete(dio->iocb, ret);
317 kmem_cache_free(dio_cache, dio);
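
The dio_complete() lines above (244-317) do three pieces of bookkeeping: clamp a read that ran past i_size, report the first recorded error in precedence order (page_errors before io_error), and only then invoke the ->end_io callback and advance ki_pos. A hedged userspace sketch of just the clamp-and-precedence part (the struct and function are illustrative, not the kernel's layout):

#include <stdio.h>

/*
 * Hypothetical sketch, not the kernel's struct layout: clamp a read
 * that ran past i_size, then honour the error precedence of lines
 * 273-275 (page_errors first, then io_error, else bytes transferred).
 */
struct fake_dio {
	long long i_size;	/* file size sampled at submit time */
	long long result;	/* bytes transferred so far */
	int page_errors;	/* first page-extraction error, if any */
	int io_error;		/* first bio completion error, if any */
};

static long long fake_dio_complete(struct fake_dio *dio, long long offset,
				   int is_read)
{
	long long transferred = dio->result;

	/* A read must not report bytes beyond end-of-file. */
	if (is_read && offset + transferred > dio->i_size)
		transferred = dio->i_size - offset;

	if (dio->page_errors)
		return dio->page_errors;
	if (dio->io_error)
		return dio->io_error;
	return transferred;
}

int main(void)
{
	struct fake_dio dio = { .i_size = 100, .result = 64 };

	/* A 64-byte read at offset 90 may only report 10 bytes. */
	printf("%lld\n", fake_dio_complete(&dio, 90, 1));
	return 0;
}
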
323 struct dio *dio = container_of(work, struct dio, complete_work);
325 dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
328 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
335 struct dio *dio = bio->bi_private;
336 const enum req_op dio_op = dio->opf & REQ_OP_MASK;
342 dio_bio_complete(dio, bio);
344 spin_lock_irqsave(&dio->bio_lock, flags);
345 remaining = --dio->refcount;
346 if (remaining == 1 && dio->waiter)
347 wake_up_process(dio->waiter);
348 spin_unlock_irqrestore(&dio->bio_lock, flags);
359 if (dio->result)
360 defer_completion = dio->defer_completion ||
362 dio->inode->i_mapping->nrpages);
364 INIT_WORK(&dio->complete_work, dio_aio_complete_work);
365 queue_work(dio->inode->i_sb->s_dio_done_wq,
366 &dio->complete_work);
368 dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
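
The branch at lines 359-368 decides where AIO completion work may run: interrupt context is fine unless cached pages may need invalidating. A hypothetical distillation of that predicate:

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical predicate: defer completion to a workqueue when data was
 * transferred and either the filesystem asked for deferred completion
 * or a write may need to invalidate cached pages, which cannot be done
 * from interrupt context.
 */
static bool must_defer(long long result, bool defer_requested,
		       bool is_write, unsigned long mapping_nrpages)
{
	if (!result)
		return false;	/* nothing transferred: complete inline */
	return defer_requested || (is_write && mapping_nrpages != 0);
}

int main(void)
{
	/* A 4 KiB write while the file has cached pages must defer. */
	printf("%d\n", must_defer(4096, false, true, 3));
	return 0;
}
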
377 * During I/O, bi_private points at the dio. After I/O, bi_private is used to
378 * implement a singly-linked list of completed BIOs, at dio->bio_list.
382 struct dio *dio = bio->bi_private;
385 spin_lock_irqsave(&dio->bio_lock, flags);
386 bio->bi_private = dio->bio_list;
387 dio->bio_list = bio;
388 if (--dio->refcount == 1 && dio->waiter)
389 wake_up_process(dio->waiter);
390 spin_unlock_irqrestore(&dio->bio_lock, flags);
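
Lines 385-390 show a neat trick: once a bio has completed, its bi_private field (no longer needed to find the dio) is recycled as the next link of a singly-linked list headed at dio->bio_list. A userspace analog with hypothetical names (a pthread mutex stands in for bio_lock):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical userspace analog; pthread mutex stands in for bio_lock. */
struct fake_bio {
	int id;
	void *bi_private;	/* recycled as the list link on completion */
};

struct fake_dio {
	pthread_mutex_t bio_lock;
	struct fake_bio *bio_list;	/* completed but not yet reaped */
};

static void fake_end_io(struct fake_dio *dio, struct fake_bio *bio)
{
	pthread_mutex_lock(&dio->bio_lock);
	bio->bi_private = dio->bio_list;	/* push front (LIFO) */
	dio->bio_list = bio;
	pthread_mutex_unlock(&dio->bio_lock);
}

int main(void)
{
	struct fake_dio dio = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct fake_bio a = { 1, NULL }, b = { 2, NULL };

	fake_end_io(&dio, &a);
	fake_end_io(&dio, &b);
	for (struct fake_bio *p = dio.bio_list; p; p = p->bi_private)
		printf("completed bio %d\n", p->id);	/* 2, then 1 */
	return 0;
}
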
394 dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
404 bio = bio_alloc(bdev, nr_vecs, dio->opf, GFP_KERNEL);
406 if (dio->is_async)
410 if (dio->is_pinned)
412 bio->bi_write_hint = file_inode(dio->iocb->ki_filp)->i_write_hint;
423 * bios hold a dio reference between submit_bio and ->end_io.
425 static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
427 const enum req_op dio_op = dio->opf & REQ_OP_MASK;
431 bio->bi_private = dio;
433 spin_lock_irqsave(&dio->bio_lock, flags);
434 dio->refcount++;
435 spin_unlock_irqrestore(&dio->bio_lock, flags);
437 if (dio->is_async && dio_op == REQ_OP_READ && dio->should_dirty)
440 dio->bio_disk = bio->bi_bdev->bd_disk;
452 static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
454 if (dio->is_pinned)
455 unpin_user_pages(dio->pages + sdio->head,
463 * all bios have been issued so that dio->refcount can only decrease. This
464 * requires that the caller hold a reference on the dio.
466 static struct bio *dio_await_one(struct dio *dio)
471 spin_lock_irqsave(&dio->bio_lock, flags);
479 while (dio->refcount > 1 && dio->bio_list == NULL) {
481 dio->waiter = current;
482 spin_unlock_irqrestore(&dio->bio_lock, flags);
485 spin_lock_irqsave(&dio->bio_lock, flags);
486 dio->waiter = NULL;
488 if (dio->bio_list) {
489 bio = dio->bio_list;
490 dio->bio_list = bio->bi_private;
492 spin_unlock_irqrestore(&dio->bio_lock, flags);
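
dio_await_one() sleeps while I/O is still in flight (refcount > 1, since the final reference belongs to the submitter) and nothing has been queued on bio_list, then pops one completed bio. A condition-variable analog with hypothetical names (the kernel uses set_current_state() and io_schedule() rather than a condvar):

#include <pthread.h>
#include <stddef.h>

/* Hypothetical names throughout; a condvar replaces io_schedule(). */
struct fake_bio {
	void *bi_private;
};

struct fake_dio {
	pthread_mutex_t bio_lock;
	pthread_cond_t wait;
	unsigned long refcount;		/* submitter holds the final ref */
	struct fake_bio *bio_list;
};

static struct fake_bio *fake_await_one(struct fake_dio *dio)
{
	struct fake_bio *bio = NULL;

	pthread_mutex_lock(&dio->bio_lock);
	/* Sleep while bios are in flight and none has completed yet. */
	while (dio->refcount > 1 && dio->bio_list == NULL)
		pthread_cond_wait(&dio->wait, &dio->bio_lock);
	if (dio->bio_list) {
		bio = dio->bio_list;
		dio->bio_list = bio->bi_private;	/* pop front */
	}
	pthread_mutex_unlock(&dio->bio_lock);
	return bio;
}

int main(void)
{
	struct fake_dio dio = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1, NULL
	};

	/* refcount == 1 and an empty list: returns NULL immediately. */
	return fake_await_one(&dio) == NULL ? 0 : 1;
}
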
499 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
502 const enum req_op dio_op = dio->opf & REQ_OP_MASK;
503 bool should_dirty = dio_op == REQ_OP_READ && dio->should_dirty;
507 dio->io_error = -EAGAIN;
509 dio->io_error = -EIO;
512 if (dio->is_async && should_dirty) {
525 * errors are collected in dio->io_error and should be propagated via
528 static void dio_await_completion(struct dio *dio)
532 bio = dio_await_one(dio);
534 dio_bio_complete(dio, bio);
545 static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
550 while (dio->bio_list) {
555 spin_lock_irqsave(&dio->bio_lock, flags);
556 bio = dio->bio_list;
557 dio->bio_list = bio->bi_private;
558 spin_unlock_irqrestore(&dio->bio_lock, flags);
559 ret2 = blk_status_to_errno(dio_bio_complete(dio, bio));
568 static int dio_set_defer_completion(struct dio *dio)
570 struct super_block *sb = dio->inode->i_sb;
572 if (dio->defer_completion)
574 dio->defer_completion = true;
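
dio_set_defer_completion() is idempotent: the flag is armed once, and only the first call pays for creating the per-superblock completion workqueue. A sketch of that shape, with create_wq() as a stand-in rather than a kernel API:

#include <stdbool.h>
#include <stdio.h>

/* create_wq() stands in for sb_init_dio_done_wq(); not a kernel API. */
static int create_wq(void)
{
	puts("workqueue created");
	return 0;
}

struct fake_dio {
	bool defer_completion;
};

static int set_defer_completion(struct fake_dio *dio)
{
	if (dio->defer_completion)
		return 0;		/* already armed: nothing to do */
	dio->defer_completion = true;
	return create_wq();		/* pay the setup cost once */
}

int main(void)
{
	struct fake_dio dio = { false };

	set_defer_completion(&dio);
	set_defer_completion(&dio);	/* second call is a no-op */
	return 0;
}
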
603 static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
606 const enum req_op dio_op = dio->opf & REQ_OP_MASK;
619 ret = dio->page_errors;
642 if (dio->flags & DIO_SKIP_HOLES) {
643 i_size = i_size_read(dio->inode);
648 ret = (*sdio->get_block)(dio->inode, fs_startblk,
652 dio->private = map_bh->b_private;
655 ret = dio_set_defer_completion(dio);
663 static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
669 ret = dio_bio_reap(dio, sdio);
675 dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
688 static inline int dio_bio_add_page(struct dio *dio, struct dio_submit *sdio)
700 dio_pin_page(dio, sdio->cur_page);
718 * dio, and for dropping the refcount which came from that presence.
720 static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
746 dio_bio_submit(dio, sdio);
750 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
755 if (dio_bio_add_page(dio, sdio) != 0) {
756 dio_bio_submit(dio, sdio);
757 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
759 ret = dio_bio_add_page(dio, sdio);
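
Lines 750-759 implement a try-submit-retry shape: attempt to add the deferred page to the in-flight bio; if the bio is full, submit it, open a fresh one, and try exactly once more. A toy model of that control flow (all names are stand-ins for the listing's helpers):

#include <stdio.h>

/* Toy bio with a fixed vector capacity; purely illustrative. */
struct toy_bio {
	int used, capacity;
};

static int toy_add_page(struct toy_bio *bio)
{
	if (bio->used == bio->capacity)
		return -1;	/* full, like a short bio_add_page() */
	bio->used++;
	return 0;
}

static void toy_submit(struct toy_bio *bio)
{
	printf("submitted bio with %d pages\n", bio->used);
	bio->used = 0;		/* pretend a fresh bio was allocated */
}

/* The dio_send_cur_page() shape: add; on failure submit and retry once. */
static int toy_send_page(struct toy_bio *bio)
{
	if (toy_add_page(bio) != 0) {
		toy_submit(bio);
		return toy_add_page(bio);	/* must fit in the new bio */
	}
	return 0;
}

int main(void)
{
	struct toy_bio bio = { 0, 2 };

	for (int i = 0; i < 5; i++)
		toy_send_page(&bio);	/* submits after every 2 pages */
	return 0;
}
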
778 * private part of the dio structure. If possible, we just expand the IO
782 * page to the dio instead.
785 submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
789 const enum req_op dio_op = dio->opf & REQ_OP_MASK;
801 * Can we just grow the current page's presence in the dio?
815 ret = dio_send_cur_page(dio, sdio, map_bh);
816 dio_unpin_page(dio, sdio->cur_page);
822 dio_pin_page(dio, page); /* It is in dio */
834 ret = dio_send_cur_page(dio, sdio, map_bh);
836 dio_bio_submit(dio, sdio);
837 dio_unpin_page(dio, sdio->cur_page);
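
The "can we just grow" question at line 801 boils down to a three-way contiguity test: same page, byte-contiguous within it, and mapping to the next disk block. A hypothetical distillation (field names only loosely mirror struct dio_submit):

#include <stdbool.h>

/* Field names only loosely mirror struct dio_submit; illustrative. */
struct cur_page {
	void *page;
	unsigned int offset, len;	/* bytes covered within the page */
	unsigned long long block;	/* starting disk block */
	unsigned int blkbits;		/* log2 of the block size */
};

static bool can_grow(const struct cur_page *cur, void *page,
		     unsigned int offset, unsigned long long block)
{
	return cur->page == page &&			/* same page */
	       cur->offset + cur->len == offset &&	/* byte-contiguous */
	       cur->block + (cur->len >> cur->blkbits) == block;
}

int main(void)
{
	static char pg[4096];
	struct cur_page cur = { pg, 0, 512, 100, 9 };

	/* Next 512 bytes of the same page, at disk block 101: mergeable. */
	return can_grow(&cur, pg, 512, 101) ? 0 : 1;
}
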
852 static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
880 if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
903 static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
906 const enum req_op dio_op = dio->opf & REQ_OP_MASK;
915 page = dio_get_page(dio, sdio);
936 ret = get_more_blocks(dio, sdio, map_bh);
938 dio_unpin_page(dio, page);
983 dio_unpin_page(dio, page);
991 i_size_aligned = ALIGN(i_size_read(dio->inode),
996 dio_unpin_page(dio, page);
1002 dio->result += 1 << blkbits;
1012 dio_zero_block(dio, sdio, 0, map_bh);
1030 ret = submit_page_section(dio, sdio, page,
1036 dio_unpin_page(dio, page);
1043 dio->result += this_chunk_bytes;
1052 dio_unpin_page(dio, page);
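
Within do_direct_IO(), lines 991-1002 handle reads of holes: an unmapped block that still lies inside the block-aligned file size is satisfied by zero-filling, while blocks past that boundary end the loop with a short read. An assumed simplification of the boundary arithmetic:

#include <stdio.h>
#include <string.h>

#define MY_ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long long)(a) - 1))

int main(void)
{
	unsigned int blkbits = 9;		/* 512-byte blocks */
	unsigned long long i_size = 1000;
	unsigned long long i_size_aligned = MY_ALIGN(i_size, 1ULL << blkbits);
	unsigned long long block = 1;		/* unmapped (a hole) */
	char buf[512];

	if ((block << blkbits) < i_size_aligned) {
		memset(buf, 0, sizeof(buf));	/* a hole reads as zeroes */
		printf("zero-filled block %llu\n", block);
	} else {
		/* past block-aligned EOF: stop with a short read */
	}
	return 0;
}
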
1058 static inline int drop_refcount(struct dio *dio)
1074 spin_lock_irqsave(&dio->bio_lock, flags);
1075 ret2 = --dio->refcount;
1076 spin_unlock_irqrestore(&dio->bio_lock, flags);
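
drop_refcount() completes the ownership story: the submitter and every in-flight bio each hold one reference, and whichever path drops the count to zero must run dio_complete() and free the dio. A userspace analog with hypothetical names:

#include <pthread.h>

/* Hypothetical analog: one ref for the submitter, one per in-flight bio. */
struct fake_dio {
	pthread_mutex_t bio_lock;
	long refcount;
};

static long drop_ref(struct fake_dio *dio)
{
	long remaining;

	pthread_mutex_lock(&dio->bio_lock);
	remaining = --dio->refcount;
	pthread_mutex_unlock(&dio->bio_lock);
	return remaining;	/* 0: caller must complete and free */
}

int main(void)
{
	struct fake_dio dio = { PTHREAD_MUTEX_INITIALIZER, 1 };

	if (drop_ref(&dio) == 0) {
		/* last reference dropped: dio_complete() would run here */
	}
	return 0;
}
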
1117 struct dio *dio;
1127 dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
1128 if (!dio)
1135 memset(dio, 0, offsetof(struct dio, pages));
1137 dio->flags = flags;
1138 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
1142 dio->is_pinned = iov_iter_extract_will_pin(iter);
1145 dio->i_size = i_size_read(inode);
1146 if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
1159 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
1174 dio->is_async = false;
1176 dio->is_async = false;
1178 dio->is_async = true;
1180 dio->inode = inode;
1182 dio->opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
1184 dio->opf |= REQ_NOWAIT;
1186 dio->opf = REQ_OP_READ;
1193 if (dio->is_async && iov_iter_rw(iter) == WRITE) {
1196 retval = dio_set_defer_completion(dio);
1197 else if (!dio->inode->i_sb->s_dio_done_wq) {
1203 retval = sb_init_dio_done_wq(dio->inode->i_sb);
1219 dio->end_io = end_io;
1223 dio->iocb = iocb;
1225 spin_lock_init(&dio->bio_lock);
1226 dio->refcount = 1;
1228 dio->should_dirty = user_backed_iter(iter) && iov_iter_rw(iter) == READ;
1243 retval = do_direct_IO(dio, &sdio, &map_bh);
1245 dio_cleanup(dio, &sdio);
1258 dio_zero_block(dio, &sdio, 1, &map_bh);
1263 ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
1266 dio_unpin_page(dio, sdio.cur_page);
1270 dio_bio_submit(dio, &sdio);
1278 dio_cleanup(dio, &sdio);
1285 if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
1286 inode_unlock(dio->inode);
1296 if (dio->is_async && retval == 0 && dio->result &&
1297 (iov_iter_rw(iter) == READ || dio->result == count))
1300 dio_await_completion(dio);
1302 if (drop_refcount(dio) == 0) {
1303 retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
1310 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ)
1313 kmem_cache_free(dio_cache, dio);
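
For context, __blockdev_direct_IO() is normally reached via the blockdev_direct_IO() inline wrapper from a filesystem's ->direct_IO address_space operation. A hedged kernel-side sketch, modeled loosely on how ext2 historically wired this up (myfs_* names are hypothetical; this fragment is not independently compilable):

/* Kernel-side sketch, modeled loosely on older ext2; myfs_* names are
 * hypothetical and this fragment is not independently compilable. */
static ssize_t myfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* myfs_get_block is this filesystem's get_block_t callback;
	 * blockdev_direct_IO() passes DIO_LOCKING | DIO_SKIP_HOLES down
	 * to __blockdev_direct_IO(). */
	return blockdev_direct_IO(iocb, inode, iter, myfs_get_block);
}
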
1320 dio_cache = KMEM_CACHE(dio, SLAB_PANIC);