/*
 * Matched fragments of fs/aio.c, grouped by function; gaps between
 * matched lines are elided (marked "...").
 */
#include <linux/backing-dev.h>
#include <linux/percpu-refcount.h>

/*
 * The real limit is nr_events - 1, which will be larger (see
 * aio_setup_ring())
 */

/* signals when all in-flight requests are done */

/* access the file pointer through any of the sub-structs, ... */

/*------ sysctl variables----*/
/*----end sysctl variables---*/

        .procname       = "aio-nr",

        .procname       = "aio-max-nr",
/* aio_private_file() */
        struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);

        inode->i_mapping->a_ops = &aio_ctx_aops;
        inode->i_mapping->i_private_data = ctx;
        inode->i_size = PAGE_SIZE * nr_pages;

/* aio_init_fs_context() */
                return -ENOMEM;
        fc->s_iflags |= SB_I_NOEXEC;

/* put_aio_ring_file() */
        struct file *aio_ring_file = ctx->aio_ring_file;

        i_mapping = aio_ring_file->f_mapping;
        spin_lock(&i_mapping->i_private_lock);
        i_mapping->i_private_data = NULL;
        ctx->aio_ring_file = NULL;
        spin_unlock(&i_mapping->i_private_lock);

/* aio_free_ring() */
        for (i = 0; i < ctx->nr_pages; i++) {
                pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
                         page_count(ctx->ring_pages[i]));
                page = ctx->ring_pages[i];

                ctx->ring_pages[i] = NULL;

        if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
                kfree(ctx->ring_pages);
                ctx->ring_pages = NULL;
/* aio_ring_mremap() */
        struct file *file = vma->vm_file;
        struct mm_struct *mm = vma->vm_mm;
        int i, res = -EINVAL;

        spin_lock(&mm->ioctx_lock);
        table = rcu_dereference(mm->ioctx_table);

        for (i = 0; i < table->nr; i++) {
                ctx = rcu_dereference(table->table[i]);
                if (ctx && ctx->aio_ring_file == file) {
                        if (!atomic_read(&ctx->dead)) {
                                ctx->user_id = ctx->mmap_base = vma->vm_start;

        spin_unlock(&mm->ioctx_lock);

/* aio_ring_mmap() */
        vma->vm_ops = &aio_ring_vm_ops;

/* aio_migrate_folio() */
        /* ...
         * happen under the ctx->completion_lock. That does not work with the
         * ... */
                return -EINVAL;

        /* mapping->i_private_lock here protects against the kioctx teardown. */
        spin_lock(&mapping->i_private_lock);
        ctx = mapping->i_private_data;
                rc = -EINVAL;

        if (!mutex_trylock(&ctx->ring_lock)) {
                rc = -EAGAIN;

        idx = src->index;
        if (idx < (pgoff_t)ctx->nr_pages) {
                if (ctx->ring_pages[idx] != &src->page)
                        rc = -EAGAIN;
                rc = -EINVAL;

        spin_lock_irqsave(&ctx->completion_lock, flags);
        BUG_ON(ctx->ring_pages[idx] != &src->page);
        ctx->ring_pages[idx] = &dst->page;
        spin_unlock_irqrestore(&ctx->completion_lock, flags);

        mutex_unlock(&ctx->ring_lock);

        spin_unlock(&mapping->i_private_lock);
/* aio_setup_ring() */
        struct mm_struct *mm = current->mm;

                return -EINVAL;

                ctx->aio_ring_file = NULL;
                return -ENOMEM;

        ctx->aio_ring_file = file;
        nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))

        ctx->ring_pages = ctx->internal_pages;

                ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
                if (!ctx->ring_pages) {
                        return -ENOMEM;

                page = find_or_create_page(file->f_mapping,
                pr_debug("pid(%d) page[%d]->count=%d\n",
                         current->pid, i, page_count(page));

                ctx->ring_pages[i] = page;
        ctx->nr_pages = i;

                return -ENOMEM;

        ctx->mmap_size = nr_pages * PAGE_SIZE;
        pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

                ctx->mmap_size = 0;

                return -EINTR;

        ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,

        if (IS_ERR((void *)ctx->mmap_base)) {
                ctx->mmap_size = 0;
                return -ENOMEM;

        pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

        ctx->user_id = ctx->mmap_base;
        ctx->nr_events = nr_events; /* trusted copy */

        ring = page_address(ctx->ring_pages[0]);
        ring->nr = nr_events;   /* user copy */
        ring->id = ~0U;
        ring->head = ring->tail = 0;
        ring->magic = AIO_RING_MAGIC;
        ring->compat_features = AIO_RING_COMPAT_FEATURES;
        ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
        ring->header_length = sizeof(struct aio_ring);
        flush_dcache_page(ctx->ring_pages[0]);
#define AIO_EVENTS_FIRST_PAGE   ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET       (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
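/*
 * A standalone userspace sketch (not part of fs/aio.c) of the ring
 * geometry the two macros above encode. The first ring page loses
 * sizeof(struct aio_ring) bytes to the header, so it holds one fewer
 * event than later pages; AIO_EVENTS_OFFSET is that shortfall.
 * "struct aio_ring" is private to fs/aio.c, so a mirror of its eight
 * 32-bit header fields is assumed here, as is a 4096-byte page size.
 */
#include <linux/aio_abi.h>      /* struct io_event (32 bytes) */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL   /* assumed page size */

struct aio_ring_hdr {           /* assumed mirror of the private header */
        unsigned id, nr, head, tail;
        unsigned magic, compat_features, incompat_features, header_length;
};

int main(void)
{
        unsigned long per_page   = DEMO_PAGE_SIZE / sizeof(struct io_event);    /* 128 */
        unsigned long first_page = (DEMO_PAGE_SIZE - sizeof(struct aio_ring_hdr))
                                   / sizeof(struct io_event);                   /* 127 */
        unsigned long offset     = per_page - first_page;                       /* 1 */

        /* Event slot `idx` lands on ring page pos/per_page at entry
         * pos%per_page, mirroring the pos arithmetic in aio_complete(). */
        unsigned long idx = 200, pos = idx + offset;

        printf("event %lu -> ring page %lu, entry %lu\n",
               idx, pos / per_page, pos % per_page);
        return 0;
}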
/* kiocb_set_cancel_fn() */
        if (!(iocb->ki_flags & IOCB_AIO_RW))

        if (WARN_ON_ONCE(!list_empty(&req->ki_list)))

        ctx = req->ki_ctx;

        spin_lock_irqsave(&ctx->ctx_lock, flags);
        list_add_tail(&req->ki_list, &ctx->active_reqs);
        req->ki_cancel = cancel;
        spin_unlock_irqrestore(&ctx->ctx_lock, flags);
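/*
 * A hedged, kernel-side sketch of how a driver consumes the hook above
 * (loosely modeled on the USB gadget code); my_ep_cancel() and
 * my_ep_read_iter() are hypothetical, only kiocb_set_cancel_fn() and the
 * kiocb_cancel_fn signature come from <linux/aio.h>. Not runnable on its
 * own - it only illustrates the registration step.
 */
#include <linux/aio.h>
#include <linux/errno.h>
#include <linux/fs.h>

static int my_ep_cancel(struct kiocb *kiocb)
{
        /* abort the hardware request backing this kiocb ... */
        return 0;
}

static ssize_t my_ep_read_iter(struct kiocb *kiocb, struct iov_iter *to)
{
        /* Register the cancel hook; per the IOCB_AIO_RW check above, the
         * helper quietly ignores kiocbs that did not come from aio. */
        kiocb_set_cancel_fn(kiocb, my_ep_cancel);
        /* ... queue the I/O asynchronously ... */
        return -EIOCBQUEUED;
}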
/* free_ioctx() */
        free_percpu(ctx->cpu);
        percpu_ref_exit(&ctx->reqs);
        percpu_ref_exit(&ctx->users);

/* free_ioctx_reqs() */
        /* At this point we know that there are no in-flight requests */
        if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
                complete(&ctx->rq_wait->comp);

        /* Synchronize against RCU protected table->table[] dereferences */
        INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
        queue_rcu_work(system_wq, &ctx->free_rwork);

/* free_ioctx_users()
 * ... and ctx->users has dropped to 0, so we know no more kiocbs can be
 * submitted - ...
 */
        spin_lock_irq(&ctx->ctx_lock);

        while (!list_empty(&ctx->active_reqs)) {
                req = list_first_entry(&ctx->active_reqs,
                req->ki_cancel(&req->rw);
                list_del_init(&req->ki_list);

        spin_unlock_irq(&ctx->ctx_lock);

        percpu_ref_kill(&ctx->reqs);
        percpu_ref_put(&ctx->reqs);
/* ioctx_add_table() */
        spin_lock(&mm->ioctx_lock);
        table = rcu_dereference_raw(mm->ioctx_table);

                for (i = 0; i < table->nr; i++)
                        if (!rcu_access_pointer(table->table[i])) {
                                ctx->id = i;
                                rcu_assign_pointer(table->table[i], ctx);
                                spin_unlock(&mm->ioctx_lock);

                                /* ... changes ring_pages by ->ring_lock. */
                                ring = page_address(ctx->ring_pages[0]);
                                ring->id = ctx->id;

                new_nr = (table ? table->nr : 1) * 4;
                spin_unlock(&mm->ioctx_lock);

                        return -ENOMEM;

                table->nr = new_nr;

                spin_lock(&mm->ioctx_lock);
                old = rcu_dereference_raw(mm->ioctx_table);

                        rcu_assign_pointer(mm->ioctx_table, table);
                } else if (table->nr > old->nr) {
                        memcpy(table->table, old->table,
                               old->nr * sizeof(struct kioctx *));

                        rcu_assign_pointer(mm->ioctx_table, table);

/* aio_nr_sub() */
        if (WARN_ON(aio_nr - nr > aio_nr))

                aio_nr -= nr;
/* ioctx_alloc() */
        struct mm_struct *mm = current->mm;
        int err = -ENOMEM;

        /*
         * Store the original nr_events -- what userspace passed to io_setup(),
         * for counting against the global limit -- before it changes.
         */

                return ERR_PTR(-EINVAL);

                return ERR_PTR(-EAGAIN);

                return ERR_PTR(-ENOMEM);

        ctx->max_reqs = max_reqs;

        spin_lock_init(&ctx->ctx_lock);
        spin_lock_init(&ctx->completion_lock);
        mutex_init(&ctx->ring_lock);

        mutex_lock(&ctx->ring_lock);
        init_waitqueue_head(&ctx->wait);

        INIT_LIST_HEAD(&ctx->active_reqs);

        if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))

        if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))

        ctx->cpu = alloc_percpu(struct kioctx_cpu);
        if (!ctx->cpu)

        atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
        ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
        if (ctx->req_batch < 1)
                ctx->req_batch = 1;

        if (aio_nr + ctx->max_reqs > aio_max_nr ||
            aio_nr + ctx->max_reqs < aio_nr) {
                err = -EAGAIN;

        aio_nr += ctx->max_reqs;

        percpu_ref_get(&ctx->users);    /* io_setup() will drop this ref */
        percpu_ref_get(&ctx->reqs);     /* free_ioctx_users() will drop this */

        mutex_unlock(&ctx->ring_lock);

                 ctx, ctx->user_id, mm, ctx->nr_events);

        aio_nr_sub(ctx->max_reqs);

        atomic_set(&ctx->dead, 1);
        if (ctx->mmap_size)
                vm_munmap(ctx->mmap_base, ctx->mmap_size);

        mutex_unlock(&ctx->ring_lock);
        free_percpu(ctx->cpu);
        percpu_ref_exit(&ctx->reqs);
        percpu_ref_exit(&ctx->users);
/* kill_ioctx() */
        spin_lock(&mm->ioctx_lock);
        if (atomic_xchg(&ctx->dead, 1)) {
                spin_unlock(&mm->ioctx_lock);
                return -EINVAL;
        }

        table = rcu_dereference_raw(mm->ioctx_table);
        WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
        RCU_INIT_POINTER(table->table[ctx->id], NULL);
        spin_unlock(&mm->ioctx_lock);

        wake_up_all(&ctx->wait);

        /*
         * It'd be more correct to do this in free_ioctx(), after all
         * the outstanding kiocbs have finished - but by then io_destroy
         * has already returned, so io_setup() could potentially return
         * -EAGAIN with no ioctxs actually in use (as far as userspace
         * could tell).
         */
        aio_nr_sub(ctx->max_reqs);

        if (ctx->mmap_size)
                vm_munmap(ctx->mmap_base, ctx->mmap_size);

        ctx->rq_wait = wait;
        percpu_ref_kill(&ctx->users);

/* exit_aio() */
        struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);

        atomic_set(&wait.count, table->nr);

        for (i = 0; i < table->nr; ++i) {
                        rcu_dereference_protected(table->table[i], true);

                /*
                 * We don't need to bother with munmap() here - exit_mmap(mm)
                 * is coming and it'll unmap everything. And we simply can't,
                 * this is not necessarily our ->mm.
                 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
                 * that it needs to unmap the area, just set it to 0.
                 */
                ctx->mmap_size = 0;

        RCU_INIT_POINTER(mm->ioctx_table, NULL);
/* put_reqs_available() */
        kcpu = this_cpu_ptr(ctx->cpu);
        kcpu->reqs_available += nr;

        while (kcpu->reqs_available >= ctx->req_batch * 2) {
                kcpu->reqs_available -= ctx->req_batch;
                atomic_add(ctx->req_batch, &ctx->reqs_available);
        }

/* __get_reqs_available() */
        kcpu = this_cpu_ptr(ctx->cpu);
        if (!kcpu->reqs_available) {
                int avail = atomic_read(&ctx->reqs_available);

                do {
                        if (avail < ctx->req_batch)
                                goto out;
                } while (!atomic_try_cmpxchg(&ctx->reqs_available,
                                             &avail, avail - ctx->req_batch));

                kcpu->reqs_available += ctx->req_batch;
        }

        kcpu->reqs_available--;

/* refill_reqs_available(), called holding ctx->completion_lock */
        head %= ctx->nr_events;
        if (head <= tail)
                events_in_ring = tail - head;
        else
                events_in_ring = ctx->nr_events - (head - tail);

        completed = ctx->completed_events;
        if (completed > events_in_ring)
                completed -= events_in_ring;
        else
                completed = 0;

        ctx->completed_events -= completed;
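/*
 * A minimal userspace sketch of the head/tail wraparound arithmetic in
 * refill_reqs_available() above; the sample head/tail/nr_events values
 * are made up, not kernel state.
 */
#include <assert.h>

static unsigned events_in_ring(unsigned head, unsigned tail, unsigned nr_events)
{
        head %= nr_events;              /* clamp: userland can scribble on head */
        return head <= tail ? tail - head
                            : nr_events - (head - tail);        /* tail wrapped around */
}

int main(void)
{
        assert(events_in_ring(2, 10, 128) == 8);        /* no wrap: 10 - 2 */
        assert(events_in_ring(120, 4, 128) == 12);      /* wrapped: 8 at the end + 4 at the front */
        return 0;
}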
/* user_refill_reqs_available() */
        spin_lock_irq(&ctx->completion_lock);
        if (ctx->completed_events) {
                /* Access of ring->head may race with aio_read_events_ring()
                 * ...
                 * ctx->completion_lock. Even if head is invalid, the check
                 * against ctx->completed_events below will make sure we do the
                 * ...
                 */
                ring = page_address(ctx->ring_pages[0]);
                head = ring->head;

                refill_reqs_available(ctx, head, ctx->tail);

        spin_unlock_irq(&ctx->completion_lock);

/* aio_get_req() */
        /* The refcount is initialized to 2 - one for the async op completion, ... */
        percpu_ref_get(&ctx->reqs);
        req->ki_ctx = ctx;
        INIT_LIST_HEAD(&req->ki_list);
        refcount_set(&req->ki_refcnt, 2);
        req->ki_eventfd = NULL;

/* lookup_ioctx() */
        struct mm_struct *mm = current->mm;

        if (get_user(id, &ring->id))

        table = rcu_dereference(mm->ioctx_table);

        if (!table || id >= table->nr)

        id = array_index_nospec(id, table->nr);
        ctx = rcu_dereference(table->table[id]);
        if (ctx && ctx->user_id == ctx_id) {
                if (percpu_ref_tryget_live(&ctx->users))

/* iocb_destroy() */
        if (iocb->ki_eventfd)
                eventfd_ctx_put(iocb->ki_eventfd);
        if (iocb->ki_filp)
                fput(iocb->ki_filp);
        percpu_ref_put(&iocb->ki_ctx->reqs);
/* aio_complete() */
        struct kioctx *ctx = iocb->ki_ctx;

        /*
         * Add a completion event to the ring buffer. Must be done holding
         * ctx->completion_lock to prevent other code from messing with the tail
         * pointer since we might be called from irq context.
         */
        spin_lock_irqsave(&ctx->completion_lock, flags);

        tail = ctx->tail;

        if (++tail >= ctx->nr_events)

        ev_page = page_address(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

        *event = iocb->ki_res;

        flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

                 (void __user *)(unsigned long)iocb->ki_res.obj,
                 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);

        ctx->tail = tail;

        ring = page_address(ctx->ring_pages[0]);
        head = ring->head;
        ring->tail = tail;
        flush_dcache_page(ctx->ring_pages[0]);

        ctx->completed_events++;
        if (ctx->completed_events > 1)

        avail = tail > head
                ? tail - head
                : tail + ctx->nr_events - head;
        spin_unlock_irqrestore(&ctx->completion_lock, flags);

        if (iocb->ki_eventfd)
                eventfd_signal(iocb->ki_eventfd);

        if (waitqueue_active(&ctx->wait)) {
                spin_lock_irqsave(&ctx->wait.lock, flags);
                list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry)
                        if (avail >= curr->min_nr) {
                                list_del_init_careful(&curr->w.entry);
                                wake_up_process(curr->w.private);
                        }
                spin_unlock_irqrestore(&ctx->wait.lock, flags);
        }

/* iocb_put() */
        if (refcount_dec_and_test(&iocb->ki_refcnt)) {
/* aio_read_events_ring() */
        mutex_lock(&ctx->ring_lock);

        /* Access to ->ring_pages here is protected by ctx->ring_lock. */
        ring = page_address(ctx->ring_pages[0]);
        head = ring->head;
        tail = ring->tail;

        pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

        head %= ctx->nr_events;
        tail %= ctx->nr_events;

                avail = (head <= tail ? tail : ctx->nr_events) - head;

                page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];

                avail = min(avail, nr - ret);
                avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);

                        ret = -EFAULT;

                head %= ctx->nr_events;

        ring = page_address(ctx->ring_pages[0]);
        ring->head = head;
        flush_dcache_page(ctx->ring_pages[0]);

        mutex_unlock(&ctx->ring_lock);

/* aio_read_events() */
                long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

        if (unlikely(atomic_read(&ctx->dead)))
                ret = -EINVAL;

/* read_events() */
        /*
         * Note that aio_read_events() is being called as the conditional - i.e.
         * ...
         * TASK_RUNNING and return 0 too much - that causes us to spin. That
         * ...
         */

                hrtimer_set_expires_range_ns(&t.timer, until, current->timer_slack_ns);

                w.min_nr = min_nr - ret;

                ret2 = prepare_to_wait_event(&ctx->wait, &w.w, TASK_INTERRUPTIBLE);
                        ret2 = -ETIME;

        finish_wait(&ctx->wait, &w.w);
/* sys_io_setup()
 *      ...
 *      handle. May fail with -EINVAL if *ctxp is not initialized,
 *      ...
 *      with -EAGAIN if the specified nr_events exceeds the user's limit
 *      of available events. May fail with -ENOMEM if insufficient kernel
 *      resources are available. May fail with -EFAULT if an invalid
 *      pointer is passed for ctxp. Will fail with -ENOSYS if not
 *      implemented.
 */
        ret = -EINVAL;

        ret = put_user(ioctx->user_id, ctxp);
        if (ret)
                kill_ioctx(current->mm, ioctx, NULL);
        percpu_ref_put(&ioctx->users);
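/*
 * A minimal userspace sketch of the io_setup()/io_destroy() pair whose
 * contract is documented above, invoked through syscall(2) since glibc
 * does not wrap these; nothing below is kernel code.
 */
#include <linux/aio_abi.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        aio_context_t ctx = 0;  /* must be zeroed, or io_setup() fails with -EINVAL */

        if (syscall(SYS_io_setup, 128, &ctx) < 0) {     /* room for 128 in-flight events */
                perror("io_setup");
                return 1;
        }
        /* The handle is the ring's mmap base (ctx->user_id above). */
        printf("aio context: %#lx\n", (unsigned long)ctx);

        syscall(SYS_io_destroy, ctx);
        return 0;
}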
/* compat_sys_io_setup() */
        ret = -EINVAL;

        ret = put_user((u32)ioctx->user_id, ctx32p);
        if (ret)
                kill_ioctx(current->mm, ioctx, NULL);
        percpu_ref_put(&ioctx->users);

/* sys_io_destroy()
 *      Destroy the aio_context specified. May cancel any outstanding
 *      AIOs and block on completion. Will fail with -ENOSYS if not
 *      implemented. May fail with -EINVAL if the context pointed to
 *      is invalid.
 */
                /* ...
                 * in a thread-safe way. If we try to set it here then we have
                 * ... */
                ret = kill_ioctx(current->mm, ioctx, &wait);
                percpu_ref_put(&ioctx->users);

                /* Wait until all IO for the context is done; otherwise the
                 * kernel keeps using user-space buffers even if the user
                 * thinks the context is destroyed. */

        return -EINVAL;
/* aio_remove_iocb() */
        struct kioctx *ctx = iocb->ki_ctx;

        spin_lock_irqsave(&ctx->ctx_lock, flags);
        list_del(&iocb->ki_list);
        spin_unlock_irqrestore(&ctx->ctx_lock, flags);

/* aio_complete_rw() */
        if (!list_empty_careful(&iocb->ki_list))

        if (kiocb->ki_flags & IOCB_WRITE) {
                struct inode *inode = file_inode(kiocb->ki_filp);

                if (S_ISREG(inode->i_mode))

        iocb->ki_res.res = res;
        iocb->ki_res.res2 = 0;

/* aio_prep_rw() */
        req->ki_complete = aio_complete_rw;
        req->private = NULL;
        req->ki_pos = iocb->aio_offset;
        req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW;
        if (iocb->aio_flags & IOCB_FLAG_RESFD)
                req->ki_flags |= IOCB_EVENTFD;
        if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {

                ret = ioprio_check_cap(iocb->aio_reqprio);

                req->ki_ioprio = iocb->aio_reqprio;
        } else
                req->ki_ioprio = get_current_ioprio();

        ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);

        req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */

/* aio_setup_rw() */
        void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
        size_t len = iocb->aio_nbytes;

/* aio_rw_done() */
        case -EIOCBQUEUED:

        case -ERESTARTSYS:
        case -ERESTARTNOINTR:
        case -ERESTARTNOHAND:
        case -ERESTART_RESTARTBLOCK:

                ret = -EINTR;

                req->ki_complete(req, ret);
/* aio_read() */
        file = req->ki_filp;
        if (unlikely(!(file->f_mode & FMODE_READ)))
                return -EBADF;
        if (unlikely(!file->f_op->read_iter))
                return -EINVAL;

        ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));

/* aio_write() */
        file = req->ki_filp;

        if (unlikely(!(file->f_mode & FMODE_WRITE)))
                return -EBADF;
        if (unlikely(!file->f_op->write_iter))
                return -EINVAL;

        ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));

                if (S_ISREG(file_inode(file)->i_mode))

                req->ki_flags |= IOCB_WRITE;

/* aio_fsync_work() */
        const struct cred *old_cred = override_creds(iocb->fsync.creds);

        iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);

        put_cred(iocb->fsync.creds);

/* aio_fsync() */
        if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
                     iocb->aio_rw_flags))
                return -EINVAL;

        if (unlikely(!req->file->f_op->fsync))
                return -EINVAL;

        req->creds = prepare_creds();
        if (!req->creds)
                return -ENOMEM;

        req->datasync = datasync;
        INIT_WORK(&req->work, aio_fsync_work);
        schedule_work(&req->work);
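/*
 * A userspace sketch of queueing IOCB_CMD_FSYNC, matching the constraint
 * aio_fsync() checks above: aio_buf, aio_offset, aio_nbytes and
 * aio_rw_flags must all be zero. The temp-file path is arbitrary.
 */
#include <fcntl.h>
#include <linux/aio_abi.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        aio_context_t ctx = 0;
        struct iocb cb;
        struct iocb *cbs[1] = { &cb };
        struct io_event ev;
        int fd = open("/tmp/aio-fsync-demo", O_RDWR | O_CREAT, 0600);

        if (fd < 0 || syscall(SYS_io_setup, 8, &ctx) < 0)
                return 1;

        memset(&cb, 0, sizeof(cb));             /* leaves buf/offset/nbytes/rw_flags zero */
        cb.aio_lio_opcode = IOCB_CMD_FSYNC;     /* IOCB_CMD_FDSYNC would request datasync */
        cb.aio_fildes = fd;

        if (syscall(SYS_io_submit, ctx, 1, cbs) == 1 &&
            syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL) == 1)
                printf("fsync result: %lld\n", (long long)ev.res);

        syscall(SYS_io_destroy, ctx);
        return 0;
}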
/* poll_iocb_lock_wq()
 * ...
 * case where the ->poll() provider decides to free its waitqueue early.
 *
 * Returns true on success, meaning that req->head->lock was locked, req->wait
 * is on req->head, and an RCU read lock was taken. Returns false if the
 * request was already removed from its waitqueue (which might no longer exist).
 */
        /* ...
         * all users of wake_up_pollfree() will RCU-delay the actual free. If
         * ...
         * non-NULL, we can then lock it without the memory being freed out from
         * ...
         */
        head = smp_load_acquire(&req->head);
        if (head) {
                spin_lock(&head->lock);
                if (!list_empty(&req->wait.entry))
                        return true;
                spin_unlock(&head->lock);
        }

/* poll_iocb_unlock_wq() */
        spin_unlock(&req->head->lock);

/* aio_poll_complete_work() */
        struct poll_table_struct pt = { ._key = req->events };
        struct kioctx *ctx = iocb->ki_ctx;

        if (!READ_ONCE(req->cancelled))
                mask = vfs_poll(req->file, &pt) & req->events;

        /*
         * Note that ->ki_cancel callers also delete iocb from active_reqs after
         * calling ->ki_cancel. We need the ctx_lock roundtrip here to
         * ...
         */
        spin_lock_irq(&ctx->ctx_lock);

        if (!mask && !READ_ONCE(req->cancelled)) {

                if (req->work_need_resched) {
                        schedule_work(&req->work);
                        req->work_need_resched = false;
                } else {
                        req->work_scheduled = false;
                }

                spin_unlock_irq(&ctx->ctx_lock);

        list_del_init(&req->wait.entry);

        list_del_init(&iocb->ki_list);
        iocb->ki_res.res = mangle_poll(mask);
        spin_unlock_irq(&ctx->ctx_lock);

/* aio_poll_cancel() */
        struct poll_iocb *req = &aiocb->poll;

        WRITE_ONCE(req->cancelled, true);
        if (!req->work_scheduled) {
                schedule_work(&aiocb->poll.work);
                req->work_scheduled = true;
        } /* else, the request was force-cancelled by POLLFREE already */

/* aio_poll_wake() */
        if (mask && !(mask & req->events))

        /* ...
         * instead, then mask == 0 and we have to call vfs_poll() to get
         * ... */

        if (mask && !req->work_scheduled &&
            spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
                struct kioctx *ctx = iocb->ki_ctx;

                list_del_init(&req->wait.entry);
                list_del(&iocb->ki_list);
                iocb->ki_res.res = mangle_poll(mask);
                if (iocb->ki_eventfd && !eventfd_signal_allowed()) {

                        INIT_WORK(&req->work, aio_poll_put_work);
                        schedule_work(&req->work);

                spin_unlock_irqrestore(&ctx->ctx_lock, flags);

                if (req->work_scheduled) {
                        req->work_need_resched = true;
                } else {
                        schedule_work(&req->work);
                        req->work_scheduled = true;
                }

                /* ...
                 * cancelled, to potentially skip an unneeded call to ->poll(). */
                WRITE_ONCE(req->cancelled, true);
                list_del_init(&req->wait.entry);

                /* ...
                 * as req->head is NULL'ed out, the request can be
                 * ... */
                smp_store_release(&req->head, NULL);

/* aio_poll_queue_proc() */
        /* multiple wait queues per file are not supported */
        if (unlikely(pt->queued)) {
                pt->error = -EINVAL;
                return;
        }

        pt->queued = true;
        pt->error = 0;
        pt->iocb->poll.head = head;
        add_wait_queue(head, &pt->iocb->poll.wait);
/* aio_poll() */
        struct kioctx *ctx = aiocb->ki_ctx;
        struct poll_iocb *req = &aiocb->poll;
        struct aio_poll_table apt;

        if ((u16)iocb->aio_buf != iocb->aio_buf)
                return -EINVAL;

        if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
                return -EINVAL;

        INIT_WORK(&req->work, aio_poll_complete_work);
        req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;

        req->head = NULL;
        req->cancelled = false;
        req->work_scheduled = false;
        req->work_need_resched = false;

        apt.pt._qproc = aio_poll_queue_proc;
        apt.pt._key = req->events;
        apt.iocb = aiocb;
        apt.queued = false;
        apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */

        INIT_LIST_HEAD(&req->wait.entry);
        init_waitqueue_func_entry(&req->wait, aio_poll_wake);

        mask = vfs_poll(req->file, &apt.pt) & req->events;
        spin_lock_irq(&ctx->ctx_lock);
        if (likely(apt.queued)) {

                if (!on_queue || req->work_scheduled) {

                        if (apt.error) /* unsupported case: multiple queues */

                        apt.error = 0;

                if (mask || apt.error) {

                        list_del_init(&req->wait.entry);

                        WRITE_ONCE(req->cancelled, true);

                        list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
                        aiocb->ki_cancel = aio_poll_cancel;

                aiocb->ki_res.res = mangle_poll(mask);
                apt.error = 0;

        spin_unlock_irq(&ctx->ctx_lock);

        return apt.error;
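/*
 * A userspace sketch of IOCB_CMD_POLL as handled by aio_poll() above:
 * the poll mask travels in aio_buf (and must fit in 16 bits, per the
 * (u16) check), and the resulting revents come back in io_event.res.
 */
#include <linux/aio_abi.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        aio_context_t ctx = 0;
        struct iocb cb;
        struct iocb *cbs[1] = { &cb };
        struct io_event ev;
        int pfd[2];

        if (pipe(pfd) || syscall(SYS_io_setup, 8, &ctx) < 0)
                return 1;

        memset(&cb, 0, sizeof(cb));     /* offset/nbytes/rw_flags must be zero */
        cb.aio_lio_opcode = IOCB_CMD_POLL;
        cb.aio_fildes = pfd[0];
        cb.aio_buf = POLLIN;            /* requested events */

        syscall(SYS_io_submit, ctx, 1, cbs);
        write(pfd[1], "x", 1);          /* make the read end ready */

        if (syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL) == 1)
                printf("revents: %#llx\n", (unsigned long long)ev.res);

        syscall(SYS_io_destroy, ctx);
        return 0;
}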
/* __io_submit_one() */
        req->ki_filp = fget(iocb->aio_fildes);
        if (unlikely(!req->ki_filp))
                return -EBADF;

        if (iocb->aio_flags & IOCB_FLAG_RESFD) {
                /*
                 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
                 * instance of the file* now. The file descriptor must be
                 * an eventfd() fd, and will be signaled for each completed
                 * event using the eventfd_signal() function.
                 */
                eventfd = eventfd_ctx_fdget(iocb->aio_resfd);

                req->ki_eventfd = eventfd;
        }

        if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
                return -EFAULT;
        }

        req->ki_res.obj = (u64)(unsigned long)user_iocb;
        req->ki_res.data = iocb->aio_data;
        req->ki_res.res = 0;
        req->ki_res.res2 = 0;

        switch (iocb->aio_lio_opcode) {
        case IOCB_CMD_PREAD:
                return aio_read(&req->rw, iocb, false, compat);
        case IOCB_CMD_PWRITE:
                return aio_write(&req->rw, iocb, false, compat);
        case IOCB_CMD_PREADV:
                return aio_read(&req->rw, iocb, true, compat);
        case IOCB_CMD_PWRITEV:
                return aio_write(&req->rw, iocb, true, compat);
        case IOCB_CMD_FSYNC:
                return aio_fsync(&req->fsync, iocb, false);
        case IOCB_CMD_FDSYNC:
                return aio_fsync(&req->fsync, iocb, true);
        case IOCB_CMD_POLL:
                return aio_poll(req, iocb);
        default:
                pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
                return -EINVAL;
        }
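/*
 * A userspace sketch of the IOCB_FLAG_RESFD path handled above: the
 * completion is additionally signalled on an eventfd, so a poll loop can
 * watch one fd instead of calling io_getevents() blindly. The file read
 * here (/etc/hostname) is an arbitrary example.
 */
#include <fcntl.h>
#include <linux/aio_abi.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        aio_context_t ctx = 0;
        struct iocb cb;
        struct iocb *cbs[1] = { &cb };
        static char buf[64];
        uint64_t count;
        int efd = eventfd(0, 0);
        int fd = open("/etc/hostname", O_RDONLY);

        if (efd < 0 || fd < 0 || syscall(SYS_io_setup, 8, &ctx) < 0)
                return 1;

        memset(&cb, 0, sizeof(cb));
        cb.aio_lio_opcode = IOCB_CMD_PREAD;
        cb.aio_fildes = fd;
        cb.aio_buf    = (unsigned long)buf;
        cb.aio_nbytes = sizeof(buf);
        cb.aio_flags  = IOCB_FLAG_RESFD;        /* signal efd on completion */
        cb.aio_resfd  = efd;

        syscall(SYS_io_submit, ctx, 1, cbs);
        read(efd, &count, sizeof(count));       /* blocks until the read completes */
        printf("eventfd count: %llu\n", (unsigned long long)count);

        syscall(SYS_io_destroy, ctx);
        return 0;
}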
/* io_submit_one() */
                return -EFAULT;

                return -EINVAL;

                return -EINVAL;

                return -EAGAIN;

        /*
         * If ret is 0, we'd either done aio_complete() ourselves or have
         * arranged for that to be done asynchronously. Anything non-zero
         * means that we need to destroy req ourselves.
         */
/* sys_io_submit()
 *      ...
 *      the number of iocbs queued. May return -EINVAL if the aio_context
 *      ...
 *      -EFAULT if any of the data structures point to invalid data. May
 *      fail with -EBADF if the file descriptor specified in the first
 *      iocb is invalid. May fail with -EAGAIN if insufficient resources
 *      ...
 *      fail with -ENOSYS if not implemented.
 */
                return -EINVAL;

                return -EINVAL;

        if (nr > ctx->nr_events)
                nr = ctx->nr_events;

                        ret = -EFAULT;

        percpu_ref_put(&ctx->users);
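/*
 * A userspace sketch of the io_submit() contract documented above: the
 * return value is the number of iocbs actually queued, which can be less
 * than nr if a later iocb in the batch fails. /etc/os-release is just a
 * convenient file to read.
 */
#include <fcntl.h>
#include <linux/aio_abi.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        aio_context_t ctx = 0;
        static char buf0[512], buf1[512];
        struct iocb cb[2];
        struct iocb *cbs[2] = { &cb[0], &cb[1] };
        long queued;
        int fd = open("/etc/os-release", O_RDONLY);

        if (fd < 0 || syscall(SYS_io_setup, 32, &ctx) < 0)
                return 1;

        memset(cb, 0, sizeof(cb));
        cb[0].aio_lio_opcode = cb[1].aio_lio_opcode = IOCB_CMD_PREAD;
        cb[0].aio_fildes = cb[1].aio_fildes = fd;
        cb[0].aio_buf = (unsigned long)buf0;    cb[0].aio_nbytes = sizeof(buf0);
        cb[1].aio_buf = (unsigned long)buf1;    cb[1].aio_nbytes = sizeof(buf1);
        cb[1].aio_offset = sizeof(buf0);        /* second read picks up where the first ends */

        queued = syscall(SYS_io_submit, ctx, 2, cbs);
        printf("queued %ld of 2 iocbs\n", queued);      /* < 2 means a mid-batch failure */

        syscall(SYS_io_destroy, ctx);
        return 0;
}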
/* compat_sys_io_submit() */
                return -EINVAL;

                return -EINVAL;

        if (nr > ctx->nr_events)
                nr = ctx->nr_events;

                        ret = -EFAULT;

        percpu_ref_put(&ctx->users);
/* sys_io_cancel()
 *      ... May fail with
 *      -EFAULT if any of the data structures pointed to are invalid.
 *      May fail with -EINVAL if aio_context specified by ctx_id is
 *      invalid. May fail with -EAGAIN if the iocb specified was not
 *      cancelled. Will fail with -ENOSYS if not implemented.
 */
        int ret = -EINVAL;

        if (unlikely(get_user(key, &iocb->aio_key)))
                return -EFAULT;
        if (unlikely(key != KIOCB_KEY))
                return -EINVAL;

                return -EINVAL;

        spin_lock_irq(&ctx->ctx_lock);

        list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
                if (kiocb->ki_res.obj == obj) {
                        ret = kiocb->ki_cancel(&kiocb->rw);
                        list_del_init(&kiocb->ki_list);
                        break;
                }
        }

        spin_unlock_irq(&ctx->ctx_lock);

        if (!ret) {
                /*
                 * The result argument is no longer used - the io_event is
                 * always delivered via the ring buffer. -EINPROGRESS indicates
                 * cancellation is in progress.
                 */
                ret = -EINPROGRESS;
        }

        percpu_ref_put(&ctx->users);
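/*
 * A userspace sketch of io_cancel() against a poll request that can
 * never fire. As the comment above explains, the io_event result
 * argument is no longer filled in: EINPROGRESS in errno means the cancel
 * ran and the completion arrives through the ring instead.
 */
#include <errno.h>
#include <linux/aio_abi.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        aio_context_t ctx = 0;
        struct iocb cb;
        struct iocb *cbs[1] = { &cb };
        struct io_event ev;
        int pfd[2];

        if (pipe(pfd) || syscall(SYS_io_setup, 8, &ctx) < 0)
                return 1;

        memset(&cb, 0, sizeof(cb));
        cb.aio_lio_opcode = IOCB_CMD_POLL;      /* poll iocbs register a ki_cancel hook */
        cb.aio_fildes = pfd[0];
        cb.aio_buf = POLLIN;                    /* nothing is ever written: stays pending */

        syscall(SYS_io_submit, ctx, 1, cbs);

        if (syscall(SYS_io_cancel, ctx, &cb, &ev) < 0 && errno == EINPROGRESS) {
                /* cancellation in progress; reap the event from the ring */
                syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL);
                printf("cancelled, res=%lld\n", (long long)ev.res);
        }

        syscall(SYS_io_destroy, ctx);
        return 0;
}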
/* do_io_getevents() */
        long ret = -EINVAL;

                percpu_ref_put(&ioctx->users);

/* sys_io_getevents()
 *      ... May fail with
 *      -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *      out of range, if timeout is out of range. May fail with -EFAULT
 *      ...
 *      timeout is relative. Will fail with -ENOSYS if not implemented.
 */
                return -EFAULT;

                ret = -EINTR;
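/*
 * A userspace sketch of io_getevents() with the relative timeout the
 * comment above describes: with nothing submitted, the call waits out
 * the full second and returns 0 rather than an error.
 */
#include <linux/aio_abi.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        aio_context_t ctx = 0;
        struct io_event evs[8];
        struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };     /* relative timeout */
        long n;

        if (syscall(SYS_io_setup, 8, &ctx) < 0)
                return 1;

        n = syscall(SYS_io_getevents, ctx, 1, 8, evs, &ts);     /* min_nr=1, nr=8 */
        printf("reaped %ld events\n", n);

        syscall(SYS_io_destroy, ctx);
        return 0;
}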
/* sys_io_pgetevents() */
                return -EFAULT;

                return -EFAULT;

                ret = -ERESTARTNOHAND;

/* sys_io_pgetevents_time32() */
                return -EFAULT;

                return -EFAULT;

                ret = -ERESTARTNOHAND;

/* sys_io_getevents_time32() */
                return -EFAULT;

                ret = -EINTR;

/* compat_sys_io_pgetevents() */
                return -EFAULT;

                return -EFAULT;

                ret = -ERESTARTNOHAND;

/* compat_sys_io_pgetevents_time64() */
                return -EFAULT;

                return -EFAULT;

                ret = -ERESTARTNOHAND;