Lines Matching +full:enum-as-flags
1 /* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
32 __u8 flags; /* IOSQE_ flags */
56 __u32 poll32_events; /* word-reversed for BE */
115 /* sqe->attr_type_mask flags */
119 __u16 flags;
128 * If sqe->file_index is set to this for opcodes that instantiate a new
131 * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE
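A minimal sketch of the allocated-slot flow, assuming liburing and a sparse file table registered beforehand (the helper name is illustrative):

        #include <liburing.h>
        #include <fcntl.h>

        /* Assumes a sparse direct-descriptor table was registered first,
         * e.g. io_uring_register_files_sparse(ring, 16). */
        static int open_direct_auto(struct io_uring *ring, const char *path)
        {
                struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
                struct io_uring_cqe *cqe;
                int slot;

                /* IORING_FILE_INDEX_ALLOC: let the kernel pick a free slot */
                io_uring_prep_openat_direct(sqe, AT_FDCWD, path, O_RDONLY, 0,
                                            IORING_FILE_INDEX_ALLOC);
                io_uring_submit(ring);
                if (io_uring_wait_cqe(ring, &cqe))
                        return -1;
                slot = cqe->res; /* picked slot, or -ENFILE if the table is full */
                io_uring_cqe_seen(ring, cqe);
                return slot;
        }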
136 enum io_uring_sqe_flags_bit {
147 * sqe->flags
159 /* select buffer from sqe->buf_group */
165 * io_uring_setup() flags
179 * than force an inter-processor interrupt reschedule. This avoids interrupting
186 * IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN.
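A sketch of opting into cooperative task running at setup time, assuming liburing:

        #include <liburing.h>

        static int setup_coop_ring(struct io_uring *ring)
        {
                struct io_uring_params p = {
                        /* process completion work on natural kernel
                         * transitions rather than forcing an IPI reschedule */
                        .flags = IORING_SETUP_COOP_TASKRUN,
                };

                return io_uring_queue_init_params(256, ring, &p);
        }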
223 enum io_uring_op {
292 * sqe->uring_cmd_flags top 8 bits aren't available for userspace
294 * along with setting sqe->buf_index.
301 * sqe->fsync_flags
306 * sqe->timeout_flags
318 * sqe->splice_flags
319 * extends splice(2) flags
324 * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the
325 * command flags for POLL_ADD are stored in sqe->len.
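A sketch of arming a multishot poll, assuming liburing; io_uring_prep_poll_multishot is what places IORING_POLL_ADD_MULTI into sqe->len:

        #include <liburing.h>
        #include <poll.h>

        static void arm_poll_multishot(struct io_uring *ring, int fd)
        {
                struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

                /* POLLIN goes into sqe->poll_events; the multishot command
                 * flag lands in sqe->len, since poll_events is taken */
                io_uring_prep_poll_multishot(sqe, fd, POLLIN);
                sqe->user_data = (__u64)fd;
        }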
332 * sqe->addr as the old user_data field.
342 * ASYNC_CANCEL flags.
360 * send/sendmsg and recv/recvmsg flags (sqe->ioprio)
364 * -EAGAIN result, arm poll upfront and skip
383 * recv will grab as many buffers from the buffer
386 * the starting buffer ID in cqe->flags as per
400 * It should be treated as a flag; all other
401 * bits of cqe.res should be treated as reserved!
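A sketch of decoding such a completion, assuming liburing and a registered provided-buffer group:

        #include <liburing.h>

        /* Returns the provided-buffer ID, or -1 if no buffer was picked.
         * If IORING_CQE_F_MORE is clear, the multishot recv has stopped
         * and needs re-arming. */
        static int recv_cqe_buffer_id(const struct io_uring_cqe *cqe)
        {
                if (!(cqe->flags & IORING_CQE_F_BUFFER))
                        return -1;
                /* the buffer ID travels in the upper bits of cqe->flags */
                return cqe->flags >> IORING_CQE_BUFFER_SHIFT;
        }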
406 * accept flags stored in sqe->ioprio
413 * IORING_OP_MSG_RING command types, stored in sqe->addr
415 enum io_uring_msg_ring_flags {
416 IORING_MSG_DATA, /* pass sqe->len as 'res' and off as user_data */
421 * IORING_OP_MSG_RING flags (sqe->msg_ring_flags)
427 /* Pass through the flags from sqe->file_index to cqe->flags */
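A sketch of IORING_MSG_DATA between two rings, assuming liburing (the helper name is illustrative):

        #include <liburing.h>

        /* Posts a CQE to 'target_fd' (another ring's fd): cqe->res = 0 and
         * cqe->user_data = 0xfeed on the target. A flags value of 0 means
         * plain IORING_MSG_DATA. */
        static int wake_other_ring(struct io_uring *ring, int target_fd)
        {
                struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

                io_uring_prep_msg_ring(sqe, target_fd, 0, 0xfeed, 0);
                return io_uring_submit(ring);
        }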
431 * IORING_OP_FIXED_FD_INSTALL flags (sqe->install_fd_flags)
433 * IORING_FIXED_FD_NO_CLOEXEC Don't mark the fd as O_CLOEXEC
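A sketch of turning a direct descriptor back into a regular fd, assuming liburing's io_uring_prep_fixed_fd_install helper:

        #include <liburing.h>

        static void install_fd(struct io_uring *ring, int direct_slot)
        {
                struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

                /* pass IORING_FIXED_FD_NO_CLOEXEC to skip O_CLOEXEC;
                 * the new regular fd comes back in cqe->res */
                io_uring_prep_fixed_fd_install(sqe, direct_slot, 0);
        }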
438 * IORING_OP_NOP flags (sqe->nop_flags)
440 * IORING_NOP_INJECT_RESULT Inject result from sqe->result
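A sketch of a NOP that completes with a chosen result, assuming liburing; which sqe field carries the injected value (len here) is an assumption worth verifying against the running kernel:

        #include <liburing.h>

        static void queue_nop_with_result(struct io_uring *ring, int res)
        {
                struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

                io_uring_prep_nop(sqe);
                sqe->nop_flags = IORING_NOP_INJECT_RESULT;
                sqe->len = res; /* value surfaced in cqe->res (assumed field) */
        }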
451 __u64 user_data; /* sqe->user_data value passed back */
453 __u32 flags;
457 * contains 16 bytes of padding, doubling the size of the CQE.
463 * cqe->flags
474 * the incremental buffer consumption, as provided by
506 __u32 flags;
514 * sq_ring->flags
527 __u32 flags;
533 * cq_ring->flags
540 * io_uring_enter(2) flags
557 __u32 flags;
568 * io_uring_params->features flags
592 enum io_uring_register_op {
613 /* set/clear io-wq thread affinities */
617 /* set/get max number of io-wq workers */
664 /* io-wq worker categories */
665 enum io_wq_type {
677 enum {
685 __u32 flags;
691 enum {
692 /* expose the region as registered wait arguments */
698 __u64 flags;
704 * -1 file descriptors.
710 __u32 flags;
732 #define IORING_REGISTER_FILES_SKIP (-2)
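A sketch of a selective file-table update, assuming liburing; -1 registers an empty slot, while IORING_REGISTER_FILES_SKIP leaves a slot untouched during an update:

        #include <liburing.h>

        static int fill_slot_one(struct io_uring *ring, int sockfd)
        {
                int fds[4] = { -1, -1, -1, -1 };        /* all-empty table */
                int upd[4] = { IORING_REGISTER_FILES_SKIP, sockfd,
                               IORING_REGISTER_FILES_SKIP,
                               IORING_REGISTER_FILES_SKIP };

                io_uring_register_files(ring, fds, 4);
                /* only slot 1 changes; SKIP preserves the others */
                return io_uring_register_files_update(ring, 0, upd, 4);
        }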
739 __u16 flags; /* IO_URING_OP_* flags */
767 enum {
774 __u32 flags;
792 * ring tail is overlaid with the io_uring_buf->resv field.
805 * Flags for IORING_REGISTER_PBUF_RING.
810 * mmap(2) with the offset set as:
817 * use of it will consume only as much as it needs. This
821 enum io_uring_register_pbuf_ring_flags {
831 __u16 flags;
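A sketch of provided-buffer-ring registration via liburing, which hides the mmap(2) and registration details (the helper name is illustrative):

        #include <liburing.h>

        static struct io_uring_buf_ring *setup_bufs(struct io_uring *ring,
                                                    void *base, unsigned buf_len)
        {
                int err;
                struct io_uring_buf_ring *br;

                br = io_uring_setup_buf_ring(ring, 128, 1 /* bgid */, 0, &err);
                if (!br)
                        return NULL;

                /* hand buffer 0 to the kernel and publish the new tail */
                io_uring_buf_ring_add(br, base, buf_len, 0,
                                      io_uring_buf_ring_mask(128), 0);
                io_uring_buf_ring_advance(br, 1);
                return br;
        }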
842 enum io_uring_napi_op {
851 enum io_uring_napi_tracking_strategy {
879 * io_uring_restriction->opcode values
881 enum io_uring_register_restriction_op {
888 /* Allow sqe flags */
891 /* Require sqe flags (these flags must be set on each submission) */
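A sketch of locking down a ring created with IORING_SETUP_R_DISABLED, assuming liburing:

        #include <liburing.h>

        static int restrict_ring(struct io_uring *ring)
        {
                struct io_uring_restriction res[3] = {
                        { .opcode = IORING_RESTRICTION_SQE_OP,
                          .sqe_op = IORING_OP_READ },
                        { .opcode = IORING_RESTRICTION_SQE_OP,
                          .sqe_op = IORING_OP_WRITE },
                        /* only these sqe flags may ever be set */
                        { .opcode = IORING_RESTRICTION_SQE_FLAGS_ALLOWED,
                          .sqe_flags = IOSQE_FIXED_FILE },
                };

                if (io_uring_register_restrictions(ring, res, 3))
                        return -1;
                return io_uring_enable_rings(ring); /* ring usable from here */
        }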
897 enum {
910 __u32 flags;
933 __u32 flags;
942 * The range is specified as [off, off + len)
954 __u32 flags;
960 enum io_uring_socket_op {
981 #define IORING_ZCRX_AREA_MASK (~(((__u64)1 << IORING_ZCRX_AREA_SHIFT) - 1))
995 __u32 flags;
1007 __u32 flags;