/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_CMD_H
#define _LINUX_IO_URING_CMD_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <linux/blk-mq.h>

/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
#define IORING_URING_CMD_CANCELABLE	(1U << 30)
/* io_uring_cmd is being issued again */
#define IORING_URING_CMD_REISSUE	(1U << 31)

struct io_uring_cmd {
	struct file	*file;
	const struct io_uring_sqe *sqe;
	/* callback to defer completions to task context */
	void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
	u32		cmd_op;
	u32		flags;
	u8		pdu[32]; /* available inline for free use */
};

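/* Return a pointer to the command payload carried inside the SQE */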
static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return sqe->cmd;
}

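/*
 * Driver-private per-command state lives in the pdu area of struct
 * io_uring_cmd; the check below breaks the build if a pdu type is too big.
 */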
static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
}
#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
	io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
	((pdu_type *)&(cmd)->pdu) \
)
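/*
 * Example (an illustrative sketch only; the driver struct and function
 * names are hypothetical): a driver typically overlays its per-command
 * state on the pdu area, e.g.
 *
 *	struct my_cmd_pdu {
 *		struct request *req;
 *		int status;
 *	};
 *
 *	static int my_uring_cmd(struct io_uring_cmd *ioucmd,
 *				unsigned int issue_flags)
 *	{
 *		struct my_cmd_pdu *pdu =
 *			io_uring_cmd_to_pdu(ioucmd, struct my_cmd_pdu);
 *
 *		pdu->status = 0;
 *		...
 *	}
 *
 * The BUILD_BUG_ON() above guarantees my_cmd_pdu fits the 32-byte pdu area.
 */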

#if defined(CONFIG_IO_URING)
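/*
 * Import a registered (fixed) buffer described by @ubuf + @len, or a user
 * iovec array backed by registered buffers, into @iter on behalf of a
 * uring_cmd implementation.
 */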
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter,
			      struct io_uring_cmd *ioucmd,
			      unsigned int issue_flags);
int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
				  const struct iovec __user *uvec,
				  size_t uvec_segs,
				  int ddir, struct iov_iter *iter,
				  unsigned issue_flags);

/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
 * and the corresponding io_uring request.
 *
 * Note: the caller should never hard code @issue_flags and is only allowed
 * to pass the mask provided by the core io_uring code.
 */
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, u64 res2,
			unsigned issue_flags);

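/*
 * Defer @task_work_cb to task context via io_uring's task work machinery.
 * Drivers should normally use the io_uring_cmd_do_in_task_lazy() or
 * io_uring_cmd_complete_in_task() wrappers below instead of calling this
 * directly.
 */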
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags);

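/*
 * Mark the command as cancelable. A cancelable command is tracked by the
 * ring so that, on ring exit or cancelation, the file's ->uring_cmd()
 * handler is invoked again with IO_URING_F_CANCEL set to clean it up.
 */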
/*
 * Note: the caller should never hard code @issue_flags and only use the
 * mask provided by the core io_uring code.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags);

/* Execute the request from a blocking context */
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);

#else
static inline int
io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			  struct iov_iter *iter, struct io_uring_cmd *ioucmd,
			  unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}
static inline int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
						const struct iovec __user *uvec,
						size_t uvec_segs,
						int ddir, struct iov_iter *iter,
						unsigned issue_flags)
{
	return -EOPNOTSUPP;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
		u64 ret2, unsigned issue_flags)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
}
static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
}
#endif

/*
 * Polled completions must ensure they are coming from a poll queue, and
 * hence are completed inside the usual poll handling loops.
 */
static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
					    ssize_t ret, ssize_t res2)
{
	lockdep_assert(in_task());
	io_uring_cmd_done(ioucmd, ret, res2, 0);
}

/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}

static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
}
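/*
 * Example (an illustrative sketch only; the driver names are hypothetical):
 * a driver that finishes commands in IRQ context typically punts the CQE
 * posting to task context:
 *
 *	static void my_cmd_tw_cb(struct io_uring_cmd *ioucmd,
 *				 unsigned int issue_flags)
 *	{
 *		struct my_cmd_pdu *pdu =
 *			io_uring_cmd_to_pdu(ioucmd, struct my_cmd_pdu);
 *
 *		io_uring_cmd_done(ioucmd, pdu->status, 0, issue_flags);
 *	}
 *
 * and then, from its IRQ/completion handler:
 *
 *	io_uring_cmd_complete_in_task(ioucmd, my_cmd_tw_cb);
 */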

static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->tctx->task;
}

/*
 * Return the uring_cmd's context reference as a context handle, allowing
 * drivers to track per-context resources such as registered kernel I/O
 * buffers.
 */
static inline void *io_uring_cmd_ctx_handle(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->ctx;
}

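/*
 * Register the bio_vecs of @rq as a fixed kernel buffer at slot @index in
 * the command's ring, e.g. for zero-copy I/O; @release is invoked once the
 * buffer is no longer in use. io_buffer_unregister_bvec() drops the slot.
 */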
int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
			    void (*release)(void *), unsigned int index,
			    unsigned int issue_flags);
int io_buffer_unregister_bvec(struct io_uring_cmd *cmd, unsigned int index,
			      unsigned int issue_flags);

#endif /* _LINUX_IO_URING_CMD_H */