Lines Matching full:kmsg (all hits below are in the Linux kernel's io_uring/net.c)

142 static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg) in io_netmsg_iovec_free() argument
144 if (kmsg->vec.iovec) in io_netmsg_iovec_free()
145 io_vec_free(&kmsg->vec); in io_netmsg_iovec_free()
185 struct io_async_msghdr *kmsg) in io_mshot_prep_retry() argument
337 struct io_async_msghdr *kmsg = req->async_data; in io_send_setup() local
347 kmsg->msg.msg_name = NULL; in io_send_setup()
348 kmsg->msg.msg_namelen = 0; in io_send_setup()
349 kmsg->msg.msg_control = NULL; in io_send_setup()
350 kmsg->msg.msg_controllen = 0; in io_send_setup()
351 kmsg->msg.msg_ubuf = NULL; in io_send_setup()
356 ret = move_addr_to_kernel(addr, addr_len, &kmsg->addr); in io_send_setup()
359 kmsg->msg.msg_name = &kmsg->addr; in io_send_setup()
360 kmsg->msg.msg_namelen = addr_len; in io_send_setup()
366 &kmsg->msg.msg_iter); in io_send_setup()
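
The io_send_setup() hits above (lines 347-360) show the usual msghdr priming: the optional fields start NULL/zero, and msg_name is pointed at a kernel copy of the address only when the caller supplied one. A minimal userspace sketch of the same shape (illustrative code, not the kernel's; a UDP socket is used so sendmsg() completes without a peer):

    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in dst = { .sin_family = AF_INET,
                                   .sin_port = htons(9999) };
        char data[] = "hi";
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) - 1 };
        struct msghdr msg;

        inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
        memset(&msg, 0, sizeof(msg));   /* name/control start NULL, as above */
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_name = &dst;            /* only set because an address exists */
        msg.msg_namelen = sizeof(dst);

        printf("sent %zd bytes\n", sendmsg(fd, &msg, 0));
        close(fd);
        return 0;
    }
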
376 struct io_async_msghdr *kmsg = req->async_data; in io_sendmsg_setup() local
381 ret = io_msg_copy_hdr(req, kmsg, &msg, ITER_SOURCE, NULL); in io_sendmsg_setup()
385 sr->msg_control = kmsg->msg.msg_control_user; in io_sendmsg_setup()
388 kmsg->msg.msg_iter.nr_segs = msg.msg_iovlen; in io_sendmsg_setup()
389 return io_prep_reg_iovec(req, &kmsg->vec, msg.msg_iov, in io_sendmsg_setup()
394 return io_net_import_vec(req, kmsg, msg.msg_iov, msg.msg_iovlen, ITER_SOURCE); in io_sendmsg_setup()
449 static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret) in io_bundle_nbufs() argument
457 if (iter_is_ubuf(&kmsg->msg.msg_iter)) in io_bundle_nbufs()
460 iov = kmsg->vec.iovec; in io_bundle_nbufs()
462 iov = &kmsg->fast_iov; in io_bundle_nbufs()
465 if (!iov_iter_count(&kmsg->msg.msg_iter)) in io_bundle_nbufs()
466 return iter_iov(&kmsg->msg.msg_iter) - iov; in io_bundle_nbufs()
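
io_bundle_nbufs() answers "how many selected buffers did this transfer consume?": a full transfer is pointer math on the iterator (line 466), and a short one walks the iovec array until the byte count runs out. A standalone sketch of the short-transfer walk (assumes, like the kernel does, that ret never exceeds the space described by the array):

    #include <stdio.h>
    #include <sys/uio.h>

    static int bundle_nbufs(const struct iovec *iov, size_t ret)
    {
        int nbufs = 0;

        while (ret) {
            size_t this_len = iov[nbufs].iov_len < ret ?
                              iov[nbufs].iov_len : ret;

            nbufs++;            /* a partially filled buffer still counts */
            ret -= this_len;
        }
        return nbufs;
    }

    int main(void)
    {
        char a[8], b[8], c[8];
        struct iovec iov[] = { { a, 8 }, { b, 8 }, { c, 8 } };

        /* 12 bytes fill buffer 0 and half of buffer 1 */
        printf("%d\n", bundle_nbufs(iov, 12));   /* prints 2 */
        return 0;
    }
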
481 struct io_async_msghdr *kmsg, in io_send_finish() argument
493 cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags); in io_send_finish()
503 io_mshot_prep_retry(req, kmsg); in io_send_finish()
517 struct io_async_msghdr *kmsg = req->async_data; in io_sendmsg() local
535 min_ret = iov_iter_count(&kmsg->msg.msg_iter); in io_sendmsg()
537 kmsg->msg.msg_control_user = sr->msg_control; in io_sendmsg()
539 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags); in io_sendmsg()
545 kmsg->msg.msg_controllen = 0; in io_sendmsg()
546 kmsg->msg.msg_control = NULL; in io_sendmsg()
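
Line 535 is the MSG_WAITALL contract: the whole iterator length becomes the success threshold, so a short send counts as "not done" and gets retried rather than completed. The same idea as a plain userspace loop (send_all() is a hypothetical helper, not a kernel or libc function):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    static ssize_t send_all(int fd, const void *buf, size_t len)
    {
        size_t done = 0;

        while (done < len) {            /* min_ret is the full length */
            ssize_t n = send(fd, (const char *)buf + done, len - done, 0);

            if (n < 0) {
                if (errno == EINTR)
                    continue;
                return done ? (ssize_t)done : -1;
            }
            done += (size_t)n;
        }
        return (ssize_t)done;
    }

    int main(void)
    {
        int sv[2];

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
            return 1;
        printf("sent %zd bytes\n", send_all(sv[0], "hello", 5));
        return 0;
    }
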
565 struct io_async_msghdr *kmsg) in io_send_select_buffer() argument
571 .iovs = &kmsg->fast_iov, in io_send_select_buffer()
576 if (kmsg->vec.iovec) { in io_send_select_buffer()
577 arg.nr_iovs = kmsg->vec.nr; in io_send_select_buffer()
578 arg.iovs = kmsg->vec.iovec; in io_send_select_buffer()
591 if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) { in io_send_select_buffer()
592 kmsg->vec.nr = ret; in io_send_select_buffer()
593 kmsg->vec.iovec = arg.iovs; in io_send_select_buffer()
601 &kmsg->msg.msg_iter); in io_send_select_buffer()
605 iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE, in io_send_select_buffer()
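
Lines 571-593 show the two-tier iovec strategy: buffer selection starts from the single inline fast_iov, and if selection hands back a larger allocated array, it is stashed in kmsg->vec for reuse and eventual freeing. A sketch of that ownership pattern with illustrative names (msg_state and pick_iovs are not kernel types):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/uio.h>

    struct msg_state {
        struct iovec fast_iov;   /* inline, like kmsg->fast_iov */
        struct iovec *owned;     /* allocated, like kmsg->vec.iovec */
        int owned_nr;            /* like kmsg->vec.nr */
    };

    static struct iovec *pick_iovs(struct msg_state *s, int needed)
    {
        if (needed <= 1)
            return &s->fast_iov;         /* common case: no allocation */
        if (needed > s->owned_nr) {
            struct iovec *iovs = realloc(s->owned, needed * sizeof(*iovs));

            if (!iovs)
                return NULL;
            s->owned = iovs;             /* remember for reuse and free */
            s->owned_nr = needed;
        }
        return s->owned;
    }

    int main(void)
    {
        struct msg_state s = { .owned = NULL, .owned_nr = 0 };

        printf("%s\n", pick_iovs(&s, 1) == &s.fast_iov ? "inline" : "owned");
        printf("%s\n", pick_iovs(&s, 8) == s.owned ? "owned" : "inline");
        free(s.owned);
        return 0;
    }
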
615 struct io_async_msghdr *kmsg = req->async_data; in io_send() local
635 ret = io_send_select_buffer(req, issue_flags, kmsg); in io_send()
646 min_ret = iov_iter_count(&kmsg->msg.msg_iter); in io_send()
649 kmsg->msg.msg_flags = flags; in io_send()
650 ret = sock_sendmsg(sock, &kmsg->msg); in io_send()
671 if (!io_send_finish(req, &ret, kmsg, issue_flags)) in io_send()
725 struct io_async_msghdr *kmsg; in io_recvmsg_prep_setup() local
728 kmsg = io_msg_alloc_async(req); in io_recvmsg_prep_setup()
729 if (unlikely(!kmsg)) in io_recvmsg_prep_setup()
733 kmsg->msg.msg_name = NULL; in io_recvmsg_prep_setup()
734 kmsg->msg.msg_namelen = 0; in io_recvmsg_prep_setup()
735 kmsg->msg.msg_inq = 0; in io_recvmsg_prep_setup()
736 kmsg->msg.msg_control = NULL; in io_recvmsg_prep_setup()
737 kmsg->msg.msg_get_inq = 1; in io_recvmsg_prep_setup()
738 kmsg->msg.msg_controllen = 0; in io_recvmsg_prep_setup()
739 kmsg->msg.msg_iocb = NULL; in io_recvmsg_prep_setup()
740 kmsg->msg.msg_ubuf = NULL; in io_recvmsg_prep_setup()
744 &kmsg->msg.msg_iter); in io_recvmsg_prep_setup()
751 return io_recvmsg_copy_hdr(req, kmsg); in io_recvmsg_prep_setup()
820 struct io_async_msghdr *kmsg, in io_recv_finish() argument
826 if (kmsg->msg.msg_inq > 0) in io_recv_finish()
832 cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, this_ret), in io_recv_finish()
843 if (!sr->retry && kmsg->msg.msg_inq > 0 && this_ret > 0 && in io_recv_finish()
844 !iov_iter_count(&kmsg->msg.msg_iter)) { in io_recv_finish()
846 sr->len = kmsg->msg.msg_inq; in io_recv_finish()
862 io_mshot_prep_retry(req, kmsg); in io_recv_finish()
864 if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) { in io_recv_finish()
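
Lines 843-844 are the bundle fast-retry heuristic: only loop again immediately if this is the first pass, the socket still has queued data, something was actually received, and every selected buffer was filled. Written out as a pure predicate (a restatement for illustration, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    static bool should_retry_bundle(bool retried, int msg_inq,
                                    long this_ret, size_t iter_left)
    {
        return !retried && msg_inq > 0 && this_ret > 0 && iter_left == 0;
    }

    int main(void)
    {
        printf("%d\n", should_retry_bundle(false, 512, 4096, 0)); /* 1 */
        printf("%d\n", should_retry_bundle(false, 0, 4096, 0));   /* 0 */
        printf("%d\n", should_retry_bundle(true, 512, 4096, 0));  /* 0 */
        return 0;
    }
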
883 static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg, in io_recvmsg_prep_multishot() argument
890 hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen + in io_recvmsg_prep_multishot()
891 kmsg->controllen; in io_recvmsg_prep_multishot()
895 if (kmsg->controllen) { in io_recvmsg_prep_multishot()
896 unsigned long control = ubuf + hdr - kmsg->controllen; in io_recvmsg_prep_multishot()
898 kmsg->msg.msg_control_user = (void __user *) control; in io_recvmsg_prep_multishot()
899 kmsg->msg.msg_controllen = kmsg->controllen; in io_recvmsg_prep_multishot()
904 kmsg->payloadlen = *len = *len - hdr; in io_recvmsg_prep_multishot()
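
io_recvmsg_prep_multishot() carves one selected buffer into four regions: struct io_uring_recvmsg_out, then namelen bytes of address, then controllen bytes of cmsg data, with the payload taking whatever is left (line 904). The offset math, runnable as plain C (assumes UAPI headers recent enough to carry struct io_uring_recvmsg_out; the sizes are illustrative):

    #include <stdio.h>
    #include <linux/io_uring.h>

    int main(void)
    {
        size_t namelen = 16, controllen = 32, buflen = 4096;
        size_t hdr = sizeof(struct io_uring_recvmsg_out) + namelen + controllen;

        if (buflen < hdr)
            return 1;       /* kernel rejects buffers smaller than the header */

        size_t name_off = sizeof(struct io_uring_recvmsg_out);
        size_t control_off = hdr - controllen;  /* control ends at hdr, line 896 */
        size_t payloadlen = buflen - hdr;       /* line 904 */

        printf("name@%zu control@%zu payload=%zu\n",
               name_off, control_off, payloadlen);
        return 0;
    }
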
914 struct io_async_msghdr *kmsg, in io_recvmsg_multishot() argument
921 if (kmsg->namelen) in io_recvmsg_multishot()
922 kmsg->msg.msg_name = &hdr.addr; in io_recvmsg_multishot()
923 kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); in io_recvmsg_multishot()
924 kmsg->msg.msg_namelen = 0; in io_recvmsg_multishot()
929 err = sock_recvmsg(sock, &kmsg->msg, flags); in io_recvmsg_multishot()
935 .controllen = kmsg->controllen - kmsg->msg.msg_controllen, in io_recvmsg_multishot()
936 .flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT in io_recvmsg_multishot()
940 if (err > kmsg->payloadlen) in io_recvmsg_multishot()
941 err = kmsg->payloadlen; in io_recvmsg_multishot()
944 if (kmsg->msg.msg_namelen > kmsg->namelen) in io_recvmsg_multishot()
945 copy_len += kmsg->namelen; in io_recvmsg_multishot()
947 copy_len += kmsg->msg.msg_namelen; in io_recvmsg_multishot()
953 hdr.msg.namelen = kmsg->msg.msg_namelen; in io_recvmsg_multishot()
963 return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen + in io_recvmsg_multishot()
964 kmsg->controllen + err; in io_recvmsg_multishot()
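
On the way back out, io_recvmsg_multishot() fills that same layout and clamps the copy: o->namelen reports the real (possibly larger than reserved) address size, so a reader must take min(o->namelen, reserved namelen), mirroring what lines 944-947 do on the kernel side. liburing ships io_uring_recvmsg_* helpers for this; below is a hand-rolled sketch that fabricates a buffer instead of running a ring (parse() is illustrative, not an API):

    #include <stdio.h>
    #include <string.h>
    #include <linux/io_uring.h>

    /* [io_uring_recvmsg_out][name: reserved namelen][control][payload] */
    static void parse(const char *buf, unsigned res_namelen,
                      unsigned res_controllen)
    {
        struct io_uring_recvmsg_out o;
        const char *payload = buf + sizeof(o) + res_namelen + res_controllen;
        unsigned namelen;

        memcpy(&o, buf, sizeof(o));     /* header sits at the front */
        namelen = o.namelen < res_namelen ? o.namelen : res_namelen;
        printf("addr bytes=%u payload=\"%.*s\"\n",
               namelen, (int)o.payloadlen, payload);
    }

    int main(void)
    {
        /* Fabricate what a multishot recvmsg CQE's buffer could contain. */
        struct io_uring_recvmsg_out o = { .namelen = 0, .controllen = 0,
                                          .payloadlen = 5, .flags = 0 };
        char buf[sizeof(o) + 16 + 5];

        memcpy(buf, &o, sizeof(o));
        memcpy(buf + sizeof(o) + 16, "hello", 5);
        parse(buf, 16, 0);
        return 0;
    }
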
970 struct io_async_msghdr *kmsg = req->async_data; in io_recvmsg() local
999 ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len); in io_recvmsg()
1006 iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len); in io_recvmsg()
1009 kmsg->msg.msg_get_inq = 1; in io_recvmsg()
1010 kmsg->msg.msg_inq = -1; in io_recvmsg()
1012 ret = io_recvmsg_multishot(sock, sr, kmsg, flags, in io_recvmsg()
1016 if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen) in io_recvmsg()
1017 min_ret = iov_iter_count(&kmsg->msg.msg_iter); in io_recvmsg()
1019 ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg, in io_recvmsg()
1020 kmsg->uaddr, flags); in io_recvmsg()
1038 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { in io_recvmsg()
1049 if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags)) in io_recvmsg()
1055 static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg, in io_recv_buf_select() argument
1069 .iovs = &kmsg->fast_iov, in io_recv_buf_select()
1074 if (kmsg->vec.iovec) { in io_recv_buf_select()
1075 arg.nr_iovs = kmsg->vec.nr; in io_recv_buf_select()
1076 arg.iovs = kmsg->vec.iovec; in io_recv_buf_select()
1080 if (kmsg->msg.msg_inq > 0) in io_recv_buf_select()
1081 arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq); in io_recv_buf_select()
1093 iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret, in io_recv_buf_select()
1095 if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) { in io_recv_buf_select()
1096 kmsg->vec.nr = ret; in io_recv_buf_select()
1097 kmsg->vec.iovec = arg.iovs; in io_recv_buf_select()
1111 &kmsg->msg.msg_iter); in io_recv_buf_select()
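
Line 1081 caps the selected-buffer length at the socket's pending byte count when msg_inq is known, so a short read does not pin an oversized buffer. A userspace analogue of that sizing uses FIONREAD (min_not_zero() is re-created here; in the kernel it is a minmax.h helper):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static size_t min_not_zero(size_t a, size_t b)
    {
        if (!a) return b;
        if (!b) return a;
        return a < b ? a : b;
    }

    int main(void)
    {
        int sv[2], inq = 0;
        char buf[4096];

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
            return 1;
        write(sv[1], "pending", 7);

        ioctl(sv[0], FIONREAD, &inq);   /* bytes queued, like msg_inq */
        printf("inq=%d read=%zd\n", inq,
               read(sv[0], buf, min_not_zero(sizeof(buf), (size_t)inq)));
        return 0;
    }
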
1122 struct io_async_msghdr *kmsg = req->async_data; in io_recv() local
1144 ret = io_recv_buf_select(req, kmsg, &len, issue_flags); in io_recv()
1146 kmsg->msg.msg_inq = -1; in io_recv()
1152 kmsg->msg.msg_flags = 0; in io_recv()
1153 kmsg->msg.msg_inq = -1; in io_recv()
1156 min_ret = iov_iter_count(&kmsg->msg.msg_iter); in io_recv()
1158 ret = sock_recvmsg(sock, &kmsg->msg, flags); in io_recv()
1176 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { in io_recv()
1189 if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags)) in io_recv()
1402 struct io_async_msghdr *kmsg = req->async_data; in io_send_zc_import() local
1407 return io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter, in io_send_zc_import()
1415 struct io_async_msghdr *kmsg = req->async_data; in io_send_zc() local
1441 min_ret = iov_iter_count(&kmsg->msg.msg_iter); in io_send_zc()
1444 kmsg->msg.msg_flags = msg_flags; in io_send_zc()
1445 kmsg->msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg; in io_send_zc()
1446 ret = sock_sendmsg(sock, &kmsg->msg); in io_send_zc()
1452 if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) { in io_send_zc()
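
From userspace, the zero-copy send at lines 1444-1446 surfaces as two CQEs: one carrying the send result with IORING_CQE_F_MORE set, and a later notification CQE (IORING_CQE_F_NOTIF) once the kernel no longer references the pages. A liburing sketch (assumes liburing >= 2.3 and a kernel with IORING_OP_SEND_ZC; UDP is used because the socket must support zero-copy, otherwise res is -EOPNOTSUPP; build with -luring):

    #include <stdio.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <liburing.h>

    int main(void)
    {
        struct io_uring ring;
        struct sockaddr_in dst = { .sin_family = AF_INET,
                                   .sin_port = htons(9999) };
        static const char payload[] = "zc";
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
        if (fd < 0 || connect(fd, (struct sockaddr *)&dst, sizeof(dst)) ||
            io_uring_queue_init(8, &ring, 0))
            return 1;

        struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
        io_uring_prep_send_zc(sqe, fd, payload, sizeof(payload), 0, 0);
        io_uring_submit(&ring);

        for (int i = 0; i < 2; i++) {   /* result CQE, then notif CQE */
            struct io_uring_cqe *cqe;
            unsigned flags;

            if (io_uring_wait_cqe(&ring, &cqe))
                break;
            flags = cqe->flags;
            printf("res=%d more=%d notif=%d\n", cqe->res,
                   !!(flags & IORING_CQE_F_MORE),
                   !!(flags & IORING_CQE_F_NOTIF));
            io_uring_cqe_seen(&ring, cqe);
            if (!(flags & IORING_CQE_F_MORE))
                break;          /* failed early: no notif CQE will follow */
        }
        io_uring_queue_exit(&ring);
        return 0;
    }
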
1485 struct io_async_msghdr *kmsg = req->async_data; in io_sendmsg_zc() local
1491 unsigned uvec_segs = kmsg->msg.msg_iter.nr_segs; in io_sendmsg_zc()
1494 ret = io_import_reg_vec(ITER_SOURCE, &kmsg->msg.msg_iter, req, in io_sendmsg_zc()
1495 &kmsg->vec, uvec_segs, issue_flags); in io_sendmsg_zc()
1515 min_ret = iov_iter_count(&kmsg->msg.msg_iter); in io_sendmsg_zc()
1517 kmsg->msg.msg_control_user = sr->msg_control; in io_sendmsg_zc()
1518 kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg; in io_sendmsg_zc()
1519 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags); in io_sendmsg_zc()
1850 struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry; in io_netmsg_cache_free() local
1852 io_vec_free(&kmsg->vec); in io_netmsg_cache_free()
1853 kfree(kmsg); in io_netmsg_cache_free()