Lines Matching +full:rpc +full:- +full:if
1 // SPDX-License-Identifier: GPL-2.0
5 * Client-side transport implementation for sockets.
164 * Wait duration for a reply from the RPC portmapper.
169 * Delay if a UDP socket connect error occurs. This is most likely some
181 * increase over time if the server is down or not responding.
186 * TCP idle timeout; client drops the transport socket if it is idle
188 * holding port numbers when there is no RPC traffic.
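The comments above cover the timeout constants this file is built around: a bind/portmapper wait, a UDP reconnect delay, an exponentially growing TCP reconnect delay, and an idle disconnect timeout. As a rough sketch, these are plain jiffies-based macros; the names below (XS_BIND_TO, XS_UDP_REEST_TO, XS_TCP_INIT_REEST_TO, XS_IDLE_DISC_TO) do appear later in this listing, but the values shown are illustrative assumptions, not quoted from the file.

#define XS_BIND_TO		(60U * HZ)	/* illustrative: wait for an rpcbind reply */
#define XS_UDP_REEST_TO		(2U * HZ)	/* illustrative: initial delay after a UDP connect error */
#define XS_TCP_INIT_REEST_TO	(3U * HZ)	/* illustrative: first TCP reconnect delay; grows while the server is down */
#define XS_IDLE_DISC_TO		(5U * 60 * HZ)	/* illustrative: drop an idle TCP socket so it stops holding a port */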
192 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
203 dprintk("RPC: %s\n", msg); in xs_pktdump()
205 if (!(j & 31)) { in xs_pktdump()
206 if (j) in xs_pktdump()
224 return (struct rpc_xprt *) sk->sk_user_data; in xprt_from_sock()
229 return (struct sockaddr *) &xprt->addr; in xs_addr()
234 return (struct sockaddr_un *) &xprt->addr; in xs_addr_un()
239 return (struct sockaddr_in *) &xprt->addr; in xs_addr_in()
244 return (struct sockaddr_in6 *) &xprt->addr; in xs_addr_in6()
255 switch (sap->sa_family) { in xs_format_common_peer_addresses()
258 strlcpy(buf, sun->sun_path, sizeof(buf)); in xs_format_common_peer_addresses()
259 xprt->address_strings[RPC_DISPLAY_ADDR] = in xs_format_common_peer_addresses()
264 xprt->address_strings[RPC_DISPLAY_ADDR] = in xs_format_common_peer_addresses()
267 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); in xs_format_common_peer_addresses()
271 xprt->address_strings[RPC_DISPLAY_ADDR] = in xs_format_common_peer_addresses()
274 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr); in xs_format_common_peer_addresses()
280 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); in xs_format_common_peer_addresses()
289 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); in xs_format_common_peer_ports()
292 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); in xs_format_common_peer_ports()
299 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol; in xs_format_peer_addresses()
300 xprt->address_strings[RPC_DISPLAY_NETID] = netid; in xs_format_peer_addresses()
307 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]); in xs_update_peer_port()
308 kfree(xprt->address_strings[RPC_DISPLAY_PORT]); in xs_update_peer_port()
323 kfree(xprt->address_strings[i]); in xs_free_peer_addresses()
332 if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES)) in xs_alloc_sparse_pages()
334 n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT; in xs_alloc_sparse_pages()
336 if (buf->pages[i]) in xs_alloc_sparse_pages()
338 buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp); in xs_alloc_sparse_pages()
339 if (!buf->pages[i]) { in xs_alloc_sparse_pages()
341 return i > buf->page_base ? i - buf->page_base : 0; in xs_alloc_sparse_pages()
351 if (seek != 0) in xs_sock_recvmsg()
352 iov_iter_advance(&msg->msg_iter, seek); in xs_sock_recvmsg()
361 iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count); in xs_read_kvec()
370 iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count); in xs_read_bvec()
378 iov_iter_discard(&msg->msg_iter, READ, count); in xs_read_discard()
382 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
409 want = min_t(size_t, count, buf->head[0].iov_len); in xs_read_xdr_buf()
410 if (seek < want) { in xs_read_xdr_buf()
411 ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek); in xs_read_xdr_buf()
412 if (ret <= 0) in xs_read_xdr_buf()
415 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) in xs_read_xdr_buf()
417 if (ret != want) in xs_read_xdr_buf()
421 seek -= want; in xs_read_xdr_buf()
426 min_t(size_t, count - offset, buf->page_len), in xs_read_xdr_buf()
428 if (seek < want) { in xs_read_xdr_buf()
429 ret = xs_read_bvec(sock, msg, flags, buf->bvec, in xs_read_xdr_buf()
431 want + buf->page_base, in xs_read_xdr_buf()
432 seek + buf->page_base); in xs_read_xdr_buf()
433 if (ret <= 0) in xs_read_xdr_buf()
435 xs_flush_bvec(buf->bvec, ret, seek + buf->page_base); in xs_read_xdr_buf()
436 offset += ret - buf->page_base; in xs_read_xdr_buf()
437 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) in xs_read_xdr_buf()
439 if (ret != want) in xs_read_xdr_buf()
443 seek -= want; in xs_read_xdr_buf()
447 want = min_t(size_t, count - offset, buf->tail[0].iov_len); in xs_read_xdr_buf()
448 if (seek < want) { in xs_read_xdr_buf()
449 ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek); in xs_read_xdr_buf()
450 if (ret <= 0) in xs_read_xdr_buf()
453 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) in xs_read_xdr_buf()
455 if (ret != want) in xs_read_xdr_buf()
457 } else if (offset < seek_init) in xs_read_xdr_buf()
459 ret = -EMSGSIZE; in xs_read_xdr_buf()
461 *read = offset - seek_init; in xs_read_xdr_buf()
471 if (!transport->recv.copied) { in xs_read_header()
472 if (buf->head[0].iov_len >= transport->recv.offset) in xs_read_header()
473 memcpy(buf->head[0].iov_base, in xs_read_header()
474 &transport->recv.xid, in xs_read_header()
475 transport->recv.offset); in xs_read_header()
476 transport->recv.copied = transport->recv.offset; in xs_read_header()
483 return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT); in xs_read_stream_request_done()
490 if (xs_read_stream_request_done(transport)) in xs_read_stream_check_eor()
491 msg->msg_flags |= MSG_EOR; in xs_read_stream_check_eor()
498 struct xdr_buf *buf = &req->rq_private_buf; in xs_read_stream_request()
504 want = transport->recv.len - transport->recv.offset; in xs_read_stream_request()
505 if (want != 0) { in xs_read_stream_request()
506 ret = xs_read_xdr_buf(transport->sock, msg, flags, buf, in xs_read_stream_request()
507 transport->recv.copied + want, in xs_read_stream_request()
508 transport->recv.copied, in xs_read_stream_request()
510 transport->recv.offset += read; in xs_read_stream_request()
511 transport->recv.copied += read; in xs_read_stream_request()
514 if (transport->recv.offset == transport->recv.len) in xs_read_stream_request()
517 if (want == 0) in xs_read_stream_request()
523 case -EFAULT: in xs_read_stream_request()
524 case -EMSGSIZE: in xs_read_stream_request()
525 msg->msg_flags |= MSG_TRUNC; in xs_read_stream_request()
528 return -ESHUTDOWN; in xs_read_stream_request()
536 if (isfrag) in xs_read_stream_headersize()
546 .iov_base = &transport->recv.fraghdr, in xs_read_stream_header()
549 return xs_read_kvec(transport->sock, msg, flags, &kvec, want, seek); in xs_read_stream_header()
552 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
556 struct rpc_xprt *xprt = &transport->xprt; in xs_read_stream_call()
561 req = xprt_lookup_bc_request(xprt, transport->recv.xid); in xs_read_stream_call()
562 if (!req) { in xs_read_stream_call()
564 return -ESHUTDOWN; in xs_read_stream_call()
566 if (transport->recv.copied && !req->rq_private_buf.len) in xs_read_stream_call()
567 return -ESHUTDOWN; in xs_read_stream_call()
570 if (msg->msg_flags & (MSG_EOR|MSG_TRUNC)) in xs_read_stream_call()
571 xprt_complete_bc_request(req, transport->recv.copied); in xs_read_stream_call()
573 req->rq_private_buf.len = transport->recv.copied; in xs_read_stream_call()
581 return -ESHUTDOWN; in xs_read_stream_call()
588 struct rpc_xprt *xprt = &transport->xprt; in xs_read_stream_reply()
593 spin_lock(&xprt->queue_lock); in xs_read_stream_reply()
594 req = xprt_lookup_rqst(xprt, transport->recv.xid); in xs_read_stream_reply()
595 if (!req || (transport->recv.copied && !req->rq_private_buf.len)) { in xs_read_stream_reply()
596 msg->msg_flags |= MSG_TRUNC; in xs_read_stream_reply()
600 spin_unlock(&xprt->queue_lock); in xs_read_stream_reply()
604 spin_lock(&xprt->queue_lock); in xs_read_stream_reply()
605 if (msg->msg_flags & (MSG_EOR|MSG_TRUNC)) in xs_read_stream_reply()
606 xprt_complete_rqst(req->rq_task, transport->recv.copied); in xs_read_stream_reply()
608 req->rq_private_buf.len = transport->recv.copied; in xs_read_stream_reply()
611 spin_unlock(&xprt->queue_lock); in xs_read_stream_reply()
622 if (transport->recv.len == 0) { in xs_read_stream()
623 want = xs_read_stream_headersize(transport->recv.copied != 0); in xs_read_stream()
625 transport->recv.offset); in xs_read_stream()
626 if (ret <= 0) in xs_read_stream()
628 transport->recv.offset = ret; in xs_read_stream()
629 if (transport->recv.offset != want) in xs_read_stream()
630 return transport->recv.offset; in xs_read_stream()
631 transport->recv.len = be32_to_cpu(transport->recv.fraghdr) & in xs_read_stream()
633 transport->recv.offset -= sizeof(transport->recv.fraghdr); in xs_read_stream()
637 switch (be32_to_cpu(transport->recv.calldir)) { in xs_read_stream()
647 if (msg.msg_flags & MSG_TRUNC) { in xs_read_stream()
648 transport->recv.calldir = cpu_to_be32(-1); in xs_read_stream()
649 transport->recv.copied = -1; in xs_read_stream()
651 if (ret < 0) in xs_read_stream()
654 if (transport->recv.offset < transport->recv.len) { in xs_read_stream()
655 if (!(msg.msg_flags & MSG_TRUNC)) in xs_read_stream()
658 ret = xs_read_discard(transport->sock, &msg, flags, in xs_read_stream()
659 transport->recv.len - transport->recv.offset); in xs_read_stream()
660 if (ret <= 0) in xs_read_stream()
662 transport->recv.offset += ret; in xs_read_stream()
664 if (transport->recv.offset != transport->recv.len) in xs_read_stream()
667 if (xs_read_stream_request_done(transport)) { in xs_read_stream()
669 transport->recv.copied = 0; in xs_read_stream()
671 transport->recv.offset = 0; in xs_read_stream()
672 transport->recv.len = 0; in xs_read_stream()
675 return ret != 0 ? ret : -ESHUTDOWN; in xs_read_stream()
680 return transport->sock->ops->poll(transport->file, transport->sock, in xs_poll_socket()
694 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state); in xs_poll_check_readable()
695 if (!xs_poll_socket_readable(transport)) in xs_poll_check_readable()
697 if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state)) in xs_poll_check_readable()
698 queue_work(xprtiod_workqueue, &transport->recv_worker); in xs_poll_check_readable()
706 mutex_lock(&transport->recv_mutex); in xs_stream_data_receive()
707 if (transport->sock == NULL) in xs_stream_data_receive()
711 if (ret < 0) in xs_stream_data_receive()
716 if (ret == -ESHUTDOWN) in xs_stream_data_receive()
717 kernel_sock_shutdown(transport->sock, SHUT_RDWR); in xs_stream_data_receive()
721 mutex_unlock(&transport->recv_mutex); in xs_stream_data_receive()
722 trace_xs_stream_read_data(&transport->xprt, ret, read); in xs_stream_data_receive()
738 transport->recv.offset = 0; in xs_stream_reset_connect()
739 transport->recv.len = 0; in xs_stream_reset_connect()
740 transport->recv.copied = 0; in xs_stream_reset_connect()
741 transport->xmit.offset = 0; in xs_stream_reset_connect()
747 transport->xprt.stat.connect_count++; in xs_stream_start_connect()
748 transport->xprt.stat.connect_start = jiffies; in xs_stream_start_connect()
754 * xs_nospace - handle transmit was incomplete
755 * @req: pointer to RPC request
760 struct rpc_xprt *xprt = req->rq_xprt; in xs_nospace()
762 struct sock *sk = transport->inet; in xs_nospace()
763 int ret = -EAGAIN; in xs_nospace()
768 spin_lock(&xprt->transport_lock); in xs_nospace()
771 if (xprt_connected(xprt)) { in xs_nospace()
773 sk->sk_write_pending++; in xs_nospace()
776 ret = -ENOTCONN; in xs_nospace()
778 spin_unlock(&xprt->transport_lock); in xs_nospace()
781 if (ret == -EAGAIN) { in xs_nospace()
785 wq = rcu_dereference(sk->sk_wq); in xs_nospace()
786 set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags); in xs_nospace()
789 sk->sk_write_space(sk); in xs_nospace()
797 xdr_free_bvec(&req->rq_rcv_buf); in xs_stream_prepare_request()
798 req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_KERNEL); in xs_stream_prepare_request()
802 * Determine if the previous message in the stream was aborted before it
808 return transport->xmit.offset != 0 && req->rq_bytes_sent == 0; in xs_send_request_was_aborted()
812 * Return the stream record marker field for a record of length < 2^31-1
817 if (!xdr->len) in xs_stream_record_marker()
819 return cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | (u32)xdr->len); in xs_stream_record_marker()
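The value built here is the standard RPC-over-TCP record marker: a 4-byte big-endian word whose top bit flags the final fragment of a record and whose low 31 bits carry the fragment length (hence the "< 2^31-1" limit in the comment). A minimal decode-side sketch, assuming RPC_FRAGMENT_SIZE_MASK is the complementary length mask defined alongside RPC_LAST_STREAM_FRAGMENT:

/* Sketch: split a received fragment header into its two fields. */
static void example_decode_marker(__be32 fraghdr, u32 *len, bool *is_last)
{
	u32 marker = be32_to_cpu(fraghdr);

	*is_last = marker & RPC_LAST_STREAM_FRAGMENT;	/* top bit: last fragment of the record */
	*len = marker & RPC_FRAGMENT_SIZE_MASK;		/* low 31 bits: fragment length */
}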
823 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
824 * @req: pointer to RPC request
835 struct rpc_xprt *xprt = req->rq_xprt; in xs_local_send_request()
838 struct xdr_buf *xdr = &req->rq_snd_buf; in xs_local_send_request()
840 unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen; in xs_local_send_request()
847 /* Close the stream if the previous transmission was incomplete */ in xs_local_send_request()
848 if (xs_send_request_was_aborted(transport, req)) { in xs_local_send_request()
850 return -ENOTCONN; in xs_local_send_request()
854 req->rq_svec->iov_base, req->rq_svec->iov_len); in xs_local_send_request()
856 req->rq_xtime = ktime_get(); in xs_local_send_request()
857 status = xprt_sock_sendmsg(transport->sock, &msg, xdr, in xs_local_send_request()
858 transport->xmit.offset, rm, &sent); in xs_local_send_request()
859 dprintk("RPC: %s(%u) = %d\n", in xs_local_send_request()
860 __func__, xdr->len - transport->xmit.offset, status); in xs_local_send_request()
862 if (status == -EAGAIN && sock_writeable(transport->inet)) in xs_local_send_request()
863 status = -ENOBUFS; in xs_local_send_request()
865 if (likely(sent > 0) || status == 0) { in xs_local_send_request()
866 transport->xmit.offset += sent; in xs_local_send_request()
867 req->rq_bytes_sent = transport->xmit.offset; in xs_local_send_request()
868 if (likely(req->rq_bytes_sent >= msglen)) { in xs_local_send_request()
869 req->rq_xmit_bytes_sent += transport->xmit.offset; in xs_local_send_request()
870 transport->xmit.offset = 0; in xs_local_send_request()
873 status = -EAGAIN; in xs_local_send_request()
877 case -ENOBUFS: in xs_local_send_request()
879 case -EAGAIN: in xs_local_send_request()
883 dprintk("RPC: sendmsg returned unrecognized error %d\n", in xs_local_send_request()
884 -status); in xs_local_send_request()
886 case -EPIPE: in xs_local_send_request()
888 status = -ENOTCONN; in xs_local_send_request()
895 * xs_udp_send_request - write an RPC request to a UDP socket
896 * @req: pointer to RPC request
907 struct rpc_xprt *xprt = req->rq_xprt; in xs_udp_send_request()
909 struct xdr_buf *xdr = &req->rq_snd_buf; in xs_udp_send_request()
912 .msg_namelen = xprt->addrlen, in xs_udp_send_request()
919 req->rq_svec->iov_base, in xs_udp_send_request()
920 req->rq_svec->iov_len); in xs_udp_send_request()
922 if (!xprt_bound(xprt)) in xs_udp_send_request()
923 return -ENOTCONN; in xs_udp_send_request()
925 if (!xprt_request_get_cong(xprt, req)) in xs_udp_send_request()
926 return -EBADSLT; in xs_udp_send_request()
928 req->rq_xtime = ktime_get(); in xs_udp_send_request()
929 status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, 0, &sent); in xs_udp_send_request()
931 dprintk("RPC: xs_udp_send_request(%u) = %d\n", in xs_udp_send_request()
932 xdr->len, status); in xs_udp_send_request()
934 /* firewall is blocking us, don't return -EAGAIN or we end up looping */ in xs_udp_send_request()
935 if (status == -EPERM) in xs_udp_send_request()
938 if (status == -EAGAIN && sock_writeable(transport->inet)) in xs_udp_send_request()
939 status = -ENOBUFS; in xs_udp_send_request()
941 if (sent > 0 || status == 0) { in xs_udp_send_request()
942 req->rq_xmit_bytes_sent += sent; in xs_udp_send_request()
943 if (sent >= req->rq_slen) in xs_udp_send_request()
946 status = -EAGAIN; in xs_udp_send_request()
951 case -ENOTSOCK: in xs_udp_send_request()
952 status = -ENOTCONN; in xs_udp_send_request()
955 case -EAGAIN: in xs_udp_send_request()
958 case -ENETUNREACH: in xs_udp_send_request()
959 case -ENOBUFS: in xs_udp_send_request()
960 case -EPIPE: in xs_udp_send_request()
961 case -ECONNREFUSED: in xs_udp_send_request()
962 case -EPERM: in xs_udp_send_request()
967 dprintk("RPC: sendmsg returned unrecognized error %d\n", in xs_udp_send_request()
968 -status); in xs_udp_send_request()
975 * xs_tcp_send_request - write an RPC request to a TCP socket
976 * @req: pointer to RPC request
986 * if sendmsg is not able to make progress?
990 struct rpc_xprt *xprt = req->rq_xprt; in xs_tcp_send_request()
992 struct xdr_buf *xdr = &req->rq_snd_buf; in xs_tcp_send_request()
994 unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen; in xs_tcp_send_request()
1002 /* Close the stream if the previous transmission was incomplete */ in xs_tcp_send_request()
1003 if (xs_send_request_was_aborted(transport, req)) { in xs_tcp_send_request()
1004 if (transport->sock != NULL) in xs_tcp_send_request()
1005 kernel_sock_shutdown(transport->sock, SHUT_RDWR); in xs_tcp_send_request()
1006 return -ENOTCONN; in xs_tcp_send_request()
1010 req->rq_svec->iov_base, in xs_tcp_send_request()
1011 req->rq_svec->iov_len); in xs_tcp_send_request()
1013 if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state)) in xs_tcp_send_request()
1014 xs_tcp_set_socket_timeouts(xprt, transport->sock); in xs_tcp_send_request()
1019 req->rq_xtime = ktime_get(); in xs_tcp_send_request()
1021 status = xprt_sock_sendmsg(transport->sock, &msg, xdr, in xs_tcp_send_request()
1022 transport->xmit.offset, rm, &sent); in xs_tcp_send_request()
1024 dprintk("RPC: xs_tcp_send_request(%u) = %d\n", in xs_tcp_send_request()
1025 xdr->len - transport->xmit.offset, status); in xs_tcp_send_request()
1027 /* If we've sent the entire packet, immediately in xs_tcp_send_request()
1029 transport->xmit.offset += sent; in xs_tcp_send_request()
1030 req->rq_bytes_sent = transport->xmit.offset; in xs_tcp_send_request()
1031 if (likely(req->rq_bytes_sent >= msglen)) { in xs_tcp_send_request()
1032 req->rq_xmit_bytes_sent += transport->xmit.offset; in xs_tcp_send_request()
1033 transport->xmit.offset = 0; in xs_tcp_send_request()
1039 if (status == -EAGAIN) { in xs_tcp_send_request()
1041 * Return EAGAIN if we're sure we're hitting the in xs_tcp_send_request()
1044 if (test_bit(SOCK_NOSPACE, &transport->sock->flags)) in xs_tcp_send_request()
1049 if (sent == 0) { in xs_tcp_send_request()
1050 status = -ENOBUFS; in xs_tcp_send_request()
1051 if (vm_wait) in xs_tcp_send_request()
1060 if (status < 0) in xs_tcp_send_request()
1066 case -ENOTSOCK: in xs_tcp_send_request()
1067 status = -ENOTCONN; in xs_tcp_send_request()
1070 case -EAGAIN: in xs_tcp_send_request()
1073 case -ECONNRESET: in xs_tcp_send_request()
1074 case -ECONNREFUSED: in xs_tcp_send_request()
1075 case -ENOTCONN: in xs_tcp_send_request()
1076 case -EADDRINUSE: in xs_tcp_send_request()
1077 case -ENOBUFS: in xs_tcp_send_request()
1078 case -EPIPE: in xs_tcp_send_request()
1081 dprintk("RPC: sendmsg returned unrecognized error %d\n", in xs_tcp_send_request()
1082 -status); in xs_tcp_send_request()
1090 transport->old_data_ready = sk->sk_data_ready; in xs_save_old_callbacks()
1091 transport->old_state_change = sk->sk_state_change; in xs_save_old_callbacks()
1092 transport->old_write_space = sk->sk_write_space; in xs_save_old_callbacks()
1093 transport->old_error_report = sk->sk_error_report; in xs_save_old_callbacks()
1098 sk->sk_data_ready = transport->old_data_ready; in xs_restore_old_callbacks()
1099 sk->sk_state_change = transport->old_state_change; in xs_restore_old_callbacks()
1100 sk->sk_write_space = transport->old_write_space; in xs_restore_old_callbacks()
1101 sk->sk_error_report = transport->old_error_report; in xs_restore_old_callbacks()
1108 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state); in xs_sock_reset_state_flags()
1109 clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state); in xs_sock_reset_state_flags()
1110 clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state); in xs_sock_reset_state_flags()
1111 clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state); in xs_sock_reset_state_flags()
1116 set_bit(nr, &transport->sock_state); in xs_run_error_worker()
1117 queue_work(xprtiod_workqueue, &transport->error_worker); in xs_run_error_worker()
1123 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); in xs_sock_reset_connection_flags()
1124 clear_bit(XPRT_CLOSING, &xprt->state); in xs_sock_reset_connection_flags()
1130 * xs_error_report - callback to handle TCP socket state errors
1134 * using the socket, and so we don't want to clear sk->sk_err.
1141 read_lock_bh(&sk->sk_callback_lock); in xs_error_report()
1142 if (!(xprt = xprt_from_sock(sk))) in xs_error_report()
1146 transport->xprt_err = -sk->sk_err; in xs_error_report()
1147 if (transport->xprt_err == 0) in xs_error_report()
1149 dprintk("RPC: xs_error_report client %p, error=%d...\n", in xs_error_report()
1150 xprt, -transport->xprt_err); in xs_error_report()
1151 trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err); in xs_error_report()
1157 read_unlock_bh(&sk->sk_callback_lock); in xs_error_report()
1162 struct socket *sock = transport->sock; in xs_reset_transport()
1163 struct sock *sk = transport->inet; in xs_reset_transport()
1164 struct rpc_xprt *xprt = &transport->xprt; in xs_reset_transport()
1165 struct file *filp = transport->file; in xs_reset_transport()
1167 if (sk == NULL) in xs_reset_transport()
1170 if (atomic_read(&transport->xprt.swapper)) in xs_reset_transport()
1175 mutex_lock(&transport->recv_mutex); in xs_reset_transport()
1176 write_lock_bh(&sk->sk_callback_lock); in xs_reset_transport()
1177 transport->inet = NULL; in xs_reset_transport()
1178 transport->sock = NULL; in xs_reset_transport()
1179 transport->file = NULL; in xs_reset_transport()
1181 sk->sk_user_data = NULL; in xs_reset_transport()
1185 write_unlock_bh(&sk->sk_callback_lock); in xs_reset_transport()
1189 mutex_unlock(&transport->recv_mutex); in xs_reset_transport()
1198 * xs_close - close a socket
1211 dprintk("RPC: xs_close xprt %p\n", xprt); in xs_close()
1214 xprt->reestablish_timeout = 0; in xs_close()
1219 dprintk("RPC: injecting transport disconnect on xprt=%p\n", in xs_inject_disconnect()
1231 * xs_destroy - prepare to shutdown a transport
1239 dprintk("RPC: xs_destroy xprt %p\n", xprt); in xs_destroy()
1241 cancel_delayed_work_sync(&transport->connect_worker); in xs_destroy()
1243 cancel_work_sync(&transport->recv_worker); in xs_destroy()
1244 cancel_work_sync(&transport->error_worker); in xs_destroy()
1250 * xs_udp_data_read_skb - receive callback for UDP sockets
1266 repsize = skb->len; in xs_udp_data_read_skb()
1267 if (repsize < 4) { in xs_udp_data_read_skb()
1268 dprintk("RPC: impossible RPC reply size %d!\n", repsize); in xs_udp_data_read_skb()
1274 if (xp == NULL) in xs_udp_data_read_skb()
1278 spin_lock(&xprt->queue_lock); in xs_udp_data_read_skb()
1280 if (!rovr) in xs_udp_data_read_skb()
1283 xprt_update_rtt(rovr->rq_task); in xs_udp_data_read_skb()
1284 spin_unlock(&xprt->queue_lock); in xs_udp_data_read_skb()
1285 task = rovr->rq_task; in xs_udp_data_read_skb()
1287 if ((copied = rovr->rq_private_buf.buflen) > repsize) in xs_udp_data_read_skb()
1290 /* Suck it into the iovec, verify checksum if not done by hw. */ in xs_udp_data_read_skb()
1291 if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) { in xs_udp_data_read_skb()
1292 spin_lock(&xprt->queue_lock); in xs_udp_data_read_skb()
1298 spin_lock(&xprt->transport_lock); in xs_udp_data_read_skb()
1300 spin_unlock(&xprt->transport_lock); in xs_udp_data_read_skb()
1301 spin_lock(&xprt->queue_lock); in xs_udp_data_read_skb()
1307 spin_unlock(&xprt->queue_lock); in xs_udp_data_read_skb()
1316 mutex_lock(&transport->recv_mutex); in xs_udp_data_receive()
1317 sk = transport->inet; in xs_udp_data_receive()
1318 if (sk == NULL) in xs_udp_data_receive()
1322 if (skb == NULL) in xs_udp_data_receive()
1324 xs_udp_data_read_skb(&transport->xprt, sk, skb); in xs_udp_data_receive()
1330 mutex_unlock(&transport->recv_mutex); in xs_udp_data_receive()
1344 * xs_data_ready - "data ready" callback for UDP sockets
1352 read_lock_bh(&sk->sk_callback_lock); in xs_data_ready()
1353 dprintk("RPC: xs_data_ready...\n"); in xs_data_ready()
1355 if (xprt != NULL) { in xs_data_ready()
1358 transport->old_data_ready(sk); in xs_data_ready()
1362 if (xprt->reestablish_timeout) in xs_data_ready()
1363 xprt->reestablish_timeout = 0; in xs_data_ready()
1364 if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state)) in xs_data_ready()
1365 queue_work(xprtiod_workqueue, &transport->recv_worker); in xs_data_ready()
1367 read_unlock_bh(&sk->sk_callback_lock); in xs_data_ready()
1371 * Helper function to force a TCP close if the server is sending
1379 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1387 * xs_tcp_state_change - callback to handle TCP socket state changes
1396 read_lock_bh(&sk->sk_callback_lock); in xs_tcp_state_change()
1397 if (!(xprt = xprt_from_sock(sk))) in xs_tcp_state_change()
1399 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); in xs_tcp_state_change()
1400 dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n", in xs_tcp_state_change()
1401 sk->sk_state, xprt_connected(xprt), in xs_tcp_state_change()
1404 sk->sk_shutdown); in xs_tcp_state_change()
1407 trace_rpc_socket_state_change(xprt, sk->sk_socket); in xs_tcp_state_change()
1408 switch (sk->sk_state) { in xs_tcp_state_change()
1410 if (!xprt_test_and_set_connected(xprt)) { in xs_tcp_state_change()
1411 xprt->connect_cookie++; in xs_tcp_state_change()
1412 clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); in xs_tcp_state_change()
1415 xprt->stat.connect_count++; in xs_tcp_state_change()
1416 xprt->stat.connect_time += (long)jiffies - in xs_tcp_state_change()
1417 xprt->stat.connect_start; in xs_tcp_state_change()
1423 xprt->connect_cookie++; in xs_tcp_state_change()
1424 xprt->reestablish_timeout = 0; in xs_tcp_state_change()
1425 set_bit(XPRT_CLOSING, &xprt->state); in xs_tcp_state_change()
1427 clear_bit(XPRT_CONNECTED, &xprt->state); in xs_tcp_state_change()
1428 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); in xs_tcp_state_change()
1433 xprt->connect_cookie++; in xs_tcp_state_change()
1434 clear_bit(XPRT_CONNECTED, &xprt->state); in xs_tcp_state_change()
1439 * If the server closed down the connection, make sure that in xs_tcp_state_change()
1442 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) in xs_tcp_state_change()
1443 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; in xs_tcp_state_change()
1446 set_bit(XPRT_CLOSING, &xprt->state); in xs_tcp_state_change()
1448 clear_bit(XPRT_CONNECTED, &xprt->state); in xs_tcp_state_change()
1452 if (test_and_clear_bit(XPRT_SOCK_CONNECTING, in xs_tcp_state_change()
1453 &transport->sock_state)) in xs_tcp_state_change()
1455 clear_bit(XPRT_CLOSING, &xprt->state); in xs_tcp_state_change()
1460 read_unlock_bh(&sk->sk_callback_lock); in xs_tcp_state_change()
1469 if (!sk->sk_socket) in xs_write_space()
1471 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in xs_write_space()
1473 if (unlikely(!(xprt = xprt_from_sock(sk)))) in xs_write_space()
1477 wq = rcu_dereference(sk->sk_wq); in xs_write_space()
1478 if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0) in xs_write_space()
1482 sk->sk_write_pending--; in xs_write_space()
1488 * xs_udp_write_space - callback invoked when socket buffer space
1499 read_lock_bh(&sk->sk_callback_lock); in xs_udp_write_space()
1502 if (sock_writeable(sk)) in xs_udp_write_space()
1505 read_unlock_bh(&sk->sk_callback_lock); in xs_udp_write_space()
1509 * xs_tcp_write_space - callback invoked when socket buffer space
1520 read_lock_bh(&sk->sk_callback_lock); in xs_tcp_write_space()
1523 if (sk_stream_is_writeable(sk)) in xs_tcp_write_space()
1526 read_unlock_bh(&sk->sk_callback_lock); in xs_tcp_write_space()
1532 struct sock *sk = transport->inet; in xs_udp_do_set_buffer_size()
1534 if (transport->rcvsize) { in xs_udp_do_set_buffer_size()
1535 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; in xs_udp_do_set_buffer_size()
1536 sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2; in xs_udp_do_set_buffer_size()
1538 if (transport->sndsize) { in xs_udp_do_set_buffer_size()
1539 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; in xs_udp_do_set_buffer_size()
1540 sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2; in xs_udp_do_set_buffer_size()
1541 sk->sk_write_space(sk); in xs_udp_do_set_buffer_size()
1546 * xs_udp_set_buffer_size - set send and receive limits
1557 transport->sndsize = 0; in xs_udp_set_buffer_size()
1558 if (sndsize) in xs_udp_set_buffer_size()
1559 transport->sndsize = sndsize + 1024; in xs_udp_set_buffer_size()
1560 transport->rcvsize = 0; in xs_udp_set_buffer_size()
1561 if (rcvsize) in xs_udp_set_buffer_size()
1562 transport->rcvsize = rcvsize + 1024; in xs_udp_set_buffer_size()
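The sizing rule in these fragments is straightforward arithmetic: each direction gets the caller's per-request size plus 1024 bytes of slack, and the socket buffer is dimensioned for max_reqs outstanding requests with a factor-of-two margin. With illustrative numbers (not from the file), sndsize = 4096 and xprt->max_reqs = 16 gives sk_sndbuf = (4096 + 1024) * 16 * 2 = 163840 bytes.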
1568 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1576 spin_lock(&xprt->transport_lock); in xs_udp_timer()
1577 xprt_adjust_cwnd(xprt, task, -ETIMEDOUT); in xs_udp_timer()
1578 spin_unlock(&xprt->transport_lock); in xs_udp_timer()
1587 if (max < min) in xs_get_random_port()
1588 return -EADDRINUSE; in xs_get_random_port()
1589 range = max - min + 1; in xs_get_random_port()
1599 if (kernel_getsockname(sock, (struct sockaddr *)&buf) < 0) in xs_sock_getport()
1603 port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port); in xs_sock_getport()
1606 port = ntohs(((struct sockaddr_in *)&buf)->sin_port); in xs_sock_getport()
1613 * xs_set_port - reset the port number in the remote endpoint address
1620 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port); in xs_set_port()
1628 if (transport->srcport == 0 && transport->xprt.reuseport) in xs_set_srcport()
1629 transport->srcport = xs_sock_getport(sock); in xs_set_srcport()
1634 int port = transport->srcport; in xs_get_srcport()
1636 if (port == 0 && transport->xprt.resvport) in xs_get_srcport()
1643 if (transport->srcport != 0) in xs_next_srcport()
1644 transport->srcport = 0; in xs_next_srcport()
1645 if (!transport->xprt.resvport) in xs_next_srcport()
1647 if (port <= xprt_min_resvport || port > xprt_max_resvport) in xs_next_srcport()
1649 return --port; in xs_next_srcport()
1659 * If we are asking for any ephemeral port (i.e. port == 0 && in xs_bind()
1660 * transport->xprt.resvport == 0), don't bind. Let the local in xs_bind()
1669 * If we're asking for any reserved port (i.e. port == 0 && in xs_bind()
1670 * transport->xprt.resvport == 1) xs_get_srcport above will in xs_bind()
1671 * ensure that port is non-zero and we will bind as needed. in xs_bind()
1673 if (port <= 0) in xs_bind()
1676 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen); in xs_bind()
1680 transport->xprt.addrlen); in xs_bind()
1681 if (err == 0) { in xs_bind()
1682 transport->srcport = port; in xs_bind()
1687 if (port > last) in xs_bind()
1689 } while (err == -EADDRINUSE && nloop != 2); in xs_bind()
1691 if (myaddr.ss_family == AF_INET) in xs_bind()
1692 dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__, in xs_bind()
1693 &((struct sockaddr_in *)&myaddr)->sin_addr, in xs_bind()
1696 dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__, in xs_bind()
1697 &((struct sockaddr_in6 *)&myaddr)->sin6_addr, in xs_bind()
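The xs_bind() fragments above implement the policy described in the comments: no bind at all for a plain ephemeral port, otherwise walk downward through the reserved range until kernel_bind() stops returning -EADDRINUSE, wrapping around at most once. A condensed sketch of that loop (declarations omitted; the xs_next_srcport() signature is inferred from its use in this listing, so treat it as an assumption):

	do {
		rpc_set_port((struct sockaddr *)&myaddr, port);
		err = kernel_bind(sock, (struct sockaddr *)&myaddr,
				  transport->xprt.addrlen);
		if (err == 0) {
			transport->srcport = port;		/* remember the port for reconnects */
			break;
		}
		last = port;
		port = xs_next_srcport(transport, port);	/* step down; wraps to xprt_max_resvport */
		if (port > last)
			nloop++;				/* wrapped: count one full pass */
	} while (err == -EADDRINUSE && nloop != 2);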
1707 xprt_set_bound(task->tk_xprt); in xs_local_rpcbind()
1720 struct sock *sk = sock->sk; in xs_reclassify_socketu()
1722 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC", in xs_reclassify_socketu()
1723 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]); in xs_reclassify_socketu()
1728 struct sock *sk = sock->sk; in xs_reclassify_socket4()
1730 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC", in xs_reclassify_socket4()
1731 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]); in xs_reclassify_socket4()
1736 struct sock *sk = sock->sk; in xs_reclassify_socket6()
1738 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC", in xs_reclassify_socket6()
1739 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]); in xs_reclassify_socket6()
1744 if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk))) in xs_reclassify_socket()
1777 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1); in xs_create_sock()
1778 if (err < 0) { in xs_create_sock()
1779 dprintk("RPC: can't create %d transport socket (%d).\n", in xs_create_sock()
1780 protocol, -err); in xs_create_sock()
1785 if (reuseport) in xs_create_sock()
1786 sock_set_reuseport(sock->sk); in xs_create_sock()
1789 if (err) { in xs_create_sock()
1795 if (IS_ERR(filp)) in xs_create_sock()
1797 transport->file = filp; in xs_create_sock()
1810 if (!transport->inet) { in xs_local_finish_connecting()
1811 struct sock *sk = sock->sk; in xs_local_finish_connecting()
1813 write_lock_bh(&sk->sk_callback_lock); in xs_local_finish_connecting()
1817 sk->sk_user_data = xprt; in xs_local_finish_connecting()
1818 sk->sk_data_ready = xs_data_ready; in xs_local_finish_connecting()
1819 sk->sk_write_space = xs_udp_write_space; in xs_local_finish_connecting()
1821 sk->sk_error_report = xs_error_report; in xs_local_finish_connecting()
1826 transport->sock = sock; in xs_local_finish_connecting()
1827 transport->inet = sk; in xs_local_finish_connecting()
1829 write_unlock_bh(&sk->sk_callback_lock); in xs_local_finish_connecting()
1834 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0); in xs_local_finish_connecting()
1838 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
1843 struct rpc_xprt *xprt = &transport->xprt; in xs_local_setup_socket()
1848 status = __sock_create(xprt->xprt_net, AF_LOCAL, in xs_local_setup_socket()
1850 if (status < 0) { in xs_local_setup_socket()
1851 dprintk("RPC: can't create AF_LOCAL " in xs_local_setup_socket()
1852 "transport socket (%d).\n", -status); in xs_local_setup_socket()
1858 if (IS_ERR(filp)) { in xs_local_setup_socket()
1862 transport->file = filp; in xs_local_setup_socket()
1864 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n", in xs_local_setup_socket()
1865 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_local_setup_socket()
1871 dprintk("RPC: xprt %p connected to %s\n", in xs_local_setup_socket()
1872 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_local_setup_socket()
1873 xprt->stat.connect_count++; in xs_local_setup_socket()
1874 xprt->stat.connect_time += (long)jiffies - in xs_local_setup_socket()
1875 xprt->stat.connect_start; in xs_local_setup_socket()
1877 case -ENOBUFS: in xs_local_setup_socket()
1879 case -ENOENT: in xs_local_setup_socket()
1880 dprintk("RPC: xprt %p: socket %s does not exist\n", in xs_local_setup_socket()
1881 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_local_setup_socket()
1883 case -ECONNREFUSED: in xs_local_setup_socket()
1884 dprintk("RPC: xprt %p: connection refused for %s\n", in xs_local_setup_socket()
1885 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_local_setup_socket()
1889 __func__, -status, in xs_local_setup_socket()
1890 xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_local_setup_socket()
1904 if (RPC_IS_ASYNC(task)) { in xs_local_connect()
1907 * filesystem namespace of the process making the rpc in xs_local_connect()
1910 * If we want to support asynchronous AF_LOCAL calls, in xs_local_connect()
1914 task->tk_rpc_status = -ENOTCONN; in xs_local_connect()
1915 rpc_exit(task, -ENOTCONN); in xs_local_connect()
1919 if (ret && !RPC_IS_SOFTCONN(task)) in xs_local_connect()
1923 #if IS_ENABLED(CONFIG_SUNRPC_SWAP)
1935 * If there's no sock, then we have nothing to set. The in xs_set_memalloc()
1938 if (!transport->inet) in xs_set_memalloc()
1940 if (atomic_read(&xprt->swapper)) in xs_set_memalloc()
1941 sk_set_memalloc(transport->inet); in xs_set_memalloc()
1945 * xs_enable_swap - Tag this transport as being used for swap.
1949 * optionally mark it for swapping if it wasn't already.
1956 if (atomic_inc_return(&xprt->swapper) != 1) in xs_enable_swap()
1958 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) in xs_enable_swap()
1959 return -ERESTARTSYS; in xs_enable_swap()
1960 if (xs->inet) in xs_enable_swap()
1961 sk_set_memalloc(xs->inet); in xs_enable_swap()
1967 * xs_disable_swap - Untag this transport as being used for swap.
1970 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
1978 if (!atomic_dec_and_test(&xprt->swapper)) in xs_disable_swap()
1980 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) in xs_disable_swap()
1982 if (xs->inet) in xs_disable_swap()
1983 sk_clear_memalloc(xs->inet); in xs_disable_swap()
1994 return -EINVAL; in xs_enable_swap()
2007 if (!transport->inet) { in xs_udp_finish_connecting()
2008 struct sock *sk = sock->sk; in xs_udp_finish_connecting()
2010 write_lock_bh(&sk->sk_callback_lock); in xs_udp_finish_connecting()
2014 sk->sk_user_data = xprt; in xs_udp_finish_connecting()
2015 sk->sk_data_ready = xs_data_ready; in xs_udp_finish_connecting()
2016 sk->sk_write_space = xs_udp_write_space; in xs_udp_finish_connecting()
2022 transport->sock = sock; in xs_udp_finish_connecting()
2023 transport->inet = sk; in xs_udp_finish_connecting()
2027 write_unlock_bh(&sk->sk_callback_lock); in xs_udp_finish_connecting()
2031 xprt->stat.connect_start = jiffies; in xs_udp_finish_connecting()
2038 struct rpc_xprt *xprt = &transport->xprt; in xs_udp_setup_socket()
2040 int status = -EIO; in xs_udp_setup_socket()
2043 xs_addr(xprt)->sa_family, SOCK_DGRAM, in xs_udp_setup_socket()
2045 if (IS_ERR(sock)) in xs_udp_setup_socket()
2048 dprintk("RPC: worker connecting xprt %p via %s to " in xs_udp_setup_socket()
2050 xprt->address_strings[RPC_DISPLAY_PROTO], in xs_udp_setup_socket()
2051 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_udp_setup_socket()
2052 xprt->address_strings[RPC_DISPLAY_PORT]); in xs_udp_setup_socket()
2064 * xs_tcp_shutdown - gracefully shut down a TCP socket
2073 struct socket *sock = transport->sock; in xs_tcp_shutdown()
2074 int skst = transport->inet ? transport->inet->sk_state : TCP_CLOSE; in xs_tcp_shutdown()
2076 if (sock == NULL) in xs_tcp_shutdown()
2097 spin_lock(&xprt->transport_lock); in xs_tcp_set_socket_timeouts()
2098 keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ); in xs_tcp_set_socket_timeouts()
2099 keepcnt = xprt->timeout->to_retries + 1; in xs_tcp_set_socket_timeouts()
2100 timeo = jiffies_to_msecs(xprt->timeout->to_initval) * in xs_tcp_set_socket_timeouts()
2101 (xprt->timeout->to_retries + 1); in xs_tcp_set_socket_timeouts()
2102 clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); in xs_tcp_set_socket_timeouts()
2103 spin_unlock(&xprt->transport_lock); in xs_tcp_set_socket_timeouts()
2106 sock_set_keepalive(sock->sk); in xs_tcp_set_socket_timeouts()
2107 tcp_sock_set_keepidle(sock->sk, keepidle); in xs_tcp_set_socket_timeouts()
2108 tcp_sock_set_keepintvl(sock->sk, keepidle); in xs_tcp_set_socket_timeouts()
2109 tcp_sock_set_keepcnt(sock->sk, keepcnt); in xs_tcp_set_socket_timeouts()
2112 tcp_sock_set_user_timeout(sock->sk, timeo); in xs_tcp_set_socket_timeouts()
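The fragments above derive TCP keepalive settings directly from the RPC retransmit timeout: the probe interval is to_initval rounded up to whole seconds, the probe count is to_retries + 1, and TCP_USER_TIMEOUT gets the full retransmit budget in milliseconds. A self-contained sketch of that mapping, using the same in-kernel setters seen above:

/* Sketch: map an rpc_timeout onto TCP keepalive / user-timeout settings. */
static void example_apply_rpc_timeouts(struct socket *sock,
				       const struct rpc_timeout *to)
{
	unsigned int keepidle = DIV_ROUND_UP(to->to_initval, HZ);	/* seconds between probes */
	unsigned int keepcnt = to->to_retries + 1;			/* probes before giving up */
	unsigned int timeo = jiffies_to_msecs(to->to_initval) *
			     (to->to_retries + 1);			/* total budget, in ms */

	sock_set_keepalive(sock->sk);
	tcp_sock_set_keepidle(sock->sk, keepidle);
	tcp_sock_set_keepintvl(sock->sk, keepidle);
	tcp_sock_set_keepcnt(sock->sk, keepcnt);
	tcp_sock_set_user_timeout(sock->sk, timeo);
}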
2123 spin_lock(&xprt->transport_lock); in xs_tcp_set_connect_timeout()
2124 if (reconnect_timeout < xprt->max_reconnect_timeout) in xs_tcp_set_connect_timeout()
2125 xprt->max_reconnect_timeout = reconnect_timeout; in xs_tcp_set_connect_timeout()
2126 if (connect_timeout < xprt->connect_timeout) { in xs_tcp_set_connect_timeout()
2127 memcpy(&to, xprt->timeout, sizeof(to)); in xs_tcp_set_connect_timeout()
2130 if (initval < XS_TCP_INIT_REEST_TO << 1) in xs_tcp_set_connect_timeout()
2134 memcpy(&transport->tcp_timeout, &to, in xs_tcp_set_connect_timeout()
2135 sizeof(transport->tcp_timeout)); in xs_tcp_set_connect_timeout()
2136 xprt->timeout = &transport->tcp_timeout; in xs_tcp_set_connect_timeout()
2137 xprt->connect_timeout = connect_timeout; in xs_tcp_set_connect_timeout()
2139 set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); in xs_tcp_set_connect_timeout()
2140 spin_unlock(&xprt->transport_lock); in xs_tcp_set_connect_timeout()
2146 int ret = -ENOTCONN; in xs_tcp_finish_connecting()
2148 if (!transport->inet) { in xs_tcp_finish_connecting()
2149 struct sock *sk = sock->sk; in xs_tcp_finish_connecting()
2151 /* Avoid temporary addresses, they are bad for long-lived in xs_tcp_finish_connecting()
2158 if (xs_addr(xprt)->sa_family == PF_INET6) { in xs_tcp_finish_connecting()
2165 write_lock_bh(&sk->sk_callback_lock); in xs_tcp_finish_connecting()
2169 sk->sk_user_data = xprt; in xs_tcp_finish_connecting()
2170 sk->sk_data_ready = xs_data_ready; in xs_tcp_finish_connecting()
2171 sk->sk_state_change = xs_tcp_state_change; in xs_tcp_finish_connecting()
2172 sk->sk_write_space = xs_tcp_write_space; in xs_tcp_finish_connecting()
2174 sk->sk_error_report = xs_error_report; in xs_tcp_finish_connecting()
2178 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; in xs_tcp_finish_connecting()
2183 transport->sock = sock; in xs_tcp_finish_connecting()
2184 transport->inet = sk; in xs_tcp_finish_connecting()
2186 write_unlock_bh(&sk->sk_callback_lock); in xs_tcp_finish_connecting()
2189 if (!xprt_bound(xprt)) in xs_tcp_finish_connecting()
2197 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); in xs_tcp_finish_connecting()
2198 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); in xs_tcp_finish_connecting()
2203 case -EINPROGRESS: in xs_tcp_finish_connecting()
2205 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) in xs_tcp_finish_connecting()
2206 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; in xs_tcp_finish_connecting()
2208 case -EADDRNOTAVAIL: in xs_tcp_finish_connecting()
2210 transport->srcport = 0; in xs_tcp_finish_connecting()
2217 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
2226 struct socket *sock = transport->sock; in xs_tcp_setup_socket()
2227 struct rpc_xprt *xprt = &transport->xprt; in xs_tcp_setup_socket()
2228 int status = -EIO; in xs_tcp_setup_socket()
2230 if (!sock) { in xs_tcp_setup_socket()
2232 xs_addr(xprt)->sa_family, SOCK_STREAM, in xs_tcp_setup_socket()
2234 if (IS_ERR(sock)) { in xs_tcp_setup_socket()
2240 dprintk("RPC: worker connecting xprt %p via %s to " in xs_tcp_setup_socket()
2242 xprt->address_strings[RPC_DISPLAY_PROTO], in xs_tcp_setup_socket()
2243 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_tcp_setup_socket()
2244 xprt->address_strings[RPC_DISPLAY_PORT]); in xs_tcp_setup_socket()
2248 dprintk("RPC: %p connect status %d connected %d sock state %d\n", in xs_tcp_setup_socket()
2249 xprt, -status, xprt_connected(xprt), in xs_tcp_setup_socket()
2250 sock->sk->sk_state); in xs_tcp_setup_socket()
2256 case -EADDRNOTAVAIL: in xs_tcp_setup_socket()
2263 case -EINPROGRESS: in xs_tcp_setup_socket()
2264 case -EALREADY: in xs_tcp_setup_socket()
2267 case -EINVAL: in xs_tcp_setup_socket()
2268 /* Happens, for instance, if the user specified a link in xs_tcp_setup_socket()
2269 * local IPv6 address without a scope-id. in xs_tcp_setup_socket()
2271 case -ECONNREFUSED: in xs_tcp_setup_socket()
2272 case -ECONNRESET: in xs_tcp_setup_socket()
2273 case -ENETDOWN: in xs_tcp_setup_socket()
2274 case -ENETUNREACH: in xs_tcp_setup_socket()
2275 case -EHOSTUNREACH: in xs_tcp_setup_socket()
2276 case -EADDRINUSE: in xs_tcp_setup_socket()
2277 case -ENOBUFS: in xs_tcp_setup_socket()
2279 * xs_tcp_force_close() wakes tasks with -EIO. in xs_tcp_setup_socket()
2287 status = -EAGAIN; in xs_tcp_setup_socket()
2295 * xs_connect - connect a socket to a remote endpoint
2297 * @task: address of RPC task that manages state of connect request
2299 * TCP: If the remote end dropped the connection, delay reconnecting.
2305 * If a UDP socket connect fails, the delay behavior here prevents
2315 if (transport->sock != NULL) { in xs_connect()
2316 dprintk("RPC: xs_connect delayed xprt %p for %lu " in xs_connect()
2318 xprt, xprt->reestablish_timeout / HZ); in xs_connect()
2327 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); in xs_connect()
2330 &transport->connect_worker, in xs_connect()
2336 if (test_and_clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state)) in xs_wake_disconnect()
2337 xs_tcp_force_close(&transport->xprt); in xs_wake_disconnect()
2342 if (test_and_clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state)) in xs_wake_write()
2343 xprt_write_space(&transport->xprt); in xs_wake_write()
2350 if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state)) in xs_wake_error()
2352 mutex_lock(&transport->recv_mutex); in xs_wake_error()
2353 if (transport->sock == NULL) in xs_wake_error()
2355 if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state)) in xs_wake_error()
2357 sockerr = xchg(&transport->xprt_err, 0); in xs_wake_error()
2358 if (sockerr < 0) in xs_wake_error()
2359 xprt_wake_pending_tasks(&transport->xprt, sockerr); in xs_wake_error()
2361 mutex_unlock(&transport->recv_mutex); in xs_wake_error()
2366 if (test_and_clear_bit(XPRT_SOCK_WAKE_PENDING, &transport->sock_state)) in xs_wake_pending()
2367 xprt_wake_pending_tasks(&transport->xprt, -EAGAIN); in xs_wake_pending()
2382 * xs_local_print_stats - display AF_LOCAL socket-specific stats in xs_local_print_stats()
2391 if (xprt_connected(xprt)) in xs_local_print_stats()
2392 idle_time = (long)(jiffies - xprt->last_used) / HZ; in xs_local_print_stats()
2396 xprt->stat.bind_count, in xs_local_print_stats()
2397 xprt->stat.connect_count, in xs_local_print_stats()
2398 xprt->stat.connect_time / HZ, in xs_local_print_stats()
2400 xprt->stat.sends, in xs_local_print_stats()
2401 xprt->stat.recvs, in xs_local_print_stats()
2402 xprt->stat.bad_xids, in xs_local_print_stats()
2403 xprt->stat.req_u, in xs_local_print_stats()
2404 xprt->stat.bklog_u, in xs_local_print_stats()
2405 xprt->stat.max_slots, in xs_local_print_stats()
2406 xprt->stat.sending_u, in xs_local_print_stats()
2407 xprt->stat.pending_u); in xs_local_print_stats()
2411 * xs_udp_print_stats - display UDP socket-specific stats in xs_udp_print_stats()
2422 transport->srcport, in xs_udp_print_stats()
2423 xprt->stat.bind_count, in xs_udp_print_stats()
2424 xprt->stat.sends, in xs_udp_print_stats()
2425 xprt->stat.recvs, in xs_udp_print_stats()
2426 xprt->stat.bad_xids, in xs_udp_print_stats()
2427 xprt->stat.req_u, in xs_udp_print_stats()
2428 xprt->stat.bklog_u, in xs_udp_print_stats()
2429 xprt->stat.max_slots, in xs_udp_print_stats()
2430 xprt->stat.sending_u, in xs_udp_print_stats()
2431 xprt->stat.pending_u); in xs_udp_print_stats()
2435 * xs_tcp_print_stats - display TCP socket-specific stats in xs_tcp_print_stats()
2445 if (xprt_connected(xprt)) in xs_tcp_print_stats()
2446 idle_time = (long)(jiffies - xprt->last_used) / HZ; in xs_tcp_print_stats()
2450 transport->srcport, in xs_tcp_print_stats()
2451 xprt->stat.bind_count, in xs_tcp_print_stats()
2452 xprt->stat.connect_count, in xs_tcp_print_stats()
2453 xprt->stat.connect_time / HZ, in xs_tcp_print_stats()
2455 xprt->stat.sends, in xs_tcp_print_stats()
2456 xprt->stat.recvs, in xs_tcp_print_stats()
2457 xprt->stat.bad_xids, in xs_tcp_print_stats()
2458 xprt->stat.req_u, in xs_tcp_print_stats()
2459 xprt->stat.bklog_u, in xs_tcp_print_stats()
2460 xprt->stat.max_slots, in xs_tcp_print_stats()
2461 xprt->stat.sending_u, in xs_tcp_print_stats()
2462 xprt->stat.pending_u); in xs_tcp_print_stats()
2466 * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
2472 struct rpc_rqst *rqst = task->tk_rqstp; in bc_malloc()
2473 size_t size = rqst->rq_callsize; in bc_malloc()
2477 if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) { in bc_malloc()
2480 return -EINVAL; in bc_malloc()
2484 if (!page) in bc_malloc()
2485 return -ENOMEM; in bc_malloc()
2488 buf->len = PAGE_SIZE; in bc_malloc()
2490 rqst->rq_buffer = buf->data; in bc_malloc()
2491 rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize; in bc_malloc()
2500 void *buffer = task->tk_rqstp->rq_buffer; in bc_free()
2509 struct xdr_buf *xdr = &req->rq_snd_buf; in bc_sendto()
2511 container_of(req->rq_xprt, struct sock_xprt, xprt); in bc_sendto()
2516 (u32)xdr->len); in bc_sendto()
2520 req->rq_xtime = ktime_get(); in bc_sendto()
2521 err = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, marker, &sent); in bc_sendto()
2523 if (err < 0 || sent != (xdr->len + sizeof(marker))) in bc_sendto()
2524 return -EAGAIN; in bc_sendto()
2529 * bc_send_request - Send a backchannel Call on a TCP socket
2536 * %0 if the message was sent successfully
2537 * %ENOTCONN if the message was not sent
2547 xprt = req->rq_xprt->bc_xprt; in bc_send_request()
2553 mutex_lock(&xprt->xpt_mutex); in bc_send_request()
2554 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) in bc_send_request()
2555 len = -ENOTCONN; in bc_send_request()
2558 mutex_unlock(&xprt->xpt_mutex); in bc_send_request()
2560 if (len > 0) in bc_send_request()
2582 dprintk("RPC: bc_destroy xprt %p\n", xprt); in bc_destroy()
2702 dprintk("RPC: %s: Bad address family\n", __func__); in xs_init_anyaddr()
2703 return -EAFNOSUPPORT; in xs_init_anyaddr()
2715 if (args->addrlen > sizeof(xprt->addr)) { in xs_setup_xprt()
2716 dprintk("RPC: xs_setup_xprt: address too large\n"); in xs_setup_xprt()
2717 return ERR_PTR(-EBADF); in xs_setup_xprt()
2720 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size, in xs_setup_xprt()
2722 if (xprt == NULL) { in xs_setup_xprt()
2723 dprintk("RPC: xs_setup_xprt: couldn't allocate " in xs_setup_xprt()
2725 return ERR_PTR(-ENOMEM); in xs_setup_xprt()
2729 mutex_init(&new->recv_mutex); in xs_setup_xprt()
2730 memcpy(&xprt->addr, args->dstaddr, args->addrlen); in xs_setup_xprt()
2731 xprt->addrlen = args->addrlen; in xs_setup_xprt()
2732 if (args->srcaddr) in xs_setup_xprt()
2733 memcpy(&new->srcaddr, args->srcaddr, args->addrlen); in xs_setup_xprt()
2736 err = xs_init_anyaddr(args->dstaddr->sa_family, in xs_setup_xprt()
2737 (struct sockaddr *)&new->srcaddr); in xs_setup_xprt()
2738 if (err != 0) { in xs_setup_xprt()
2754 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2755 * @args: rpc transport creation arguments
2761 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr; in xs_setup_local()
2768 if (IS_ERR(xprt)) in xs_setup_local()
2772 xprt->prot = 0; in xs_setup_local()
2773 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; in xs_setup_local()
2775 xprt->bind_timeout = XS_BIND_TO; in xs_setup_local()
2776 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; in xs_setup_local()
2777 xprt->idle_timeout = XS_IDLE_DISC_TO; in xs_setup_local()
2779 xprt->ops = &xs_local_ops; in xs_setup_local()
2780 xprt->timeout = &xs_local_default_timeout; in xs_setup_local()
2782 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn); in xs_setup_local()
2783 INIT_WORK(&transport->error_worker, xs_error_handle); in xs_setup_local()
2784 INIT_DELAYED_WORK(&transport->connect_worker, xs_dummy_setup_socket); in xs_setup_local()
2786 switch (sun->sun_family) { in xs_setup_local()
2788 if (sun->sun_path[0] != '/') { in xs_setup_local()
2789 dprintk("RPC: bad AF_LOCAL address: %s\n", in xs_setup_local()
2790 sun->sun_path); in xs_setup_local()
2791 ret = ERR_PTR(-EINVAL); in xs_setup_local()
2797 if (ret) in xs_setup_local()
2801 ret = ERR_PTR(-EAFNOSUPPORT); in xs_setup_local()
2805 dprintk("RPC: set up xprt to %s via AF_LOCAL\n", in xs_setup_local()
2806 xprt->address_strings[RPC_DISPLAY_ADDR]); in xs_setup_local()
2808 if (try_module_get(THIS_MODULE)) in xs_setup_local()
2810 ret = ERR_PTR(-EINVAL); in xs_setup_local()
2824 * xs_setup_udp - Set up transport to use a UDP socket
2825 * @args: rpc transport creation arguments
2830 struct sockaddr *addr = args->dstaddr; in xs_setup_udp()
2837 if (IS_ERR(xprt)) in xs_setup_udp()
2841 xprt->prot = IPPROTO_UDP; in xs_setup_udp()
2843 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); in xs_setup_udp()
2845 xprt->bind_timeout = XS_BIND_TO; in xs_setup_udp()
2846 xprt->reestablish_timeout = XS_UDP_REEST_TO; in xs_setup_udp()
2847 xprt->idle_timeout = XS_IDLE_DISC_TO; in xs_setup_udp()
2849 xprt->ops = &xs_udp_ops; in xs_setup_udp()
2851 xprt->timeout = &xs_udp_default_timeout; in xs_setup_udp()
2853 INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn); in xs_setup_udp()
2854 INIT_WORK(&transport->error_worker, xs_error_handle); in xs_setup_udp()
2855 INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket); in xs_setup_udp()
2857 switch (addr->sa_family) { in xs_setup_udp()
2859 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) in xs_setup_udp()
2865 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) in xs_setup_udp()
2871 ret = ERR_PTR(-EAFNOSUPPORT); in xs_setup_udp()
2875 if (xprt_bound(xprt)) in xs_setup_udp()
2876 dprintk("RPC: set up xprt to %s (port %s) via %s\n", in xs_setup_udp()
2877 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_setup_udp()
2878 xprt->address_strings[RPC_DISPLAY_PORT], in xs_setup_udp()
2879 xprt->address_strings[RPC_DISPLAY_PROTO]); in xs_setup_udp()
2881 dprintk("RPC: set up xprt to %s (autobind) via %s\n", in xs_setup_udp()
2882 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_setup_udp()
2883 xprt->address_strings[RPC_DISPLAY_PROTO]); in xs_setup_udp()
2885 if (try_module_get(THIS_MODULE)) in xs_setup_udp()
2887 ret = ERR_PTR(-EINVAL); in xs_setup_udp()
2900 * xs_setup_tcp - Set up transport to use a TCP socket
2901 * @args: rpc transport creation arguments
2906 struct sockaddr *addr = args->dstaddr; in xs_setup_tcp()
2912 if (args->flags & XPRT_CREATE_INFINITE_SLOTS) in xs_setup_tcp()
2917 if (IS_ERR(xprt)) in xs_setup_tcp()
2921 xprt->prot = IPPROTO_TCP; in xs_setup_tcp()
2922 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; in xs_setup_tcp()
2924 xprt->bind_timeout = XS_BIND_TO; in xs_setup_tcp()
2925 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; in xs_setup_tcp()
2926 xprt->idle_timeout = XS_IDLE_DISC_TO; in xs_setup_tcp()
2928 xprt->ops = &xs_tcp_ops; in xs_setup_tcp()
2929 xprt->timeout = &xs_tcp_default_timeout; in xs_setup_tcp()
2931 xprt->max_reconnect_timeout = xprt->timeout->to_maxval; in xs_setup_tcp()
2932 xprt->connect_timeout = xprt->timeout->to_initval * in xs_setup_tcp()
2933 (xprt->timeout->to_retries + 1); in xs_setup_tcp()
2935 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn); in xs_setup_tcp()
2936 INIT_WORK(&transport->error_worker, xs_error_handle); in xs_setup_tcp()
2937 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket); in xs_setup_tcp()
2939 switch (addr->sa_family) { in xs_setup_tcp()
2941 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) in xs_setup_tcp()
2947 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) in xs_setup_tcp()
2953 ret = ERR_PTR(-EAFNOSUPPORT); in xs_setup_tcp()
2957 if (xprt_bound(xprt)) in xs_setup_tcp()
2958 dprintk("RPC: set up xprt to %s (port %s) via %s\n", in xs_setup_tcp()
2959 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_setup_tcp()
2960 xprt->address_strings[RPC_DISPLAY_PORT], in xs_setup_tcp()
2961 xprt->address_strings[RPC_DISPLAY_PROTO]); in xs_setup_tcp()
2963 dprintk("RPC: set up xprt to %s (autobind) via %s\n", in xs_setup_tcp()
2964 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_setup_tcp()
2965 xprt->address_strings[RPC_DISPLAY_PROTO]); in xs_setup_tcp()
2967 if (try_module_get(THIS_MODULE)) in xs_setup_tcp()
2969 ret = ERR_PTR(-EINVAL); in xs_setup_tcp()
2976 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
2977 * @args: rpc transport creation arguments
2982 struct sockaddr *addr = args->dstaddr; in xs_setup_bc_tcp()
2990 if (IS_ERR(xprt)) in xs_setup_bc_tcp()
2994 xprt->prot = IPPROTO_TCP; in xs_setup_bc_tcp()
2995 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; in xs_setup_bc_tcp()
2996 xprt->timeout = &xs_tcp_default_timeout; in xs_setup_bc_tcp()
3000 xprt->bind_timeout = 0; in xs_setup_bc_tcp()
3001 xprt->reestablish_timeout = 0; in xs_setup_bc_tcp()
3002 xprt->idle_timeout = 0; in xs_setup_bc_tcp()
3004 xprt->ops = &bc_tcp_ops; in xs_setup_bc_tcp()
3006 switch (addr->sa_family) { in xs_setup_bc_tcp()
3016 ret = ERR_PTR(-EAFNOSUPPORT); in xs_setup_bc_tcp()
3020 dprintk("RPC: set up xprt to %s (port %s) via %s\n", in xs_setup_bc_tcp()
3021 xprt->address_strings[RPC_DISPLAY_ADDR], in xs_setup_bc_tcp()
3022 xprt->address_strings[RPC_DISPLAY_PORT], in xs_setup_bc_tcp()
3023 xprt->address_strings[RPC_DISPLAY_PROTO]); in xs_setup_bc_tcp()
3032 args->bc_xprt->xpt_bc_xprt = xprt; in xs_setup_bc_tcp()
3033 xprt->bc_xprt = args->bc_xprt; in xs_setup_bc_tcp()
3034 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt); in xs_setup_bc_tcp()
3035 transport->sock = bc_sock->sk_sock; in xs_setup_bc_tcp()
3036 transport->inet = bc_sock->sk_sk; in xs_setup_bc_tcp()
3044 if (try_module_get(THIS_MODULE)) in xs_setup_bc_tcp()
3047 args->bc_xprt->xpt_bc_xprt = NULL; in xs_setup_bc_tcp()
3048 args->bc_xprt->xpt_bc_xps = NULL; in xs_setup_bc_tcp()
3050 ret = ERR_PTR(-EINVAL); in xs_setup_bc_tcp()
3089 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
3094 if (!sunrpc_table_header) in init_socket_xprt()
3106 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
3111 if (sunrpc_table_header) { in cleanup_socket_xprt()
3129 if (!val) in param_set_uint_minmax()
3130 return -EINVAL; in param_set_uint_minmax()
3132 if (ret) in param_set_uint_minmax()
3134 if (num < min || num > max) in param_set_uint_minmax()
3135 return -EINVAL; in param_set_uint_minmax()
3136 *((unsigned int *)kp->arg) = num; in param_set_uint_minmax()
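The final fragments belong to a bounded module-parameter setter. A minimal self-contained version of that pattern is sketched below; kstrtouint() as the parser and the kernel_param_ops wiring are standard kernel idioms assumed here rather than quoted from this file, and the 0..65535 bounds are illustrative.

/* Sketch: parse an unsigned int, enforce [min, max], store it through kp->arg. */
static int example_set_uint_minmax(const char *val, const struct kernel_param *kp,
				   unsigned int min, unsigned int max)
{
	unsigned int num;
	int ret;

	if (!val)
		return -EINVAL;
	ret = kstrtouint(val, 0, &num);
	if (ret)
		return ret;
	if (num < min || num > max)
		return -EINVAL;
	*((unsigned int *)kp->arg) = num;
	return 0;
}

/* Typical use: clamp a port-number parameter to a valid range. */
static int example_set_portnr(const char *val, const struct kernel_param *kp)
{
	return example_set_uint_minmax(val, kp, 0, 65535);
}

static const struct kernel_param_ops example_param_ops_portnr = {
	.set = example_set_portnr,
	.get = param_get_uint,
};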