Lines Matching +full:multi +full:- +full:socket (all hits below are from fs/dlm/lowcomms.c; each entry shows the source line number, the matching line, and its enclosing function)

1 // SPDX-License-Identifier: GPL-2.0-only
5 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
6 ** Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
15 * This is the "low-level" comms layer.
21 * simply 32 bit numbers to the locking module - if they need to
25 * whatever it needs for inter-node communication.
29 * up to the mid-level comms layer (which understands the
40 * cluster-wide mechanism as it must be the same on all nodes of the cluster
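
The comment fragments above describe the layer's role, but none of the hits show how it is driven. As orientation, a minimal sketch of the call sequence a user of this layer (the DLM config/midcomms code) goes through; the prototypes are assumptions based on the entry points that appear further down in this listing, not quotes from the lowcomms header:

    #include <linux/socket.h>

    /* Assumed prototypes (see dlm_lowcomms_addr()/dlm_lowcomms_connect_node() below). */
    int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len);
    int dlm_lowcomms_start(void);
    int dlm_lowcomms_connect_node(int nodeid);

    /* Hedged sketch: bringing the comms layer up for one remote node. */
    static int example_bring_up(int remote_nodeid, struct sockaddr_storage *remote_addr)
    {
        int error;

        /* Register the peer's address before any traffic; with SCTP, calling
         * this again for the same nodeid adds extra (multi-homed) paths. */
        error = dlm_lowcomms_addr(remote_nodeid, remote_addr, sizeof(*remote_addr));
        if (error)
            return error;

        /* Create the listening socket and the send/receive workqueues. */
        error = dlm_lowcomms_start();
        if (error)
            return error;

        /* Optionally kick off the outgoing connection right away. */
        return dlm_lowcomms_connect_node(remote_nodeid);
    }
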
69 struct socket *sock; /* NULL if not connected */
98 #define sock2con(x) ((struct connection *)(x)->sk_user_data)
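
Only one field of struct connection (line 69) and the sock2con() accessor made it into the results. As a reading aid, here is an approximate reconstruction of the members this search actually touches, inferred from their uses in the lines below; the ordering and anything omitted are guesses, not the authoritative definition:

    #include <linux/mutex.h>
    #include <linux/workqueue.h>
    #include <linux/wait.h>
    #include <net/sock.h>

    /* Approximate shape only; reconstructed from usage, not verbatim. */
    struct connection {
        struct socket *sock;            /* NULL if not connected (line 69) */
        int nodeid;                     /* peer nodeid; 0 for the listening connection */
        struct mutex sock_mutex;        /* serialises socket setup, teardown and I/O */
        unsigned long flags;            /* CF_READ_PENDING, CF_WRITE_PENDING, CF_CLOSE,
                                         * CF_CLOSING, CF_SHUTDOWN, CF_APP_LIMITED,
                                         * CF_IS_OTHERCON */
        struct list_head writequeue;    /* outgoing writequeue_entry pages */
        spinlock_t writequeue_lock;
        int (*rx_action)(struct connection *);        /* receive_from_sock or accept_from_sock */
        void (*connect_action)(struct connection *);  /* tcp_ or sctp_connect_to_sock */
        void (*shutdown_action)(struct connection *); /* dlm_tcp_shutdown (TCP only) */
        unsigned char *rx_buf;          /* receive staging buffer */
        int rx_buflen;                  /* current size of rx_buf */
        int rx_leftover;                /* bytes of a partial message kept for the next read */
        int retries;                    /* connect attempts, capped at MAX_CONNECT_RETRIES */
        struct hlist_node list;         /* connection_hash[] membership, RCU protected */
        struct connection *othercon;    /* second connection when both nodes connect at once */
        struct work_struct rwork;       /* runs process_recv_sockets */
        struct work_struct swork;       /* runs process_send_sockets */
        wait_queue_head_t shutdown_wait;/* woken when the peer completes a shutdown */
        struct rcu_head rcu;            /* deferred free via connection_release */
    };
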
150 return nodeid & (CONN_HASH_SIZE-1); in nodeid_hash()
162 if (con->nodeid == nodeid) { in __find_con()
189 con->rx_buflen = dlm_config.ci_buffer_size; in nodeid2con()
190 con->rx_buf = kmalloc(con->rx_buflen, GFP_NOFS); in nodeid2con()
191 if (!con->rx_buf) { in nodeid2con()
196 con->nodeid = nodeid; in nodeid2con()
197 mutex_init(&con->sock_mutex); in nodeid2con()
198 INIT_LIST_HEAD(&con->writequeue); in nodeid2con()
199 spin_lock_init(&con->writequeue_lock); in nodeid2con()
200 INIT_WORK(&con->swork, process_send_sockets); in nodeid2con()
201 INIT_WORK(&con->rwork, process_recv_sockets); in nodeid2con()
202 init_waitqueue_head(&con->shutdown_wait); in nodeid2con()
205 if (con->nodeid) { in nodeid2con()
208 con->connect_action = zerocon->connect_action; in nodeid2con()
209 if (!con->rx_action) in nodeid2con()
210 con->rx_action = zerocon->rx_action; in nodeid2con()
225 kfree(con->rx_buf); in nodeid2con()
230 hlist_add_head_rcu(&con->list, &connection_hash[r]); in nodeid2con()
255 if (na->nodeid == nodeid) in find_node_addr()
263 switch (x->ss_family) { in addr_compare()
267 if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr) in addr_compare()
269 if (sinx->sin_port != siny->sin_port) in addr_compare()
276 if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr)) in addr_compare()
278 if (sinx->sin6_port != siny->sin6_port) in addr_compare()
295 return -1; in nodeid_to_addr()
299 if (na && na->addr_count) { in nodeid_to_addr()
300 memcpy(&sas, na->addr[na->curr_addr_index], in nodeid_to_addr()
304 na->curr_addr_index++; in nodeid_to_addr()
305 if (na->curr_addr_index == na->addr_count) in nodeid_to_addr()
306 na->curr_addr_index = 0; in nodeid_to_addr()
312 return -EEXIST; in nodeid_to_addr()
314 if (!na->addr_count) in nodeid_to_addr()
315 return -ENOENT; in nodeid_to_addr()
323 if (dlm_local_addr[0]->ss_family == AF_INET) { in nodeid_to_addr()
326 ret4->sin_addr.s_addr = in4->sin_addr.s_addr; in nodeid_to_addr()
330 ret6->sin6_addr = in6->sin6_addr; in nodeid_to_addr()
339 int rv = -EEXIST; in addr_to_nodeid()
344 if (!na->addr_count) in addr_to_nodeid()
347 for (addr_i = 0; addr_i < na->addr_count; addr_i++) { in addr_to_nodeid()
348 if (addr_compare(na->addr[addr_i], addr)) { in addr_to_nodeid()
349 *nodeid = na->nodeid; in addr_to_nodeid()
367 return -ENOMEM; in dlm_lowcomms_addr()
372 return -ENOMEM; in dlm_lowcomms_addr()
380 new_node->nodeid = nodeid; in dlm_lowcomms_addr()
381 new_node->addr[0] = new_addr; in dlm_lowcomms_addr()
382 new_node->addr_count = 1; in dlm_lowcomms_addr()
383 list_add(&new_node->list, &dlm_node_addrs); in dlm_lowcomms_addr()
388 if (na->addr_count >= DLM_MAX_ADDR_COUNT) { in dlm_lowcomms_addr()
392 return -ENOSPC; in dlm_lowcomms_addr()
395 na->addr[na->addr_count++] = new_addr; in dlm_lowcomms_addr()
401 /* Data available on socket or listen socket received a connect */
406 read_lock_bh(&sk->sk_callback_lock); in lowcomms_data_ready()
408 if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags)) in lowcomms_data_ready()
409 queue_work(recv_workqueue, &con->rwork); in lowcomms_data_ready()
410 read_unlock_bh(&sk->sk_callback_lock); in lowcomms_data_ready()
417 read_lock_bh(&sk->sk_callback_lock); in lowcomms_write_space()
422 clear_bit(SOCK_NOSPACE, &con->sock->flags); in lowcomms_write_space()
424 if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) { in lowcomms_write_space()
425 con->sock->sk->sk_write_pending--; in lowcomms_write_space()
426 clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags); in lowcomms_write_space()
429 queue_work(send_workqueue, &con->swork); in lowcomms_write_space()
431 read_unlock_bh(&sk->sk_callback_lock); in lowcomms_write_space()
436 if (test_bit(CF_CLOSE, &con->flags)) in lowcomms_connect_sock()
438 queue_work(send_workqueue, &con->swork); in lowcomms_connect_sock()
446 * doesn't switch socket state when entering shutdown, so we in lowcomms_state_change()
449 if (sk->sk_shutdown) { in lowcomms_state_change()
450 if (sk->sk_shutdown == RCV_SHUTDOWN) in lowcomms_state_change()
452 } else if (sk->sk_state == TCP_ESTABLISHED) { in lowcomms_state_change()
466 return -ENOMEM; in dlm_lowcomms_connect_node()
477 read_lock_bh(&sk->sk_callback_lock); in lowcomms_error_report()
483 if (con->sock == NULL || in lowcomms_error_report()
484 kernel_getpeername(con->sock, (struct sockaddr *)&saddr) < 0) { in lowcomms_error_report()
485 printk_ratelimited(KERN_ERR "dlm: node %d: socket error " in lowcomms_error_report()
488 con->nodeid, dlm_config.ci_tcp_port, in lowcomms_error_report()
489 sk->sk_err, sk->sk_err_soft); in lowcomms_error_report()
493 printk_ratelimited(KERN_ERR "dlm: node %d: socket error " in lowcomms_error_report()
496 con->nodeid, &sin4->sin_addr.s_addr, in lowcomms_error_report()
497 dlm_config.ci_tcp_port, sk->sk_err, in lowcomms_error_report()
498 sk->sk_err_soft); in lowcomms_error_report()
502 printk_ratelimited(KERN_ERR "dlm: node %d: socket error " in lowcomms_error_report()
505 con->nodeid, sin6->sin6_addr.s6_addr32[0], in lowcomms_error_report()
506 sin6->sin6_addr.s6_addr32[1], in lowcomms_error_report()
507 sin6->sin6_addr.s6_addr32[2], in lowcomms_error_report()
508 sin6->sin6_addr.s6_addr32[3], in lowcomms_error_report()
509 dlm_config.ci_tcp_port, sk->sk_err, in lowcomms_error_report()
510 sk->sk_err_soft); in lowcomms_error_report()
513 read_unlock_bh(&sk->sk_callback_lock); in lowcomms_error_report()
519 static void save_listen_callbacks(struct socket *sock) in save_listen_callbacks()
521 struct sock *sk = sock->sk; in save_listen_callbacks()
523 listen_sock.sk_data_ready = sk->sk_data_ready; in save_listen_callbacks()
524 listen_sock.sk_state_change = sk->sk_state_change; in save_listen_callbacks()
525 listen_sock.sk_write_space = sk->sk_write_space; in save_listen_callbacks()
526 listen_sock.sk_error_report = sk->sk_error_report; in save_listen_callbacks()
529 static void restore_callbacks(struct socket *sock) in restore_callbacks()
531 struct sock *sk = sock->sk; in restore_callbacks()
533 write_lock_bh(&sk->sk_callback_lock); in restore_callbacks()
534 sk->sk_user_data = NULL; in restore_callbacks()
535 sk->sk_data_ready = listen_sock.sk_data_ready; in restore_callbacks()
536 sk->sk_state_change = listen_sock.sk_state_change; in restore_callbacks()
537 sk->sk_write_space = listen_sock.sk_write_space; in restore_callbacks()
538 sk->sk_error_report = listen_sock.sk_error_report; in restore_callbacks()
539 write_unlock_bh(&sk->sk_callback_lock); in restore_callbacks()
542 /* Make a socket active */
543 static void add_sock(struct socket *sock, struct connection *con) in add_sock()
545 struct sock *sk = sock->sk; in add_sock()
547 write_lock_bh(&sk->sk_callback_lock); in add_sock()
548 con->sock = sock; in add_sock()
550 sk->sk_user_data = con; in add_sock()
552 sk->sk_data_ready = lowcomms_data_ready; in add_sock()
553 sk->sk_write_space = lowcomms_write_space; in add_sock()
554 sk->sk_state_change = lowcomms_state_change; in add_sock()
555 sk->sk_allocation = GFP_NOFS; in add_sock()
556 sk->sk_error_report = lowcomms_error_report; in add_sock()
557 write_unlock_bh(&sk->sk_callback_lock); in add_sock()
565 saddr->ss_family = dlm_local_addr[0]->ss_family; in make_sockaddr()
566 if (saddr->ss_family == AF_INET) { in make_sockaddr()
568 in4_addr->sin_port = cpu_to_be16(port); in make_sockaddr()
570 memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero)); in make_sockaddr()
573 in6_addr->sin6_port = cpu_to_be16(port); in make_sockaddr()
576 memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len); in make_sockaddr()
583 bool closing = test_and_set_bit(CF_CLOSING, &con->flags); in close_connection()
585 if (tx && !closing && cancel_work_sync(&con->swork)) { in close_connection()
586 log_print("canceled swork for node %d", con->nodeid); in close_connection()
587 clear_bit(CF_WRITE_PENDING, &con->flags); in close_connection()
589 if (rx && !closing && cancel_work_sync(&con->rwork)) { in close_connection()
590 log_print("canceled rwork for node %d", con->nodeid); in close_connection()
591 clear_bit(CF_READ_PENDING, &con->flags); in close_connection()
594 mutex_lock(&con->sock_mutex); in close_connection()
595 if (con->sock) { in close_connection()
596 restore_callbacks(con->sock); in close_connection()
597 sock_release(con->sock); in close_connection()
598 con->sock = NULL; in close_connection()
600 if (con->othercon && and_other) { in close_connection()
601 /* Will only re-enter once. */ in close_connection()
602 close_connection(con->othercon, false, true, true); in close_connection()
605 con->rx_leftover = 0; in close_connection()
606 con->retries = 0; in close_connection()
607 mutex_unlock(&con->sock_mutex); in close_connection()
608 clear_bit(CF_CLOSING, &con->flags); in close_connection()
615 if (cancel_work_sync(&con->swork)) { in shutdown_connection()
616 log_print("canceled swork for node %d", con->nodeid); in shutdown_connection()
617 clear_bit(CF_WRITE_PENDING, &con->flags); in shutdown_connection()
620 mutex_lock(&con->sock_mutex); in shutdown_connection()
622 if (!con->sock) { in shutdown_connection()
623 mutex_unlock(&con->sock_mutex); in shutdown_connection()
627 set_bit(CF_SHUTDOWN, &con->flags); in shutdown_connection()
628 ret = kernel_sock_shutdown(con->sock, SHUT_WR); in shutdown_connection()
629 mutex_unlock(&con->sock_mutex); in shutdown_connection()
635 ret = wait_event_timeout(con->shutdown_wait, in shutdown_connection()
636 !test_bit(CF_SHUTDOWN, &con->flags), in shutdown_connection()
648 clear_bit(CF_SHUTDOWN, &con->flags); in shutdown_connection()
654 if (con->othercon) in dlm_tcp_shutdown()
655 shutdown_connection(con->othercon); in dlm_tcp_shutdown()
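
Lines 615-655 (shutdown_connection/dlm_tcp_shutdown) and the end-of-file branch of receive_from_sock() at lines 760-761 together form the graceful TCP shutdown handshake. A condensed sketch of that interplay; the helpers are the ones listed in this file, but the timeout constant and the exact control flow are simplified assumptions:

    /* Half-close our side, then wait for the receive path to see the peer's EOF. */
    static void shutdown_sketch(struct connection *con)
    {
        mutex_lock(&con->sock_mutex);
        set_bit(CF_SHUTDOWN, &con->flags);
        kernel_sock_shutdown(con->sock, SHUT_WR);   /* stop sending, keep receiving */
        mutex_unlock(&con->sock_mutex);

        /* receive_from_sock() keeps draining; once kernel_recvmsg() returns 0
         * (the peer closed its side too) it clears CF_SHUTDOWN and wakes
         * shutdown_wait (lines 760-761). */
        if (!wait_event_timeout(con->shutdown_wait,
                                !test_bit(CF_SHUTDOWN, &con->flags),
                                msecs_to_jiffies(10000) /* assumed timeout */))
            /* Peer never completed the handshake: fall back to a hard close. */
            close_connection(con, false, true, true);
    }
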
665 return -ENOMEM; in con_realloc_receive_buf()
668 if (con->rx_leftover) in con_realloc_receive_buf()
669 memmove(newbuf, con->rx_buf, con->rx_leftover); in con_realloc_receive_buf()
672 kfree(con->rx_buf); in con_realloc_receive_buf()
673 con->rx_buflen = newlen; in con_realloc_receive_buf()
674 con->rx_buf = newbuf; in con_realloc_receive_buf()
687 mutex_lock(&con->sock_mutex); in receive_from_sock()
689 if (con->sock == NULL) { in receive_from_sock()
690 ret = -EAGAIN; in receive_from_sock()
694 if (con->nodeid == 0) { in receive_from_sock()
695 ret = -EINVAL; in receive_from_sock()
701 if (con->rx_buflen != buflen && con->rx_leftover <= buflen) { in receive_from_sock()
710 iov.iov_base = con->rx_buf + con->rx_leftover; in receive_from_sock()
711 iov.iov_len = con->rx_buflen - con->rx_leftover; in receive_from_sock()
715 ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len, in receive_from_sock()
723 buflen = ret + con->rx_leftover; in receive_from_sock()
724 ret = dlm_process_incoming_buffer(con->nodeid, con->rx_buf, buflen); in receive_from_sock()
732 con->rx_leftover = buflen - ret; in receive_from_sock()
733 if (con->rx_leftover) { in receive_from_sock()
734 memmove(con->rx_buf, con->rx_buf + ret, in receive_from_sock()
735 con->rx_leftover); in receive_from_sock()
742 mutex_unlock(&con->sock_mutex); in receive_from_sock()
746 if (!test_and_set_bit(CF_READ_PENDING, &con->flags)) in receive_from_sock()
747 queue_work(recv_workqueue, &con->rwork); in receive_from_sock()
748 mutex_unlock(&con->sock_mutex); in receive_from_sock()
749 return -EAGAIN; in receive_from_sock()
752 mutex_unlock(&con->sock_mutex); in receive_from_sock()
753 if (ret != -EAGAIN) { in receive_from_sock()
758 con, con->nodeid); in receive_from_sock()
760 clear_bit(CF_SHUTDOWN, &con->flags); in receive_from_sock()
761 wake_up(&con->shutdown_wait); in receive_from_sock()
763 ret = -1; in receive_from_sock()
769 /* Listening socket is busy, accept a connection */
774 struct socket *newsock; in accept_from_sock()
782 return -1; in accept_from_sock()
785 mutex_lock_nested(&con->sock_mutex, 0); in accept_from_sock()
787 if (!con->sock) { in accept_from_sock()
788 mutex_unlock(&con->sock_mutex); in accept_from_sock()
789 return -ENOTCONN; in accept_from_sock()
792 result = kernel_accept(con->sock, &newsock, O_NONBLOCK); in accept_from_sock()
796 /* Get the connected socket's peer */ in accept_from_sock()
798 len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2); in accept_from_sock()
800 result = -ECONNABORTED; in accept_from_sock()
812 mutex_unlock(&con->sock_mutex); in accept_from_sock()
813 return -1; in accept_from_sock()
817 sock_set_mark(newsock->sk, mark); in accept_from_sock()
828 result = -ENOMEM; in accept_from_sock()
831 mutex_lock_nested(&newcon->sock_mutex, 1); in accept_from_sock()
832 if (newcon->sock) { in accept_from_sock()
833 struct connection *othercon = newcon->othercon; in accept_from_sock()
838 log_print("failed to allocate incoming socket"); in accept_from_sock()
839 mutex_unlock(&newcon->sock_mutex); in accept_from_sock()
840 result = -ENOMEM; in accept_from_sock()
844 othercon->rx_buflen = dlm_config.ci_buffer_size; in accept_from_sock()
845 othercon->rx_buf = kmalloc(othercon->rx_buflen, GFP_NOFS); in accept_from_sock()
846 if (!othercon->rx_buf) { in accept_from_sock()
847 mutex_unlock(&newcon->sock_mutex); in accept_from_sock()
849 log_print("failed to allocate incoming socket receive buffer"); in accept_from_sock()
850 result = -ENOMEM; in accept_from_sock()
854 othercon->nodeid = nodeid; in accept_from_sock()
855 othercon->rx_action = receive_from_sock; in accept_from_sock()
856 mutex_init(&othercon->sock_mutex); in accept_from_sock()
857 INIT_LIST_HEAD(&othercon->writequeue); in accept_from_sock()
858 spin_lock_init(&othercon->writequeue_lock); in accept_from_sock()
859 INIT_WORK(&othercon->swork, process_send_sockets); in accept_from_sock()
860 INIT_WORK(&othercon->rwork, process_recv_sockets); in accept_from_sock()
861 init_waitqueue_head(&othercon->shutdown_wait); in accept_from_sock()
862 set_bit(CF_IS_OTHERCON, &othercon->flags); in accept_from_sock()
868 mutex_lock_nested(&othercon->sock_mutex, 2); in accept_from_sock()
869 newcon->othercon = othercon; in accept_from_sock()
872 mutex_unlock(&othercon->sock_mutex); in accept_from_sock()
875 newcon->rx_action = receive_from_sock; in accept_from_sock()
883 mutex_unlock(&newcon->sock_mutex); in accept_from_sock()
887 * between processing the accept adding the socket in accept_from_sock()
890 if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags)) in accept_from_sock()
891 queue_work(recv_workqueue, &addcon->rwork); in accept_from_sock()
892 mutex_unlock(&con->sock_mutex); in accept_from_sock()
897 mutex_unlock(&con->sock_mutex); in accept_from_sock()
901 if (result != -EAGAIN) in accept_from_sock()
908 __free_page(e->page); in free_entry()
913 * writequeue_entry_complete - try to delete and free write queue entry
921 e->offset += completed; in writequeue_entry_complete()
922 e->len -= completed; in writequeue_entry_complete()
924 if (e->len == 0 && e->users == 0) { in writequeue_entry_complete()
925 list_del(&e->list); in writequeue_entry_complete()
931 * sctp_bind_addrs - bind a SCTP socket to all our addresses
944 result = kernel_bind(con->sock, addr, addr_len); in sctp_bind_addrs()
946 result = sock_bind_add(con->sock->sk, addr, addr_len); in sctp_bind_addrs()
959 peeled-off socket for this association, so we use the listening socket
967 struct socket *sock; in sctp_connect_to_sock()
970 if (con->nodeid == 0) { in sctp_connect_to_sock()
975 dlm_comm_mark(con->nodeid, &mark); in sctp_connect_to_sock()
977 mutex_lock(&con->sock_mutex); in sctp_connect_to_sock()
979 /* Some odd races can cause double-connects, ignore them */ in sctp_connect_to_sock()
980 if (con->retries++ > MAX_CONNECT_RETRIES) in sctp_connect_to_sock()
983 if (con->sock) { in sctp_connect_to_sock()
984 log_print("node %d already connected.", con->nodeid); in sctp_connect_to_sock()
989 result = nodeid_to_addr(con->nodeid, &daddr, NULL, true); in sctp_connect_to_sock()
991 log_print("no address for nodeid %d", con->nodeid); in sctp_connect_to_sock()
995 /* Create a socket to communicate with */ in sctp_connect_to_sock()
996 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family, in sctp_connect_to_sock()
1001 sock_set_mark(sock->sk, mark); in sctp_connect_to_sock()
1003 con->rx_action = receive_from_sock; in sctp_connect_to_sock()
1004 con->connect_action = sctp_connect_to_sock; in sctp_connect_to_sock()
1013 log_print("connecting to %d", con->nodeid); in sctp_connect_to_sock()
1016 sctp_sock_set_nodelay(sock->sk); in sctp_connect_to_sock()
1019 * Make sock->ops->connect() function return in specified time, in sctp_connect_to_sock()
1023 sock_set_sndtimeo(sock->sk, 5); in sctp_connect_to_sock()
1024 result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len, in sctp_connect_to_sock()
1026 sock_set_sndtimeo(sock->sk, 0); in sctp_connect_to_sock()
1028 if (result == -EINPROGRESS) in sctp_connect_to_sock()
1034 con->sock = NULL; in sctp_connect_to_sock()
1042 if (result != -EHOSTUNREACH && in sctp_connect_to_sock()
1043 result != -ENETUNREACH && in sctp_connect_to_sock()
1044 result != -ENETDOWN && in sctp_connect_to_sock()
1045 result != -EINVAL && in sctp_connect_to_sock()
1046 result != -EPROTONOSUPPORT) { in sctp_connect_to_sock()
1047 log_print("connect %d try %d error %d", con->nodeid, in sctp_connect_to_sock()
1048 con->retries, result); in sctp_connect_to_sock()
1049 mutex_unlock(&con->sock_mutex); in sctp_connect_to_sock()
1056 mutex_unlock(&con->sock_mutex); in sctp_connect_to_sock()
1059 /* Connect a new socket to its peer */
1064 struct socket *sock = NULL; in tcp_connect_to_sock()
1068 if (con->nodeid == 0) { in tcp_connect_to_sock()
1073 dlm_comm_mark(con->nodeid, &mark); in tcp_connect_to_sock()
1075 mutex_lock(&con->sock_mutex); in tcp_connect_to_sock()
1076 if (con->retries++ > MAX_CONNECT_RETRIES) in tcp_connect_to_sock()
1079 /* Some odd races can cause double-connects, ignore them */ in tcp_connect_to_sock()
1080 if (con->sock) in tcp_connect_to_sock()
1083 /* Create a socket to communicate with */ in tcp_connect_to_sock()
1084 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family, in tcp_connect_to_sock()
1089 sock_set_mark(sock->sk, mark); in tcp_connect_to_sock()
1092 result = nodeid_to_addr(con->nodeid, &saddr, NULL, false); in tcp_connect_to_sock()
1094 log_print("no address for nodeid %d", con->nodeid); in tcp_connect_to_sock()
1098 con->rx_action = receive_from_sock; in tcp_connect_to_sock()
1099 con->connect_action = tcp_connect_to_sock; in tcp_connect_to_sock()
1100 con->shutdown_action = dlm_tcp_shutdown; in tcp_connect_to_sock()
1103 /* Bind to our cluster-known address connecting to avoid in tcp_connect_to_sock()
1107 result = sock->ops->bind(sock, (struct sockaddr *) &src_addr, in tcp_connect_to_sock()
1116 log_print("connecting to %d", con->nodeid); in tcp_connect_to_sock()
1119 tcp_sock_set_nodelay(sock->sk); in tcp_connect_to_sock()
1121 result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len, in tcp_connect_to_sock()
1123 if (result == -EINPROGRESS) in tcp_connect_to_sock()
1129 if (con->sock) { in tcp_connect_to_sock()
1130 sock_release(con->sock); in tcp_connect_to_sock()
1131 con->sock = NULL; in tcp_connect_to_sock()
1139 if (result != -EHOSTUNREACH && in tcp_connect_to_sock()
1140 result != -ENETUNREACH && in tcp_connect_to_sock()
1141 result != -ENETDOWN && in tcp_connect_to_sock()
1142 result != -EINVAL && in tcp_connect_to_sock()
1143 result != -EPROTONOSUPPORT) { in tcp_connect_to_sock()
1144 log_print("connect %d try %d error %d", con->nodeid, in tcp_connect_to_sock()
1145 con->retries, result); in tcp_connect_to_sock()
1146 mutex_unlock(&con->sock_mutex); in tcp_connect_to_sock()
1152 mutex_unlock(&con->sock_mutex); in tcp_connect_to_sock()
1156 static struct socket *tcp_create_listen_sock(struct connection *con, in tcp_create_listen_sock()
1159 struct socket *sock = NULL; in tcp_create_listen_sock()
1163 if (dlm_local_addr[0]->ss_family == AF_INET) in tcp_create_listen_sock()
1168 /* Create a socket to communicate with */ in tcp_create_listen_sock()
1169 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family, in tcp_create_listen_sock()
1172 log_print("Can't create listening comms socket"); in tcp_create_listen_sock()
1176 sock_set_mark(sock->sk, dlm_config.ci_mark); in tcp_create_listen_sock()
1179 tcp_sock_set_nodelay(sock->sk); in tcp_create_listen_sock()
1181 sock_set_reuseaddr(sock->sk); in tcp_create_listen_sock()
1183 write_lock_bh(&sock->sk->sk_callback_lock); in tcp_create_listen_sock()
1184 sock->sk->sk_user_data = con; in tcp_create_listen_sock()
1186 con->rx_action = accept_from_sock; in tcp_create_listen_sock()
1187 con->connect_action = tcp_connect_to_sock; in tcp_create_listen_sock()
1188 write_unlock_bh(&sock->sk->sk_callback_lock); in tcp_create_listen_sock()
1192 result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len); in tcp_create_listen_sock()
1197 con->sock = NULL; in tcp_create_listen_sock()
1200 sock_set_keepalive(sock->sk); in tcp_create_listen_sock()
1202 result = sock->ops->listen(sock, 5); in tcp_create_listen_sock()
1240 /* Initialise SCTP socket and bind to all interfaces */
1243 struct socket *sock = NULL; in sctp_listen_for_all()
1244 int result = -EINVAL; in sctp_listen_for_all()
1248 return -ENOMEM; in sctp_listen_for_all()
1252 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family, in sctp_listen_for_all()
1255 log_print("Can't create comms socket, check SCTP is loaded"); in sctp_listen_for_all()
1259 sock_set_rcvbuf(sock->sk, NEEDED_RMEM); in sctp_listen_for_all()
1260 sock_set_mark(sock->sk, dlm_config.ci_mark); in sctp_listen_for_all()
1261 sctp_sock_set_nodelay(sock->sk); in sctp_listen_for_all()
1263 write_lock_bh(&sock->sk->sk_callback_lock); in sctp_listen_for_all()
1265 sock->sk->sk_user_data = con; in sctp_listen_for_all()
1267 con->sock = sock; in sctp_listen_for_all()
1268 con->sock->sk->sk_data_ready = lowcomms_data_ready; in sctp_listen_for_all()
1269 con->rx_action = accept_from_sock; in sctp_listen_for_all()
1270 con->connect_action = sctp_connect_to_sock; in sctp_listen_for_all()
1272 write_unlock_bh(&sock->sk->sk_callback_lock); in sctp_listen_for_all()
1278 result = sock->ops->listen(sock, 5); in sctp_listen_for_all()
1280 log_print("Can't set socket listening"); in sctp_listen_for_all()
1288 con->sock = NULL; in sctp_listen_for_all()
1295 struct socket *sock = NULL; in tcp_listen_for_all()
1297 int result = -EINVAL; in tcp_listen_for_all()
1300 return -ENOMEM; in tcp_listen_for_all()
1302 /* We don't support multi-homed hosts */ in tcp_listen_for_all()
1304 log_print("TCP protocol can't handle multi-homed hosts, " in tcp_listen_for_all()
1306 return -EINVAL; in tcp_listen_for_all()
1317 result = -EADDRINUSE; in tcp_listen_for_all()
1334 entry->page = alloc_page(allocation); in new_writequeue_entry()
1335 if (!entry->page) { in new_writequeue_entry()
1340 entry->offset = 0; in new_writequeue_entry()
1341 entry->len = 0; in new_writequeue_entry()
1342 entry->end = 0; in new_writequeue_entry()
1343 entry->users = 0; in new_writequeue_entry()
1344 entry->con = con; in new_writequeue_entry()
1359 spin_lock(&con->writequeue_lock); in dlm_lowcomms_get_buffer()
1360 e = list_entry(con->writequeue.prev, struct writequeue_entry, list); in dlm_lowcomms_get_buffer()
1361 if ((&e->list == &con->writequeue) || in dlm_lowcomms_get_buffer()
1362 (PAGE_SIZE - e->end < len)) { in dlm_lowcomms_get_buffer()
1365 offset = e->end; in dlm_lowcomms_get_buffer()
1366 e->end += len; in dlm_lowcomms_get_buffer()
1367 e->users++; in dlm_lowcomms_get_buffer()
1369 spin_unlock(&con->writequeue_lock); in dlm_lowcomms_get_buffer()
1373 *ppc = page_address(e->page) + offset; in dlm_lowcomms_get_buffer()
1379 spin_lock(&con->writequeue_lock); in dlm_lowcomms_get_buffer()
1380 offset = e->end; in dlm_lowcomms_get_buffer()
1381 e->end += len; in dlm_lowcomms_get_buffer()
1382 e->users++; in dlm_lowcomms_get_buffer()
1383 list_add_tail(&e->list, &con->writequeue); in dlm_lowcomms_get_buffer()
1384 spin_unlock(&con->writequeue_lock); in dlm_lowcomms_get_buffer()
1393 struct connection *con = e->con; in dlm_lowcomms_commit_buffer()
1396 spin_lock(&con->writequeue_lock); in dlm_lowcomms_commit_buffer()
1397 users = --e->users; in dlm_lowcomms_commit_buffer()
1400 e->len = e->end - e->offset; in dlm_lowcomms_commit_buffer()
1401 spin_unlock(&con->writequeue_lock); in dlm_lowcomms_commit_buffer()
1403 queue_work(send_workqueue, &con->swork); in dlm_lowcomms_commit_buffer()
1407 spin_unlock(&con->writequeue_lock); in dlm_lowcomms_commit_buffer()
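
dlm_lowcomms_get_buffer() and dlm_lowcomms_commit_buffer() are the two calls the mid-level layer uses to queue a message; everything below them (send_to_sock and kernel_sendpage) is internal to this file. A minimal caller-side sketch, with the prototypes assumed from the lowcomms header of this kernel generation rather than quoted from it:

    #include <linux/gfp.h>
    #include <linux/string.h>

    /* Assumed prototypes, not quoted from lowcomms.h. */
    void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc);
    void dlm_lowcomms_commit_buffer(void *handle);

    /* Sketch: queue one message for a peer node. */
    static int example_send(int nodeid, const void *msg, int len)
    {
        void *handle;
        char *p;

        /* Reserve len bytes at the tail of the node's writequeue page;
         * p ends up pointing at the reserved region. */
        handle = dlm_lowcomms_get_buffer(nodeid, len, GFP_NOFS, &p);
        if (!handle)
            return -ENOMEM;

        memcpy(p, msg, len);

        /* Mark the region ready; when the last user commits, the connection's
         * send work is queued and send_to_sock() pushes the page out. */
        dlm_lowcomms_commit_buffer(handle);
        return 0;
    }
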
1420 mutex_lock(&con->sock_mutex); in send_to_sock()
1421 if (con->sock == NULL) in send_to_sock()
1424 spin_lock(&con->writequeue_lock); in send_to_sock()
1426 e = list_entry(con->writequeue.next, struct writequeue_entry, in send_to_sock()
1428 if ((struct list_head *) e == &con->writequeue) in send_to_sock()
1431 len = e->len; in send_to_sock()
1432 offset = e->offset; in send_to_sock()
1433 BUG_ON(len == 0 && e->users == 0); in send_to_sock()
1434 spin_unlock(&con->writequeue_lock); in send_to_sock()
1438 ret = kernel_sendpage(con->sock, e->page, offset, len, in send_to_sock()
1440 if (ret == -EAGAIN || ret == 0) { in send_to_sock()
1441 if (ret == -EAGAIN && in send_to_sock()
1442 test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) && in send_to_sock()
1443 !test_and_set_bit(CF_APP_LIMITED, &con->flags)) { in send_to_sock()
1447 set_bit(SOCK_NOSPACE, &con->sock->flags); in send_to_sock()
1448 con->sock->sk->sk_write_pending++; in send_to_sock()
1462 spin_lock(&con->writequeue_lock); in send_to_sock()
1465 spin_unlock(&con->writequeue_lock); in send_to_sock()
1467 mutex_unlock(&con->sock_mutex); in send_to_sock()
1471 mutex_unlock(&con->sock_mutex); in send_to_sock()
1475 queue_work(send_workqueue, &con->swork); in send_to_sock()
1479 mutex_unlock(&con->sock_mutex); in send_to_sock()
1480 queue_work(send_workqueue, &con->swork); in send_to_sock()
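
The -EAGAIN branch above (lines 1440-1448) pairs with lowcomms_write_space() at lines 417-431: when kernel_sendpage() finds the socket buffer full, the send worker marks itself application-limited, and the sk_write_space callback undoes that once the socket drains and requeues the send work. A condensed sketch of the two halves, assuming the reconstructed struct connection from earlier and this file's send_workqueue:

    /* Send-worker side: kernel_sendpage() just returned -EAGAIN. */
    static void sender_hit_nospace(struct connection *con)
    {
        if (test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
            !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
            /* Ask the socket to call sk_write_space() when space frees up. */
            set_bit(SOCK_NOSPACE, &con->sock->flags);
            con->sock->sk->sk_write_pending++;
        }
    }

    /* sk_write_space() side: buffer space is back, resume the send worker. */
    static void write_space_sketch(struct sock *sk)
    {
        struct connection *con = sk->sk_user_data;

        clear_bit(SOCK_NOSPACE, &con->sock->flags);
        if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
            con->sock->sk->sk_write_pending--;
            clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
        }
        queue_work(send_workqueue, &con->swork);
    }
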
1488 spin_lock(&con->writequeue_lock); in clean_one_writequeue()
1489 list_for_each_entry_safe(e, safe, &con->writequeue, list) { in clean_one_writequeue()
1490 list_del(&e->list); in clean_one_writequeue()
1493 spin_unlock(&con->writequeue_lock); in clean_one_writequeue()
1506 set_bit(CF_CLOSE, &con->flags); in dlm_lowcomms_close()
1514 list_del(&na->list); in dlm_lowcomms_close()
1515 while (na->addr_count--) in dlm_lowcomms_close()
1516 kfree(na->addr[na->addr_count]); in dlm_lowcomms_close()
1530 clear_bit(CF_READ_PENDING, &con->flags); in process_recv_sockets()
1532 err = con->rx_action(con); in process_recv_sockets()
1541 clear_bit(CF_WRITE_PENDING, &con->flags); in process_send_sockets()
1542 if (con->sock == NULL) /* not mutex protected so check it inside too */ in process_send_sockets()
1543 con->connect_action(con); in process_send_sockets()
1544 if (!list_empty(&con->writequeue)) in process_send_sockets()
1562 return -ENOMEM; in work_start()
1570 return -ENOMEM; in work_start()
1578 mutex_lock(&con->sock_mutex); in _stop_conn()
1579 set_bit(CF_CLOSE, &con->flags); in _stop_conn()
1580 set_bit(CF_READ_PENDING, &con->flags); in _stop_conn()
1581 set_bit(CF_WRITE_PENDING, &con->flags); in _stop_conn()
1582 if (con->sock && con->sock->sk) { in _stop_conn()
1583 write_lock_bh(&con->sock->sk->sk_callback_lock); in _stop_conn()
1584 con->sock->sk->sk_user_data = NULL; in _stop_conn()
1585 write_unlock_bh(&con->sock->sk->sk_callback_lock); in _stop_conn()
1587 if (con->othercon && and_other) in _stop_conn()
1588 _stop_conn(con->othercon, false); in _stop_conn()
1589 mutex_unlock(&con->sock_mutex); in _stop_conn()
1599 if (con->shutdown_action) in shutdown_conn()
1600 con->shutdown_action(con); in shutdown_conn()
1607 kfree(con->rx_buf); in connection_release()
1615 hlist_del_rcu(&con->list); in free_conn()
1617 if (con->othercon) { in free_conn()
1618 clean_one_writequeue(con->othercon); in free_conn()
1619 call_rcu(&con->othercon->rcu, connection_release); in free_conn()
1622 call_rcu(&con->rcu, connection_release); in free_conn()
1642 ok &= test_bit(CF_READ_PENDING, &con->flags); in work_flush()
1643 ok &= test_bit(CF_WRITE_PENDING, &con->flags); in work_flush()
1644 if (con->othercon) { in work_flush()
1646 &con->othercon->flags); in work_flush()
1648 &con->othercon->flags); in work_flush()
1659 socket activity. in dlm_lowcomms_stop()
1677 int error = -EINVAL; in dlm_lowcomms_start()
1686 error = -ENOTCONN; in dlm_lowcomms_start()
1722 list_del(&na->list); in dlm_lowcomms_exit()
1723 while (na->addr_count--) in dlm_lowcomms_exit()
1724 kfree(na->addr[na->addr_count]); in dlm_lowcomms_exit()