Lines Matching +full:retain +full:- +full:state +full:- +full:shutdown

14  *      - Redistributions of source code must retain the above
18 *      - Redistributions in binary form must reproduce the above
46 #define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)
65 lhash = (__force u32)laddr->s6_addr32[3]; in rds_conn_bucket()
69 fhash = (__force u32)faddr->s6_addr32[3]; in rds_conn_bucket()
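The two lines above are from rds_conn_bucket(), which folds the low 32 bits of the (possibly v4-mapped) local and remote addresses into a bucket index and masks it with RDS_CONNECTION_HASH_MASK (line 46). A minimal userspace sketch of the same idea follows, assuming a power-of-two table; the multiplicative mix below is only a stand-in for the kernel's secret-keyed hash, not the actual function.

    #include <stdint.h>

    #define CONN_HASH_BITS     12
    #define CONN_HASH_ENTRIES  (1u << CONN_HASH_BITS)
    #define CONN_HASH_MASK     (CONN_HASH_ENTRIES - 1)   /* valid only for power-of-two sizes */

    /* Stand-in for the kernel's keyed hash: mix both 32-bit address words,
     * then mask the result down to a bucket index. */
    static unsigned int conn_bucket(uint32_t laddr_low32, uint32_t faddr_low32)
    {
            uint64_t h = ((uint64_t)laddr_low32 << 32) | faddr_low32;

            h *= 0x9E3779B97F4A7C15ull;     /* 64-bit golden-ratio constant */
            return (unsigned int)(h >> 32) & CONN_HASH_MASK;
    }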
92 if (ipv6_addr_equal(&conn->c_faddr, faddr) && in rds_conn_lookup()
93 ipv6_addr_equal(&conn->c_laddr, laddr) && in rds_conn_lookup()
94 conn->c_trans == trans && in rds_conn_lookup()
95 conn->c_tos == tos && in rds_conn_lookup()
97 conn->c_dev_if == dev_if) { in rds_conn_lookup()
102 rdsdebug("returning conn %p for %pI6c -> %pI6c\n", ret, in rds_conn_lookup()
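rds_conn_lookup() (lines 92-102) walks one hash bucket and returns a connection only when the whole identity tuple matches: local address, peer address, transport, TOS and, for link-local peers, the device index. Below is a simplified sketch of that comparison over a plain singly linked bucket list; the struct layout and field names are illustrative, not the kernel's rds_connection.

    #include <stdint.h>
    #include <stddef.h>

    struct conn {
            struct conn *next;
            uint32_t     laddr, faddr;      /* low 32 bits, as in the v4-mapped case */
            const void  *trans;             /* transport identity, compared by pointer */
            uint8_t      tos;
            int          dev_if;
    };

    static struct conn *conn_lookup(struct conn *bucket, uint32_t laddr,
                                    uint32_t faddr, const void *trans,
                                    uint8_t tos, int dev_if)
    {
            for (struct conn *c = bucket; c; c = c->next) {
                    /* every field of the identity tuple must match */
                    if (c->laddr == laddr && c->faddr == faddr &&
                        c->trans == trans && c->tos == tos &&
                        c->dev_if == dev_if)
                            return c;
            }
            return NULL;
    }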
109 * It clears partial message state so that the transport can start sending
115 struct rds_connection *conn = cp->cp_conn; in rds_conn_path_reset()
118 &conn->c_laddr, &conn->c_faddr); in rds_conn_path_reset()
122 cp->cp_flags = 0; in rds_conn_path_reset()
133 spin_lock_init(&cp->cp_lock); in __rds_conn_path_init()
134 cp->cp_next_tx_seq = 1; in __rds_conn_path_init()
135 init_waitqueue_head(&cp->cp_waitq); in __rds_conn_path_init()
136 INIT_LIST_HEAD(&cp->cp_send_queue); in __rds_conn_path_init()
137 INIT_LIST_HEAD(&cp->cp_retrans); in __rds_conn_path_init()
139 cp->cp_conn = conn; in __rds_conn_path_init()
140 atomic_set(&cp->cp_state, RDS_CONN_DOWN); in __rds_conn_path_init()
141 cp->cp_send_gen = 0; in __rds_conn_path_init()
142 cp->cp_reconnect_jiffies = 0; in __rds_conn_path_init()
143 cp->cp_conn->c_proposed_version = RDS_PROTOCOL_VERSION; in __rds_conn_path_init()
144 INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker); in __rds_conn_path_init()
145 INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker); in __rds_conn_path_init()
146 INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker); in __rds_conn_path_init()
147 INIT_WORK(&cp->cp_down_w, rds_shutdown_worker); in __rds_conn_path_init()
148 mutex_init(&cp->cp_cm_lock); in __rds_conn_path_init()
149 cp->cp_flags = 0; in __rds_conn_path_init()
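__rds_conn_path_init() (lines 133-149) starts every path in RDS_CONN_DOWN and keeps the state in an atomic_t; elsewhere the code moves between states with compare-and-swap so that only one CPU wins a given transition. A sketch of that pattern with C11 atomics; the state names mirror the RDS ones but the helper itself is hypothetical.

    #include <stdatomic.h>
    #include <stdbool.h>

    enum conn_state { CONN_DOWN, CONN_CONNECTING, CONN_UP,
                      CONN_DISCONNECTING, CONN_ERROR };

    /* Move the path from old_state to new_state only if nobody changed it
     * first; mirrors the cmpxchg-style transition done on cp_state. */
    static bool path_transition(atomic_int *state, int old_state, int new_state)
    {
            int expected = old_state;

            return atomic_compare_exchange_strong(state, &expected, new_state);
    }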
173 int npaths = (trans->t_mp_capable ? RDS_MPATH_WORKERS : 1); in __rds_conn_create()
178 conn->c_loopback && in __rds_conn_create()
179 conn->c_trans != &rds_loop_transport && in __rds_conn_create()
187 conn = parent->c_passive; in __rds_conn_create()
195 conn = ERR_PTR(-ENOMEM); in __rds_conn_create()
198 conn->c_path = kcalloc(npaths, sizeof(struct rds_conn_path), gfp); in __rds_conn_create()
199 if (!conn->c_path) { in __rds_conn_create()
201 conn = ERR_PTR(-ENOMEM); in __rds_conn_create()
205 INIT_HLIST_NODE(&conn->c_hash_node); in __rds_conn_create()
206 conn->c_laddr = *laddr; in __rds_conn_create()
207 conn->c_isv6 = !ipv6_addr_v4mapped(laddr); in __rds_conn_create()
208 conn->c_faddr = *faddr; in __rds_conn_create()
209 conn->c_dev_if = dev_if; in __rds_conn_create()
210 conn->c_tos = tos; in __rds_conn_create()
219 conn->c_bound_if = dev_if; in __rds_conn_create()
222 conn->c_bound_if = 0; in __rds_conn_create()
228 kfree(conn->c_path); in __rds_conn_create()
239 loop_trans = rds_trans_get_preferred(net, faddr, conn->c_dev_if); in __rds_conn_create()
242 conn->c_loopback = 1; in __rds_conn_create()
243 if (is_outgoing && trans->t_prefer_loopback) { in __rds_conn_create()
244 /* "outgoing" connection - and the transport in __rds_conn_create()
252 conn->c_trans = trans; in __rds_conn_create()
254 init_waitqueue_head(&conn->c_hs_waitq); in __rds_conn_create()
256 __rds_conn_path_init(conn, &conn->c_path[i], in __rds_conn_create()
258 conn->c_path[i].cp_index = i; in __rds_conn_create()
262 ret = -ENETDOWN; in __rds_conn_create()
264 ret = trans->conn_alloc(conn, GFP_ATOMIC); in __rds_conn_create()
267 kfree(conn->c_path); in __rds_conn_create()
273 rdsdebug("allocated conn %p for %pI6c -> %pI6c over %s %s\n", in __rds_conn_create()
275 strnlen(trans->t_name, sizeof(trans->t_name)) ? in __rds_conn_create()
276 trans->t_name : "[unknown]", is_outgoing ? "(outgoing)" : ""); in __rds_conn_create()
288 if (parent->c_passive) { in __rds_conn_create()
289 trans->conn_free(conn->c_path[0].cp_transport_data); in __rds_conn_create()
290 kfree(conn->c_path); in __rds_conn_create()
292 conn = parent->c_passive; in __rds_conn_create()
294 parent->c_passive = conn; in __rds_conn_create()
309 cp = &conn->c_path[i]; in __rds_conn_create()
310 /* The ->conn_alloc invocation may have in __rds_conn_create()
314 if (cp->cp_transport_data) in __rds_conn_create()
315 trans->conn_free(cp->cp_transport_data); in __rds_conn_create()
317 kfree(conn->c_path); in __rds_conn_create()
321 conn->c_my_gen_num = rds_gen_num; in __rds_conn_create()
322 conn->c_peer_gen_num = 0; in __rds_conn_create()
323 hlist_add_head_rcu(&conn->c_hash_node, head); in __rds_conn_create()
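The create path re-checks the bucket under a lock for a connection that raced in while the new one was being set up, and only publishes the new entry (hlist_add_head_rcu, line 323) when nothing is found. A userspace sketch of that check-then-insert step, reusing the conn_lookup() sketch above and a mutex in place of the kernel's spinlock/RCU pair.

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Publish 'fresh' in the bucket unless an identical connection was
     * inserted concurrently; the loser is freed and the winner returned. */
    static struct conn *conn_publish(struct conn **bucket, struct conn *fresh)
    {
            struct conn *found;

            pthread_mutex_lock(&bucket_lock);
            found = conn_lookup(*bucket, fresh->laddr, fresh->faddr,
                                fresh->trans, fresh->tos, fresh->dev_if);
            if (found) {
                    free(fresh);            /* lost the race: reuse the existing conn */
                    fresh = found;
            } else {
                    fresh->next = *bucket;  /* publish at the head of the bucket */
                    *bucket = fresh;
            }
            pthread_mutex_unlock(&bucket_lock);
            return fresh;
    }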
357 struct rds_connection *conn = cp->cp_conn; in rds_conn_shutdown()
364 * duration of the shutdown operation, else we may be in rds_conn_shutdown()
366 * handler is supposed to check for state DISCONNECTING in rds_conn_shutdown()
368 mutex_lock(&cp->cp_cm_lock); in rds_conn_shutdown()
374 "shutdown called in state %d\n", in rds_conn_shutdown()
375 atomic_read(&cp->cp_state)); in rds_conn_shutdown()
376 mutex_unlock(&cp->cp_cm_lock); in rds_conn_shutdown()
379 mutex_unlock(&cp->cp_cm_lock); in rds_conn_shutdown()
381 wait_event(cp->cp_waitq, in rds_conn_shutdown()
382 !test_bit(RDS_IN_XMIT, &cp->cp_flags)); in rds_conn_shutdown()
383 wait_event(cp->cp_waitq, in rds_conn_shutdown()
384 !test_bit(RDS_RECV_REFILL, &cp->cp_flags)); in rds_conn_shutdown()
386 conn->c_trans->conn_path_shutdown(cp); in rds_conn_shutdown()
393 /* This can happen - eg when we're in the middle of tearing in rds_conn_shutdown()
398 * Note that this also happens with rds-tcp because in rds_conn_shutdown()
405 "to state DOWN, current state " in rds_conn_shutdown()
407 atomic_read(&cp->cp_state)); in rds_conn_shutdown()
415 * conn - the reconnect is always triggered by the active peer. */ in rds_conn_shutdown()
416 cancel_delayed_work_sync(&cp->cp_conn_w); in rds_conn_shutdown()
418 if (!hlist_unhashed(&conn->c_hash_node)) { in rds_conn_shutdown()
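rds_conn_shutdown() (lines 357-418) takes cp_cm_lock while checking the path state, then waits on cp_waitq until the RDS_IN_XMIT and RDS_RECV_REFILL bits clear before calling the transport's conn_path_shutdown(). A userspace sketch of that "wait until a flag drains" step using a mutex and condition variable; the names are illustrative.

    #include <pthread.h>
    #include <stdbool.h>

    struct drain_gate {
            pthread_mutex_t lock;
            pthread_cond_t  wait;
            bool            in_xmit;        /* stands in for the RDS_IN_XMIT bit */
    };

    /* Block until in_xmit is clear, like wait_event(cp_waitq, !test_bit(...)). */
    static void wait_for_drain(struct drain_gate *g)
    {
            pthread_mutex_lock(&g->lock);
            while (g->in_xmit)
                    pthread_cond_wait(&g->wait, &g->lock);
            pthread_mutex_unlock(&g->lock);
    }

    /* The sending side clears the flag and wakes any waiter once it is done. */
    static void xmit_done(struct drain_gate *g)
    {
            pthread_mutex_lock(&g->lock);
            g->in_xmit = false;
            pthread_cond_broadcast(&g->wait);
            pthread_mutex_unlock(&g->lock);
    }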
433 if (!cp->cp_transport_data) in rds_conn_path_destroy()
437 cancel_delayed_work_sync(&cp->cp_send_w); in rds_conn_path_destroy()
438 cancel_delayed_work_sync(&cp->cp_recv_w); in rds_conn_path_destroy()
441 flush_work(&cp->cp_down_w); in rds_conn_path_destroy()
445 &cp->cp_send_queue, in rds_conn_path_destroy()
447 list_del_init(&rm->m_conn_item); in rds_conn_path_destroy()
448 BUG_ON(!list_empty(&rm->m_sock_item)); in rds_conn_path_destroy()
451 if (cp->cp_xmit_rm) in rds_conn_path_destroy()
452 rds_message_put(cp->cp_xmit_rm); in rds_conn_path_destroy()
454 WARN_ON(delayed_work_pending(&cp->cp_send_w)); in rds_conn_path_destroy()
455 WARN_ON(delayed_work_pending(&cp->cp_recv_w)); in rds_conn_path_destroy()
456 WARN_ON(delayed_work_pending(&cp->cp_conn_w)); in rds_conn_path_destroy()
457 WARN_ON(work_pending(&cp->cp_down_w)); in rds_conn_path_destroy()
459 cp->cp_conn->c_trans->conn_free(cp->cp_transport_data); in rds_conn_path_destroy()
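rds_conn_path_destroy() (lines 433-459) cancels the path's pending work, tears down every message still queued for sending, and then releases the transport's per-path data. A sketch of the queue teardown alone, over a singly linked list with manual refcounting; the structures are illustrative, not rds_message.

    #include <stdlib.h>

    struct msg {
            struct msg *next;
            int         refcount;
    };

    static void msg_put(struct msg *m)
    {
            if (--m->refcount == 0)
                    free(m);
    }

    /* Drop every message still sitting on the send queue; the successor is
     * saved before unlinking so removal during the walk stays safe. */
    static void purge_send_queue(struct msg **queue)
    {
            struct msg *m = *queue, *next;

            while (m) {
                    next = m->next;
                    m->next = NULL;
                    msg_put(m);             /* mirrors rds_message_put() per entry */
                    m = next;
            }
            *queue = NULL;
    }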
466 * the conn has been shutdown that no one else is referencing the connection.
474 int npaths = (conn->c_trans->t_mp_capable ? RDS_MPATH_WORKERS : 1); in rds_conn_destroy()
476 rdsdebug("freeing conn %p for %pI4 -> " in rds_conn_destroy()
477 "%pI4\n", conn, &conn->c_laddr, in rds_conn_destroy()
478 &conn->c_faddr); in rds_conn_destroy()
482 hlist_del_init_rcu(&conn->c_hash_node); in rds_conn_destroy()
488 cp = &conn->c_path[i]; in rds_conn_destroy()
490 BUG_ON(!list_empty(&cp->cp_retrans)); in rds_conn_destroy()
500 kfree(conn->c_path); in rds_conn_destroy()
504 rds_conn_count--; in rds_conn_destroy()
549 if (!isv6 && conn->c_isv6) in rds_conn_message_info_cmn()
552 npaths = (conn->c_trans->t_mp_capable ? in rds_conn_message_info_cmn()
556 cp = &conn->c_path[j]; in rds_conn_message_info_cmn()
558 list = &cp->cp_send_queue; in rds_conn_message_info_cmn()
560 list = &cp->cp_retrans; in rds_conn_message_info_cmn()
562 spin_lock_irqsave(&cp->cp_lock, flags); in rds_conn_message_info_cmn()
568 __rds_inc_msg_cp(&rm->m_inc, in rds_conn_message_info_cmn()
570 &conn->c_laddr, in rds_conn_message_info_cmn()
571 &conn->c_faddr, in rds_conn_message_info_cmn()
575 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_conn_message_info_cmn()
581 lens->nr = total; in rds_conn_message_info_cmn()
583 lens->each = sizeof(struct rds6_info_message); in rds_conn_message_info_cmn()
585 lens->each = sizeof(struct rds_info_message); in rds_conn_message_info_cmn()
653 lens->nr = 0; in rds_for_each_conn_info()
654 lens->each = item_len; in rds_for_each_conn_info()
669 len -= item_len; in rds_for_each_conn_info()
671 lens->nr++; in rds_for_each_conn_info()
691 lens->nr = 0; in rds_walk_conn_path_info()
692 lens->each = item_len; in rds_walk_conn_path_info()
707 cp = conn->c_path; in rds_walk_conn_path_info()
719 len -= item_len; in rds_walk_conn_path_info()
721 lens->nr++; in rds_walk_conn_path_info()
730 struct rds_connection *conn = cp->cp_conn; in rds_conn_info_visitor()
732 if (conn->c_isv6) in rds_conn_info_visitor()
735 cinfo->next_tx_seq = cp->cp_next_tx_seq; in rds_conn_info_visitor()
736 cinfo->next_rx_seq = cp->cp_next_rx_seq; in rds_conn_info_visitor()
737 cinfo->laddr = conn->c_laddr.s6_addr32[3]; in rds_conn_info_visitor()
738 cinfo->faddr = conn->c_faddr.s6_addr32[3]; in rds_conn_info_visitor()
739 cinfo->tos = conn->c_tos; in rds_conn_info_visitor()
740 strncpy(cinfo->transport, conn->c_trans->t_name, in rds_conn_info_visitor()
741 sizeof(cinfo->transport)); in rds_conn_info_visitor()
742 cinfo->flags = 0; in rds_conn_info_visitor()
744 rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags), in rds_conn_info_visitor()
746 /* XXX Future: return the state rather than these funky bits */ in rds_conn_info_visitor()
747 rds_conn_info_set(cinfo->flags, in rds_conn_info_visitor()
748 atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING, in rds_conn_info_visitor()
750 rds_conn_info_set(cinfo->flags, in rds_conn_info_visitor()
751 atomic_read(&cp->cp_state) == RDS_CONN_UP, in rds_conn_info_visitor()
760 struct rds_connection *conn = cp->cp_conn; in rds6_conn_info_visitor()
762 cinfo6->next_tx_seq = cp->cp_next_tx_seq; in rds6_conn_info_visitor()
763 cinfo6->next_rx_seq = cp->cp_next_rx_seq; in rds6_conn_info_visitor()
764 cinfo6->laddr = conn->c_laddr; in rds6_conn_info_visitor()
765 cinfo6->faddr = conn->c_faddr; in rds6_conn_info_visitor()
766 strncpy(cinfo6->transport, conn->c_trans->t_name, in rds6_conn_info_visitor()
767 sizeof(cinfo6->transport)); in rds6_conn_info_visitor()
768 cinfo6->flags = 0; in rds6_conn_info_visitor()
770 rds_conn_info_set(cinfo6->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags), in rds6_conn_info_visitor()
772 /* XXX Future: return the state rather than these funky bits */ in rds6_conn_info_visitor()
773 rds_conn_info_set(cinfo6->flags, in rds6_conn_info_visitor()
774 atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING, in rds6_conn_info_visitor()
776 rds_conn_info_set(cinfo6->flags, in rds6_conn_info_visitor()
777 atomic_read(&cp->cp_state) == RDS_CONN_UP, in rds6_conn_info_visitor()
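Both info visitors (lines 730-777) export the path state as individual flag bits (sending, connecting, connected) through rds_conn_info_set(), and the "XXX Future" comments note that reporting the state value directly would be cleaner. A sketch of that conditional bit-setting helper as a macro; the flag values are placeholders.

    #include <stdint.h>

    #define INFO_FLAG_SENDING     0x01u
    #define INFO_FLAG_CONNECTING  0x02u
    #define INFO_FLAG_CONNECTED   0x04u

    /* Set a bit only when the condition holds, in the spirit of
     * rds_conn_info_set(cinfo->flags, test, SUFFIX). */
    #define conn_info_set(flags, test, bit)         \
            do {                                    \
                    if (test)                       \
                            (flags) |= (bit);       \
            } while (0)

Usage follows the same shape as the visitors above, e.g. conn_info_set(flags, state == CONN_UP, INFO_FLAG_CONNECTED), so the exported flags word carries one bit per condition.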
825 return -ENOMEM; in rds_conn_init()
871 atomic_set(&cp->cp_state, RDS_CONN_ERROR); in rds_conn_path_drop()
874 if (!destroy && rds_destroy_pending(cp->cp_conn)) { in rds_conn_path_drop()
878 queue_work(rds_wq, &cp->cp_down_w); in rds_conn_path_drop()
885 WARN_ON(conn->c_trans->t_mp_capable); in rds_conn_drop()
886 rds_conn_path_drop(&conn->c_path[0], false); in rds_conn_drop()
892 * delayed reconnect however - in this case we should not interfere.
897 if (rds_destroy_pending(cp->cp_conn)) { in rds_conn_path_connect_if_down()
902 !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags)) in rds_conn_path_connect_if_down()
903 queue_delayed_work(rds_wq, &cp->cp_conn_w, 0); in rds_conn_path_connect_if_down()
915 rds_conn_path_connect_if_down(&conn->c_path[i]); in rds_check_all_paths()
916 } while (++i < conn->c_npaths); in rds_check_all_paths()
921 WARN_ON(conn->c_trans->t_mp_capable); in rds_conn_connect_if_down()
922 rds_conn_path_connect_if_down(&conn->c_path[0]); in rds_conn_connect_if_down()
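rds_conn_path_connect_if_down() (lines 897-903) only queues the connect worker when it wins the RDS_RECONNECT_PENDING bit with test_and_set_bit, so a reconnect is never queued twice, and rds_check_all_paths() simply applies that check to every path. A sketch of the same guard with a C11 atomic flag; queue_connect_work() is just a named stand-in for queue_delayed_work().

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_flag reconnect_pending = ATOMIC_FLAG_INIT;

    static void queue_connect_work(void)
    {
            puts("connect worker queued");  /* stand-in for queue_delayed_work() */
    }

    /* Only the caller that flips the flag from clear to set schedules the
     * reconnect; everyone else sees it already pending and backs off. */
    static void connect_if_down(bool path_is_down)
    {
            if (path_is_down && !atomic_flag_test_and_set(&reconnect_pending))
                    queue_connect_work();
    }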