Lines Matching defs:xmitq
175 struct sk_buff_head *xmitq,
805 struct sk_buff_head xmitq;
821 __skb_queue_head_init(&xmitq);
836 rc = tipc_link_timeout(le->link, &xmitq);
841 tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
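These matches all come from the TIPC node layer (net/tipc/node.c), and the four lines above (805, 821, 836, 841) already show the recurring shape: the caller puts a struct sk_buff_head on its own stack, initializes it, lets the link layer append whatever frames it wants sent, and finally hands the whole queue to the bearer in one call. A minimal sketch of that shape, assuming the file's internal types (struct tipc_node, struct tipc_link_entry) and omitting the locking and link-event handling of the real timer function:

static void node_timeout_sketch(struct tipc_node *n, int bearer_id)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct sk_buff_head xmitq;
	int rc;

	/* Per-call queue on the caller's stack, cf. 805/821 */
	__skb_queue_head_init(&xmitq);

	/* The link layer appends timer-driven frames (probes, retransmits)
	 * to xmitq instead of sending them itself, cf. 836.
	 */
	rc = tipc_link_timeout(le->link, &xmitq);
	(void)rc;	/* rc may carry link events for the caller to act on */

	/* One bearer call flushes everything that was queued, cf. 841 */
	tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
}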
852 * @xmitq: queue for messages to be xmited on
857 struct sk_buff_head *xmitq)
886 tipc_link_build_state_msg(nl, xmitq);
895 tipc_bcast_add_peer(n->net, nl, xmitq);
914 tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
921 * @xmitq: queue for messages to be xmited on
926 struct sk_buff_head *xmitq)
931 __tipc_node_link_up(n, bearer_id, xmitq);
933 tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
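Lines 852-933 show how that shape is split between a fill-only helper and a wrapper that flushes: __tipc_node_link_up() never touches the bearer, it only appends the state message, broadcast bookkeeping and tunnel SYNCH traffic to the queue it was given (886, 895, 914), while tipc_node_link_up() performs the flush (933). A sketch of the wrapper side, under the same assumptions as above:

static void link_up_sketch(struct tipc_node *n, int bearer_id,
			   struct sk_buff_head *xmitq)
{
	/* Fill-only step: everything lands on the caller's queue, cf. 931 */
	__tipc_node_link_up(n, bearer_id, xmitq);

	/* Flush step, done here rather than in the "__" helper, cf. 933 */
	tipc_bearer_xmit(n->net, bearer_id, xmitq,
			 &n->links[bearer_id].maddr, n);
}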
956 * @xmitq: queue for messages to be xmited on tnl link later
960 struct sk_buff_head *xmitq)
974 tipc_link_failover_prepare(l, tnl, xmitq);
985 * @xmitq: queue for messages to be xmited on
989 struct sk_buff_head *xmitq,
1039 tipc_link_build_reset_msg(l, xmitq);
1053 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
1068 struct sk_buff_head xmitq;
1073 __skb_queue_head_init(&xmitq);
1077 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
1092 if (!skb_queue_empty(&xmitq))
1093 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
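The link-down path (1068-1093) adds one refinement: the bearer is only called when the helper actually queued something, since __tipc_node_link_down() may produce a reset or FAILOVER message (1039, 1053) or nothing at all. A reduced sketch of just those lines; bearer_id and the node pointer n are assumed to come from the enclosing function:

	struct sk_buff_head xmitq;
	struct tipc_media_addr *maddr;

	__skb_queue_head_init(&xmitq);

	/* May queue a reset or FAILOVER message, and may also pick a
	 * different bearer/media address for the tunnel, hence the
	 * pointer arguments, cf. 1077.
	 */
	__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);

	/* Skip the bearer entirely if nothing was queued, cf. 1092-1093 */
	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);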
1692 struct sk_buff_head xmitq;
1736 __skb_queue_head_init(&xmitq);
1739 rc = tipc_link_xmit(le->link, list, &xmitq);
1746 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
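The unicast send path (1692-1746) uses the same two-queue arrangement: the caller's message list is passed to tipc_link_xmit(), which moves the frames that can go out now onto the local xmitq and keeps the rest on the link's own queues, and the local queue is then flushed to the bearer. A sketch, again assuming the file's internal types and skipping the congestion and error handling around the real call:

static int node_xmit_sketch(struct net *net, struct tipc_node *n,
			    int bearer_id, struct sk_buff_head *list)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct sk_buff_head xmitq;
	int rc;

	__skb_queue_head_init(&xmitq);

	/* Consumes 'list'; sendable frames end up on xmitq, cf. 1739 */
	rc = tipc_link_xmit(le->link, list, &xmitq);

	/* One flush per send attempt, cf. 1746 */
	tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
	return rc;
}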
1773 int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
1778 while ((skb = __skb_dequeue(xmitq))) {
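tipc_node_distr_xmit() (1773-1778) is the one match where xmitq arrives as input rather than being built locally: the caller's queue is drained buffer by buffer and each skb is re-sent on its own. The per-buffer destination lookup is outside the matched lines, so it is represented here by a hypothetical forward_one_sketch() placeholder:

/* Hypothetical stand-in for the real per-buffer resend; the destination
 * lookup and the actual send are not part of the matched lines.
 */
static void forward_one_sketch(struct net *net, struct sk_buff *skb)
{
	kfree_skb(skb);		/* placeholder only */
}

static void distr_xmit_sketch(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;

	/* Drain the caller's queue one buffer at a time, cf. 1778 */
	while ((skb = __skb_dequeue(xmitq)) != NULL)
		forward_one_sketch(net, skb);
}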
1788 struct sk_buff_head xmitq;
1796 __skb_queue_head_init(&xmitq);
1797 __skb_queue_tail(&xmitq, skb);
1798 tipc_bcast_xmit(net, &xmitq, &dummy);
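The short block at 1788-1798 shows why the queue-based convention is convenient even for a single buffer: the lone skb is wrapped in a one-element queue so it can be handed to the queue-consuming broadcast API. A reduced sketch; the type of 'dummy' and what tipc_bcast_xmit() does with it are assumptions here:

	struct sk_buff_head xmitq;
	u16 dummy;	/* assumed: out-parameter the caller ignores */

	__skb_queue_head_init(&xmitq);
	__skb_queue_tail(&xmitq, skb);		/* queue of exactly one buffer */
	tipc_bcast_xmit(net, &xmitq, &dummy);	/* cf. 1798 */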
1834 int bearer_id, struct sk_buff_head *xmitq)
1839 rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);
1857 tipc_link_build_state_msg(ucl, xmitq);
1872 struct sk_buff_head xmitq;
1880 __skb_queue_head_init(&xmitq);
1899 tipc_link_build_state_msg(le->link, &xmitq);
1903 if (!skb_queue_empty(&xmitq))
1904 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1927 * @xmitq: queue for messages to be xmited on
1931 int bearer_id, struct sk_buff_head *xmitq)
1998 __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
2012 tipc_node_link_failover(n, pl, l, xmitq);
2040 __tipc_node_link_up(n, bearer_id, xmitq);
2085 struct sk_buff_head xmitq;
2118 __skb_queue_head_init(&xmitq);
2148 tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
2158 rc = tipc_link_rcv(le->link, skb, &xmitq);
2170 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
2172 rc = tipc_link_rcv(le->link, skb, &xmitq);
2180 tipc_node_link_up(n, bearer_id, &xmitq);
2196 if (!skb_queue_empty(&xmitq))
2197 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
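The receive path (2085-2197) is the heaviest user of the pattern: one local xmitq collects everything the node wants to send back in response to an incoming buffer (link acks and state messages from tipc_link_rcv() at 2158/2172, broadcast sync traffic at 2148, and the protocol messages generated when the packet brings a link up at 2180), and the queue is flushed once on the way out (2196-2197). A condensed sketch; the state checking and dispatch logic is reduced to the calls visible in the matches, and the TIPC_LINK_UP_EVT test stands in for the original's fuller handling of the return code:

static void rcv_sketch(struct net *net, struct tipc_node *n,
		       struct sk_buff *skb, int bearer_id)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct sk_buff_head xmitq;
	int rc;

	__skb_queue_head_init(&xmitq);

	/* Receiving may generate acks, NACKs or state messages; they all
	 * accumulate on xmitq, cf. 2158/2172.
	 */
	rc = tipc_link_rcv(le->link, skb, &xmitq);

	/* A link-up triggered by this packet queues its protocol traffic
	 * on the same queue, cf. 2180.
	 */
	if (rc & TIPC_LINK_UP_EVT)
		tipc_node_link_up(n, bearer_id, &xmitq);

	/* Single guarded flush at the end of the receive path, cf. 2196-2197 */
	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
}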
2210 struct sk_buff_head xmitq;
2214 __skb_queue_head_init(&xmitq);
2224 &xmitq);
2233 tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
2424 struct sk_buff_head xmitq;
2428 __skb_queue_head_init(&xmitq);
2472 tipc_link_set_tolerance(link, tol, &xmitq);
2478 tipc_link_set_prio(link, prio, &xmitq);
2492 tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
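Finally, the netlink link-set handler (2424-2492) applies the same discipline to configuration changes: updating tolerance or priority makes the link queue a protocol message announcing the new value, and that message goes out to the peer through the usual bearer call. A reduced sketch of the matched lines; link, tol, prio, node and bearer_id are assumed to come from the parsed netlink request, and the last argument of the bearer call is cut off in the match at 2492, so the NULL below is an assumption:

	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	/* Each setter queues a protocol message carrying the new value */
	tipc_link_set_tolerance(link, tol, &xmitq);	/* cf. 2472 */
	tipc_link_set_prio(link, prio, &xmitq);		/* cf. 2478 */

	/* Tell the peer about the change; final argument assumed
	 * (truncated in the match at 2492).
	 */
	tipc_bearer_xmit(net, bearer_id, &xmitq,
			 &node->links[bearer_id].maddr, NULL);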