Lines matching defs:tsk — definitions and uses of struct tipc_sock *tsk in the Linux kernel's net/tipc/socket.c. Each entry below is prefixed with its line number in that file.

150 static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua);
151 static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua);
152 static int tipc_sk_leave(struct tipc_sock *tsk);
154 static int tipc_sk_insert(struct tipc_sock *tsk);
155 static void tipc_sk_remove(struct tipc_sock *tsk);
158 static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
167 static u32 tsk_own_node(struct tipc_sock *tsk)
169 return msg_prevnode(&tsk->phdr);
172 static u32 tsk_peer_node(struct tipc_sock *tsk)
174 return msg_destnode(&tsk->phdr);
177 static u32 tsk_peer_port(struct tipc_sock *tsk)
179 return msg_destport(&tsk->phdr);
182 static bool tsk_unreliable(struct tipc_sock *tsk)
184 return msg_src_droppable(&tsk->phdr) != 0;
187 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
189 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
192 static bool tsk_unreturnable(struct tipc_sock *tsk)
194 return msg_dest_droppable(&tsk->phdr) != 0;
197 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
199 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
202 static int tsk_importance(struct tipc_sock *tsk)
204 return msg_importance(&tsk->phdr);
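The accessors at lines 167-204 all read or write tsk->phdr: a TIPC socket keeps its own node address, its peer's node/port, and per-socket delivery options inside the preformatted message header it reuses for every outgoing message, rather than in separate state fields. Below is a minimal userspace sketch of that idea; the struct layout, field names, and main() are mocks (the real accessors are the msg_*() header-word helpers in net/tipc/msg.h):

/* Mock model: the socket's preformatted header doubles as its
 * connection state. Field layout here is illustrative only. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mock_msg {               /* stands in for struct tipc_msg */
    uint32_t prevnode;          /* own node address */
    uint32_t destnode;          /* peer node address */
    uint32_t destport;          /* peer port */
    bool src_droppable;         /* the "unreliable" delivery option */
};

struct mock_sock {              /* stands in for struct tipc_sock */
    struct mock_msg phdr;
};

static uint32_t tsk_peer_node(struct mock_sock *tsk)
{
    return tsk->phdr.destnode;  /* mirrors msg_destnode(&tsk->phdr) */
}

static void tsk_set_unreliable(struct mock_sock *tsk, bool unreliable)
{
    tsk->phdr.src_droppable = unreliable; /* mirrors msg_set_src_droppable() */
}

int main(void)
{
    struct mock_sock tsk = { .phdr = { .destnode = 0x1001, .destport = 42 } };

    tsk_set_unreliable(&tsk, true);
    printf("peer node %x, unreliable %d\n",
           (unsigned int)tsk_peer_node(&tsk), tsk.phdr.src_droppable);
    return 0;
}

One consequence visible throughout the listing: establishing or tearing down a connection is largely a matter of rewriting header words (e.g. msg_set_origport() at line 514).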
220 static bool tsk_conn_cong(struct tipc_sock *tsk)
222 return tsk->snt_unacked > tsk->snd_win;
243 static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
245 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
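tsk_conn_cong() (line 220) and tsk_inc() (line 243) implement connection-level flow control in units of advertised blocks: when the peer signals TIPC_BLOCK_FLOWCTL, a message costs msglen / FLOWCTL_BLK_SZ + 1 blocks (FLOWCTL_BLK_SZ is 1024 bytes in the kernel source); otherwise every message costs one unit, the legacy message-counting scheme. A self-contained sketch of the arithmetic; the struct and the capability-bit value are mocks, and the branch body elided from the listing is filled in from the kernel source:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLOWCTL_BLK_SZ      1024
#define TIPC_BLOCK_FLOWCTL  (1 << 0)    /* mock capability bit */

struct mock_tsk {
    uint16_t peer_caps;
    uint16_t snt_unacked;   /* blocks (or messages) sent but unacked */
    uint16_t snd_win;       /* window advertised by the peer */
};

/* Cost of one message, mirroring tsk_inc(): block-based if the peer
 * supports it, one unit per message otherwise. */
static uint16_t tsk_inc(struct mock_tsk *tsk, int msglen)
{
    if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
        return msglen / FLOWCTL_BLK_SZ + 1;
    return 1;
}

/* Congested once unacked debt exceeds the window (tsk_conn_cong()). */
static bool tsk_conn_cong(struct mock_tsk *tsk)
{
    return tsk->snt_unacked > tsk->snd_win;
}

int main(void)
{
    struct mock_tsk tsk = { .peer_caps = TIPC_BLOCK_FLOWCTL, .snd_win = 50 };

    for (int i = 0; i < 20 && !tsk_conn_cong(&tsk); i++)
        tsk.snt_unacked += tsk_inc(&tsk, 3000); /* 3000 B => 3 blocks */
    printf("congested after %u blocks (win %u)\n",
           tsk.snt_unacked, tsk.snd_win);
    return 0;
}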
252 static void tsk_set_nagle(struct tipc_sock *tsk)
254 struct sock *sk = &tsk->sk;
256 tsk->maxnagle = 0;
259 if (tsk->nodelay)
261 if (!(tsk->peer_caps & TIPC_NAGLE))
264 if (tsk->max_pkt == MAX_MSG_SIZE)
265 tsk->maxnagle = 1500;
267 tsk->maxnagle = tsk->max_pkt;
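tsk_set_nagle() (lines 252-267) decides how large a nagle bundle may grow. The listing shows the nodelay and capability guards plus the assignments; the stream-socket guard sits on elided lines and is reconstructed here from the kernel source. A userspace sketch, in which the mock types and the MAX_MSG_SIZE/TIPC_NAGLE constant values are assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SOCK_STREAM_MOCK  1
#define TIPC_NAGLE        (1 << 1)  /* mock capability bit */
#define MAX_MSG_SIZE      66000     /* mock; stands for TIPC's max message */

struct mock_tsk {
    int      sk_type;
    bool     nodelay;       /* TIPC_NODELAY socket option */
    uint16_t peer_caps;
    uint32_t max_pkt;       /* link MTU toward the peer */
    uint32_t maxnagle;      /* 0 disables nagle bundling */
};

/* Mirrors tsk_set_nagle(): bundling only for stream sockets, only when
 * TIPC_NODELAY is off and the peer advertises TIPC_NAGLE; the bundle is
 * capped at 1500 when the path reports no real MTU limit. */
static void tsk_set_nagle(struct mock_tsk *tsk)
{
    tsk->maxnagle = 0;
    if (tsk->sk_type != SOCK_STREAM_MOCK)
        return;
    if (tsk->nodelay)
        return;
    if (!(tsk->peer_caps & TIPC_NAGLE))
        return;
    if (tsk->max_pkt == MAX_MSG_SIZE)
        tsk->maxnagle = 1500;
    else
        tsk->maxnagle = tsk->max_pkt;
}

int main(void)
{
    struct mock_tsk tsk = { .sk_type = SOCK_STREAM_MOCK,
                            .peer_caps = TIPC_NAGLE, .max_pkt = 1420 };

    tsk_set_nagle(&tsk);
    printf("maxnagle = %u\n", tsk.maxnagle);  /* 1420: follows the MTU */
    return 0;
}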
334 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
336 struct sock *sk = &tsk->sk;
338 u32 peer_port = tsk_peer_port(tsk);
348 peer_node = tsk_peer_node(tsk);
463 struct tipc_sock *tsk;
490 tsk = tipc_sk(sk);
491 tsk->max_pkt = MAX_PKT_DEFAULT;
492 tsk->maxnagle = 0;
493 tsk->nagle_start = NAGLE_START_INIT;
494 INIT_LIST_HEAD(&tsk->publications);
495 INIT_LIST_HEAD(&tsk->cong_links);
496 msg = &tsk->phdr;
502 if (tipc_sk_insert(tsk)) {
508 /* Ensure tsk is visible before we read own_addr. */
514 msg_set_origport(msg, tsk->portid);
522 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
523 tsk->group_is_open = true;
524 atomic_set(&tsk->dupl_rcvcnt, 0);
527 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
528 tsk->rcv_win = tsk->snd_win;
531 tsk_set_unreturnable(tsk, true);
533 tsk_set_unreliable(tsk, true);
535 __skb_queue_head_init(&tsk->mc_method.deferredq);
542 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
544 sock_put(&tsk->sk);
551 struct tipc_sock *tsk = tipc_sk(sk);
554 u32 dnode = tsk_peer_node(tsk);
558 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
559 !tsk_conn_cong(tsk)));
562 tipc_sk_push_backlog(tsk, false);
583 tipc_node_remove_conn(net, dnode, tsk->portid);
593 tsk_own_node(tsk), tsk_peer_port(tsk),
594 tsk->portid, error);
596 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
627 struct tipc_sock *tsk;
636 tsk = tipc_sk(sk);
642 tipc_sk_leave(tsk);
643 tipc_sk_withdraw(tsk, NULL);
644 __skb_queue_purge(&tsk->mc_method.deferredq);
646 tipc_sk_remove(tsk);
651 tipc_dest_list_purge(&tsk->cong_links);
652 tsk->cong_link_cnt = 0;
653 call_rcu(&tsk->rcu, tipc_sk_callback);
677 struct tipc_sock *tsk = tipc_sk(sock->sk);
681 return tipc_sk_withdraw(tsk, NULL);
695 if (tsk->group)
699 return tipc_sk_withdraw(tsk, ua);
700 return tipc_sk_publish(tsk, ua);
749 struct tipc_sock *tsk = tipc_sk(sk);
756 addr->addr.id.ref = tsk_peer_port(tsk);
757 addr->addr.id.node = tsk_peer_node(tsk);
759 addr->addr.id.ref = tsk->portid;
793 struct tipc_sock *tsk = tipc_sk(sk);
806 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
815 if (tsk->group_is_open && !tsk->cong_link_cnt)
845 struct tipc_sock *tsk = tipc_sk(sk);
846 struct tipc_msg *hdr = &tsk->phdr;
853 if (tsk->group)
857 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
885 rc = tipc_mcast_xmit(net, &pkts, &tsk->mc_method, &dsts,
886 &tsk->cong_link_cnt);
897 * @tsk: tipc socket
904 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
908 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
909 struct tipc_mc_method *method = &tsk->mc_method;
911 struct tipc_msg *hdr = &tsk->phdr;
924 mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
930 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
932 tipc_dest_push(&tsk->cong_links, dnode, 0);
933 tsk->cong_link_cnt++;
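Lines 930-933 show the per-link congestion bookkeeping: a send that hits a congested link records the destination in tsk->cong_links and bumps cong_link_cnt, and senders then block on the wait conditions at lines 558, 806 and 857 until the count returns to zero (the SOCK_WAKEUP handling at lines 2159-2162 removes entries again). A sketch of that mechanism, with a flat array, the error value and the transmit function all mocked:

#include <stdbool.h>
#include <stdio.h>

#define ELINKCONG_MOCK  113     /* stand-in for the kernel's -ELINKCONG */
#define MAX_DESTS       8

struct mock_tsk {
    unsigned int cong_links[MAX_DESTS]; /* mock of the tsk->cong_links list */
    int          ncong;
    int          cong_link_cnt;
};

static int node_xmit(unsigned int dnode)
{
    return dnode == 0x1002 ? -ELINKCONG_MOCK : 0; /* mock: one bad link */
}

static int group_send(struct mock_tsk *tsk, unsigned int dnode)
{
    int rc = node_xmit(dnode);

    if (rc == -ELINKCONG_MOCK) {
        /* Remember the congested destination; a later link-level
         * wakeup deletes it and decrements the counter. */
        tsk->cong_links[tsk->ncong++] = dnode;
        tsk->cong_link_cnt++;
    }
    return rc;
}

static bool may_send(struct mock_tsk *tsk)
{
    return tsk->cong_link_cnt == 0; /* the senders' wait condition */
}

int main(void)
{
    struct mock_tsk tsk = { {0}, 0, 0 };

    group_send(&tsk, 0x1001);
    group_send(&tsk, 0x1002);
    printf("may send: %d (cong links %d)\n", may_send(&tsk),
           tsk.cong_link_cnt);
    return 0;
}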
961 struct tipc_sock *tsk = tipc_sk(sk);
974 !tipc_dest_find(&tsk->cong_links, node, 0) &&
975 tsk->group &&
976 !tipc_group_cong(tsk->group, node, port, blks,
984 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
1004 struct tipc_sock *tsk = tipc_sk(sk);
1005 struct list_head *cong_links = &tsk->cong_links;
1007 struct tipc_msg *hdr = &tsk->phdr;
1021 exclude = tipc_group_exclude(tsk->group);
1031 cong = tipc_group_cong(tsk->group, node, port, blks,
1051 tsk->group &&
1052 !tipc_group_cong(tsk->group, node, port,
1065 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
1086 struct tipc_sock *tsk = tipc_sk(sk);
1088 struct tipc_mc_method *method = &tsk->mc_method;
1091 struct tipc_msg *hdr = &tsk->phdr;
1098 !tsk->cong_link_cnt && tsk->group &&
1099 !tipc_group_bc_cong(tsk->group, blks));
1103 dsts = tipc_group_dests(tsk->group);
1118 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
1130 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
1135 tipc_group_update_bc_members(tsk->group, blks, ack);
1159 struct tipc_sock *tsk = tipc_sk(sk);
1160 struct tipc_group *grp = tsk->group;
1161 struct tipc_msg *hdr = &tsk->phdr;
1270 static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack)
1272 struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
1274 struct net *net = sock_net(&tsk->sk);
1275 u32 dnode = tsk_peer_node(tsk);
1279 tsk->pkt_cnt += skb_queue_len(txq);
1280 if (!tsk->pkt_cnt || tsk->msg_acc / tsk->pkt_cnt < 2) {
1281 tsk->oneway = 0;
1282 if (tsk->nagle_start < NAGLE_START_MAX)
1283 tsk->nagle_start *= 2;
1284 tsk->expect_ack = false;
1285 pr_debug("tsk %10u: bad nagle %u -> %u, next start %u!\n",
1286 tsk->portid, tsk->msg_acc, tsk->pkt_cnt,
1287 tsk->nagle_start);
1289 tsk->nagle_start = NAGLE_START_INIT;
1292 tsk->expect_ack = true;
1294 tsk->expect_ack = false;
1297 tsk->msg_acc = 0;
1298 tsk->pkt_cnt = 0;
1301 if (!skb || tsk->cong_link_cnt)
1308 if (tsk->msg_acc)
1309 tsk->pkt_cnt += skb_queue_len(txq);
1310 tsk->snt_unacked += tsk->snd_backlog;
1311 tsk->snd_backlog = 0;
1312 rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
1314 tsk->cong_link_cnt = 1;
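tipc_sk_push_backlog() (lines 1270-1314) grades how well nagling worked on the previous round: if fewer than two user messages were accumulated per transmitted packet (msg_acc / pkt_cnt < 2), bundling is judged ineffective and nagle_start, the number of one-way messages required before bundling is re-enabled, doubles up to NAGLE_START_MAX; a good round resets it to NAGLE_START_INIT. A sketch of just that feedback loop; the constants match the kernel's NAGLE_START_INIT/NAGLE_START_MAX of 4 and 1024 (treat as an assumption if the kernel version differs), and the types are mocks:

#include <stdint.h>
#include <stdio.h>

#define NAGLE_START_INIT  4
#define NAGLE_START_MAX   1024

struct mock_tsk {
    uint16_t msg_acc;       /* user messages accepted into bundles */
    uint16_t pkt_cnt;       /* packets those messages produced */
    uint16_t nagle_start;   /* one-way msgs required before nagling */
    uint16_t oneway;
};

/* Mirrors the nagle_ack branch of tipc_sk_push_backlog(). */
static void nagle_feedback(struct mock_tsk *tsk)
{
    if (!tsk->pkt_cnt || tsk->msg_acc / tsk->pkt_cnt < 2) {
        /* Bundling ineffective: back off exponentially. */
        tsk->oneway = 0;
        if (tsk->nagle_start < NAGLE_START_MAX)
            tsk->nagle_start *= 2;
    } else {
        /* Bundling effective: re-arm at the initial threshold. */
        tsk->nagle_start = NAGLE_START_INIT;
    }
    tsk->msg_acc = 0;
    tsk->pkt_cnt = 0;
}

int main(void)
{
    struct mock_tsk tsk = { .nagle_start = NAGLE_START_INIT };

    tsk.msg_acc = 3; tsk.pkt_cnt = 3;   /* ~1 msg per packet: bad */
    nagle_feedback(&tsk);
    printf("after bad round: start %u\n", tsk.nagle_start);   /* 8 */

    tsk.msg_acc = 8; tsk.pkt_cnt = 2;   /* 4 msgs per packet: good */
    nagle_feedback(&tsk);
    printf("after good round: start %u\n", tsk.nagle_start);  /* 4 */
    return 0;
}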
1319 * @tsk: receiving socket
1324 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
1329 u32 onode = tsk_own_node(tsk);
1330 struct sock *sk = &tsk->sk;
1335 if (!tsk_peer_msg(tsk, hdr)) {
1342 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
1343 tsk_peer_port(tsk));
1357 tsk->probe_unacked = false;
1365 was_cong = tsk_conn_cong(tsk);
1366 tipc_sk_push_backlog(tsk, msg_nagle_ack(hdr));
1367 tsk->snt_unacked -= msg_conn_ack(hdr);
1368 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1369 tsk->snd_win = msg_adv_win(hdr);
1370 if (was_cong && !tsk_conn_cong(tsk))
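On a connection-protocol ACK, tipc_sk_conn_proto_rcv() (lines 1324-1370) first pushes any nagle backlog, then retires msg_conn_ack(hdr) units from snt_unacked, adopts the peer's freshly advertised window when block flow control is active, and wakes the sender if that transition cleared congestion. A sketch of the arithmetic, with mock types and a printf standing in for the socket wakeup:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mock_tsk {
    uint16_t snt_unacked;
    uint16_t snd_win;
    bool     block_flowctl;  /* peer_caps & TIPC_BLOCK_FLOWCTL */
};

static bool tsk_conn_cong(struct mock_tsk *tsk)
{
    return tsk->snt_unacked > tsk->snd_win;
}

static void conn_ack_rcv(struct mock_tsk *tsk, uint16_t conn_ack,
                         uint16_t adv_win)
{
    bool was_cong = tsk_conn_cong(tsk);

    tsk->snt_unacked -= conn_ack;       /* retire acked debt */
    if (tsk->block_flowctl)
        tsk->snd_win = adv_win;         /* peer may resize its window */
    if (was_cong && !tsk_conn_cong(tsk))
        printf("congestion cleared: wake up sender\n");
}

int main(void)
{
    struct mock_tsk tsk = { .snt_unacked = 60, .snd_win = 50,
                            .block_flowctl = true };

    conn_ack_rcv(&tsk, 30, 50);  /* ack 30 units, window unchanged */
    printf("unacked now %u\n", tsk.snt_unacked);
    return 0;
}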
1409 struct tipc_sock *tsk = tipc_sk(sk);
1412 struct list_head *clinks = &tsk->cong_links;
1414 struct tipc_group *grp = tsk->group;
1415 struct tipc_msg *hdr = &tsk->phdr;
1443 ua = (struct tipc_uaddr *)&tsk->peer;
1454 if (tsk->published)
1457 tsk->conn_addrtype = atype;
1500 mtu = tipc_node_get_mtu(net, skaddr.node, tsk->portid, true);
1511 rc = tipc_node_xmit(net, &pkts, skaddr.node, tsk->portid);
1514 tsk->cong_link_cnt++;
1558 struct tipc_sock *tsk = tipc_sk(sk);
1559 struct tipc_msg *hdr = &tsk->phdr;
1562 u32 dnode = tsk_peer_node(tsk);
1563 int maxnagle = tsk->maxnagle;
1564 int maxpkt = tsk->max_pkt;
1575 tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
1576 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1583 (!tsk->cong_link_cnt &&
1584 !tsk_conn_cong(tsk) &&
1589 blocks = tsk->snd_backlog;
1590 if (tsk->oneway++ >= tsk->nagle_start && maxnagle &&
1596 tsk->msg_acc++;
1597 if (blocks <= 64 && tsk->expect_ack) {
1598 tsk->snd_backlog = blocks;
1602 tsk->pkt_cnt += skb_queue_len(txq);
1607 tsk->expect_ack = true;
1609 tsk->expect_ack = false;
1611 tsk->msg_acc = 0;
1612 tsk->pkt_cnt = 0;
1618 blocks += tsk_inc(tsk, send + MIN_H_SIZE);
1622 rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
1624 tsk->cong_link_cnt = 1;
1628 tsk->snt_unacked += blocks;
1629 tsk->snd_backlog = 0;
1657 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1660 struct sock *sk = &tsk->sk;
1662 struct tipc_msg *msg = &tsk->phdr;
1673 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1674 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
1675 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1676 tsk_set_nagle(tsk);
1678 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1682 tsk->rcv_win = FLOWCTL_MSG_WIN;
1683 tsk->snd_win = FLOWCTL_MSG_WIN;
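tipc_sk_finish_conn() (lines 1657-1683) finalizes flow control once the peer is known: if the peer lacks TIPC_BLOCK_FLOWCTL, both windows fall back to the fixed legacy message window FLOWCTL_MSG_WIN (512 in the kernel source, noted here as an assumption); otherwise the block windows established at socket creation (line 527) stay in force. A simplified sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLOWCTL_BLK_SZ   1024
#define FLOWCTL_MSG_WIN  512    /* assumed kernel value */

static uint16_t tsk_adv_blocks(int rcvbuf_len)
{
    /* Advertise a quarter of the receive buffer, in 1k blocks. */
    return rcvbuf_len / FLOWCTL_BLK_SZ / 4;
}

static void finish_conn_windows(bool peer_block_flowctl, int rcvbuf,
                                uint16_t *rcv_win, uint16_t *snd_win)
{
    if (peer_block_flowctl) {
        /* Simplification of the creation-time setup at line 527,
         * which uses RCVBUF_MIN rather than the live buffer size. */
        *rcv_win = tsk_adv_blocks(rcvbuf);
        *snd_win = tsk_adv_blocks(rcvbuf);
        return;
    }
    /* Legacy peer: count whole messages, fixed window both ways. */
    *rcv_win = FLOWCTL_MSG_WIN;
    *snd_win = FLOWCTL_MSG_WIN;
}

int main(void)
{
    uint16_t rcv, snd;

    finish_conn_windows(false, 4 * 1024 * 1024, &rcv, &snd);
    printf("legacy peer: rcv %u snd %u\n", rcv, snd);
    finish_conn_windows(true, 4 * 1024 * 1024, &rcv, &snd);
    printf("block peer:  rcv %u snd %u\n", rcv, snd);
    return 0;
}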
1726 * @tsk: TIPC port associated with message
1733 struct tipc_sock *tsk) /* continuation of the tipc_sk_anc_data_recv() parameter list */
1776 has_addr = !!tsk->conn_addrtype;
1777 data[0] = msg_nametype(&tsk->phdr);
1778 data[1] = msg_nameinst(&tsk->phdr);
1789 static struct sk_buff *tipc_sk_build_ack(struct tipc_sock *tsk)
1791 struct sock *sk = &tsk->sk;
1794 u32 peer_port = tsk_peer_port(tsk);
1795 u32 dnode = tsk_peer_node(tsk);
1800 dnode, tsk_own_node(tsk), peer_port,
1801 tsk->portid, TIPC_OK);
1805 msg_set_conn_ack(msg, tsk->rcv_unacked);
1806 tsk->rcv_unacked = 0;
1809 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1810 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1811 msg_set_adv_win(msg, tsk->rcv_win);
1816 static void tipc_sk_send_ack(struct tipc_sock *tsk)
1820 skb = tipc_sk_build_ack(tsk);
1824 tipc_node_xmit_skb(sock_net(&tsk->sk), skb, tsk_peer_node(tsk),
1886 struct tipc_sock *tsk = tipc_sk(sk);
1925 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1973 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1975 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1990 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1991 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1992 tipc_sk_send_ack(tsk);
2014 struct tipc_sock *tsk = tipc_sk(sk);
2057 rc = tipc_sk_anc_data_recv(m, skb, tsk);
2091 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
2092 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
2093 tipc_sk_send_ack(tsk);
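Both receive paths share the ack pacing visible at lines 1990-1992 and 2091-2093: received payload is converted to blocks via tsk_inc(), accumulated in rcv_unacked, and a protocol ACK (built by tipc_sk_build_ack() at line 1789, which also resets rcv_unacked and re-advertises the window) goes out once a quarter of the receive window is pending; TIPC_ACK_RATE is 4 in the kernel. A sketch of the threshold logic with mock types:

#include <stdint.h>
#include <stdio.h>

#define TIPC_ACK_RATE  4

struct mock_tsk {
    uint16_t rcv_unacked;   /* blocks consumed since last ack */
    uint16_t rcv_win;
};

/* Mirrors the pattern at lines 1990-1992 and 2091-2093. */
static void rcv_accounting(struct mock_tsk *tsk, uint16_t blocks)
{
    tsk->rcv_unacked += blocks;
    if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE) {
        printf("ack %u blocks\n", tsk->rcv_unacked);
        tsk->rcv_unacked = 0;   /* done in tipc_sk_build_ack() */
    }
}

int main(void)
{
    struct mock_tsk tsk = { .rcv_win = 40 };

    for (int i = 0; i < 12; i++)
        rcv_accounting(&tsk, 3); /* an ack fires every 4th message */
    return 0;
}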
2149 struct tipc_sock *tsk = tipc_sk(sk);
2151 struct tipc_group *grp = tsk->group;
2156 tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
2159 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
2162 tsk->cong_link_cnt--;
2164 tipc_sk_push_backlog(tsk, false);
2170 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
2185 * @tsk: TIPC socket
2190 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
2193 struct sock *sk = &tsk->sk;
2197 u32 pport = tsk_peer_port(tsk);
2198 u32 pnode = tsk_peer_node(tsk);
2206 tsk->oneway = 0;
2214 tipc_sk_finish_conn(tsk, oport, onode);
2215 msg_set_importance(&tsk->phdr, msg_importance(hdr));
2236 delay %= (tsk->conn_timeout / 4);
2253 tipc_sk_push_backlog(tsk, false);
2260 skb = tipc_sk_build_ack(tsk);
2268 if (!tsk_peer_msg(tsk, hdr))
2273 tipc_node_remove_conn(net, pnode, tsk->portid);
2306 struct tipc_sock *tsk = tipc_sk(sk);
2315 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2336 struct tipc_sock *tsk = tipc_sk(sk);
2337 struct tipc_group *grp = tsk->group;
2356 tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
2362 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb, xmitq)) ||
2483 struct tipc_sock *tsk;
2490 tsk = tipc_sk_lookup(net, dport);
2492 if (likely(tsk)) {
2493 sk = &tsk->sk;
2572 struct tipc_sock *tsk = tipc_sk(sk);
2575 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2584 if (tsk->group) {
2590 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2601 memcpy(&tsk->peer, dest, destlen);
2826 struct tipc_sock *tsk = tipc_sk(sk);
2827 u32 pnode = tsk_peer_node(tsk);
2828 u32 pport = tsk_peer_port(tsk);
2829 u32 self = tsk_own_node(tsk);
2830 u32 oport = tsk->portid;
2833 if (tsk->probe_unacked) {
2845 tsk->probe_unacked = true;
2851 struct tipc_sock *tsk = tipc_sk(sk);
2854 if (tsk->cong_link_cnt) {
2866 struct tipc_sock *tsk = tipc_sk(sk);
2867 u32 pnode = tsk_peer_node(tsk);
2890 rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
2894 tipc_dest_push(&tsk->cong_links, pnode, 0);
2895 tsk->cong_link_cnt = 1;
2900 static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua)
2902 struct sock *sk = &tsk->sk;
2910 key = tsk->portid + tsk->pub_count + 1;
2911 if (key == tsk->portid)
2913 skaddr.ref = tsk->portid;
2919 list_add(&p->binding_sock, &tsk->publications);
2920 tsk->pub_count++;
2921 tsk->published = true;
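tipc_sk_publish() (lines 2900-2921) gives every name binding a key of portid + pub_count + 1, so keys stay unique per socket; if the 32-bit counter has wrapped so far that the key collides with the bare portid, the bind is refused (-EADDRINUSE in the kernel). A sketch of the key scheme; the errno constant and types are mocks:

#include <stdint.h>
#include <stdio.h>

#define EADDRINUSE_MOCK  98

struct mock_tsk {
    uint32_t portid;
    uint32_t pub_count;
};

static int publish_key(struct mock_tsk *tsk, uint32_t *key)
{
    *key = tsk->portid + tsk->pub_count + 1;
    if (*key == tsk->portid)     /* counter wrapped: refuse */
        return -EADDRINUSE_MOCK;
    tsk->pub_count++;            /* binding accepted */
    return 0;
}

int main(void)
{
    struct mock_tsk tsk = { .portid = 100 };
    uint32_t key;

    for (int i = 0; i < 3; i++)
        if (!publish_key(&tsk, &key))
            printf("publication %d -> key %u\n", i, key);
    return 0;
}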
2925 static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua)
2927 struct net *net = sock_net(&tsk->sk);
2932 list_for_each_entry_safe(p, safe, &tsk->publications, binding_sock) {
2952 if (list_empty(&tsk->publications)) {
2953 tsk->published = 0;
2966 struct tipc_sock *tsk;
2974 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2975 sock_hold(&tsk->sk);
2977 lock_sock(&tsk->sk);
2978 msg = &tsk->phdr;
2981 release_sock(&tsk->sk);
2983 sock_put(&tsk->sk);
2987 } while (tsk == ERR_PTR(-EAGAIN));
2995 struct tipc_sock *tsk;
2998 tsk = rhashtable_lookup(&tn->sk_rht, &portid, tsk_rht_params);
2999 if (tsk)
3000 sock_hold(&tsk->sk);
3003 return tsk;
3006 static int tipc_sk_insert(struct tipc_sock *tsk)
3008 struct sock *sk = &tsk->sk;
3018 tsk->portid = portid;
3019 sock_hold(&tsk->sk);
3020 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
3023 sock_put(&tsk->sk);
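tipc_sk_insert() (lines 3006-3023) allocates the socket's port number by starting at a random point in [TIPC_MIN_PORT, TIPC_MAX_PORT] (1 and 0xffff in the kernel source) and probing forward with wrap-around; each candidate is claimed atomically via insert-if-absent into the rhashtable, and the extra reference taken with sock_hold() is dropped again on collision. A userspace sketch in which a flat array stands in for the rhashtable:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TIPC_MIN_PORT  1
#define TIPC_MAX_PORT  0xffff

static bool table[TIPC_MAX_PORT + 1];  /* mock for tn->sk_rht */

/* Mock of rhashtable_lookup_insert_fast(): fails if already present. */
static bool insert_if_absent(uint32_t portid)
{
    if (table[portid])
        return false;
    table[portid] = true;
    return true;
}

static int sk_insert(uint32_t *portid_out)
{
    uint32_t remaining = TIPC_MAX_PORT - TIPC_MIN_PORT + 1;
    uint32_t portid = rand() % remaining + TIPC_MIN_PORT;

    while (remaining--) {
        portid++;
        if (portid < TIPC_MIN_PORT || portid > TIPC_MAX_PORT)
            portid = TIPC_MIN_PORT;   /* wrap around */
        if (insert_if_absent(portid)) {
            *portid_out = portid;     /* slot claimed */
            return 0;
        }
        /* collision: the kernel drops its extra sock_hold() here */
    }
    return -1;  /* port space exhausted */
}

int main(void)
{
    uint32_t portid;

    if (!sk_insert(&portid))
        printf("allocated port %u\n", portid);
    return 0;
}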
3029 static void tipc_sk_remove(struct tipc_sock *tsk)
3031 struct sock *sk = &tsk->sk;
3034 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
3067 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
3069 struct net *net = sock_net(&tsk->sk);
3070 struct tipc_group *grp = tsk->group;
3071 struct tipc_msg *hdr = &tsk->phdr;
3083 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
3086 tsk->group = grp;
3093 rc = tipc_sk_publish(tsk, &ua);
3096 tsk->group = NULL;
3100 tsk->mc_method.rcast = true;
3101 tsk->mc_method.mandatory = true;
3102 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
3106 static int tipc_sk_leave(struct tipc_sock *tsk)
3108 struct net *net = sock_net(&tsk->sk);
3109 struct tipc_group *grp = tsk->group;
3119 tsk->group = NULL;
3120 tipc_sk_withdraw(tsk, &ua);
3141 struct tipc_sock *tsk = tipc_sk(sk);
3181 tsk_set_unreliable(tsk, value);
3186 tsk_set_unreturnable(tsk, value);
3192 tsk->mc_method.rcast = false;
3193 tsk->mc_method.mandatory = true;
3196 tsk->mc_method.rcast = true;
3197 tsk->mc_method.mandatory = true;
3200 res = tipc_sk_join(tsk, &mreq);
3203 res = tipc_sk_leave(tsk);
3206 tsk->nodelay = !!value;
3207 tsk_set_nagle(tsk);
3235 struct tipc_sock *tsk = tipc_sk(sk);
3253 value = tsk_importance(tsk);
3256 value = tsk_unreliable(tsk);
3259 value = tsk_unreturnable(tsk);
3262 value = tsk->conn_timeout;
3276 if (tsk->group)
3277 tipc_group_self(tsk->group, &seq, &scope);
3462 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3468 peer_node = tsk_peer_node(tsk);
3469 peer_port = tsk_peer_port(tsk);
3470 conn_type = msg_nametype(&tsk->phdr);
3471 conn_instance = msg_nameinst(&tsk->phdr);
3481 if (tsk->conn_addrtype != 0) {
3500 *tsk) /* continuation of the __tipc_nl_add_sk_info() parameter list */
3503 struct sock *sk = &tsk->sk;
3505 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3510 if (__tipc_nl_add_sk_con(skb, tsk))
3512 } else if (!list_empty(&tsk->publications)) {
3521 struct tipc_sock *tsk) /* continuation of the __tipc_nl_add_sk() parameter list */
3535 if (__tipc_nl_add_sk_info(skb, tsk))
3554 struct tipc_sock *tsk)) /* continuation of the skb_handler callback parameter of tipc_nl_sk_walk() */
3557 struct tipc_sock *tsk;
3561 while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3562 if (IS_ERR(tsk)) {
3563 if (PTR_ERR(tsk) == -EAGAIN)
3568 sock_hold(&tsk->sk);
3570 lock_sock(&tsk->sk);
3571 err = skb_handler(skb, cb, tsk);
3573 release_sock(&tsk->sk);
3574 sock_put(&tsk->sk);
3577 release_sock(&tsk->sk);
3579 sock_put(&tsk->sk);
3622 struct tipc_sock *tsk, u32 sk_filter_state, /* continuation of the tipc_sk_fill_sock_diag() parameter list */
3625 struct sock *sk = &tsk->sk;
3637 if (__tipc_nl_add_sk_info(skb, tsk))
3663 if (tsk->cong_link_cnt &&
3667 if (tsk_conn_cong(tsk) &&
3673 if (tsk->group)
3674 if (tipc_group_fill_sock_diag(tsk->group, skb))
3737 struct tipc_sock *tsk, u32 *last_publ) /* continuation of the __tipc_nl_list_sk_publ() parameter list */
3743 list_for_each_entry(p, &tsk->publications, binding_sock) {
3747 if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
3759 p = list_first_entry(&tsk->publications, struct publication,
3763 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3782 struct tipc_sock *tsk;
3806 tsk = tipc_sk_lookup(net, tsk_portid);
3807 if (!tsk)
3810 lock_sock(&tsk->sk);
3811 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3814 release_sock(&tsk->sk);
3815 sock_put(&tsk->sk);
3837 struct tipc_sock *tsk;
3845 tsk = tipc_sk(sk);
3857 return (_port == tsk->portid);
3862 if (tsk->published) {
3863 p = list_first_entry_or_null(&tsk->publications,
3873 type = msg_nametype(&tsk->phdr);
3874 lower = msg_nameinst(&tsk->phdr);
3941 struct tipc_sock *tsk;
3950 tsk = tipc_sk(sk);
3955 i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
3956 i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
3959 i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
3960 i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
3961 conn_type = msg_nametype(&tsk->phdr);
3962 conn_instance = msg_nameinst(&tsk->phdr);
3966 i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
3967 if (tsk->published) {
3968 p = list_first_entry_or_null(&tsk->publications,
3974 i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
3975 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
3976 i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
3977 i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
3978 i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
3979 i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
3980 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
3981 i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));