Lines matching "mctp": MCTP routing and packet handling (net/mctp/route.c)
1 // SPDX-License-Identifier: GPL-2.0
3 * Management Component Transport Protocol (MCTP) - routing
15 #include <linux/mctp.h>
22 #include <net/mctp.h>
27 #include <trace/events/mctp.h>
50 /* TODO: look up in skb->cb? */ in mctp_lookup_bind()
56 type = (*(u8 *)skb->data) & 0x7f; in mctp_lookup_bind()
58 sk_for_each_rcu(sk, &net->mctp.binds) { in mctp_lookup_bind()
61 if (msk->bind_net != MCTP_NET_ANY && msk->bind_net != cb->net) in mctp_lookup_bind()
64 if (msk->bind_type != type) in mctp_lookup_bind()
67 if (!mctp_address_matches(msk->bind_addr, mh->dest)) in mctp_lookup_bind()
78 * struct net->mctp.keys contains our set of currently-allocated keys for
79 * MCTP tag management. The lookup tuple for these is the peer EID,
80 * local EID and MCTP tag.
85 * a key with (local = local-eid, peer = ANY). This allows a match on the
91 * - when a packet is sent, with a locally-owned tag: we need to find an
94 * - when a tag is manually allocated: we need to find an unused tag value
98 * (local = ANY, peer = peer-eid).
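
The comment block above describes the lookup tuple for entries in net->mctp.keys: (net, local EID, peer EID, tag), with MCTP_ADDR_ANY acting as a wildcard on either EID. The following standalone sketch (hypothetical names, not part of route.c) lists the key shapes that the comment refers to, with example EID and tag values:

	/* Illustration only: example key shapes. MCTP_ADDR_ANY is the
	 * wildcard honoured by mctp_address_matches() below.
	 */
	static const struct {
		mctp_eid_t local, peer;
		u8 tag;
	} example_keys[] = {
		{ 8, MCTP_ADDR_ANY, 1 },	/* bound socket, no peer known yet */
		{ MCTP_ADDR_ANY, 20, 2 },	/* manually-allocated tag towards peer 20 */
		{ 8, 20, 3 },			/* fully-specified key */
	};
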
113 if (key->net != net) in mctp_key_match()
116 if (!mctp_address_matches(key->local_addr, local)) in mctp_key_match()
119 if (!mctp_address_matches(key->peer_addr, peer)) in mctp_key_match()
122 if (key->tag != tag) in mctp_key_match()
128 /* returns a key (with key->lock held, and refcounted), or NULL if no such
134 __acquires(&key->lock) in mctp_lookup_key()
142 tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO); in mctp_lookup_key()
145 spin_lock_irqsave(&net->mctp.keys_lock, flags); in mctp_lookup_key()
147 hlist_for_each_entry(key, &net->mctp.keys, hlist) { in mctp_lookup_key()
148 if (!mctp_key_match(key, netid, mh->dest, peer, tag)) in mctp_lookup_key()
151 spin_lock(&key->lock); in mctp_lookup_key()
152 if (key->valid) { in mctp_lookup_key()
153 refcount_inc(&key->refs); in mctp_lookup_key()
157 spin_unlock(&key->lock); in mctp_lookup_key()
161 spin_unlock(&net->mctp.keys_lock); in mctp_lookup_key()
164 spin_unlock_irqrestore(&net->mctp.keys_lock, flags); in mctp_lookup_key()
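
The comment at the top of mctp_lookup_key() and its __acquires() annotation spell out the return contract: a non-NULL key comes back with key->lock held and a reference taken. A minimal caller-side sketch of that contract (the example_* name is hypothetical; the real caller is mctp_route_input() further down):

	static void example_consume_key(struct net *net, struct sk_buff *skb,
					unsigned int netid, mctp_eid_t peer)
	{
		struct mctp_sk_key *key;
		unsigned long flags;

		key = mctp_lookup_key(net, skb, netid, peer, &flags);
		if (!key)
			return;			/* no match, nothing to release */

		/* ... inspect key->sk, key->reasm_head, etc. under key->lock ... */

		spin_unlock_irqrestore(&key->lock, flags);
		mctp_key_unref(key);		/* drop the lookup reference */
	}
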
181 key->net = net; in mctp_key_alloc()
182 key->peer_addr = peer; in mctp_key_alloc()
183 key->local_addr = local; in mctp_key_alloc()
184 key->tag = tag; in mctp_key_alloc()
185 key->sk = &msk->sk; in mctp_key_alloc()
186 key->valid = true; in mctp_key_alloc()
187 spin_lock_init(&key->lock); in mctp_key_alloc()
188 refcount_set(&key->refs, 1); in mctp_key_alloc()
189 sock_hold(key->sk); in mctp_key_alloc()
198 if (!refcount_dec_and_test(&key->refs)) in mctp_key_unref()
204 spin_lock_irqsave(&key->lock, flags); in mctp_key_unref()
205 mctp_dev_release_key(key->dev, key); in mctp_key_unref()
206 spin_unlock_irqrestore(&key->lock, flags); in mctp_key_unref()
208 sock_put(key->sk); in mctp_key_unref()
214 struct net *net = sock_net(&msk->sk); in mctp_key_add()
219 spin_lock_irqsave(&net->mctp.keys_lock, flags); in mctp_key_add()
221 if (sock_flag(&msk->sk, SOCK_DEAD)) { in mctp_key_add()
222 rc = -EINVAL; in mctp_key_add()
226 hlist_for_each_entry(tmp, &net->mctp.keys, hlist) { in mctp_key_add()
227 if (mctp_key_match(tmp, key->net, key->local_addr, in mctp_key_add()
228 key->peer_addr, key->tag)) { in mctp_key_add()
229 spin_lock(&tmp->lock); in mctp_key_add()
230 if (tmp->valid) in mctp_key_add()
231 rc = -EEXIST; in mctp_key_add()
232 spin_unlock(&tmp->lock); in mctp_key_add()
239 refcount_inc(&key->refs); in mctp_key_add()
240 key->expiry = jiffies + mctp_key_lifetime; in mctp_key_add()
241 timer_reduce(&msk->key_expiry, key->expiry); in mctp_key_add()
243 hlist_add_head(&key->hlist, &net->mctp.keys); in mctp_key_add()
244 hlist_add_head(&key->sklist, &msk->keys); in mctp_key_add()
248 spin_unlock_irqrestore(&net->mctp.keys_lock, flags); in mctp_key_add()
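
mctp_key_alloc(), mctp_key_add() and mctp_key_unref() combine into a recurring pattern when a new key is installed (as in mctp_route_input() further down). A minimal sketch of that sequence; the GFP_ATOMIC argument and the error handling are assumptions, as the corresponding lines are not part of this excerpt:

	static int example_install_key(struct mctp_sock *msk, unsigned int netid,
				       mctp_eid_t local, mctp_eid_t peer)
	{
		struct mctp_sk_key *key;
		int rc;

		key = mctp_key_alloc(msk, netid, local, peer, GFP_ATOMIC);
		if (!key)
			return -ENOMEM;

		rc = mctp_key_add(key, msk);	/* takes its own reference on success */
		mctp_key_unref(key);		/* drop the allocation reference */

		return rc;
	}
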
261 __releases(&key->lock) in __mctp_key_done_in()
266 skb = key->reasm_head; in __mctp_key_done_in()
267 key->reasm_head = NULL; in __mctp_key_done_in()
269 if (!key->manual_alloc) { in __mctp_key_done_in()
270 key->reasm_dead = true; in __mctp_key_done_in()
271 key->valid = false; in __mctp_key_done_in()
272 mctp_dev_release_key(key->dev, key); in __mctp_key_done_in()
274 spin_unlock_irqrestore(&key->lock, flags); in __mctp_key_done_in()
276 if (!key->manual_alloc) { in __mctp_key_done_in()
277 spin_lock_irqsave(&net->mctp.keys_lock, flags); in __mctp_key_done_in()
278 if (!hlist_unhashed(&key->hlist)) { in __mctp_key_done_in()
279 hlist_del_init(&key->hlist); in __mctp_key_done_in()
280 hlist_del_init(&key->sklist); in __mctp_key_done_in()
283 spin_unlock_irqrestore(&net->mctp.keys_lock, flags); in __mctp_key_done_in()
301 refcount_inc(&key->refs); in mctp_skb_set_flow()
302 flow->key = key; in mctp_skb_set_flow()
314 key = flow->key; in mctp_flow_prepare_output()
316 if (key->dev) { in mctp_flow_prepare_output()
317 WARN_ON(key->dev != dev); in mctp_flow_prepare_output()
333 this_seq = (hdr->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT) in mctp_frag_queue()
336 if (!key->reasm_head) { in mctp_frag_queue()
340 key->reasm_head = skb_unshare(skb, GFP_ATOMIC); in mctp_frag_queue()
341 if (!key->reasm_head) in mctp_frag_queue()
342 return -ENOMEM; in mctp_frag_queue()
344 key->reasm_tailp = &(skb_shinfo(key->reasm_head)->frag_list); in mctp_frag_queue()
345 key->last_seq = this_seq; in mctp_frag_queue()
349 exp_seq = (key->last_seq + 1) & MCTP_HDR_SEQ_MASK; in mctp_frag_queue()
352 return -EINVAL; in mctp_frag_queue()
354 if (key->reasm_head->len + skb->len > mctp_message_maxlen) in mctp_frag_queue()
355 return -EINVAL; in mctp_frag_queue()
357 skb->next = NULL; in mctp_frag_queue()
358 skb->sk = NULL; in mctp_frag_queue()
359 *key->reasm_tailp = skb; in mctp_frag_queue()
360 key->reasm_tailp = &skb->next; in mctp_frag_queue()
362 key->last_seq = this_seq; in mctp_frag_queue()
364 key->reasm_head->data_len += skb->len; in mctp_frag_queue()
365 key->reasm_head->len += skb->len; in mctp_frag_queue()
366 key->reasm_head->truesize += skb->truesize; in mctp_frag_queue()
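
The exp_seq test above relies on the MCTP packet sequence number being a small wrapping counter; the 2-bit width (MCTP_HDR_SEQ_MASK == 0x3) is an assumption in this sketch. A worked example of the wrap-around:

	static bool example_seq_ok(u8 last_seq, u8 this_seq)
	{
		u8 exp_seq = (last_seq + 1) & 0x3;	/* assumed MCTP_HDR_SEQ_MASK */

		/* e.g. last_seq == 3 expects this_seq == 0 next */
		return this_seq == exp_seq;
	}
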
374 struct net *net = dev_net(skb->dev); in mctp_route_input()
383 rc = -EINVAL; in mctp_route_input()
385 /* We may be receiving a locally-routed packet; drop source sk in mctp_route_input()
388 * From here, we will either queue the skb - either to a frag_queue, or in mctp_route_input()
390 * a non-NULL skb on exit will be otherwise unowned, and hence in mctp_route_input()
391 * kfree_skb()-ed. in mctp_route_input()
396 if (skb->len < sizeof(struct mctp_hdr) + 1) in mctp_route_input()
401 netid = mctp_cb(skb)->net; in mctp_route_input()
404 if (mh->ver != 1) in mctp_route_input()
407 flags = mh->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM); in mctp_route_input()
408 tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO); in mctp_route_input()
413 * we hold a ref on the key, and key->lock held. in mctp_route_input()
415 key = mctp_lookup_key(net, skb, netid, mh->src, &f); in mctp_route_input()
419 msk = container_of(key->sk, struct mctp_sock, sk); in mctp_route_input()
423 * key for reassembly - we'll create a more specific in mctp_route_input()
426 * this lookup requires key->peer to be MCTP_ADDR_ANY; in mctp_route_input()
427 * it doesn't match just any key->peer. in mctp_route_input()
432 msk = container_of(any_key->sk, in mctp_route_input()
434 spin_unlock_irqrestore(&any_key->lock, f); in mctp_route_input()
442 rc = -ENOENT; in mctp_route_input()
446 /* single-packet message? deliver to socket, clean up any in mctp_route_input()
450 rc = sock_queue_rcv_skb(&msk->sk, skb); in mctp_route_input()
464 /* broadcast response or a bind() - create a key for further in mctp_route_input()
468 key = mctp_key_alloc(msk, netid, mh->dest, mh->src, in mctp_route_input()
471 rc = -ENOMEM; in mctp_route_input()
492 /* we don't need to release key->lock on exit, so in mctp_route_input()
500 if (key->reasm_head || key->reasm_dead) { in mctp_route_input()
504 rc = -EEXIST; in mctp_route_input()
515 * using the message-specific key in mctp_route_input()
519 if (!key->reasm_head) in mctp_route_input()
520 rc = -EINVAL; in mctp_route_input()
534 rc = sock_queue_rcv_skb(key->sk, key->reasm_head); in mctp_route_input()
536 key->reasm_head = NULL; in mctp_route_input()
543 rc = -ENOENT; in mctp_route_input()
549 spin_unlock_irqrestore(&key->lock, f); in mctp_route_input()
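
mctp_route_input() keys its behaviour off the SOM/EOM flag pair extracted near the top of the function. A compact, illustrative summary of that dispatch (the enum and helper are hypothetical, for exposition only):

	enum example_rx_action {
		RX_DELIVER,		/* SOM | EOM: single-packet message */
		RX_REASM_START,		/* SOM only: find/create a key, start reassembly */
		RX_REASM_APPEND,	/* neither: append to key->reasm_head */
		RX_REASM_COMPLETE,	/* EOM only: append, then deliver reasm_head */
	};

	static enum example_rx_action example_classify_rx(u8 flags)
	{
		bool som = flags & MCTP_HDR_FLAG_SOM;
		bool eom = flags & MCTP_HDR_FLAG_EOM;

		if (som && eom)
			return RX_DELIVER;
		if (som)
			return RX_REASM_START;
		return eom ? RX_REASM_COMPLETE : RX_REASM_APPEND;
	}
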
561 return rt->mtu ?: READ_ONCE(rt->dev->dev->mtu); in mctp_route_mtu()
573 skb->protocol = htons(ETH_P_MCTP); in mctp_route_output()
575 mtu = READ_ONCE(skb->dev->mtu); in mctp_route_output()
576 if (skb->len > mtu) { in mctp_route_output()
578 return -EMSGSIZE; in mctp_route_output()
581 if (cb->ifindex) { in mctp_route_output()
583 if (cb->halen != skb->dev->addr_len) { in mctp_route_output()
586 return -EMSGSIZE; in mctp_route_output()
588 daddr = cb->haddr; in mctp_route_output()
591 if (mctp_neigh_lookup(route->dev, hdr->dest, daddr_buf) == 0) in mctp_route_output()
595 rc = dev_hard_header(skb, skb->dev, ntohs(skb->protocol), in mctp_route_output()
596 daddr, skb->dev->dev_addr, skb->len); in mctp_route_output()
599 return -EHOSTUNREACH; in mctp_route_output()
602 mctp_flow_prepare_output(skb, route->dev); in mctp_route_output()
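
mctp_route_output() picks the link-layer destination in a fixed order: the socket-supplied extended address when cb->ifindex is set, otherwise a neighbour-table lookup. A condensed, illustrative view of that choice; leaving a NULL destination for the driver's header_ops to handle is an assumption, as that path is not visible in this excerpt:

	static const void *example_pick_daddr(const struct mctp_skb_cb *cb,
					      struct mctp_route *route,
					      const struct mctp_hdr *hdr,
					      u8 *daddr_buf)
	{
		if (cb->ifindex)
			return cb->haddr;	/* extended addressing from the socket */

		if (mctp_neigh_lookup(route->dev, hdr->dest, daddr_buf) == 0)
			return daddr_buf;

		return NULL;			/* let dev_hard_header() decide */
	}
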
614 if (refcount_dec_and_test(&rt->refs)) { in mctp_route_release()
615 mctp_dev_put(rt->dev); in mctp_route_release()
629 INIT_LIST_HEAD(&rt->list); in mctp_route_alloc()
630 refcount_set(&rt->refs, 1); in mctp_route_alloc()
631 rt->output = mctp_route_discard; in mctp_route_alloc()
638 return READ_ONCE(net->mctp.default_net); in mctp_default_net()
644 return -EINVAL; in mctp_default_net_set()
645 WRITE_ONCE(net->mctp.default_net, index); in mctp_default_net_set()
653 struct netns_mctp *mns = &net->mctp; in mctp_reserve_tag()
655 lockdep_assert_held(&mns->keys_lock); in mctp_reserve_tag()
657 key->expiry = jiffies + mctp_key_lifetime; in mctp_reserve_tag()
658 timer_reduce(&msk->key_expiry, key->expiry); in mctp_reserve_tag()
660 /* we hold net->mctp.keys_lock here, allowing updates to both in mctp_reserve_tag()
663 hlist_add_head_rcu(&key->hlist, &mns->keys); in mctp_reserve_tag()
664 hlist_add_head_rcu(&key->sklist, &msk->keys); in mctp_reserve_tag()
665 refcount_inc(&key->refs); in mctp_reserve_tag()
668 /* Allocate a locally-owned tag value for (local, peer), and reserve
676 struct net *net = sock_net(&msk->sk); in mctp_alloc_local_tag()
677 struct netns_mctp *mns = &net->mctp; in mctp_alloc_local_tag()
689 return ERR_PTR(-ENOMEM); in mctp_alloc_local_tag()
694 spin_lock_irqsave(&mns->keys_lock, flags); in mctp_alloc_local_tag()
699 hlist_for_each_entry(tmp, &mns->keys, hlist) { in mctp_alloc_local_tag()
704 /* tags are net-specific */ in mctp_alloc_local_tag()
705 if (tmp->net != netid) in mctp_alloc_local_tag()
709 if (tmp->tag & MCTP_HDR_FLAG_TO) in mctp_alloc_local_tag()
717 !mctp_address_matches(tmp->peer_addr, peer)) in mctp_alloc_local_tag()
721 !mctp_address_matches(tmp->local_addr, local)) in mctp_alloc_local_tag()
724 spin_lock(&tmp->lock); in mctp_alloc_local_tag()
728 if (tmp->valid) in mctp_alloc_local_tag()
729 tagbits &= ~(1 << tmp->tag); in mctp_alloc_local_tag()
730 spin_unlock(&tmp->lock); in mctp_alloc_local_tag()
737 key->tag = __ffs(tagbits); in mctp_alloc_local_tag()
741 key->manual_alloc = manual; in mctp_alloc_local_tag()
742 *tagp = key->tag; in mctp_alloc_local_tag()
745 spin_unlock_irqrestore(&mns->keys_lock, flags); in mctp_alloc_local_tag()
749 return ERR_PTR(-EBUSY); in mctp_alloc_local_tag()
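
The tagbits bookkeeping above is a small free-bitmap: each candidate tag value owns one bit, every valid in-use key clears its bit, and __ffs() returns the lowest tag still available. A worked example with illustrative values (the 8-tag width, from the 3-bit MCTP tag field, is an assumption here):

	static u8 example_pick_tag(void)
	{
		unsigned long tagbits = (1UL << 8) - 1;	/* tags 0..7 all free */

		tagbits &= ~(1UL << 0);		/* tag 0 held by a valid key */
		tagbits &= ~(1UL << 1);		/* tag 1 held by a valid key */

		return __ffs(tagbits);		/* == 2, the lowest free tag */
	}
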
760 struct net *net = sock_net(&msk->sk); in mctp_lookup_prealloc_tag()
761 struct netns_mctp *mns = &net->mctp; in mctp_lookup_prealloc_tag()
768 spin_lock_irqsave(&mns->keys_lock, flags); in mctp_lookup_prealloc_tag()
770 hlist_for_each_entry(tmp, &mns->keys, hlist) { in mctp_lookup_prealloc_tag()
771 if (tmp->net != netid) in mctp_lookup_prealloc_tag()
774 if (tmp->tag != req_tag) in mctp_lookup_prealloc_tag()
777 if (!mctp_address_matches(tmp->peer_addr, daddr)) in mctp_lookup_prealloc_tag()
780 if (!tmp->manual_alloc) in mctp_lookup_prealloc_tag()
783 spin_lock(&tmp->lock); in mctp_lookup_prealloc_tag()
784 if (tmp->valid) { in mctp_lookup_prealloc_tag()
786 refcount_inc(&key->refs); in mctp_lookup_prealloc_tag()
787 spin_unlock(&tmp->lock); in mctp_lookup_prealloc_tag()
790 spin_unlock(&tmp->lock); in mctp_lookup_prealloc_tag()
792 spin_unlock_irqrestore(&mns->keys_lock, flags); in mctp_lookup_prealloc_tag()
795 return ERR_PTR(-ENOENT); in mctp_lookup_prealloc_tag()
798 *tagp = key->tag; in mctp_lookup_prealloc_tag()
807 return READ_ONCE(rt->dev->net) == net && in mctp_rt_match_eid()
808 rt->min <= eid && rt->max >= eid; in mctp_rt_match_eid()
816 return rt1->dev->net == rt2->dev->net && in mctp_rt_compare_exact()
817 rt1->min == rt2->min && in mctp_rt_compare_exact()
818 rt1->max == rt2->max; in mctp_rt_compare_exact()
828 list_for_each_entry_rcu(tmp, &net->mctp.routes, list) { in mctp_route_lookup()
831 if (refcount_inc_not_zero(&tmp->refs)) { in mctp_route_lookup()
850 list_for_each_entry_rcu(tmp, &net->mctp.routes, list) { in mctp_route_lookup_null()
851 if (tmp->dev->dev == dev && tmp->type == RTN_LOCAL && in mctp_route_lookup_null()
852 refcount_inc_not_zero(&tmp->refs)) { in mctp_route_lookup_null()
879 return -EMSGSIZE; in mctp_do_fragment_route()
888 for (pos = 0; pos < skb->len;) { in mctp_do_fragment_route()
890 size = min(mtu - hlen, skb->len - pos); in mctp_do_fragment_route()
894 rc = -ENOMEM; in mctp_do_fragment_route()
899 skb2->protocol = skb->protocol; in mctp_do_fragment_route()
900 skb2->priority = skb->priority; in mctp_do_fragment_route()
901 skb2->dev = skb->dev; in mctp_do_fragment_route()
902 memcpy(skb2->cb, skb->cb, sizeof(skb2->cb)); in mctp_do_fragment_route()
904 if (skb->sk) in mctp_do_fragment_route()
905 skb_set_owner_w(skb2, skb->sk); in mctp_do_fragment_route()
911 skb2->transport_header = skb2->network_header + hlen; in mctp_do_fragment_route()
915 hdr2->ver = hdr->ver; in mctp_do_fragment_route()
916 hdr2->dest = hdr->dest; in mctp_do_fragment_route()
917 hdr2->src = hdr->src; in mctp_do_fragment_route()
918 hdr2->flags_seq_tag = tag & in mctp_do_fragment_route()
922 hdr2->flags_seq_tag |= MCTP_HDR_FLAG_SOM; in mctp_do_fragment_route()
924 if (pos + size == skb->len) in mctp_do_fragment_route()
925 hdr2->flags_seq_tag |= MCTP_HDR_FLAG_EOM; in mctp_do_fragment_route()
927 hdr2->flags_seq_tag |= seq << MCTP_HDR_SEQ_SHIFT; in mctp_do_fragment_route()
932 /* we need to copy the extensions for MCTP flow data */ in mctp_do_fragment_route()
936 rc = rt->output(rt, skb2); in mctp_do_fragment_route()
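
The sizing loop above carves the message into (mtu - hlen) sized payloads, where hlen is the 4-byte struct mctp_hdr. A worked example with illustrative numbers: a 200-byte message over a 68-byte MTU yields four packets carrying 64 + 64 + 64 + 8 payload bytes.

	static unsigned int example_frag_count(unsigned int len, unsigned int mtu)
	{
		const unsigned int hlen = sizeof(struct mctp_hdr);

		return DIV_ROUND_UP(len, mtu - hlen);	/* 200, 68 -> 4 */
	}
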
964 rc = -ENODEV; in mctp_local_output()
968 if (WARN_ON(!rt->dev)) in mctp_local_output()
971 } else if (cb->ifindex) { in mctp_local_output()
978 dev = dev_get_by_index_rcu(sock_net(sk), cb->ifindex); in mctp_local_output()
983 rt->dev = __mctp_dev_get(dev); in mctp_local_output()
986 if (!rt->dev) in mctp_local_output()
989 /* establish temporary route - we set up enough to keep in mctp_local_output()
992 rt->output = mctp_route_output; in mctp_local_output()
993 rt->mtu = 0; in mctp_local_output()
996 rc = -EINVAL; in mctp_local_output()
1000 spin_lock_irqsave(&rt->dev->addrs_lock, flags); in mctp_local_output()
1001 if (rt->dev->num_addrs == 0) { in mctp_local_output()
1002 rc = -EHOSTUNREACH; in mctp_local_output()
1005 saddr = rt->dev->addrs[0]; in mctp_local_output()
1008 spin_unlock_irqrestore(&rt->dev->addrs_lock, flags); in mctp_local_output()
1009 netid = READ_ONCE(rt->dev->net); in mctp_local_output()
1035 skb->protocol = htons(ETH_P_MCTP); in mctp_local_output()
1036 skb->priority = 0; in mctp_local_output()
1040 skb->dev = rt->dev->dev; in mctp_local_output()
1042 /* cb->net will have been set on initial ingress */ in mctp_local_output()
1043 cb->src = saddr; in mctp_local_output()
1047 hdr->ver = 1; in mctp_local_output()
1048 hdr->dest = daddr; in mctp_local_output()
1049 hdr->src = saddr; in mctp_local_output()
1053 if (skb->len + sizeof(struct mctp_hdr) <= mtu) { in mctp_local_output()
1054 hdr->flags_seq_tag = MCTP_HDR_FLAG_SOM | in mctp_local_output()
1056 rc = rt->output(rt, skb); in mctp_local_output()
1081 struct net *net = dev_net(mdev->dev); in mctp_route_add()
1085 return -EINVAL; in mctp_route_add()
1088 return -EINVAL; in mctp_route_add()
1098 return -EINVAL; in mctp_route_add()
1103 return -ENOMEM; in mctp_route_add()
1105 rt->min = daddr_start; in mctp_route_add()
1106 rt->max = daddr_start + daddr_extent; in mctp_route_add()
1107 rt->mtu = mtu; in mctp_route_add()
1108 rt->dev = mdev; in mctp_route_add()
1109 mctp_dev_hold(rt->dev); in mctp_route_add()
1110 rt->type = type; in mctp_route_add()
1111 rt->output = rtfn; in mctp_route_add()
1115 list_for_each_entry(ert, &net->mctp.routes, list) { in mctp_route_add()
1118 return -EEXIST; in mctp_route_add()
1122 list_add_rcu(&rt->list, &net->mctp.routes); in mctp_route_add()
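
Routes above are stored as an inclusive EID range: rt->min = daddr_start and rt->max = daddr_start + daddr_extent, which mctp_rt_match_eid() earlier checks with rt->min <= eid && rt->max >= eid. A short worked example with illustrative values; note that mctp_newroute() below passes rtm->rtm_dst_len as this extent, so it behaves as a range width rather than a prefix length:

	static bool example_in_range(mctp_eid_t eid)
	{
		const mctp_eid_t min = 9, max = 9 + 3;	/* daddr_start 9, extent 3 */

		return min <= eid && max >= eid;	/* true for EIDs 9..12 */
	}
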
1130 struct net *net = dev_net(mdev->dev); in mctp_route_remove()
1136 return -EINVAL; in mctp_route_remove()
1143 list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) { in mctp_route_remove()
1144 if (rt->dev == mdev && in mctp_route_remove()
1145 rt->min == daddr_start && rt->max == daddr_end && in mctp_route_remove()
1146 rt->type == type) { in mctp_route_remove()
1147 list_del_rcu(&rt->list); in mctp_route_remove()
1154 return dropped ? 0 : -ENOENT; in mctp_route_remove()
1170 struct net *net = dev_net(mdev->dev); in mctp_route_remove_dev()
1174 list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) { in mctp_route_remove_dev()
1175 if (rt->dev == mdev) { in mctp_route_remove_dev()
1176 list_del_rcu(&rt->list); in mctp_route_remove_dev()
1183 /* Incoming packet-handling */
1199 /* basic non-data sanity checks */ in mctp_pkttype_receive()
1211 if (mh->ver < MCTP_VER_MIN || mh->ver > MCTP_VER_MAX) in mctp_pkttype_receive()
1217 if (!(mctp_address_unicast(mh->src) || mctp_address_null(mh->src))) in mctp_pkttype_receive()
1221 if (!(mctp_address_unicast(mh->dest) || mctp_address_null(mh->dest) || in mctp_pkttype_receive()
1222 mctp_address_broadcast(mh->dest))) in mctp_pkttype_receive()
1225 /* MCTP drivers must populate halen/haddr */ in mctp_pkttype_receive()
1226 if (dev->type == ARPHRD_MCTP) { in mctp_pkttype_receive()
1230 cb->halen = 0; in mctp_pkttype_receive()
1232 cb->net = READ_ONCE(mdev->net); in mctp_pkttype_receive()
1233 cb->ifindex = dev->ifindex; in mctp_pkttype_receive()
1235 rt = mctp_route_lookup(net, cb->net, mh->dest); in mctp_pkttype_receive()
1238 if (!rt && mh->dest == MCTP_ADDR_NULL && skb->pkt_type == PACKET_HOST) in mctp_pkttype_receive()
1244 rt->output(rt, skb); in mctp_pkttype_receive()
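
The ingress path above accepts a header version within [MCTP_VER_MIN, MCTP_VER_MAX], a unicast or null source EID, and a unicast, null or broadcast destination EID before attempting the route lookup. Collected into one illustrative helper (mirroring the checks shown, not the exact kernel control flow):

	static bool example_hdr_sane(const struct mctp_hdr *mh)
	{
		if (mh->ver < MCTP_VER_MIN || mh->ver > MCTP_VER_MAX)
			return false;

		if (!(mctp_address_unicast(mh->src) || mctp_address_null(mh->src)))
			return false;

		return mctp_address_unicast(mh->dest) ||
		       mctp_address_null(mh->dest) ||
		       mctp_address_broadcast(mh->dest);
	}
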
1277 struct net *net = sock_net(skb->sk); in mctp_route_nlparse()
1291 return -EINVAL; in mctp_route_nlparse()
1297 return -EINVAL; in mctp_route_nlparse()
1302 if ((*rtm)->rtm_family != AF_MCTP) { in mctp_route_nlparse()
1304 return -EINVAL; in mctp_route_nlparse()
1310 return -ENODEV; in mctp_route_nlparse()
1314 return -ENODEV; in mctp_route_nlparse()
1316 if (dev->flags & IFF_LOOPBACK) { in mctp_route_nlparse()
1318 return -EINVAL; in mctp_route_nlparse()
1344 if (rtm->rtm_type != RTN_UNICAST) { in mctp_newroute()
1346 return -EINVAL; in mctp_newroute()
1359 rc = mctp_route_add(mdev, daddr_start, rtm->rtm_dst_len, mtu, in mctp_newroute()
1360 rtm->rtm_type); in mctp_newroute()
1379 if (rtm->rtm_type != RTN_UNICAST) in mctp_delroute()
1380 return -EINVAL; in mctp_delroute()
1382 rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len, RTN_UNICAST); in mctp_delroute()
1395 return -EMSGSIZE; in mctp_fill_rtinfo()
1398 hdr->rtm_family = AF_MCTP; in mctp_fill_rtinfo()
1403 hdr->rtm_dst_len = rt->max - rt->min; in mctp_fill_rtinfo()
1404 hdr->rtm_src_len = 0; in mctp_fill_rtinfo()
1405 hdr->rtm_tos = 0; in mctp_fill_rtinfo()
1406 hdr->rtm_table = RT_TABLE_DEFAULT; in mctp_fill_rtinfo()
1407 hdr->rtm_protocol = RTPROT_STATIC; /* everything is user-defined */ in mctp_fill_rtinfo()
1408 hdr->rtm_scope = RT_SCOPE_LINK; /* TODO: scope in mctp_route? */ in mctp_fill_rtinfo()
1409 hdr->rtm_type = rt->type; in mctp_fill_rtinfo()
1411 if (nla_put_u8(skb, RTA_DST, rt->min)) in mctp_fill_rtinfo()
1418 if (rt->mtu) { in mctp_fill_rtinfo()
1419 if (nla_put_u32(skb, RTAX_MTU, rt->mtu)) in mctp_fill_rtinfo()
1425 if (rt->dev) { in mctp_fill_rtinfo()
1426 if (nla_put_u32(skb, RTA_OIF, rt->dev->dev->ifindex)) in mctp_fill_rtinfo()
1438 return -EMSGSIZE; in mctp_fill_rtinfo()
1443 struct net *net = sock_net(skb->sk); in mctp_dump_rtinfo()
1448 * cb->strict_check in mctp_dump_rtinfo()
1452 s_idx = cb->args[0]; in mctp_dump_rtinfo()
1456 list_for_each_entry_rcu(rt, &net->mctp.routes, list) { in mctp_dump_rtinfo()
1460 NETLINK_CB(cb->skb).portid, in mctp_dump_rtinfo()
1461 cb->nlh->nlmsg_seq, in mctp_dump_rtinfo()
1467 cb->args[0] = idx; in mctp_dump_rtinfo()
1469 return skb->len; in mctp_dump_rtinfo()
1475 struct netns_mctp *ns = &net->mctp; in mctp_routes_net_init()
1477 INIT_LIST_HEAD(&ns->routes); in mctp_routes_net_init()
1478 INIT_HLIST_HEAD(&ns->binds); in mctp_routes_net_init()
1479 mutex_init(&ns->bind_lock); in mctp_routes_net_init()
1480 INIT_HLIST_HEAD(&ns->keys); in mctp_routes_net_init()
1481 spin_lock_init(&ns->keys_lock); in mctp_routes_net_init()
1491 list_for_each_entry_rcu(rt, &net->mctp.routes, list) in mctp_routes_net_exit()
1538 #include "test/route-test.c"