Lines matching "echo", "active" and "ms" in drivers/net/gtp.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
35 /* An active session for the subscriber. */
130 head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size]; in gtp0_pdp_find()
133 if (pdp->gtp_version == GTP_V0 && in gtp0_pdp_find()
134 pdp->u.v0.tid == tid) in gtp0_pdp_find()
146 head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size]; in gtp1_pdp_find()
149 if (pdp->gtp_version == GTP_V1 && in gtp1_pdp_find()
150 pdp->u.v1.i_tei == tid) in gtp1_pdp_find()
156 /* Resolve a PDP context based on IPv4 address of MS. */
162 head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size]; in ipv4_pdp_find()
165 if (pdp->af == AF_INET && in ipv4_pdp_find()
166 pdp->ms_addr_ip4.s_addr == ms_addr) in ipv4_pdp_find()
181 iph = (struct iphdr *)(skb->data + hdrlen); in gtp_check_ms_ipv4()
184 return iph->daddr == pctx->ms_addr_ip4.s_addr; in gtp_check_ms_ipv4()
186 return iph->saddr == pctx->ms_addr_ip4.s_addr; in gtp_check_ms_ipv4()
195 switch (ntohs(skb->protocol)) { in gtp_check_ms()
206 netdev_dbg(pctx->dev, "No PDP ctx for this MS\n"); in gtp_rx()
211 if (iptunnel_pull_header(skb, hdrlen, skb->protocol, in gtp_rx()
212 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) { in gtp_rx()
213 pctx->dev->stats.rx_length_errors++; in gtp_rx()
217 netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n"); in gtp_rx()
226 skb->dev = pctx->dev; in gtp_rx()
228 dev_sw_netstats_rx_add(pctx->dev, skb->len); in gtp_rx()
234 pctx->dev->stats.rx_dropped++; in gtp_rx()
235 return -1; in gtp_rx()
243 fl4->flowi4_oif = sk->sk_bound_dev_if; in ip4_route_output_gtp()
244 fl4->daddr = daddr; in ip4_route_output_gtp()
245 fl4->saddr = saddr; in ip4_route_output_gtp()
246 fl4->flowi4_tos = ip_sock_rt_tos(sk); in ip4_route_output_gtp()
247 fl4->flowi4_scope = ip_sock_rt_scope(sk); in ip4_route_output_gtp()
248 fl4->flowi4_proto = sk->sk_protocol; in ip4_route_output_gtp()
255 * - TID: is not used and shall be set to 0.
256 * - Flow Label is not used and shall be set to 0
258 * - number: this field is not yet used in signalling messages.
261 * Returns true if the echo req was correct, false otherwise.
265 return !(gtp0->tid || (gtp0->flags ^ 0x1e) || in gtp0_validate_echo_hdr()
266 gtp0->number != 0xff || gtp0->flow); in gtp0_validate_echo_hdr()
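For context on the checks above: the GTP version 0 header these fields belong to is laid out roughly as sketched below (per GSM TS 09.60; the driver's own definition is authoritative). The echo validation requires tid == 0, flow == 0, number == 0xff and flags == 0x1e (version 0, protocol type GTP rather than GTP').

    /* Sketch of the GTPv0 header used by the echo validation above. */
    struct gtp0_header {
    	__u8	flags;
    	__u8	type;		/* GTP_ECHO_REQ, GTP_ECHO_RSP, GTP_TPDU, ... */
    	__be16	length;
    	__be16	seq;
    	__be16	flow;
    	__u8	number;
    	__u8	spare[3];
    	__be64	tid;
    } __attribute__((packed));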
274 hdr->flags = 0x1e; /* v0, GTP-non-prime. */ in gtp0_build_echo_msg()
275 hdr->type = msg_type; in gtp0_build_echo_msg()
279 hdr->flow = 0; in gtp0_build_echo_msg()
280 hdr->tid = 0; in gtp0_build_echo_msg()
281 hdr->number = 0xff; in gtp0_build_echo_msg()
282 hdr->spare[0] = 0xff; in gtp0_build_echo_msg()
283 hdr->spare[1] = 0xff; in gtp0_build_echo_msg()
284 hdr->spare[2] = 0xff; in gtp0_build_echo_msg()
290 hdr->length = htons(len_pkt - len_hdr); in gtp0_build_echo_msg()
292 hdr->length = 0; in gtp0_build_echo_msg()
304 gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); in gtp0_send_echo_resp()
307 return -1; in gtp0_send_echo_resp()
309 seq = gtp0->seq; in gtp0_send_echo_resp()
317 gtp0_build_echo_msg(&gtp_pkt->gtp0_h, GTP_ECHO_RSP); in gtp0_send_echo_resp()
323 gtp_pkt->gtp0_h.seq = seq; in gtp0_send_echo_resp()
325 gtp_pkt->ie.tag = GTPIE_RECOVERY; in gtp0_send_echo_resp()
326 gtp_pkt->ie.val = gtp->restart_count; in gtp0_send_echo_resp()
333 rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr); in gtp0_send_echo_resp()
335 netdev_dbg(gtp->dev, "no route for echo response from %pI4\n", in gtp0_send_echo_resp()
336 &iph->saddr); in gtp0_send_echo_resp()
337 return -1; in gtp0_send_echo_resp()
340 udp_tunnel_xmit_skb(rt, gtp->sk0, skb, in gtp0_send_echo_resp()
342 iph->tos, in gtp0_send_echo_resp()
343 ip4_dst_hoplimit(&rt->dst), in gtp0_send_echo_resp()
346 !net_eq(sock_net(gtp->sk1u), in gtp0_send_echo_resp()
347 dev_net(gtp->dev)), in gtp0_send_echo_resp()
353 int flags, u32 type, struct echo_info echo) in gtp_genl_fill_echo() argument
362 if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) || in gtp_genl_fill_echo()
363 nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer_addr_ip4.s_addr) || in gtp_genl_fill_echo()
364 nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms_addr_ip4.s_addr)) in gtp_genl_fill_echo()
372 return -EMSGSIZE; in gtp_genl_fill_echo()
378 struct echo_info echo; in gtp0_handle_echo_resp() local
383 gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); in gtp0_handle_echo_resp()
386 return -1; in gtp0_handle_echo_resp()
389 echo.ms_addr_ip4.s_addr = iph->daddr; in gtp0_handle_echo_resp()
390 echo.peer_addr_ip4.s_addr = iph->saddr; in gtp0_handle_echo_resp()
391 echo.gtp_version = GTP_V0; in gtp0_handle_echo_resp()
395 return -ENOMEM; in gtp0_handle_echo_resp()
397 ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo); in gtp0_handle_echo_resp()
403 return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev), in gtp0_handle_echo_resp()
407 /* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
416 return -1; in gtp0_udp_encap_recv()
418 gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); in gtp0_udp_encap_recv()
420 if ((gtp0->flags >> 5) != GTP_V0) in gtp0_udp_encap_recv()
425 * handle echo request. in gtp0_udp_encap_recv()
427 if (gtp0->type == GTP_ECHO_REQ && gtp->sk_created) in gtp0_udp_encap_recv()
430 if (gtp0->type == GTP_ECHO_RSP && gtp->sk_created) in gtp0_udp_encap_recv()
433 if (gtp0->type != GTP_TPDU) in gtp0_udp_encap_recv()
436 pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid)); in gtp0_udp_encap_recv()
438 netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); in gtp0_udp_encap_recv()
442 return gtp_rx(pctx, skb, hdrlen, gtp->role); in gtp0_udp_encap_recv()
451 hdr->flags = 0x32; /* v1, GTP-non-prime. */ in gtp1u_build_echo_msg()
452 hdr->type = msg_type; in gtp1u_build_echo_msg()
453 /* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */ in gtp1u_build_echo_msg()
454 hdr->tid = 0; in gtp1u_build_echo_msg()
465 hdr->length = htons(len_pkt - len_hdr); in gtp1u_build_echo_msg()
471 hdr->length = htons(len_pkt - len_hdr); in gtp1u_build_echo_msg()
483 gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr)); in gtp1u_send_echo_resp()
485 /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response, in gtp1u_send_echo_resp()
489 if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid) in gtp1u_send_echo_resp()
490 return -1; in gtp1u_send_echo_resp()
499 gtp1u_build_echo_msg(&gtp_pkt->gtp1u_h, GTP_ECHO_RSP); in gtp1u_send_echo_resp()
501 /* 3GPP TS 29.281 7.7.2 - The Restart Counter value in the in gtp1u_send_echo_resp()
507 gtp_pkt->ie.tag = GTPIE_RECOVERY; in gtp1u_send_echo_resp()
508 gtp_pkt->ie.val = 0; in gtp1u_send_echo_resp()
515 rt = ip4_route_output_gtp(&fl4, gtp->sk1u, iph->saddr, iph->daddr); in gtp1u_send_echo_resp()
517 netdev_dbg(gtp->dev, "no route for echo response from %pI4\n", in gtp1u_send_echo_resp()
518 &iph->saddr); in gtp1u_send_echo_resp()
519 return -1; in gtp1u_send_echo_resp()
522 udp_tunnel_xmit_skb(rt, gtp->sk1u, skb, in gtp1u_send_echo_resp()
524 iph->tos, in gtp1u_send_echo_resp()
525 ip4_dst_hoplimit(&rt->dst), in gtp1u_send_echo_resp()
528 !net_eq(sock_net(gtp->sk1u), in gtp1u_send_echo_resp()
529 dev_net(gtp->dev)), in gtp1u_send_echo_resp()
537 struct echo_info echo; in gtp1u_handle_echo_resp() local
542 gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr)); in gtp1u_handle_echo_resp()
544 /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response, in gtp1u_handle_echo_resp()
548 if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid) in gtp1u_handle_echo_resp()
549 return -1; in gtp1u_handle_echo_resp()
552 echo.ms_addr_ip4.s_addr = iph->daddr; in gtp1u_handle_echo_resp()
553 echo.peer_addr_ip4.s_addr = iph->saddr; in gtp1u_handle_echo_resp()
554 echo.gtp_version = GTP_V1; in gtp1u_handle_echo_resp()
558 return -ENOMEM; in gtp1u_handle_echo_resp()
560 ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo); in gtp1u_handle_echo_resp()
566 return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev), in gtp1u_handle_echo_resp()
578 return -1; in gtp1u_udp_encap_recv()
580 gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); in gtp1u_udp_encap_recv()
582 if ((gtp1->flags >> 5) != GTP_V1) in gtp1u_udp_encap_recv()
587 * handle echo request. in gtp1u_udp_encap_recv()
589 if (gtp1->type == GTP_ECHO_REQ && gtp->sk_created) in gtp1u_udp_encap_recv()
592 if (gtp1->type == GTP_ECHO_RSP && gtp->sk_created) in gtp1u_udp_encap_recv()
595 if (gtp1->type != GTP_TPDU) in gtp1u_udp_encap_recv()
604 if (gtp1->flags & GTP1_F_MASK) in gtp1u_udp_encap_recv()
609 return -1; in gtp1u_udp_encap_recv()
611 gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); in gtp1u_udp_encap_recv()
613 pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid)); in gtp1u_udp_encap_recv()
615 netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); in gtp1u_udp_encap_recv()
619 return gtp_rx(pctx, skb, hdrlen, gtp->role); in gtp1u_udp_encap_recv()
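The GTPv1-U receive checks above ((flags >> 5) != GTP_V1, GTP1_F_SEQ, GTP1_F_MASK) read the fixed 8-byte GTPv1 header; a sketch of that header and its optional-field flag bits, assuming the usual 3GPP TS 29.060/29.281 layout, follows. When any GTP1_F_MASK bit is set, the receive path grows hdrlen by the 4 extra bytes holding the sequence number, N-PDU number and next-extension-header type, which is why gtp1 is re-read after the second pskb_may_pull().

    /* Sketch of the mandatory GTPv1 header (3GPP TS 29.060): bits 7-5 of
     * flags carry the version, bit 4 the protocol type, and the low bits
     * mark optional fields appended after the mandatory part. */
    #define GTP1_F_NPDU	0x01	/* N-PDU number present */
    #define GTP1_F_SEQ	0x02	/* sequence number present */
    #define GTP1_F_EXTHDR	0x04	/* next extension header present */
    #define GTP1_F_MASK	0x07	/* any optional field present */

    struct gtp1_header {
    	__u8	flags;
    	__u8	type;
    	__be16	length;
    	__be32	tid;	/* TEID */
    } __attribute__((packed));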
627 gtp = sk->sk_user_data; in __gtp_encap_destroy()
629 if (gtp->sk0 == sk) in __gtp_encap_destroy()
630 gtp->sk0 = NULL; in __gtp_encap_destroy()
632 gtp->sk1u = NULL; in __gtp_encap_destroy()
633 WRITE_ONCE(udp_sk(sk)->encap_type, 0); in __gtp_encap_destroy()
659 if (gtp->sk_created) { in gtp_encap_disable()
660 udp_tunnel_sock_release(gtp->sk0->sk_socket); in gtp_encap_disable()
661 udp_tunnel_sock_release(gtp->sk1u->sk_socket); in gtp_encap_disable()
662 gtp->sk_created = false; in gtp_encap_disable()
663 gtp->sk0 = NULL; in gtp_encap_disable()
664 gtp->sk1u = NULL; in gtp_encap_disable()
666 gtp_encap_disable_sock(gtp->sk0); in gtp_encap_disable()
667 gtp_encap_disable_sock(gtp->sk1u); in gtp_encap_disable()
683 netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk); in gtp_encap_recv()
685 switch (READ_ONCE(udp_sk(sk)->encap_type)) { in gtp_encap_recv()
687 netdev_dbg(gtp->dev, "received GTP0 packet\n"); in gtp_encap_recv()
691 netdev_dbg(gtp->dev, "received GTP1U packet\n"); in gtp_encap_recv()
695 ret = -1; /* Shouldn't happen. */ in gtp_encap_recv()
700 netdev_dbg(gtp->dev, "pass up to the process\n"); in gtp_encap_recv()
704 case -1: in gtp_encap_recv()
705 netdev_dbg(gtp->dev, "GTP packet has been dropped\n"); in gtp_encap_recv()
718 gtp->dev = dev; in gtp_dev_init()
720 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); in gtp_dev_init()
721 if (!dev->tstats) in gtp_dev_init()
722 return -ENOMEM; in gtp_dev_init()
732 free_percpu(dev->tstats); in gtp_dev_uninit()
737 int payload_len = skb->len; in gtp0_push_header()
742 gtp0->flags = 0x1e; /* v0, GTP-non-prime. */ in gtp0_push_header()
743 gtp0->type = GTP_TPDU; in gtp0_push_header()
744 gtp0->length = htons(payload_len); in gtp0_push_header()
745 gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff); in gtp0_push_header()
746 gtp0->flow = htons(pctx->u.v0.flow); in gtp0_push_header()
747 gtp0->number = 0xff; in gtp0_push_header()
748 gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0xff; in gtp0_push_header()
749 gtp0->tid = cpu_to_be64(pctx->u.v0.tid); in gtp0_push_header()
754 int payload_len = skb->len; in gtp1_push_header()
760 * +--+--+--+--+--+--+--+--+ in gtp1_push_header()
762 * +--+--+--+--+--+--+--+--+ in gtp1_push_header()
765 gtp1->flags = 0x30; /* v1, GTP-non-prime. */ in gtp1_push_header()
766 gtp1->type = GTP_TPDU; in gtp1_push_header()
767 gtp1->length = htons(payload_len); in gtp1_push_header()
768 gtp1->tid = htonl(pctx->u.v1.o_tei); in gtp1_push_header()
770 /* TODO: Support for extension header, sequence number and N-PDU. in gtp1_push_header()
787 switch (pktinfo->pctx->gtp_version) { in gtp_push_header()
789 pktinfo->gtph_port = htons(GTP0_PORT); in gtp_push_header()
790 gtp0_push_header(skb, pktinfo->pctx); in gtp_push_header()
793 pktinfo->gtph_port = htons(GTP1U_PORT); in gtp_push_header()
794 gtp1_push_header(skb, pktinfo->pctx); in gtp_push_header()
805 pktinfo->sk = sk; in gtp_set_pktinfo_ipv4()
806 pktinfo->iph = iph; in gtp_set_pktinfo_ipv4()
807 pktinfo->pctx = pctx; in gtp_set_pktinfo_ipv4()
808 pktinfo->rt = rt; in gtp_set_pktinfo_ipv4()
809 pktinfo->fl4 = *fl4; in gtp_set_pktinfo_ipv4()
810 pktinfo->dev = dev; in gtp_set_pktinfo_ipv4()
828 if (gtp->role == GTP_ROLE_SGSN) in gtp_build_skb_ip4()
829 pctx = ipv4_pdp_find(gtp, iph->saddr); in gtp_build_skb_ip4()
831 pctx = ipv4_pdp_find(gtp, iph->daddr); in gtp_build_skb_ip4()
835 &iph->daddr); in gtp_build_skb_ip4()
836 return -ENOENT; in gtp_build_skb_ip4()
840 rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr, in gtp_build_skb_ip4()
841 inet_sk(pctx->sk)->inet_saddr); in gtp_build_skb_ip4()
844 &pctx->peer_addr_ip4.s_addr); in gtp_build_skb_ip4()
845 dev->stats.tx_carrier_errors++; in gtp_build_skb_ip4()
849 if (rt->dst.dev == dev) { in gtp_build_skb_ip4()
851 &pctx->peer_addr_ip4.s_addr); in gtp_build_skb_ip4()
852 dev->stats.collisions++; in gtp_build_skb_ip4()
857 df = iph->frag_off; in gtp_build_skb_ip4()
859 mtu = dst_mtu(&rt->dst) - dev->hard_header_len - in gtp_build_skb_ip4()
860 sizeof(struct iphdr) - sizeof(struct udphdr); in gtp_build_skb_ip4()
861 switch (pctx->gtp_version) { in gtp_build_skb_ip4()
863 mtu -= sizeof(struct gtp0_header); in gtp_build_skb_ip4()
866 mtu -= sizeof(struct gtp1_header); in gtp_build_skb_ip4()
870 mtu = dst_mtu(&rt->dst); in gtp_build_skb_ip4()
875 if (iph->frag_off & htons(IP_DF) && in gtp_build_skb_ip4()
876 ((!skb_is_gso(skb) && skb->len > mtu) || in gtp_build_skb_ip4()
884 gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev); in gtp_build_skb_ip4()
891 return -EBADMSG; in gtp_build_skb_ip4()
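A worked example of the MTU arithmetic above, assuming a 1500-byte route MTU and the zero hard_header_len this device sets in gtp_link_setup() (with the usual 20-byte IPv4 and 8-byte UDP headers):

    /* Assuming dst_mtu(&rt->dst) == 1500 and dev->hard_header_len == 0:
     *
     *   GTP_V1: 1500 - 0 - 20 (iphdr) - 8 (udphdr) - 8  (gtp1_header) = 1464
     *   GTP_V0: 1500 - 0 - 20 (iphdr) - 8 (udphdr) - 20 (gtp0_header) = 1452
     *
     * The 1452 figure matches dev->mtu = ETH_DATA_LEN - max_gtp_header_len
     * in gtp_link_setup(), which budgets for the larger GTPv0 header. */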
896 unsigned int proto = ntohs(skb->protocol); in gtp_dev_xmit()
901 if (skb_cow_head(skb, dev->needed_headroom)) in gtp_dev_xmit()
906 /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */ in gtp_dev_xmit()
913 err = -EOPNOTSUPP; in gtp_dev_xmit()
923 netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n", in gtp_dev_xmit()
924 &pktinfo.iph->saddr, &pktinfo.iph->daddr); in gtp_dev_xmit()
927 pktinfo.iph->tos, in gtp_dev_xmit()
928 ip4_dst_hoplimit(&pktinfo.rt->dst), in gtp_dev_xmit()
931 !net_eq(sock_net(pktinfo.pctx->sk), in gtp_dev_xmit()
939 dev->stats.tx_errors++; in gtp_dev_xmit()
961 dev->netdev_ops = &gtp_netdev_ops; in gtp_link_setup()
962 dev->needs_free_netdev = true; in gtp_link_setup()
965 dev->hard_header_len = 0; in gtp_link_setup()
966 dev->addr_len = 0; in gtp_link_setup()
967 dev->mtu = ETH_DATA_LEN - max_gtp_header_len; in gtp_link_setup()
970 dev->type = ARPHRD_NONE; in gtp_link_setup()
971 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; in gtp_link_setup()
973 dev->priv_flags |= IFF_NO_QUEUE; in gtp_link_setup()
974 dev->features |= NETIF_F_LLTX; in gtp_link_setup()
977 dev->needed_headroom = LL_MAX_HEADER + max_gtp_header_len; in gtp_link_setup()
987 kfree(gtp->addr_hash); in gtp_destructor()
988 kfree(gtp->tid_hash); in gtp_destructor()
998 struct net *net = gtp->net; in gtp_create_sock()
1007 return ERR_PTR(-EINVAL); in gtp_create_sock()
1020 return sock->sk; in gtp_create_sock()
1034 udp_tunnel_sock_release(sk0->sk_socket); in gtp_create_sockets()
1038 gtp->sk_created = true; in gtp_create_sockets()
1039 gtp->sk0 = sk0; in gtp_create_sockets()
1040 gtp->sk1u = sk1u; in gtp_create_sockets()
1067 return -EINVAL; in gtp_newlink()
1069 gtp->role = role; in gtp_newlink()
1072 gtp->restart_count = 0; in gtp_newlink()
1074 gtp->restart_count = nla_get_u8(data[IFLA_GTP_RESTART_COUNT]); in gtp_newlink()
1076 gtp->net = src_net; in gtp_newlink()
1096 list_add_rcu(&gtp->list, &gn->gtp_dev_list); in gtp_newlink()
1097 dev->priv_destructor = gtp_destructor; in gtp_newlink()
1106 kfree(gtp->addr_hash); in gtp_newlink()
1107 kfree(gtp->tid_hash); in gtp_newlink()
1117 for (i = 0; i < gtp->hash_size; i++) in gtp_dellink()
1118 hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) in gtp_dellink()
1121 list_del_rcu(&gtp->list); in gtp_dellink()
1138 return -EINVAL; in gtp_validate()
1154 if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size)) in gtp_fill_info()
1156 if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role)) in gtp_fill_info()
1158 if (nla_put_u8(skb, IFLA_GTP_RESTART_COUNT, gtp->restart_count)) in gtp_fill_info()
1164 return -EMSGSIZE; in gtp_fill_info()
1184 gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head), in gtp_hashtable_new()
1186 if (gtp->addr_hash == NULL) in gtp_hashtable_new()
1187 return -ENOMEM; in gtp_hashtable_new()
1189 gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head), in gtp_hashtable_new()
1191 if (gtp->tid_hash == NULL) in gtp_hashtable_new()
1194 gtp->hash_size = hsize; in gtp_hashtable_new()
1197 INIT_HLIST_HEAD(&gtp->addr_hash[i]); in gtp_hashtable_new()
1198 INIT_HLIST_HEAD(&gtp->tid_hash[i]); in gtp_hashtable_new()
1202 kfree(gtp->addr_hash); in gtp_hashtable_new()
1203 return -ENOMEM; in gtp_hashtable_new()
1222 sk = sock->sk; in gtp_encap_enable_socket()
1223 if (sk->sk_protocol != IPPROTO_UDP || in gtp_encap_enable_socket()
1224 sk->sk_type != SOCK_DGRAM || in gtp_encap_enable_socket()
1225 (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) { in gtp_encap_enable_socket()
1227 sk = ERR_PTR(-EINVAL); in gtp_encap_enable_socket()
1232 if (sk->sk_user_data) { in gtp_encap_enable_socket()
1233 sk = ERR_PTR(-EBUSY); in gtp_encap_enable_socket()
1244 setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg); in gtp_encap_enable_socket()
1247 release_sock(sock->sk); in gtp_encap_enable_socket()
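The elided body of gtp_encap_enable_socket() binds the UDP socket to the receive path shown earlier via the udp_tunnel API; a sketch of that configuration (field names from struct udp_tunnel_sock_cfg, values as assumed here) looks like:

    /* Sketch: wire the socket so UDP hands GTP-encapsulated packets to
     * gtp_encap_recv(), whose return values (1 pass up, 0 decapsulated,
     * -1 drop, per the comment above gtp0_udp_encap_recv()) drive the
     * encap receive path. */
    struct udp_tunnel_sock_cfg tuncfg = {
    	.sk_user_data	= gtp,			/* read back via sk->sk_user_data on rx */
    	.encap_type	= type,			/* UDP_ENCAP_GTP0 or UDP_ENCAP_GTP1U */
    	.encap_rcv	= gtp_encap_recv,
    	.encap_destroy	= gtp_encap_destroy,
    };
    setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);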
1259 return -EINVAL; in gtp_encap_enable()
1279 gtp->sk0 = sk0; in gtp_encap_enable()
1280 gtp->sk1u = sk1u; in gtp_encap_enable()
1304 if (dev && dev->netdev_ops == &gtp_netdev_ops) in gtp_find_dev()
1313 pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]); in ipv4_pdp_fill()
1314 pctx->af = AF_INET; in ipv4_pdp_fill()
1315 pctx->peer_addr_ip4.s_addr = in ipv4_pdp_fill()
1316 nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]); in ipv4_pdp_fill()
1317 pctx->ms_addr_ip4.s_addr = in ipv4_pdp_fill()
1318 nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); in ipv4_pdp_fill()
1320 switch (pctx->gtp_version) { in ipv4_pdp_fill()
1326 pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]); in ipv4_pdp_fill()
1327 pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]); in ipv4_pdp_fill()
1330 pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]); in ipv4_pdp_fill()
1331 pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]); in ipv4_pdp_fill()
1342 struct net_device *dev = gtp->dev; in gtp_pdp_add()
1348 ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); in gtp_pdp_add()
1349 hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size; in gtp_pdp_add()
1350 version = nla_get_u32(info->attrs[GTPA_VERSION]); in gtp_pdp_add()
1357 nla_get_u64(info->attrs[GTPA_TID])); in gtp_pdp_add()
1360 nla_get_u32(info->attrs[GTPA_I_TEI])); in gtp_pdp_add()
1365 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) in gtp_pdp_add()
1366 return ERR_PTR(-EEXIST); in gtp_pdp_add()
1367 if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE) in gtp_pdp_add()
1368 return ERR_PTR(-EOPNOTSUPP); in gtp_pdp_add()
1371 return ERR_PTR(-EEXIST); in gtp_pdp_add()
1377 if (pctx->gtp_version == GTP_V0) in gtp_pdp_add()
1378 netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n", in gtp_pdp_add()
1379 pctx->u.v0.tid, pctx); in gtp_pdp_add()
1380 else if (pctx->gtp_version == GTP_V1) in gtp_pdp_add()
1381 netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n", in gtp_pdp_add()
1382 pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx); in gtp_pdp_add()
1390 return ERR_PTR(-ENOMEM); in gtp_pdp_add()
1393 pctx->sk = sk; in gtp_pdp_add()
1394 pctx->dev = gtp->dev; in gtp_pdp_add()
1396 atomic_set(&pctx->tx_seq, 0); in gtp_pdp_add()
1398 switch (pctx->gtp_version) { in gtp_pdp_add()
1405 hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size; in gtp_pdp_add()
1408 hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size; in gtp_pdp_add()
1412 hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]); in gtp_pdp_add()
1413 hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]); in gtp_pdp_add()
1415 switch (pctx->gtp_version) { in gtp_pdp_add()
1417 netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p)\n", in gtp_pdp_add()
1418 pctx->u.v0.tid, &pctx->peer_addr_ip4, in gtp_pdp_add()
1419 &pctx->ms_addr_ip4, pctx); in gtp_pdp_add()
1422 netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p)\n", in gtp_pdp_add()
1423 pctx->u.v1.i_tei, pctx->u.v1.o_tei, in gtp_pdp_add()
1424 &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx); in gtp_pdp_add()
1435 sock_put(pctx->sk); in pdp_context_free()
1441 hlist_del_rcu(&pctx->hlist_tid); in pdp_context_delete()
1442 hlist_del_rcu(&pctx->hlist_addr); in pdp_context_delete()
1443 call_rcu(&pctx->rcu_head, pdp_context_free); in pdp_context_delete()
1456 if (!info->attrs[GTPA_VERSION] || in gtp_genl_new_pdp()
1457 !info->attrs[GTPA_LINK] || in gtp_genl_new_pdp()
1458 !info->attrs[GTPA_PEER_ADDRESS] || in gtp_genl_new_pdp()
1459 !info->attrs[GTPA_MS_ADDRESS]) in gtp_genl_new_pdp()
1460 return -EINVAL; in gtp_genl_new_pdp()
1462 version = nla_get_u32(info->attrs[GTPA_VERSION]); in gtp_genl_new_pdp()
1466 if (!info->attrs[GTPA_TID] || in gtp_genl_new_pdp()
1467 !info->attrs[GTPA_FLOW]) in gtp_genl_new_pdp()
1468 return -EINVAL; in gtp_genl_new_pdp()
1471 if (!info->attrs[GTPA_I_TEI] || in gtp_genl_new_pdp()
1472 !info->attrs[GTPA_O_TEI]) in gtp_genl_new_pdp()
1473 return -EINVAL; in gtp_genl_new_pdp()
1477 return -EINVAL; in gtp_genl_new_pdp()
1482 gtp = gtp_find_dev(sock_net(skb->sk), info->attrs); in gtp_genl_new_pdp()
1484 err = -ENODEV; in gtp_genl_new_pdp()
1489 sk = gtp->sk0; in gtp_genl_new_pdp()
1491 sk = gtp->sk1u; in gtp_genl_new_pdp()
1496 err = -ENODEV; in gtp_genl_new_pdp()
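As the attribute checks above imply, a GTP_V1 new-PDP request must carry the version, the gtp device link, the peer and MS addresses, and both TEIDs. The helper below is hypothetical (gtp_put_v1_pdp_attrs is not a driver or libgtpnl function); it only sketches that attribute set, written with the same nla_put_* writers the driver uses when filling replies.

    /* Hypothetical illustration of the mandatory GTPA_* attributes for a
     * GTP_V1 context; a GTP_V0 context would carry GTPA_TID and GTPA_FLOW
     * instead of the two TEIDs. */
    static int gtp_put_v1_pdp_attrs(struct sk_buff *msg, u32 gtp_ifindex,
    				__be32 peer_addr, __be32 ms_addr,
    				u32 i_tei, u32 o_tei)
    {
    	if (nla_put_u32(msg, GTPA_VERSION, GTP_V1) ||
    	    nla_put_u32(msg, GTPA_LINK, gtp_ifindex) ||
    	    nla_put_be32(msg, GTPA_PEER_ADDRESS, peer_addr) ||
    	    nla_put_be32(msg, GTPA_MS_ADDRESS, ms_addr) ||
    	    nla_put_u32(msg, GTPA_I_TEI, i_tei) ||
    	    nla_put_u32(msg, GTPA_O_TEI, o_tei))
    		return -EMSGSIZE;
    	return 0;
    }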
1520 return ERR_PTR(-ENODEV); in gtp_find_pdp_by_link()
1535 return ERR_PTR(-EINVAL); in gtp_find_pdp_by_link()
1545 pctx = ERR_PTR(-EINVAL); in gtp_find_pdp()
1548 pctx = ERR_PTR(-ENOENT); in gtp_find_pdp()
1558 if (!info->attrs[GTPA_VERSION]) in gtp_genl_del_pdp()
1559 return -EINVAL; in gtp_genl_del_pdp()
1563 pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs); in gtp_genl_del_pdp()
1569 if (pctx->gtp_version == GTP_V0) in gtp_genl_del_pdp()
1570 netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n", in gtp_genl_del_pdp()
1571 pctx->u.v0.tid, pctx); in gtp_genl_del_pdp()
1572 else if (pctx->gtp_version == GTP_V1) in gtp_genl_del_pdp()
1573 netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n", in gtp_genl_del_pdp()
1574 pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx); in gtp_genl_del_pdp()
1594 if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) || in gtp_genl_fill_info()
1595 nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) || in gtp_genl_fill_info()
1596 nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) || in gtp_genl_fill_info()
1597 nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr)) in gtp_genl_fill_info()
1600 switch (pctx->gtp_version) { in gtp_genl_fill_info()
1602 if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) || in gtp_genl_fill_info()
1603 nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow)) in gtp_genl_fill_info()
1607 if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) || in gtp_genl_fill_info()
1608 nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei)) in gtp_genl_fill_info()
1618 return -EMSGSIZE; in gtp_genl_fill_info()
1628 return -ENOMEM; in gtp_tunnel_notify()
1636 ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg, in gtp_tunnel_notify()
1647 if (!info->attrs[GTPA_VERSION]) in gtp_genl_get_pdp()
1648 return -EINVAL; in gtp_genl_get_pdp()
1652 pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs); in gtp_genl_get_pdp()
1660 err = -ENOMEM; in gtp_genl_get_pdp()
1664 err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq, in gtp_genl_get_pdp()
1665 0, info->nlhdr->nlmsg_type, pctx); in gtp_genl_get_pdp()
1670 return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid); in gtp_genl_get_pdp()
1682 struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp; in gtp_genl_dump_pdp()
1683 int i, j, bucket = cb->args[0], skip = cb->args[1]; in gtp_genl_dump_pdp()
1684 struct net *net = sock_net(skb->sk); in gtp_genl_dump_pdp()
1690 if (cb->args[4]) in gtp_genl_dump_pdp()
1694 list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) { in gtp_genl_dump_pdp()
1700 for (i = bucket; i < gtp->hash_size; i++) { in gtp_genl_dump_pdp()
1702 hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], in gtp_genl_dump_pdp()
1706 NETLINK_CB(cb->skb).portid, in gtp_genl_dump_pdp()
1707 cb->nlh->nlmsg_seq, in gtp_genl_dump_pdp()
1709 cb->nlh->nlmsg_type, pctx)) { in gtp_genl_dump_pdp()
1710 cb->args[0] = i; in gtp_genl_dump_pdp()
1711 cb->args[1] = j; in gtp_genl_dump_pdp()
1712 cb->args[2] = (unsigned long)gtp; in gtp_genl_dump_pdp()
1721 cb->args[4] = 1; in gtp_genl_dump_pdp()
1724 return skb->len; in gtp_genl_dump_pdp()
1739 if (!info->attrs[GTPA_VERSION] || in gtp_genl_send_echo_req()
1740 !info->attrs[GTPA_LINK] || in gtp_genl_send_echo_req()
1741 !info->attrs[GTPA_PEER_ADDRESS] || in gtp_genl_send_echo_req()
1742 !info->attrs[GTPA_MS_ADDRESS]) in gtp_genl_send_echo_req()
1743 return -EINVAL; in gtp_genl_send_echo_req()
1745 version = nla_get_u32(info->attrs[GTPA_VERSION]); in gtp_genl_send_echo_req()
1746 dst_ip = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]); in gtp_genl_send_echo_req()
1747 src_ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); in gtp_genl_send_echo_req()
1749 gtp = gtp_find_dev(sock_net(skb->sk), info->attrs); in gtp_genl_send_echo_req()
1751 return -ENODEV; in gtp_genl_send_echo_req()
1753 if (!gtp->sk_created) in gtp_genl_send_echo_req()
1754 return -EOPNOTSUPP; in gtp_genl_send_echo_req()
1755 if (!(gtp->dev->flags & IFF_UP)) in gtp_genl_send_echo_req()
1756 return -ENETDOWN; in gtp_genl_send_echo_req()
1761 len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp0_header) + in gtp_genl_send_echo_req()
1764 skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len); in gtp_genl_send_echo_req()
1766 return -ENOMEM; in gtp_genl_send_echo_req()
1768 sk = gtp->sk0; in gtp_genl_send_echo_req()
1777 len = LL_RESERVED_SPACE(gtp->dev) + in gtp_genl_send_echo_req()
1781 skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len); in gtp_genl_send_echo_req()
1783 return -ENOMEM; in gtp_genl_send_echo_req()
1785 sk = gtp->sk1u; in gtp_genl_send_echo_req()
1793 return -ENODEV; in gtp_genl_send_echo_req()
1798 netdev_dbg(gtp->dev, "no route for echo request to %pI4\n", in gtp_genl_send_echo_req()
1801 return -ENODEV; in gtp_genl_send_echo_req()
1807 ip4_dst_hoplimit(&rt->dst), in gtp_genl_send_echo_req()
1811 dev_net(gtp->dev)), in gtp_genl_send_echo_req()
1875 INIT_LIST_HEAD(&gn->gtp_dev_list); in gtp_net_init()
1886 list_for_each_entry(gtp, &gn->gtp_dev_list, list) in gtp_net_exit()
1887 gtp_dellink(gtp->dev, &list); in gtp_net_exit()