
1 // SPDX-License-Identifier: GPL-2.0-or-later
21 * : add ip6_append_data and related functions
38 #include <linux/bpf-cgroup.h>
63 struct net_device *dev = dst->dev; in ip6_finish_output2()
72 if (unlikely(hh_len > skb_headroom(skb)) && dev->header_ops) { in ip6_finish_output2()
76 return -ENOMEM; in ip6_finish_output2()
81 daddr = &hdr->daddr; in ip6_finish_output2()
83 if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) && in ip6_finish_output2()
85 !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || in ip6_finish_output2()
86 ipv6_chk_mcast_addr(dev, daddr, &hdr->saddr))) { in ip6_finish_output2()
94 net, sk, newskb, NULL, newskb->dev, in ip6_finish_output2()
97 if (hdr->hop_limit == 0) { in ip6_finish_output2()
105 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len); in ip6_finish_output2()
107 !(dev->flags & IFF_LOOPBACK)) { in ip6_finish_output2()
113 if (lwtunnel_xmit_redirect(dst->lwtstate)) { in ip6_finish_output2()
120 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); in ip6_finish_output2()
133 return -EINVAL; in ip6_finish_output2()
158 return -ENOMEM; in ip6_finish_output_gso_slowpath_drop()
169 * which is considered harmful (RFC-8021). Avoid that. in ip6_finish_output_gso_slowpath_drop()
171 err = segs->len > mtu ? in ip6_finish_output_gso_slowpath_drop()
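For context, a paraphrased sketch of the per-segment loop that surrounds this decision in ip6_finish_output_gso_slowpath_drop (reconstructed from the upstream source, not quoted verbatim; the list-walking helper has varied across kernel versions). Each GSO segment is unlinked and sent on its own, and only a segment that still exceeds the MTU is passed through ip6_fragment, so the short final segment never receives a needless fragment header:

	/* Locals (net, sk, mtu, segs, nskb, err, ret) come from the enclosing
	 * function; ret keeps the first error without stopping the loop.
	 */
	skb_list_walk_safe(segs, segs, nskb) {
		skb_mark_not_on_list(segs);

		err = segs->len > mtu ?
			ip6_fragment(net, sk, segs, ip6_finish_output2) :
			ip6_finish_output2(net, sk, segs);
		if (err && ret == 0)
			ret = err;
	}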
184 if (!(IP6CB(skb)->flags & IP6SKB_FAKEJUMBO) && in ip6_finish_output_gso()
197 if (skb_dst(skb)->xfrm) { in __ip6_finish_output()
198 IP6CB(skb)->flags |= IP6SKB_REROUTED; in __ip6_finish_output()
207 if (skb->len > mtu || in __ip6_finish_output()
208 (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size)) in __ip6_finish_output()
231 struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev; in ip6_output()
234 skb->protocol = htons(ETH_P_IPV6); in ip6_output()
235 skb->dev = dev; in ip6_output()
237 if (unlikely(idev->cnf.disable_ipv6)) { in ip6_output()
246 !(IP6CB(skb)->flags & IP6SKB_REROUTED)); in ip6_output()
268 struct in6_addr *first_hop = &fl6->daddr; in ip6_xmit()
270 struct net_device *dev = dst->dev; in ip6_xmit()
276 u8 proto = fl6->flowi6_proto; in ip6_xmit()
277 int seg_len = skb->len; in ip6_xmit()
278 int hlimit = -1; in ip6_xmit()
283 head_room += opt->opt_nflen + opt->opt_flen; in ip6_xmit()
289 return -ENOBUFS; in ip6_xmit()
294 seg_len += opt->opt_nflen + opt->opt_flen; in ip6_xmit()
296 if (opt->opt_flen) in ip6_xmit()
299 if (opt->opt_nflen) in ip6_xmit()
301 &fl6->saddr); in ip6_xmit()
307 hop_jumbo->nexthdr = proto; in ip6_xmit()
308 hop_jumbo->hdrlen = 0; in ip6_xmit()
309 hop_jumbo->tlv_type = IPV6_TLV_JUMBO; in ip6_xmit()
310 hop_jumbo->tlv_len = 4; in ip6_xmit()
311 hop_jumbo->jumbo_payload_len = htonl(seg_len + hoplen); in ip6_xmit()
315 IP6CB(skb)->flags |= IP6SKB_FAKEJUMBO; in ip6_xmit()
326 hlimit = READ_ONCE(np->hop_limit); in ip6_xmit()
330 ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel, in ip6_xmit()
333 hdr->payload_len = htons(seg_len); in ip6_xmit()
334 hdr->nexthdr = proto; in ip6_xmit()
335 hdr->hop_limit = hlimit; in ip6_xmit()
337 hdr->saddr = fl6->saddr; in ip6_xmit()
338 hdr->daddr = *first_hop; in ip6_xmit()
340 skb->protocol = htons(ETH_P_IPV6); in ip6_xmit()
341 skb->priority = priority; in ip6_xmit()
342 skb->mark = mark; in ip6_xmit()
345 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) { in ip6_xmit()
363 skb->dev = dev; in ip6_xmit()
371 return -EMSGSIZE; in ip6_xmit()
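A hedged caller-side sketch (hypothetical helper, loosely modelled on how TCP-style code hands a fully built packet to ip6_xmit; every identifier below that is not in ip6_output.c is an assumption for illustration): the caller attaches the resolved route to the skb, fills a flowi6 describing the packet, and lets ip6_xmit push the IPv6 header plus any extension headers before the packet goes through NF_INET_LOCAL_OUT.

/* Hypothetical example_xmit_one(): transmit one packet on an already-resolved
 * route.  "dst", "saddr", "daddr", "opt" and "tclass" are assumed to come
 * from the caller's connection state.
 */
static int example_xmit_one(struct sock *sk, struct sk_buff *skb,
			    struct dst_entry *dst,
			    const struct in6_addr *saddr,
			    const struct in6_addr *daddr,
			    struct ipv6_txoptions *opt, int tclass)
{
	struct flowi6 fl6 = {
		.flowi6_proto = IPPROTO_TCP,
		.saddr        = *saddr,
		.daddr        = *daddr,
		.flowi6_oif   = sk->sk_bound_dev_if,
	};

	skb_dst_set(skb, dst_clone(dst));
	return ip6_xmit(sk, skb, &fl6, READ_ONCE(sk->sk_mark), opt,
			tclass, READ_ONCE(sk->sk_priority));
}

As the lines above show, ip6_xmit drops oversize packets and returns -EMSGSIZE, so a caller must be prepared for a negative return even after a successful route lookup.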
381 for (ra = ip6_ra_chain; ra; ra = ra->next) { in ip6_call_ra_chain()
382 struct sock *sk = ra->sk; in ip6_call_ra_chain()
383 if (sk && ra->sel == sel && in ip6_call_ra_chain()
384 (!sk->sk_bound_dev_if || in ip6_call_ra_chain()
385 sk->sk_bound_dev_if == skb->dev->ifindex)) { in ip6_call_ra_chain()
388 !net_eq(sock_net(sk), dev_net(skb->dev))) { in ip6_call_ra_chain()
412 u8 nexthdr = hdr->nexthdr; in ip6_forward_proxy_check()
427 offset + 1 - skb->data))) in ip6_forward_proxy_check()
432 switch (icmp6->icmp6_type) { in ip6_forward_proxy_check()
449 * The proxying router can't forward traffic sent to a link-local in ip6_forward_proxy_check()
453 if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) { in ip6_forward_proxy_check()
455 return -1; in ip6_forward_proxy_check()
465 if (skb->offload_l3_fwd_mark) { in ip6_forward_finish()
477 if (skb->len <= mtu) in ip6_pkt_too_big()
480 /* ipv6 conntrack defrag sets max_frag_size + ignore_df */ in ip6_pkt_too_big()
481 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu) in ip6_pkt_too_big()
484 if (skb->ignore_df) in ip6_pkt_too_big()
498 struct net *net = dev_net(dst->dev); in ip6_forward()
503 idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif)); in ip6_forward()
504 if (net->ipv6.devconf_all->forwarding == 0) in ip6_forward()
507 if (skb->pkt_type != PACKET_HOST) in ip6_forward()
510 if (unlikely(skb->sk)) in ip6_forward()
516 if (!net->ipv6.devconf_all->disable_policy && in ip6_forward()
517 (!idev || !idev->cnf.disable_policy) && in ip6_forward()
532 * We are not end-node, so that if packet contains in ip6_forward()
536 * that different fragments will go along one path. --ANK in ip6_forward()
538 if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) { in ip6_forward()
539 if (ip6_call_ra_chain(skb, ntohs(opt->ra))) in ip6_forward()
546 if (hdr->hop_limit <= 1) { in ip6_forward()
551 return -ETIMEDOUT; in ip6_forward()
554 /* XXX: idev->cnf.proxy_ndp? */ in ip6_forward()
555 if (net->ipv6.devconf_all->proxy_ndp && in ip6_forward()
556 pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) { in ip6_forward()
591 if (IP6CB(skb)->iif == dst->dev->ifindex && in ip6_forward()
592 opt->srcrt == 0 && !skb_sec_path(skb)) { in ip6_forward()
603 if (rt->rt6i_flags & RTF_GATEWAY) in ip6_forward()
604 target = &rt->rt6i_gateway; in ip6_forward()
606 target = &hdr->daddr; in ip6_forward()
608 peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1); in ip6_forward()
618 int addrtype = ipv6_addr_type(&hdr->saddr); in ip6_forward()
639 skb->dev = dst->dev; in ip6_forward()
645 return -EMSGSIZE; in ip6_forward()
648 if (skb_cow(skb, dst->dev->hard_header_len)) { in ip6_forward()
658 hdr->hop_limit--; in ip6_forward()
661 net, NULL, skb, skb->dev, dst->dev, in ip6_forward()
669 return -EINVAL; in ip6_forward()
674 to->pkt_type = from->pkt_type; in ip6_copy_metadata()
675 to->priority = from->priority; in ip6_copy_metadata()
676 to->protocol = from->protocol; in ip6_copy_metadata()
679 to->dev = from->dev; in ip6_copy_metadata()
680 to->mark = from->mark; in ip6_copy_metadata()
685 to->tc_index = from->tc_index; in ip6_copy_metadata()
701 iter->tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); in ip6_fraglist_init()
702 if (!iter->tmp_hdr) in ip6_fraglist_init()
703 return -ENOMEM; in ip6_fraglist_init()
705 iter->frag = skb_shinfo(skb)->frag_list; in ip6_fraglist_init()
708 iter->offset = 0; in ip6_fraglist_init()
709 iter->hlen = hlen; in ip6_fraglist_init()
710 iter->frag_id = frag_id; in ip6_fraglist_init()
711 iter->nexthdr = nexthdr; in ip6_fraglist_init()
717 memcpy(skb_network_header(skb), iter->tmp_hdr, hlen); in ip6_fraglist_init()
719 fh->nexthdr = nexthdr; in ip6_fraglist_init()
720 fh->reserved = 0; in ip6_fraglist_init()
721 fh->frag_off = htons(IP6_MF); in ip6_fraglist_init()
722 fh->identification = frag_id; in ip6_fraglist_init()
725 skb->data_len = first_len - skb_headlen(skb); in ip6_fraglist_init()
726 skb->len = first_len; in ip6_fraglist_init()
727 ipv6_hdr(skb)->payload_len = htons(first_len - sizeof(struct ipv6hdr)); in ip6_fraglist_init()
736 struct sk_buff *frag = iter->frag; in ip6_fraglist_prepare()
737 unsigned int hlen = iter->hlen; in ip6_fraglist_prepare()
740 frag->ip_summed = CHECKSUM_NONE; in ip6_fraglist_prepare()
745 memcpy(skb_network_header(frag), iter->tmp_hdr, hlen); in ip6_fraglist_prepare()
746 iter->offset += skb->len - hlen - sizeof(struct frag_hdr); in ip6_fraglist_prepare()
747 fh->nexthdr = iter->nexthdr; in ip6_fraglist_prepare()
748 fh->reserved = 0; in ip6_fraglist_prepare()
749 fh->frag_off = htons(iter->offset); in ip6_fraglist_prepare()
750 if (frag->next) in ip6_fraglist_prepare()
751 fh->frag_off |= htons(IP6_MF); in ip6_fraglist_prepare()
752 fh->identification = iter->frag_id; in ip6_fraglist_prepare()
753 ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); in ip6_fraglist_prepare()
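The iterator set up by ip6_fraglist_init and refreshed by ip6_fraglist_prepare is driven from ip6_fragment's fast path. A paraphrased sketch of that loop (close to, but not a verbatim quote of, the caller later in this file; hlen, prevhdr, nexthdr, frag_id and output come from ip6_fragment):

	struct ip6_fraglist_iter iter;
	int err;

	err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id, &iter);
	if (err < 0)
		return err;	/* the real caller falls back to the slow path */

	for (;;) {
		/* Prepare the next fragment's headers before the current one
		 * goes down the stack.
		 */
		if (iter.frag)
			ip6_fraglist_prepare(skb, &iter);

		err = output(net, sk, skb);
		if (err || !iter.frag)
			break;

		skb = ip6_fraglist_next(&iter);
	}
	kfree(iter.tmp_hdr);	/* ip6_fraglist_init() duplicated the headers */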
762 state->prevhdr = prevhdr; in ip6_frag_init()
763 state->nexthdr = nexthdr; in ip6_frag_init()
764 state->frag_id = frag_id; in ip6_frag_init()
766 state->hlen = hlen; in ip6_frag_init()
767 state->mtu = mtu; in ip6_frag_init()
769 state->left = skb->len - hlen; /* Space per frame */ in ip6_frag_init()
770 state->ptr = hlen; /* Where to start from */ in ip6_frag_init()
772 state->hroom = hdr_room; in ip6_frag_init()
773 state->troom = needed_tailroom; in ip6_frag_init()
775 state->offset = 0; in ip6_frag_init()
781 u8 *prevhdr = state->prevhdr, *fragnexthdr_offset; in ip6_frag_next()
786 len = state->left; in ip6_frag_next()
787 /* IF: it doesn't fit, use 'mtu' - the data space left */ in ip6_frag_next()
788 if (len > state->mtu) in ip6_frag_next()
789 len = state->mtu; in ip6_frag_next()
792 if (len < state->left) in ip6_frag_next()
796 frag = alloc_skb(len + state->hlen + sizeof(struct frag_hdr) + in ip6_frag_next()
797 state->hroom + state->troom, GFP_ATOMIC); in ip6_frag_next()
799 return ERR_PTR(-ENOMEM); in ip6_frag_next()
806 skb_reserve(frag, state->hroom); in ip6_frag_next()
807 skb_put(frag, len + state->hlen + sizeof(struct frag_hdr)); in ip6_frag_next()
809 fh = (struct frag_hdr *)(skb_network_header(frag) + state->hlen); in ip6_frag_next()
810 frag->transport_header = (frag->network_header + state->hlen + in ip6_frag_next()
817 if (skb->sk) in ip6_frag_next()
818 skb_set_owner_w(frag, skb->sk); in ip6_frag_next()
823 skb_copy_from_linear_data(skb, skb_network_header(frag), state->hlen); in ip6_frag_next()
826 fragnexthdr_offset += prevhdr - skb_network_header(skb); in ip6_frag_next()
832 fh->nexthdr = state->nexthdr; in ip6_frag_next()
833 fh->reserved = 0; in ip6_frag_next()
834 fh->identification = state->frag_id; in ip6_frag_next()
839 BUG_ON(skb_copy_bits(skb, state->ptr, skb_transport_header(frag), in ip6_frag_next()
841 state->left -= len; in ip6_frag_next()
843 fh->frag_off = htons(state->offset); in ip6_frag_next()
844 if (state->left > 0) in ip6_frag_next()
845 fh->frag_off |= htons(IP6_MF); in ip6_frag_next()
846 ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); in ip6_frag_next()
848 state->ptr += len; in ip6_frag_next()
849 state->offset += len; in ip6_frag_next()
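ip6_frag_init and ip6_frag_next form the slow-path counterpart: the state records how much payload is left and where the next fragment starts, and the caller keeps asking for the next fragment until nothing remains. A paraphrased sketch of how ip6_fragment drives it (error handling and statistics trimmed):

	struct ip6_frag_state state;
	struct sk_buff *frag;
	int err = 0;

	ip6_frag_init(skb, hlen, mtu, rt->dst.dev->needed_tailroom,
		      LL_RESERVED_SPACE(rt->dst.dev), prevhdr, nexthdr,
		      frag_id, &state);

	/* Carve off one MTU-sized fragment at a time and send it. */
	while (state.left > 0) {
		frag = ip6_frag_next(skb, &state);
		if (IS_ERR(frag)) {
			err = PTR_ERR(frag);
			break;
		}

		err = output(net, sk, frag);
		if (err)
			break;
	}

	if (err)
		kfree_skb(skb);
	else
		consume_skb(skb);	/* all payload was copied into fragments */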
860 struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? in ip6_fragment()
861 inet6_sk(skb->sk) : NULL; in ip6_fragment()
862 bool mono_delivery_time = skb->mono_delivery_time; in ip6_fragment()
865 ktime_t tstamp = skb->tstamp; in ip6_fragment()
875 nexthdr_offset = prevhdr - skb_network_header(skb); in ip6_fragment()
882 if (unlikely(!skb->ignore_df && skb->len > mtu)) in ip6_fragment()
885 if (IP6CB(skb)->frag_max_size) { in ip6_fragment()
886 if (IP6CB(skb)->frag_max_size > mtu) in ip6_fragment()
890 mtu = IP6CB(skb)->frag_max_size; in ip6_fragment()
896 u32 frag_size = READ_ONCE(np->frag_size); in ip6_fragment()
903 mtu -= hlen + sizeof(struct frag_hdr); in ip6_fragment()
905 frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr, in ip6_fragment()
906 &ipv6_hdr(skb)->saddr); in ip6_fragment()
908 if (skb->ip_summed == CHECKSUM_PARTIAL && in ip6_fragment()
913 hroom = LL_RESERVED_SPACE(rt->dst.dev); in ip6_fragment()
919 if (first_len - hlen > mtu || in ip6_fragment()
920 ((first_len - hlen) & 7) || in ip6_fragment()
927 if (frag->len > mtu || in ip6_fragment()
928 ((frag->len & 7) && frag->next) || in ip6_fragment()
936 BUG_ON(frag->sk); in ip6_fragment()
937 if (skb->sk) { in ip6_fragment()
938 frag->sk = skb->sk; in ip6_fragment()
939 frag->destructor = sock_wfree; in ip6_fragment()
941 skb->truesize -= frag->truesize; in ip6_fragment()
961 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), in ip6_fragment()
973 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), in ip6_fragment()
981 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), in ip6_fragment()
990 frag2->sk = NULL; in ip6_fragment()
991 frag2->destructor = NULL; in ip6_fragment()
992 skb->truesize += frag2->truesize; in ip6_fragment()
1001 ip6_frag_init(skb, hlen, mtu, rt->dst.dev->needed_tailroom, in ip6_fragment()
1002 LL_RESERVED_SPACE(rt->dst.dev), prevhdr, nexthdr, frag_id, in ip6_fragment()
1034 err = -EMSGSIZE; in ip6_fragment()
1047 return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) && in ip6_rt_check()
1061 if (dst->ops->family != AF_INET6) { in ip6_sk_dst_check()
1070 * and MSG_DONTROUTE --ANK (980726) in ip6_sk_dst_check()
1084 if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) || in ip6_sk_dst_check()
1086 ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) || in ip6_sk_dst_check()
1088 (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) { in ip6_sk_dst_check()
1109 * the route-specific preferred source forces the in ip6_dst_lookup_tail()
1116 if (ipv6_addr_any(&fl6->saddr)) { in ip6_dst_lookup_tail()
1121 rt = (*dst)->error ? NULL : (struct rt6_info *)*dst; in ip6_dst_lookup_tail()
1124 from = rt ? rcu_dereference(rt->from) : NULL; in ip6_dst_lookup_tail()
1125 err = ip6_route_get_saddr(net, from, &fl6->daddr, in ip6_dst_lookup_tail()
1126 sk ? READ_ONCE(inet6_sk(sk)->srcprefs) : 0, in ip6_dst_lookup_tail()
1127 &fl6->saddr); in ip6_dst_lookup_tail()
1134 * never existed and let the SA-enabled version take in ip6_dst_lookup_tail()
1137 if ((*dst)->error) { in ip6_dst_lookup_tail()
1142 if (fl6->flowi6_oif) in ip6_dst_lookup_tail()
1149 err = (*dst)->error; in ip6_dst_lookup_tail()
1164 n = __ipv6_neigh_lookup_noref(rt->dst.dev, in ip6_dst_lookup_tail()
1165 rt6_nexthop(rt, &fl6->daddr)); in ip6_dst_lookup_tail()
1166 err = n && !(READ_ONCE(n->nud_state) & NUD_VALID) ? -EINVAL : 0; in ip6_dst_lookup_tail()
1174 ifp = ipv6_get_ifaddr(net, &fl6->saddr, in ip6_dst_lookup_tail()
1175 (*dst)->dev, 1); in ip6_dst_lookup_tail()
1177 redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC); in ip6_dst_lookup_tail()
1190 err = (*dst)->error; in ip6_dst_lookup_tail()
1196 if (ipv6_addr_v4mapped(&fl6->saddr) && in ip6_dst_lookup_tail()
1197 !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) { in ip6_dst_lookup_tail()
1198 err = -EAFNOSUPPORT; in ip6_dst_lookup_tail()
1208 if (err == -ENETUNREACH) in ip6_dst_lookup_tail()
1214 * ip6_dst_lookup - perform route lookup on flow
1233 * ip6_dst_lookup_flow - perform route lookup on flow with ipsec
1254 fl6->daddr = *final_dst; in ip6_dst_lookup_flow()
1261 * ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
1282 struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); in ip6_sk_dst_lookup_flow()
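A hedged caller sketch for the lookup helpers documented above (hypothetical code, not from this file; example_route_flow and its arguments are illustrative): connection-oriented callers fill a flowi6, leave the source address unspecified so the lookup can choose one, and test the result with IS_ERR() because ip6_dst_lookup_flow returns an ERR_PTR on failure rather than NULL.

static struct dst_entry *example_route_flow(struct sock *sk,
					    const struct in6_addr *daddr)
{
	struct flowi6 fl6 = {
		.flowi6_proto = sk->sk_protocol,
		.daddr        = *daddr,
		.flowi6_oif   = sk->sk_bound_dev_if,
		.flowi6_mark  = READ_ONCE(sk->sk_mark),
	};

	/* saddr stays unspecified: ip6_dst_lookup_tail() will pick one via
	 * ip6_route_get_saddr() for the route it resolves.  The final_dst
	 * argument is NULL because no routing header rewrites the
	 * destination here.
	 */
	return ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, NULL);
}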
1299 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL; in ip6_opt_dup()
1305 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL; in ip6_rthdr_dup()
1315 if (!(rt->dst.flags & DST_XFRM_TUNNEL)) { in ip6_append_data_mtu()
1318 *mtu = orig_mtu - rt->dst.header_len; in ip6_append_data_mtu()
1327 *maxfraglen = ((*mtu - fragheaderlen) & ~7) in ip6_append_data_mtu()
1328 + fragheaderlen - sizeof(struct frag_hdr); in ip6_append_data_mtu()
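To make the rounding above concrete, a hand-worked example (my numbers, assuming a 1500-byte link MTU and fragheaderlen = 40, i.e. a bare IPv6 header with no per-fragment extension headers):

	/*
	 * mtu               = 1500
	 * fragheaderlen     =   40   (sizeof(struct ipv6hdr) only)
	 * (1500 - 40) & ~7  = 1456   payload room, rounded down to 8 bytes
	 * 1456 + 40 - 8     = 1488   maxfraglen (8 = sizeof(struct frag_hdr))
	 *
	 * A queued skb may therefore grow to 1488 bytes: the IPv6 header plus
	 * 1448 bytes of payload.  Once the fragment header is inserted at
	 * fragmentation time, each non-final fragment is 1496 bytes on the
	 * wire and carries a payload that is a multiple of 8, as RFC 8200
	 * requires for every fragment except the last.
	 */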
1338 struct ipv6_txoptions *nopt, *opt = ipc6->opt; in ip6_setup_cork()
1343 cork->base.dst = &rt->dst; in ip6_setup_cork()
1349 if (WARN_ON(v6_cork->opt)) in ip6_setup_cork()
1350 return -EINVAL; in ip6_setup_cork()
1352 nopt = v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation); in ip6_setup_cork()
1354 return -ENOBUFS; in ip6_setup_cork()
1356 nopt->tot_len = sizeof(*opt); in ip6_setup_cork()
1357 nopt->opt_flen = opt->opt_flen; in ip6_setup_cork()
1358 nopt->opt_nflen = opt->opt_nflen; in ip6_setup_cork()
1360 nopt->dst0opt = ip6_opt_dup(opt->dst0opt, sk->sk_allocation); in ip6_setup_cork()
1361 if (opt->dst0opt && !nopt->dst0opt) in ip6_setup_cork()
1362 return -ENOBUFS; in ip6_setup_cork()
1364 nopt->dst1opt = ip6_opt_dup(opt->dst1opt, sk->sk_allocation); in ip6_setup_cork()
1365 if (opt->dst1opt && !nopt->dst1opt) in ip6_setup_cork()
1366 return -ENOBUFS; in ip6_setup_cork()
1368 nopt->hopopt = ip6_opt_dup(opt->hopopt, sk->sk_allocation); in ip6_setup_cork()
1369 if (opt->hopopt && !nopt->hopopt) in ip6_setup_cork()
1370 return -ENOBUFS; in ip6_setup_cork()
1372 nopt->srcrt = ip6_rthdr_dup(opt->srcrt, sk->sk_allocation); in ip6_setup_cork()
1373 if (opt->srcrt && !nopt->srcrt) in ip6_setup_cork()
1374 return -ENOBUFS; in ip6_setup_cork()
1378 v6_cork->hop_limit = ipc6->hlimit; in ip6_setup_cork()
1379 v6_cork->tclass = ipc6->tclass; in ip6_setup_cork()
1380 if (rt->dst.flags & DST_XFRM_TUNNEL) in ip6_setup_cork()
1381 mtu = READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE ? in ip6_setup_cork()
1382 READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst); in ip6_setup_cork()
1384 mtu = READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE ? in ip6_setup_cork()
1385 READ_ONCE(rt->dst.dev->mtu) : dst_mtu(xfrm_dst_path(&rt->dst)); in ip6_setup_cork()
1387 frag_size = READ_ONCE(np->frag_size); in ip6_setup_cork()
1391 cork->base.fragsize = mtu; in ip6_setup_cork()
1392 cork->base.gso_size = ipc6->gso_size; in ip6_setup_cork()
1393 cork->base.tx_flags = 0; in ip6_setup_cork()
1394 cork->base.mark = ipc6->sockc.mark; in ip6_setup_cork()
1395 sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags); in ip6_setup_cork()
1397 cork->base.length = 0; in ip6_setup_cork()
1398 cork->base.transmit_time = ipc6->sockc.transmit_time; in ip6_setup_cork()
1414 struct inet_cork *cork = &cork_full->base; in __ip6_append_data()
1415 struct flowi6 *fl6 = &cork_full->fl.u.ip6; in __ip6_append_data()
1426 struct rt6_info *rt = (struct rt6_info *)cork->dst; in __ip6_append_data()
1428 struct ipv6_txoptions *opt = v6_cork->opt; in __ip6_append_data()
1435 exthdrlen = opt ? opt->opt_flen : 0; in __ip6_append_data()
1436 dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len; in __ip6_append_data()
1439 paged = !!cork->gso_size; in __ip6_append_data()
1440 mtu = cork->gso_size ? IP6_MAX_MTU : cork->fragsize; in __ip6_append_data()
1443 hh_len = LL_RESERVED_SPACE(rt->dst.dev); in __ip6_append_data()
1445 fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + in __ip6_append_data()
1446 (opt ? opt->opt_nflen : 0); in __ip6_append_data()
1449 (opt ? opt->opt_flen + opt->opt_nflen : 0) + in __ip6_append_data()
1450 rt->rt6i_nfheader_len; in __ip6_append_data()
1453 ((mtu - fragheaderlen) & ~7) + fragheaderlen <= sizeof(struct frag_hdr)) in __ip6_append_data()
1456 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - in __ip6_append_data()
1465 if (cork->length + length > mtu - headersize && ipc6->dontfrag && in __ip6_append_data()
1466 (sk->sk_protocol == IPPROTO_UDP || in __ip6_append_data()
1467 sk->sk_protocol == IPPROTO_ICMPV6 || in __ip6_append_data()
1468 sk->sk_protocol == IPPROTO_RAW)) { in __ip6_append_data()
1469 ipv6_local_rxpmtu(sk, fl6, mtu - headersize + in __ip6_append_data()
1479 if (cork->length + length > maxnonfragsize - headersize) { in __ip6_append_data()
1481 pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0); in __ip6_append_data()
1483 return -EMSGSIZE; in __ip6_append_data()
1489 if (transhdrlen && sk->sk_protocol == IPPROTO_UDP && in __ip6_append_data()
1491 length <= mtu - headersize && in __ip6_append_data()
1492 (!(flags & MSG_MORE) || cork->gso_size) && in __ip6_append_data()
1493 rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) in __ip6_append_data()
1499 if (getfrag == ip_generic_getfrag && msg->msg_ubuf) { in __ip6_append_data()
1500 if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb)) in __ip6_append_data()
1501 return -EINVAL; in __ip6_append_data()
1506 if ((rt->dst.dev->features & NETIF_F_SG) && in __ip6_append_data()
1510 uarg = msg->msg_ubuf; in __ip6_append_data()
1515 return -ENOBUFS; in __ip6_append_data()
1517 if (rt->dst.dev->features & NETIF_F_SG && in __ip6_append_data()
1522 uarg_to_msgzc(uarg)->zerocopy = 0; in __ip6_append_data()
1528 return -EPERM; in __ip6_append_data()
1529 if (rt->dst.dev->features & NETIF_F_SG && in __ip6_append_data()
1537 hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP && in __ip6_append_data()
1538 READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID; in __ip6_append_data()
1540 tskey = atomic_inc_return(&sk->sk_tskey) - 1; in __ip6_append_data()
1546 * fragment alignment (= 8-15 octets, in total). in __ip6_append_data()
1553 * at once if non-fragmentable extension headers in __ip6_append_data()
1555 * --yoshfuji in __ip6_append_data()
1558 cork->length += length; in __ip6_append_data()
1564 copy = (cork->length <= mtu ? mtu : maxfraglen) - skb->len; in __ip6_append_data()
1566 copy = maxfraglen - skb->len; in __ip6_append_data()
1578 fraggap = skb->len - maxfraglen; in __ip6_append_data()
1595 if (datalen > (cork->length <= mtu ? mtu : maxfraglen) - fragheaderlen) in __ip6_append_data()
1596 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len; in __ip6_append_data()
1602 alloc_extra += rt->dst.trailer_len; in __ip6_append_data()
1611 !(rt->dst.dev->features&NETIF_F_SG)) in __ip6_append_data()
1615 !(rt->dst.dev->features & NETIF_F_SG))) in __ip6_append_data()
1619 pagedlen = datalen - transhdrlen; in __ip6_append_data()
1628 datalen += rt->dst.trailer_len; in __ip6_append_data()
1633 copy = datalen - transhdrlen - fraggap - pagedlen; in __ip6_append_data()
1635 * because then the equation may reduce to -fraggap. in __ip6_append_data()
1638 err = -EINVAL; in __ip6_append_data()
1646 if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <= in __ip6_append_data()
1647 2 * sk->sk_sndbuf) in __ip6_append_data()
1649 sk->sk_allocation); in __ip6_append_data()
1651 err = -ENOBUFS; in __ip6_append_data()
1658 skb->protocol = htons(ETH_P_IPV6); in __ip6_append_data()
1659 skb->ip_summed = csummode; in __ip6_append_data()
1660 skb->csum = 0; in __ip6_append_data()
1668 data = skb_put(skb, fraglen - pagedlen); in __ip6_append_data()
1671 skb->transport_header = (skb->network_header + in __ip6_append_data()
1674 skb->csum = skb_copy_and_csum_bits( in __ip6_append_data()
1677 skb_prev->csum = csum_sub(skb_prev->csum, in __ip6_append_data()
1678 skb->csum); in __ip6_append_data()
1685 err = -EFAULT; in __ip6_append_data()
1693 length -= copy + transhdrlen; in __ip6_append_data()
1699 skb_shinfo(skb)->tx_flags = cork->tx_flags; in __ip6_append_data()
1700 cork->tx_flags = 0; in __ip6_append_data()
1701 skb_shinfo(skb)->tskey = tskey; in __ip6_append_data()
1711 if (!skb->destructor) { in __ip6_append_data()
1712 skb->destructor = sock_wfree; in __ip6_append_data()
1713 skb->sk = sk; in __ip6_append_data()
1714 wmem_alloc_delta += skb->truesize; in __ip6_append_data()
1723 if (!(rt->dst.dev->features&NETIF_F_SG) && in __ip6_append_data()
1727 off = skb->len; in __ip6_append_data()
1731 err = -EFAULT; in __ip6_append_data()
1737 err = -EIO; in __ip6_append_data()
1738 if (WARN_ON_ONCE(copy > msg->msg_iter.count)) in __ip6_append_data()
1741 err = skb_splice_from_iter(skb, &msg->msg_iter, copy, in __ip6_append_data()
1742 sk->sk_allocation); in __ip6_append_data()
1748 int i = skb_shinfo(skb)->nr_frags; in __ip6_append_data()
1750 err = -ENOMEM; in __ip6_append_data()
1755 if (!skb_can_coalesce(skb, i, pfrag->page, in __ip6_append_data()
1756 pfrag->offset)) { in __ip6_append_data()
1757 err = -EMSGSIZE; in __ip6_append_data()
1761 __skb_fill_page_desc(skb, i, pfrag->page, in __ip6_append_data()
1762 pfrag->offset, 0); in __ip6_append_data()
1763 skb_shinfo(skb)->nr_frags = ++i; in __ip6_append_data()
1764 get_page(pfrag->page); in __ip6_append_data()
1766 copy = min_t(int, copy, pfrag->size - pfrag->offset); in __ip6_append_data()
1768 page_address(pfrag->page) + pfrag->offset, in __ip6_append_data()
1769 offset, copy, skb->len, skb) < 0) in __ip6_append_data()
1772 pfrag->offset += copy; in __ip6_append_data()
1773 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); in __ip6_append_data()
1774 skb->len += copy; in __ip6_append_data()
1775 skb->data_len += copy; in __ip6_append_data()
1776 skb->truesize += copy; in __ip6_append_data()
1784 length -= copy; in __ip6_append_data()
1788 refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); in __ip6_append_data()
1792 err = -EFAULT; in __ip6_append_data()
1795 cork->length -= length; in __ip6_append_data()
1796 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); in __ip6_append_data()
1797 refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); in __ip6_append_data()
1799 atomic_dec(&sk->sk_tskey); in __ip6_append_data()
1817 if (skb_queue_empty(&sk->sk_write_queue)) { in ip6_append_data()
1821 dst_hold(&rt->dst); in ip6_append_data()
1822 err = ip6_setup_cork(sk, &inet->cork, &np->cork, in ip6_append_data()
1827 inet->cork.fl.u.ip6 = *fl6; in ip6_append_data()
1828 exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0); in ip6_append_data()
1835 return __ip6_append_data(sk, &sk->sk_write_queue, &inet->cork, in ip6_append_data()
1836 &np->cork, sk_page_frag(sk), getfrag, in ip6_append_data()
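A hedged sketch of the classic corked-send pattern built on top of ip6_append_data (hypothetical caller, modelled loosely on the datagram protocols that use this API, such as L2TP/IPv6; getfrag, msg, len, transhdrlen, ipc6, fl6 and dst are assumed to be prepared by the caller):

	lock_sock(sk);

	err = ip6_append_data(sk, getfrag, msg, len, transhdrlen,
			      &ipc6, &fl6, (struct rt6_info *)dst,
			      msg->msg_flags);
	if (err)
		/* drop everything queued on this socket so far */
		ip6_flush_pending_frames(sk);
	else if (!(msg->msg_flags & MSG_MORE))
		/* build the final skb chain and send it via ip6_send_skb() */
		err = ip6_push_pending_frames(sk);

	release_sock(sk);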
1843 struct dst_entry *dst = cork->base.dst; in ip6_cork_steal_dst()
1845 cork->base.dst = NULL; in ip6_cork_steal_dst()
1852 if (v6_cork->opt) { in ip6_cork_release()
1853 struct ipv6_txoptions *opt = v6_cork->opt; in ip6_cork_release()
1855 kfree(opt->dst0opt); in ip6_cork_release()
1856 kfree(opt->dst1opt); in ip6_cork_release()
1857 kfree(opt->hopopt); in ip6_cork_release()
1858 kfree(opt->srcrt); in ip6_cork_release()
1860 v6_cork->opt = NULL; in ip6_cork_release()
1863 if (cork->base.dst) { in ip6_cork_release()
1864 dst_release(cork->base.dst); in ip6_cork_release()
1865 cork->base.dst = NULL; in ip6_cork_release()
1879 struct ipv6_txoptions *opt = v6_cork->opt; in __ip6_make_skb()
1880 struct rt6_info *rt = (struct rt6_info *)cork->base.dst; in __ip6_make_skb()
1881 struct flowi6 *fl6 = &cork->fl.u.ip6; in __ip6_make_skb()
1882 unsigned char proto = fl6->flowi6_proto; in __ip6_make_skb()
1887 tail_skb = &(skb_shinfo(skb)->frag_list); in __ip6_make_skb()
1889 /* move skb->data to ip header from ext header */ in __ip6_make_skb()
1890 if (skb->data < skb_network_header(skb)) in __ip6_make_skb()
1895 tail_skb = &(tmp_skb->next); in __ip6_make_skb()
1896 skb->len += tmp_skb->len; in __ip6_make_skb()
1897 skb->data_len += tmp_skb->len; in __ip6_make_skb()
1898 skb->truesize += tmp_skb->truesize; in __ip6_make_skb()
1899 tmp_skb->destructor = NULL; in __ip6_make_skb()
1900 tmp_skb->sk = NULL; in __ip6_make_skb()
1904 skb->ignore_df = ip6_sk_ignore_df(sk); in __ip6_make_skb()
1907 final_dst = &fl6->daddr; in __ip6_make_skb()
1908 if (opt && opt->opt_flen) in __ip6_make_skb()
1910 if (opt && opt->opt_nflen) in __ip6_make_skb()
1911 ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst, &fl6->saddr); in __ip6_make_skb()
1917 ip6_flow_hdr(hdr, v6_cork->tclass, in __ip6_make_skb()
1918 ip6_make_flowlabel(net, skb, fl6->flowlabel, in __ip6_make_skb()
1920 hdr->hop_limit = v6_cork->hop_limit; in __ip6_make_skb()
1921 hdr->nexthdr = proto; in __ip6_make_skb()
1922 hdr->saddr = fl6->saddr; in __ip6_make_skb()
1923 hdr->daddr = *final_dst; in __ip6_make_skb()
1925 skb->priority = READ_ONCE(sk->sk_priority); in __ip6_make_skb()
1926 skb->mark = cork->base.mark; in __ip6_make_skb()
1927 skb->tstamp = cork->base.transmit_time; in __ip6_make_skb()
1930 IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS); in __ip6_make_skb()
1935 if (sk->sk_socket->type == SOCK_RAW && in __ip6_make_skb()
1937 icmp6_type = fl6->fl6_icmp_type; in __ip6_make_skb()
1939 icmp6_type = icmp6_hdr(skb)->icmp6_type; in __ip6_make_skb()
1951 struct net *net = sock_net(skb->sk); in ip6_send_skb()
1955 err = ip6_local_out(net, skb->sk, skb); in ip6_send_skb()
1960 IP6_INC_STATS(net, rt->rt6i_idev, in ip6_send_skb()
1998 __ip6_flush_pending_frames(sk, &sk->sk_write_queue, in ip6_flush_pending_frames()
1999 &inet_sk(sk)->cork, &inet6_sk(sk)->cork); in ip6_flush_pending_frames()
2012 int exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0); in ip6_make_skb()
2016 dst_release(&rt->dst); in ip6_make_skb()
2022 cork->base.flags = 0; in ip6_make_skb()
2023 cork->base.addr = 0; in ip6_make_skb()
2024 cork->base.opt = NULL; in ip6_make_skb()
2031 if (ipc6->dontfrag < 0) in ip6_make_skb()
2032 ipc6->dontfrag = inet6_test_bit(DONTFRAG, sk); in ip6_make_skb()
2035 &current->task_frag, getfrag, from, in ip6_make_skb()
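Finally, a hedged sketch of the uncorked one-shot path that ip6_make_skb enables (hypothetical caller, similar in spirit to UDP's non-corked transmit; identifiers other than the ip6_* calls are assumptions). The whole datagram is assembled onto a stack cork and handed to ip6_send_skb, which runs ip6_local_out and bumps OUTDISCARDS on failure:

	struct inet_cork_full cork;
	struct flowi6 *fl6 = &cork.fl.u.ip6;	/* the flow lives inside the cork */
	struct sk_buff *skb;

	/* ... fill *fl6, resolve dst, and set up ipc6 before this point ... */

	skb = ip6_make_skb(sk, getfrag, msg, len, transhdrlen,
			   &ipc6, (struct rt6_info *)dst, msg->msg_flags, &cork);
	if (IS_ERR_OR_NULL(skb))
		return skb ? PTR_ERR(skb) : 0;	/* NULL means MSG_PROBE: nothing to send */

	return ip6_send_skb(skb);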