/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef TUN_VNET_H
#define TUN_VNET_H

/* High bits in flags field are unused. */
#define TUN_VNET_LE 0x80000000
#define TUN_VNET_BE 0x40000000
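
/*
 * Legacy virtio-net headers follow the host's native byte order unless
 * userspace forced big endian via TUN_VNET_BE (honoured only when
 * CONFIG_TUN_VNET_CROSS_LE is enabled).
 */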
static inline bool tun_vnet_legacy_is_little_endian(unsigned int flags)
{
	bool be = IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE) &&
		  (flags & TUN_VNET_BE);

	return !be && virtio_legacy_is_little_endian();
}
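
/*
 * TUNGETVNETBE: report whether TUN_VNET_BE is set. Fails with -EINVAL
 * when CONFIG_TUN_VNET_CROSS_LE is not enabled.
 */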
static inline long tun_get_vnet_be(unsigned int flags, int __user *argp)
{
	int be = !!(flags & TUN_VNET_BE);

	if (!IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE))
		return -EINVAL;

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}
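
/*
 * TUNSETVNETBE: set or clear TUN_VNET_BE from a user-supplied int.
 * Fails with -EINVAL when CONFIG_TUN_VNET_CROSS_LE is not enabled.
 */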
static inline long tun_set_vnet_be(unsigned int *flags, int __user *argp)
{
	int be;

	if (!IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE))
		return -EINVAL;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		*flags |= TUN_VNET_BE;
	else
		*flags &= ~TUN_VNET_BE;

	return 0;
}
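
/*
 * The vnet header is little endian if TUN_VNET_LE was set explicitly or
 * if the legacy rules above say so.
 */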
static inline bool tun_vnet_is_little_endian(unsigned int flags)
{
	return flags & TUN_VNET_LE || tun_vnet_legacy_is_little_endian(flags);
}
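
/* Convert a __virtio16 vnet header field to CPU byte order. */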
static inline u16 tun_vnet16_to_cpu(unsigned int flags, __virtio16 val)
{
	return __virtio16_to_cpu(tun_vnet_is_little_endian(flags), val);
}
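
/* Convert a CPU-order value to __virtio16 for a vnet header field. */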
static inline __virtio16 cpu_to_tun_vnet16(unsigned int flags, u16 val)
{
	return __cpu_to_virtio16(tun_vnet_is_little_endian(flags), val);
}
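
/*
 * Handle the vnet header ioctls: TUN{GET,SET}VNETHDRSZ, TUN{GET,SET}VNETLE
 * and TUN{GET,SET}VNETBE. Returns 0 on success or a negative errno.
 */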
static inline long tun_vnet_ioctl(int *vnet_hdr_sz, unsigned int *flags,
				  unsigned int cmd, int __user *sp)
{
	int s;

	switch (cmd) {
	case TUNGETVNETHDRSZ:
		s = *vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		*vnet_hdr_sz = s;
		return 0;

	case TUNGETVNETLE:
		s = !!(*flags & TUN_VNET_LE);
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETLE:
		if (get_user(s, sp))
			return -EFAULT;
		if (s)
			*flags |= TUN_VNET_LE;
		else
			*flags &= ~TUN_VNET_LE;
		return 0;

	case TUNGETVNETBE:
		return tun_get_vnet_be(*flags, sp);

	case TUNSETVNETBE:
		return tun_set_vnet_be(flags, sp);

	default:
		return -EINVAL;
	}
}
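
/*
 * Read a struct virtio_net_hdr from the head of @from. The header area
 * spans @sz bytes; bytes beyond sizeof(*hdr) are skipped. For
 * VIRTIO_NET_HDR_F_NEEDS_CSUM packets, hdr_len is raised so that it covers
 * the checksum field, and the adjusted value is written back into @hdr.
 * Returns hdr_len in CPU byte order, -EINVAL if the iov cannot hold @sz
 * bytes or hdr_len bytes of payload, or -EFAULT on a failed copy.
 */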
static inline int tun_vnet_hdr_get(int sz, unsigned int flags,
				   struct iov_iter *from,
				   struct virtio_net_hdr *hdr)
{
	u16 hdr_len;

	if (iov_iter_count(from) < sz)
		return -EINVAL;

	if (!copy_from_iter_full(hdr, sizeof(*hdr), from))
		return -EFAULT;

	hdr_len = tun_vnet16_to_cpu(flags, hdr->hdr_len);

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		hdr_len = max(tun_vnet16_to_cpu(flags, hdr->csum_start) +
			      tun_vnet16_to_cpu(flags, hdr->csum_offset) + 2,
			      hdr_len);
		hdr->hdr_len = cpu_to_tun_vnet16(flags, hdr_len);
	}

	if (hdr_len > iov_iter_count(from))
		return -EINVAL;

	iov_iter_advance(from, sz - sizeof(*hdr));

	return hdr_len;
}
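
/*
 * Write the vnet header to @iter and zero-fill the remainder of the
 * @sz-byte header area. Returns 0, -EINVAL if @iter is too short, or
 * -EFAULT if the copy fails.
 */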
static inline int tun_vnet_hdr_put(int sz, struct iov_iter *iter,
				   const struct virtio_net_hdr *hdr)
{
	if (unlikely(iov_iter_count(iter) < sz))
		return -EINVAL;

	if (unlikely(copy_to_iter(hdr, sizeof(*hdr), iter) != sizeof(*hdr)))
		return -EFAULT;

	if (iov_iter_zero(sz - sizeof(*hdr), iter) != sz - sizeof(*hdr))
		return -EFAULT;

	return 0;
}
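
/* Apply the vnet header to @skb with the endianness selected by @flags. */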
static inline int tun_vnet_hdr_to_skb(unsigned int flags, struct sk_buff *skb,
				      const struct virtio_net_hdr *hdr)
{
	return virtio_net_hdr_to_skb(skb, hdr, tun_vnet_is_little_endian(flags));
}
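
/*
 * Build a vnet header from @skb. If the skb carries GSO state that cannot
 * be expressed in the header, log a rate-limited error with a hex dump of
 * the header area and return -EINVAL.
 */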
static inline int tun_vnet_hdr_from_skb(unsigned int flags,
					const struct net_device *dev,
					const struct sk_buff *skb,
					struct virtio_net_hdr *hdr)
{
	int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;

	if (virtio_net_hdr_from_skb(skb, hdr,
				    tun_vnet_is_little_endian(flags), true,
				    vlan_hlen)) {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		if (net_ratelimit()) {
			netdev_err(dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
				   sinfo->gso_type, tun_vnet16_to_cpu(flags, hdr->gso_size),
				   tun_vnet16_to_cpu(flags, hdr->hdr_len));
			print_hex_dump(KERN_ERR, "tun: ",
				       DUMP_PREFIX_NONE,
				       16, 1, skb->head,
				       min(tun_vnet16_to_cpu(flags, hdr->hdr_len), 64), true);
		}
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	return 0;
}

#endif /* TUN_VNET_H */