xref: /linux/include/linux/if_vlan.h (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * VLAN		An implementation of 802.1Q VLAN tagging.
4  *
5  * Authors:	Ben Greear <greearb@candelatech.com>
6  */
7 #ifndef _LINUX_IF_VLAN_H_
8 #define _LINUX_IF_VLAN_H_
9 
10 #include <linux/netdevice.h>
11 #include <linux/etherdevice.h>
12 #include <linux/rtnetlink.h>
13 #include <linux/bug.h>
14 #include <uapi/linux/if_vlan.h>
15 
16 #define VLAN_HLEN	4		/* The additional bytes required by VLAN
17 					 * (in addition to the Ethernet header)
18 					 */
19 #define VLAN_ETH_HLEN	18		/* Total octets in header.	 */
20 #define VLAN_ETH_ZLEN	64		/* Min. octets in frame sans FCS */
21 
22 /*
23  * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan
24  */
25 #define VLAN_ETH_DATA_LEN	1500	/* Max. octets in payload	 */
26 #define VLAN_ETH_FRAME_LEN	1518	/* Max. octets in frame sans FCS */
27 
28 #define VLAN_MAX_DEPTH	8		/* Max. number of nested VLAN tags parsed */
29 
30 /*
31  * 	struct vlan_hdr - vlan header
32  * 	@h_vlan_TCI: priority and VLAN ID
33  *	@h_vlan_encapsulated_proto: packet type ID or len
34  */
35 struct vlan_hdr {
36 	__be16	h_vlan_TCI;
37 	__be16	h_vlan_encapsulated_proto;
38 };
39 
40 /**
41  *	struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
42  *	@h_dest: destination ethernet address
43  *	@h_source: source ethernet address
44  *	@h_vlan_proto: ethernet protocol
45  *	@h_vlan_TCI: priority and VLAN ID
46  *	@h_vlan_encapsulated_proto: packet type ID or len
47  */
48 struct vlan_ethhdr {
49 	struct_group(addrs,
50 		unsigned char	h_dest[ETH_ALEN];
51 		unsigned char	h_source[ETH_ALEN];
52 	);
53 	__be16		h_vlan_proto;
54 	__be16		h_vlan_TCI;
55 	__be16		h_vlan_encapsulated_proto;
56 };
57 
58 #include <linux/skbuff.h>
59 
60 static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
61 {
62 	return (struct vlan_ethhdr *)skb_mac_header(skb);
63 }
64 
65 /* Prefer this version in TX path, instead of
66  * skb_reset_mac_header() + vlan_eth_hdr()
67  */
68 static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb)
69 {
70 	return (struct vlan_ethhdr *)skb->data;
71 }
72 
73 #define VLAN_PRIO_MASK		0xe000 /* Priority Code Point */
74 #define VLAN_PRIO_SHIFT		13
75 #define VLAN_CFI_MASK		0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */
76 #define VLAN_VID_MASK		0x0fff /* VLAN Identifier */
77 #define VLAN_N_VID		4096
78 
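/* Usage sketch (illustrative only, not part of this header): splitting a
 * host-order TCI into its component fields with the masks above, assuming
 * "tci" was read with something like ntohs(vhdr->h_vlan_TCI).
 *
 *	u16 vid  = tci & VLAN_VID_MASK;
 *	u8  pcp  = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 *	bool dei = !!(tci & VLAN_CFI_MASK);
 */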
79 /* found in socket.c */
80 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
81 
82 #define skb_vlan_tag_present(__skb)	(!!(__skb)->vlan_all)
83 #define skb_vlan_tag_get(__skb)		((__skb)->vlan_tci)
84 #define skb_vlan_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
85 #define skb_vlan_tag_get_cfi(__skb)	(!!((__skb)->vlan_tci & VLAN_CFI_MASK))
86 #define skb_vlan_tag_get_prio(__skb)	(((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)
87 
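/* Usage sketch (illustrative; "rx_log_vlan" is a hypothetical helper): reading
 * the hardware-accelerated tag that a driver attached to the skb.
 *
 *	static void rx_log_vlan(const struct sk_buff *skb)
 *	{
 *		if (!skb_vlan_tag_present(skb))
 *			return;
 *		pr_debug("vid %u prio %u\n",
 *			 skb_vlan_tag_get_id(skb),
 *			 skb_vlan_tag_get_prio(skb));
 *	}
 */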
88 static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev)
89 {
90 	ASSERT_RTNL();
91 	return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev));
92 }
93 
94 static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev)
95 {
96 	ASSERT_RTNL();
97 	call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev);
98 }
99 
100 static inline int vlan_get_rx_stag_filter_info(struct net_device *dev)
101 {
102 	ASSERT_RTNL();
103 	return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev));
104 }
105 
106 static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev)
107 {
108 	ASSERT_RTNL();
109 	call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev);
110 }
111 
112 /**
113  *	struct vlan_pcpu_stats - VLAN percpu rx/tx stats
114  *	@rx_packets: number of received packets
115  *	@rx_bytes: number of received bytes
116  *	@rx_multicast: number of received multicast packets
117  *	@tx_packets: number of transmitted packets
118  *	@tx_bytes: number of transmitted bytes
119  *	@syncp: synchronization point for 64bit counters
120  *	@rx_errors: number of rx errors
121  *	@tx_dropped: number of tx drops
122  */
123 struct vlan_pcpu_stats {
124 	u64_stats_t		rx_packets;
125 	u64_stats_t		rx_bytes;
126 	u64_stats_t		rx_multicast;
127 	u64_stats_t		tx_packets;
128 	u64_stats_t		tx_bytes;
129 	struct u64_stats_sync	syncp;
130 	u32			rx_errors;
131 	u32			tx_dropped;
132 };
133 
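/* Usage sketch (illustrative only): one way a receive path might bump these
 * counters, assuming "stats" points at this CPU's vlan_pcpu_stats.
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	u64_stats_inc(&stats->rx_packets);
 *	u64_stats_add(&stats->rx_bytes, skb->len);
 *	u64_stats_update_end(&stats->syncp);
 */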
134 #if IS_ENABLED(CONFIG_VLAN_8021Q)
135 
136 extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
137 					       __be16 vlan_proto, u16 vlan_id);
138 extern int vlan_for_each(struct net_device *dev,
139 			 int (*action)(struct net_device *dev, int vid,
140 				       void *arg), void *arg);
141 extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
142 extern u16 vlan_dev_vlan_id(const struct net_device *dev);
143 extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
144 
145 /**
146  *	struct vlan_priority_tci_mapping - vlan egress priority mappings
147  *	@priority: skb priority
148  *	@vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
149  *	@next: pointer to next struct
150  */
151 struct vlan_priority_tci_mapping {
152 	u32					priority;
153 	u16					vlan_qos;
154 	struct vlan_priority_tci_mapping	*next;
155 };
156 
157 struct proc_dir_entry;
158 struct netpoll;
159 
160 /**
161  *	struct vlan_dev_priv - VLAN private device data
162  *	@nr_ingress_mappings: number of ingress priority mappings
163  *	@ingress_priority_map: ingress priority mappings
164  *	@nr_egress_mappings: number of egress priority mappings
165  *	@egress_priority_map: hash of egress priority mappings
166  *	@vlan_proto: VLAN encapsulation protocol
167  *	@vlan_id: VLAN identifier
168  *	@flags: device flags
169  *	@real_dev: underlying netdevice
170  *	@dev_tracker: refcount tracker for @real_dev reference
171  *	@real_dev_addr: address of underlying netdevice
172  *	@dent: proc dir entry
173  *	@vlan_pcpu_stats: ptr to percpu rx stats
174  *	@netpoll: netpoll instance "propagated" down to @real_dev
175  */
176 struct vlan_dev_priv {
177 	unsigned int				nr_ingress_mappings;
178 	u32					ingress_priority_map[8];
179 	unsigned int				nr_egress_mappings;
180 	struct vlan_priority_tci_mapping	*egress_priority_map[16];
181 
182 	__be16					vlan_proto;
183 	u16					vlan_id;
184 	u16					flags;
185 
186 	struct net_device			*real_dev;
187 	netdevice_tracker			dev_tracker;
188 
189 	unsigned char				real_dev_addr[ETH_ALEN];
190 
191 	struct proc_dir_entry			*dent;
192 	struct vlan_pcpu_stats __percpu		*vlan_pcpu_stats;
193 #ifdef CONFIG_NET_POLL_CONTROLLER
194 	struct netpoll				*netpoll;
195 #endif
196 };
197 
198 static inline bool is_vlan_dev(const struct net_device *dev)
199 {
200 	return dev->priv_flags & IFF_802_1Q_VLAN;
201 }
202 
203 static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
204 {
205 	return netdev_priv(dev);
206 }
207 
208 static inline u16
209 vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
210 {
211 	struct vlan_priority_tci_mapping *mp;
212 
213 	smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
214 
215 	mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
216 	while (mp) {
217 		if (mp->priority == skprio) {
218 			return mp->vlan_qos; /* This should already be shifted
219 					      * to mask correctly with the
220 					      * VLAN's TCI */
221 		}
222 		mp = mp->next;
223 	}
224 	return 0;
225 }
226 
227 extern bool vlan_do_receive(struct sk_buff **skb);
228 
229 extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
230 extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);
231 
232 extern int vlan_vids_add_by_dev(struct net_device *dev,
233 				const struct net_device *by_dev);
234 extern void vlan_vids_del_by_dev(struct net_device *dev,
235 				 const struct net_device *by_dev);
236 
237 extern bool vlan_uses_dev(const struct net_device *dev);
238 
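/* Usage sketch (illustrative; "count_vid" and "real_dev" are hypothetical):
 * walking every VLAN ID configured on top of a real device.  vlan_for_each()
 * must be called with RTNL held.
 *
 *	static int count_vid(struct net_device *vdev, int vid, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *
 *	vlan_for_each(real_dev, count_vid, &n);
 */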
239 #else
240 static inline bool is_vlan_dev(const struct net_device *dev)
241 {
242 	return false;
243 }
244 
245 static inline struct net_device *
246 __vlan_find_dev_deep_rcu(struct net_device *real_dev,
247 		     __be16 vlan_proto, u16 vlan_id)
248 {
249 	return NULL;
250 }
251 
252 static inline int
253 vlan_for_each(struct net_device *dev,
254 	      int (*action)(struct net_device *dev, int vid, void *arg),
255 	      void *arg)
256 {
257 	return 0;
258 }
259 
260 static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
261 {
262 	WARN_ON_ONCE(1);
263 	return NULL;
264 }
265 
266 static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
267 {
268 	WARN_ON_ONCE(1);
269 	return 0;
270 }
271 
272 static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
273 {
274 	WARN_ON_ONCE(1);
275 	return 0;
276 }
277 
278 static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
279 					       u32 skprio)
280 {
281 	return 0;
282 }
283 
284 static inline bool vlan_do_receive(struct sk_buff **skb)
285 {
286 	return false;
287 }
288 
289 static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
290 {
291 	return 0;
292 }
293 
294 static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
295 {
296 }
297 
298 static inline int vlan_vids_add_by_dev(struct net_device *dev,
299 				       const struct net_device *by_dev)
300 {
301 	return 0;
302 }
303 
304 static inline void vlan_vids_del_by_dev(struct net_device *dev,
305 					const struct net_device *by_dev)
306 {
307 }
308 
309 static inline bool vlan_uses_dev(const struct net_device *dev)
310 {
311 	return false;
312 }
313 #endif
314 
315 /**
316  * eth_type_vlan - check for valid vlan ether type.
317  * @ethertype: ether type to check
318  *
319  * Returns: true if the ether type is a vlan ether type.
320  */
321 static inline bool eth_type_vlan(__be16 ethertype)
322 {
323 	switch (ethertype) {
324 	case htons(ETH_P_8021Q):
325 	case htons(ETH_P_8021AD):
326 		return true;
327 	default:
328 		return false;
329 	}
330 }
331 
332 static inline bool vlan_hw_offload_capable(netdev_features_t features,
333 					   __be16 proto)
334 {
335 	if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX)
336 		return true;
337 	if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX)
338 		return true;
339 	return false;
340 }
341 
342 /**
343  * __vlan_insert_inner_tag - inner VLAN tag inserting
344  * @skb: skbuff to tag
345  * @vlan_proto: VLAN encapsulation protocol
346  * @vlan_tci: VLAN TCI to insert
347  * @mac_len: MAC header length including outer vlan headers
348  *
349  * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
350  * Does not change skb->protocol so this function can be used during receive.
351  *
352  * Returns: error if skb_cow_head fails.
353  */
354 static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
355 					  __be16 vlan_proto, u16 vlan_tci,
356 					  unsigned int mac_len)
357 {
358 	struct vlan_ethhdr *veth;
359 
360 	if (skb_cow_head(skb, VLAN_HLEN) < 0)
361 		return -ENOMEM;
362 
363 	skb_push(skb, VLAN_HLEN);
364 
365 	/* Move the mac header sans proto to the beginning of the new header. */
366 	if (likely(mac_len > ETH_TLEN))
367 		memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
368 	if (skb_mac_header_was_set(skb))
369 		skb->mac_header -= VLAN_HLEN;
370 
371 	veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);
372 
373 	/* first, the ethernet type */
374 	if (likely(mac_len >= ETH_TLEN)) {
375 		/* h_vlan_encapsulated_proto should already be populated, and
376 		 * skb->data has space for h_vlan_proto
377 		 */
378 		veth->h_vlan_proto = vlan_proto;
379 	} else {
380 		/* h_vlan_encapsulated_proto should not be populated, and
381 		 * skb->data has no space for h_vlan_proto
382 		 */
383 		veth->h_vlan_encapsulated_proto = skb->protocol;
384 	}
385 
386 	/* now, the TCI */
387 	veth->h_vlan_TCI = htons(vlan_tci);
388 
389 	return 0;
390 }
391 
392 /**
393  * __vlan_insert_tag - regular VLAN tag inserting
394  * @skb: skbuff to tag
395  * @vlan_proto: VLAN encapsulation protocol
396  * @vlan_tci: VLAN TCI to insert
397  *
398  * Inserts the VLAN tag into @skb as part of the payload
399  * Does not change skb->protocol so this function can be used during receive.
400  *
401  * Returns: error if skb_cow_head fails.
402  */
403 static inline int __vlan_insert_tag(struct sk_buff *skb,
404 				    __be16 vlan_proto, u16 vlan_tci)
405 {
406 	return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
407 }
408 
409 /**
410  * vlan_insert_inner_tag - inner VLAN tag inserting
411  * @skb: skbuff to tag
412  * @vlan_proto: VLAN encapsulation protocol
413  * @vlan_tci: VLAN TCI to insert
414  * @mac_len: MAC header length including outer vlan headers
415  *
416  * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
417  * Returns a VLAN tagged skb. This might change skb->head.
418  *
419  * Following the skb_unshare() example, in case of error, the calling function
420  * doesn't have to worry about freeing the original skb.
421  *
422  * Does not change skb->protocol so this function can be used during receive.
423  *
424  * Return: modified @skb on success, NULL on error (@skb is freed).
425  */
426 static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
427 						    __be16 vlan_proto,
428 						    u16 vlan_tci,
429 						    unsigned int mac_len)
430 {
431 	int err;
432 
433 	err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
434 	if (err) {
435 		dev_kfree_skb_any(skb);
436 		return NULL;
437 	}
438 	return skb;
439 }
440 
441 /**
442  * vlan_insert_tag - regular VLAN tag inserting
443  * @skb: skbuff to tag
444  * @vlan_proto: VLAN encapsulation protocol
445  * @vlan_tci: VLAN TCI to insert
446  *
447  * Inserts the VLAN tag into @skb as part of the payload
448  * Returns a VLAN tagged skb. This might change skb->head.
449  *
450  * Following the skb_unshare() example, in case of error, the calling function
451  * doesn't have to worry about freeing the original skb.
452  *
453  * Does not change skb->protocol so this function can be used during receive.
454  *
455  * Return: modified @skb on success, NULL on error (@skb is freed).
456  */
457 static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
458 					      __be16 vlan_proto, u16 vlan_tci)
459 {
460 	return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
461 }
462 
463 /**
464  * vlan_insert_tag_set_proto - regular VLAN tag inserting
465  * @skb: skbuff to tag
466  * @vlan_proto: VLAN encapsulation protocol
467  * @vlan_tci: VLAN TCI to insert
468  *
469  * Inserts the VLAN tag into @skb as part of the payload
470  * Returns a VLAN tagged skb. This might change skb->head.
471  *
472  * Following the skb_unshare() example, in case of error, the calling function
473  * doesn't have to worry about freeing the original skb.
474  *
475  * Return: modified @skb on success, NULL on error (@skb is freed).
476  */
477 static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
478 							__be16 vlan_proto,
479 							u16 vlan_tci)
480 {
481 	skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
482 	if (skb)
483 		skb->protocol = vlan_proto;
484 	return skb;
485 }
486 
487 /**
488  * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info
489  * @skb: skbuff to clear
490  *
491  * Clears the VLAN information from @skb
492  */
493 static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
494 {
495 	skb->vlan_all = 0;
496 }
497 
498 /**
499  * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb
500  * @dst: skbuff to copy to
501  * @src: skbuff to copy from
502  *
503  * Copies VLAN information from @src to @dst (for branchless code)
504  */
505 static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
506 {
507 	dst->vlan_all = src->vlan_all;
508 }
509 
510 /*
511  * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
512  * @skb: skbuff to tag
513  *
514  * Pushes the VLAN tag from @skb->vlan_tci inside to the payload.
515  *
516  * Following the skb_unshare() example, in case of error, the calling function
517  * doesn't have to worry about freeing the original skb.
518  */
519 static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
520 {
521 	skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
522 					skb_vlan_tag_get(skb));
523 	if (likely(skb))
524 		__vlan_hwaccel_clear_tag(skb);
525 	return skb;
526 }
527 
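/* Usage sketch (illustrative only): the common transmit-time fallback of
 * moving the tag into the payload when the device cannot offload insertion.
 * On failure the skb has already been freed, so the caller just stops.
 *
 *	if (skb_vlan_tag_present(skb) &&
 *	    !vlan_hw_offload_capable(dev->features, skb->vlan_proto)) {
 *		skb = __vlan_hwaccel_push_inside(skb);
 *		if (!skb)
 *			return NETDEV_TX_OK;
 *	}
 */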
528 /**
529  * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
530  * @skb: skbuff to tag
531  * @vlan_proto: VLAN encapsulation protocol
532  * @vlan_tci: VLAN TCI to insert
533  *
534  * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
535  */
536 static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
537 					  __be16 vlan_proto, u16 vlan_tci)
538 {
539 	skb->vlan_proto = vlan_proto;
540 	skb->vlan_tci = vlan_tci;
541 }
542 
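/* Usage sketch (illustrative; the "rx_desc" fields are hypothetical): a
 * receive path handing a hardware-stripped C-tag to the stack before GRO.
 *
 *	if (rx_desc->vlan_stripped)
 *		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 *				       le16_to_cpu(rx_desc->vlan_tci));
 *	napi_gro_receive(napi, skb);
 */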
543 /**
544  * __vlan_get_tag - get the VLAN ID that is part of the payload
545  * @skb: skbuff to query
546  * @vlan_tci: buffer to store value
547  *
548  * Returns: error if the skb is not of VLAN type
549  */
550 static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
551 {
552 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
553 
554 	if (!eth_type_vlan(veth->h_vlan_proto))
555 		return -ENODATA;
556 
557 	*vlan_tci = ntohs(veth->h_vlan_TCI);
558 	return 0;
559 }
560 
561 /**
562  * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->vlan_tci
563  * @skb: skbuff to query
564  * @vlan_tci: buffer to store value
565  *
566  * Returns: error if @skb->vlan_tci is not set correctly
567  */
568 static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
569 					 u16 *vlan_tci)
570 {
571 	if (skb_vlan_tag_present(skb)) {
572 		*vlan_tci = skb_vlan_tag_get(skb);
573 		return 0;
574 	} else {
575 		*vlan_tci = 0;
576 		return -ENODATA;
577 	}
578 }
579 
580 /**
581  * vlan_get_tag - get the VLAN ID from the skb
582  * @skb: skbuff to query
583  * @vlan_tci: buffer to store value
584  *
585  * Returns: error if the skb is not VLAN tagged
586  */
587 static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
588 {
589 	if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
590 		return __vlan_hwaccel_get_tag(skb, vlan_tci);
591 	} else {
592 		return __vlan_get_tag(skb, vlan_tci);
593 	}
594 }
595 
596 /**
597  * __vlan_get_protocol_offset() - get protocol EtherType.
598  * @skb: skbuff to query
599  * @type: first vlan protocol
600  * @mac_offset: MAC offset
601  * @depth: buffer to store length of eth and vlan tags in bytes
602  *
603  * Returns: the EtherType of the packet, regardless of whether it is
604  * vlan encapsulated (normal or hardware accelerated) or not.
605  */
606 static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb,
607 						__be16 type,
608 						int mac_offset,
609 						int *depth)
610 {
611 	unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
612 
613 	/* if type is 802.1Q/AD then the header should already be
614 	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
615 	 * ETH_HLEN otherwise
616 	 */
617 	if (eth_type_vlan(type)) {
618 		if (vlan_depth) {
619 			if (WARN_ON(vlan_depth < VLAN_HLEN))
620 				return 0;
621 			vlan_depth -= VLAN_HLEN;
622 		} else {
623 			vlan_depth = ETH_HLEN;
624 		}
625 		do {
626 			struct vlan_hdr vhdr, *vh;
627 
628 			vh = skb_header_pointer(skb, mac_offset + vlan_depth,
629 						sizeof(vhdr), &vhdr);
630 			if (unlikely(!vh || !--parse_depth))
631 				return 0;
632 
633 			type = vh->h_vlan_encapsulated_proto;
634 			vlan_depth += VLAN_HLEN;
635 		} while (eth_type_vlan(type));
636 	}
637 
638 	if (depth)
639 		*depth = vlan_depth;
640 
641 	return type;
642 }
643 
644 static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
645 					 int *depth)
646 {
647 	return __vlan_get_protocol_offset(skb, type, 0, depth);
648 }
649 
650 /**
651  * vlan_get_protocol - get protocol EtherType.
652  * @skb: skbuff to query
653  *
654  * Returns: the EtherType of the packet, regardless of whether it is
655  * vlan encapsulated (normal or hardware accelerated) or not.
656  */
657 static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
658 {
659 	return __vlan_get_protocol(skb, skb->protocol, NULL);
660 }
661 
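/* Usage sketch (illustrative; the csum helpers are hypothetical): looking
 * past any number of VLAN tags to find the network-layer EtherType, e.g.
 * when setting up checksum or TSO state on transmit.
 *
 *	__be16 proto = vlan_get_protocol(skb);
 *
 *	if (proto == htons(ETH_P_IP))
 *		foo_setup_ipv4_csum(skb);
 *	else if (proto == htons(ETH_P_IPV6))
 *		foo_setup_ipv6_csum(skb);
 */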
662 /* This version of __vlan_get_protocol() also pulls mac header in skb->head */
663 static inline __be16 vlan_get_protocol_and_depth(struct sk_buff *skb,
664 						 __be16 type, int *depth)
665 {
666 	int maclen;
667 
668 	type = __vlan_get_protocol(skb, type, &maclen);
669 
670 	if (type) {
671 		if (!pskb_may_pull(skb, maclen))
672 			type = 0;
673 		else if (depth)
674 			*depth = maclen;
675 	}
676 	return type;
677 }
678 
679 /* A getter for the SKB protocol field which will handle VLAN tags consistently
680  * whether VLAN acceleration is enabled or not.
681  */
682 static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan)
683 {
684 	if (!skip_vlan)
685 		/* VLAN acceleration strips the VLAN header from the skb and
686 		 * moves it to skb->vlan_proto
687 		 */
688 		return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol;
689 
690 	return vlan_get_protocol(skb);
691 }
692 
693 static inline void vlan_set_encap_proto(struct sk_buff *skb,
694 					struct vlan_hdr *vhdr)
695 {
696 	__be16 proto;
697 	unsigned short *rawp;
698 
699 	/*
700 	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
701 	 * three protocols care about.
702 	 */
703 
704 	proto = vhdr->h_vlan_encapsulated_proto;
705 	if (eth_proto_is_802_3(proto)) {
706 		skb->protocol = proto;
707 		return;
708 	}
709 
710 	rawp = (unsigned short *)(vhdr + 1);
711 	if (*rawp == 0xFFFF)
712 		/*
713 		 * This is a magic hack to spot IPX packets. Older Novell
714 		 * breaks the protocol design and runs IPX over 802.3 without
715 		 * an 802.2 LLC layer. We look for FFFF which isn't a used
716 		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
717 		 * but does for the rest.
718 		 */
719 		skb->protocol = htons(ETH_P_802_3);
720 	else
721 		/*
722 		 * Real 802.2 LLC
723 		 */
724 		skb->protocol = htons(ETH_P_802_2);
725 }
726 
727 /**
728  * vlan_remove_tag - remove outer VLAN tag from payload
729  * @skb: skbuff to remove tag from
730  * @vlan_tci: buffer to store value
731  *
732  * Expects the skb to contain a VLAN tag in the payload, and to have skb->data
733  * pointing at the MAC header.
734  *
735  * Returns: a new pointer to skb->data, or NULL on failure to pull.
736  */
737 static inline void *vlan_remove_tag(struct sk_buff *skb, u16 *vlan_tci)
738 {
739 	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
740 
741 	*vlan_tci = ntohs(vhdr->h_vlan_TCI);
742 
743 	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
744 	vlan_set_encap_proto(skb, vhdr);
745 	return __skb_pull(skb, VLAN_HLEN);
746 }
747 
748 /**
749  * skb_vlan_tagged - check if skb is vlan tagged.
750  * @skb: skbuff to query
751  *
752  * Returns: true if the skb is tagged, regardless of whether it is hardware
753  * accelerated or not.
754  */
755 static inline bool skb_vlan_tagged(const struct sk_buff *skb)
756 {
757 	if (!skb_vlan_tag_present(skb) &&
758 	    likely(!eth_type_vlan(skb->protocol)))
759 		return false;
760 
761 	return true;
762 }
763 
764 /**
765  * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
766  * @skb: skbuff to query
767  *
768  * Returns: true if the skb is tagged with multiple vlan headers, regardless
769  * of whether it is hardware accelerated or not.
770  */
771 static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
772 {
773 	__be16 protocol = skb->protocol;
774 
775 	if (!skb_vlan_tag_present(skb)) {
776 		struct vlan_ethhdr *veh;
777 
778 		if (likely(!eth_type_vlan(protocol)))
779 			return false;
780 
781 		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
782 			return false;
783 
784 		veh = skb_vlan_eth_hdr(skb);
785 		protocol = veh->h_vlan_encapsulated_proto;
786 	}
787 
788 	if (!eth_type_vlan(protocol))
789 		return false;
790 
791 	return true;
792 }
793 
794 /**
795  * vlan_features_check - drop unsafe features for skb with multiple tags.
796  * @skb: skbuff to query
797  * @features: features to be checked
798  *
799  * Returns: features without unsafe ones if the skb has multiple tags.
800  */
801 static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
802 						    netdev_features_t features)
803 {
804 	if (skb_vlan_tagged_multi(skb)) {
805 		/* In the case of multi-tagged packets, use a direct mask
806 		 * instead of using netdev_intersect_features(), to make
807 		 * sure that only devices supporting NETIF_F_HW_CSUM will
808 		 * have checksum offloading support.
809 		 */
810 		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
811 			    NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
812 			    NETIF_F_HW_VLAN_STAG_TX;
813 	}
814 
815 	return features;
816 }
817 
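/* Usage sketch (illustrative; "foo_features_check" is a hypothetical driver
 * callback): wiring vlan_features_check() into .ndo_features_check so that
 * double-tagged frames lose offloads the hardware cannot handle.
 *
 *	static netdev_features_t foo_features_check(struct sk_buff *skb,
 *						    struct net_device *dev,
 *						    netdev_features_t features)
 *	{
 *		return vlan_features_check(skb, features);
 *	}
 */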
818 /**
819  * compare_vlan_header - Compare two vlan headers
820  * @h1: Pointer to vlan header
821  * @h2: Pointer to vlan header
822  *
823  * Compare two vlan headers.
824  *
825  * Please note that the alignment of h1 & h2 is only guaranteed to be 16 bits.
826  *
827  * Return: 0 if equal, arbitrary non-zero value if not equal.
828  */
829 static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
830 						const struct vlan_hdr *h2)
831 {
832 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
833 	return *(u32 *)h1 ^ *(u32 *)h2;
834 #else
835 	return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
836 	       ((__force u32)h1->h_vlan_encapsulated_proto ^
837 		(__force u32)h2->h_vlan_encapsulated_proto);
838 #endif
839 }
840 #endif /* !(_LINUX_IF_VLAN_H_) */
841