/*
 * NET3:	Token ring device handling subroutines
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:       3 Feb 97 Paul Norton <pnorton@cts.com> Minor routing fixes.
 *              Added rif table to /proc/net/tr_rif and rif timeout to
 *              /proc/sys/net/token-ring/rif_timeout.
 *              22 Jun 98 Paul Norton <p.norton@computer.org> Rearranged
 *              tr_header and tr_type_trans to handle passing IPX SNAP and
 *              802.2 through the correct layers. Eliminated tr_reformat.
 *
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/net_namespace.h>

static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev);
static void rif_check_expire(unsigned long dummy);

#define TR_SR_DEBUG 0

/*
 *	Each RIF entry we learn is kept this way
 */

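/*
 *	Field summary (as used below): addr is the station the route was
 *	learned from, rcf and rseg hold the routing control field and up to
 *	eight route designators copied from its frames, iface is the ifindex
 *	the frame arrived on, last_used drives ageing, and local_ring marks
 *	stations on our own ring that need no RIF at all.
 */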
struct rif_cache {
	unsigned char addr[TR_ALEN];
	int iface;
	__be16 rcf;
	__be16 rseg[8];
	struct rif_cache *next;
	unsigned long last_used;
	unsigned char local_ring;
};

#define RIF_TABLE_SIZE 32

/*
 *	We hash the RIF cache 32 ways. We do after all have to look it
 *	up a lot.
 */

static struct rif_cache *rif_table[RIF_TABLE_SIZE];

static DEFINE_SPINLOCK(rif_lock);


/*
 *	Garbage disposal timer.
 */

static struct timer_list rif_timer;

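/*
 *	Default RIF entry lifetime: ten minutes' worth of jiffies, tunable
 *	at run time through /proc/sys/net/token-ring/rif_timeout.
 */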
static int sysctl_tr_rif_timeout = 60*10*HZ;

static inline unsigned long rif_hash(const unsigned char *addr)
{
	unsigned long x;

	x = addr[0];
	x = (x << 2) ^ addr[1];
	x = (x << 2) ^ addr[2];
	x = (x << 2) ^ addr[3];
	x = (x << 2) ^ addr[4];
	x = (x << 2) ^ addr[5];

	x ^= x >> 8;

	return x & (RIF_TABLE_SIZE - 1);
}

/*
 *	Put the headers on a token ring packet. Token ring source routing
 *	makes this a little more exciting than on Ethernet.
 */

static int tr_header(struct sk_buff *skb, struct net_device *dev,
		     unsigned short type,
		     const void *daddr, const void *saddr, unsigned len)
{
	struct trh_hdr *trh;
	int hdr_len;

	/*
	 * Add the 802.2 SNAP header if IP, as the IPv4/IPv6 code calls
	 * dev->hard_header directly.
	 */
	if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP)
	{
		struct trllc *trllc;

		hdr_len = sizeof(struct trh_hdr) + sizeof(struct trllc);
		trh = (struct trh_hdr *)skb_push(skb, hdr_len);
		trllc = (struct trllc *)(trh+1);
		trllc->dsap = trllc->ssap = EXTENDED_SAP;
		trllc->llc = UI_CMD;
		trllc->protid[0] = trllc->protid[1] = trllc->protid[2] = 0x00;
		trllc->ethertype = htons(type);
	}
	else
	{
		hdr_len = sizeof(struct trh_hdr);
		trh = (struct trh_hdr *)skb_push(skb, hdr_len);
	}

	trh->ac=AC;
	trh->fc=LLC_FRAME;

	if(saddr)
		memcpy(trh->saddr,saddr,dev->addr_len);
	else
		memcpy(trh->saddr,dev->dev_addr,dev->addr_len);

	/*
	 *	Build the destination and then source route the frame
	 */

	if(daddr)
	{
		memcpy(trh->daddr,daddr,dev->addr_len);
		tr_source_route(skb, trh, dev);
		return hdr_len;
	}

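	/*
	 *	No destination yet: return minus the header length so the
	 *	caller knows the header is only partially built and must be
	 *	completed (via rebuild_header) once the address is resolved.
	 */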
	return -hdr_len;
}

/*
 *	A neighbour discovery of some species (eg arp) has completed. We
 *	can now send the packet.
 */

static int tr_rebuild_header(struct sk_buff *skb)
{
	struct trh_hdr *trh=(struct trh_hdr *)skb->data;
	struct trllc *trllc=(struct trllc *)(skb->data+sizeof(struct trh_hdr));
	struct net_device *dev = skb->dev;

	/*
	 *	FIXME: We don't yet support IPv6 over token rings
	 */

	if(trllc->ethertype != htons(ETH_P_IP)) {
		printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(trllc->ethertype));
		return 0;
	}

#ifdef CONFIG_INET
	if(arp_find(trh->daddr, skb)) {
		return 1;
	}
	else
#endif
	{
		tr_source_route(skb,trh,dev);
		return 0;
	}
}

/*
 *	Some of this is a bit hackish. We intercept RIF information
 *	used for source routing. We also grab IP directly and don't feed
 *	it via SNAP.
 */

__be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev)
{

	struct trh_hdr *trh;
	struct trllc *trllc;
	unsigned riflen=0;

	skb->dev = dev;
	skb_reset_mac_header(skb);
	trh = tr_hdr(skb);

	if(trh->saddr[0] & TR_RII)
		riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;

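	/*
	 *	struct trh_hdr reserves TR_MAXRIFLEN bytes for the RIF; only
	 *	riflen of them are actually present on the wire, so the LLC
	 *	header starts that much earlier and the pull below leaves
	 *	skb->data pointing just past the real MAC header.
	 */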
	trllc = (struct trllc *)(skb->data+sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);

	skb_pull(skb,sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);

	if(*trh->daddr & 0x80)
	{
		if(!memcmp(trh->daddr,dev->broadcast,TR_ALEN))
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}
	else if ( (trh->daddr[0] & 0x01) && (trh->daddr[1] & 0x00) && (trh->daddr[2] & 0x5E))
	{
		skb->pkt_type=PACKET_MULTICAST;
	}
	else if(dev->flags & IFF_PROMISC)
	{
		if(memcmp(trh->daddr, dev->dev_addr, TR_ALEN))
			skb->pkt_type=PACKET_OTHERHOST;
	}

	if ((skb->pkt_type != PACKET_BROADCAST) &&
	    (skb->pkt_type != PACKET_MULTICAST))
		tr_add_rif_info(trh,dev);

	/*
	 * Strip the SNAP header from ARP packets since we don't
	 * pass them through to the 802.2/SNAP layers.
	 */

	if (trllc->dsap == EXTENDED_SAP &&
	    (trllc->ethertype == htons(ETH_P_IP) ||
	     trllc->ethertype == htons(ETH_P_IPV6) ||
	     trllc->ethertype == htons(ETH_P_ARP)))
	{
		skb_pull(skb, sizeof(struct trllc));
		return trllc->ethertype;
	}

	return htons(ETH_P_TR_802_2);
}

/*
 *	We try to do source routing...
 */

void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,
		     struct net_device *dev)
{
	int slack;
	unsigned int hash;
	struct rif_cache *entry;
	unsigned char *olddata;
	unsigned long flags;
	static const unsigned char mcast_func_addr[]
		= {0xC0,0x00,0x00,0x04,0x00,0x00};

	spin_lock_irqsave(&rif_lock, flags);

	/*
	 *	Broadcasts are single route as stated in RFC 1042
	 */
	if( (!memcmp(&(trh->daddr[0]),&(dev->broadcast[0]),TR_ALEN)) ||
	    (!memcmp(&(trh->daddr[0]),&(mcast_func_addr[0]), TR_ALEN))  )
	{
		trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
			       | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
		trh->saddr[0]|=TR_RII;
	}
	else
	{
		hash = rif_hash(trh->daddr);
		/*
		 *	Walk the hash table and look for an entry
		 */
		for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->daddr[0]),TR_ALEN);entry=entry->next);

		/*
		 *	If we found an entry we can route the frame.
		 */
		if(entry)
		{
#if TR_SR_DEBUG
printk("source routing for %pM\n", trh->daddr);
#endif
			if(!entry->local_ring && (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8)
			{
				trh->rcf=entry->rcf;
				memcpy(&trh->rseg[0],&entry->rseg[0],8*sizeof(unsigned short));
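				/*
				 *	The cached RIF was learned from a frame we
				 *	received, so flip the direction bit to walk
				 *	the route in reverse and clear the broadcast
				 *	routing-type bits before reusing it.
				 */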
				trh->rcf^=htons(TR_RCF_DIR_BIT);
				trh->rcf&=htons(0x1fff);	/* Issam Chehab <ichehab@madge1.demon.co.uk> */

				trh->saddr[0]|=TR_RII;
#if TR_SR_DEBUG
				printk("entry found with rcf %04x\n", entry->rcf);
			}
			else
			{
				printk("entry found but without rcf length, local=%02x\n", entry->local_ring);
#endif
			}
			entry->last_used=jiffies;
		}
		else
		{
			/*
			 *	Without the information we simply have to shout
			 *	on the wire. The replies should rapidly clean this
			 *	situation up.
			 */
			trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
				       | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
			trh->saddr[0]|=TR_RII;
#if TR_SR_DEBUG
			printk("no entry in rif table found - broadcasting frame\n");
#endif
		}
	}

	/* Compress the RIF here so we don't have to do it in the driver(s) */
	if (!(trh->saddr[0] & 0x80))
		slack = 18;
	else
		slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8);
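	/*
	 *	slack is however much of the 18-byte maximum RIF area is not
	 *	actually used; pull it off the front and slide the fixed part
	 *	of the header forward so the frame goes out with a compressed
	 *	(or absent) RIF.
	 */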
	olddata = skb->data;
	spin_unlock_irqrestore(&rif_lock, flags);

	skb_pull(skb, slack);
	memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
}

/*
 *	We have learned some new RIF information for our source
 *	routing.
 */

static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
{
	unsigned int hash, rii_p = 0;
	unsigned long flags;
	struct rif_cache *entry;
	unsigned char saddr0;

	spin_lock_irqsave(&rif_lock, flags);
	saddr0 = trh->saddr[0];

	/*
	 *	Firstly see if the entry exists
	 */

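	/*
	 *	Clear the routing information indicator so saddr compares as a
	 *	plain MAC address; rii_p records whether the frame carried any
	 *	route designators beyond the two-byte RCF itself.
	 */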
	if(trh->saddr[0] & TR_RII)
	{
		trh->saddr[0]&=0x7f;
		if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2)
		{
			rii_p = 1;
		}
	}

	hash = rif_hash(trh->saddr);
	for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);entry=entry->next);

	if(entry==NULL)
	{
#if TR_SR_DEBUG
		printk("adding rif_entry: addr:%pM rcf:%04X\n",
		       trh->saddr, ntohs(trh->rcf));
#endif
		/*
		 *	Allocate our new entry. A failure to allocate loses
		 *	us the information. This is harmless.
		 *
		 *	FIXME: We ought to keep some kind of cache size
		 *	limiting and adjust the timers to suit.
		 */
		entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);

		if(!entry)
		{
			printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
			spin_unlock_irqrestore(&rif_lock, flags);
			return;
		}

		memcpy(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);
		entry->iface = dev->ifindex;
		entry->next=rif_table[hash];
		entry->last_used=jiffies;
		rif_table[hash]=entry;

		if (rii_p)
		{
			entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
			memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
			entry->local_ring = 0;
		}
		else
		{
			entry->local_ring = 1;
		}
	}
	else	/* Y. Tahara added */
	{
		/*
		 *	Update existing entries
		 */
		if (!entry->local_ring)
		    if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) &&
			 !(trh->rcf & htons(TR_RCF_BROADCAST_MASK)))
		    {
#if TR_SR_DEBUG
printk("updating rif_entry: addr:%pM rcf:%04X\n",
		trh->saddr, ntohs(trh->rcf));
#endif
			    entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
			    memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
		    }
		entry->last_used=jiffies;
	}
	trh->saddr[0]=saddr0; /* put the routing indicator back for tcpdump */
	spin_unlock_irqrestore(&rif_lock, flags);
}

/*
 *	Scan the cache with a timer and see what we need to throw out.
 */

static void rif_check_expire(unsigned long dummy)
{
	int i;
	unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2;
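
	/*
	 *	By default rescan at half the timeout; if some entry is due to
	 *	expire sooner, the loop below pulls the next wakeup forward.
	 */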

	spin_lock_irqsave(&rif_lock, flags);

	for(i = 0; i < RIF_TABLE_SIZE; i++) {
		struct rif_cache *entry, **pentry;

		pentry = rif_table+i;
		while((entry=*pentry) != NULL) {
			unsigned long expires
				= entry->last_used + sysctl_tr_rif_timeout;

			if (time_before_eq(expires, jiffies)) {
				*pentry = entry->next;
				kfree(entry);
			} else {
				pentry = &entry->next;

				if (time_before(expires, next_interval))
					next_interval = expires;
			}
		}
	}

	spin_unlock_irqrestore(&rif_lock, flags);

	mod_timer(&rif_timer, next_interval);

}

/*
 *	Generate the /proc/net information for the token ring RIF
 *	routing.
 */

#ifdef CONFIG_PROC_FS

static struct rif_cache *rif_get_idx(loff_t pos)
{
	int i;
	struct rif_cache *entry;
	loff_t off = 0;

	for(i = 0; i < RIF_TABLE_SIZE; i++)
		for(entry = rif_table[i]; entry; entry = entry->next) {
			if (off == pos)
				return entry;
			++off;
		}

	return NULL;
}

static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(&rif_lock)
{
	spin_lock_irq(&rif_lock);

	return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int i;
	struct rif_cache *ent = v;

	++*pos;

	if (v == SEQ_START_TOKEN) {
		i = -1;
		goto scan;
	}

	if (ent->next)
		return ent->next;

	i = rif_hash(ent->addr);
 scan:
	while (++i < RIF_TABLE_SIZE) {
		if ((ent = rif_table[i]) != NULL)
			return ent;
	}
	return NULL;
}

static void rif_seq_stop(struct seq_file *seq, void *v)
	__releases(&rif_lock)
{
	spin_unlock_irq(&rif_lock);
}

static int rif_seq_show(struct seq_file *seq, void *v)
{
	int j, rcf_len, segment, brdgnmb;
	struct rif_cache *entry = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
		     "if     TR address       TTL   rcf   routing segments\n");
	else {
		struct net_device *dev = dev_get_by_index(&init_net, entry->iface);
		long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout)
				- (long) jiffies;

		seq_printf(seq, "%s %pM %7li ",
			   dev?dev->name:"?",
			   entry->addr,
			   ttl/HZ);

		if (entry->local_ring)
			seq_puts(seq, "local\n");
		else {

			seq_printf(seq, "%04X", ntohs(entry->rcf));
			rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK)>>8)-2;
			if (rcf_len)
				rcf_len >>= 1;
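			/*
			 *	The RCF length counts the whole RIF in bytes,
			 *	RCF included; what remains is a list of two-byte
			 *	route designators, each a 12-bit ring number plus
			 *	a 4-bit bridge number.
			 */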
			for(j = 1; j < rcf_len; j++) {
				if(j==1) {
					segment=ntohs(entry->rseg[j-1])>>4;
					seq_printf(seq,"  %03X",segment);
				}

				segment=ntohs(entry->rseg[j])>>4;
				brdgnmb=ntohs(entry->rseg[j-1])&0x00f;
				seq_printf(seq,"-%01X-%03X",brdgnmb,segment);
			}
			seq_putc(seq, '\n');
		}

		if (dev)
			dev_put(dev);
	}
	return 0;
}


static const struct seq_operations rif_seq_ops = {
	.start = rif_seq_start,
	.next  = rif_seq_next,
	.stop  = rif_seq_stop,
	.show  = rif_seq_show,
};

static int rif_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rif_seq_ops);
}

static const struct file_operations rif_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = rif_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

#endif

static const struct header_ops tr_header_ops = {
	.create = tr_header,
	.rebuild = tr_rebuild_header,
};

static void tr_setup(struct net_device *dev)
{
	/*
	 *	Configure and register
	 */

	dev->header_ops	= &tr_header_ops;

	dev->type		= ARPHRD_IEEE802_TR;
	dev->hard_header_len	= TR_HLEN;
	dev->mtu		= 2000;
	dev->addr_len		= TR_ALEN;
	dev->tx_queue_len	= 100;	/* Long queues on tr */

	memset(dev->broadcast,0xFF, TR_ALEN);

	/* New-style flags. */
	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
}

/**
 * alloc_trdev - Register token ring device
 * @sizeof_priv: Size of additional driver-private structure to be allocated
 *	for this token ring device
 *
 * Fill in the fields of the device structure with token ring-generic values.
 *
 * Constructs a new net device, complete with a private data area of
 * size @sizeof_priv.  A 32-byte (not bit) alignment is enforced for
 * this private data area.
 */
struct net_device *alloc_trdev(int sizeof_priv)
{
	return alloc_netdev(sizeof_priv, "tr%d", tr_setup);
}

#ifdef CONFIG_SYSCTL
static struct ctl_table tr_table[] = {
	{
		.procname	= "rif_timeout",
		.data		= &sysctl_tr_rif_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{ },
};

static __initdata struct ctl_path tr_path[] = {
	{ .procname = "net", },
	{ .procname = "token-ring", },
	{ }
};
#endif

/*
 *	Called during bootup.  We don't actually have to initialise
 *	too much for this.
 */

static int __init rif_init(void)
{
	rif_timer.expires  = jiffies + sysctl_tr_rif_timeout;
	setup_timer(&rif_timer, rif_check_expire, 0);
	add_timer(&rif_timer);
#ifdef CONFIG_SYSCTL
	register_sysctl_paths(tr_path, tr_table);
#endif
	proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops);
	return 0;
}

module_init(rif_init);

EXPORT_SYMBOL(tr_type_trans);
EXPORT_SYMBOL(alloc_trdev);

MODULE_LICENSE("GPL");