xref: /linux/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c (revision b5a02f503caa0837b64907468359b075990afcce)
/*
 *  This file is part of the Chelsio T4 Ethernet driver for Linux.
 *  Copyright (C) 2003-2014 Chelsio Communications.  All rights reserved.
 *
 *  Written by Deepak (deepak.s@chelsio.com)
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
 *  release for licensing terms and conditions.
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/if_vlan.h>
#include <net/addrconf.h>
#include "cxgb4.h"
#include "clip_tbl.h"

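/* Hash helpers for the software CLIP table: IPv4 keys hash into the lower
 * half of the bucket array, while IPv6 keys (folded by XORing their four
 * 32-bit words) hash into the upper half, so the two address families never
 * share a bucket.
 */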
static inline unsigned int ipv4_clip_hash(struct clip_tbl *c, const u32 *key)
{
	unsigned int clipt_size_half = c->clipt_size / 2;

	return jhash_1word(*key, 0) % clipt_size_half;
}

static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
{
	unsigned int clipt_size_half = d->clipt_size / 2;
	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];

	return clipt_size_half +
		(jhash_1word(xor, 0) % clipt_size_half);
}

static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
				   int addr_len)
{
	return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) :
				ipv6_clip_hash(ctbl, addr);
}

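/* Issue a FW_CLIP_CMD over the firmware mailbox to allocate (clip6_get_mbox)
 * or free (clip6_release_mbox) a hardware CLIP slot for the given local IPv6
 * address.
 */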
static int clip6_get_mbox(const struct net_device *dev,
			  const struct in6_addr *lip)
{
	struct adapter *adap = netdev2adap(dev);
	struct fw_clip_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
	*(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
	*(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}

static int clip6_release_mbox(const struct net_device *dev,
			      const struct in6_addr *lip)
{
	struct adapter *adap = netdev2adap(dev);
	struct fw_clip_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
	*(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
	*(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}

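/* cxgb4_clip_get - look up @lip in the CLIP table, allocating a new entry
 * (and, for IPv6, a hardware CLIP slot via the firmware) if it is not
 * already present, and take a reference on it.  Returns 0 on success, or a
 * negative errno if no free entry is available or the firmware call fails.
 */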
int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
{
	struct adapter *adap = netdev2adap(dev);
	struct clip_tbl *ctbl = adap->clipt;
	struct clip_entry *ce, *cte;
	u32 *addr = (u32 *)lip;
	int hash;
	int addr_len;
	int ret = 0;

	if (v6)
		addr_len = 16;
	else
		addr_len = 4;

	hash = clip_addr_hash(ctbl, addr, addr_len);

	read_lock_bh(&ctbl->lock);
	list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
		if (addr_len == cte->addr_len &&
		    memcmp(lip, cte->addr, cte->addr_len) == 0) {
			ce = cte;
			read_unlock_bh(&ctbl->lock);
			goto found;
		}
	}
	read_unlock_bh(&ctbl->lock);

	write_lock_bh(&ctbl->lock);
	if (!list_empty(&ctbl->ce_free_head)) {
		ce = list_first_entry(&ctbl->ce_free_head,
				      struct clip_entry, list);
		list_del(&ce->list);
		INIT_LIST_HEAD(&ce->list);
		spin_lock_init(&ce->lock);
		atomic_set(&ce->refcnt, 0);
		atomic_dec(&ctbl->nfree);
		ce->addr_len = addr_len;
		memcpy(ce->addr, lip, addr_len);
		list_add_tail(&ce->list, &ctbl->hash_list[hash]);
		if (v6) {
			ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
			if (ret) {
				write_unlock_bh(&ctbl->lock);
				return ret;
			}
		}
	} else {
		write_unlock_bh(&ctbl->lock);
		return -ENOMEM;
	}
	write_unlock_bh(&ctbl->lock);
found:
	atomic_inc(&ce->refcnt);

	return 0;
}
EXPORT_SYMBOL(cxgb4_clip_get);

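/* cxgb4_clip_release - drop one reference on the CLIP entry for @lip.  When
 * the last reference goes away the entry is returned to the free list and,
 * for IPv6, the corresponding hardware CLIP slot is freed via the firmware.
 */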
void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
{
	struct adapter *adap = netdev2adap(dev);
	struct clip_tbl *ctbl = adap->clipt;
	struct clip_entry *ce, *cte;
	u32 *addr = (u32 *)lip;
	int hash;
	int addr_len;

	if (v6)
		addr_len = 16;
	else
		addr_len = 4;

	hash = clip_addr_hash(ctbl, addr, addr_len);

	read_lock_bh(&ctbl->lock);
	list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
		if (addr_len == cte->addr_len &&
		    memcmp(lip, cte->addr, cte->addr_len) == 0) {
			ce = cte;
			read_unlock_bh(&ctbl->lock);
			goto found;
		}
	}
	read_unlock_bh(&ctbl->lock);

	return;
found:
	write_lock_bh(&ctbl->lock);
	spin_lock_bh(&ce->lock);
	if (atomic_dec_and_test(&ce->refcnt)) {
		list_del(&ce->list);
		INIT_LIST_HEAD(&ce->list);
		list_add_tail(&ce->list, &ctbl->ce_free_head);
		atomic_inc(&ctbl->nfree);
		if (v6)
			clip6_release_mbox(dev, (const struct in6_addr *)lip);
	}
	spin_unlock_bh(&ce->lock);
	write_unlock_bh(&ctbl->lock);
}
EXPORT_SYMBOL(cxgb4_clip_release);
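
/* Illustrative usage (not part of this driver): a hypothetical upper-layer
 * offload module that terminates traffic on a local IPv6 address "laddr6"
 * (a struct in6_addr) would typically bracket its use of that address with
 * a get/release pair on the port's net_device:
 *
 *	ret = cxgb4_clip_get(netdev, (const u32 *)laddr6.s6_addr, 1);
 *	if (ret)
 *		return ret;
 *	... offload connections using laddr6 ...
 *	cxgb4_clip_release(netdev, (const u32 *)laddr6.s6_addr, 1);
 */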

/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
 * a physical device.
 * The physical device reference is needed to send the actual CLIP command.
 */
static int cxgb4_update_dev_clip(struct net_device *root_dev,
				 struct net_device *dev)
{
	struct inet6_dev *idev = NULL;
	struct inet6_ifaddr *ifa;
	int ret = 0;

	idev = __in6_dev_get(root_dev);
	if (!idev)
		return ret;

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		ret = cxgb4_clip_get(dev, (const u32 *)ifa->addr.s6_addr, 1);
		if (ret < 0)
			break;
	}
	read_unlock_bh(&idev->lock);

	return ret;
}

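/* cxgb4_update_root_dev_clip - install the IPv6 addresses of @dev and of any
 * bond master or VLAN device stacked on top of it.  The upper-device and
 * VLAN lookups below are the _rcu variants, so the caller is expected to
 * hold the RCU read lock.
 */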
int cxgb4_update_root_dev_clip(struct net_device *dev)
{
	struct net_device *root_dev = NULL;
	int i, ret = 0;

	/* First populate the real net device's IPv6 addresses */
	ret = cxgb4_update_dev_clip(dev, dev);
	if (ret)
		return ret;

	/* Parse all bond and vlan devices layered on top of the physical dev */
	root_dev = netdev_master_upper_dev_get_rcu(dev);
	if (root_dev) {
		ret = cxgb4_update_dev_clip(root_dev, dev);
		if (ret)
			return ret;
	}

	for (i = 0; i < VLAN_N_VID; i++) {
		root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
		if (!root_dev)
			continue;

		ret = cxgb4_update_dev_clip(root_dev, dev);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(cxgb4_update_root_dev_clip);

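/* clip_tbl_show - seq_file handler that dumps every CLIP entry (address and
 * reference count) followed by the number of free entries; intended for the
 * driver's debugfs interface.
 */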
int clip_tbl_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	struct clip_tbl *ctbl = adapter->clipt;
	struct clip_entry *ce;
	char ip[60];
	int i;

	read_lock_bh(&ctbl->lock);

	seq_puts(seq, "IP Address                  Users\n");
	for (i = 0; i < ctbl->clipt_size; ++i) {
		list_for_each_entry(ce, &ctbl->hash_list[i], list) {
			ip[0] = '\0';
			if (ce->addr_len == 16)
				sprintf(ip, "%pI6c", ce->addr);
			else
				sprintf(ip, "%pI4c", ce->addr);
			seq_printf(seq, "%-25s   %u\n", ip,
				   atomic_read(&ce->refcnt));
		}
	}
	seq_printf(seq, "Free clip entries : %d\n", atomic_read(&ctbl->nfree));

	read_unlock_bh(&ctbl->lock);

	return 0;
}

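/* t4_init_clip_tbl - allocate and initialise the software CLIP table for the
 * hardware CLIP region [clipt_start, clipt_end].  The hash bucket list heads
 * are allocated inline after struct clip_tbl, and one clip_entry per hardware
 * slot is preallocated and placed on the free list.
 */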
struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
				  unsigned int clipt_end)
{
	struct clip_entry *cl_list;
	struct clip_tbl *ctbl;
	unsigned int clipt_size;
	int i;

	if (clipt_start >= clipt_end)
		return NULL;
	clipt_size = clipt_end - clipt_start + 1;
	if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
		return NULL;

	ctbl = t4_alloc_mem(sizeof(*ctbl) +
			    clipt_size*sizeof(struct list_head));
	if (!ctbl)
		return NULL;

	ctbl->clipt_start = clipt_start;
	ctbl->clipt_size = clipt_size;
	INIT_LIST_HEAD(&ctbl->ce_free_head);

	atomic_set(&ctbl->nfree, clipt_size);
	rwlock_init(&ctbl->lock);

	for (i = 0; i < ctbl->clipt_size; ++i)
		INIT_LIST_HEAD(&ctbl->hash_list[i]);

	cl_list = t4_alloc_mem(clipt_size*sizeof(struct clip_entry));
	if (!cl_list) {
		t4_free_mem(ctbl);
		return NULL;
	}
	ctbl->cl_list = (void *)cl_list;

	for (i = 0; i < clipt_size; i++) {
		INIT_LIST_HEAD(&cl_list[i].list);
		list_add_tail(&cl_list[i].list, &ctbl->ce_free_head);
	}

	return ctbl;
}

void t4_cleanup_clip_tbl(struct adapter *adap)
{
	struct clip_tbl *ctbl = adap->clipt;

	if (ctbl) {
		if (ctbl->cl_list)
			t4_free_mem(ctbl->cl_list);
		t4_free_mem(ctbl);
	}
}
EXPORT_SYMBOL(t4_cleanup_clip_tbl);