xref: /linux/drivers/infiniband/hw/mlx4/alias_GUID.c (revision a0c64a17aba88c29d55ba989b96ac6ccb1268f0a)
1*a0c64a17SJack Morgenstein /*
2*a0c64a17SJack Morgenstein  * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
3*a0c64a17SJack Morgenstein  *
4*a0c64a17SJack Morgenstein  * This software is available to you under a choice of one of two
5*a0c64a17SJack Morgenstein  * licenses.  You may choose to be licensed under the terms of the GNU
6*a0c64a17SJack Morgenstein  * General Public License (GPL) Version 2, available from the file
7*a0c64a17SJack Morgenstein  * COPYING in the main directory of this source tree, or the
8*a0c64a17SJack Morgenstein  * OpenIB.org BSD license below:
9*a0c64a17SJack Morgenstein  *
10*a0c64a17SJack Morgenstein  *     Redistribution and use in source and binary forms, with or
11*a0c64a17SJack Morgenstein  *     without modification, are permitted provided that the following
12*a0c64a17SJack Morgenstein  *     conditions are met:
13*a0c64a17SJack Morgenstein  *
14*a0c64a17SJack Morgenstein  *      - Redistributions of source code must retain the above
15*a0c64a17SJack Morgenstein  *        copyright notice, this list of conditions and the following
16*a0c64a17SJack Morgenstein  *        disclaimer.
17*a0c64a17SJack Morgenstein  *
18*a0c64a17SJack Morgenstein  *      - Redistributions in binary form must reproduce the above
19*a0c64a17SJack Morgenstein  *        copyright notice, this list of conditions and the following
20*a0c64a17SJack Morgenstein  *        disclaimer in the documentation and/or other materials
21*a0c64a17SJack Morgenstein  *        provided with the distribution.
22*a0c64a17SJack Morgenstein  *
23*a0c64a17SJack Morgenstein  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24*a0c64a17SJack Morgenstein  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25*a0c64a17SJack Morgenstein  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26*a0c64a17SJack Morgenstein  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27*a0c64a17SJack Morgenstein  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28*a0c64a17SJack Morgenstein  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29*a0c64a17SJack Morgenstein  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30*a0c64a17SJack Morgenstein  * SOFTWARE.
31*a0c64a17SJack Morgenstein  */
32*a0c64a17SJack Morgenstein  /***********************************************************/
33*a0c64a17SJack Morgenstein /* This file supports the handling of the Alias GUID feature. */
34*a0c64a17SJack Morgenstein /***********************************************************/
35*a0c64a17SJack Morgenstein #include <rdma/ib_mad.h>
36*a0c64a17SJack Morgenstein #include <rdma/ib_smi.h>
37*a0c64a17SJack Morgenstein #include <rdma/ib_cache.h>
38*a0c64a17SJack Morgenstein #include <rdma/ib_sa.h>
39*a0c64a17SJack Morgenstein #include <rdma/ib_pack.h>
40*a0c64a17SJack Morgenstein #include <linux/mlx4/cmd.h>
41*a0c64a17SJack Morgenstein #include <linux/module.h>
42*a0c64a17SJack Morgenstein #include <linux/init.h>
43*a0c64a17SJack Morgenstein #include <linux/errno.h>
44*a0c64a17SJack Morgenstein #include <rdma/ib_user_verbs.h>
45*a0c64a17SJack Morgenstein #include <linux/delay.h>
46*a0c64a17SJack Morgenstein #include "mlx4_ib.h"
47*a0c64a17SJack Morgenstein 
48*a0c64a17SJack Morgenstein /*
49*a0c64a17SJack Morgenstein The driver keeps the current state of all GUIDs as they are in the HW.
50*a0c64a17SJack Morgenstein Whenever an SMP MAD GUIDInfo record is received, the data is cached.
51*a0c64a17SJack Morgenstein */
52*a0c64a17SJack Morgenstein 
53*a0c64a17SJack Morgenstein struct mlx4_alias_guid_work_context {
54*a0c64a17SJack Morgenstein 	u8 port;
55*a0c64a17SJack Morgenstein 	struct mlx4_ib_dev     *dev ;
56*a0c64a17SJack Morgenstein 	struct ib_sa_query     *sa_query;
57*a0c64a17SJack Morgenstein 	struct completion	done;
58*a0c64a17SJack Morgenstein 	int			query_id;
59*a0c64a17SJack Morgenstein 	struct list_head	list;
60*a0c64a17SJack Morgenstein 	int			block_num;
61*a0c64a17SJack Morgenstein };
62*a0c64a17SJack Morgenstein 
63*a0c64a17SJack Morgenstein struct mlx4_next_alias_guid_work {
64*a0c64a17SJack Morgenstein 	u8 port;
65*a0c64a17SJack Morgenstein 	u8 block_num;
66*a0c64a17SJack Morgenstein 	struct mlx4_sriov_alias_guid_info_rec_det rec_det;
67*a0c64a17SJack Morgenstein };
68*a0c64a17SJack Morgenstein 
69*a0c64a17SJack Morgenstein 
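/*
 * Cache the alias GUIDs carried in a GUIDInfo record for the given block
 * and port.  Only the entries whose bit is set in the stored guid_indexes
 * mask (bits 4..11) are copied into the per-slave guid_cache.
 */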
70*a0c64a17SJack Morgenstein void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
71*a0c64a17SJack Morgenstein 					 u8 port_num, u8 *p_data)
72*a0c64a17SJack Morgenstein {
73*a0c64a17SJack Morgenstein 	int i;
74*a0c64a17SJack Morgenstein 	u64 guid_indexes;
75*a0c64a17SJack Morgenstein 	int slave_id;
76*a0c64a17SJack Morgenstein 	int port_index = port_num - 1;
77*a0c64a17SJack Morgenstein 
78*a0c64a17SJack Morgenstein 	if (!mlx4_is_master(dev->dev))
79*a0c64a17SJack Morgenstein 		return;
80*a0c64a17SJack Morgenstein 
81*a0c64a17SJack Morgenstein 	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
82*a0c64a17SJack Morgenstein 				   ports_guid[port_num - 1].
83*a0c64a17SJack Morgenstein 				   all_rec_per_port[block_num].guid_indexes);
84*a0c64a17SJack Morgenstein 	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);
85*a0c64a17SJack Morgenstein 
86*a0c64a17SJack Morgenstein 	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
87*a0c64a17SJack Morgenstein 		/* The location of the specific index runs from bit 4
88*a0c64a17SJack Morgenstein 		 * through bit 11 */
89*a0c64a17SJack Morgenstein 		if (test_bit(i + 4, (unsigned long *)&guid_indexes)) {
90*a0c64a17SJack Morgenstein 			slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
91*a0c64a17SJack Morgenstein 			if (slave_id >= dev->dev->num_slaves) {
92*a0c64a17SJack Morgenstein 				pr_debug("The last slave: %d\n", slave_id);
93*a0c64a17SJack Morgenstein 				return;
94*a0c64a17SJack Morgenstein 			}
95*a0c64a17SJack Morgenstein 
96*a0c64a17SJack Morgenstein 			/* cache the guid: */
97*a0c64a17SJack Morgenstein 			memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
98*a0c64a17SJack Morgenstein 			       &p_data[i * GUID_REC_SIZE],
99*a0c64a17SJack Morgenstein 			       GUID_REC_SIZE);
100*a0c64a17SJack Morgenstein 		} else
101*a0c64a17SJack Morgenstein 			pr_debug("Guid number: %d in block: %d"
102*a0c64a17SJack Morgenstein 				 " was not updated\n", i, block_num);
103*a0c64a17SJack Morgenstein 	}
104*a0c64a17SJack Morgenstein }
105*a0c64a17SJack Morgenstein 
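/*
 * Return the cached alias GUID for the given port and index, or all ones
 * if the index is out of range.
 */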
106*a0c64a17SJack Morgenstein static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index)
107*a0c64a17SJack Morgenstein {
108*a0c64a17SJack Morgenstein 	if (index >= NUM_ALIAS_GUID_PER_PORT) {
109*a0c64a17SJack Morgenstein 		pr_err("%s: ERROR: asked for index:%d\n", __func__, index);
110*a0c64a17SJack Morgenstein 		return  (__force __be64) ((u64) 0xFFFFFFFFFFFFFFFFUL);
111*a0c64a17SJack Morgenstein 	}
112*a0c64a17SJack Morgenstein 	return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
113*a0c64a17SJack Morgenstein }
114*a0c64a17SJack Morgenstein 
115*a0c64a17SJack Morgenstein 
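/*
 * Component mask bit for alias GUID 'index' within a GUIDInfo record;
 * GUID 0 corresponds to comp_mask bit 4 (IB_SA_GUIDINFO_REC_GID0).
 */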
116*a0c64a17SJack Morgenstein static ib_sa_comp_mask get_aguid_comp_mask_from_ix(int index)
117*a0c64a17SJack Morgenstein {
118*a0c64a17SJack Morgenstein 	return IB_SA_COMP_MASK(4 + index);
119*a0c64a17SJack Morgenstein }
120*a0c64a17SJack Morgenstein 
121*a0c64a17SJack Morgenstein /*
122*a0c64a17SJack Morgenstein  * Whenever a new GUID is set/unset (GUID table change), create an event and
123*a0c64a17SJack Morgenstein  * notify the relevant slave (the master should also be notified).
124*a0c64a17SJack Morgenstein  * If the GUID value is not the same as in the cache, the slave is not
125*a0c64a17SJack Morgenstein  * updated; in that case it waits for the smp_snoop or the port management
126*a0c64a17SJack Morgenstein  * event to call the function again and update the slave.
127*a0c64a17SJack Morgenstein  * block_number - the index of the block (16 blocks available)
128*a0c64a17SJack Morgenstein  * port_number - 1 or 2
129*a0c64a17SJack Morgenstein  */
130*a0c64a17SJack Morgenstein void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
131*a0c64a17SJack Morgenstein 					  int block_num, u8 port_num,
132*a0c64a17SJack Morgenstein 					  u8 *p_data)
133*a0c64a17SJack Morgenstein {
134*a0c64a17SJack Morgenstein 	int i;
135*a0c64a17SJack Morgenstein 	u64 guid_indexes;
136*a0c64a17SJack Morgenstein 	int slave_id;
137*a0c64a17SJack Morgenstein 	enum slave_port_state new_state;
138*a0c64a17SJack Morgenstein 	enum slave_port_state prev_state;
139*a0c64a17SJack Morgenstein 	__be64 tmp_cur_ag, form_cache_ag;
140*a0c64a17SJack Morgenstein 	enum slave_port_gen_event gen_event;
141*a0c64a17SJack Morgenstein 
142*a0c64a17SJack Morgenstein 	if (!mlx4_is_master(dev->dev))
143*a0c64a17SJack Morgenstein 		return;
144*a0c64a17SJack Morgenstein 
145*a0c64a17SJack Morgenstein 	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
146*a0c64a17SJack Morgenstein 				   ports_guid[port_num - 1].
147*a0c64a17SJack Morgenstein 				   all_rec_per_port[block_num].guid_indexes);
148*a0c64a17SJack Morgenstein 	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);
149*a0c64a17SJack Morgenstein 
150*a0c64a17SJack Morgenstein 	/* calculate which slaves are affected and notify them */
151*a0c64a17SJack Morgenstein 	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
152*a0c64a17SJack Morgenstein 		/* the location of the specific index runs from bits 4..11 */
153*a0c64a17SJack Morgenstein 		if (!(test_bit(i + 4, (unsigned long *)&guid_indexes)))
154*a0c64a17SJack Morgenstein 			continue;
155*a0c64a17SJack Morgenstein 
156*a0c64a17SJack Morgenstein 		slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
157*a0c64a17SJack Morgenstein 		if (slave_id >= dev->dev->num_slaves)
158*a0c64a17SJack Morgenstein 			return;
159*a0c64a17SJack Morgenstein 		tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
160*a0c64a17SJack Morgenstein 		form_cache_ag = get_cached_alias_guid(dev, port_num,
161*a0c64a17SJack Morgenstein 					(NUM_ALIAS_GUID_IN_REC * block_num) + i);
162*a0c64a17SJack Morgenstein 		/*
163*a0c64a17SJack Morgenstein 		 * Check whether the GUID is the same as in the cache.
164*a0c64a17SJack Morgenstein 		 * If it is different, wait for the smp_snoop or the port mgmt
165*a0c64a17SJack Morgenstein 		 * change event to update the slave on its port state change
166*a0c64a17SJack Morgenstein 		 */
167*a0c64a17SJack Morgenstein 		if (tmp_cur_ag != form_cache_ag)
168*a0c64a17SJack Morgenstein 			continue;
169*a0c64a17SJack Morgenstein 		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
170*a0c64a17SJack Morgenstein 
171*a0c64a17SJack Morgenstein 		/* two cases: valid GUID, and invalid GUID */
172*a0c64a17SJack Morgenstein 
173*a0c64a17SJack Morgenstein 		if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /*valid GUID*/
174*a0c64a17SJack Morgenstein 			prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num);
175*a0c64a17SJack Morgenstein 			new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
176*a0c64a17SJack Morgenstein 								  MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
177*a0c64a17SJack Morgenstein 								  &gen_event);
178*a0c64a17SJack Morgenstein 			pr_debug("slave: %d, port: %d prev_port_state: %d,"
179*a0c64a17SJack Morgenstein 				 " new_port_state: %d, gen_event: %d\n",
180*a0c64a17SJack Morgenstein 				 slave_id, port_num, prev_state, new_state, gen_event);
181*a0c64a17SJack Morgenstein 			if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
182*a0c64a17SJack Morgenstein 				pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
183*a0c64a17SJack Morgenstein 					 slave_id, port_num);
184*a0c64a17SJack Morgenstein 				mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
185*a0c64a17SJack Morgenstein 							       port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
186*a0c64a17SJack Morgenstein 			}
187*a0c64a17SJack Morgenstein 		} else { /* request to invalidate GUID */
188*a0c64a17SJack Morgenstein 			set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
189*a0c64a17SJack Morgenstein 						      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
190*a0c64a17SJack Morgenstein 						      &gen_event);
191*a0c64a17SJack Morgenstein 			pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
192*a0c64a17SJack Morgenstein 				 slave_id, port_num);
193*a0c64a17SJack Morgenstein 			mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num,
194*a0c64a17SJack Morgenstein 						       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
195*a0c64a17SJack Morgenstein 		}
196*a0c64a17SJack Morgenstein 	}
197*a0c64a17SJack Morgenstein }
198*a0c64a17SJack Morgenstein 
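/*
 * Completion handler for the GUIDInfo SA query issued from set_guid_rec().
 * On success the returned record is stored in the per-port alias GUID state
 * and the affected slaves are notified; in all cases the per-port work is
 * requeued (unless the device is going down) and the callback context is
 * either freed or completed.
 */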
199*a0c64a17SJack Morgenstein static void aliasguid_query_handler(int status,
200*a0c64a17SJack Morgenstein 				    struct ib_sa_guidinfo_rec *guid_rec,
201*a0c64a17SJack Morgenstein 				    void *context)
202*a0c64a17SJack Morgenstein {
203*a0c64a17SJack Morgenstein 	struct mlx4_ib_dev *dev;
204*a0c64a17SJack Morgenstein 	struct mlx4_alias_guid_work_context *cb_ctx = context;
205*a0c64a17SJack Morgenstein 	u8 port_index ;
206*a0c64a17SJack Morgenstein 	int i;
207*a0c64a17SJack Morgenstein 	struct mlx4_sriov_alias_guid_info_rec_det *rec;
208*a0c64a17SJack Morgenstein 	unsigned long flags, flags1;
209*a0c64a17SJack Morgenstein 
210*a0c64a17SJack Morgenstein 	if (!context)
211*a0c64a17SJack Morgenstein 		return;
212*a0c64a17SJack Morgenstein 
213*a0c64a17SJack Morgenstein 	dev = cb_ctx->dev;
214*a0c64a17SJack Morgenstein 	port_index = cb_ctx->port - 1;
215*a0c64a17SJack Morgenstein 	rec = &dev->sriov.alias_guid.ports_guid[port_index].
216*a0c64a17SJack Morgenstein 		all_rec_per_port[cb_ctx->block_num];
217*a0c64a17SJack Morgenstein 
218*a0c64a17SJack Morgenstein 	if (status) {
219*a0c64a17SJack Morgenstein 		rec->status = MLX4_GUID_INFO_STATUS_IDLE;
220*a0c64a17SJack Morgenstein 		pr_debug("(port: %d) failed: status = %d\n",
221*a0c64a17SJack Morgenstein 			 cb_ctx->port, status);
222*a0c64a17SJack Morgenstein 		goto out;
223*a0c64a17SJack Morgenstein 	}
224*a0c64a17SJack Morgenstein 
225*a0c64a17SJack Morgenstein 	if (guid_rec->block_num != cb_ctx->block_num) {
226*a0c64a17SJack Morgenstein 		pr_err("block num mismatch: %d != %d\n",
227*a0c64a17SJack Morgenstein 		       cb_ctx->block_num, guid_rec->block_num);
228*a0c64a17SJack Morgenstein 		goto out;
229*a0c64a17SJack Morgenstein 	}
230*a0c64a17SJack Morgenstein 
231*a0c64a17SJack Morgenstein 	pr_debug("lid/port: %d/%d, block_num: %d\n",
232*a0c64a17SJack Morgenstein 		 be16_to_cpu(guid_rec->lid), cb_ctx->port,
233*a0c64a17SJack Morgenstein 		 guid_rec->block_num);
234*a0c64a17SJack Morgenstein 
235*a0c64a17SJack Morgenstein 	rec = &dev->sriov.alias_guid.ports_guid[port_index].
236*a0c64a17SJack Morgenstein 		all_rec_per_port[guid_rec->block_num];
237*a0c64a17SJack Morgenstein 
238*a0c64a17SJack Morgenstein 	rec->status = MLX4_GUID_INFO_STATUS_SET;
239*a0c64a17SJack Morgenstein 	rec->method = MLX4_GUID_INFO_RECORD_SET;
240*a0c64a17SJack Morgenstein 
241*a0c64a17SJack Morgenstein 	for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
242*a0c64a17SJack Morgenstein 		__be64 tmp_cur_ag;
243*a0c64a17SJack Morgenstein 		tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
244*a0c64a17SJack Morgenstein 		/* Check whether the SM declined to assign one of the records.
245*a0c64a17SJack Morgenstein 		 * If it did, and the record was not a sysadmin request,
246*a0c64a17SJack Morgenstein 		 * ask the SM to give a new GUID (instead of the driver request).
247*a0c64a17SJack Morgenstein 		 */
248*a0c64a17SJack Morgenstein 		if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
249*a0c64a17SJack Morgenstein 			mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
250*a0c64a17SJack Morgenstein 				     "block_num: %d was declined by SM, "
251*a0c64a17SJack Morgenstein 				     "ownership by %d (0 = driver, 1=sysAdmin,"
252*a0c64a17SJack Morgenstein 				     " 2=None)\n", __func__, i,
253*a0c64a17SJack Morgenstein 				     guid_rec->block_num, rec->ownership);
254*a0c64a17SJack Morgenstein 			if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
255*a0c64a17SJack Morgenstein 				/* if it is driver-assigned, ask the SM for a new GUID */
256*a0c64a17SJack Morgenstein 				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
257*a0c64a17SJack Morgenstein 					MLX4_NOT_SET_GUID;
258*a0c64a17SJack Morgenstein 
259*a0c64a17SJack Morgenstein 				/* Mark the record as not assigned, and let it
260*a0c64a17SJack Morgenstein 				 * be sent again in the next scheduled work. */
261*a0c64a17SJack Morgenstein 				rec->status = MLX4_GUID_INFO_STATUS_IDLE;
262*a0c64a17SJack Morgenstein 				rec->guid_indexes |= get_aguid_comp_mask_from_ix(i);
263*a0c64a17SJack Morgenstein 			}
264*a0c64a17SJack Morgenstein 		} else {
265*a0c64a17SJack Morgenstein 		       /* properly assigned record. */
266*a0c64a17SJack Morgenstein 		       /* Save the GUID we just got from the SM in the
267*a0c64a17SJack Morgenstein 			* admin_guid so that it is persistent; subsequent
268*a0c64a17SJack Morgenstein 			* requests to the SM will then ask for the same GUID */
269*a0c64a17SJack Morgenstein 			if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
270*a0c64a17SJack Morgenstein 			    tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) {
271*a0c64a17SJack Morgenstein 				/* the sysadmin assignment failed.*/
272*a0c64a17SJack Morgenstein 				mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
273*a0c64a17SJack Morgenstein 					     " admin guid after SysAdmin "
274*a0c64a17SJack Morgenstein 					     "configuration. "
275*a0c64a17SJack Morgenstein 					     "Record num %d in block_num:%d "
276*a0c64a17SJack Morgenstein 					     "was declined by SM, "
277*a0c64a17SJack Morgenstein 					     "new val(0x%llx) was kept\n",
278*a0c64a17SJack Morgenstein 					      __func__, i,
279*a0c64a17SJack Morgenstein 					     guid_rec->block_num,
280*a0c64a17SJack Morgenstein 					     be64_to_cpu(*(__be64 *) &
281*a0c64a17SJack Morgenstein 							 rec->all_recs[i * GUID_REC_SIZE]));
282*a0c64a17SJack Morgenstein 			} else {
283*a0c64a17SJack Morgenstein 				memcpy(&rec->all_recs[i * GUID_REC_SIZE],
284*a0c64a17SJack Morgenstein 				       &guid_rec->guid_info_list[i * GUID_REC_SIZE],
285*a0c64a17SJack Morgenstein 				       GUID_REC_SIZE);
286*a0c64a17SJack Morgenstein 			}
287*a0c64a17SJack Morgenstein 		}
288*a0c64a17SJack Morgenstein 	}
289*a0c64a17SJack Morgenstein 	/*
290*a0c64a17SJack Morgenstein 	The function is called here to cover the case where the
291*a0c64a17SJack Morgenstein 	SM doesn't send an SMP; the driver then notifies the slave
292*a0c64a17SJack Morgenstein 	from the SA response.
293*a0c64a17SJack Morgenstein 	*/
294*a0c64a17SJack Morgenstein 	mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num,
295*a0c64a17SJack Morgenstein 					     cb_ctx->port,
296*a0c64a17SJack Morgenstein 					     guid_rec->guid_info_list);
297*a0c64a17SJack Morgenstein out:
298*a0c64a17SJack Morgenstein 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
299*a0c64a17SJack Morgenstein 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
300*a0c64a17SJack Morgenstein 	if (!dev->sriov.is_going_down)
301*a0c64a17SJack Morgenstein 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
302*a0c64a17SJack Morgenstein 				   &dev->sriov.alias_guid.ports_guid[port_index].
303*a0c64a17SJack Morgenstein 				   alias_guid_work, 0);
304*a0c64a17SJack Morgenstein 	if (cb_ctx->sa_query) {
305*a0c64a17SJack Morgenstein 		list_del(&cb_ctx->list);
306*a0c64a17SJack Morgenstein 		kfree(cb_ctx);
307*a0c64a17SJack Morgenstein 	} else
308*a0c64a17SJack Morgenstein 		complete(&cb_ctx->done);
309*a0c64a17SJack Morgenstein 	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
310*a0c64a17SJack Morgenstein 	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
311*a0c64a17SJack Morgenstein }
312*a0c64a17SJack Morgenstein 
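/*
 * Reset the given record's status to IDLE and its method to SET, and
 * recompute the comp_mask of GUID indexes that need to be (re)assigned
 * by the SM.
 */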
313*a0c64a17SJack Morgenstein static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
314*a0c64a17SJack Morgenstein {
315*a0c64a17SJack Morgenstein 	int i;
316*a0c64a17SJack Morgenstein 	u64 cur_admin_val;
317*a0c64a17SJack Morgenstein 	ib_sa_comp_mask comp_mask = 0;
318*a0c64a17SJack Morgenstein 
319*a0c64a17SJack Morgenstein 	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
320*a0c64a17SJack Morgenstein 		= MLX4_GUID_INFO_STATUS_IDLE;
321*a0c64a17SJack Morgenstein 	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
322*a0c64a17SJack Morgenstein 		= MLX4_GUID_INFO_RECORD_SET;
323*a0c64a17SJack Morgenstein 
324*a0c64a17SJack Morgenstein 	/* calculate the comp_mask for that record.*/
325*a0c64a17SJack Morgenstein 	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
326*a0c64a17SJack Morgenstein 		cur_admin_val =
327*a0c64a17SJack Morgenstein 			*(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
328*a0c64a17SJack Morgenstein 			all_rec_per_port[index].all_recs[GUID_REC_SIZE * i];
329*a0c64a17SJack Morgenstein 		/*
330*a0c64a17SJack Morgenstein 		check the admin value: if it is marked for delete (~00LL), or
331*a0c64a17SJack Morgenstein 		it is the first GUID of the first record (the HW GUID), or
332*a0c64a17SJack Morgenstein 		the record is not owned by the sysadmin and the SM doesn't
333*a0c64a17SJack Morgenstein 		need to assign GUIDs, then don't put it up for assignment.
334*a0c64a17SJack Morgenstein 		*/
335*a0c64a17SJack Morgenstein 		if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
336*a0c64a17SJack Morgenstein 		    (!index && !i) ||
337*a0c64a17SJack Morgenstein 		    MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid.
338*a0c64a17SJack Morgenstein 		    ports_guid[port - 1].all_rec_per_port[index].ownership)
339*a0c64a17SJack Morgenstein 			continue;
340*a0c64a17SJack Morgenstein 		comp_mask |= get_aguid_comp_mask_from_ix(i);
341*a0c64a17SJack Morgenstein 	}
342*a0c64a17SJack Morgenstein 	dev->sriov.alias_guid.ports_guid[port - 1].
343*a0c64a17SJack Morgenstein 		all_rec_per_port[index].guid_indexes = comp_mask;
344*a0c64a17SJack Morgenstein }
345*a0c64a17SJack Morgenstein 
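/*
 * Send a GUIDInfo SET request for one record block to the SA.  If the port
 * is not yet active or the query cannot be issued, the record is
 * invalidated and the per-port work is rescheduled.
 */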
346*a0c64a17SJack Morgenstein static int set_guid_rec(struct ib_device *ibdev,
347*a0c64a17SJack Morgenstein 			u8 port, int index,
348*a0c64a17SJack Morgenstein 			struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
349*a0c64a17SJack Morgenstein {
350*a0c64a17SJack Morgenstein 	int err;
351*a0c64a17SJack Morgenstein 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
352*a0c64a17SJack Morgenstein 	struct ib_sa_guidinfo_rec guid_info_rec;
353*a0c64a17SJack Morgenstein 	ib_sa_comp_mask comp_mask;
354*a0c64a17SJack Morgenstein 	struct ib_port_attr attr;
355*a0c64a17SJack Morgenstein 	struct mlx4_alias_guid_work_context *callback_context;
356*a0c64a17SJack Morgenstein 	unsigned long resched_delay, flags, flags1;
357*a0c64a17SJack Morgenstein 	struct list_head *head =
358*a0c64a17SJack Morgenstein 		&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;
359*a0c64a17SJack Morgenstein 
360*a0c64a17SJack Morgenstein 	err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
361*a0c64a17SJack Morgenstein 	if (err) {
362*a0c64a17SJack Morgenstein 		pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n",
363*a0c64a17SJack Morgenstein 			 err, port);
364*a0c64a17SJack Morgenstein 		return err;
365*a0c64a17SJack Morgenstein 	}
366*a0c64a17SJack Morgenstein 	/* check that the port was configured by the SM; otherwise there is no need to send */
367*a0c64a17SJack Morgenstein 	if (attr.state != IB_PORT_ACTIVE) {
368*a0c64a17SJack Morgenstein 		pr_debug("port %d not active...rescheduling\n", port);
369*a0c64a17SJack Morgenstein 		resched_delay = 5 * HZ;
370*a0c64a17SJack Morgenstein 		err = -EAGAIN;
371*a0c64a17SJack Morgenstein 		goto new_schedule;
372*a0c64a17SJack Morgenstein 	}
373*a0c64a17SJack Morgenstein 
374*a0c64a17SJack Morgenstein 	callback_context = kmalloc(sizeof *callback_context, GFP_KERNEL);
375*a0c64a17SJack Morgenstein 	if (!callback_context) {
376*a0c64a17SJack Morgenstein 		err = -ENOMEM;
377*a0c64a17SJack Morgenstein 		resched_delay = HZ * 5;
378*a0c64a17SJack Morgenstein 		goto new_schedule;
379*a0c64a17SJack Morgenstein 	}
380*a0c64a17SJack Morgenstein 	callback_context->port = port;
381*a0c64a17SJack Morgenstein 	callback_context->dev = dev;
382*a0c64a17SJack Morgenstein 	callback_context->block_num = index;
383*a0c64a17SJack Morgenstein 
384*a0c64a17SJack Morgenstein 	memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));
385*a0c64a17SJack Morgenstein 
386*a0c64a17SJack Morgenstein 	guid_info_rec.lid = cpu_to_be16(attr.lid);
387*a0c64a17SJack Morgenstein 	guid_info_rec.block_num = index;
388*a0c64a17SJack Morgenstein 
389*a0c64a17SJack Morgenstein 	memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
390*a0c64a17SJack Morgenstein 	       GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC);
391*a0c64a17SJack Morgenstein 	comp_mask = IB_SA_GUIDINFO_REC_LID | IB_SA_GUIDINFO_REC_BLOCK_NUM |
392*a0c64a17SJack Morgenstein 		rec_det->guid_indexes;
393*a0c64a17SJack Morgenstein 
394*a0c64a17SJack Morgenstein 	init_completion(&callback_context->done);
395*a0c64a17SJack Morgenstein 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
396*a0c64a17SJack Morgenstein 	list_add_tail(&callback_context->list, head);
397*a0c64a17SJack Morgenstein 	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
398*a0c64a17SJack Morgenstein 
399*a0c64a17SJack Morgenstein 	callback_context->query_id =
400*a0c64a17SJack Morgenstein 		ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
401*a0c64a17SJack Morgenstein 					  ibdev, port, &guid_info_rec,
402*a0c64a17SJack Morgenstein 					  comp_mask, rec_det->method, 1000,
403*a0c64a17SJack Morgenstein 					  GFP_KERNEL, aliasguid_query_handler,
404*a0c64a17SJack Morgenstein 					  callback_context,
405*a0c64a17SJack Morgenstein 					  &callback_context->sa_query);
406*a0c64a17SJack Morgenstein 	if (callback_context->query_id < 0) {
407*a0c64a17SJack Morgenstein 		pr_debug("ib_sa_guid_info_rec_query failed, query_id: "
408*a0c64a17SJack Morgenstein 			 "%d. will reschedule in 1 sec.\n",
409*a0c64a17SJack Morgenstein 			 callback_context->query_id);
410*a0c64a17SJack Morgenstein 		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
411*a0c64a17SJack Morgenstein 		list_del(&callback_context->list);
412*a0c64a17SJack Morgenstein 		kfree(callback_context);
413*a0c64a17SJack Morgenstein 		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
414*a0c64a17SJack Morgenstein 		resched_delay = 1 * HZ;
415*a0c64a17SJack Morgenstein 		err = -EAGAIN;
416*a0c64a17SJack Morgenstein 		goto new_schedule;
417*a0c64a17SJack Morgenstein 	}
418*a0c64a17SJack Morgenstein 	err = 0;
419*a0c64a17SJack Morgenstein 	goto out;
420*a0c64a17SJack Morgenstein 
421*a0c64a17SJack Morgenstein new_schedule:
422*a0c64a17SJack Morgenstein 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
423*a0c64a17SJack Morgenstein 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
424*a0c64a17SJack Morgenstein 	invalidate_guid_record(dev, port, index);
425*a0c64a17SJack Morgenstein 	if (!dev->sriov.is_going_down) {
426*a0c64a17SJack Morgenstein 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
427*a0c64a17SJack Morgenstein 				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
428*a0c64a17SJack Morgenstein 				   resched_delay);
429*a0c64a17SJack Morgenstein 	}
430*a0c64a17SJack Morgenstein 	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
431*a0c64a17SJack Morgenstein 	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
432*a0c64a17SJack Morgenstein 
433*a0c64a17SJack Morgenstein out:
434*a0c64a17SJack Morgenstein 	return err;
435*a0c64a17SJack Morgenstein }
436*a0c64a17SJack Morgenstein 
437*a0c64a17SJack Morgenstein void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
438*a0c64a17SJack Morgenstein {
439*a0c64a17SJack Morgenstein 	int i;
440*a0c64a17SJack Morgenstein 	unsigned long flags, flags1;
441*a0c64a17SJack Morgenstein 
442*a0c64a17SJack Morgenstein 	pr_debug("port %d\n", port);
443*a0c64a17SJack Morgenstein 
444*a0c64a17SJack Morgenstein 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
445*a0c64a17SJack Morgenstein 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
446*a0c64a17SJack Morgenstein 	for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
447*a0c64a17SJack Morgenstein 		invalidate_guid_record(dev, port, i);
448*a0c64a17SJack Morgenstein 
449*a0c64a17SJack Morgenstein 	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
450*a0c64a17SJack Morgenstein 		/*
451*a0c64a17SJack Morgenstein 		make sure no work waits in the queue; if the work is already
452*a0c64a17SJack Morgenstein 		queued (not on the timer) the cancel will fail. That is not a problem
453*a0c64a17SJack Morgenstein 		because we just want the work started.
454*a0c64a17SJack Morgenstein 		*/
455*a0c64a17SJack Morgenstein 		__cancel_delayed_work(&dev->sriov.alias_guid.
456*a0c64a17SJack Morgenstein 				      ports_guid[port - 1].alias_guid_work);
457*a0c64a17SJack Morgenstein 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
458*a0c64a17SJack Morgenstein 				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
459*a0c64a17SJack Morgenstein 				   0);
460*a0c64a17SJack Morgenstein 	}
461*a0c64a17SJack Morgenstein 	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
462*a0c64a17SJack Morgenstein 	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
463*a0c64a17SJack Morgenstein }
464*a0c64a17SJack Morgenstein 
465*a0c64a17SJack Morgenstein /* The function returns the next record that was
466*a0c64a17SJack Morgenstein  * not configured (or failed to be configured) */
467*a0c64a17SJack Morgenstein static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
468*a0c64a17SJack Morgenstein 				     struct mlx4_next_alias_guid_work *rec)
469*a0c64a17SJack Morgenstein {
470*a0c64a17SJack Morgenstein 	int j;
471*a0c64a17SJack Morgenstein 	unsigned long flags;
472*a0c64a17SJack Morgenstein 
473*a0c64a17SJack Morgenstein 	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
474*a0c64a17SJack Morgenstein 		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
475*a0c64a17SJack Morgenstein 		if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
476*a0c64a17SJack Morgenstein 		    MLX4_GUID_INFO_STATUS_IDLE) {
477*a0c64a17SJack Morgenstein 			memcpy(&rec->rec_det,
478*a0c64a17SJack Morgenstein 			       &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
479*a0c64a17SJack Morgenstein 			       sizeof (struct mlx4_sriov_alias_guid_info_rec_det));
480*a0c64a17SJack Morgenstein 			rec->port = port;
481*a0c64a17SJack Morgenstein 			rec->block_num = j;
482*a0c64a17SJack Morgenstein 			dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
483*a0c64a17SJack Morgenstein 				MLX4_GUID_INFO_STATUS_PENDING;
484*a0c64a17SJack Morgenstein 			spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
485*a0c64a17SJack Morgenstein 			return 0;
486*a0c64a17SJack Morgenstein 		}
487*a0c64a17SJack Morgenstein 		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
488*a0c64a17SJack Morgenstein 	}
489*a0c64a17SJack Morgenstein 	return -ENOENT;
490*a0c64a17SJack Morgenstein }
491*a0c64a17SJack Morgenstein 
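/*
 * Copy an administratively provided record (guid_indexes, GUIDs and status)
 * into the per-port alias GUID state.
 */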
492*a0c64a17SJack Morgenstein static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
493*a0c64a17SJack Morgenstein 					     int rec_index,
494*a0c64a17SJack Morgenstein 					     struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
495*a0c64a17SJack Morgenstein {
496*a0c64a17SJack Morgenstein 	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
497*a0c64a17SJack Morgenstein 		rec_det->guid_indexes;
498*a0c64a17SJack Morgenstein 	memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
499*a0c64a17SJack Morgenstein 	       rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
500*a0c64a17SJack Morgenstein 	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
501*a0c64a17SJack Morgenstein 		rec_det->status;
502*a0c64a17SJack Morgenstein }
503*a0c64a17SJack Morgenstein 
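/*
 * Prepare every record of the port for SM assignment: GUIDs 1..7 of each
 * record (and GUID 0 of every record except the first, which holds the HW
 * GUID) are marked for assignment and the records are set to IDLE.
 */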
504*a0c64a17SJack Morgenstein static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
505*a0c64a17SJack Morgenstein {
506*a0c64a17SJack Morgenstein 	int j;
507*a0c64a17SJack Morgenstein 	struct mlx4_sriov_alias_guid_info_rec_det rec_det ;
508*a0c64a17SJack Morgenstein 
509*a0c64a17SJack Morgenstein 	for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT ; j++) {
510*a0c64a17SJack Morgenstein 		memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
511*a0c64a17SJack Morgenstein 		rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) |
512*a0c64a17SJack Morgenstein 			IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 |
513*a0c64a17SJack Morgenstein 			IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 |
514*a0c64a17SJack Morgenstein 			IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 |
515*a0c64a17SJack Morgenstein 			IB_SA_GUIDINFO_REC_GID7;
516*a0c64a17SJack Morgenstein 		rec_det.status = MLX4_GUID_INFO_STATUS_IDLE;
517*a0c64a17SJack Morgenstein 		set_administratively_guid_record(dev, port, j, &rec_det);
518*a0c64a17SJack Morgenstein 	}
519*a0c64a17SJack Morgenstein }
520*a0c64a17SJack Morgenstein 
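/*
 * Per-port delayed work: pick the next IDLE record and push it to the SA
 * via set_guid_rec().  The work is requeued from the query completion
 * handler until no IDLE records remain.
 */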
521*a0c64a17SJack Morgenstein static void alias_guid_work(struct work_struct *work)
522*a0c64a17SJack Morgenstein {
523*a0c64a17SJack Morgenstein 	struct delayed_work *delay = to_delayed_work(work);
524*a0c64a17SJack Morgenstein 	int ret = 0;
525*a0c64a17SJack Morgenstein 	struct mlx4_next_alias_guid_work *rec;
526*a0c64a17SJack Morgenstein 	struct mlx4_sriov_alias_guid_port_rec_det *sriov_alias_port =
527*a0c64a17SJack Morgenstein 		container_of(delay, struct mlx4_sriov_alias_guid_port_rec_det,
528*a0c64a17SJack Morgenstein 			     alias_guid_work);
529*a0c64a17SJack Morgenstein 	struct mlx4_sriov_alias_guid *sriov_alias_guid = sriov_alias_port->parent;
530*a0c64a17SJack Morgenstein 	struct mlx4_ib_sriov *ib_sriov = container_of(sriov_alias_guid,
531*a0c64a17SJack Morgenstein 						struct mlx4_ib_sriov,
532*a0c64a17SJack Morgenstein 						alias_guid);
533*a0c64a17SJack Morgenstein 	struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);
534*a0c64a17SJack Morgenstein 
535*a0c64a17SJack Morgenstein 	rec = kzalloc(sizeof *rec, GFP_KERNEL);
536*a0c64a17SJack Morgenstein 	if (!rec) {
537*a0c64a17SJack Morgenstein 		pr_err("alias_guid_work: No Memory\n");
538*a0c64a17SJack Morgenstein 		return;
539*a0c64a17SJack Morgenstein 	}
540*a0c64a17SJack Morgenstein 
541*a0c64a17SJack Morgenstein 	pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
542*a0c64a17SJack Morgenstein 	ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
543*a0c64a17SJack Morgenstein 	if (ret) {
544*a0c64a17SJack Morgenstein 		pr_debug("No more records to update.\n");
545*a0c64a17SJack Morgenstein 		goto out;
546*a0c64a17SJack Morgenstein 	}
547*a0c64a17SJack Morgenstein 
548*a0c64a17SJack Morgenstein 	set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
549*a0c64a17SJack Morgenstein 		     &rec->rec_det);
550*a0c64a17SJack Morgenstein 
551*a0c64a17SJack Morgenstein out:
552*a0c64a17SJack Morgenstein 	kfree(rec);
553*a0c64a17SJack Morgenstein }
554*a0c64a17SJack Morgenstein 
555*a0c64a17SJack Morgenstein 
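/*
 * Kick off the alias GUID work for a port.  Only relevant on the master,
 * and only while the device is not going down.
 */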
556*a0c64a17SJack Morgenstein void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
557*a0c64a17SJack Morgenstein {
558*a0c64a17SJack Morgenstein 	unsigned long flags, flags1;
559*a0c64a17SJack Morgenstein 
560*a0c64a17SJack Morgenstein 	if (!mlx4_is_master(dev->dev))
561*a0c64a17SJack Morgenstein 		return;
562*a0c64a17SJack Morgenstein 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
563*a0c64a17SJack Morgenstein 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
564*a0c64a17SJack Morgenstein 	if (!dev->sriov.is_going_down) {
565*a0c64a17SJack Morgenstein 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
566*a0c64a17SJack Morgenstein 			   &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
567*a0c64a17SJack Morgenstein 	}
568*a0c64a17SJack Morgenstein 	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
569*a0c64a17SJack Morgenstein 	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
570*a0c64a17SJack Morgenstein }
571*a0c64a17SJack Morgenstein 
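/*
 * Tear down the alias GUID service: cancel the per-port delayed work,
 * cancel and wait for any outstanding SA queries, destroy the per-port
 * workqueues and release the SA client.
 */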
572*a0c64a17SJack Morgenstein void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
573*a0c64a17SJack Morgenstein {
574*a0c64a17SJack Morgenstein 	int i;
575*a0c64a17SJack Morgenstein 	struct mlx4_ib_sriov *sriov = &dev->sriov;
576*a0c64a17SJack Morgenstein 	struct mlx4_alias_guid_work_context *cb_ctx;
577*a0c64a17SJack Morgenstein 	struct mlx4_sriov_alias_guid_port_rec_det *det;
578*a0c64a17SJack Morgenstein 	struct ib_sa_query *sa_query;
579*a0c64a17SJack Morgenstein 	unsigned long flags;
580*a0c64a17SJack Morgenstein 
581*a0c64a17SJack Morgenstein 	for (i = 0 ; i < dev->num_ports; i++) {
582*a0c64a17SJack Morgenstein 		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
583*a0c64a17SJack Morgenstein 		det = &sriov->alias_guid.ports_guid[i];
584*a0c64a17SJack Morgenstein 		spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
585*a0c64a17SJack Morgenstein 		while (!list_empty(&det->cb_list)) {
586*a0c64a17SJack Morgenstein 			cb_ctx = list_entry(det->cb_list.next,
587*a0c64a17SJack Morgenstein 					    struct mlx4_alias_guid_work_context,
588*a0c64a17SJack Morgenstein 					    list);
589*a0c64a17SJack Morgenstein 			sa_query = cb_ctx->sa_query;
590*a0c64a17SJack Morgenstein 			cb_ctx->sa_query = NULL;
591*a0c64a17SJack Morgenstein 			list_del(&cb_ctx->list);
592*a0c64a17SJack Morgenstein 			spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
593*a0c64a17SJack Morgenstein 			ib_sa_cancel_query(cb_ctx->query_id, sa_query);
594*a0c64a17SJack Morgenstein 			wait_for_completion(&cb_ctx->done);
595*a0c64a17SJack Morgenstein 			kfree(cb_ctx);
596*a0c64a17SJack Morgenstein 			spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
597*a0c64a17SJack Morgenstein 		}
598*a0c64a17SJack Morgenstein 		spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
599*a0c64a17SJack Morgenstein 	}
600*a0c64a17SJack Morgenstein 	for (i = 0 ; i < dev->num_ports; i++) {
601*a0c64a17SJack Morgenstein 		flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
602*a0c64a17SJack Morgenstein 		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
603*a0c64a17SJack Morgenstein 	}
604*a0c64a17SJack Morgenstein 	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
605*a0c64a17SJack Morgenstein 	kfree(dev->sriov.alias_guid.sa_client);
606*a0c64a17SJack Morgenstein }
607*a0c64a17SJack Morgenstein 
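/*
 * Set up the alias GUID service on the master: register an SA client,
 * initialize the per-port record state and ownership, and create a
 * single-threaded workqueue per port for the alias GUID work.
 */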
608*a0c64a17SJack Morgenstein int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
609*a0c64a17SJack Morgenstein {
610*a0c64a17SJack Morgenstein 	char alias_wq_name[15];
611*a0c64a17SJack Morgenstein 	int ret = 0;
612*a0c64a17SJack Morgenstein 	int i, j, k;
613*a0c64a17SJack Morgenstein 	union ib_gid gid;
614*a0c64a17SJack Morgenstein 
615*a0c64a17SJack Morgenstein 	if (!mlx4_is_master(dev->dev))
616*a0c64a17SJack Morgenstein 		return 0;
617*a0c64a17SJack Morgenstein 	dev->sriov.alias_guid.sa_client =
618*a0c64a17SJack Morgenstein 		kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
619*a0c64a17SJack Morgenstein 	if (!dev->sriov.alias_guid.sa_client)
620*a0c64a17SJack Morgenstein 		return -ENOMEM;
621*a0c64a17SJack Morgenstein 
622*a0c64a17SJack Morgenstein 	ib_sa_register_client(dev->sriov.alias_guid.sa_client);
623*a0c64a17SJack Morgenstein 
624*a0c64a17SJack Morgenstein 	spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);
625*a0c64a17SJack Morgenstein 
626*a0c64a17SJack Morgenstein 	for (i = 1; i <= dev->num_ports; ++i) {
627*a0c64a17SJack Morgenstein 		if (dev->ib_dev.query_gid(&dev->ib_dev , i, 0, &gid)) {
628*a0c64a17SJack Morgenstein 			ret = -EFAULT;
629*a0c64a17SJack Morgenstein 			goto err_unregister;
630*a0c64a17SJack Morgenstein 		}
631*a0c64a17SJack Morgenstein 	}
632*a0c64a17SJack Morgenstein 
633*a0c64a17SJack Morgenstein 	for (i = 0 ; i < dev->num_ports; i++) {
634*a0c64a17SJack Morgenstein 		memset(&dev->sriov.alias_guid.ports_guid[i], 0,
635*a0c64a17SJack Morgenstein 		       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
636*a0c64a17SJack Morgenstein 		/* Check if the SM doesn't need to assign the GUIDs */
637*a0c64a17SJack Morgenstein 		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
638*a0c64a17SJack Morgenstein 			if (mlx4_ib_sm_guid_assign) {
639*a0c64a17SJack Morgenstein 				dev->sriov.alias_guid.ports_guid[i].
640*a0c64a17SJack Morgenstein 					all_rec_per_port[j].
641*a0c64a17SJack Morgenstein 					ownership = MLX4_GUID_DRIVER_ASSIGN;
642*a0c64a17SJack Morgenstein 				continue;
643*a0c64a17SJack Morgenstein 			}
644*a0c64a17SJack Morgenstein 			dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
645*a0c64a17SJack Morgenstein 					ownership = MLX4_GUID_NONE_ASSIGN;
646*a0c64a17SJack Morgenstein 			/* mark each value as deleted,
647*a0c64a17SJack Morgenstein 			   until the sysadmin gives it a valid value */
648*a0c64a17SJack Morgenstein 			for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
649*a0c64a17SJack Morgenstein 				*(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
650*a0c64a17SJack Morgenstein 					all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
651*a0c64a17SJack Morgenstein 						cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
652*a0c64a17SJack Morgenstein 			}
653*a0c64a17SJack Morgenstein 		}
654*a0c64a17SJack Morgenstein 		INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
655*a0c64a17SJack Morgenstein 		/* prepare the records; set them to be allocated by the SM */
656*a0c64a17SJack Morgenstein 		for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
657*a0c64a17SJack Morgenstein 			invalidate_guid_record(dev, i + 1, j);
658*a0c64a17SJack Morgenstein 
659*a0c64a17SJack Morgenstein 		dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
660*a0c64a17SJack Morgenstein 		dev->sriov.alias_guid.ports_guid[i].port  = i;
661*a0c64a17SJack Morgenstein 		if (mlx4_ib_sm_guid_assign)
662*a0c64a17SJack Morgenstein 			set_all_slaves_guids(dev, i);
663*a0c64a17SJack Morgenstein 
664*a0c64a17SJack Morgenstein 		snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
665*a0c64a17SJack Morgenstein 		dev->sriov.alias_guid.ports_guid[i].wq =
666*a0c64a17SJack Morgenstein 			create_singlethread_workqueue(alias_wq_name);
667*a0c64a17SJack Morgenstein 		if (!dev->sriov.alias_guid.ports_guid[i].wq) {
668*a0c64a17SJack Morgenstein 			ret = -ENOMEM;
669*a0c64a17SJack Morgenstein 			goto err_thread;
670*a0c64a17SJack Morgenstein 		}
671*a0c64a17SJack Morgenstein 		INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
672*a0c64a17SJack Morgenstein 			  alias_guid_work);
673*a0c64a17SJack Morgenstein 	}
674*a0c64a17SJack Morgenstein 	return 0;
675*a0c64a17SJack Morgenstein 
676*a0c64a17SJack Morgenstein err_thread:
677*a0c64a17SJack Morgenstein 	for (--i; i >= 0; i--) {
678*a0c64a17SJack Morgenstein 		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
679*a0c64a17SJack Morgenstein 		dev->sriov.alias_guid.ports_guid[i].wq = NULL;
680*a0c64a17SJack Morgenstein 	}
681*a0c64a17SJack Morgenstein 
682*a0c64a17SJack Morgenstein err_unregister:
683*a0c64a17SJack Morgenstein 	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
684*a0c64a17SJack Morgenstein 	kfree(dev->sriov.alias_guid.sa_client);
685*a0c64a17SJack Morgenstein 	dev->sriov.alias_guid.sa_client = NULL;
686*a0c64a17SJack Morgenstein 	pr_err("init_alias_guid_service: Failed. (ret:%d)\n", ret);
687*a0c64a17SJack Morgenstein 	return ret;
688*a0c64a17SJack Morgenstein }
689