/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*************************************************************/
/* This file supports the handling of the Alias GUID feature. */
/*************************************************************/
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_pack.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <rdma/ib_user_verbs.h>
#include <linux/delay.h>
#include "mlx4_ib.h"

/*
 * The driver keeps the current state of all GUIDs as they are in the HW.
 * Whenever we receive a GUIDInfo record in an SMP MAD, the data is cached.
 */

struct mlx4_alias_guid_work_context {
	u8 port;
	struct mlx4_ib_dev *dev;
	struct ib_sa_query *sa_query;
	struct completion done;
	int query_id;
	struct list_head list;
	int block_num;
	ib_sa_comp_mask guid_indexes;
	u8 method;
};

struct mlx4_next_alias_guid_work {
	u8 port;
	u8 block_num;
	u8 method;
	struct mlx4_sriov_alias_guid_info_rec_det rec_det;
};

static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
				     int *resched_delay_sec);

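/* Cache one GUIDInfo block (eight GUIDs) reported by an SMP MAD in the
 * per-slave guid_cache, so that later SA responses and change events can be
 * compared against the values the SM actually assigned.
 */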
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
					 u8 port_num, u8 *p_data)
{
	int i;
	u64 guid_indexes;
	int slave_id;
	int port_index = port_num - 1;

	if (!mlx4_is_master(dev->dev))
		return;

	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_num - 1].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);

	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* The location of the specific index starts at bit number 4
		 * and runs until bit number 11.
		 */
		if (test_bit(i + 4, (unsigned long *)&guid_indexes)) {
			slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
			if (slave_id >= dev->dev->num_slaves) {
				pr_debug("The last slave: %d\n", slave_id);
				return;
			}

			/* cache the guid: */
			memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
			       &p_data[i * GUID_REC_SIZE],
			       GUID_REC_SIZE);
		} else
			pr_debug("Guid number: %d in block: %d"
				 " was not updated\n", i, block_num);
	}
}

static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index)
{
	if (index >= NUM_ALIAS_GUID_PER_PORT) {
		pr_err("%s: ERROR: asked for index:%d\n", __func__, index);
		return (__force __be64) -1;
	}
	return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
}


ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
{
	return IB_SA_COMP_MASK(4 + index);
}

/*
 * Whenever a new GUID is set/unset (guid table change), create an event and
 * notify the relevant slave (the master should also be notified).
 * If the GUID value is not the same as in the cache, the slave will not be
 * updated; in that case it waits for smp_snoop or the port management event
 * to call this function and update the slave.
 * block_number - the index of the block (16 blocks available)
 * port_number - 1 or 2
 */
void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
					  int block_num, u8 port_num,
					  u8 *p_data)
{
	int i;
	u64 guid_indexes;
	int slave_id;
	enum slave_port_state new_state;
	enum slave_port_state prev_state;
	__be64 tmp_cur_ag, form_cache_ag;
	enum slave_port_gen_event gen_event;
	struct mlx4_sriov_alias_guid_info_rec_det *rec;
	unsigned long flags;
	__be64 required_value;

	if (!mlx4_is_master(dev->dev))
		return;

	rec = &dev->sriov.alias_guid.ports_guid[port_num - 1].
			all_rec_per_port[block_num];
	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_num - 1].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);

	/* calculate the slaves and notify them */
	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* the location of the specific index runs from bits 4..11 */
		if (!(test_bit(i + 4, (unsigned long *)&guid_indexes)))
			continue;

		slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
		if (slave_id >= dev->dev->persist->num_vfs + 1)
			return;
		tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
		form_cache_ag = get_cached_alias_guid(dev, port_num,
					(NUM_ALIAS_GUID_IN_REC * block_num) + i);
		/*
		 * Check whether the guid is the same as the one in the cache.
		 * If it is different, wait for the smp_snoop or the port
		 * management change event to update the slave on its port
		 * state change.
		 */
		if (tmp_cur_ag != form_cache_ag)
			continue;

		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
		required_value = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];

		if (required_value == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
			required_value = 0;

		if (tmp_cur_ag == required_value) {
			rec->guid_indexes = rec->guid_indexes &
				~mlx4_ib_get_aguid_comp_mask_from_ix(i);
		} else {
			/* may notify port down if value is 0 */
			if (tmp_cur_ag != MLX4_NOT_SET_GUID) {
				spin_unlock_irqrestore(&dev->sriov.
					alias_guid.ag_work_lock, flags);
				continue;
			}
		}
		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock,
				       flags);
		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
		/* 2 cases: valid GUID, and invalid GUID */

		if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /* valid GUID */
			prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num);
			new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
								  MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
								  &gen_event);
			pr_debug("slave: %d, port: %d prev_port_state: %d,"
				 " new_port_state: %d, gen_event: %d\n",
				 slave_id, port_num, prev_state, new_state, gen_event);
			if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
				pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
					 slave_id, port_num);
				mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
							       port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
			}
		} else { /* request to invalidate GUID */
			set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
						      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
						      &gen_event);
			if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) {
				pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
					 slave_id, port_num);
				mlx4_gen_port_state_change_eqe(dev->dev,
							       slave_id,
							       port_num,
							       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
			}
		}
	}
}

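/* Completion handler for the SA GuidInfoRecord query issued by set_guid_rec().
 * It compares the SM's response with the administratively requested values,
 * marks entries that were not granted for a retry (with a per-entry backoff
 * that doubles up to 60 seconds), notifies the slaves, and reschedules the
 * per-port alias_guid work based on the record with the lowest retry time.
 */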
static void aliasguid_query_handler(int status,
				    struct ib_sa_guidinfo_rec *guid_rec,
				    void *context)
{
	struct mlx4_ib_dev *dev;
	struct mlx4_alias_guid_work_context *cb_ctx = context;
	u8 port_index;
	int i;
	struct mlx4_sriov_alias_guid_info_rec_det *rec;
	unsigned long flags, flags1;
	ib_sa_comp_mask declined_guid_indexes = 0;
	ib_sa_comp_mask applied_guid_indexes = 0;
	unsigned int resched_delay_sec = 0;

	if (!context)
		return;

	dev = cb_ctx->dev;
	port_index = cb_ctx->port - 1;
	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[cb_ctx->block_num];

	if (status) {
		pr_debug("(port: %d) failed: status = %d\n",
			 cb_ctx->port, status);
		rec->time_to_run = ktime_get_real_ns() + 1 * NSEC_PER_SEC;
		goto out;
	}

	if (guid_rec->block_num != cb_ctx->block_num) {
		pr_err("block num mismatch: %d != %d\n",
		       cb_ctx->block_num, guid_rec->block_num);
		goto out;
	}

	pr_debug("lid/port: %d/%d, block_num: %d\n",
		 be16_to_cpu(guid_rec->lid), cb_ctx->port,
		 guid_rec->block_num);

	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[guid_rec->block_num];

	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
	for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
		__be64 sm_response, required_val;

		if (!(cb_ctx->guid_indexes &
			mlx4_ib_get_aguid_comp_mask_from_ix(i)))
			continue;
		sm_response = *(__be64 *)&guid_rec->guid_info_list
				[i * GUID_REC_SIZE];
		required_val = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
		if (cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE) {
			if (required_val ==
			    cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
				goto next_entry;

			/* A new value was set before we got the response */
			pr_debug("need to set new value %llx, record num %d, block_num:%d\n",
				 be64_to_cpu(required_val),
				 i, guid_rec->block_num);
			goto entry_declined;
		}

		/* Check whether the SM didn't assign one of the records.
		 * If it didn't, re-ask for it.
		 */
		if (sm_response == MLX4_NOT_SET_GUID) {
			if (rec->guids_retry_schedule[i] == 0)
				mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
					     "block_num: %d was declined by SM, "
					     "ownership by %d (0 = driver, 1=sysAdmin,"
					     " 2=None)\n", __func__, i,
					     guid_rec->block_num,
					     rec->ownership);
			goto entry_declined;
		} else {
			/* Properly assigned record: save the GUID we just got
			 * from the SM as the admin GUID so that it is
			 * persistent; subsequent requests to the SM will ask
			 * for the same GUID.
			 */
			if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
			    sm_response != required_val) {
				/* Warn only on first retry */
				if (rec->guids_retry_schedule[i] == 0)
					mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
						     " admin guid after SysAdmin "
						     "configuration. "
						     "Record num %d in block_num:%d "
						     "was declined by SM, "
						     "new val(0x%llx) was kept, SM returned (0x%llx)\n",
						     __func__, i,
						     guid_rec->block_num,
						     be64_to_cpu(required_val),
						     be64_to_cpu(sm_response));
				goto entry_declined;
			} else {
				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
					sm_response;
				goto next_entry;
			}
		}
entry_declined:
		declined_guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
		rec->guids_retry_schedule[i] =
			(rec->guids_retry_schedule[i] == 0) ? 1 :
			min((unsigned int)60,
			    rec->guids_retry_schedule[i] * 2);
		/* using the minimum value among all entries in that record */
		resched_delay_sec = (resched_delay_sec == 0) ?
				rec->guids_retry_schedule[i] :
				min(resched_delay_sec,
				    rec->guids_retry_schedule[i]);
		continue;

next_entry:
		rec->guids_retry_schedule[i] = 0;
	}

	applied_guid_indexes = cb_ctx->guid_indexes & ~declined_guid_indexes;
	if (declined_guid_indexes ||
	    rec->guid_indexes & ~(applied_guid_indexes)) {
		pr_debug("record=%d wasn't fully set, guid_indexes=0x%llx applied_indexes=0x%llx, declined_indexes=0x%llx\n",
			 guid_rec->block_num,
			 be64_to_cpu((__force __be64)rec->guid_indexes),
			 be64_to_cpu((__force __be64)applied_guid_indexes),
			 be64_to_cpu((__force __be64)declined_guid_indexes));
		rec->time_to_run = ktime_get_real_ns() +
				   resched_delay_sec * NSEC_PER_SEC;
	} else {
		rec->status = MLX4_GUID_INFO_STATUS_SET;
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
	/*
	 * The function is called here to cover the case where the SM
	 * doesn't send an SMP; the driver then notifies the slaves from
	 * the SA response.
	 */
	mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num,
					     cb_ctx->port,
					     guid_rec->guid_info_list);
out:
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down) {
		get_low_record_time_index(dev, port_index, &resched_delay_sec);
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
				   &dev->sriov.alias_guid.ports_guid[port_index].
				   alias_guid_work,
				   msecs_to_jiffies(resched_delay_sec * 1000));
	}
	if (cb_ctx->sa_query) {
		list_del(&cb_ctx->list);
		kfree(cb_ctx);
	} else
		complete(&cb_ctx->done);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

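/* Mark the entries of one record that still need to be (re)assigned by the
 * SM and, if there are any, move the record back to IDLE so that the
 * alias_guid work will pick it up.
 */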
static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
{
	int i;
	u64 cur_admin_val;
	ib_sa_comp_mask comp_mask = 0;

	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
		= MLX4_GUID_INFO_STATUS_SET;

	/* calculate the comp_mask for that record. */
	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		cur_admin_val =
			*(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
			all_rec_per_port[index].all_recs[GUID_REC_SIZE * i];
		/*
		 * Check the admin value: if it's for delete (~00LL), or it is
		 * the first guid of the first record (the HW guid), or the
		 * record is not owned by the sysadmin and the SM doesn't need
		 * to assign GUIDs, then don't put it up for assignment.
		 */
		if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
		    (!index && !i) ||
		    MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid.
		    ports_guid[port - 1].all_rec_per_port[index].ownership)
			continue;
		comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
	}
	dev->sriov.alias_guid.ports_guid[port - 1].
		all_rec_per_port[index].guid_indexes |= comp_mask;
	if (dev->sriov.alias_guid.ports_guid[port - 1].
	    all_rec_per_port[index].guid_indexes)
		dev->sriov.alias_guid.ports_guid[port - 1].
		all_rec_per_port[index].status = MLX4_GUID_INFO_STATUS_IDLE;

}

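/* Send a single GuidInfoRecord SET or DELETE request to the SA for the block
 * described by *rec. On failure (port not active, allocation or query error)
 * the record is invalidated and the per-port work is rescheduled.
 */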
static int set_guid_rec(struct ib_device *ibdev,
			struct mlx4_next_alias_guid_work *rec)
{
	int err;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_guidinfo_rec guid_info_rec;
	ib_sa_comp_mask comp_mask;
	struct ib_port_attr attr;
	struct mlx4_alias_guid_work_context *callback_context;
	unsigned long resched_delay, flags, flags1;
	u8 port = rec->port + 1;
	int index = rec->block_num;
	struct mlx4_sriov_alias_guid_info_rec_det *rec_det = &rec->rec_det;
	struct list_head *head =
		&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;

	err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
	if (err) {
		pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n",
			 err, port);
		return err;
	}
	/* check that the port was configured by the SM, otherwise there is no need to send */
	if (attr.state != IB_PORT_ACTIVE) {
		pr_debug("port %d not active...rescheduling\n", port);
		resched_delay = 5 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}

	callback_context = kmalloc(sizeof *callback_context, GFP_KERNEL);
	if (!callback_context) {
		err = -ENOMEM;
		resched_delay = HZ * 5;
		goto new_schedule;
	}
	callback_context->port = port;
	callback_context->dev = dev;
	callback_context->block_num = index;
	callback_context->guid_indexes = rec_det->guid_indexes;
	callback_context->method = rec->method;

	memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));

	guid_info_rec.lid = cpu_to_be16(attr.lid);
	guid_info_rec.block_num = index;

	memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
	       GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC);
	comp_mask = IB_SA_GUIDINFO_REC_LID | IB_SA_GUIDINFO_REC_BLOCK_NUM |
		rec_det->guid_indexes;

	init_completion(&callback_context->done);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	list_add_tail(&callback_context->list, head);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);

	callback_context->query_id =
		ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
					  ibdev, port, &guid_info_rec,
					  comp_mask, rec->method, 1000,
					  GFP_KERNEL, aliasguid_query_handler,
					  callback_context,
					  &callback_context->sa_query);
	if (callback_context->query_id < 0) {
		pr_debug("ib_sa_guid_info_rec_query failed, query_id: "
			 "%d. will reschedule to the next 1 sec.\n",
			 callback_context->query_id);
		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
		list_del(&callback_context->list);
		kfree(callback_context);
		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
		resched_delay = 1 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}
	err = 0;
	goto out;

new_schedule:
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	invalidate_guid_record(dev, port, index);
	if (!dev->sriov.is_going_down) {
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   resched_delay);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);

out:
	return err;
}

void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
{
	int i;
	unsigned long flags, flags1;

	pr_debug("port %d\n", port);

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
		invalidate_guid_record(dev, port, i);

	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
		/*
		 * Make sure no work is waiting in the queue. If the work is
		 * already queued (i.e. not waiting on the timer), the cancel
		 * will fail; that is not a problem because we just want the
		 * work started.
		 */
		cancel_delayed_work(&dev->sriov.alias_guid.
				    ports_guid[port - 1].alias_guid_work);
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   0);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

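/* Build the next request for one record: copy the record into *next_rec and
 * decide, based on the entry with the lowest retry time, whether this round
 * will be a GuidInfoRecord SET or a DELETE, selecting the matching entries.
 */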
static void set_required_record(struct mlx4_ib_dev *dev, u8 port,
				struct mlx4_next_alias_guid_work *next_rec,
				int record_index)
{
	int i;
	int lowset_time_entry = -1;
	int lowest_time = 0;
	ib_sa_comp_mask delete_guid_indexes = 0;
	ib_sa_comp_mask set_guid_indexes = 0;
	struct mlx4_sriov_alias_guid_info_rec_det *rec =
			&dev->sriov.alias_guid.ports_guid[port].
			all_rec_per_port[record_index];

	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		if (!(rec->guid_indexes &
			mlx4_ib_get_aguid_comp_mask_from_ix(i)))
			continue;

		if (*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] ==
				cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
			delete_guid_indexes |=
				mlx4_ib_get_aguid_comp_mask_from_ix(i);
		else
			set_guid_indexes |=
				mlx4_ib_get_aguid_comp_mask_from_ix(i);

		if (lowset_time_entry == -1 || rec->guids_retry_schedule[i] <=
		    lowest_time) {
			lowset_time_entry = i;
			lowest_time = rec->guids_retry_schedule[i];
		}
	}

	memcpy(&next_rec->rec_det, rec, sizeof(*rec));
	next_rec->port = port;
	next_rec->block_num = record_index;

	if (*(__be64 *)&rec->all_recs[lowset_time_entry * GUID_REC_SIZE] ==
				cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) {
		next_rec->rec_det.guid_indexes = delete_guid_indexes;
		next_rec->method = MLX4_GUID_INFO_RECORD_DELETE;
	} else {
		next_rec->rec_det.guid_indexes = set_guid_indexes;
		next_rec->method = MLX4_GUID_INFO_RECORD_SET;
	}
}

/* return index of record that should be updated based on lowest
 * rescheduled time
 */
static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
				     int *resched_delay_sec)
{
	int record_index = -1;
	u64 low_record_time = 0;
	struct mlx4_sriov_alias_guid_info_rec_det rec;
	int j;

	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
		rec = dev->sriov.alias_guid.ports_guid[port].
			all_rec_per_port[j];
		if (rec.status == MLX4_GUID_INFO_STATUS_IDLE &&
		    rec.guid_indexes) {
			if (record_index == -1 ||
			    rec.time_to_run < low_record_time) {
				record_index = j;
				low_record_time = rec.time_to_run;
			}
		}
	}
	if (resched_delay_sec) {
		u64 curr_time = ktime_get_real_ns();

		*resched_delay_sec = (low_record_time < curr_time) ? 0 :
			div_u64((low_record_time - curr_time), NSEC_PER_SEC);
	}

	return record_index;
}

/* The function returns the next record that was
 * not configured (or failed to be configured) */
static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
				     struct mlx4_next_alias_guid_work *rec)
{
	unsigned long flags;
	int record_index;
	int ret = 0;

	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
	record_index = get_low_record_time_index(dev, port, NULL);

	if (record_index < 0) {
		ret = -ENOENT;
		goto out;
	}

	set_required_record(dev, port, rec, record_index);
out:
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
	return ret;
}

static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
					     int rec_index,
					     struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
{
	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
		rec_det->guid_indexes;
	memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
	       rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
}

static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
{
	int j;
	struct mlx4_sriov_alias_guid_info_rec_det rec_det;

	for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT ; j++) {
		memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
		rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) |
			IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 |
			IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 |
			IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 |
			IB_SA_GUIDINFO_REC_GID7;
		rec_det.status = MLX4_GUID_INFO_STATUS_IDLE;
		set_administratively_guid_record(dev, port, j, &rec_det);
	}
}

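/* Per-port delayed work: fetch the record with the lowest retry time that
 * still has entries to configure and push it to the SA via set_guid_rec().
 */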
static void alias_guid_work(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	int ret = 0;
	struct mlx4_next_alias_guid_work *rec;
	struct mlx4_sriov_alias_guid_port_rec_det *sriov_alias_port =
		container_of(delay, struct mlx4_sriov_alias_guid_port_rec_det,
			     alias_guid_work);
	struct mlx4_sriov_alias_guid *sriov_alias_guid = sriov_alias_port->parent;
	struct mlx4_ib_sriov *ib_sriov = container_of(sriov_alias_guid,
						      struct mlx4_ib_sriov,
						      alias_guid);
	struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);

	rec = kzalloc(sizeof *rec, GFP_KERNEL);
	if (!rec) {
		pr_err("alias_guid_work: No Memory\n");
		return;
	}

	pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
	ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
	if (ret) {
		pr_debug("No more records to update.\n");
		goto out;
	}

	set_guid_rec(&dev->ib_dev, rec);
out:
	kfree(rec);
}


void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
{
	unsigned long flags, flags1;

	if (!mlx4_is_master(dev->dev))
		return;
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down) {
		/*
		 * If there is a pending work it should be cancelled and then
		 * re-queued; otherwise the new work won't run until the
		 * previous one has finished, since the same work struct is
		 * used.
		 */
		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port].
				    alias_guid_work);
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
				   &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

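/* Tear down the alias GUID service: cancel the per-port work, cancel and wait
 * for any outstanding SA queries, destroy the workqueues and unregister the
 * SA client.
 */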
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
{
	int i;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct mlx4_alias_guid_work_context *cb_ctx;
	struct mlx4_sriov_alias_guid_port_rec_det *det;
	struct ib_sa_query *sa_query;
	unsigned long flags;

	for (i = 0 ; i < dev->num_ports; i++) {
		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
		det = &sriov->alias_guid.ports_guid[i];
		spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		while (!list_empty(&det->cb_list)) {
			cb_ctx = list_entry(det->cb_list.next,
					    struct mlx4_alias_guid_work_context,
					    list);
			sa_query = cb_ctx->sa_query;
			cb_ctx->sa_query = NULL;
			list_del(&cb_ctx->list);
			spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
			ib_sa_cancel_query(cb_ctx->query_id, sa_query);
			wait_for_completion(&cb_ctx->done);
			kfree(cb_ctx);
			spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		}
		spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
	}
	for (i = 0 ; i < dev->num_ports; i++) {
		flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
	}
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
}

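/* Initialize the alias GUID service on the master: register the SA client,
 * set the initial ownership and admin values for every record on every port,
 * and create a single-threaded workqueue per port for the alias_guid work.
 */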
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
{
	char alias_wq_name[15];
	int ret = 0;
	int i, j, k;
	union ib_gid gid;

	if (!mlx4_is_master(dev->dev))
		return 0;
	dev->sriov.alias_guid.sa_client =
		kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
	if (!dev->sriov.alias_guid.sa_client)
		return -ENOMEM;

	ib_sa_register_client(dev->sriov.alias_guid.sa_client);

	spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);

	for (i = 1; i <= dev->num_ports; ++i) {
		if (dev->ib_dev.query_gid(&dev->ib_dev, i, 0, &gid)) {
			ret = -EFAULT;
			goto err_unregister;
		}
	}

	for (i = 0 ; i < dev->num_ports; i++) {
		memset(&dev->sriov.alias_guid.ports_guid[i], 0,
		       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
		/* Check if the SM doesn't need to assign the GUIDs */
		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
			if (mlx4_ib_sm_guid_assign) {
				dev->sriov.alias_guid.ports_guid[i].
					all_rec_per_port[j].
					ownership = MLX4_GUID_DRIVER_ASSIGN;
				continue;
			}
			dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
				ownership = MLX4_GUID_NONE_ASSIGN;
			/*
			 * Mark each value as deleted until the sysadmin
			 * assigns a valid value.
			 */
			for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
				*(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
					all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
						cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
			}
		}
		INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
		/* prepare the records, set them to be allocated by the SM */
		for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
			invalidate_guid_record(dev, i + 1, j);

		dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
		dev->sriov.alias_guid.ports_guid[i].port = i;
		if (mlx4_ib_sm_guid_assign)
			set_all_slaves_guids(dev, i);

		snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
		dev->sriov.alias_guid.ports_guid[i].wq =
			create_singlethread_workqueue(alias_wq_name);
		if (!dev->sriov.alias_guid.ports_guid[i].wq) {
			ret = -ENOMEM;
			goto err_thread;
		}
		INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
				  alias_guid_work);
	}
	return 0;

err_thread:
	for (--i; i >= 0; i--) {
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		dev->sriov.alias_guid.ports_guid[i].wq = NULL;
	}

err_unregister:
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
	dev->sriov.alias_guid.sa_client = NULL;
	pr_err("init_alias_guid_service: Failed. (ret:%d)\n", ret);
	return ret;
}