1e2cb1decSSalil Mehta // SPDX-License-Identifier: GPL-2.0+ 2e2cb1decSSalil Mehta // Copyright (c) 2016-2017 Hisilicon Limited. 3e2cb1decSSalil Mehta 4e2cb1decSSalil Mehta #include <linux/etherdevice.h> 5aa5c4f17SHuazhong Tan #include <linux/iopoll.h> 66988eb2aSSalil Mehta #include <net/rtnetlink.h> 7e2cb1decSSalil Mehta #include "hclgevf_cmd.h" 8e2cb1decSSalil Mehta #include "hclgevf_main.h" 9939ccd10SJijie Shao #include "hclgevf_regs.h" 10e2cb1decSSalil Mehta #include "hclge_mbx.h" 11e2cb1decSSalil Mehta #include "hnae3.h" 12cd624299SYufeng Mo #include "hclgevf_devlink.h" 13027733b1SJie Wang #include "hclge_comm_rss.h" 142a1a1a7bSHao Lan #include "hclgevf_trace.h" 15e2cb1decSSalil Mehta 16e2cb1decSSalil Mehta #define HCLGEVF_NAME "hclgevf" 17e2cb1decSSalil Mehta 18bbe6540eSHuazhong Tan #define HCLGEVF_RESET_MAX_FAIL_CNT 5 19bbe6540eSHuazhong Tan 209c6f7085SHuazhong Tan static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 215e7414cdSJian Shen static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 225e7414cdSJian Shen unsigned long delay); 235e7414cdSJian Shen 24e2cb1decSSalil Mehta static struct hnae3_ae_algo ae_algovf; 25e2cb1decSSalil Mehta 260ea68902SYunsheng Lin static struct workqueue_struct *hclgevf_wq; 270ea68902SYunsheng Lin 28e2cb1decSSalil Mehta static const struct pci_device_id ae_algovf_pci_tbl[] = { 29c155e22bSGuangbin Huang {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0}, 30c155e22bSGuangbin Huang {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), 31c155e22bSGuangbin Huang HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 32e2cb1decSSalil Mehta /* required last entry */ 33e2cb1decSSalil Mehta {0, } 34e2cb1decSSalil Mehta }; 35e2cb1decSSalil Mehta 362f550a46SYunsheng Lin MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 372f550a46SYunsheng Lin 38aab8d1c6SJie Wang /* hclgevf_cmd_send - send command to command queue 39aab8d1c6SJie Wang * @hw: pointer to the hw struct 40aab8d1c6SJie Wang * @desc: prefilled descriptor for describing the command 41aab8d1c6SJie Wang * @num : the number of descriptors to be sent 42aab8d1c6SJie Wang * 43aab8d1c6SJie Wang * This is the main send command for command queue, it 44aab8d1c6SJie Wang * sends the queue, cleans the queue, etc 45aab8d1c6SJie Wang */ 46aab8d1c6SJie Wang int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num) 47aab8d1c6SJie Wang { 489970308fSJie Wang return hclge_comm_cmd_send(&hw->hw, desc, num); 49aab8d1c6SJie Wang } 50aab8d1c6SJie Wang 512a1a1a7bSHao Lan static void hclgevf_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, 522a1a1a7bSHao Lan int num, bool is_special) 532a1a1a7bSHao Lan { 542a1a1a7bSHao Lan int i; 552a1a1a7bSHao Lan 562a1a1a7bSHao Lan trace_hclge_vf_cmd_send(hw, desc, 0, num); 572a1a1a7bSHao Lan 582a1a1a7bSHao Lan if (is_special) 592a1a1a7bSHao Lan return; 602a1a1a7bSHao Lan 612a1a1a7bSHao Lan for (i = 1; i < num; i++) 622a1a1a7bSHao Lan trace_hclge_vf_cmd_send(hw, &desc[i], i, num); 632a1a1a7bSHao Lan } 642a1a1a7bSHao Lan 652a1a1a7bSHao Lan static void hclgevf_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc, 662a1a1a7bSHao Lan int num, bool is_special) 672a1a1a7bSHao Lan { 682a1a1a7bSHao Lan int i; 692a1a1a7bSHao Lan 702a1a1a7bSHao Lan if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag))) 712a1a1a7bSHao Lan return; 722a1a1a7bSHao Lan 732a1a1a7bSHao Lan trace_hclge_vf_cmd_get(hw, desc, 0, num); 742a1a1a7bSHao Lan 752a1a1a7bSHao Lan if (is_special) 762a1a1a7bSHao Lan return; 772a1a1a7bSHao Lan 782a1a1a7bSHao Lan for (i = 1; i < num; i++) 792a1a1a7bSHao Lan 
trace_hclge_vf_cmd_get(hw, &desc[i], i, num); 802a1a1a7bSHao Lan } 812a1a1a7bSHao Lan 822a1a1a7bSHao Lan static const struct hclge_comm_cmq_ops hclgevf_cmq_ops = { 832a1a1a7bSHao Lan .trace_cmd_send = hclgevf_trace_cmd_send, 842a1a1a7bSHao Lan .trace_cmd_get = hclgevf_trace_cmd_get, 852a1a1a7bSHao Lan }; 862a1a1a7bSHao Lan 87aab8d1c6SJie Wang void hclgevf_arq_init(struct hclgevf_dev *hdev) 88aab8d1c6SJie Wang { 89aab8d1c6SJie Wang struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq; 90aab8d1c6SJie Wang 91aab8d1c6SJie Wang spin_lock(&cmdq->crq.lock); 92aab8d1c6SJie Wang /* initialize the pointers of async rx queue of mailbox */ 93aab8d1c6SJie Wang hdev->arq.hdev = hdev; 94aab8d1c6SJie Wang hdev->arq.head = 0; 95aab8d1c6SJie Wang hdev->arq.tail = 0; 96aab8d1c6SJie Wang atomic_set(&hdev->arq.count, 0); 97aab8d1c6SJie Wang spin_unlock(&cmdq->crq.lock); 98aab8d1c6SJie Wang } 99aab8d1c6SJie Wang 100939ccd10SJijie Shao struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) 101e2cb1decSSalil Mehta { 102eed9535fSPeng Li if (!handle->client) 103eed9535fSPeng Li return container_of(handle, struct hclgevf_dev, nic); 104eed9535fSPeng Li else if (handle->client->type == HNAE3_CLIENT_ROCE) 105eed9535fSPeng Li return container_of(handle, struct hclgevf_dev, roce); 106eed9535fSPeng Li else 107e2cb1decSSalil Mehta return container_of(handle, struct hclgevf_dev, nic); 108e2cb1decSSalil Mehta } 109e2cb1decSSalil Mehta 110ed1c6f35SPeiyang Wang static void hclgevf_update_stats(struct hnae3_handle *handle) 111e2cb1decSSalil Mehta { 112e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 113e2cb1decSSalil Mehta int status; 114e2cb1decSSalil Mehta 1154afc310cSJie Wang status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); 116e2cb1decSSalil Mehta if (status) 117e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 118e2cb1decSSalil Mehta "VF update of TQPS stats fail, status = %d.\n", 119e2cb1decSSalil Mehta status); 120e2cb1decSSalil Mehta } 121e2cb1decSSalil Mehta 122e2cb1decSSalil Mehta static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 123e2cb1decSSalil Mehta { 124e2cb1decSSalil Mehta if (strset == ETH_SS_TEST) 125e2cb1decSSalil Mehta return -EOPNOTSUPP; 126e2cb1decSSalil Mehta else if (strset == ETH_SS_STATS) 1274afc310cSJie Wang return hclge_comm_tqps_get_sset_count(handle); 128e2cb1decSSalil Mehta 129e2cb1decSSalil Mehta return 0; 130e2cb1decSSalil Mehta } 131e2cb1decSSalil Mehta 132e2cb1decSSalil Mehta static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 133e2cb1decSSalil Mehta u8 *data) 134e2cb1decSSalil Mehta { 135e2cb1decSSalil Mehta u8 *p = (char *)data; 136e2cb1decSSalil Mehta 137e2cb1decSSalil Mehta if (strset == ETH_SS_STATS) 1384afc310cSJie Wang p = hclge_comm_tqps_get_strings(handle, p); 139e2cb1decSSalil Mehta } 140e2cb1decSSalil Mehta 141e2cb1decSSalil Mehta static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 142e2cb1decSSalil Mehta { 1434afc310cSJie Wang hclge_comm_tqps_get_stats(handle, data); 144e2cb1decSSalil Mehta } 145e2cb1decSSalil Mehta 146d3410018SYufeng Mo static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code, 147d3410018SYufeng Mo u8 subcode) 148d3410018SYufeng Mo { 149d3410018SYufeng Mo if (msg) { 150d3410018SYufeng Mo memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg)); 151d3410018SYufeng Mo msg->code = code; 152d3410018SYufeng Mo msg->subcode = subcode; 153d3410018SYufeng Mo } 154d3410018SYufeng Mo } 155d3410018SYufeng Mo 15632e6d104SJian Shen static int 
hclgevf_get_basic_info(struct hclgevf_dev *hdev) 157e2cb1decSSalil Mehta { 15832e6d104SJian Shen struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 15932e6d104SJian Shen u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE]; 16032e6d104SJian Shen struct hclge_basic_info *basic_info; 161d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 16232e6d104SJian Shen unsigned long caps; 163e2cb1decSSalil Mehta int status; 164e2cb1decSSalil Mehta 16532e6d104SJian Shen hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0); 16632e6d104SJian Shen status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 167d3410018SYufeng Mo sizeof(resp_msg)); 168e2cb1decSSalil Mehta if (status) { 169e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 17032e6d104SJian Shen "failed to get basic info from pf, ret = %d", status); 171e2cb1decSSalil Mehta return status; 172e2cb1decSSalil Mehta } 173e2cb1decSSalil Mehta 17432e6d104SJian Shen basic_info = (struct hclge_basic_info *)resp_msg; 17532e6d104SJian Shen 17632e6d104SJian Shen hdev->hw_tc_map = basic_info->hw_tc_map; 177416eedb6SJie Wang hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version); 178416eedb6SJie Wang caps = le32_to_cpu(basic_info->pf_caps); 17932e6d104SJian Shen if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps)) 18032e6d104SJian Shen set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); 181e2cb1decSSalil Mehta 182e2cb1decSSalil Mehta return 0; 183e2cb1decSSalil Mehta } 184e2cb1decSSalil Mehta 18592f11ea1SJian Shen static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) 18692f11ea1SJian Shen { 18792f11ea1SJian Shen struct hnae3_handle *nic = &hdev->nic; 188d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 18992f11ea1SJian Shen u8 resp_msg; 19092f11ea1SJian Shen int ret; 19192f11ea1SJian Shen 192d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 193d3410018SYufeng Mo HCLGE_MBX_GET_PORT_BASE_VLAN_STATE); 194d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, 195d3410018SYufeng Mo sizeof(u8)); 19692f11ea1SJian Shen if (ret) { 19792f11ea1SJian Shen dev_err(&hdev->pdev->dev, 19892f11ea1SJian Shen "VF request to get port based vlan state failed %d", 19992f11ea1SJian Shen ret); 20092f11ea1SJian Shen return ret; 20192f11ea1SJian Shen } 20292f11ea1SJian Shen 20392f11ea1SJian Shen nic->port_base_vlan_state = resp_msg; 20492f11ea1SJian Shen 20592f11ea1SJian Shen return 0; 20692f11ea1SJian Shen } 20792f11ea1SJian Shen 2086cee6fc3SJian Shen static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 209e2cb1decSSalil Mehta { 210c0425944SPeng Li #define HCLGEVF_TQPS_RSS_INFO_LEN 6 211d3410018SYufeng Mo 212416eedb6SJie Wang struct hclge_mbx_vf_queue_info *queue_info; 213e2cb1decSSalil Mehta u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 214d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 215e2cb1decSSalil Mehta int status; 216e2cb1decSSalil Mehta 217d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0); 218d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 219e2cb1decSSalil Mehta HCLGEVF_TQPS_RSS_INFO_LEN); 220e2cb1decSSalil Mehta if (status) { 221e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 222e2cb1decSSalil Mehta "VF request to get tqp info from PF failed %d", 223e2cb1decSSalil Mehta status); 224e2cb1decSSalil Mehta return status; 225e2cb1decSSalil Mehta } 226e2cb1decSSalil Mehta 227416eedb6SJie Wang queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg; 228416eedb6SJie Wang hdev->num_tqps = le16_to_cpu(queue_info->num_tqps); 
229416eedb6SJie Wang hdev->rss_size_max = le16_to_cpu(queue_info->rss_size); 230416eedb6SJie Wang hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len); 231c0425944SPeng Li 232c0425944SPeng Li return 0; 233c0425944SPeng Li } 234c0425944SPeng Li 235c0425944SPeng Li static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) 236c0425944SPeng Li { 237c0425944SPeng Li #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 238d3410018SYufeng Mo 239416eedb6SJie Wang struct hclge_mbx_vf_queue_depth *queue_depth; 240c0425944SPeng Li u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; 241d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 242c0425944SPeng Li int ret; 243c0425944SPeng Li 244d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0); 245d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 246c0425944SPeng Li HCLGEVF_TQPS_DEPTH_INFO_LEN); 247c0425944SPeng Li if (ret) { 248c0425944SPeng Li dev_err(&hdev->pdev->dev, 249c0425944SPeng Li "VF request to get tqp depth info from PF failed %d", 250c0425944SPeng Li ret); 251c0425944SPeng Li return ret; 252c0425944SPeng Li } 253c0425944SPeng Li 254416eedb6SJie Wang queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg; 255416eedb6SJie Wang hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc); 256416eedb6SJie Wang hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc); 257e2cb1decSSalil Mehta 258e2cb1decSSalil Mehta return 0; 259e2cb1decSSalil Mehta } 260e2cb1decSSalil Mehta 2610c29d191Sliuzhongzhu static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 2620c29d191Sliuzhongzhu { 2630c29d191Sliuzhongzhu struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 264d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 2650c29d191Sliuzhongzhu u16 qid_in_pf = 0; 266d3410018SYufeng Mo u8 resp_data[2]; 2670c29d191Sliuzhongzhu int ret; 2680c29d191Sliuzhongzhu 269d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0); 270416eedb6SJie Wang *(__le16 *)send_msg.data = cpu_to_le16(queue_id); 271d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, 27263cbf7a9SYufeng Mo sizeof(resp_data)); 2730c29d191Sliuzhongzhu if (!ret) 274416eedb6SJie Wang qid_in_pf = le16_to_cpu(*(__le16 *)resp_data); 2750c29d191Sliuzhongzhu 2760c29d191Sliuzhongzhu return qid_in_pf; 2770c29d191Sliuzhongzhu } 2780c29d191Sliuzhongzhu 2799c3e7130Sliuzhongzhu static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) 2809c3e7130Sliuzhongzhu { 281d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 28288d10bd6SJian Shen u8 resp_msg[2]; 2839c3e7130Sliuzhongzhu int ret; 2849c3e7130Sliuzhongzhu 285d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0); 286d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 287d3410018SYufeng Mo sizeof(resp_msg)); 2889c3e7130Sliuzhongzhu if (ret) { 2899c3e7130Sliuzhongzhu dev_err(&hdev->pdev->dev, 2909c3e7130Sliuzhongzhu "VF request to get the pf port media type failed %d", 2919c3e7130Sliuzhongzhu ret); 2929c3e7130Sliuzhongzhu return ret; 2939c3e7130Sliuzhongzhu } 2949c3e7130Sliuzhongzhu 29588d10bd6SJian Shen hdev->hw.mac.media_type = resp_msg[0]; 29688d10bd6SJian Shen hdev->hw.mac.module_type = resp_msg[1]; 2979c3e7130Sliuzhongzhu 2989c3e7130Sliuzhongzhu return 0; 2999c3e7130Sliuzhongzhu } 3009c3e7130Sliuzhongzhu 301e2cb1decSSalil Mehta static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 302e2cb1decSSalil Mehta { 30387a9b2fdSYufeng Mo struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 
3044afc310cSJie Wang struct hclge_comm_tqp *tqp; 305e2cb1decSSalil Mehta int i; 306e2cb1decSSalil Mehta 307e2cb1decSSalil Mehta hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 3084afc310cSJie Wang sizeof(struct hclge_comm_tqp), GFP_KERNEL); 309e2cb1decSSalil Mehta if (!hdev->htqp) 310e2cb1decSSalil Mehta return -ENOMEM; 311e2cb1decSSalil Mehta 312e2cb1decSSalil Mehta tqp = hdev->htqp; 313e2cb1decSSalil Mehta 314e2cb1decSSalil Mehta for (i = 0; i < hdev->num_tqps; i++) { 315e2cb1decSSalil Mehta tqp->dev = &hdev->pdev->dev; 316e2cb1decSSalil Mehta tqp->index = i; 317e2cb1decSSalil Mehta 318e2cb1decSSalil Mehta tqp->q.ae_algo = &ae_algovf; 319e2cb1decSSalil Mehta tqp->q.buf_size = hdev->rx_buf_len; 320c0425944SPeng Li tqp->q.tx_desc_num = hdev->num_tx_desc; 321c0425944SPeng Li tqp->q.rx_desc_num = hdev->num_rx_desc; 3229a5ef4aaSYonglong Liu 3239a5ef4aaSYonglong Liu /* need an extended offset to configure queues >= 3249a5ef4aaSYonglong Liu * HCLGEVF_TQP_MAX_SIZE_DEV_V2. 3259a5ef4aaSYonglong Liu */ 3269a5ef4aaSYonglong Liu if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2) 327076bb537SJie Wang tqp->q.io_base = hdev->hw.hw.io_base + 3289a5ef4aaSYonglong Liu HCLGEVF_TQP_REG_OFFSET + 329e2cb1decSSalil Mehta i * HCLGEVF_TQP_REG_SIZE; 3309a5ef4aaSYonglong Liu else 331076bb537SJie Wang tqp->q.io_base = hdev->hw.hw.io_base + 3329a5ef4aaSYonglong Liu HCLGEVF_TQP_REG_OFFSET + 3339a5ef4aaSYonglong Liu HCLGEVF_TQP_EXT_REG_OFFSET + 3349a5ef4aaSYonglong Liu (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) * 3359a5ef4aaSYonglong Liu HCLGEVF_TQP_REG_SIZE; 336e2cb1decSSalil Mehta 33787a9b2fdSYufeng Mo /* when device supports tx push and has device memory, 33887a9b2fdSYufeng Mo * the queue can execute push mode or doorbell mode on 33987a9b2fdSYufeng Mo * device memory. 34087a9b2fdSYufeng Mo */ 34187a9b2fdSYufeng Mo if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) 34287a9b2fdSYufeng Mo tqp->q.mem_base = hdev->hw.hw.mem_base + 34387a9b2fdSYufeng Mo HCLGEVF_TQP_MEM_OFFSET(hdev, i); 34487a9b2fdSYufeng Mo 345e2cb1decSSalil Mehta tqp++; 346e2cb1decSSalil Mehta } 347e2cb1decSSalil Mehta 348e2cb1decSSalil Mehta return 0; 349e2cb1decSSalil Mehta } 350e2cb1decSSalil Mehta 351e2cb1decSSalil Mehta static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 352e2cb1decSSalil Mehta { 353e2cb1decSSalil Mehta struct hnae3_handle *nic = &hdev->nic; 354e2cb1decSSalil Mehta struct hnae3_knic_private_info *kinfo; 355e2cb1decSSalil Mehta u16 new_tqps = hdev->num_tqps; 356ebaf1908SWeihang Li unsigned int i; 35735244430SJian Shen u8 num_tc = 0; 358e2cb1decSSalil Mehta 359e2cb1decSSalil Mehta kinfo = &nic->kinfo; 360c0425944SPeng Li kinfo->num_tx_desc = hdev->num_tx_desc; 361c0425944SPeng Li kinfo->num_rx_desc = hdev->num_rx_desc; 362e2cb1decSSalil Mehta kinfo->rx_buf_len = hdev->rx_buf_len; 36393969dc1SJie Wang for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) 364e2cb1decSSalil Mehta if (hdev->hw_tc_map & BIT(i)) 36535244430SJian Shen num_tc++; 366e2cb1decSSalil Mehta 36735244430SJian Shen num_tc = num_tc ? 
num_tc : 1; 36835244430SJian Shen kinfo->tc_info.num_tc = num_tc; 36935244430SJian Shen kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc); 37035244430SJian Shen new_tqps = kinfo->rss_size * num_tc; 371e2cb1decSSalil Mehta kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 372e2cb1decSSalil Mehta 373e2cb1decSSalil Mehta kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 374e2cb1decSSalil Mehta sizeof(struct hnae3_queue *), GFP_KERNEL); 375e2cb1decSSalil Mehta if (!kinfo->tqp) 376e2cb1decSSalil Mehta return -ENOMEM; 377e2cb1decSSalil Mehta 378e2cb1decSSalil Mehta for (i = 0; i < kinfo->num_tqps; i++) { 379e2cb1decSSalil Mehta hdev->htqp[i].q.handle = &hdev->nic; 380e2cb1decSSalil Mehta hdev->htqp[i].q.tqp_index = i; 381e2cb1decSSalil Mehta kinfo->tqp[i] = &hdev->htqp[i].q; 382e2cb1decSSalil Mehta } 383e2cb1decSSalil Mehta 384580a05f9SYonglong Liu /* after init the max rss_size and tqps, adjust the default tqp numbers 385580a05f9SYonglong Liu * and rss size with the actual vector numbers 386580a05f9SYonglong Liu */ 387580a05f9SYonglong Liu kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); 38835244430SJian Shen kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc, 389580a05f9SYonglong Liu kinfo->rss_size); 390580a05f9SYonglong Liu 391e2cb1decSSalil Mehta return 0; 392e2cb1decSSalil Mehta } 393e2cb1decSSalil Mehta 394e2cb1decSSalil Mehta static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 395e2cb1decSSalil Mehta { 396d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 397e2cb1decSSalil Mehta int status; 398e2cb1decSSalil Mehta 399d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0); 400d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 401e2cb1decSSalil Mehta if (status) 402e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 403e2cb1decSSalil Mehta "VF failed to fetch link status(%d) from PF", status); 404e2cb1decSSalil Mehta } 405e2cb1decSSalil Mehta 406e2cb1decSSalil Mehta void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 407e2cb1decSSalil Mehta { 40845e92b7eSPeng Li struct hnae3_handle *rhandle = &hdev->roce; 409e2cb1decSSalil Mehta struct hnae3_handle *handle = &hdev->nic; 41045e92b7eSPeng Li struct hnae3_client *rclient; 411e2cb1decSSalil Mehta struct hnae3_client *client; 412e2cb1decSSalil Mehta 413ff200099SYunsheng Lin if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state)) 414ff200099SYunsheng Lin return; 415ff200099SYunsheng Lin 416e2cb1decSSalil Mehta client = handle->client; 41745e92b7eSPeng Li rclient = hdev->roce_client; 418e2cb1decSSalil Mehta 419582d37bbSPeng Li link_state = 420582d37bbSPeng Li test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 
0 : link_state; 421e2cb1decSSalil Mehta if (link_state != hdev->hw.mac.link) { 422b15c072aSYonglong Liu hdev->hw.mac.link = link_state; 423e2cb1decSSalil Mehta client->ops->link_status_change(handle, !!link_state); 42445e92b7eSPeng Li if (rclient && rclient->ops->link_status_change) 42545e92b7eSPeng Li rclient->ops->link_status_change(rhandle, !!link_state); 426e2cb1decSSalil Mehta } 427ff200099SYunsheng Lin 428ff200099SYunsheng Lin clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); 429e2cb1decSSalil Mehta } 430e2cb1decSSalil Mehta 431538abaf3SYueHaibing static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) 4329194d18bSliuzhongzhu { 4339194d18bSliuzhongzhu #define HCLGEVF_ADVERTISING 0 4349194d18bSliuzhongzhu #define HCLGEVF_SUPPORTED 1 4359194d18bSliuzhongzhu 436d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 437d3410018SYufeng Mo 438d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0); 439d3410018SYufeng Mo send_msg.data[0] = HCLGEVF_ADVERTISING; 440d3410018SYufeng Mo hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 441d3410018SYufeng Mo send_msg.data[0] = HCLGEVF_SUPPORTED; 442d3410018SYufeng Mo hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 4439194d18bSliuzhongzhu } 4449194d18bSliuzhongzhu 445e2cb1decSSalil Mehta static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 446e2cb1decSSalil Mehta { 447e2cb1decSSalil Mehta struct hnae3_handle *nic = &hdev->nic; 448e2cb1decSSalil Mehta int ret; 449e2cb1decSSalil Mehta 450e2cb1decSSalil Mehta nic->ae_algo = &ae_algovf; 451e2cb1decSSalil Mehta nic->pdev = hdev->pdev; 4526639a7b9SPeiyang Wang bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits, 4536639a7b9SPeiyang Wang MAX_NUMNODES); 454424eb834SSalil Mehta nic->flags |= HNAE3_SUPPORT_VF; 455076bb537SJie Wang nic->kinfo.io_base = hdev->hw.hw.io_base; 456e2cb1decSSalil Mehta 457e2cb1decSSalil Mehta ret = hclgevf_knic_setup(hdev); 458e2cb1decSSalil Mehta if (ret) 459e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 460e2cb1decSSalil Mehta ret); 461e2cb1decSSalil Mehta return ret; 462e2cb1decSSalil Mehta } 463e2cb1decSSalil Mehta 464e2cb1decSSalil Mehta static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 465e2cb1decSSalil Mehta { 46636cbbdf6SPeng Li if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 46736cbbdf6SPeng Li dev_warn(&hdev->pdev->dev, 46836cbbdf6SPeng Li "vector(vector_id %d) has been freed.\n", vector_id); 46936cbbdf6SPeng Li return; 47036cbbdf6SPeng Li } 47136cbbdf6SPeng Li 472e2cb1decSSalil Mehta hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 473e2cb1decSSalil Mehta hdev->num_msi_left += 1; 474e2cb1decSSalil Mehta hdev->num_msi_used -= 1; 475e2cb1decSSalil Mehta } 476e2cb1decSSalil Mehta 477e2cb1decSSalil Mehta static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 478e2cb1decSSalil Mehta struct hnae3_vector_info *vector_info) 479e2cb1decSSalil Mehta { 480e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 481e2cb1decSSalil Mehta struct hnae3_vector_info *vector = vector_info; 482e2cb1decSSalil Mehta int alloc = 0; 483e2cb1decSSalil Mehta int i, j; 484e2cb1decSSalil Mehta 485580a05f9SYonglong Liu vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); 486e2cb1decSSalil Mehta vector_num = min(hdev->num_msi_left, vector_num); 487e2cb1decSSalil Mehta 488e2cb1decSSalil Mehta for (j = 0; j < vector_num; j++) { 489e2cb1decSSalil Mehta for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 
490e2cb1decSSalil Mehta if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 491e2cb1decSSalil Mehta vector->vector = pci_irq_vector(hdev->pdev, i); 492076bb537SJie Wang vector->io_addr = hdev->hw.hw.io_base + 493e2cb1decSSalil Mehta HCLGEVF_VECTOR_REG_BASE + 494e2cb1decSSalil Mehta (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 495e2cb1decSSalil Mehta hdev->vector_status[i] = 0; 496e2cb1decSSalil Mehta hdev->vector_irq[i] = vector->vector; 497e2cb1decSSalil Mehta 498e2cb1decSSalil Mehta vector++; 499e2cb1decSSalil Mehta alloc++; 500e2cb1decSSalil Mehta 501e2cb1decSSalil Mehta break; 502e2cb1decSSalil Mehta } 503e2cb1decSSalil Mehta } 504e2cb1decSSalil Mehta } 505e2cb1decSSalil Mehta hdev->num_msi_left -= alloc; 506e2cb1decSSalil Mehta hdev->num_msi_used += alloc; 507e2cb1decSSalil Mehta 508e2cb1decSSalil Mehta return alloc; 509e2cb1decSSalil Mehta } 510e2cb1decSSalil Mehta 511e2cb1decSSalil Mehta static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 512e2cb1decSSalil Mehta { 513e2cb1decSSalil Mehta int i; 514e2cb1decSSalil Mehta 515e2cb1decSSalil Mehta for (i = 0; i < hdev->num_msi; i++) 516e2cb1decSSalil Mehta if (vector == hdev->vector_irq[i]) 517e2cb1decSSalil Mehta return i; 518e2cb1decSSalil Mehta 519e2cb1decSSalil Mehta return -EINVAL; 520e2cb1decSSalil Mehta } 521e2cb1decSSalil Mehta 522a638b1d8SJian Shen /* for revision 0x20, vf shared the same rss config with pf */ 523a638b1d8SJian Shen static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) 524a638b1d8SJian Shen { 525a638b1d8SJian Shen #define HCLGEVF_RSS_MBX_RESP_LEN 8 526027733b1SJie Wang struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 527a638b1d8SJian Shen u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; 528d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 529a638b1d8SJian Shen u16 msg_num, hash_key_index; 530a638b1d8SJian Shen u8 index; 531a638b1d8SJian Shen int ret; 532a638b1d8SJian Shen 533d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0); 5347428d6c9SJie Wang msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / 535a638b1d8SJian Shen HCLGEVF_RSS_MBX_RESP_LEN; 536a638b1d8SJian Shen for (index = 0; index < msg_num; index++) { 537d3410018SYufeng Mo send_msg.data[0] = index; 538d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 539a638b1d8SJian Shen HCLGEVF_RSS_MBX_RESP_LEN); 540a638b1d8SJian Shen if (ret) { 541a638b1d8SJian Shen dev_err(&hdev->pdev->dev, 542a638b1d8SJian Shen "VF get rss hash key from PF failed, ret=%d", 543a638b1d8SJian Shen ret); 544a638b1d8SJian Shen return ret; 545a638b1d8SJian Shen } 546a638b1d8SJian Shen 547a638b1d8SJian Shen hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; 548a638b1d8SJian Shen if (index == msg_num - 1) 549a638b1d8SJian Shen memcpy(&rss_cfg->rss_hash_key[hash_key_index], 550a638b1d8SJian Shen &resp_msg[0], 5517428d6c9SJie Wang HCLGE_COMM_RSS_KEY_SIZE - hash_key_index); 552a638b1d8SJian Shen else 553a638b1d8SJian Shen memcpy(&rss_cfg->rss_hash_key[hash_key_index], 554a638b1d8SJian Shen &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); 555a638b1d8SJian Shen } 556a638b1d8SJian Shen 557a638b1d8SJian Shen return 0; 558a638b1d8SJian Shen } 559a638b1d8SJian Shen 560e2cb1decSSalil Mehta static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 561e2cb1decSSalil Mehta u8 *hfunc) 562e2cb1decSSalil Mehta { 563e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 564027733b1SJie Wang struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 5657428d6c9SJie Wang int ret; 
566e2cb1decSSalil Mehta 567295ba232SGuangbin Huang if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 5687428d6c9SJie Wang hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc); 569a638b1d8SJian Shen } else { 570a638b1d8SJian Shen if (hfunc) 571a638b1d8SJian Shen *hfunc = ETH_RSS_HASH_TOP; 572a638b1d8SJian Shen if (key) { 573a638b1d8SJian Shen ret = hclgevf_get_rss_hash_key(hdev); 574a638b1d8SJian Shen if (ret) 575a638b1d8SJian Shen return ret; 576a638b1d8SJian Shen memcpy(key, rss_cfg->rss_hash_key, 5777428d6c9SJie Wang HCLGE_COMM_RSS_KEY_SIZE); 578a638b1d8SJian Shen } 579374ad291SJian Shen } 580374ad291SJian Shen 5817428d6c9SJie Wang hclge_comm_get_rss_indir_tbl(rss_cfg, indir, 5827428d6c9SJie Wang hdev->ae_dev->dev_specs.rss_ind_tbl_size); 583e2cb1decSSalil Mehta 584374ad291SJian Shen return 0; 585e2cb1decSSalil Mehta } 586e2cb1decSSalil Mehta 587e2cb1decSSalil Mehta static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 588e2cb1decSSalil Mehta const u8 *key, const u8 hfunc) 589e2cb1decSSalil Mehta { 590e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 591027733b1SJie Wang struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 592374ad291SJian Shen int ret, i; 593374ad291SJian Shen 594295ba232SGuangbin Huang if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 59593969dc1SJie Wang ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, 59693969dc1SJie Wang hfunc); 597374ad291SJian Shen if (ret) 598374ad291SJian Shen return ret; 599374ad291SJian Shen } 600e2cb1decSSalil Mehta 601e2cb1decSSalil Mehta /* update the shadow RSS table with user specified qids */ 60287ce161eSGuangbin Huang for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 603e2cb1decSSalil Mehta rss_cfg->rss_indirection_tbl[i] = indir[i]; 604e2cb1decSSalil Mehta 605e2cb1decSSalil Mehta /* update the hardware */ 6067428d6c9SJie Wang return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, 6077428d6c9SJie Wang rss_cfg->rss_indirection_tbl); 6085fd0e7b4SHuazhong Tan } 6095fd0e7b4SHuazhong Tan 6105fd0e7b4SHuazhong Tan static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 6115fd0e7b4SHuazhong Tan struct ethtool_rxnfc *nfc) 6125fd0e7b4SHuazhong Tan { 6135fd0e7b4SHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 6145fd0e7b4SHuazhong Tan int ret; 6155fd0e7b4SHuazhong Tan 6165fd0e7b4SHuazhong Tan if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 6175fd0e7b4SHuazhong Tan return -EOPNOTSUPP; 6185fd0e7b4SHuazhong Tan 61993969dc1SJie Wang ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, 62093969dc1SJie Wang &hdev->rss_cfg, nfc); 62193969dc1SJie Wang if (ret) 6225fd0e7b4SHuazhong Tan dev_err(&hdev->pdev->dev, 62393969dc1SJie Wang "failed to set rss tuple, ret = %d.\n", ret); 6245fd0e7b4SHuazhong Tan 625d97b3072SJian Shen return ret; 626d97b3072SJian Shen } 627d97b3072SJian Shen 628d97b3072SJian Shen static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 629d97b3072SJian Shen struct ethtool_rxnfc *nfc) 630d97b3072SJian Shen { 631d97b3072SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 632d97b3072SJian Shen u8 tuple_sets; 63373f7767eSJian Shen int ret; 634d97b3072SJian Shen 635295ba232SGuangbin Huang if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 636d97b3072SJian Shen return -EOPNOTSUPP; 637d97b3072SJian Shen 638d97b3072SJian Shen nfc->data = 0; 639d97b3072SJian Shen 640027733b1SJie Wang ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type, 64173f7767eSJian Shen 
&tuple_sets); 64273f7767eSJian Shen if (ret || !tuple_sets) 64373f7767eSJian Shen return ret; 644d97b3072SJian Shen 6457428d6c9SJie Wang nfc->data = hclge_comm_convert_rss_tuple(tuple_sets); 646d97b3072SJian Shen 647d97b3072SJian Shen return 0; 648d97b3072SJian Shen } 649d97b3072SJian Shen 650e2cb1decSSalil Mehta static int hclgevf_get_tc_size(struct hnae3_handle *handle) 651e2cb1decSSalil Mehta { 652e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 653027733b1SJie Wang struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 654e2cb1decSSalil Mehta 655e2cb1decSSalil Mehta return rss_cfg->rss_size; 656e2cb1decSSalil Mehta } 657e2cb1decSSalil Mehta 658e2cb1decSSalil Mehta static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 659b204bc74SPeng Li int vector_id, 660e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 661e2cb1decSSalil Mehta { 662e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 663d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 664e2cb1decSSalil Mehta struct hnae3_ring_chain_node *node; 665e2cb1decSSalil Mehta int status; 666d3410018SYufeng Mo int i = 0; 667e2cb1decSSalil Mehta 668d3410018SYufeng Mo memset(&send_msg, 0, sizeof(send_msg)); 669d3410018SYufeng Mo send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR : 670c09ba484SPeng Li HCLGE_MBX_UNMAP_RING_TO_VECTOR; 671d3410018SYufeng Mo send_msg.vector_id = vector_id; 672e2cb1decSSalil Mehta 673e2cb1decSSalil Mehta for (node = ring_chain; node; node = node->next) { 674d3410018SYufeng Mo send_msg.param[i].ring_type = 675e4e87715SPeng Li hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 676d3410018SYufeng Mo 677d3410018SYufeng Mo send_msg.param[i].tqp_index = node->tqp_index; 678d3410018SYufeng Mo send_msg.param[i].int_gl_index = 679d3410018SYufeng Mo hnae3_get_field(node->int_gl_idx, 68079eee410SFuyun Liang HNAE3_RING_GL_IDX_M, 68179eee410SFuyun Liang HNAE3_RING_GL_IDX_S); 68279eee410SFuyun Liang 6835d02a58dSYunsheng Lin i++; 684d3410018SYufeng Mo if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) { 685d3410018SYufeng Mo send_msg.ring_num = i; 686e2cb1decSSalil Mehta 687d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, false, 688d3410018SYufeng Mo NULL, 0); 689e2cb1decSSalil Mehta if (status) { 690e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 691e2cb1decSSalil Mehta "Map TQP fail, status is %d.\n", 692e2cb1decSSalil Mehta status); 693e2cb1decSSalil Mehta return status; 694e2cb1decSSalil Mehta } 695e2cb1decSSalil Mehta i = 0; 696e2cb1decSSalil Mehta } 697e2cb1decSSalil Mehta } 698e2cb1decSSalil Mehta 699e2cb1decSSalil Mehta return 0; 700e2cb1decSSalil Mehta } 701e2cb1decSSalil Mehta 702e2cb1decSSalil Mehta static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 703e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 704e2cb1decSSalil Mehta { 705b204bc74SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 706b204bc74SPeng Li int vector_id; 707b204bc74SPeng Li 708b204bc74SPeng Li vector_id = hclgevf_get_vector_index(hdev, vector); 709b204bc74SPeng Li if (vector_id < 0) { 710b204bc74SPeng Li dev_err(&handle->pdev->dev, 711b204bc74SPeng Li "Get vector index fail. 
ret =%d\n", vector_id); 712b204bc74SPeng Li return vector_id; 713b204bc74SPeng Li } 714b204bc74SPeng Li 715b204bc74SPeng Li return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 716e2cb1decSSalil Mehta } 717e2cb1decSSalil Mehta 718e2cb1decSSalil Mehta static int hclgevf_unmap_ring_from_vector( 719e2cb1decSSalil Mehta struct hnae3_handle *handle, 720e2cb1decSSalil Mehta int vector, 721e2cb1decSSalil Mehta struct hnae3_ring_chain_node *ring_chain) 722e2cb1decSSalil Mehta { 723e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 724e2cb1decSSalil Mehta int ret, vector_id; 725e2cb1decSSalil Mehta 726dea846e8SHuazhong Tan if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 727dea846e8SHuazhong Tan return 0; 728dea846e8SHuazhong Tan 729e2cb1decSSalil Mehta vector_id = hclgevf_get_vector_index(hdev, vector); 730e2cb1decSSalil Mehta if (vector_id < 0) { 731e2cb1decSSalil Mehta dev_err(&handle->pdev->dev, 732e2cb1decSSalil Mehta "Get vector index fail. ret =%d\n", vector_id); 733e2cb1decSSalil Mehta return vector_id; 734e2cb1decSSalil Mehta } 735e2cb1decSSalil Mehta 736b204bc74SPeng Li ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 7370d3e6631SYunsheng Lin if (ret) 738e2cb1decSSalil Mehta dev_err(&handle->pdev->dev, 739e2cb1decSSalil Mehta "Unmap ring from vector fail. vector=%d, ret =%d\n", 740e2cb1decSSalil Mehta vector_id, 741e2cb1decSSalil Mehta ret); 7420d3e6631SYunsheng Lin 743e2cb1decSSalil Mehta return ret; 744e2cb1decSSalil Mehta } 745e2cb1decSSalil Mehta 7460d3e6631SYunsheng Lin static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 7470d3e6631SYunsheng Lin { 7480d3e6631SYunsheng Lin struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 74903718db9SYunsheng Lin int vector_id; 7500d3e6631SYunsheng Lin 75103718db9SYunsheng Lin vector_id = hclgevf_get_vector_index(hdev, vector); 75203718db9SYunsheng Lin if (vector_id < 0) { 75303718db9SYunsheng Lin dev_err(&handle->pdev->dev, 75403718db9SYunsheng Lin "hclgevf_put_vector get vector index fail. ret =%d\n", 75503718db9SYunsheng Lin vector_id); 75603718db9SYunsheng Lin return vector_id; 75703718db9SYunsheng Lin } 75803718db9SYunsheng Lin 75903718db9SYunsheng Lin hclgevf_free_vector(hdev, vector_id); 760e2cb1decSSalil Mehta 761e2cb1decSSalil Mehta return 0; 762e2cb1decSSalil Mehta } 763e2cb1decSSalil Mehta 7643b75c3dfSPeng Li static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 765e196ec75SJian Shen bool en_uc_pmc, bool en_mc_pmc, 766f01f5559SJian Shen bool en_bc_pmc) 767e2cb1decSSalil Mehta { 7685e7414cdSJian Shen struct hnae3_handle *handle = &hdev->nic; 769d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 770f01f5559SJian Shen int ret; 771e2cb1decSSalil Mehta 772d3410018SYufeng Mo memset(&send_msg, 0, sizeof(send_msg)); 773d3410018SYufeng Mo send_msg.code = HCLGE_MBX_SET_PROMISC_MODE; 774d3410018SYufeng Mo send_msg.en_bc = en_bc_pmc ? 1 : 0; 775d3410018SYufeng Mo send_msg.en_uc = en_uc_pmc ? 1 : 0; 776d3410018SYufeng Mo send_msg.en_mc = en_mc_pmc ? 1 : 0; 7775e7414cdSJian Shen send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC, 7785e7414cdSJian Shen &handle->priv_flags) ? 
1 : 0; 779e2cb1decSSalil Mehta 780d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 781f01f5559SJian Shen if (ret) 782e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, 783f01f5559SJian Shen "Set promisc mode fail, status is %d.\n", ret); 784e2cb1decSSalil Mehta 785f01f5559SJian Shen return ret; 786e2cb1decSSalil Mehta } 787e2cb1decSSalil Mehta 788e196ec75SJian Shen static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 789e196ec75SJian Shen bool en_mc_pmc) 790e2cb1decSSalil Mehta { 791e196ec75SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 792e196ec75SJian Shen bool en_bc_pmc; 793e196ec75SJian Shen 794295ba232SGuangbin Huang en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2; 795e196ec75SJian Shen 796e196ec75SJian Shen return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, 797e196ec75SJian Shen en_bc_pmc); 798e2cb1decSSalil Mehta } 799e2cb1decSSalil Mehta 800c631c696SJian Shen static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) 801c631c696SJian Shen { 802c631c696SJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 803c631c696SJian Shen 804c631c696SJian Shen set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 8055e7414cdSJian Shen hclgevf_task_schedule(hdev, 0); 806c631c696SJian Shen } 807c631c696SJian Shen 808c631c696SJian Shen static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) 809c631c696SJian Shen { 810c631c696SJian Shen struct hnae3_handle *handle = &hdev->nic; 811c631c696SJian Shen bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE; 812c631c696SJian Shen bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE; 813c631c696SJian Shen int ret; 814c631c696SJian Shen 815c631c696SJian Shen if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { 816c631c696SJian Shen ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc); 817c631c696SJian Shen if (!ret) 818c631c696SJian Shen clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 819c631c696SJian Shen } 820c631c696SJian Shen } 821c631c696SJian Shen 8228fa86551SYufeng Mo static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id, 8238fa86551SYufeng Mo u16 stream_id, bool enable) 824e2cb1decSSalil Mehta { 825e2cb1decSSalil Mehta struct hclgevf_cfg_com_tqp_queue_cmd *req; 8266befad60SJie Wang struct hclge_desc desc; 827e2cb1decSSalil Mehta 828e2cb1decSSalil Mehta req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 829e2cb1decSSalil Mehta 83043710bfeSJie Wang hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); 831e2cb1decSSalil Mehta req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 832e2cb1decSSalil Mehta req->stream_id = cpu_to_le16(stream_id); 833ebaf1908SWeihang Li if (enable) 834ebaf1908SWeihang Li req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; 835e2cb1decSSalil Mehta 8368fa86551SYufeng Mo return hclgevf_cmd_send(&hdev->hw, &desc, 1); 8378fa86551SYufeng Mo } 838e2cb1decSSalil Mehta 8398fa86551SYufeng Mo static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable) 8408fa86551SYufeng Mo { 8418fa86551SYufeng Mo struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 8428fa86551SYufeng Mo int ret; 8438fa86551SYufeng Mo u16 i; 8448fa86551SYufeng Mo 8458fa86551SYufeng Mo for (i = 0; i < handle->kinfo.num_tqps; i++) { 8468fa86551SYufeng Mo ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable); 8478fa86551SYufeng Mo if (ret) 8488fa86551SYufeng Mo return ret; 8498fa86551SYufeng Mo } 8508fa86551SYufeng Mo 8518fa86551SYufeng Mo return 0; 
852e2cb1decSSalil Mehta } 853e2cb1decSSalil Mehta 8548e6de441SHuazhong Tan static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) 8558e6de441SHuazhong Tan { 856d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 8578e6de441SHuazhong Tan u8 host_mac[ETH_ALEN]; 8588e6de441SHuazhong Tan int status; 8598e6de441SHuazhong Tan 860d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0); 861d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac, 862d3410018SYufeng Mo ETH_ALEN); 8638e6de441SHuazhong Tan if (status) { 8648e6de441SHuazhong Tan dev_err(&hdev->pdev->dev, 8658e6de441SHuazhong Tan "fail to get VF MAC from host %d", status); 8668e6de441SHuazhong Tan return status; 8678e6de441SHuazhong Tan } 8688e6de441SHuazhong Tan 8698e6de441SHuazhong Tan ether_addr_copy(p, host_mac); 8708e6de441SHuazhong Tan 8718e6de441SHuazhong Tan return 0; 8728e6de441SHuazhong Tan } 8738e6de441SHuazhong Tan 874e2cb1decSSalil Mehta static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 875e2cb1decSSalil Mehta { 876e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 8778e6de441SHuazhong Tan u8 host_mac_addr[ETH_ALEN]; 878e2cb1decSSalil Mehta 8798e6de441SHuazhong Tan if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) 8808e6de441SHuazhong Tan return; 8818e6de441SHuazhong Tan 8828e6de441SHuazhong Tan hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); 8838e6de441SHuazhong Tan if (hdev->has_pf_mac) 8848e6de441SHuazhong Tan ether_addr_copy(p, host_mac_addr); 8858e6de441SHuazhong Tan else 886e2cb1decSSalil Mehta ether_addr_copy(p, hdev->hw.mac.mac_addr); 887e2cb1decSSalil Mehta } 888e2cb1decSSalil Mehta 88976660757SJakub Kicinski static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p, 89059098055SFuyun Liang bool is_first) 891e2cb1decSSalil Mehta { 892e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 893e2cb1decSSalil Mehta u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 894d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 895e2cb1decSSalil Mehta u8 *new_mac_addr = (u8 *)p; 896e2cb1decSSalil Mehta int status; 897e2cb1decSSalil Mehta 898d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); 899ee4bcd3bSJian Shen send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY; 900d3410018SYufeng Mo ether_addr_copy(send_msg.data, new_mac_addr); 901ee4bcd3bSJian Shen if (is_first && !hdev->has_pf_mac) 902ee4bcd3bSJian Shen eth_zero_addr(&send_msg.data[ETH_ALEN]); 903ee4bcd3bSJian Shen else 904d3410018SYufeng Mo ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); 905d3410018SYufeng Mo status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 906e2cb1decSSalil Mehta if (!status) 907e2cb1decSSalil Mehta ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 908e2cb1decSSalil Mehta 909e2cb1decSSalil Mehta return status; 910e2cb1decSSalil Mehta } 911e2cb1decSSalil Mehta 912ee4bcd3bSJian Shen static struct hclgevf_mac_addr_node * 913ee4bcd3bSJian Shen hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr) 914ee4bcd3bSJian Shen { 915ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp; 916ee4bcd3bSJian Shen 917ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, list, node) 918ee4bcd3bSJian Shen if (ether_addr_equal(mac_addr, mac_node->mac_addr)) 919ee4bcd3bSJian Shen return mac_node; 920ee4bcd3bSJian Shen 921ee4bcd3bSJian Shen return NULL; 922ee4bcd3bSJian Shen } 923ee4bcd3bSJian Shen 924ee4bcd3bSJian Shen static void 
hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node, 925ee4bcd3bSJian Shen enum HCLGEVF_MAC_NODE_STATE state) 926ee4bcd3bSJian Shen { 927ee4bcd3bSJian Shen switch (state) { 928ee4bcd3bSJian Shen /* from set_rx_mode or tmp_add_list */ 929ee4bcd3bSJian Shen case HCLGEVF_MAC_TO_ADD: 930ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_DEL) 931ee4bcd3bSJian Shen mac_node->state = HCLGEVF_MAC_ACTIVE; 932ee4bcd3bSJian Shen break; 933ee4bcd3bSJian Shen /* only from set_rx_mode */ 934ee4bcd3bSJian Shen case HCLGEVF_MAC_TO_DEL: 935ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 936ee4bcd3bSJian Shen list_del(&mac_node->node); 937ee4bcd3bSJian Shen kfree(mac_node); 938ee4bcd3bSJian Shen } else { 939ee4bcd3bSJian Shen mac_node->state = HCLGEVF_MAC_TO_DEL; 940ee4bcd3bSJian Shen } 941ee4bcd3bSJian Shen break; 942ee4bcd3bSJian Shen /* only from tmp_add_list, the mac_node->state won't be 943ee4bcd3bSJian Shen * HCLGEVF_MAC_ACTIVE 944ee4bcd3bSJian Shen */ 945ee4bcd3bSJian Shen case HCLGEVF_MAC_ACTIVE: 946ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_ADD) 947ee4bcd3bSJian Shen mac_node->state = HCLGEVF_MAC_ACTIVE; 948ee4bcd3bSJian Shen break; 949ee4bcd3bSJian Shen } 950ee4bcd3bSJian Shen } 951ee4bcd3bSJian Shen 952ee4bcd3bSJian Shen static int hclgevf_update_mac_list(struct hnae3_handle *handle, 953ee4bcd3bSJian Shen enum HCLGEVF_MAC_NODE_STATE state, 954ee4bcd3bSJian Shen enum HCLGEVF_MAC_ADDR_TYPE mac_type, 955e2cb1decSSalil Mehta const unsigned char *addr) 956e2cb1decSSalil Mehta { 957e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 958ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node; 959ee4bcd3bSJian Shen struct list_head *list; 960e2cb1decSSalil Mehta 961ee4bcd3bSJian Shen list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 962ee4bcd3bSJian Shen &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 963ee4bcd3bSJian Shen 964ee4bcd3bSJian Shen spin_lock_bh(&hdev->mac_table.mac_list_lock); 965ee4bcd3bSJian Shen 966ee4bcd3bSJian Shen /* if the mac addr is already in the mac list, no need to add a new 967ee4bcd3bSJian Shen * one into it, just check the mac addr state, convert it to a new 96834eff17eSJilin Yuan * state, or just remove it, or do nothing. 
969ee4bcd3bSJian Shen */ 970ee4bcd3bSJian Shen mac_node = hclgevf_find_mac_node(list, addr); 971ee4bcd3bSJian Shen if (mac_node) { 972ee4bcd3bSJian Shen hclgevf_update_mac_node(mac_node, state); 973ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 974ee4bcd3bSJian Shen return 0; 975ee4bcd3bSJian Shen } 976ee4bcd3bSJian Shen /* if this address is never added, unnecessary to delete */ 977ee4bcd3bSJian Shen if (state == HCLGEVF_MAC_TO_DEL) { 978ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 979ee4bcd3bSJian Shen return -ENOENT; 980ee4bcd3bSJian Shen } 981ee4bcd3bSJian Shen 982ee4bcd3bSJian Shen mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); 983ee4bcd3bSJian Shen if (!mac_node) { 984ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 985ee4bcd3bSJian Shen return -ENOMEM; 986ee4bcd3bSJian Shen } 987ee4bcd3bSJian Shen 988ee4bcd3bSJian Shen mac_node->state = state; 989ee4bcd3bSJian Shen ether_addr_copy(mac_node->mac_addr, addr); 990ee4bcd3bSJian Shen list_add_tail(&mac_node->node, list); 991ee4bcd3bSJian Shen 992ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 993ee4bcd3bSJian Shen return 0; 994ee4bcd3bSJian Shen } 995ee4bcd3bSJian Shen 996ee4bcd3bSJian Shen static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 997ee4bcd3bSJian Shen const unsigned char *addr) 998ee4bcd3bSJian Shen { 999ee4bcd3bSJian Shen return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1000ee4bcd3bSJian Shen HCLGEVF_MAC_ADDR_UC, addr); 1001e2cb1decSSalil Mehta } 1002e2cb1decSSalil Mehta 1003e2cb1decSSalil Mehta static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1004e2cb1decSSalil Mehta const unsigned char *addr) 1005e2cb1decSSalil Mehta { 1006ee4bcd3bSJian Shen return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1007ee4bcd3bSJian Shen HCLGEVF_MAC_ADDR_UC, addr); 1008e2cb1decSSalil Mehta } 1009e2cb1decSSalil Mehta 1010e2cb1decSSalil Mehta static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1011e2cb1decSSalil Mehta const unsigned char *addr) 1012e2cb1decSSalil Mehta { 1013ee4bcd3bSJian Shen return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1014ee4bcd3bSJian Shen HCLGEVF_MAC_ADDR_MC, addr); 1015e2cb1decSSalil Mehta } 1016e2cb1decSSalil Mehta 1017e2cb1decSSalil Mehta static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1018e2cb1decSSalil Mehta const unsigned char *addr) 1019e2cb1decSSalil Mehta { 1020ee4bcd3bSJian Shen return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1021ee4bcd3bSJian Shen HCLGEVF_MAC_ADDR_MC, addr); 1022ee4bcd3bSJian Shen } 1023e2cb1decSSalil Mehta 1024ee4bcd3bSJian Shen static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev, 1025ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, 1026ee4bcd3bSJian Shen enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1027ee4bcd3bSJian Shen { 1028ee4bcd3bSJian Shen struct hclge_vf_to_pf_msg send_msg; 1029ee4bcd3bSJian Shen u8 code, subcode; 1030ee4bcd3bSJian Shen 1031ee4bcd3bSJian Shen if (mac_type == HCLGEVF_MAC_ADDR_UC) { 1032ee4bcd3bSJian Shen code = HCLGE_MBX_SET_UNICAST; 1033ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1034ee4bcd3bSJian Shen subcode = HCLGE_MBX_MAC_VLAN_UC_ADD; 1035ee4bcd3bSJian Shen else 1036ee4bcd3bSJian Shen subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE; 1037ee4bcd3bSJian Shen } else { 1038ee4bcd3bSJian Shen code = HCLGE_MBX_SET_MULTICAST; 1039ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1040ee4bcd3bSJian Shen subcode = HCLGE_MBX_MAC_VLAN_MC_ADD; 1041ee4bcd3bSJian Shen else 1042ee4bcd3bSJian 
Shen subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE; 1043ee4bcd3bSJian Shen } 1044ee4bcd3bSJian Shen 1045ee4bcd3bSJian Shen hclgevf_build_send_msg(&send_msg, code, subcode); 1046ee4bcd3bSJian Shen ether_addr_copy(send_msg.data, mac_node->mac_addr); 1047d3410018SYufeng Mo return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1048e2cb1decSSalil Mehta } 1049e2cb1decSSalil Mehta 1050ee4bcd3bSJian Shen static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, 1051ee4bcd3bSJian Shen struct list_head *list, 1052ee4bcd3bSJian Shen enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1053ee4bcd3bSJian Shen { 10544f331fdaSYufeng Mo char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 1055ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp; 1056ee4bcd3bSJian Shen int ret; 1057ee4bcd3bSJian Shen 1058ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, list, node) { 1059ee4bcd3bSJian Shen ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); 1060ee4bcd3bSJian Shen if (ret) { 10614f331fdaSYufeng Mo hnae3_format_mac_addr(format_mac_addr, 10624f331fdaSYufeng Mo mac_node->mac_addr); 1063ee4bcd3bSJian Shen dev_err(&hdev->pdev->dev, 10644f331fdaSYufeng Mo "failed to configure mac %s, state = %d, ret = %d\n", 10654f331fdaSYufeng Mo format_mac_addr, mac_node->state, ret); 1066ee4bcd3bSJian Shen return; 1067ee4bcd3bSJian Shen } 1068ee4bcd3bSJian Shen if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 1069ee4bcd3bSJian Shen mac_node->state = HCLGEVF_MAC_ACTIVE; 1070ee4bcd3bSJian Shen } else { 1071ee4bcd3bSJian Shen list_del(&mac_node->node); 1072ee4bcd3bSJian Shen kfree(mac_node); 1073ee4bcd3bSJian Shen } 1074ee4bcd3bSJian Shen } 1075ee4bcd3bSJian Shen } 1076ee4bcd3bSJian Shen 1077ee4bcd3bSJian Shen static void hclgevf_sync_from_add_list(struct list_head *add_list, 1078ee4bcd3bSJian Shen struct list_head *mac_list) 1079ee4bcd3bSJian Shen { 1080ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1081ee4bcd3bSJian Shen 1082ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, add_list, node) { 1083ee4bcd3bSJian Shen /* if the mac address from tmp_add_list is not in the 1084ee4bcd3bSJian Shen * uc/mc_mac_list, it means we have received a TO_DEL request 1085ee4bcd3bSJian Shen * during the time window of sending the mac config request to PF. 1086ee4bcd3bSJian Shen * If mac_node state is ACTIVE, then change its state to TO_DEL, 1087ee4bcd3bSJian Shen * then it will be removed next time. If it is TO_ADD, it means the 1088ee4bcd3bSJian Shen * TO_ADD request failed to send, so just remove the mac node. 
1089ee4bcd3bSJian Shen */ 1090ee4bcd3bSJian Shen new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1091ee4bcd3bSJian Shen if (new_node) { 1092ee4bcd3bSJian Shen hclgevf_update_mac_node(new_node, mac_node->state); 1093ee4bcd3bSJian Shen list_del(&mac_node->node); 1094ee4bcd3bSJian Shen kfree(mac_node); 1095ee4bcd3bSJian Shen } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) { 1096ee4bcd3bSJian Shen mac_node->state = HCLGEVF_MAC_TO_DEL; 109749768ce9SBaokun Li list_move_tail(&mac_node->node, mac_list); 1098ee4bcd3bSJian Shen } else { 1099ee4bcd3bSJian Shen list_del(&mac_node->node); 1100ee4bcd3bSJian Shen kfree(mac_node); 1101ee4bcd3bSJian Shen } 1102ee4bcd3bSJian Shen } 1103ee4bcd3bSJian Shen } 1104ee4bcd3bSJian Shen 1105ee4bcd3bSJian Shen static void hclgevf_sync_from_del_list(struct list_head *del_list, 1106ee4bcd3bSJian Shen struct list_head *mac_list) 1107ee4bcd3bSJian Shen { 1108ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1109ee4bcd3bSJian Shen 1110ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, del_list, node) { 1111ee4bcd3bSJian Shen new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1112ee4bcd3bSJian Shen if (new_node) { 1113ee4bcd3bSJian Shen /* If the mac addr exists in the mac list, it means a 1114ee4bcd3bSJian Shen * new TO_ADD request was received during the time window 1115ee4bcd3bSJian Shen * of sending the mac addr configuration request to PF, so just 1116ee4bcd3bSJian Shen * change the mac state to ACTIVE. 1117ee4bcd3bSJian Shen */ 1118ee4bcd3bSJian Shen new_node->state = HCLGEVF_MAC_ACTIVE; 1119ee4bcd3bSJian Shen list_del(&mac_node->node); 1120ee4bcd3bSJian Shen kfree(mac_node); 1121ee4bcd3bSJian Shen } else { 112249768ce9SBaokun Li list_move_tail(&mac_node->node, mac_list); 1123ee4bcd3bSJian Shen } 1124ee4bcd3bSJian Shen } 1125ee4bcd3bSJian Shen } 1126ee4bcd3bSJian Shen 1127ee4bcd3bSJian Shen static void hclgevf_clear_list(struct list_head *list) 1128ee4bcd3bSJian Shen { 1129ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp; 1130ee4bcd3bSJian Shen 1131ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, list, node) { 1132ee4bcd3bSJian Shen list_del(&mac_node->node); 1133ee4bcd3bSJian Shen kfree(mac_node); 1134ee4bcd3bSJian Shen } 1135ee4bcd3bSJian Shen } 1136ee4bcd3bSJian Shen 1137ee4bcd3bSJian Shen static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev, 1138ee4bcd3bSJian Shen enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1139ee4bcd3bSJian Shen { 1140ee4bcd3bSJian Shen struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1141ee4bcd3bSJian Shen struct list_head tmp_add_list, tmp_del_list; 1142ee4bcd3bSJian Shen struct list_head *list; 1143ee4bcd3bSJian Shen 1144ee4bcd3bSJian Shen INIT_LIST_HEAD(&tmp_add_list); 1145ee4bcd3bSJian Shen INIT_LIST_HEAD(&tmp_del_list); 1146ee4bcd3bSJian Shen 1147ee4bcd3bSJian Shen /* move the mac addr to the tmp_add_list and tmp_del_list, then 1148ee4bcd3bSJian Shen * we can add/delete these mac addr outside the spin lock 1149ee4bcd3bSJian Shen */ 1150ee4bcd3bSJian Shen list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 
1151ee4bcd3bSJian Shen &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 1152ee4bcd3bSJian Shen 1153ee4bcd3bSJian Shen spin_lock_bh(&hdev->mac_table.mac_list_lock); 1154ee4bcd3bSJian Shen 1155ee4bcd3bSJian Shen list_for_each_entry_safe(mac_node, tmp, list, node) { 1156ee4bcd3bSJian Shen switch (mac_node->state) { 1157ee4bcd3bSJian Shen case HCLGEVF_MAC_TO_DEL: 115849768ce9SBaokun Li list_move_tail(&mac_node->node, &tmp_del_list); 1159ee4bcd3bSJian Shen break; 1160ee4bcd3bSJian Shen case HCLGEVF_MAC_TO_ADD: 1161ee4bcd3bSJian Shen new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 1162ee4bcd3bSJian Shen if (!new_node) 1163ee4bcd3bSJian Shen goto stop_traverse; 1164ee4bcd3bSJian Shen 1165ee4bcd3bSJian Shen ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); 1166ee4bcd3bSJian Shen new_node->state = mac_node->state; 1167ee4bcd3bSJian Shen list_add_tail(&new_node->node, &tmp_add_list); 1168ee4bcd3bSJian Shen break; 1169ee4bcd3bSJian Shen default: 1170ee4bcd3bSJian Shen break; 1171ee4bcd3bSJian Shen } 1172ee4bcd3bSJian Shen } 1173ee4bcd3bSJian Shen 1174ee4bcd3bSJian Shen stop_traverse: 1175ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1176ee4bcd3bSJian Shen 1177ee4bcd3bSJian Shen /* delete first, in order to get max mac table space for adding */ 1178ee4bcd3bSJian Shen hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type); 1179ee4bcd3bSJian Shen hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type); 1180ee4bcd3bSJian Shen 1181ee4bcd3bSJian Shen /* if some mac addresses were added/deleted fail, move back to the 1182ee4bcd3bSJian Shen * mac_list, and retry at next time. 1183ee4bcd3bSJian Shen */ 1184ee4bcd3bSJian Shen spin_lock_bh(&hdev->mac_table.mac_list_lock); 1185ee4bcd3bSJian Shen 1186ee4bcd3bSJian Shen hclgevf_sync_from_del_list(&tmp_del_list, list); 1187ee4bcd3bSJian Shen hclgevf_sync_from_add_list(&tmp_add_list, list); 1188ee4bcd3bSJian Shen 1189ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1190ee4bcd3bSJian Shen } 1191ee4bcd3bSJian Shen 1192ee4bcd3bSJian Shen static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev) 1193ee4bcd3bSJian Shen { 1194ee4bcd3bSJian Shen hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC); 1195ee4bcd3bSJian Shen hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC); 1196ee4bcd3bSJian Shen } 1197ee4bcd3bSJian Shen 1198ee4bcd3bSJian Shen static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev) 1199ee4bcd3bSJian Shen { 1200ee4bcd3bSJian Shen spin_lock_bh(&hdev->mac_table.mac_list_lock); 1201ee4bcd3bSJian Shen 1202ee4bcd3bSJian Shen hclgevf_clear_list(&hdev->mac_table.uc_mac_list); 1203ee4bcd3bSJian Shen hclgevf_clear_list(&hdev->mac_table.mc_mac_list); 1204ee4bcd3bSJian Shen 1205ee4bcd3bSJian Shen spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1206ee4bcd3bSJian Shen } 1207ee4bcd3bSJian Shen 1208fa6a262aSJian Shen static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable) 1209fa6a262aSJian Shen { 1210fa6a262aSJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1211fa6a262aSJian Shen struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 1212fa6a262aSJian Shen struct hclge_vf_to_pf_msg send_msg; 1213fa6a262aSJian Shen 1214fa6a262aSJian Shen if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) 1215fa6a262aSJian Shen return -EOPNOTSUPP; 1216fa6a262aSJian Shen 1217fa6a262aSJian Shen hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1218fa6a262aSJian Shen HCLGE_MBX_ENABLE_VLAN_FILTER); 1219fa6a262aSJian Shen send_msg.data[0] = enable ? 
1 : 0; 1220fa6a262aSJian Shen 1221fa6a262aSJian Shen return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1222fa6a262aSJian Shen } 1223fa6a262aSJian Shen 1224e2cb1decSSalil Mehta static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1225e2cb1decSSalil Mehta __be16 proto, u16 vlan_id, 1226e2cb1decSSalil Mehta bool is_kill) 1227e2cb1decSSalil Mehta { 1228e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1229416eedb6SJie Wang struct hclge_mbx_vlan_filter *vlan_filter; 1230d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1231fe4144d4SJian Shen int ret; 1232e2cb1decSSalil Mehta 1233b37ce587SYufeng Mo if (vlan_id > HCLGEVF_MAX_VLAN_ID) 1234e2cb1decSSalil Mehta return -EINVAL; 1235e2cb1decSSalil Mehta 1236e2cb1decSSalil Mehta if (proto != htons(ETH_P_8021Q)) 1237e2cb1decSSalil Mehta return -EPROTONOSUPPORT; 1238e2cb1decSSalil Mehta 1239b7b5d25bSGuojia Liao /* When device is resetting or reset failed, firmware is unable to 1240b7b5d25bSGuojia Liao * handle mailbox. Just record the vlan id, and remove it after 1241fe4144d4SJian Shen * reset finished. 1242fe4144d4SJian Shen */ 1243b7b5d25bSGuojia Liao if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 1244b7b5d25bSGuojia Liao test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { 1245fe4144d4SJian Shen set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1246fe4144d4SJian Shen return -EBUSY; 1247472a2ff6SJian Shen } else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) { 1248472a2ff6SJian Shen clear_bit(vlan_id, hdev->vlan_del_fail_bmap); 1249fe4144d4SJian Shen } 1250fe4144d4SJian Shen 1251d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1252d3410018SYufeng Mo HCLGE_MBX_VLAN_FILTER); 1253416eedb6SJie Wang vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data; 1254416eedb6SJie Wang vlan_filter->is_kill = is_kill; 1255416eedb6SJie Wang vlan_filter->vlan_id = cpu_to_le16(vlan_id); 1256416eedb6SJie Wang vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto)); 1257416eedb6SJie Wang 125846ee7350SGuojia Liao /* when removing the hw vlan filter fails, record the vlan id, 1259fe4144d4SJian Shen * and try to remove it from hw later, to be consistent 1260fe4144d4SJian Shen * with the stack. 
1261fe4144d4SJian Shen */ 1262d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1263fe4144d4SJian Shen if (is_kill && ret) 1264fe4144d4SJian Shen set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1265fe4144d4SJian Shen 1266fe4144d4SJian Shen return ret; 1267fe4144d4SJian Shen } 1268fe4144d4SJian Shen 1269fe4144d4SJian Shen static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) 1270fe4144d4SJian Shen { 1271fe4144d4SJian Shen #define HCLGEVF_MAX_SYNC_COUNT 60 1272fe4144d4SJian Shen struct hnae3_handle *handle = &hdev->nic; 1273fe4144d4SJian Shen int ret, sync_cnt = 0; 1274fe4144d4SJian Shen u16 vlan_id; 1275fe4144d4SJian Shen 1276472a2ff6SJian Shen if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID)) 1277472a2ff6SJian Shen return; 1278472a2ff6SJian Shen 1279472a2ff6SJian Shen rtnl_lock(); 1280fe4144d4SJian Shen vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1281fe4144d4SJian Shen while (vlan_id != VLAN_N_VID) { 1282fe4144d4SJian Shen ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q), 1283fe4144d4SJian Shen vlan_id, true); 1284fe4144d4SJian Shen if (ret) 1285472a2ff6SJian Shen break; 1286fe4144d4SJian Shen 1287fe4144d4SJian Shen clear_bit(vlan_id, hdev->vlan_del_fail_bmap); 1288fe4144d4SJian Shen sync_cnt++; 1289fe4144d4SJian Shen if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT) 1290472a2ff6SJian Shen break; 1291fe4144d4SJian Shen 1292fe4144d4SJian Shen vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1293fe4144d4SJian Shen } 1294472a2ff6SJian Shen rtnl_unlock(); 1295e2cb1decSSalil Mehta } 1296e2cb1decSSalil Mehta 1297b2641e2aSYunsheng Lin static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1298b2641e2aSYunsheng Lin { 1299b2641e2aSYunsheng Lin struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1300d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1301b2641e2aSYunsheng Lin 1302d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1303d3410018SYufeng Mo HCLGE_MBX_VLAN_RX_OFF_CFG); 1304d3410018SYufeng Mo send_msg.data[0] = enable ? 
1 : 0; 1305d3410018SYufeng Mo return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1306b2641e2aSYunsheng Lin } 1307b2641e2aSYunsheng Lin 13088fa86551SYufeng Mo static int hclgevf_reset_tqp(struct hnae3_handle *handle) 1309e2cb1decSSalil Mehta { 13108fa86551SYufeng Mo #define HCLGEVF_RESET_ALL_QUEUE_DONE 1U 1311e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1312d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 13138fa86551SYufeng Mo u8 return_status = 0; 13141a426f8bSPeng Li int ret; 13158fa86551SYufeng Mo u16 i; 1316e2cb1decSSalil Mehta 13171a426f8bSPeng Li /* disable vf queue before send queue reset msg to PF */ 13188fa86551SYufeng Mo ret = hclgevf_tqp_enable(handle, false); 13198fa86551SYufeng Mo if (ret) { 13208fa86551SYufeng Mo dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n", 13218fa86551SYufeng Mo ret); 13227fa6be4fSHuazhong Tan return ret; 13238fa86551SYufeng Mo } 13241a426f8bSPeng Li 1325d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); 13268fa86551SYufeng Mo 13278fa86551SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status, 13288fa86551SYufeng Mo sizeof(return_status)); 13298fa86551SYufeng Mo if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE) 13308fa86551SYufeng Mo return ret; 13318fa86551SYufeng Mo 13328fa86551SYufeng Mo for (i = 1; i < handle->kinfo.num_tqps; i++) { 13338fa86551SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); 1334416eedb6SJie Wang *(__le16 *)send_msg.data = cpu_to_le16(i); 13358fa86551SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 13368fa86551SYufeng Mo if (ret) 13378fa86551SYufeng Mo return ret; 13388fa86551SYufeng Mo } 13398fa86551SYufeng Mo 13408fa86551SYufeng Mo return 0; 1341e2cb1decSSalil Mehta } 1342e2cb1decSSalil Mehta 1343818f1675SYunsheng Lin static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) 1344818f1675SYunsheng Lin { 1345818f1675SYunsheng Lin struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1346416eedb6SJie Wang struct hclge_mbx_mtu_info *mtu_info; 1347d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1348818f1675SYunsheng Lin 1349d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0); 1350416eedb6SJie Wang mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data; 1351416eedb6SJie Wang mtu_info->mtu = cpu_to_le32(new_mtu); 1352416eedb6SJie Wang 1353d3410018SYufeng Mo return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1354818f1675SYunsheng Lin } 1355818f1675SYunsheng Lin 13566988eb2aSSalil Mehta static int hclgevf_notify_client(struct hclgevf_dev *hdev, 13576988eb2aSSalil Mehta enum hnae3_reset_notify_type type) 13586988eb2aSSalil Mehta { 13596988eb2aSSalil Mehta struct hnae3_client *client = hdev->nic_client; 13606988eb2aSSalil Mehta struct hnae3_handle *handle = &hdev->nic; 13616a5f6fa3SHuazhong Tan int ret; 13626988eb2aSSalil Mehta 136325d1817cSHuazhong Tan if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) || 136425d1817cSHuazhong Tan !client) 136525d1817cSHuazhong Tan return 0; 136625d1817cSHuazhong Tan 13676988eb2aSSalil Mehta if (!client->ops->reset_notify) 13686988eb2aSSalil Mehta return -EOPNOTSUPP; 13696988eb2aSSalil Mehta 13706a5f6fa3SHuazhong Tan ret = client->ops->reset_notify(handle, type); 13716a5f6fa3SHuazhong Tan if (ret) 13726a5f6fa3SHuazhong Tan dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 13736a5f6fa3SHuazhong Tan type, ret); 13746a5f6fa3SHuazhong Tan 13756a5f6fa3SHuazhong Tan return ret; 
13766988eb2aSSalil Mehta } 13776988eb2aSSalil Mehta 1378fe735c84SHuazhong Tan static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev, 1379fe735c84SHuazhong Tan enum hnae3_reset_notify_type type) 1380fe735c84SHuazhong Tan { 1381fe735c84SHuazhong Tan struct hnae3_client *client = hdev->roce_client; 1382fe735c84SHuazhong Tan struct hnae3_handle *handle = &hdev->roce; 1383fe735c84SHuazhong Tan int ret; 1384fe735c84SHuazhong Tan 1385fe735c84SHuazhong Tan if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client) 1386fe735c84SHuazhong Tan return 0; 1387fe735c84SHuazhong Tan 1388fe735c84SHuazhong Tan if (!client->ops->reset_notify) 1389fe735c84SHuazhong Tan return -EOPNOTSUPP; 1390fe735c84SHuazhong Tan 1391fe735c84SHuazhong Tan ret = client->ops->reset_notify(handle, type); 1392fe735c84SHuazhong Tan if (ret) 1393fe735c84SHuazhong Tan dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", 1394fe735c84SHuazhong Tan type, ret); 1395fe735c84SHuazhong Tan return ret; 1396fe735c84SHuazhong Tan } 1397fe735c84SHuazhong Tan 13986988eb2aSSalil Mehta static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 13996988eb2aSSalil Mehta { 1400aa5c4f17SHuazhong Tan #define HCLGEVF_RESET_WAIT_US 20000 1401aa5c4f17SHuazhong Tan #define HCLGEVF_RESET_WAIT_CNT 2000 1402aa5c4f17SHuazhong Tan #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1403aa5c4f17SHuazhong Tan (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1404aa5c4f17SHuazhong Tan 1405aa5c4f17SHuazhong Tan u32 val; 1406aa5c4f17SHuazhong Tan int ret; 14076988eb2aSSalil Mehta 1408f28368bbSHuazhong Tan if (hdev->reset_type == HNAE3_VF_RESET) 1409076bb537SJie Wang ret = readl_poll_timeout(hdev->hw.hw.io_base + 141072e2fb07SHuazhong Tan HCLGEVF_VF_RST_ING, val, 141172e2fb07SHuazhong Tan !(val & HCLGEVF_VF_RST_ING_BIT), 141272e2fb07SHuazhong Tan HCLGEVF_RESET_WAIT_US, 141372e2fb07SHuazhong Tan HCLGEVF_RESET_WAIT_TIMEOUT_US); 141472e2fb07SHuazhong Tan else 1415076bb537SJie Wang ret = readl_poll_timeout(hdev->hw.hw.io_base + 141672e2fb07SHuazhong Tan HCLGEVF_RST_ING, val, 1417aa5c4f17SHuazhong Tan !(val & HCLGEVF_RST_ING_BITS), 1418aa5c4f17SHuazhong Tan HCLGEVF_RESET_WAIT_US, 1419aa5c4f17SHuazhong Tan HCLGEVF_RESET_WAIT_TIMEOUT_US); 14206988eb2aSSalil Mehta 14216988eb2aSSalil Mehta /* hardware completion status should be available by this time */ 1422aa5c4f17SHuazhong Tan if (ret) { 1423aa5c4f17SHuazhong Tan dev_err(&hdev->pdev->dev, 14248912fd6aSColin Ian King "couldn't get reset done status from h/w, timeout!\n"); 1425aa5c4f17SHuazhong Tan return ret; 14266988eb2aSSalil Mehta } 14276988eb2aSSalil Mehta 14286988eb2aSSalil Mehta /* we will wait a bit more to let reset of the stack to complete. This 14296988eb2aSSalil Mehta * might happen in case reset assertion was made by PF. Yes, this also 14306988eb2aSSalil Mehta * means we might end up waiting bit more even for VF reset. 
14316988eb2aSSalil Mehta */ 1432814d0c78SJie Wang if (hdev->reset_type == HNAE3_VF_FULL_RESET) 14336988eb2aSSalil Mehta msleep(5000); 1434814d0c78SJie Wang else 1435814d0c78SJie Wang msleep(500); 14366988eb2aSSalil Mehta 14376988eb2aSSalil Mehta return 0; 14386988eb2aSSalil Mehta } 14396988eb2aSSalil Mehta 14406b428b4fSHuazhong Tan static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) 14416b428b4fSHuazhong Tan { 14426b428b4fSHuazhong Tan u32 reg_val; 14436b428b4fSHuazhong Tan 1444cb413bfaSJie Wang reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); 14456b428b4fSHuazhong Tan if (enable) 14466b428b4fSHuazhong Tan reg_val |= HCLGEVF_NIC_SW_RST_RDY; 14476b428b4fSHuazhong Tan else 14486b428b4fSHuazhong Tan reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; 14496b428b4fSHuazhong Tan 1450cb413bfaSJie Wang hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 14516b428b4fSHuazhong Tan reg_val); 14526b428b4fSHuazhong Tan } 14536b428b4fSHuazhong Tan 14546988eb2aSSalil Mehta static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 14556988eb2aSSalil Mehta { 14567a01c897SSalil Mehta int ret; 14577a01c897SSalil Mehta 14586988eb2aSSalil Mehta /* uninitialize the nic client */ 14596a5f6fa3SHuazhong Tan ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 14606a5f6fa3SHuazhong Tan if (ret) 14616a5f6fa3SHuazhong Tan return ret; 14626988eb2aSSalil Mehta 14637a01c897SSalil Mehta /* re-initialize the hclge device */ 14649c6f7085SHuazhong Tan ret = hclgevf_reset_hdev(hdev); 14657a01c897SSalil Mehta if (ret) { 14667a01c897SSalil Mehta dev_err(&hdev->pdev->dev, 14677a01c897SSalil Mehta "hclge device re-init failed, VF is disabled!\n"); 14687a01c897SSalil Mehta return ret; 14697a01c897SSalil Mehta } 14706988eb2aSSalil Mehta 14716988eb2aSSalil Mehta /* bring up the nic client again */ 14726a5f6fa3SHuazhong Tan ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 14736a5f6fa3SHuazhong Tan if (ret) 14746a5f6fa3SHuazhong Tan return ret; 14756988eb2aSSalil Mehta 14766b428b4fSHuazhong Tan /* clear handshake status with IMP */ 14776b428b4fSHuazhong Tan hclgevf_reset_handshake(hdev, false); 14786b428b4fSHuazhong Tan 14791cc9bc6eSHuazhong Tan /* bring up the nic to enable TX/RX again */ 14801cc9bc6eSHuazhong Tan return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 14816988eb2aSSalil Mehta } 14826988eb2aSSalil Mehta 1483dea846e8SHuazhong Tan static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1484dea846e8SHuazhong Tan { 1485ada13ee3SHuazhong Tan #define HCLGEVF_RESET_SYNC_TIME 100 1486ada13ee3SHuazhong Tan 1487f28368bbSHuazhong Tan if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { 1488d41884eeSHuazhong Tan struct hclge_vf_to_pf_msg send_msg; 1489d41884eeSHuazhong Tan int ret; 1490d41884eeSHuazhong Tan 1491d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0); 1492d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1493cddd5648SHuazhong Tan if (ret) { 1494cddd5648SHuazhong Tan dev_err(&hdev->pdev->dev, 1495cddd5648SHuazhong Tan "failed to assert VF reset, ret = %d\n", ret); 1496cddd5648SHuazhong Tan return ret; 1497cddd5648SHuazhong Tan } 1498c88a6e7dSHuazhong Tan hdev->rst_stats.vf_func_rst_cnt++; 1499dea846e8SHuazhong Tan } 1500dea846e8SHuazhong Tan 1501076bb537SJie Wang set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 1502ada13ee3SHuazhong Tan /* inform hardware that preparatory work is done */ 1503ada13ee3SHuazhong Tan msleep(HCLGEVF_RESET_SYNC_TIME); 15046b428b4fSHuazhong Tan hclgevf_reset_handshake(hdev, true); 
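	/* the handshake above sets HCLGEVF_NIC_SW_RST_RDY, telling the
	 * firmware that software preparation (mailbox reset request sent,
	 * command queue marked disabled) is complete, so the hardware
	 * reset may start at any point after this
	 */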
1505d41884eeSHuazhong Tan dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n", 1506d41884eeSHuazhong Tan hdev->reset_type); 1507dea846e8SHuazhong Tan 1508d41884eeSHuazhong Tan return 0; 1509dea846e8SHuazhong Tan } 1510dea846e8SHuazhong Tan 15113d77d0cbSHuazhong Tan static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) 15123d77d0cbSHuazhong Tan { 15133d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "VF function reset count: %u\n", 15143d77d0cbSHuazhong Tan hdev->rst_stats.vf_func_rst_cnt); 15153d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", 15163d77d0cbSHuazhong Tan hdev->rst_stats.flr_rst_cnt); 15173d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "VF reset count: %u\n", 15183d77d0cbSHuazhong Tan hdev->rst_stats.vf_rst_cnt); 15193d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "reset done count: %u\n", 15203d77d0cbSHuazhong Tan hdev->rst_stats.rst_done_cnt); 15213d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", 15223d77d0cbSHuazhong Tan hdev->rst_stats.hw_rst_done_cnt); 15233d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "reset count: %u\n", 15243d77d0cbSHuazhong Tan hdev->rst_stats.rst_cnt); 15253d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "reset fail count: %u\n", 15263d77d0cbSHuazhong Tan hdev->rst_stats.rst_fail_cnt); 15273d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", 15283d77d0cbSHuazhong Tan hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); 15293d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", 1530cb413bfaSJie Wang hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG)); 15313d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", 1532cb413bfaSJie Wang hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG)); 15333d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", 15343d77d0cbSHuazhong Tan hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); 15353d77d0cbSHuazhong Tan dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 15363d77d0cbSHuazhong Tan } 15373d77d0cbSHuazhong Tan 1538bbe6540eSHuazhong Tan static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) 1539bbe6540eSHuazhong Tan { 15406b428b4fSHuazhong Tan /* recover handshake status with IMP when reset fail */ 15416b428b4fSHuazhong Tan hclgevf_reset_handshake(hdev, true); 1542bbe6540eSHuazhong Tan hdev->rst_stats.rst_fail_cnt++; 1543adcf738bSGuojia Liao dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", 1544bbe6540eSHuazhong Tan hdev->rst_stats.rst_fail_cnt); 1545bbe6540eSHuazhong Tan 1546bbe6540eSHuazhong Tan if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) 1547bbe6540eSHuazhong Tan set_bit(hdev->reset_type, &hdev->reset_pending); 1548bbe6540eSHuazhong Tan 1549bbe6540eSHuazhong Tan if (hclgevf_is_reset_pending(hdev)) { 1550bbe6540eSHuazhong Tan set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1551bbe6540eSHuazhong Tan hclgevf_reset_task_schedule(hdev); 15523d77d0cbSHuazhong Tan } else { 1553d5432455SGuojia Liao set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 15543d77d0cbSHuazhong Tan hclgevf_dump_rst_info(hdev); 1555bbe6540eSHuazhong Tan } 1556bbe6540eSHuazhong Tan } 1557bbe6540eSHuazhong Tan 15581cc9bc6eSHuazhong Tan static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) 15596988eb2aSSalil Mehta { 15606988eb2aSSalil Mehta int ret; 15616988eb2aSSalil Mehta 1562c88a6e7dSHuazhong Tan hdev->rst_stats.rst_cnt++; 15636988eb2aSSalil Mehta 1564fe735c84SHuazhong Tan /* perform reset of the 
stack & ae device for a client */ 1565fe735c84SHuazhong Tan ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); 1566fe735c84SHuazhong Tan if (ret) 1567fe735c84SHuazhong Tan return ret; 1568fe735c84SHuazhong Tan 15691cc9bc6eSHuazhong Tan rtnl_lock(); 15706988eb2aSSalil Mehta /* bring down the nic to stop any ongoing TX/RX */ 15716a5f6fa3SHuazhong Tan ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 157229118ab9SHuazhong Tan rtnl_unlock(); 15736a5f6fa3SHuazhong Tan if (ret) 15741cc9bc6eSHuazhong Tan return ret; 1575dea846e8SHuazhong Tan 15761cc9bc6eSHuazhong Tan return hclgevf_reset_prepare_wait(hdev); 15776988eb2aSSalil Mehta } 15786988eb2aSSalil Mehta 15791cc9bc6eSHuazhong Tan static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) 15801cc9bc6eSHuazhong Tan { 15811cc9bc6eSHuazhong Tan int ret; 15821cc9bc6eSHuazhong Tan 1583c88a6e7dSHuazhong Tan hdev->rst_stats.hw_rst_done_cnt++; 1584fe735c84SHuazhong Tan ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); 1585fe735c84SHuazhong Tan if (ret) 1586fe735c84SHuazhong Tan return ret; 1587c88a6e7dSHuazhong Tan 158829118ab9SHuazhong Tan rtnl_lock(); 15896988eb2aSSalil Mehta /* now, re-initialize the nic client and ae device */ 15906988eb2aSSalil Mehta ret = hclgevf_reset_stack(hdev); 15911cc9bc6eSHuazhong Tan rtnl_unlock(); 15926a5f6fa3SHuazhong Tan if (ret) { 15936988eb2aSSalil Mehta dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 15941cc9bc6eSHuazhong Tan return ret; 15956a5f6fa3SHuazhong Tan } 15966988eb2aSSalil Mehta 1597fe735c84SHuazhong Tan ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT); 1598fe735c84SHuazhong Tan /* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1 1599fe735c84SHuazhong Tan * times 1600fe735c84SHuazhong Tan */ 1601fe735c84SHuazhong Tan if (ret && 1602fe735c84SHuazhong Tan hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1) 1603fe735c84SHuazhong Tan return ret; 1604fe735c84SHuazhong Tan 1605fe735c84SHuazhong Tan ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT); 1606fe735c84SHuazhong Tan if (ret) 1607fe735c84SHuazhong Tan return ret; 1608fe735c84SHuazhong Tan 1609b644a8d4SHuazhong Tan hdev->last_reset_time = jiffies; 1610c88a6e7dSHuazhong Tan hdev->rst_stats.rst_done_cnt++; 1611bbe6540eSHuazhong Tan hdev->rst_stats.rst_fail_cnt = 0; 1612d5432455SGuojia Liao clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1613b644a8d4SHuazhong Tan 16141cc9bc6eSHuazhong Tan return 0; 16151cc9bc6eSHuazhong Tan } 16161cc9bc6eSHuazhong Tan 16171cc9bc6eSHuazhong Tan static void hclgevf_reset(struct hclgevf_dev *hdev) 16181cc9bc6eSHuazhong Tan { 16191cc9bc6eSHuazhong Tan if (hclgevf_reset_prepare(hdev)) 16201cc9bc6eSHuazhong Tan goto err_reset; 16211cc9bc6eSHuazhong Tan 16221cc9bc6eSHuazhong Tan /* check if VF could successfully fetch the hardware reset completion 16231cc9bc6eSHuazhong Tan * status from the hardware 16241cc9bc6eSHuazhong Tan */ 16251cc9bc6eSHuazhong Tan if (hclgevf_reset_wait(hdev)) { 16261cc9bc6eSHuazhong Tan /* can't do much in this situation, will disable VF */ 16271cc9bc6eSHuazhong Tan dev_err(&hdev->pdev->dev, 16281cc9bc6eSHuazhong Tan "failed to fetch H/W reset completion status\n"); 16291cc9bc6eSHuazhong Tan goto err_reset; 16301cc9bc6eSHuazhong Tan } 16311cc9bc6eSHuazhong Tan 16321cc9bc6eSHuazhong Tan if (hclgevf_reset_rebuild(hdev)) 16331cc9bc6eSHuazhong Tan goto err_reset; 16341cc9bc6eSHuazhong Tan 16351cc9bc6eSHuazhong Tan return; 16361cc9bc6eSHuazhong Tan 16376a5f6fa3SHuazhong Tan err_reset: 1638bbe6540eSHuazhong Tan 
hclgevf_reset_err_handle(hdev); 16396988eb2aSSalil Mehta } 16406988eb2aSSalil Mehta 1641ed1c6f35SPeiyang Wang static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr) 1642720bd583SHuazhong Tan { 1643720bd583SHuazhong Tan enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 1644720bd583SHuazhong Tan 1645dea846e8SHuazhong Tan /* return the highest priority reset level amongst all */ 1646b90fcc5bSHuazhong Tan if (test_bit(HNAE3_VF_RESET, addr)) { 1647b90fcc5bSHuazhong Tan rst_level = HNAE3_VF_RESET; 1648b90fcc5bSHuazhong Tan clear_bit(HNAE3_VF_RESET, addr); 1649b90fcc5bSHuazhong Tan clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1650b90fcc5bSHuazhong Tan clear_bit(HNAE3_VF_FUNC_RESET, addr); 1651b90fcc5bSHuazhong Tan } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 1652dea846e8SHuazhong Tan rst_level = HNAE3_VF_FULL_RESET; 1653dea846e8SHuazhong Tan clear_bit(HNAE3_VF_FULL_RESET, addr); 1654dea846e8SHuazhong Tan clear_bit(HNAE3_VF_FUNC_RESET, addr); 1655aa5c4f17SHuazhong Tan } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 1656aa5c4f17SHuazhong Tan rst_level = HNAE3_VF_PF_FUNC_RESET; 1657aa5c4f17SHuazhong Tan clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1658aa5c4f17SHuazhong Tan clear_bit(HNAE3_VF_FUNC_RESET, addr); 1659dea846e8SHuazhong Tan } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 1660dea846e8SHuazhong Tan rst_level = HNAE3_VF_FUNC_RESET; 1661dea846e8SHuazhong Tan clear_bit(HNAE3_VF_FUNC_RESET, addr); 16626ff3cf07SHuazhong Tan } else if (test_bit(HNAE3_FLR_RESET, addr)) { 16636ff3cf07SHuazhong Tan rst_level = HNAE3_FLR_RESET; 16646ff3cf07SHuazhong Tan clear_bit(HNAE3_FLR_RESET, addr); 1665720bd583SHuazhong Tan } 1666720bd583SHuazhong Tan 1667720bd583SHuazhong Tan return rst_level; 1668720bd583SHuazhong Tan } 1669720bd583SHuazhong Tan 16706ae4e733SShiju Jose static void hclgevf_reset_event(struct pci_dev *pdev, 16716ae4e733SShiju Jose struct hnae3_handle *handle) 16726d4c3981SSalil Mehta { 16736ff3cf07SHuazhong Tan struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 16746ff3cf07SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 16756d4c3981SSalil Mehta 16766d4c3981SSalil Mehta dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); 16776d4c3981SSalil Mehta 16786ff3cf07SHuazhong Tan if (hdev->default_reset_request) 16790742ed7cSHuazhong Tan hdev->reset_level = 1680ed1c6f35SPeiyang Wang hclgevf_get_reset_level(&hdev->default_reset_request); 1681720bd583SHuazhong Tan else 1682dea846e8SHuazhong Tan hdev->reset_level = HNAE3_VF_FUNC_RESET; 16836d4c3981SSalil Mehta 1684436667d2SSalil Mehta /* reset of this VF requested */ 1685436667d2SSalil Mehta set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1686436667d2SSalil Mehta hclgevf_reset_task_schedule(hdev); 16876d4c3981SSalil Mehta 16880742ed7cSHuazhong Tan hdev->last_reset_time = jiffies; 16896d4c3981SSalil Mehta } 16906d4c3981SSalil Mehta 1691720bd583SHuazhong Tan static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 1692720bd583SHuazhong Tan enum hnae3_reset_type rst_type) 1693720bd583SHuazhong Tan { 1694720bd583SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 1695720bd583SHuazhong Tan 1696720bd583SHuazhong Tan set_bit(rst_type, &hdev->default_reset_request); 1697720bd583SHuazhong Tan } 1698720bd583SHuazhong Tan 1699f28368bbSHuazhong Tan static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 1700f28368bbSHuazhong Tan { 1701f28368bbSHuazhong Tan writel(en ? 
1 : 0, vector->addr); 1702f28368bbSHuazhong Tan } 1703f28368bbSHuazhong Tan 1704bb1890d5SJiaran Zhang static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev, 1705bb1890d5SJiaran Zhang enum hnae3_reset_type rst_type) 17066ff3cf07SHuazhong Tan { 1707bb1890d5SJiaran Zhang #define HCLGEVF_RESET_RETRY_WAIT_MS 500 1708bb1890d5SJiaran Zhang #define HCLGEVF_RESET_RETRY_CNT 5 1709f28368bbSHuazhong Tan 17106ff3cf07SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 1711f28368bbSHuazhong Tan int retry_cnt = 0; 1712f28368bbSHuazhong Tan int ret; 17136ff3cf07SHuazhong Tan 1714ed0e658cSJiaran Zhang while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) { 1715f28368bbSHuazhong Tan down(&hdev->reset_sem); 1716f28368bbSHuazhong Tan set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1717bb1890d5SJiaran Zhang hdev->reset_type = rst_type; 1718f28368bbSHuazhong Tan ret = hclgevf_reset_prepare(hdev); 1719ed0e658cSJiaran Zhang if (!ret && !hdev->reset_pending) 1720ed0e658cSJiaran Zhang break; 1721ed0e658cSJiaran Zhang 17226ff3cf07SHuazhong Tan dev_err(&hdev->pdev->dev, 1723ed0e658cSJiaran Zhang "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n", 1724ed0e658cSJiaran Zhang ret, hdev->reset_pending, retry_cnt); 1725f28368bbSHuazhong Tan clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1726f28368bbSHuazhong Tan up(&hdev->reset_sem); 1727bb1890d5SJiaran Zhang msleep(HCLGEVF_RESET_RETRY_WAIT_MS); 1728f28368bbSHuazhong Tan } 1729f28368bbSHuazhong Tan 1730bb1890d5SJiaran Zhang /* disable misc vector before reset done */ 1731f28368bbSHuazhong Tan hclgevf_enable_vector(&hdev->misc_vector, false); 1732bb1890d5SJiaran Zhang 1733bb1890d5SJiaran Zhang if (hdev->reset_type == HNAE3_FLR_RESET) 1734f28368bbSHuazhong Tan hdev->rst_stats.flr_rst_cnt++; 1735f28368bbSHuazhong Tan } 1736f28368bbSHuazhong Tan 1737bb1890d5SJiaran Zhang static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev) 1738f28368bbSHuazhong Tan { 1739f28368bbSHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 1740f28368bbSHuazhong Tan int ret; 1741f28368bbSHuazhong Tan 1742f28368bbSHuazhong Tan hclgevf_enable_vector(&hdev->misc_vector, true); 1743f28368bbSHuazhong Tan 1744f28368bbSHuazhong Tan ret = hclgevf_reset_rebuild(hdev); 1745f28368bbSHuazhong Tan if (ret) 1746f28368bbSHuazhong Tan dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", 1747f28368bbSHuazhong Tan ret); 1748f28368bbSHuazhong Tan 1749f28368bbSHuazhong Tan hdev->reset_type = HNAE3_NONE_RESET; 17508445d9d3SJie Wang if (test_and_clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1751f28368bbSHuazhong Tan up(&hdev->reset_sem); 17526ff3cf07SHuazhong Tan } 17536ff3cf07SHuazhong Tan 1754e2cb1decSSalil Mehta static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 1755e2cb1decSSalil Mehta { 1756e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1757e2cb1decSSalil Mehta 1758e2cb1decSSalil Mehta return hdev->fw_version; 1759e2cb1decSSalil Mehta } 1760e2cb1decSSalil Mehta 1761e2cb1decSSalil Mehta static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 1762e2cb1decSSalil Mehta { 1763e2cb1decSSalil Mehta struct hclgevf_misc_vector *vector = &hdev->misc_vector; 1764e2cb1decSSalil Mehta 1765e2cb1decSSalil Mehta vector->vector_irq = pci_irq_vector(hdev->pdev, 1766e2cb1decSSalil Mehta HCLGEVF_MISC_VECTOR_NUM); 1767076bb537SJie Wang vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 1768e2cb1decSSalil Mehta /* vector status always valid for Vector 0 */ 1769e2cb1decSSalil Mehta 
hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 1770e2cb1decSSalil Mehta hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 1771e2cb1decSSalil Mehta 1772e2cb1decSSalil Mehta hdev->num_msi_left -= 1; 1773e2cb1decSSalil Mehta hdev->num_msi_used += 1; 1774e2cb1decSSalil Mehta } 1775e2cb1decSSalil Mehta 177635a1e503SSalil Mehta void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 177735a1e503SSalil Mehta { 1778ff200099SYunsheng Lin if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 17790251d196SGuangbin Huang test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) && 1780ff200099SYunsheng Lin !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, 1781ff200099SYunsheng Lin &hdev->state)) 17820ea68902SYunsheng Lin mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 178335a1e503SSalil Mehta } 178435a1e503SSalil Mehta 178507a0556aSSalil Mehta void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 1786e2cb1decSSalil Mehta { 1787ff200099SYunsheng Lin if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 1788ff200099SYunsheng Lin !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, 1789ff200099SYunsheng Lin &hdev->state)) 17900ea68902SYunsheng Lin mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 179107a0556aSSalil Mehta } 1792e2cb1decSSalil Mehta 1793ff200099SYunsheng Lin static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 1794ff200099SYunsheng Lin unsigned long delay) 1795e2cb1decSSalil Mehta { 1796d5432455SGuojia Liao if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 1797d5432455SGuojia Liao !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 17980ea68902SYunsheng Lin mod_delayed_work(hclgevf_wq, &hdev->service_task, delay); 1799e2cb1decSSalil Mehta } 1800e2cb1decSSalil Mehta 1801ff200099SYunsheng Lin static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) 180235a1e503SSalil Mehta { 1803d6ad7c53SGuojia Liao #define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3 1804d6ad7c53SGuojia Liao 1805ff200099SYunsheng Lin if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) 1806ff200099SYunsheng Lin return; 1807ff200099SYunsheng Lin 1808f28368bbSHuazhong Tan down(&hdev->reset_sem); 1809f28368bbSHuazhong Tan set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 181035a1e503SSalil Mehta 1811436667d2SSalil Mehta if (test_and_clear_bit(HCLGEVF_RESET_PENDING, 1812436667d2SSalil Mehta &hdev->reset_state)) { 1813cd7e963dSSalil Mehta /* PF has intimated that it is about to reset the hardware. 18149b2f3477SWeihang Li * We now have to poll & check if hardware has actually 18159b2f3477SWeihang Li * completed the reset sequence. On hardware reset completion, 18169b2f3477SWeihang Li * VF needs to reset the client and ae device. 181735a1e503SSalil Mehta */ 1818436667d2SSalil Mehta hdev->reset_attempts = 0; 1819436667d2SSalil Mehta 1820dea846e8SHuazhong Tan hdev->last_reset_time = jiffies; 18211385cc81SYufeng Mo hdev->reset_type = 1822ed1c6f35SPeiyang Wang hclgevf_get_reset_level(&hdev->reset_pending); 18231385cc81SYufeng Mo if (hdev->reset_type != HNAE3_NONE_RESET) 18241cc9bc6eSHuazhong Tan hclgevf_reset(hdev); 1825436667d2SSalil Mehta } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 1826436667d2SSalil Mehta &hdev->reset_state)) { 1827436667d2SSalil Mehta /* we could be here when either of below happens: 18289b2f3477SWeihang Li * 1. reset was initiated due to watchdog timeout caused by 1829436667d2SSalil Mehta * a. 
IMP was earlier reset and our TX got choked down, 1830436667d2SSalil Mehta * which resulted in watchdog reacting and inducing VF 1831436667d2SSalil Mehta * reset. This also means our cmdq would be unreliable. 1832436667d2SSalil Mehta * b. problem in TX due to some other lower layer (e.g. link 1833436667d2SSalil Mehta * layer not functioning properly etc.) 1834436667d2SSalil Mehta * 2. VF reset might have been initiated due to some config 1835436667d2SSalil Mehta * change. 1836436667d2SSalil Mehta * 1837436667d2SSalil Mehta * NOTE: There's no clear way to detect the above cases other than to react 1838436667d2SSalil Mehta * to the response of PF for this reset request. PF will ack the 1839436667d2SSalil Mehta * 1b and 2. cases but we will not get any intimation about 1a 1840436667d2SSalil Mehta * from PF as cmdq would be in unreliable state i.e. mailbox 1841436667d2SSalil Mehta * communication between PF and VF would be broken. 184246ee7350SGuojia Liao * 184346ee7350SGuojia Liao * if we never get into the pending state it means either: 1844436667d2SSalil Mehta * 1. PF is not receiving our request which could be due to IMP 1845436667d2SSalil Mehta * reset 1846436667d2SSalil Mehta * 2. PF is screwed 1847436667d2SSalil Mehta * We cannot do much for 2. but to check, first we can try to reset 1848436667d2SSalil Mehta * our PCIe + stack and see if it alleviates the problem. 1849436667d2SSalil Mehta */ 1850d6ad7c53SGuojia Liao if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { 1851436667d2SSalil Mehta /* prepare for full reset of stack + pcie interface */ 1852dea846e8SHuazhong Tan set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); 1853436667d2SSalil Mehta 1854436667d2SSalil Mehta /* "defer" schedule the reset task again */ 1855436667d2SSalil Mehta set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1856436667d2SSalil Mehta } else { 1857436667d2SSalil Mehta hdev->reset_attempts++; 1858436667d2SSalil Mehta 1859dea846e8SHuazhong Tan set_bit(hdev->reset_level, &hdev->reset_pending); 1860dea846e8SHuazhong Tan set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1861436667d2SSalil Mehta } 1862dea846e8SHuazhong Tan hclgevf_reset_task_schedule(hdev); 1863436667d2SSalil Mehta } 186435a1e503SSalil Mehta 1865afb6afdbSHuazhong Tan hdev->reset_type = HNAE3_NONE_RESET; 186635a1e503SSalil Mehta clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1867f28368bbSHuazhong Tan up(&hdev->reset_sem); 186835a1e503SSalil Mehta } 186935a1e503SSalil Mehta 1870ff200099SYunsheng Lin static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) 1871e2cb1decSSalil Mehta { 1872ff200099SYunsheng Lin if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) 1873ff200099SYunsheng Lin return; 1874e2cb1decSSalil Mehta 1875e2cb1decSSalil Mehta if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 1876e2cb1decSSalil Mehta return; 1877e2cb1decSSalil Mehta 187807a0556aSSalil Mehta hclgevf_mbx_async_handler(hdev); 1879e2cb1decSSalil Mehta 1880e2cb1decSSalil Mehta clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1881e2cb1decSSalil Mehta } 1882e2cb1decSSalil Mehta 1883ff200099SYunsheng Lin static void hclgevf_keep_alive(struct hclgevf_dev *hdev) 1884a6d818e3SYunsheng Lin { 1885d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 1886a6d818e3SYunsheng Lin int ret; 1887a6d818e3SYunsheng Lin 1888076bb537SJie Wang if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) 1889c59a85c0SJian Shen return; 1890c59a85c0SJian Shen 1891d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, 
HCLGE_MBX_KEEP_ALIVE, 0); 1892d3410018SYufeng Mo ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1893a6d818e3SYunsheng Lin if (ret) 1894a6d818e3SYunsheng Lin dev_err(&hdev->pdev->dev, 1895a6d818e3SYunsheng Lin "VF sends keep alive cmd failed(=%d)\n", ret); 1896a6d818e3SYunsheng Lin } 1897a6d818e3SYunsheng Lin 1898ff200099SYunsheng Lin static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) 1899e2cb1decSSalil Mehta { 1900ff200099SYunsheng Lin unsigned long delta = round_jiffies_relative(HZ); 1901ff200099SYunsheng Lin struct hnae3_handle *handle = &hdev->nic; 1902e2cb1decSSalil Mehta 1903bd3caddfSJie Wang if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state) || 1904bd3caddfSJie Wang test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) 1905e6394363SGuangbin Huang return; 1906e6394363SGuangbin Huang 1907ff200099SYunsheng Lin if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 1908ff200099SYunsheng Lin delta = jiffies - hdev->last_serv_processed; 1909db01afebSliuzhongzhu 1910ff200099SYunsheng Lin if (delta < round_jiffies_relative(HZ)) { 1911ff200099SYunsheng Lin delta = round_jiffies_relative(HZ) - delta; 1912ff200099SYunsheng Lin goto out; 1913db01afebSliuzhongzhu } 1914ff200099SYunsheng Lin } 1915ff200099SYunsheng Lin 1916ff200099SYunsheng Lin hdev->serv_processed_cnt++; 1917ff200099SYunsheng Lin if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) 1918ff200099SYunsheng Lin hclgevf_keep_alive(hdev); 1919ff200099SYunsheng Lin 1920ff200099SYunsheng Lin if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { 1921ff200099SYunsheng Lin hdev->last_serv_processed = jiffies; 1922ff200099SYunsheng Lin goto out; 1923ff200099SYunsheng Lin } 1924ff200099SYunsheng Lin 1925ff200099SYunsheng Lin if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) 19264afc310cSJie Wang hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); 1927e2cb1decSSalil Mehta 192801305e16SGuangbin Huang /* VF does not need to request link status when this bit is set, because 192901305e16SGuangbin Huang * PF will push its link status to VFs when link status changed. 
1930e2cb1decSSalil Mehta */ 193101305e16SGuangbin Huang if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state)) 1932e2cb1decSSalil Mehta hclgevf_request_link_info(hdev); 1933e2cb1decSSalil Mehta 19349194d18bSliuzhongzhu hclgevf_update_link_mode(hdev); 19359194d18bSliuzhongzhu 1936fe4144d4SJian Shen hclgevf_sync_vlan_filter(hdev); 1937fe4144d4SJian Shen 1938ee4bcd3bSJian Shen hclgevf_sync_mac_table(hdev); 1939ee4bcd3bSJian Shen 1940c631c696SJian Shen hclgevf_sync_promisc_mode(hdev); 1941c631c696SJian Shen 1942ff200099SYunsheng Lin hdev->last_serv_processed = jiffies; 1943436667d2SSalil Mehta 1944ff200099SYunsheng Lin out: 1945ff200099SYunsheng Lin hclgevf_task_schedule(hdev, delta); 1946ff200099SYunsheng Lin } 1947b3c3fe8eSYunsheng Lin 1948ff200099SYunsheng Lin static void hclgevf_service_task(struct work_struct *work) 1949ff200099SYunsheng Lin { 1950ff200099SYunsheng Lin struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev, 1951ff200099SYunsheng Lin service_task.work); 1952ff200099SYunsheng Lin 1953ff200099SYunsheng Lin hclgevf_reset_service_task(hdev); 1954ff200099SYunsheng Lin hclgevf_mailbox_service_task(hdev); 1955ff200099SYunsheng Lin hclgevf_periodic_service_task(hdev); 1956ff200099SYunsheng Lin 1957ff200099SYunsheng Lin /* Handle reset and mbx again in case periodical task delays the 1958ff200099SYunsheng Lin * handling by calling hclgevf_task_schedule() in 1959ff200099SYunsheng Lin * hclgevf_periodic_service_task() 1960ff200099SYunsheng Lin */ 1961ff200099SYunsheng Lin hclgevf_reset_service_task(hdev); 1962ff200099SYunsheng Lin hclgevf_mailbox_service_task(hdev); 1963e2cb1decSSalil Mehta } 1964e2cb1decSSalil Mehta 1965e2cb1decSSalil Mehta static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 1966e2cb1decSSalil Mehta { 1967cb413bfaSJie Wang hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr); 1968e2cb1decSSalil Mehta } 1969e2cb1decSSalil Mehta 1970b90fcc5bSHuazhong Tan static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 1971b90fcc5bSHuazhong Tan u32 *clearval) 1972e2cb1decSSalil Mehta { 197313050921SHuazhong Tan u32 val, cmdq_stat_reg, rst_ing_reg; 1974e2cb1decSSalil Mehta 1975e2cb1decSSalil Mehta /* fetch the events from their corresponding regs */ 197613050921SHuazhong Tan cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, 1977cb413bfaSJie Wang HCLGE_COMM_VECTOR0_CMDQ_STATE_REG); 197813050921SHuazhong Tan if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { 1979b90fcc5bSHuazhong Tan rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 1980b90fcc5bSHuazhong Tan dev_info(&hdev->pdev->dev, 1981b90fcc5bSHuazhong Tan "receive reset interrupt 0x%x!\n", rst_ing_reg); 1982b90fcc5bSHuazhong Tan set_bit(HNAE3_VF_RESET, &hdev->reset_pending); 1983b90fcc5bSHuazhong Tan set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1984076bb537SJie Wang set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 198513050921SHuazhong Tan *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); 1986c88a6e7dSHuazhong Tan hdev->rst_stats.vf_rst_cnt++; 198772e2fb07SHuazhong Tan /* set up VF hardware reset status, its PF will clear 198872e2fb07SHuazhong Tan * this status when PF has initialized done. 
198972e2fb07SHuazhong Tan */ 199072e2fb07SHuazhong Tan val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); 199172e2fb07SHuazhong Tan hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, 199272e2fb07SHuazhong Tan val | HCLGEVF_VF_RST_ING_BIT); 1993b90fcc5bSHuazhong Tan return HCLGEVF_VECTOR0_EVENT_RST; 1994b90fcc5bSHuazhong Tan } 1995b90fcc5bSHuazhong Tan 1996e2cb1decSSalil Mehta /* check for vector0 mailbox(=CMDQ RX) event source */ 199713050921SHuazhong Tan if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { 199813050921SHuazhong Tan /* for revision 0x21, clearing interrupt is writing bit 0 199913050921SHuazhong Tan * to the clear register, writing bit 1 means to keep the 200013050921SHuazhong Tan * old value. 200113050921SHuazhong Tan * for revision 0x20, the clear register is a read & write 200213050921SHuazhong Tan * register, so we should just write 0 to the bit we are 200313050921SHuazhong Tan * handling, and keep other bits as cmdq_stat_reg. 200413050921SHuazhong Tan */ 2005295ba232SGuangbin Huang if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) 200613050921SHuazhong Tan *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 200713050921SHuazhong Tan else 200813050921SHuazhong Tan *clearval = cmdq_stat_reg & 200913050921SHuazhong Tan ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 201013050921SHuazhong Tan 2011b90fcc5bSHuazhong Tan return HCLGEVF_VECTOR0_EVENT_MBX; 2012e2cb1decSSalil Mehta } 2013e2cb1decSSalil Mehta 2014e45afb39SHuazhong Tan /* print other vector0 event source */ 2015e45afb39SHuazhong Tan dev_info(&hdev->pdev->dev, 2016e45afb39SHuazhong Tan "vector 0 interrupt from unknown source, cmdq_src = %#x\n", 2017e45afb39SHuazhong Tan cmdq_stat_reg); 2018e2cb1decSSalil Mehta 2019b90fcc5bSHuazhong Tan return HCLGEVF_VECTOR0_EVENT_OTHER; 2020e2cb1decSSalil Mehta } 2021e2cb1decSSalil Mehta 202265e98bb5SJijie Shao static void hclgevf_reset_timer(struct timer_list *t) 202365e98bb5SJijie Shao { 202465e98bb5SJijie Shao struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer); 202565e98bb5SJijie Shao 202665e98bb5SJijie Shao hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST); 202765e98bb5SJijie Shao hclgevf_reset_task_schedule(hdev); 202865e98bb5SJijie Shao } 202965e98bb5SJijie Shao 2030e2cb1decSSalil Mehta static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 2031e2cb1decSSalil Mehta { 203265e98bb5SJijie Shao #define HCLGEVF_RESET_DELAY 5 203365e98bb5SJijie Shao 2034b90fcc5bSHuazhong Tan enum hclgevf_evt_cause event_cause; 2035e2cb1decSSalil Mehta struct hclgevf_dev *hdev = data; 2036e2cb1decSSalil Mehta u32 clearval; 2037e2cb1decSSalil Mehta 2038e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, false); 2039b90fcc5bSHuazhong Tan event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2040427900d2SJiaran Zhang if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) 2041427900d2SJiaran Zhang hclgevf_clear_event_cause(hdev, clearval); 2042e2cb1decSSalil Mehta 2043b90fcc5bSHuazhong Tan switch (event_cause) { 2044b90fcc5bSHuazhong Tan case HCLGEVF_VECTOR0_EVENT_RST: 204565e98bb5SJijie Shao mod_timer(&hdev->reset_timer, 204665e98bb5SJijie Shao jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY)); 2047b90fcc5bSHuazhong Tan break; 2048b90fcc5bSHuazhong Tan case HCLGEVF_VECTOR0_EVENT_MBX: 204907a0556aSSalil Mehta hclgevf_mbx_handler(hdev); 2050b90fcc5bSHuazhong Tan break; 2051b90fcc5bSHuazhong Tan default: 2052b90fcc5bSHuazhong Tan break; 2053b90fcc5bSHuazhong Tan } 2054e2cb1decSSalil Mehta 2055e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, true); 
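	/* vector 0 was masked at the top of this handler; re-enabling it here
	 * allows the next reset or mailbox event to raise the misc interrupt
	 * again
	 */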
2056e2cb1decSSalil Mehta 2057e2cb1decSSalil Mehta return IRQ_HANDLED; 2058e2cb1decSSalil Mehta } 2059e2cb1decSSalil Mehta 2060e2cb1decSSalil Mehta static int hclgevf_configure(struct hclgevf_dev *hdev) 2061e2cb1decSSalil Mehta { 2062e2cb1decSSalil Mehta int ret; 2063e2cb1decSSalil Mehta 20643462207dSYufeng Mo hdev->gro_en = true; 20653462207dSYufeng Mo 206632e6d104SJian Shen ret = hclgevf_get_basic_info(hdev); 206732e6d104SJian Shen if (ret) 206832e6d104SJian Shen return ret; 206932e6d104SJian Shen 207092f11ea1SJian Shen /* get current port based vlan state from PF */ 207192f11ea1SJian Shen ret = hclgevf_get_port_base_vlan_filter_state(hdev); 207292f11ea1SJian Shen if (ret) 207392f11ea1SJian Shen return ret; 207492f11ea1SJian Shen 2075e2cb1decSSalil Mehta /* get queue configuration from PF */ 20766cee6fc3SJian Shen ret = hclgevf_get_queue_info(hdev); 2077e2cb1decSSalil Mehta if (ret) 2078e2cb1decSSalil Mehta return ret; 2079c0425944SPeng Li 2080c0425944SPeng Li /* get queue depth info from PF */ 2081c0425944SPeng Li ret = hclgevf_get_queue_depth(hdev); 2082c0425944SPeng Li if (ret) 2083c0425944SPeng Li return ret; 2084c0425944SPeng Li 208532e6d104SJian Shen return hclgevf_get_pf_media_type(hdev); 2086e2cb1decSSalil Mehta } 2087e2cb1decSSalil Mehta 20887a01c897SSalil Mehta static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 20897a01c897SSalil Mehta { 20907a01c897SSalil Mehta struct pci_dev *pdev = ae_dev->pdev; 20911154bb26SPeng Li struct hclgevf_dev *hdev; 20927a01c897SSalil Mehta 20937a01c897SSalil Mehta hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 20947a01c897SSalil Mehta if (!hdev) 20957a01c897SSalil Mehta return -ENOMEM; 20967a01c897SSalil Mehta 20977a01c897SSalil Mehta hdev->pdev = pdev; 20987a01c897SSalil Mehta hdev->ae_dev = ae_dev; 20997a01c897SSalil Mehta ae_dev->priv = hdev; 21007a01c897SSalil Mehta 21017a01c897SSalil Mehta return 0; 21027a01c897SSalil Mehta } 21037a01c897SSalil Mehta 2104e2cb1decSSalil Mehta static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 2105e2cb1decSSalil Mehta { 2106e2cb1decSSalil Mehta struct hnae3_handle *roce = &hdev->roce; 2107e2cb1decSSalil Mehta struct hnae3_handle *nic = &hdev->nic; 2108e2cb1decSSalil Mehta 210907acf909SJian Shen roce->rinfo.num_vectors = hdev->num_roce_msix; 2110e2cb1decSSalil Mehta 2111e2cb1decSSalil Mehta if (hdev->num_msi_left < roce->rinfo.num_vectors || 2112e2cb1decSSalil Mehta hdev->num_msi_left == 0) 2113e2cb1decSSalil Mehta return -EINVAL; 2114e2cb1decSSalil Mehta 2115beb27ca4SJie Wang roce->rinfo.base_vector = hdev->roce_base_msix_offset; 2116e2cb1decSSalil Mehta 2117e2cb1decSSalil Mehta roce->rinfo.netdev = nic->kinfo.netdev; 2118076bb537SJie Wang roce->rinfo.roce_io_base = hdev->hw.hw.io_base; 2119076bb537SJie Wang roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; 2120e2cb1decSSalil Mehta 2121e2cb1decSSalil Mehta roce->pdev = nic->pdev; 2122e2cb1decSSalil Mehta roce->ae_algo = nic->ae_algo; 21236639a7b9SPeiyang Wang bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits, 21246639a7b9SPeiyang Wang MAX_NUMNODES); 2125e2cb1decSSalil Mehta return 0; 2126e2cb1decSSalil Mehta } 2127e2cb1decSSalil Mehta 21283462207dSYufeng Mo static int hclgevf_config_gro(struct hclgevf_dev *hdev) 2129b26a6feaSPeng Li { 2130b26a6feaSPeng Li struct hclgevf_cfg_gro_status_cmd *req; 21316befad60SJie Wang struct hclge_desc desc; 2132b26a6feaSPeng Li int ret; 2133b26a6feaSPeng Li 2134507e46aeSGuangbin Huang if (!hnae3_ae_dev_gro_supported(hdev->ae_dev)) 2135b26a6feaSPeng Li return 0; 2136b26a6feaSPeng Li 
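	/* GRO is toggled through a generic configuration command: build the
	 * descriptor, fill in the requested enable state and post it on the
	 * command queue
	 */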
213743710bfeSJie Wang hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, 2138b26a6feaSPeng Li false); 2139b26a6feaSPeng Li req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 2140b26a6feaSPeng Li 21413462207dSYufeng Mo req->gro_en = hdev->gro_en ? 1 : 0; 2142b26a6feaSPeng Li 2143b26a6feaSPeng Li ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2144b26a6feaSPeng Li if (ret) 2145b26a6feaSPeng Li dev_err(&hdev->pdev->dev, 2146b26a6feaSPeng Li "VF GRO hardware config cmd failed, ret = %d.\n", ret); 2147b26a6feaSPeng Li 2148b26a6feaSPeng Li return ret; 2149b26a6feaSPeng Li } 2150b26a6feaSPeng Li 2151944de484SGuojia Liao static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 2152944de484SGuojia Liao { 2153027733b1SJie Wang struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 215493969dc1SJie Wang u16 tc_offset[HCLGE_COMM_MAX_TC_NUM]; 215593969dc1SJie Wang u16 tc_valid[HCLGE_COMM_MAX_TC_NUM]; 215693969dc1SJie Wang u16 tc_size[HCLGE_COMM_MAX_TC_NUM]; 2157944de484SGuojia Liao int ret; 2158944de484SGuojia Liao 2159295ba232SGuangbin Huang if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 21607428d6c9SJie Wang ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, 21617428d6c9SJie Wang rss_cfg->rss_algo, 2162944de484SGuojia Liao rss_cfg->rss_hash_key); 2163944de484SGuojia Liao if (ret) 2164944de484SGuojia Liao return ret; 2165944de484SGuojia Liao 2166ed1c6f35SPeiyang Wang ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, rss_cfg); 2167944de484SGuojia Liao if (ret) 2168944de484SGuojia Liao return ret; 2169944de484SGuojia Liao } 2170e2cb1decSSalil Mehta 21717428d6c9SJie Wang ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, 21727428d6c9SJie Wang rss_cfg->rss_indirection_tbl); 2173e2cb1decSSalil Mehta if (ret) 2174e2cb1decSSalil Mehta return ret; 2175e2cb1decSSalil Mehta 217693969dc1SJie Wang hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map, 217793969dc1SJie Wang tc_offset, tc_valid, tc_size); 217893969dc1SJie Wang 217993969dc1SJie Wang return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, 218093969dc1SJie Wang tc_valid, tc_size); 2181e2cb1decSSalil Mehta } 2182e2cb1decSSalil Mehta 2183e2cb1decSSalil Mehta static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2184e2cb1decSSalil Mehta { 2185bbfd4506SJian Shen struct hnae3_handle *nic = &hdev->nic; 2186bbfd4506SJian Shen int ret; 2187bbfd4506SJian Shen 2188bbfd4506SJian Shen ret = hclgevf_en_hw_strip_rxvtag(nic, true); 2189bbfd4506SJian Shen if (ret) { 2190bbfd4506SJian Shen dev_err(&hdev->pdev->dev, 2191bbfd4506SJian Shen "failed to enable rx vlan offload, ret = %d\n", ret); 2192bbfd4506SJian Shen return ret; 2193bbfd4506SJian Shen } 2194bbfd4506SJian Shen 2195e2cb1decSSalil Mehta return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 2196e2cb1decSSalil Mehta false); 2197e2cb1decSSalil Mehta } 2198e2cb1decSSalil Mehta 2199ff200099SYunsheng Lin static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) 2200ff200099SYunsheng Lin { 2201ff200099SYunsheng Lin #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 2202ff200099SYunsheng Lin 2203ff200099SYunsheng Lin unsigned long last = hdev->serv_processed_cnt; 2204ff200099SYunsheng Lin int i = 0; 2205ff200099SYunsheng Lin 2206ff200099SYunsheng Lin while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && 2207ff200099SYunsheng Lin i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && 2208ff200099SYunsheng Lin last == hdev->serv_processed_cnt) 2209ff200099SYunsheng Lin usleep_range(1, 1); 2210ff200099SYunsheng Lin } 2211ff200099SYunsheng Lin 22128cdb992fSJian Shen 
static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 22138cdb992fSJian Shen { 22148cdb992fSJian Shen struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 22158cdb992fSJian Shen 22168cdb992fSJian Shen if (enable) { 2217ff200099SYunsheng Lin hclgevf_task_schedule(hdev, 0); 22188cdb992fSJian Shen } else { 2219b3c3fe8eSYunsheng Lin set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2220ff200099SYunsheng Lin 2221094c2812SPeiyang Wang smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */ 2222ff200099SYunsheng Lin hclgevf_flush_link_update(hdev); 22238cdb992fSJian Shen } 22248cdb992fSJian Shen } 22258cdb992fSJian Shen 2226e2cb1decSSalil Mehta static int hclgevf_ae_start(struct hnae3_handle *handle) 2227e2cb1decSSalil Mehta { 2228e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2229e2cb1decSSalil Mehta 2230ed7bedd2SGuangbin Huang clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 223101305e16SGuangbin Huang clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state); 2232ed7bedd2SGuangbin Huang 22334afc310cSJie Wang hclge_comm_reset_tqp_stats(handle); 2234e2cb1decSSalil Mehta 2235e2cb1decSSalil Mehta hclgevf_request_link_info(hdev); 2236e2cb1decSSalil Mehta 22379194d18bSliuzhongzhu hclgevf_update_link_mode(hdev); 22389194d18bSliuzhongzhu 2239e2cb1decSSalil Mehta return 0; 2240e2cb1decSSalil Mehta } 2241e2cb1decSSalil Mehta 2242e2cb1decSSalil Mehta static void hclgevf_ae_stop(struct hnae3_handle *handle) 2243e2cb1decSSalil Mehta { 2244e2cb1decSSalil Mehta struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2245e2cb1decSSalil Mehta 22462f7e4896SFuyun Liang set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 22472f7e4896SFuyun Liang 2248146e92c1SHuazhong Tan if (hdev->reset_type != HNAE3_VF_RESET) 22498fa86551SYufeng Mo hclgevf_reset_tqp(handle); 225039cfbc9cSHuazhong Tan 22514afc310cSJie Wang hclge_comm_reset_tqp_stats(handle); 22528cc6c1f7SFuyun Liang hclgevf_update_link_status(hdev, 0); 2253e2cb1decSSalil Mehta } 2254e2cb1decSSalil Mehta 2255a6d818e3SYunsheng Lin static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2256a6d818e3SYunsheng Lin { 2257d3410018SYufeng Mo #define HCLGEVF_STATE_ALIVE 1 2258d3410018SYufeng Mo #define HCLGEVF_STATE_NOT_ALIVE 0 2259a6d818e3SYunsheng Lin 2260d3410018SYufeng Mo struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2261d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg; 2262d3410018SYufeng Mo 2263d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); 2264d3410018SYufeng Mo send_msg.data[0] = alive ? 
HCLGEVF_STATE_ALIVE : 2265d3410018SYufeng Mo HCLGEVF_STATE_NOT_ALIVE; 2266d3410018SYufeng Mo return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2267a6d818e3SYunsheng Lin } 2268a6d818e3SYunsheng Lin 2269a6d818e3SYunsheng Lin static int hclgevf_client_start(struct hnae3_handle *handle) 2270a6d818e3SYunsheng Lin { 2271f621df96SQinglang Miao return hclgevf_set_alive(handle, true); 2272a6d818e3SYunsheng Lin } 2273a6d818e3SYunsheng Lin 2274a6d818e3SYunsheng Lin static void hclgevf_client_stop(struct hnae3_handle *handle) 2275a6d818e3SYunsheng Lin { 2276a6d818e3SYunsheng Lin struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2277a6d818e3SYunsheng Lin int ret; 2278a6d818e3SYunsheng Lin 2279a6d818e3SYunsheng Lin ret = hclgevf_set_alive(handle, false); 2280a6d818e3SYunsheng Lin if (ret) 2281a6d818e3SYunsheng Lin dev_warn(&hdev->pdev->dev, 2282a6d818e3SYunsheng Lin "%s failed %d\n", __func__, ret); 2283a6d818e3SYunsheng Lin } 2284a6d818e3SYunsheng Lin 2285e2cb1decSSalil Mehta static void hclgevf_state_init(struct hclgevf_dev *hdev) 2286e2cb1decSSalil Mehta { 2287e2cb1decSSalil Mehta clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2288e2cb1decSSalil Mehta clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2289d5432455SGuojia Liao clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2290e2cb1decSSalil Mehta 2291b3c3fe8eSYunsheng Lin INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); 229235a1e503SSalil Mehta 2293e2cb1decSSalil Mehta mutex_init(&hdev->mbx_resp.mbx_mutex); 2294f28368bbSHuazhong Tan sema_init(&hdev->reset_sem, 1); 2295e2cb1decSSalil Mehta 2296ee4bcd3bSJian Shen spin_lock_init(&hdev->mac_table.mac_list_lock); 2297ee4bcd3bSJian Shen INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); 2298ee4bcd3bSJian Shen INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); 2299ee4bcd3bSJian Shen 2300e2cb1decSSalil Mehta /* bring the device down */ 2301e2cb1decSSalil Mehta set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2302e2cb1decSSalil Mehta } 2303e2cb1decSSalil Mehta 2304e2cb1decSSalil Mehta static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2305e2cb1decSSalil Mehta { 2306e2cb1decSSalil Mehta set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2307acfc3d55SHuazhong Tan set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); 2308e2cb1decSSalil Mehta 2309b3c3fe8eSYunsheng Lin if (hdev->service_task.work.func) 2310b3c3fe8eSYunsheng Lin cancel_delayed_work_sync(&hdev->service_task); 2311e2cb1decSSalil Mehta 2312e2cb1decSSalil Mehta mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2313e2cb1decSSalil Mehta } 2314e2cb1decSSalil Mehta 2315e2cb1decSSalil Mehta static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2316e2cb1decSSalil Mehta { 2317e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 2318e2cb1decSSalil Mehta int vectors; 2319e2cb1decSSalil Mehta int i; 2320e2cb1decSSalil Mehta 2321580a05f9SYonglong Liu if (hnae3_dev_roce_supported(hdev)) 232207acf909SJian Shen vectors = pci_alloc_irq_vectors(pdev, 232307acf909SJian Shen hdev->roce_base_msix_offset + 1, 232407acf909SJian Shen hdev->num_msi, 232507acf909SJian Shen PCI_IRQ_MSIX); 232607acf909SJian Shen else 2327580a05f9SYonglong Liu vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2328580a05f9SYonglong Liu hdev->num_msi, 2329e2cb1decSSalil Mehta PCI_IRQ_MSI | PCI_IRQ_MSIX); 233007acf909SJian Shen 2331e2cb1decSSalil Mehta if (vectors < 0) { 2332e2cb1decSSalil Mehta dev_err(&pdev->dev, 2333e2cb1decSSalil Mehta "failed(%d) to allocate MSI/MSI-X vectors\n", 2334e2cb1decSSalil Mehta vectors); 2335e2cb1decSSalil Mehta return vectors; 
2336e2cb1decSSalil Mehta } 2337e2cb1decSSalil Mehta if (vectors < hdev->num_msi) 2338e2cb1decSSalil Mehta dev_warn(&hdev->pdev->dev, 2339adcf738bSGuojia Liao "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2340e2cb1decSSalil Mehta hdev->num_msi, vectors); 2341e2cb1decSSalil Mehta 2342e2cb1decSSalil Mehta hdev->num_msi = vectors; 2343e2cb1decSSalil Mehta hdev->num_msi_left = vectors; 2344580a05f9SYonglong Liu 2345e2cb1decSSalil Mehta hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2346e2cb1decSSalil Mehta sizeof(u16), GFP_KERNEL); 2347e2cb1decSSalil Mehta if (!hdev->vector_status) { 2348e2cb1decSSalil Mehta pci_free_irq_vectors(pdev); 2349e2cb1decSSalil Mehta return -ENOMEM; 2350e2cb1decSSalil Mehta } 2351e2cb1decSSalil Mehta 2352e2cb1decSSalil Mehta for (i = 0; i < hdev->num_msi; i++) 2353e2cb1decSSalil Mehta hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2354e2cb1decSSalil Mehta 2355e2cb1decSSalil Mehta hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2356e2cb1decSSalil Mehta sizeof(int), GFP_KERNEL); 2357e2cb1decSSalil Mehta if (!hdev->vector_irq) { 2358862d969aSHuazhong Tan devm_kfree(&pdev->dev, hdev->vector_status); 2359e2cb1decSSalil Mehta pci_free_irq_vectors(pdev); 2360e2cb1decSSalil Mehta return -ENOMEM; 2361e2cb1decSSalil Mehta } 2362e2cb1decSSalil Mehta 2363e2cb1decSSalil Mehta return 0; 2364e2cb1decSSalil Mehta } 2365e2cb1decSSalil Mehta 2366e2cb1decSSalil Mehta static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2367e2cb1decSSalil Mehta { 2368e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 2369e2cb1decSSalil Mehta 2370862d969aSHuazhong Tan devm_kfree(&pdev->dev, hdev->vector_status); 2371862d969aSHuazhong Tan devm_kfree(&pdev->dev, hdev->vector_irq); 2372e2cb1decSSalil Mehta pci_free_irq_vectors(pdev); 2373e2cb1decSSalil Mehta } 2374e2cb1decSSalil Mehta 2375e2cb1decSSalil Mehta static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2376e2cb1decSSalil Mehta { 2377cdd332acSGuojia Liao int ret; 2378e2cb1decSSalil Mehta 2379e2cb1decSSalil Mehta hclgevf_get_misc_vector(hdev); 2380e2cb1decSSalil Mehta 2381f97c4d82SYonglong Liu snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 2382f97c4d82SYonglong Liu HCLGEVF_NAME, pci_name(hdev->pdev)); 2383e2cb1decSSalil Mehta ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2384f97c4d82SYonglong Liu 0, hdev->misc_vector.name, hdev); 2385e2cb1decSSalil Mehta if (ret) { 2386e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2387e2cb1decSSalil Mehta hdev->misc_vector.vector_irq); 2388e2cb1decSSalil Mehta return ret; 2389e2cb1decSSalil Mehta } 2390e2cb1decSSalil Mehta 23911819e409SXi Wang hclgevf_clear_event_cause(hdev, 0); 23921819e409SXi Wang 2393e2cb1decSSalil Mehta /* enable misc. 
vector(vector 0) */ 2394e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, true); 2395e2cb1decSSalil Mehta 2396e2cb1decSSalil Mehta return ret; 2397e2cb1decSSalil Mehta } 2398e2cb1decSSalil Mehta 2399e2cb1decSSalil Mehta static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2400e2cb1decSSalil Mehta { 2401e2cb1decSSalil Mehta /* disable misc vector(vector 0) */ 2402e2cb1decSSalil Mehta hclgevf_enable_vector(&hdev->misc_vector, false); 24031819e409SXi Wang synchronize_irq(hdev->misc_vector.vector_irq); 2404e2cb1decSSalil Mehta free_irq(hdev->misc_vector.vector_irq, hdev); 2405e2cb1decSSalil Mehta hclgevf_free_vector(hdev, 0); 2406e2cb1decSSalil Mehta } 2407e2cb1decSSalil Mehta 2408bb87be87SYonglong Liu static void hclgevf_info_show(struct hclgevf_dev *hdev) 2409bb87be87SYonglong Liu { 2410bb87be87SYonglong Liu struct device *dev = &hdev->pdev->dev; 2411bb87be87SYonglong Liu 2412bb87be87SYonglong Liu dev_info(dev, "VF info begin:\n"); 2413bb87be87SYonglong Liu 2414adcf738bSGuojia Liao dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 2415adcf738bSGuojia Liao dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2416adcf738bSGuojia Liao dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 2417adcf738bSGuojia Liao dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 2418adcf738bSGuojia Liao dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2419adcf738bSGuojia Liao dev_info(dev, "PF media type of this VF: %u\n", 2420bb87be87SYonglong Liu hdev->hw.mac.media_type); 2421bb87be87SYonglong Liu 2422bb87be87SYonglong Liu dev_info(dev, "VF info end.\n"); 2423bb87be87SYonglong Liu } 2424bb87be87SYonglong Liu 24251db58f86SHuazhong Tan static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 24261db58f86SHuazhong Tan struct hnae3_client *client) 24271db58f86SHuazhong Tan { 24281db58f86SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 24294cd5beaaSGuangbin Huang int rst_cnt = hdev->rst_stats.rst_cnt; 24301db58f86SHuazhong Tan int ret; 24311db58f86SHuazhong Tan 24321db58f86SHuazhong Tan ret = client->ops->init_instance(&hdev->nic); 24331db58f86SHuazhong Tan if (ret) 24341db58f86SHuazhong Tan return ret; 24351db58f86SHuazhong Tan 24361db58f86SHuazhong Tan set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 24374cd5beaaSGuangbin Huang if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 24384cd5beaaSGuangbin Huang rst_cnt != hdev->rst_stats.rst_cnt) { 24394cd5beaaSGuangbin Huang clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 24404cd5beaaSGuangbin Huang 24414cd5beaaSGuangbin Huang client->ops->uninit_instance(&hdev->nic, 0); 24424cd5beaaSGuangbin Huang return -EBUSY; 24434cd5beaaSGuangbin Huang } 24444cd5beaaSGuangbin Huang 24451db58f86SHuazhong Tan hnae3_set_client_init_flag(client, ae_dev, 1); 24461db58f86SHuazhong Tan 24471db58f86SHuazhong Tan if (netif_msg_drv(&hdev->nic)) 24481db58f86SHuazhong Tan hclgevf_info_show(hdev); 24491db58f86SHuazhong Tan 24501db58f86SHuazhong Tan return 0; 24511db58f86SHuazhong Tan } 24521db58f86SHuazhong Tan 24531db58f86SHuazhong Tan static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 24541db58f86SHuazhong Tan struct hnae3_client *client) 24551db58f86SHuazhong Tan { 24561db58f86SHuazhong Tan struct hclgevf_dev *hdev = ae_dev->priv; 24571db58f86SHuazhong Tan int ret; 24581db58f86SHuazhong Tan 24591db58f86SHuazhong Tan if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 24601db58f86SHuazhong Tan !hdev->nic_client) 24611db58f86SHuazhong Tan 
return 0; 24621db58f86SHuazhong Tan 24631db58f86SHuazhong Tan ret = hclgevf_init_roce_base_info(hdev); 24641db58f86SHuazhong Tan if (ret) 24651db58f86SHuazhong Tan return ret; 24661db58f86SHuazhong Tan 24671db58f86SHuazhong Tan ret = client->ops->init_instance(&hdev->roce); 24681db58f86SHuazhong Tan if (ret) 24691db58f86SHuazhong Tan return ret; 24701db58f86SHuazhong Tan 2471fe735c84SHuazhong Tan set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 24721db58f86SHuazhong Tan hnae3_set_client_init_flag(client, ae_dev, 1); 24731db58f86SHuazhong Tan 24741db58f86SHuazhong Tan return 0; 24751db58f86SHuazhong Tan } 24761db58f86SHuazhong Tan 2477e718a93fSPeng Li static int hclgevf_init_client_instance(struct hnae3_client *client, 2478e718a93fSPeng Li struct hnae3_ae_dev *ae_dev) 2479e2cb1decSSalil Mehta { 2480e718a93fSPeng Li struct hclgevf_dev *hdev = ae_dev->priv; 2481e2cb1decSSalil Mehta int ret; 2482e2cb1decSSalil Mehta 2483e2cb1decSSalil Mehta switch (client->type) { 2484e2cb1decSSalil Mehta case HNAE3_CLIENT_KNIC: 2485e2cb1decSSalil Mehta hdev->nic_client = client; 2486e2cb1decSSalil Mehta hdev->nic.client = client; 2487e2cb1decSSalil Mehta 24881db58f86SHuazhong Tan ret = hclgevf_init_nic_client_instance(ae_dev, client); 2489e2cb1decSSalil Mehta if (ret) 249049dd8054SJian Shen goto clear_nic; 2491e2cb1decSSalil Mehta 24921db58f86SHuazhong Tan ret = hclgevf_init_roce_client_instance(ae_dev, 24931db58f86SHuazhong Tan hdev->roce_client); 2494e2cb1decSSalil Mehta if (ret) 249549dd8054SJian Shen goto clear_roce; 2496d9f28fc2SJian Shen 2497e2cb1decSSalil Mehta break; 2498e2cb1decSSalil Mehta case HNAE3_CLIENT_ROCE: 2499544a7bcdSLijun Ou if (hnae3_dev_roce_supported(hdev)) { 2500e2cb1decSSalil Mehta hdev->roce_client = client; 2501e2cb1decSSalil Mehta hdev->roce.client = client; 2502544a7bcdSLijun Ou } 2503e2cb1decSSalil Mehta 25041db58f86SHuazhong Tan ret = hclgevf_init_roce_client_instance(ae_dev, client); 2505e2cb1decSSalil Mehta if (ret) 250649dd8054SJian Shen goto clear_roce; 2507e2cb1decSSalil Mehta 2508fa7a4bd5SJian Shen break; 2509fa7a4bd5SJian Shen default: 2510fa7a4bd5SJian Shen return -EINVAL; 2511e2cb1decSSalil Mehta } 2512e2cb1decSSalil Mehta 2513e2cb1decSSalil Mehta return 0; 251449dd8054SJian Shen 251549dd8054SJian Shen clear_nic: 251649dd8054SJian Shen hdev->nic_client = NULL; 251749dd8054SJian Shen hdev->nic.client = NULL; 251849dd8054SJian Shen return ret; 251949dd8054SJian Shen clear_roce: 252049dd8054SJian Shen hdev->roce_client = NULL; 252149dd8054SJian Shen hdev->roce.client = NULL; 252249dd8054SJian Shen return ret; 2523e2cb1decSSalil Mehta } 2524e2cb1decSSalil Mehta 2525e718a93fSPeng Li static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2526e718a93fSPeng Li struct hnae3_ae_dev *ae_dev) 2527e2cb1decSSalil Mehta { 2528e718a93fSPeng Li struct hclgevf_dev *hdev = ae_dev->priv; 2529e718a93fSPeng Li 2530e2cb1decSSalil Mehta /* un-init roce, if it exists */ 253149dd8054SJian Shen if (hdev->roce_client) { 2532e140c798SYufeng Mo while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 2533e140c798SYufeng Mo msleep(HCLGEVF_WAIT_RESET_DONE); 2534fe735c84SHuazhong Tan clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2535e140c798SYufeng Mo 2536e2cb1decSSalil Mehta hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 253749dd8054SJian Shen hdev->roce_client = NULL; 253849dd8054SJian Shen hdev->roce.client = NULL; 253949dd8054SJian Shen } 2540e2cb1decSSalil Mehta 2541e2cb1decSSalil Mehta /* un-init nic/unic, if this was not called by roce client */ 
254249dd8054SJian Shen if (client->ops->uninit_instance && hdev->nic_client && 254349dd8054SJian Shen client->type != HNAE3_CLIENT_ROCE) { 2544e140c798SYufeng Mo while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 2545e140c798SYufeng Mo msleep(HCLGEVF_WAIT_RESET_DONE); 254625d1817cSHuazhong Tan clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 254725d1817cSHuazhong Tan 2548e2cb1decSSalil Mehta client->ops->uninit_instance(&hdev->nic, 0); 254949dd8054SJian Shen hdev->nic_client = NULL; 255049dd8054SJian Shen hdev->nic.client = NULL; 255149dd8054SJian Shen } 2552e2cb1decSSalil Mehta } 2553e2cb1decSSalil Mehta 255430ae7f8aSHuazhong Tan static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev) 255530ae7f8aSHuazhong Tan { 255630ae7f8aSHuazhong Tan struct pci_dev *pdev = hdev->pdev; 255730ae7f8aSHuazhong Tan struct hclgevf_hw *hw = &hdev->hw; 255830ae7f8aSHuazhong Tan 255930ae7f8aSHuazhong Tan /* for device does not have device memory, return directly */ 256030ae7f8aSHuazhong Tan if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR))) 256130ae7f8aSHuazhong Tan return 0; 256230ae7f8aSHuazhong Tan 2563076bb537SJie Wang hw->hw.mem_base = 2564076bb537SJie Wang devm_ioremap_wc(&pdev->dev, 2565076bb537SJie Wang pci_resource_start(pdev, HCLGEVF_MEM_BAR), 256630ae7f8aSHuazhong Tan pci_resource_len(pdev, HCLGEVF_MEM_BAR)); 2567076bb537SJie Wang if (!hw->hw.mem_base) { 2568be419fcaSColin Ian King dev_err(&pdev->dev, "failed to map device memory\n"); 256930ae7f8aSHuazhong Tan return -EFAULT; 257030ae7f8aSHuazhong Tan } 257130ae7f8aSHuazhong Tan 257230ae7f8aSHuazhong Tan return 0; 257330ae7f8aSHuazhong Tan } 257430ae7f8aSHuazhong Tan 2575e2cb1decSSalil Mehta static int hclgevf_pci_init(struct hclgevf_dev *hdev) 2576e2cb1decSSalil Mehta { 2577e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev; 2578e2cb1decSSalil Mehta struct hclgevf_hw *hw; 2579e2cb1decSSalil Mehta int ret; 2580e2cb1decSSalil Mehta 2581e2cb1decSSalil Mehta ret = pci_enable_device(pdev); 2582e2cb1decSSalil Mehta if (ret) { 2583e2cb1decSSalil Mehta dev_err(&pdev->dev, "failed to enable PCI device\n"); 25843e249d3bSFuyun Liang return ret; 2585e2cb1decSSalil Mehta } 2586e2cb1decSSalil Mehta 2587e2cb1decSSalil Mehta ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2588e2cb1decSSalil Mehta if (ret) { 2589e2cb1decSSalil Mehta dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 2590e2cb1decSSalil Mehta goto err_disable_device; 2591e2cb1decSSalil Mehta } 2592e2cb1decSSalil Mehta 2593e2cb1decSSalil Mehta ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 2594e2cb1decSSalil Mehta if (ret) { 2595e2cb1decSSalil Mehta dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 2596e2cb1decSSalil Mehta goto err_disable_device; 2597e2cb1decSSalil Mehta } 2598e2cb1decSSalil Mehta 2599e2cb1decSSalil Mehta pci_set_master(pdev); 2600e2cb1decSSalil Mehta hw = &hdev->hw; 2601076bb537SJie Wang hw->hw.io_base = pci_iomap(pdev, 2, 0); 2602076bb537SJie Wang if (!hw->hw.io_base) { 2603e2cb1decSSalil Mehta dev_err(&pdev->dev, "can't map configuration register space\n"); 2604e2cb1decSSalil Mehta ret = -ENOMEM; 2605fc3e07e8SCai Huoqing goto err_release_regions; 2606e2cb1decSSalil Mehta } 2607e2cb1decSSalil Mehta 260830ae7f8aSHuazhong Tan ret = hclgevf_dev_mem_map(hdev); 260930ae7f8aSHuazhong Tan if (ret) 261030ae7f8aSHuazhong Tan goto err_unmap_io_base; 261130ae7f8aSHuazhong Tan 2612e2cb1decSSalil Mehta return 0; 2613e2cb1decSSalil Mehta 261430ae7f8aSHuazhong Tan err_unmap_io_base: 2615076bb537SJie Wang 
pci_iounmap(pdev, hdev->hw.hw.io_base);
2616fc3e07e8SCai Huoqing err_release_regions:
2617e2cb1decSSalil Mehta pci_release_regions(pdev);
2618e2cb1decSSalil Mehta err_disable_device:
2619e2cb1decSSalil Mehta pci_disable_device(pdev);
26203e249d3bSFuyun Liang 
2621e2cb1decSSalil Mehta return ret;
2622e2cb1decSSalil Mehta }
2623e2cb1decSSalil Mehta 
2624e2cb1decSSalil Mehta static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
2625e2cb1decSSalil Mehta {
2626e2cb1decSSalil Mehta struct pci_dev *pdev = hdev->pdev;
2627e2cb1decSSalil Mehta 
2628076bb537SJie Wang if (hdev->hw.hw.mem_base)
2629076bb537SJie Wang devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
263030ae7f8aSHuazhong Tan 
2631076bb537SJie Wang pci_iounmap(pdev, hdev->hw.hw.io_base);
2632e2cb1decSSalil Mehta pci_release_regions(pdev);
2633e2cb1decSSalil Mehta pci_disable_device(pdev);
2634e2cb1decSSalil Mehta }
2635e2cb1decSSalil Mehta 
263607acf909SJian Shen static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
263707acf909SJian Shen {
263807acf909SJian Shen struct hclgevf_query_res_cmd *req;
26396befad60SJie Wang struct hclge_desc desc;
264007acf909SJian Shen int ret;
264107acf909SJian Shen 
264243710bfeSJie Wang hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true);
264307acf909SJian Shen ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
264407acf909SJian Shen if (ret) {
264507acf909SJian Shen dev_err(&hdev->pdev->dev,
264607acf909SJian Shen "query vf resource failed, ret = %d.\n", ret);
264707acf909SJian Shen return ret;
264807acf909SJian Shen }
264907acf909SJian Shen 
265007acf909SJian Shen req = (struct hclgevf_query_res_cmd *)desc.data;
265107acf909SJian Shen 
2652580a05f9SYonglong Liu if (hnae3_dev_roce_supported(hdev)) {
265307acf909SJian Shen hdev->roce_base_msix_offset =
265460df7e91SHuazhong Tan hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
265507acf909SJian Shen HCLGEVF_MSIX_OFT_ROCEE_M,
265607acf909SJian Shen HCLGEVF_MSIX_OFT_ROCEE_S);
265707acf909SJian Shen hdev->num_roce_msix =
265860df7e91SHuazhong Tan hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
265907acf909SJian Shen HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
266007acf909SJian Shen 
2661580a05f9SYonglong Liu /* nic's msix number is always equal to the roce's. */
2662580a05f9SYonglong Liu hdev->num_nic_msix = hdev->num_roce_msix;
2663580a05f9SYonglong Liu 
266407acf909SJian Shen /* VF should have NIC vectors and Roce vectors, NIC vectors
266507acf909SJian Shen * are queued before Roce vectors. The offset is fixed to 64.
266607acf909SJian Shen */ 266707acf909SJian Shen hdev->num_msi = hdev->num_roce_msix + 266807acf909SJian Shen hdev->roce_base_msix_offset; 266907acf909SJian Shen } else { 267007acf909SJian Shen hdev->num_msi = 267160df7e91SHuazhong Tan hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 267207acf909SJian Shen HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2673580a05f9SYonglong Liu 2674580a05f9SYonglong Liu hdev->num_nic_msix = hdev->num_msi; 2675580a05f9SYonglong Liu } 2676580a05f9SYonglong Liu 2677580a05f9SYonglong Liu if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { 2678580a05f9SYonglong Liu dev_err(&hdev->pdev->dev, 2679580a05f9SYonglong Liu "Just %u msi resources, not enough for vf(min:2).\n", 2680580a05f9SYonglong Liu hdev->num_nic_msix); 2681580a05f9SYonglong Liu return -EINVAL; 268207acf909SJian Shen } 268307acf909SJian Shen 268407acf909SJian Shen return 0; 268507acf909SJian Shen } 268607acf909SJian Shen 2687af2aedc5SGuangbin Huang static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) 2688af2aedc5SGuangbin Huang { 2689af2aedc5SGuangbin Huang #define HCLGEVF_MAX_NON_TSO_BD_NUM 8U 2690af2aedc5SGuangbin Huang 2691af2aedc5SGuangbin Huang struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 2692af2aedc5SGuangbin Huang 2693af2aedc5SGuangbin Huang ae_dev->dev_specs.max_non_tso_bd_num = 2694af2aedc5SGuangbin Huang HCLGEVF_MAX_NON_TSO_BD_NUM; 2695af2aedc5SGuangbin Huang ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 26967428d6c9SJie Wang ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; 2697ab16b49cSHuazhong Tan ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 2698e070c8b9SYufeng Mo ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME; 2699af2aedc5SGuangbin Huang } 2700af2aedc5SGuangbin Huang 2701af2aedc5SGuangbin Huang static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, 27026befad60SJie Wang struct hclge_desc *desc) 2703af2aedc5SGuangbin Huang { 2704af2aedc5SGuangbin Huang struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 2705af2aedc5SGuangbin Huang struct hclgevf_dev_specs_0_cmd *req0; 2706ab16b49cSHuazhong Tan struct hclgevf_dev_specs_1_cmd *req1; 2707af2aedc5SGuangbin Huang 2708af2aedc5SGuangbin Huang req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; 2709ab16b49cSHuazhong Tan req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data; 2710af2aedc5SGuangbin Huang 2711af2aedc5SGuangbin Huang ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; 2712af2aedc5SGuangbin Huang ae_dev->dev_specs.rss_ind_tbl_size = 2713af2aedc5SGuangbin Huang le16_to_cpu(req0->rss_ind_tbl_size); 271491bfae25SHuazhong Tan ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); 2715af2aedc5SGuangbin Huang ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); 2716ab16b49cSHuazhong Tan ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); 2717e070c8b9SYufeng Mo ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); 2718af2aedc5SGuangbin Huang } 2719af2aedc5SGuangbin Huang 272013297028SGuangbin Huang static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) 272113297028SGuangbin Huang { 272213297028SGuangbin Huang struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; 272313297028SGuangbin Huang 272413297028SGuangbin Huang if (!dev_specs->max_non_tso_bd_num) 272513297028SGuangbin Huang dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM; 272613297028SGuangbin Huang if (!dev_specs->rss_ind_tbl_size) 272713297028SGuangbin Huang dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 
272813297028SGuangbin Huang if (!dev_specs->rss_key_size) 27297428d6c9SJie Wang dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; 2730ab16b49cSHuazhong Tan if (!dev_specs->max_int_gl) 2731ab16b49cSHuazhong Tan dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 2732e070c8b9SYufeng Mo if (!dev_specs->max_frm_size) 2733e070c8b9SYufeng Mo dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME; 273413297028SGuangbin Huang } 273513297028SGuangbin Huang 2736af2aedc5SGuangbin Huang static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) 2737af2aedc5SGuangbin Huang { 27386befad60SJie Wang struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; 2739af2aedc5SGuangbin Huang int ret; 2740af2aedc5SGuangbin Huang int i; 2741af2aedc5SGuangbin Huang 2742af2aedc5SGuangbin Huang /* set default specifications as devices lower than version V3 do not 2743af2aedc5SGuangbin Huang * support querying specifications from firmware. 2744af2aedc5SGuangbin Huang */ 2745af2aedc5SGuangbin Huang if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 2746af2aedc5SGuangbin Huang hclgevf_set_default_dev_specs(hdev); 2747af2aedc5SGuangbin Huang return 0; 2748af2aedc5SGuangbin Huang } 2749af2aedc5SGuangbin Huang 2750af2aedc5SGuangbin Huang for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 2751af2aedc5SGuangbin Huang hclgevf_cmd_setup_basic_desc(&desc[i], 275243710bfeSJie Wang HCLGE_OPC_QUERY_DEV_SPECS, true); 2753cb413bfaSJie Wang desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2754af2aedc5SGuangbin Huang } 275543710bfeSJie Wang hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true); 2756af2aedc5SGuangbin Huang 2757af2aedc5SGuangbin Huang ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); 2758af2aedc5SGuangbin Huang if (ret) 2759af2aedc5SGuangbin Huang return ret; 2760af2aedc5SGuangbin Huang 2761af2aedc5SGuangbin Huang hclgevf_parse_dev_specs(hdev, desc); 276213297028SGuangbin Huang hclgevf_check_dev_specs(hdev); 2763af2aedc5SGuangbin Huang 2764af2aedc5SGuangbin Huang return 0; 2765af2aedc5SGuangbin Huang } 2766af2aedc5SGuangbin Huang 2767862d969aSHuazhong Tan static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 2768862d969aSHuazhong Tan { 2769862d969aSHuazhong Tan struct pci_dev *pdev = hdev->pdev; 2770862d969aSHuazhong Tan int ret = 0; 2771862d969aSHuazhong Tan 277209e6b30eSJie Wang if ((hdev->reset_type == HNAE3_VF_FULL_RESET || 277309e6b30eSJie Wang hdev->reset_type == HNAE3_FLR_RESET) && 2774862d969aSHuazhong Tan test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2775862d969aSHuazhong Tan hclgevf_misc_irq_uninit(hdev); 2776862d969aSHuazhong Tan hclgevf_uninit_msi(hdev); 2777862d969aSHuazhong Tan clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2778862d969aSHuazhong Tan } 2779862d969aSHuazhong Tan 2780862d969aSHuazhong Tan if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2781862d969aSHuazhong Tan pci_set_master(pdev); 2782862d969aSHuazhong Tan ret = hclgevf_init_msi(hdev); 2783862d969aSHuazhong Tan if (ret) { 2784862d969aSHuazhong Tan dev_err(&pdev->dev, 2785862d969aSHuazhong Tan "failed(%d) to init MSI/MSI-X\n", ret); 2786862d969aSHuazhong Tan return ret; 2787862d969aSHuazhong Tan } 2788862d969aSHuazhong Tan 2789862d969aSHuazhong Tan ret = hclgevf_misc_irq_init(hdev); 2790862d969aSHuazhong Tan if (ret) { 2791862d969aSHuazhong Tan hclgevf_uninit_msi(hdev); 2792862d969aSHuazhong Tan dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2793862d969aSHuazhong Tan ret); 2794862d969aSHuazhong Tan return ret; 2795862d969aSHuazhong Tan } 
2796862d969aSHuazhong Tan 2797862d969aSHuazhong Tan set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2798862d969aSHuazhong Tan } 2799862d969aSHuazhong Tan 2800862d969aSHuazhong Tan return ret; 2801862d969aSHuazhong Tan } 2802862d969aSHuazhong Tan 2803039ba863SJian Shen static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) 2804039ba863SJian Shen { 2805039ba863SJian Shen struct hclge_vf_to_pf_msg send_msg; 2806039ba863SJian Shen 2807039ba863SJian Shen hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, 2808039ba863SJian Shen HCLGE_MBX_VPORT_LIST_CLEAR); 2809039ba863SJian Shen return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2810039ba863SJian Shen } 2811039ba863SJian Shen 281279664077SHuazhong Tan static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev) 281379664077SHuazhong Tan { 281479664077SHuazhong Tan if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 281579664077SHuazhong Tan hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1); 281679664077SHuazhong Tan } 281779664077SHuazhong Tan 281879664077SHuazhong Tan static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev) 281979664077SHuazhong Tan { 282079664077SHuazhong Tan if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 282179664077SHuazhong Tan hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0); 282279664077SHuazhong Tan } 282379664077SHuazhong Tan 28249c6f7085SHuazhong Tan static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 2825e2cb1decSSalil Mehta { 28267a01c897SSalil Mehta struct pci_dev *pdev = hdev->pdev; 2827e2cb1decSSalil Mehta int ret; 2828e2cb1decSSalil Mehta 2829862d969aSHuazhong Tan ret = hclgevf_pci_reset(hdev); 2830862d969aSHuazhong Tan if (ret) { 2831862d969aSHuazhong Tan dev_err(&pdev->dev, "pci reset failed %d\n", ret); 2832862d969aSHuazhong Tan return ret; 2833862d969aSHuazhong Tan } 2834862d969aSHuazhong Tan 2835cb413bfaSJie Wang hclgevf_arq_init(hdev); 28362a1a1a7bSHao Lan 2837cb413bfaSJie Wang ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, 2838cb413bfaSJie Wang &hdev->fw_version, false, 2839cb413bfaSJie Wang hdev->reset_pending); 28409c6f7085SHuazhong Tan if (ret) { 28419c6f7085SHuazhong Tan dev_err(&pdev->dev, "cmd failed %d\n", ret); 28429c6f7085SHuazhong Tan return ret; 28437a01c897SSalil Mehta } 2844e2cb1decSSalil Mehta 28459c6f7085SHuazhong Tan ret = hclgevf_rss_init_hw(hdev); 28469c6f7085SHuazhong Tan if (ret) { 28479c6f7085SHuazhong Tan dev_err(&hdev->pdev->dev, 28489c6f7085SHuazhong Tan "failed(%d) to initialize RSS\n", ret); 28499c6f7085SHuazhong Tan return ret; 28509c6f7085SHuazhong Tan } 28519c6f7085SHuazhong Tan 28523462207dSYufeng Mo ret = hclgevf_config_gro(hdev); 2853b26a6feaSPeng Li if (ret) 2854b26a6feaSPeng Li return ret; 2855b26a6feaSPeng Li 28569c6f7085SHuazhong Tan ret = hclgevf_init_vlan_config(hdev); 28579c6f7085SHuazhong Tan if (ret) { 28589c6f7085SHuazhong Tan dev_err(&hdev->pdev->dev, 28599c6f7085SHuazhong Tan "failed(%d) to initialize VLAN config\n", ret); 28609c6f7085SHuazhong Tan return ret; 28619c6f7085SHuazhong Tan } 28629c6f7085SHuazhong Tan 2863190cd8a7SJian Shen /* get current port based vlan state from PF */ 2864190cd8a7SJian Shen ret = hclgevf_get_port_base_vlan_filter_state(hdev); 2865190cd8a7SJian Shen if (ret) 2866190cd8a7SJian Shen return ret; 2867190cd8a7SJian Shen 2868c631c696SJian Shen set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 2869c631c696SJian Shen 287079664077SHuazhong Tan hclgevf_init_rxd_adv_layout(hdev); 287179664077SHuazhong Tan 28729c6f7085SHuazhong Tan 
dev_info(&hdev->pdev->dev, "Reset done\n"); 28739c6f7085SHuazhong Tan 28749c6f7085SHuazhong Tan return 0; 28759c6f7085SHuazhong Tan } 28769c6f7085SHuazhong Tan 28779c6f7085SHuazhong Tan static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 28789c6f7085SHuazhong Tan { 28799c6f7085SHuazhong Tan struct pci_dev *pdev = hdev->pdev; 28809c6f7085SHuazhong Tan int ret; 28819c6f7085SHuazhong Tan 2882e2cb1decSSalil Mehta ret = hclgevf_pci_init(hdev); 288360df7e91SHuazhong Tan if (ret) 2884e2cb1decSSalil Mehta return ret; 2885e2cb1decSSalil Mehta 2886cb413bfaSJie Wang ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); 288760df7e91SHuazhong Tan if (ret) 28888b0195a3SHuazhong Tan goto err_cmd_queue_init; 28898b0195a3SHuazhong Tan 2890cb413bfaSJie Wang hclgevf_arq_init(hdev); 28912a1a1a7bSHao Lan 28922a1a1a7bSHao Lan hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclgevf_cmq_ops); 2893cb413bfaSJie Wang ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, 2894cb413bfaSJie Wang &hdev->fw_version, false, 2895cb413bfaSJie Wang hdev->reset_pending); 2896eddf0462SYunsheng Lin if (ret) 2897eddf0462SYunsheng Lin goto err_cmd_init; 2898eddf0462SYunsheng Lin 289907acf909SJian Shen /* Get vf resource */ 290007acf909SJian Shen ret = hclgevf_query_vf_resource(hdev); 290160df7e91SHuazhong Tan if (ret) 29028b0195a3SHuazhong Tan goto err_cmd_init; 290307acf909SJian Shen 2904af2aedc5SGuangbin Huang ret = hclgevf_query_dev_specs(hdev); 2905af2aedc5SGuangbin Huang if (ret) { 2906af2aedc5SGuangbin Huang dev_err(&pdev->dev, 2907af2aedc5SGuangbin Huang "failed to query dev specifications, ret = %d\n", ret); 2908af2aedc5SGuangbin Huang goto err_cmd_init; 2909af2aedc5SGuangbin Huang } 2910af2aedc5SGuangbin Huang 291107acf909SJian Shen ret = hclgevf_init_msi(hdev); 291207acf909SJian Shen if (ret) { 291307acf909SJian Shen dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 29148b0195a3SHuazhong Tan goto err_cmd_init; 291507acf909SJian Shen } 291607acf909SJian Shen 291707acf909SJian Shen hclgevf_state_init(hdev); 2918dea846e8SHuazhong Tan hdev->reset_level = HNAE3_VF_FUNC_RESET; 2919afb6afdbSHuazhong Tan hdev->reset_type = HNAE3_NONE_RESET; 292007acf909SJian Shen 2921e2cb1decSSalil Mehta ret = hclgevf_misc_irq_init(hdev); 292260df7e91SHuazhong Tan if (ret) 2923e2cb1decSSalil Mehta goto err_misc_irq_init; 2924e2cb1decSSalil Mehta 2925862d969aSHuazhong Tan set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2926862d969aSHuazhong Tan 2927e2cb1decSSalil Mehta ret = hclgevf_configure(hdev); 2928e2cb1decSSalil Mehta if (ret) { 2929e2cb1decSSalil Mehta dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 2930e2cb1decSSalil Mehta goto err_config; 2931e2cb1decSSalil Mehta } 2932e2cb1decSSalil Mehta 2933e2cb1decSSalil Mehta ret = hclgevf_alloc_tqps(hdev); 2934e2cb1decSSalil Mehta if (ret) { 2935e2cb1decSSalil Mehta dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 2936e2cb1decSSalil Mehta goto err_config; 2937e2cb1decSSalil Mehta } 2938e2cb1decSSalil Mehta 2939e2cb1decSSalil Mehta ret = hclgevf_set_handle_info(hdev); 294060df7e91SHuazhong Tan if (ret) 2941e2cb1decSSalil Mehta goto err_config; 2942e2cb1decSSalil Mehta 29433462207dSYufeng Mo ret = hclgevf_config_gro(hdev); 2944b26a6feaSPeng Li if (ret) 2945b26a6feaSPeng Li goto err_config; 2946b26a6feaSPeng Li 2947e2cb1decSSalil Mehta /* Initialize RSS for this VF */ 294893969dc1SJie Wang ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev, 294993969dc1SJie Wang &hdev->rss_cfg); 295087ce161eSGuangbin Huang if (ret) { 295187ce161eSGuangbin Huang 
dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
295287ce161eSGuangbin Huang goto err_config;
295387ce161eSGuangbin Huang }
295487ce161eSGuangbin Huang 
2955e2cb1decSSalil Mehta ret = hclgevf_rss_init_hw(hdev);
2956e2cb1decSSalil Mehta if (ret) {
2957e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev,
2958e2cb1decSSalil Mehta "failed(%d) to initialize RSS\n", ret);
2959e2cb1decSSalil Mehta goto err_config;
2960e2cb1decSSalil Mehta }
2961e2cb1decSSalil Mehta 
2962039ba863SJian Shen /* ensure the vf tbl list is empty before init */
2963039ba863SJian Shen ret = hclgevf_clear_vport_list(hdev);
2964039ba863SJian Shen if (ret) {
2965039ba863SJian Shen dev_err(&pdev->dev,
2966039ba863SJian Shen "failed to clear tbl list configuration, ret = %d.\n",
2967039ba863SJian Shen ret);
2968039ba863SJian Shen goto err_config;
2969039ba863SJian Shen }
2970039ba863SJian Shen 
2971e2cb1decSSalil Mehta ret = hclgevf_init_vlan_config(hdev);
2972e2cb1decSSalil Mehta if (ret) {
2973e2cb1decSSalil Mehta dev_err(&hdev->pdev->dev,
2974e2cb1decSSalil Mehta "failed(%d) to initialize VLAN config\n", ret);
2975e2cb1decSSalil Mehta goto err_config;
2976e2cb1decSSalil Mehta }
2977e2cb1decSSalil Mehta 
297879664077SHuazhong Tan hclgevf_init_rxd_adv_layout(hdev);
297979664077SHuazhong Tan 
298035d92abfSYonglong Liu ret = hclgevf_devlink_init(hdev);
298135d92abfSYonglong Liu if (ret)
298235d92abfSYonglong Liu goto err_config;
298335d92abfSYonglong Liu 
29840251d196SGuangbin Huang set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
29850251d196SGuangbin Huang 
29860742ed7cSHuazhong Tan hdev->last_reset_time = jiffies;
298708d80a4cSHuazhong Tan dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
298808d80a4cSHuazhong Tan HCLGEVF_DRIVER_NAME);
2989e2cb1decSSalil Mehta 
2990ff200099SYunsheng Lin hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
299165e98bb5SJijie Shao timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
2992ff200099SYunsheng Lin 
2993e2cb1decSSalil Mehta return 0;
2994e2cb1decSSalil Mehta 
2995e2cb1decSSalil Mehta err_config:
2996e2cb1decSSalil Mehta hclgevf_misc_irq_uninit(hdev);
2997e2cb1decSSalil Mehta err_misc_irq_init:
2998e2cb1decSSalil Mehta hclgevf_state_uninit(hdev);
2999e2cb1decSSalil Mehta hclgevf_uninit_msi(hdev);
300007acf909SJian Shen err_cmd_init:
30019970308fSJie Wang hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
30028b0195a3SHuazhong Tan err_cmd_queue_init:
3003e2cb1decSSalil Mehta hclgevf_pci_uninit(hdev);
3004862d969aSHuazhong Tan clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
3005e2cb1decSSalil Mehta return ret;
3006e2cb1decSSalil Mehta }
3007e2cb1decSSalil Mehta 
30087a01c897SSalil Mehta static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
3009e2cb1decSSalil Mehta {
3010d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg;
3011d3410018SYufeng Mo 
3012e2cb1decSSalil Mehta hclgevf_state_uninit(hdev);
301379664077SHuazhong Tan hclgevf_uninit_rxd_adv_layout(hdev);
3014862d969aSHuazhong Tan 
3015d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
3016d3410018SYufeng Mo hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
301723b4201dSJian Shen 
3018862d969aSHuazhong Tan if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
3019eddf0462SYunsheng Lin hclgevf_misc_irq_uninit(hdev);
3020e2cb1decSSalil Mehta hclgevf_uninit_msi(hdev);
30217a01c897SSalil Mehta }
30227a01c897SSalil Mehta 
30239970308fSJie Wang hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
3024cd624299SYufeng Mo hclgevf_devlink_uninit(hdev);
3025e3364c5fSZenghui Yu
hclgevf_pci_uninit(hdev); 3026ee4bcd3bSJian Shen hclgevf_uninit_mac_list(hdev); 3027862d969aSHuazhong Tan } 3028862d969aSHuazhong Tan 30297a01c897SSalil Mehta static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 30307a01c897SSalil Mehta { 30317a01c897SSalil Mehta struct pci_dev *pdev = ae_dev->pdev; 30327a01c897SSalil Mehta int ret; 30337a01c897SSalil Mehta 30347a01c897SSalil Mehta ret = hclgevf_alloc_hdev(ae_dev); 30357a01c897SSalil Mehta if (ret) { 30367a01c897SSalil Mehta dev_err(&pdev->dev, "hclge device allocation failed\n"); 30377a01c897SSalil Mehta return ret; 30387a01c897SSalil Mehta } 30397a01c897SSalil Mehta 30407a01c897SSalil Mehta ret = hclgevf_init_hdev(ae_dev->priv); 3041a6d818e3SYunsheng Lin if (ret) { 30427a01c897SSalil Mehta dev_err(&pdev->dev, "hclge device initialization failed\n"); 30437a01c897SSalil Mehta return ret; 30447a01c897SSalil Mehta } 30457a01c897SSalil Mehta 3046a6d818e3SYunsheng Lin return 0; 3047a6d818e3SYunsheng Lin } 3048a6d818e3SYunsheng Lin 30497a01c897SSalil Mehta static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 30507a01c897SSalil Mehta { 30517a01c897SSalil Mehta struct hclgevf_dev *hdev = ae_dev->priv; 30527a01c897SSalil Mehta 30537a01c897SSalil Mehta hclgevf_uninit_hdev(hdev); 3054e2cb1decSSalil Mehta ae_dev->priv = NULL; 3055e2cb1decSSalil Mehta } 3056e2cb1decSSalil Mehta 3057849e4607SPeng Li static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 3058849e4607SPeng Li { 3059849e4607SPeng Li struct hnae3_handle *nic = &hdev->nic; 3060849e4607SPeng Li struct hnae3_knic_private_info *kinfo = &nic->kinfo; 3061849e4607SPeng Li 30628be73621SHuazhong Tan return min_t(u32, hdev->rss_size_max, 306335244430SJian Shen hdev->num_tqps / kinfo->tc_info.num_tc); 3064849e4607SPeng Li } 3065849e4607SPeng Li 3066849e4607SPeng Li /** 3067849e4607SPeng Li * hclgevf_get_channels - Get the current channels enabled and max supported. 3068849e4607SPeng Li * @handle: hardware information for network interface 3069849e4607SPeng Li * @ch: ethtool channels structure 3070849e4607SPeng Li * 3071849e4607SPeng Li * We don't support separate tx and rx queues as channels. The other count 3072849e4607SPeng Li * represents how many queues are being used for control. max_combined counts 3073849e4607SPeng Li * how many queue pairs we can support. They may not be mapped 1 to 1 with 3074849e4607SPeng Li * q_vectors since we support a lot more queue pairs than q_vectors. 
3075849e4607SPeng Li **/ 3076849e4607SPeng Li static void hclgevf_get_channels(struct hnae3_handle *handle, 3077849e4607SPeng Li struct ethtool_channels *ch) 3078849e4607SPeng Li { 3079849e4607SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3080849e4607SPeng Li 3081849e4607SPeng Li ch->max_combined = hclgevf_get_max_channels(hdev); 3082849e4607SPeng Li ch->other_count = 0; 3083849e4607SPeng Li ch->max_other = 0; 30848be73621SHuazhong Tan ch->combined_count = handle->kinfo.rss_size; 3085849e4607SPeng Li } 3086849e4607SPeng Li 3087cc719218SPeng Li static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 30880d43bf45SHuazhong Tan u16 *alloc_tqps, u16 *max_rss_size) 3089cc719218SPeng Li { 3090cc719218SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3091cc719218SPeng Li 30920d43bf45SHuazhong Tan *alloc_tqps = hdev->num_tqps; 3093cc719218SPeng Li *max_rss_size = hdev->rss_size_max; 3094cc719218SPeng Li } 3095cc719218SPeng Li 30964093d1a2SGuangbin Huang static void hclgevf_update_rss_size(struct hnae3_handle *handle, 30974093d1a2SGuangbin Huang u32 new_tqps_num) 30984093d1a2SGuangbin Huang { 30994093d1a2SGuangbin Huang struct hnae3_knic_private_info *kinfo = &handle->kinfo; 31004093d1a2SGuangbin Huang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 31014093d1a2SGuangbin Huang u16 max_rss_size; 31024093d1a2SGuangbin Huang 31034093d1a2SGuangbin Huang kinfo->req_rss_size = new_tqps_num; 31044093d1a2SGuangbin Huang 31054093d1a2SGuangbin Huang max_rss_size = min_t(u16, hdev->rss_size_max, 310635244430SJian Shen hdev->num_tqps / kinfo->tc_info.num_tc); 31074093d1a2SGuangbin Huang 31084093d1a2SGuangbin Huang /* Use the user's configuration when it is not larger than 31094093d1a2SGuangbin Huang * max_rss_size, otherwise, use the maximum specification value. 
31104093d1a2SGuangbin Huang */ 31114093d1a2SGuangbin Huang if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && 31124093d1a2SGuangbin Huang kinfo->req_rss_size <= max_rss_size) 31134093d1a2SGuangbin Huang kinfo->rss_size = kinfo->req_rss_size; 31144093d1a2SGuangbin Huang else if (kinfo->rss_size > max_rss_size || 31154093d1a2SGuangbin Huang (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 31164093d1a2SGuangbin Huang kinfo->rss_size = max_rss_size; 31174093d1a2SGuangbin Huang 311835244430SJian Shen kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; 31194093d1a2SGuangbin Huang } 31204093d1a2SGuangbin Huang 31214093d1a2SGuangbin Huang static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 31224093d1a2SGuangbin Huang bool rxfh_configured) 31234093d1a2SGuangbin Huang { 31244093d1a2SGuangbin Huang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 31254093d1a2SGuangbin Huang struct hnae3_knic_private_info *kinfo = &handle->kinfo; 312693969dc1SJie Wang u16 tc_offset[HCLGE_COMM_MAX_TC_NUM]; 312793969dc1SJie Wang u16 tc_valid[HCLGE_COMM_MAX_TC_NUM]; 312893969dc1SJie Wang u16 tc_size[HCLGE_COMM_MAX_TC_NUM]; 31294093d1a2SGuangbin Huang u16 cur_rss_size = kinfo->rss_size; 31304093d1a2SGuangbin Huang u16 cur_tqps = kinfo->num_tqps; 31314093d1a2SGuangbin Huang u32 *rss_indir; 31324093d1a2SGuangbin Huang unsigned int i; 31334093d1a2SGuangbin Huang int ret; 31344093d1a2SGuangbin Huang 31354093d1a2SGuangbin Huang hclgevf_update_rss_size(handle, new_tqps_num); 31364093d1a2SGuangbin Huang 3137ae9f29fdSJie Wang hclge_comm_get_rss_tc_info(kinfo->rss_size, hdev->hw_tc_map, 313893969dc1SJie Wang tc_offset, tc_valid, tc_size); 313993969dc1SJie Wang ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, 314093969dc1SJie Wang tc_valid, tc_size); 31414093d1a2SGuangbin Huang if (ret) 31424093d1a2SGuangbin Huang return ret; 31434093d1a2SGuangbin Huang 3144cd7e963dSSalil Mehta /* RSS indirection table has been configured by user */ 31454093d1a2SGuangbin Huang if (rxfh_configured) 31464093d1a2SGuangbin Huang goto out; 31474093d1a2SGuangbin Huang 31484093d1a2SGuangbin Huang /* Reinitializes the rss indirect table according to the new RSS size */ 314987ce161eSGuangbin Huang rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size, 315087ce161eSGuangbin Huang sizeof(u32), GFP_KERNEL); 31514093d1a2SGuangbin Huang if (!rss_indir) 31524093d1a2SGuangbin Huang return -ENOMEM; 31534093d1a2SGuangbin Huang 315487ce161eSGuangbin Huang for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 31554093d1a2SGuangbin Huang rss_indir[i] = i % kinfo->rss_size; 31564093d1a2SGuangbin Huang 3157944de484SGuojia Liao hdev->rss_cfg.rss_size = kinfo->rss_size; 3158944de484SGuojia Liao 31594093d1a2SGuangbin Huang ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); 31604093d1a2SGuangbin Huang if (ret) 31614093d1a2SGuangbin Huang dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 31624093d1a2SGuangbin Huang ret); 31634093d1a2SGuangbin Huang 31644093d1a2SGuangbin Huang kfree(rss_indir); 31654093d1a2SGuangbin Huang 31664093d1a2SGuangbin Huang out: 31674093d1a2SGuangbin Huang if (!ret) 31684093d1a2SGuangbin Huang dev_info(&hdev->pdev->dev, 31694093d1a2SGuangbin Huang "Channels changed, rss_size from %u to %u, tqps from %u to %u", 31704093d1a2SGuangbin Huang cur_rss_size, kinfo->rss_size, 317135244430SJian Shen cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 31724093d1a2SGuangbin Huang 31734093d1a2SGuangbin Huang return ret; 31744093d1a2SGuangbin Huang } 
31754093d1a2SGuangbin Huang 3176175ec96bSFuyun Liang static int hclgevf_get_status(struct hnae3_handle *handle) 3177175ec96bSFuyun Liang { 3178175ec96bSFuyun Liang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3179175ec96bSFuyun Liang 3180175ec96bSFuyun Liang return hdev->hw.mac.link; 3181175ec96bSFuyun Liang } 3182175ec96bSFuyun Liang 31834a152de9SFuyun Liang static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 31844a152de9SFuyun Liang u8 *auto_neg, u32 *speed, 31850f032f93SHao Chen u8 *duplex, u32 *lane_num) 31864a152de9SFuyun Liang { 31874a152de9SFuyun Liang struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 31884a152de9SFuyun Liang 31894a152de9SFuyun Liang if (speed) 31904a152de9SFuyun Liang *speed = hdev->hw.mac.speed; 31914a152de9SFuyun Liang if (duplex) 31924a152de9SFuyun Liang *duplex = hdev->hw.mac.duplex; 31934a152de9SFuyun Liang if (auto_neg) 31944a152de9SFuyun Liang *auto_neg = AUTONEG_DISABLE; 31954a152de9SFuyun Liang } 31964a152de9SFuyun Liang 31974a152de9SFuyun Liang void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 31984a152de9SFuyun Liang u8 duplex) 31994a152de9SFuyun Liang { 32004a152de9SFuyun Liang hdev->hw.mac.speed = speed; 32014a152de9SFuyun Liang hdev->hw.mac.duplex = duplex; 32024a152de9SFuyun Liang } 32034a152de9SFuyun Liang 32041731be4cSYonglong Liu static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 32055c9f6b39SPeng Li { 32065c9f6b39SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 32073462207dSYufeng Mo bool gro_en_old = hdev->gro_en; 32083462207dSYufeng Mo int ret; 32095c9f6b39SPeng Li 32103462207dSYufeng Mo hdev->gro_en = enable; 32113462207dSYufeng Mo ret = hclgevf_config_gro(hdev); 32123462207dSYufeng Mo if (ret) 32133462207dSYufeng Mo hdev->gro_en = gro_en_old; 32143462207dSYufeng Mo 32153462207dSYufeng Mo return ret; 32165c9f6b39SPeng Li } 32175c9f6b39SPeng Li 321888d10bd6SJian Shen static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, 321988d10bd6SJian Shen u8 *module_type) 3220c136b884SPeng Li { 3221c136b884SPeng Li struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 322288d10bd6SJian Shen 3223c136b884SPeng Li if (media_type) 3224c136b884SPeng Li *media_type = hdev->hw.mac.media_type; 322588d10bd6SJian Shen 322688d10bd6SJian Shen if (module_type) 322788d10bd6SJian Shen *module_type = hdev->hw.mac.module_type; 3228c136b884SPeng Li } 3229c136b884SPeng Li 32304d60291bSHuazhong Tan static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 32314d60291bSHuazhong Tan { 32324d60291bSHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 32334d60291bSHuazhong Tan 3234aa5c4f17SHuazhong Tan return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 32354d60291bSHuazhong Tan } 32364d60291bSHuazhong Tan 3237fe735c84SHuazhong Tan static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle) 3238fe735c84SHuazhong Tan { 3239fe735c84SHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3240fe735c84SHuazhong Tan 3241076bb537SJie Wang return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 3242fe735c84SHuazhong Tan } 3243fe735c84SHuazhong Tan 32444d60291bSHuazhong Tan static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 32454d60291bSHuazhong Tan { 32464d60291bSHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 32474d60291bSHuazhong Tan 32484d60291bSHuazhong Tan return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 32494d60291bSHuazhong Tan } 32504d60291bSHuazhong Tan 
32514d60291bSHuazhong Tan static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
32524d60291bSHuazhong Tan {
32534d60291bSHuazhong Tan struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
32544d60291bSHuazhong Tan 
3255c88a6e7dSHuazhong Tan return hdev->rst_stats.hw_rst_done_cnt;
32564d60291bSHuazhong Tan }
32574d60291bSHuazhong Tan 
32589194d18bSliuzhongzhu static void hclgevf_get_link_mode(struct hnae3_handle *handle,
32599194d18bSliuzhongzhu unsigned long *supported,
32609194d18bSliuzhongzhu unsigned long *advertising)
32619194d18bSliuzhongzhu {
32629194d18bSliuzhongzhu struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
32639194d18bSliuzhongzhu 
32649194d18bSliuzhongzhu *supported = hdev->hw.mac.supported;
32659194d18bSliuzhongzhu *advertising = hdev->hw.mac.advertising;
32669194d18bSliuzhongzhu }
32679194d18bSliuzhongzhu 
326892f11ea1SJian Shen void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
3269767975e5SJie Wang struct hclge_mbx_port_base_vlan *port_base_vlan)
327092f11ea1SJian Shen {
327192f11ea1SJian Shen struct hnae3_handle *nic = &hdev->nic;
3272d3410018SYufeng Mo struct hclge_vf_to_pf_msg send_msg;
3273a6f7bfdcSJian Shen int ret;
327492f11ea1SJian Shen 
327592f11ea1SJian Shen rtnl_lock();
3276a6f7bfdcSJian Shen 
3277b7b5d25bSGuojia Liao if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
3278b7b5d25bSGuojia Liao test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
3279a6f7bfdcSJian Shen dev_warn(&hdev->pdev->dev,
3280a6f7bfdcSJian Shen "is resetting when updating port based vlan info\n");
328192f11ea1SJian Shen rtnl_unlock();
3282a6f7bfdcSJian Shen return;
3283a6f7bfdcSJian Shen }
3284a6f7bfdcSJian Shen 
3285a6f7bfdcSJian Shen ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
3286a6f7bfdcSJian Shen if (ret) {
3287a6f7bfdcSJian Shen rtnl_unlock();
3288a6f7bfdcSJian Shen return;
3289a6f7bfdcSJian Shen }
329092f11ea1SJian Shen 
329192f11ea1SJian Shen /* send msg to PF and wait for the PF to update port based vlan info */
3292d3410018SYufeng Mo hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
3293d3410018SYufeng Mo HCLGE_MBX_PORT_BASE_VLAN_CFG);
3294767975e5SJie Wang memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan));
3295a6f7bfdcSJian Shen ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3296a6f7bfdcSJian Shen if (!ret) {
329792f11ea1SJian Shen if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
3298a6f7bfdcSJian Shen nic->port_base_vlan_state = state;
329992f11ea1SJian Shen else
330092f11ea1SJian Shen nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
3301a6f7bfdcSJian Shen }
330292f11ea1SJian Shen 
330392f11ea1SJian Shen hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
330492f11ea1SJian Shen rtnl_unlock();
330592f11ea1SJian Shen }
330692f11ea1SJian Shen 
3307e2cb1decSSalil Mehta static const struct hnae3_ae_ops hclgevf_ops = {
3308e2cb1decSSalil Mehta .init_ae_dev = hclgevf_init_ae_dev,
3309e2cb1decSSalil Mehta .uninit_ae_dev = hclgevf_uninit_ae_dev,
3310bb1890d5SJiaran Zhang .reset_prepare = hclgevf_reset_prepare_general,
3311bb1890d5SJiaran Zhang .reset_done = hclgevf_reset_done,
3312e718a93fSPeng Li .init_client_instance = hclgevf_init_client_instance,
3313e718a93fSPeng Li .uninit_client_instance = hclgevf_uninit_client_instance,
3314e2cb1decSSalil Mehta .start = hclgevf_ae_start,
3315e2cb1decSSalil Mehta .stop = hclgevf_ae_stop,
3316a6d818e3SYunsheng Lin .client_start = hclgevf_client_start,
3317a6d818e3SYunsheng Lin .client_stop = hclgevf_client_stop,
3318e2cb1decSSalil Mehta .map_ring_to_vector =
hclgevf_map_ring_to_vector, 3319e2cb1decSSalil Mehta .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 3320e2cb1decSSalil Mehta .get_vector = hclgevf_get_vector, 33210d3e6631SYunsheng Lin .put_vector = hclgevf_put_vector, 3322e2cb1decSSalil Mehta .reset_queue = hclgevf_reset_tqp, 3323e2cb1decSSalil Mehta .get_mac_addr = hclgevf_get_mac_addr, 3324e2cb1decSSalil Mehta .set_mac_addr = hclgevf_set_mac_addr, 3325e2cb1decSSalil Mehta .add_uc_addr = hclgevf_add_uc_addr, 3326e2cb1decSSalil Mehta .rm_uc_addr = hclgevf_rm_uc_addr, 3327e2cb1decSSalil Mehta .add_mc_addr = hclgevf_add_mc_addr, 3328e2cb1decSSalil Mehta .rm_mc_addr = hclgevf_rm_mc_addr, 3329e2cb1decSSalil Mehta .get_stats = hclgevf_get_stats, 3330e2cb1decSSalil Mehta .update_stats = hclgevf_update_stats, 3331e2cb1decSSalil Mehta .get_strings = hclgevf_get_strings, 3332e2cb1decSSalil Mehta .get_sset_count = hclgevf_get_sset_count, 3333027733b1SJie Wang .get_rss_key_size = hclge_comm_get_rss_key_size, 3334e2cb1decSSalil Mehta .get_rss = hclgevf_get_rss, 3335e2cb1decSSalil Mehta .set_rss = hclgevf_set_rss, 3336d97b3072SJian Shen .get_rss_tuple = hclgevf_get_rss_tuple, 3337d97b3072SJian Shen .set_rss_tuple = hclgevf_set_rss_tuple, 3338e2cb1decSSalil Mehta .get_tc_size = hclgevf_get_tc_size, 3339e2cb1decSSalil Mehta .get_fw_version = hclgevf_get_fw_version, 3340e2cb1decSSalil Mehta .set_vlan_filter = hclgevf_set_vlan_filter, 3341fa6a262aSJian Shen .enable_vlan_filter = hclgevf_enable_vlan_filter, 3342b2641e2aSYunsheng Lin .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 33436d4c3981SSalil Mehta .reset_event = hclgevf_reset_event, 3344720bd583SHuazhong Tan .set_default_reset_request = hclgevf_set_def_reset_request, 33454093d1a2SGuangbin Huang .set_channels = hclgevf_set_channels, 3346849e4607SPeng Li .get_channels = hclgevf_get_channels, 3347cc719218SPeng Li .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 33481600c3e5SJian Shen .get_regs_len = hclgevf_get_regs_len, 33491600c3e5SJian Shen .get_regs = hclgevf_get_regs, 3350175ec96bSFuyun Liang .get_status = hclgevf_get_status, 33514a152de9SFuyun Liang .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 3352c136b884SPeng Li .get_media_type = hclgevf_get_media_type, 33534d60291bSHuazhong Tan .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 33544d60291bSHuazhong Tan .ae_dev_resetting = hclgevf_ae_dev_resetting, 33554d60291bSHuazhong Tan .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 33565c9f6b39SPeng Li .set_gro_en = hclgevf_gro_en, 3357818f1675SYunsheng Lin .set_mtu = hclgevf_set_mtu, 33580c29d191Sliuzhongzhu .get_global_queue_id = hclgevf_get_qid_global, 33598cdb992fSJian Shen .set_timer_task = hclgevf_set_timer_task, 33609194d18bSliuzhongzhu .get_link_mode = hclgevf_get_link_mode, 3361e196ec75SJian Shen .set_promisc_mode = hclgevf_set_promisc_mode, 3362c631c696SJian Shen .request_update_promisc_mode = hclgevf_request_update_promisc_mode, 3363fe735c84SHuazhong Tan .get_cmdq_stat = hclgevf_get_cmdq_stat, 3364e2cb1decSSalil Mehta }; 3365e2cb1decSSalil Mehta 3366e2cb1decSSalil Mehta static struct hnae3_ae_algo ae_algovf = { 3367e2cb1decSSalil Mehta .ops = &hclgevf_ops, 3368e2cb1decSSalil Mehta .pdev_id_table = ae_algovf_pci_tbl, 3369e2cb1decSSalil Mehta }; 3370e2cb1decSSalil Mehta 3371134a4647SXiu Jianfeng static int __init hclgevf_init(void) 3372e2cb1decSSalil Mehta { 3373e2cb1decSSalil Mehta pr_info("%s is initializing\n", HCLGEVF_NAME); 3374e2cb1decSSalil Mehta 3375f29da408SYufeng Mo hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME); 33760ea68902SYunsheng 
Lin if (!hclgevf_wq) { 33770ea68902SYunsheng Lin pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 33780ea68902SYunsheng Lin return -ENOMEM; 33790ea68902SYunsheng Lin } 33800ea68902SYunsheng Lin 3381854cf33aSFuyun Liang hnae3_register_ae_algo(&ae_algovf); 3382854cf33aSFuyun Liang 3383854cf33aSFuyun Liang return 0; 3384e2cb1decSSalil Mehta } 3385e2cb1decSSalil Mehta 3386134a4647SXiu Jianfeng static void __exit hclgevf_exit(void) 3387e2cb1decSSalil Mehta { 3388e2cb1decSSalil Mehta hnae3_unregister_ae_algo(&ae_algovf); 33890ea68902SYunsheng Lin destroy_workqueue(hclgevf_wq); 3390e2cb1decSSalil Mehta } 3391e2cb1decSSalil Mehta module_init(hclgevf_init); 3392e2cb1decSSalil Mehta module_exit(hclgevf_exit); 3393e2cb1decSSalil Mehta 3394e2cb1decSSalil Mehta MODULE_LICENSE("GPL"); 3395e2cb1decSSalil Mehta MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3396e2cb1decSSalil Mehta MODULE_DESCRIPTION("HCLGEVF Driver"); 3397e2cb1decSSalil Mehta MODULE_VERSION(HCLGEVF_MOD_VERSION); 3398