Lines Matching +full:rx +full:- +full:eq in drivers/net/ethernet/ibm/ehea/ehea_main.c

1 // SPDX-License-Identifier: GPL-2.0-or-later
11 * Jan-Bernd Themann <themann@de.ibm.com>
46 static int msg_level = -1;
66 "[2^x - 1], x = [7..14]. Default = "
69 "[2^x - 1], x = [7..14]. Default = "
72 "[2^x - 1], x = [7..14]. Default = "
75 "[2^x - 1], x = [7..14]. Default = "
99 .compatible = "IBM,lhea-ethernet",
136 if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags)) in ehea_schedule_port_reset()
137 schedule_work(&port->reset_task); in ehea_schedule_port_reset()
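The two lines above are the guarded-reset idiom: a flag bit suppresses scheduling while the port is being torn down (ehea_stop(), lines 2459-2467 below, sets the bit around cancel_work_sync()). A minimal stand-alone sketch of the same pattern; struct my_dev and MY_DISABLE_RESET are illustrative names, not ehea's:

#include <linux/workqueue.h>
#include <linux/bitops.h>

#define MY_DISABLE_RESET 0      /* bit number within dev->flags */

struct my_dev {
        unsigned long flags;
        struct work_struct reset_task;
};

static void my_schedule_reset(struct my_dev *dev)
{
        /* Queue the reset only if teardown has not disabled it. */
        if (!test_bit(MY_DISABLE_RESET, &dev->flags))
                schedule_work(&dev->reset_task);
}

static void my_stop(struct my_dev *dev)
{
        /* Block new resets, then wait for any in-flight reset to finish. */
        set_bit(MY_DISABLE_RESET, &dev->flags);
        cancel_work_sync(&dev->reset_task);
        clear_bit(MY_DISABLE_RESET, &dev->flags);
}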
157 struct ehea_port *port = adapter->port[k]; in ehea_update_firmware_handles()
159 if (!port || (port->state != EHEA_PORT_UP)) in ehea_update_firmware_handles()
163 num_portres += port->num_def_qps; in ehea_update_firmware_handles()
183 struct ehea_port *port = adapter->port[k]; in ehea_update_firmware_handles()
185 if (!port || (port->state != EHEA_PORT_UP) || in ehea_update_firmware_handles()
189 for (l = 0; l < port->num_def_qps; l++) { in ehea_update_firmware_handles()
190 struct ehea_port_res *pr = &port->port_res[l]; in ehea_update_firmware_handles()
192 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
193 arr[i++].fwh = pr->qp->fw_handle; in ehea_update_firmware_handles()
194 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
195 arr[i++].fwh = pr->send_cq->fw_handle; in ehea_update_firmware_handles()
196 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
197 arr[i++].fwh = pr->recv_cq->fw_handle; in ehea_update_firmware_handles()
198 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
199 arr[i++].fwh = pr->eq->fw_handle; in ehea_update_firmware_handles()
200 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
201 arr[i++].fwh = pr->send_mr.handle; in ehea_update_firmware_handles()
202 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
203 arr[i++].fwh = pr->recv_mr.handle; in ehea_update_firmware_handles()
205 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
206 arr[i++].fwh = port->qp_eq->fw_handle; in ehea_update_firmware_handles()
207 num_ports--; in ehea_update_firmware_handles()
210 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
211 arr[i++].fwh = adapter->neq->fw_handle; in ehea_update_firmware_handles()
213 if (adapter->mr.handle) { in ehea_update_firmware_handles()
214 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
215 arr[i++].fwh = adapter->mr.handle; in ehea_update_firmware_handles()
217 num_adapters--; in ehea_update_firmware_handles()
243 struct ehea_port *port = adapter->port[k]; in ehea_update_bcmc_registrations()
245 if (!port || (port->state != EHEA_PORT_UP)) in ehea_update_bcmc_registrations()
250 list_for_each_entry(mc_entry, &port->mc_list->list, list) in ehea_update_bcmc_registrations()
263 struct ehea_port *port = adapter->port[k]; in ehea_update_bcmc_registrations()
265 if (!port || (port->state != EHEA_PORT_UP)) in ehea_update_bcmc_registrations()
271 arr[i].adh = adapter->handle; in ehea_update_bcmc_registrations()
272 arr[i].port_id = port->logical_port_id; in ehea_update_bcmc_registrations()
275 arr[i++].macaddr = port->mac_addr; in ehea_update_bcmc_registrations()
277 arr[i].adh = adapter->handle; in ehea_update_bcmc_registrations()
278 arr[i].port_id = port->logical_port_id; in ehea_update_bcmc_registrations()
281 arr[i++].macaddr = port->mac_addr; in ehea_update_bcmc_registrations()
282 num_registrations -= 2; in ehea_update_bcmc_registrations()
285 &port->mc_list->list, list) { in ehea_update_bcmc_registrations()
289 arr[i].adh = adapter->handle; in ehea_update_bcmc_registrations()
290 arr[i].port_id = port->logical_port_id; in ehea_update_bcmc_registrations()
293 if (mc_entry->macaddr == 0) in ehea_update_bcmc_registrations()
295 arr[i++].macaddr = mc_entry->macaddr; in ehea_update_bcmc_registrations()
297 arr[i].adh = adapter->handle; in ehea_update_bcmc_registrations()
298 arr[i].port_id = port->logical_port_id; in ehea_update_bcmc_registrations()
301 if (mc_entry->macaddr == 0) in ehea_update_bcmc_registrations()
303 arr[i++].macaddr = mc_entry->macaddr; in ehea_update_bcmc_registrations()
304 num_registrations -= 2; in ehea_update_bcmc_registrations()
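Both passes above walk the port's multicast list with list_for_each_entry(): the first pass (lines 243-250) sizes the registration array, the second (lines 263 onward) fills it. The bare list-walk shape, with an illustrative entry type:

#include <linux/list.h>
#include <linux/types.h>

struct my_mc {
        struct list_head list;
        u64 macaddr;
};

static int count_mc_entries(struct list_head *head)
{
        struct my_mc *mc;
        int n = 0;

        list_for_each_entry(mc, head, list)
                n++;
        return n;
}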
324 for (i = 0; i < port->num_def_qps; i++) { in ehea_get_stats64()
325 rx_packets += port->port_res[i].rx_packets; in ehea_get_stats64()
326 rx_bytes += port->port_res[i].rx_bytes; in ehea_get_stats64()
329 for (i = 0; i < port->num_def_qps; i++) { in ehea_get_stats64()
330 tx_packets += port->port_res[i].tx_packets; in ehea_get_stats64()
331 tx_bytes += port->port_res[i].tx_bytes; in ehea_get_stats64()
334 stats->tx_packets = tx_packets; in ehea_get_stats64()
335 stats->rx_bytes = rx_bytes; in ehea_get_stats64()
336 stats->tx_bytes = tx_bytes; in ehea_get_stats64()
337 stats->rx_packets = rx_packets; in ehea_get_stats64()
339 stats->multicast = port->stats.multicast; in ehea_get_stats64()
340 stats->rx_errors = port->stats.rx_errors; in ehea_get_stats64()
347 struct net_device *dev = port->netdev; in ehea_update_stats()
348 struct rtnl_link_stats64 *stats = &port->stats; in ehea_update_stats()
358 hret = ehea_h_query_ehea_port(port->adapter->handle, in ehea_update_stats()
359 port->logical_port_id, in ehea_update_stats()
369 stats->multicast = cb2->rxmcp; in ehea_update_stats()
370 stats->rx_errors = cb2->rxuerr; in ehea_update_stats()
375 schedule_delayed_work(&port->stats_work, in ehea_update_stats()
381 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_refill_rq1()
382 struct net_device *dev = pr->port->netdev; in ehea_refill_rq1()
383 int max_index_mask = pr->rq1_skba.len - 1; in ehea_refill_rq1()
384 int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes; in ehea_refill_rq1()
388 pr->rq1_skba.os_skbs = 0; in ehea_refill_rq1()
392 pr->rq1_skba.index = index; in ehea_refill_rq1()
393 pr->rq1_skba.os_skbs = fill_wqes; in ehea_refill_rq1()
402 pr->rq1_skba.os_skbs = fill_wqes - i; in ehea_refill_rq1()
406 index--; in ehea_refill_rq1()
415 ehea_update_rq1a(pr->qp, adder); in ehea_refill_rq1()
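ehea_refill_rq1() above walks its skb ring with a power-of-two mask (max_index_mask = len - 1) rather than a modulo, as do the other queue-index updates in this file. The indexing trick in isolation; len must be a power of two:

/* Advance a ring index with wraparound; len must be a power of 2. */
static inline unsigned int ring_next(unsigned int index, unsigned int len)
{
        return (index + 1) & (len - 1); /* equivalent to (index + 1) % len */
}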
420 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_init_fill_rq1()
421 struct net_device *dev = pr->port->netdev; in ehea_init_fill_rq1()
424 if (nr_rq1a > pr->rq1_skba.len) { in ehea_init_fill_rq1()
435 ehea_update_rq1a(pr->qp, i - 1); in ehea_init_fill_rq1()
442 struct net_device *dev = pr->port->netdev; in ehea_refill_rq_def()
443 struct ehea_qp *qp = pr->qp; in ehea_refill_rq_def()
444 struct sk_buff **skb_arr = q_skba->arr; in ehea_refill_rq_def()
450 fill_wqes = q_skba->os_skbs + num_wqes; in ehea_refill_rq_def()
451 q_skba->os_skbs = 0; in ehea_refill_rq_def()
454 q_skba->os_skbs = fill_wqes; in ehea_refill_rq_def()
458 index = q_skba->index; in ehea_refill_rq_def()
459 max_index_mask = q_skba->len - 1; in ehea_refill_rq_def()
466 q_skba->os_skbs = fill_wqes - i; in ehea_refill_rq_def()
467 if (q_skba->os_skbs == q_skba->len - 2) { in ehea_refill_rq_def()
468 netdev_info(pr->port->netdev, in ehea_refill_rq_def()
469 "rq%i ran dry - no mem for skb\n", in ehea_refill_rq_def()
471 ret = -ENOMEM; in ehea_refill_rq_def()
477 tmp_addr = ehea_map_vaddr(skb->data); in ehea_refill_rq_def()
478 if (tmp_addr == -1) { in ehea_refill_rq_def()
480 q_skba->os_skbs = fill_wqes - i; in ehea_refill_rq_def()
486 rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type) in ehea_refill_rq_def()
488 rwqe->sg_list[0].l_key = pr->recv_mr.lkey; in ehea_refill_rq_def()
489 rwqe->sg_list[0].vaddr = tmp_addr; in ehea_refill_rq_def()
490 rwqe->sg_list[0].len = packet_size; in ehea_refill_rq_def()
491 rwqe->data_segments = 1; in ehea_refill_rq_def()
498 q_skba->index = index; in ehea_refill_rq_def()
505 ehea_update_rq2a(pr->qp, adder); in ehea_refill_rq_def()
507 ehea_update_rq3a(pr->qp, adder); in ehea_refill_rq_def()
515 return ehea_refill_rq_def(pr, &pr->rq2_skba, 2, in ehea_refill_rq2()
523 return ehea_refill_rq_def(pr, &pr->rq3_skba, 3, in ehea_refill_rq3()
530 *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5; in ehea_check_cqe()
531 if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0) in ehea_check_cqe()
533 if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) && in ehea_check_cqe()
534 (cqe->header_length == 0)) in ehea_check_cqe()
536 return -EINVAL; in ehea_check_cqe()
543 int length = cqe->num_bytes_transfered - 4; /*remove CRC */ in ehea_fill_skb()
546 skb->protocol = eth_type_trans(skb, dev); in ehea_fill_skb()
550 if (cqe->status & EHEA_CQE_BLIND_CKSUM) { in ehea_fill_skb()
551 skb->ip_summed = CHECKSUM_COMPLETE; in ehea_fill_skb()
552 skb->csum = csum_unfold(~cqe->inet_checksum_value); in ehea_fill_skb()
554 skb->ip_summed = CHECKSUM_UNNECESSARY; in ehea_fill_skb()
556 skb_record_rx_queue(skb, pr - &pr->port->port_res[0]); in ehea_fill_skb()
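ehea_fill_skb() above picks between the two receive-checksum modes: CHECKSUM_COMPLETE hands the stack the hardware's raw one's-complement sum to verify itself, while CHECKSUM_UNNECESSARY asserts the hardware already validated it. The same decision for a hypothetical descriptor; DESC_RAW_CSUM and the argument layout are assumptions, not ehea's:

#include <linux/skbuff.h>
#include <linux/bits.h>
#include <net/checksum.h>

#define DESC_RAW_CSUM BIT(0)    /* hypothetical status flag */

static void my_rx_csum(struct sk_buff *skb, u32 status, __sum16 hw_csum)
{
        if (status & DESC_RAW_CSUM) {
                /* Hardware supplied the folded sum; unfold it for the stack. */
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold(~hw_csum);
        } else {
                /* Hardware already verified the checksum. */
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
}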
563 int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); in get_skb_by_index()
569 x &= (arr_len - 1); in get_skb_by_index()
576 pref = (skb_array[x]->data); in get_skb_by_index()
596 x &= (arr_len - 1); in get_skb_by_index_ll()
603 pref = (skb_array[x]->data); in get_skb_by_index_ll()
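get_skb_by_index() and get_skb_by_index_ll() above prefetch the next ring slot and the packet data it points at before returning the current skb, hiding cache-miss latency across iterations. A simplified sketch of that pattern (the real code also prefetches additional cache lines):

#include <linux/prefetch.h>
#include <linux/skbuff.h>

/* Return slot x and warm the cache for the slot after it. */
static struct sk_buff *ring_get(struct sk_buff **arr, int len, int x)
{
        int next = (x + 1) & (len - 1);

        prefetchw(&arr[next]);                  /* slot pointer, written soon */
        if (arr[next])
                prefetch(arr[next]->data);      /* first packet bytes */
        return arr[x];
}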
619 if (cqe->status & EHEA_CQE_STAT_ERR_TCP) in ehea_treat_poll_error()
620 pr->p_stats.err_tcp_cksum++; in ehea_treat_poll_error()
621 if (cqe->status & EHEA_CQE_STAT_ERR_IP) in ehea_treat_poll_error()
622 pr->p_stats.err_ip_cksum++; in ehea_treat_poll_error()
623 if (cqe->status & EHEA_CQE_STAT_ERR_CRC) in ehea_treat_poll_error()
624 pr->p_stats.err_frame_crc++; in ehea_treat_poll_error()
628 skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe); in ehea_treat_poll_error()
632 skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe); in ehea_treat_poll_error()
636 if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) { in ehea_treat_poll_error()
637 if (netif_msg_rx_err(pr->port)) { in ehea_treat_poll_error()
639 pr->qp->init_attr.qp_nr); in ehea_treat_poll_error()
642 ehea_schedule_port_reset(pr->port); in ehea_treat_poll_error()
653 struct ehea_port *port = pr->port; in ehea_proc_rwqes()
654 struct ehea_qp *qp = pr->qp; in ehea_proc_rwqes()
657 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_proc_rwqes()
658 struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr; in ehea_proc_rwqes()
659 struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr; in ehea_proc_rwqes()
660 int skb_arr_rq1_len = pr->rq1_skba.len; in ehea_proc_rwqes()
661 int skb_arr_rq2_len = pr->rq2_skba.len; in ehea_proc_rwqes()
662 int skb_arr_rq3_len = pr->rq3_skba.len; in ehea_proc_rwqes()
696 cqe->num_bytes_transfered - 4); in ehea_proc_rwqes()
722 processed_bytes += skb->len; in ehea_proc_rwqes()
724 if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) in ehea_proc_rwqes()
726 cqe->vlan_tag); in ehea_proc_rwqes()
728 napi_gro_receive(&pr->napi, skb); in ehea_proc_rwqes()
730 pr->p_stats.poll_receive_errors++; in ehea_proc_rwqes()
740 pr->rx_packets += processed; in ehea_proc_rwqes()
741 pr->rx_bytes += processed_bytes; in ehea_proc_rwqes()
756 for (i = 0; i < port->num_def_qps; i++) { in reset_sq_restart_flag()
757 struct ehea_port_res *pr = &port->port_res[i]; in reset_sq_restart_flag()
758 pr->sq_restart_flag = 0; in reset_sq_restart_flag()
760 wake_up(&port->restart_wq); in reset_sq_restart_flag()
769 for (i = 0; i < port->num_def_qps; i++) { in check_sqs()
770 struct ehea_port_res *pr = &port->port_res[i]; in check_sqs()
772 swqe = ehea_get_swqe(pr->qp, &swqe_index); in check_sqs()
774 atomic_dec(&pr->swqe_avail); in check_sqs()
776 swqe->tx_control |= EHEA_SWQE_PURGE; in check_sqs()
777 swqe->wr_id = SWQE_RESTART_CHECK; in check_sqs()
778 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; in check_sqs()
779 swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT; in check_sqs()
780 swqe->immediate_data_length = 80; in check_sqs()
782 ehea_post_swqe(pr->qp, swqe); in check_sqs()
784 ret = wait_event_timeout(port->restart_wq, in check_sqs()
785 pr->sq_restart_flag == 0, in check_sqs()
790 ehea_schedule_port_reset(pr->port); in check_sqs()
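check_sqs() above posts a marker SWQE (wr_id == SWQE_RESTART_CHECK) and sleeps on port->restart_wq until the completion side (reset_sq_restart_flag(), lines 756-760, paired with the wr_id check at line 816) clears the flag and wakes it, escalating to a port reset on timeout. The generic sleep/wake pairing, with illustrative parameters:

#include <linux/wait.h>
#include <linux/jiffies.h>

/* Waiter: sleep until *flag drops to 0, or give up after 5 s. */
static bool wait_for_marker(wait_queue_head_t *wq, int *flag)
{
        return wait_event_timeout(*wq, *flag == 0,
                                  msecs_to_jiffies(5000)) != 0;
}

/* Completion path: clear the flag, then wake the waiter. */
static void marker_completed(wait_queue_head_t *wq, int *flag)
{
        *flag = 0;
        wake_up(wq);
}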
800 struct ehea_cq *send_cq = pr->send_cq; in ehea_proc_cqes()
806 struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev, in ehea_proc_cqes()
807 pr - &pr->port->port_res[0]); in ehea_proc_cqes()
816 if (cqe->wr_id == SWQE_RESTART_CHECK) { in ehea_proc_cqes()
817 pr->sq_restart_flag = 1; in ehea_proc_cqes()
822 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { in ehea_proc_cqes()
824 cqe->status); in ehea_proc_cqes()
826 if (netif_msg_tx_err(pr->port)) in ehea_proc_cqes()
829 if (cqe->status & EHEA_CQE_STAT_RESET_MASK) { in ehea_proc_cqes()
831 ehea_schedule_port_reset(pr->port); in ehea_proc_cqes()
836 if (netif_msg_tx_done(pr->port)) in ehea_proc_cqes()
839 if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id) in ehea_proc_cqes()
842 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); in ehea_proc_cqes()
843 skb = pr->sq_skba.arr[index]; in ehea_proc_cqes()
845 pr->sq_skba.arr[index] = NULL; in ehea_proc_cqes()
848 swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); in ehea_proc_cqes()
849 quota--; in ehea_proc_cqes()
855 atomic_add(swqe_av, &pr->swqe_avail); in ehea_proc_cqes()
858 (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) { in ehea_proc_cqes()
861 (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th)) in ehea_proc_cqes()
866 wake_up(&pr->port->swqe_avail_wq); in ehea_proc_cqes()
877 struct net_device *dev = pr->port->netdev; in ehea_poll()
881 int rx = 0; in ehea_poll() local
884 rx += ehea_proc_rwqes(dev, pr, budget - rx); in ehea_poll()
886 while (rx != budget) { in ehea_poll()
888 ehea_reset_cq_ep(pr->recv_cq); in ehea_poll()
889 ehea_reset_cq_ep(pr->send_cq); in ehea_poll()
890 ehea_reset_cq_n1(pr->recv_cq); in ehea_poll()
891 ehea_reset_cq_n1(pr->send_cq); in ehea_poll()
893 cqe = ehea_poll_rq1(pr->qp, &wqe_index); in ehea_poll()
894 cqe_skb = ehea_poll_cq(pr->send_cq); in ehea_poll()
897 return rx; in ehea_poll()
900 return rx; in ehea_poll()
903 rx += ehea_proc_rwqes(dev, pr, budget - rx); in ehea_poll()
906 return rx; in ehea_poll()
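ehea_poll() above is the classic NAPI close-and-recheck dance: when the budget is not exhausted, complete the poll, re-arm the completion-queue events, then look once more for work that raced in and reschedule if any is found. A generic sketch using the NAPI API of this driver's era; my_dev and its helpers are stand-ins, not ehea functions:

#include <linux/netdevice.h>

struct my_dev {
        struct napi_struct napi;
        /* ... device state ... */
};

/* Hypothetical helpers supplied elsewhere by the driver. */
static int my_clean_rx(struct my_dev *dev, int budget);
static void my_enable_events(struct my_dev *dev);
static bool my_work_pending(struct my_dev *dev);

static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_dev *dev = container_of(napi, struct my_dev, napi);
        int done = my_clean_rx(dev, budget);

        while (done < budget) {
                napi_complete(napi);
                my_enable_events(dev);          /* re-arm device interrupts */

                /* Recheck: an event may have arrived before the re-arm. */
                if (!my_work_pending(dev))
                        return done;
                if (!napi_reschedule(napi))     /* lost the race to another poller */
                        return done;

                done += my_clean_rx(dev, budget - done);
        }
        return done;    /* budget used up: NAPI keeps us scheduled */
}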
913 napi_schedule(&pr->napi); in ehea_recv_irq_handler()
927 eqe = ehea_poll_eq(port->qp_eq); in ehea_qp_aff_irq_handler()
930 qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry); in ehea_qp_aff_irq_handler()
932 eqe->entry, qp_token); in ehea_qp_aff_irq_handler()
934 qp = port->port_res[qp_token].qp; in ehea_qp_aff_irq_handler()
936 resource_type = ehea_error_data(port->adapter, qp->fw_handle, in ehea_qp_aff_irq_handler()
944 reset_port = 1; /* Reset in case of CQ or EQ error */ in ehea_qp_aff_irq_handler()
946 eqe = ehea_poll_eq(port->qp_eq); in ehea_qp_aff_irq_handler()
963 if (adapter->port[i]) in ehea_get_port()
964 if (adapter->port[i]->logical_port_id == logical_port) in ehea_get_port()
965 return adapter->port[i]; in ehea_get_port()
979 ret = -ENOMEM; in ehea_sense_port_attr()
983 hret = ehea_h_query_ehea_port(port->adapter->handle, in ehea_sense_port_attr()
984 port->logical_port_id, H_PORT_CB0, in ehea_sense_port_attr()
988 ret = -EIO; in ehea_sense_port_attr()
993 port->mac_addr = cb0->port_mac_addr << 16; in ehea_sense_port_attr()
995 if (!is_valid_ether_addr((u8 *)&port->mac_addr)) { in ehea_sense_port_attr()
996 ret = -EADDRNOTAVAIL; in ehea_sense_port_attr()
1001 switch (cb0->port_speed) { in ehea_sense_port_attr()
1003 port->port_speed = EHEA_SPEED_10M; in ehea_sense_port_attr()
1004 port->full_duplex = 0; in ehea_sense_port_attr()
1007 port->port_speed = EHEA_SPEED_10M; in ehea_sense_port_attr()
1008 port->full_duplex = 1; in ehea_sense_port_attr()
1011 port->port_speed = EHEA_SPEED_100M; in ehea_sense_port_attr()
1012 port->full_duplex = 0; in ehea_sense_port_attr()
1015 port->port_speed = EHEA_SPEED_100M; in ehea_sense_port_attr()
1016 port->full_duplex = 1; in ehea_sense_port_attr()
1019 port->port_speed = EHEA_SPEED_1G; in ehea_sense_port_attr()
1020 port->full_duplex = 1; in ehea_sense_port_attr()
1023 port->port_speed = EHEA_SPEED_10G; in ehea_sense_port_attr()
1024 port->full_duplex = 1; in ehea_sense_port_attr()
1027 port->port_speed = 0; in ehea_sense_port_attr()
1028 port->full_duplex = 0; in ehea_sense_port_attr()
1032 port->autoneg = 1; in ehea_sense_port_attr()
1033 port->num_mcs = cb0->num_default_qps; in ehea_sense_port_attr()
1037 port->num_def_qps = cb0->num_default_qps; in ehea_sense_port_attr()
1039 port->num_def_qps = 1; in ehea_sense_port_attr()
1041 if (!port->num_def_qps) { in ehea_sense_port_attr()
1042 ret = -EINVAL; in ehea_sense_port_attr()
1064 ret = -ENOMEM; in ehea_set_portspeed()
1068 cb4->port_speed = port_speed; in ehea_set_portspeed()
1070 netif_carrier_off(port->netdev); in ehea_set_portspeed()
1072 hret = ehea_h_modify_ehea_port(port->adapter->handle, in ehea_set_portspeed()
1073 port->logical_port_id, in ehea_set_portspeed()
1076 port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0; in ehea_set_portspeed()
1078 hret = ehea_h_query_ehea_port(port->adapter->handle, in ehea_set_portspeed()
1079 port->logical_port_id, in ehea_set_portspeed()
1083 switch (cb4->port_speed) { in ehea_set_portspeed()
1085 port->port_speed = EHEA_SPEED_10M; in ehea_set_portspeed()
1086 port->full_duplex = 0; in ehea_set_portspeed()
1089 port->port_speed = EHEA_SPEED_10M; in ehea_set_portspeed()
1090 port->full_duplex = 1; in ehea_set_portspeed()
1093 port->port_speed = EHEA_SPEED_100M; in ehea_set_portspeed()
1094 port->full_duplex = 0; in ehea_set_portspeed()
1097 port->port_speed = EHEA_SPEED_100M; in ehea_set_portspeed()
1098 port->full_duplex = 1; in ehea_set_portspeed()
1101 port->port_speed = EHEA_SPEED_1G; in ehea_set_portspeed()
1102 port->full_duplex = 1; in ehea_set_portspeed()
1105 port->port_speed = EHEA_SPEED_10G; in ehea_set_portspeed()
1106 port->full_duplex = 1; in ehea_set_portspeed()
1109 port->port_speed = 0; in ehea_set_portspeed()
1110 port->full_duplex = 0; in ehea_set_portspeed()
1115 ret = -EIO; in ehea_set_portspeed()
1120 ret = -EPERM; in ehea_set_portspeed()
1122 ret = -EIO; in ehea_set_portspeed()
1126 if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP)) in ehea_set_portspeed()
1127 netif_carrier_on(port->netdev); in ehea_set_portspeed()
1149 dev = port->netdev; in ehea_parse_eqe()
1164 port->port_speed, in ehea_parse_eqe()
1165 port->full_duplex == 1 ? in ehea_parse_eqe()
1180 port->phy_link = EHEA_PHY_LINK_UP; in ehea_parse_eqe()
1186 port->phy_link = EHEA_PHY_LINK_DOWN; in ehea_parse_eqe()
1221 eqe = ehea_poll_eq(adapter->neq); in ehea_neq_tasklet()
1225 pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry); in ehea_neq_tasklet()
1226 ehea_parse_eqe(adapter, eqe->entry); in ehea_neq_tasklet()
1227 eqe = ehea_poll_eq(adapter->neq); in ehea_neq_tasklet()
1235 ehea_h_reset_events(adapter->handle, in ehea_neq_tasklet()
1236 adapter->neq->fw_handle, event_mask); in ehea_neq_tasklet()
1242 tasklet_hi_schedule(&adapter->neq_tasklet); in ehea_interrupt_neq()
1250 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; in ehea_fill_port_res()
1252 ehea_init_fill_rq1(pr, pr->rq1_skba.len); in ehea_fill_port_res()
1254 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); in ehea_fill_port_res()
1256 ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1); in ehea_fill_port_res()
1268 snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff", in ehea_reg_interrupts()
1269 dev->name); in ehea_reg_interrupts()
1271 ret = ibmebus_request_irq(port->qp_eq->attr.ist1, in ehea_reg_interrupts()
1273 0, port->int_aff_name, port); in ehea_reg_interrupts()
1276 port->qp_eq->attr.ist1); in ehea_reg_interrupts()
1282 port->qp_eq->attr.ist1); in ehea_reg_interrupts()
1285 for (i = 0; i < port->num_def_qps; i++) { in ehea_reg_interrupts()
1286 pr = &port->port_res[i]; in ehea_reg_interrupts()
1287 snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1, in ehea_reg_interrupts()
1288 "%s-queue%d", dev->name, i); in ehea_reg_interrupts()
1289 ret = ibmebus_request_irq(pr->eq->attr.ist1, in ehea_reg_interrupts()
1291 0, pr->int_send_name, pr); in ehea_reg_interrupts()
1294 i, pr->eq->attr.ist1); in ehea_reg_interrupts()
1299 pr->eq->attr.ist1, i); in ehea_reg_interrupts()
1306 while (--i >= 0) { in ehea_reg_interrupts()
1307 u32 ist = port->port_res[i].eq->attr.ist1; in ehea_reg_interrupts()
1308 ibmebus_free_irq(ist, &port->port_res[i]); in ehea_reg_interrupts()
1312 ibmebus_free_irq(port->qp_eq->attr.ist1, port); in ehea_reg_interrupts()
1313 i = port->num_def_qps; in ehea_reg_interrupts()
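The error path above unwinds with while (--i >= 0), freeing exactly the per-queue IRQs that were registered before the failure, a standard partial-initialization cleanup. The bare shape, with hypothetical register_one()/unregister_one() helpers:

struct my_dev;

static int register_one(struct my_dev *dev, int i);    /* hypothetical */
static void unregister_one(struct my_dev *dev, int i); /* hypothetical */

static int register_all(struct my_dev *dev, int n)
{
        int i, ret;

        for (i = 0; i < n; i++) {
                ret = register_one(dev, i);
                if (ret)
                        goto unwind;
        }
        return 0;

unwind:
        while (--i >= 0)        /* undo only the ones that succeeded */
                unregister_one(dev, i);
        return ret;
}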
1327 for (i = 0; i < port->num_def_qps; i++) { in ehea_free_interrupts()
1328 pr = &port->port_res[i]; in ehea_free_interrupts()
1329 ibmebus_free_irq(pr->eq->attr.ist1, pr); in ehea_free_interrupts()
1332 i, pr->eq->attr.ist1); in ehea_free_interrupts()
1336 ibmebus_free_irq(port->qp_eq->attr.ist1, port); in ehea_free_interrupts()
1339 port->qp_eq->attr.ist1); in ehea_free_interrupts()
1348 ret = -ENOMEM; in ehea_configure_port()
1353 cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1) in ehea_configure_port()
1361 for (i = 0; i < port->num_mcs; i++) in ehea_configure_port()
1363 cb0->default_qpn_arr[i] = in ehea_configure_port()
1364 port->port_res[i].qp->init_attr.qp_nr; in ehea_configure_port()
1366 cb0->default_qpn_arr[i] = in ehea_configure_port()
1367 port->port_res[0].qp->init_attr.qp_nr; in ehea_configure_port()
1375 hret = ehea_h_modify_ehea_port(port->adapter->handle, in ehea_configure_port()
1376 port->logical_port_id, in ehea_configure_port()
1378 ret = -EIO; in ehea_configure_port()
1393 struct ehea_adapter *adapter = pr->port->adapter; in ehea_gen_smrs()
1395 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr); in ehea_gen_smrs()
1399 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr); in ehea_gen_smrs()
1406 ehea_rem_mr(&pr->send_mr); in ehea_gen_smrs()
1409 return -EIO; in ehea_gen_smrs()
1414 if ((ehea_rem_mr(&pr->send_mr)) || in ehea_rem_smrs()
1415 (ehea_rem_mr(&pr->recv_mr))) in ehea_rem_smrs()
1416 return -EIO; in ehea_rem_smrs()
1425 q_skba->arr = vzalloc(arr_size); in ehea_init_q_skba()
1426 if (!q_skba->arr) in ehea_init_q_skba()
1427 return -ENOMEM; in ehea_init_q_skba()
1429 q_skba->len = max_q_entries; in ehea_init_q_skba()
1430 q_skba->index = 0; in ehea_init_q_skba()
1431 q_skba->os_skbs = 0; in ehea_init_q_skba()
1439 struct ehea_adapter *adapter = port->adapter; in ehea_init_port_res()
1442 int ret = -EIO; in ehea_init_port_res()
1445 tx_bytes = pr->tx_bytes; in ehea_init_port_res()
1446 tx_packets = pr->tx_packets; in ehea_init_port_res()
1447 rx_bytes = pr->rx_bytes; in ehea_init_port_res()
1448 rx_packets = pr->rx_packets; in ehea_init_port_res()
1452 pr->tx_bytes = tx_bytes; in ehea_init_port_res()
1453 pr->tx_packets = tx_packets; in ehea_init_port_res()
1454 pr->rx_bytes = rx_bytes; in ehea_init_port_res()
1455 pr->rx_packets = rx_packets; in ehea_init_port_res()
1457 pr->port = port; in ehea_init_port_res()
1459 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); in ehea_init_port_res()
1460 if (!pr->eq) { in ehea_init_port_res()
1461 pr_err("create_eq failed (eq)\n"); in ehea_init_port_res()
1465 pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq, in ehea_init_port_res()
1466 pr->eq->fw_handle, in ehea_init_port_res()
1467 port->logical_port_id); in ehea_init_port_res()
1468 if (!pr->recv_cq) { in ehea_init_port_res()
1473 pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq, in ehea_init_port_res()
1474 pr->eq->fw_handle, in ehea_init_port_res()
1475 port->logical_port_id); in ehea_init_port_res()
1476 if (!pr->send_cq) { in ehea_init_port_res()
1483 pr->send_cq->attr.act_nr_of_cqes, in ehea_init_port_res()
1484 pr->recv_cq->attr.act_nr_of_cqes); in ehea_init_port_res()
1488 ret = -ENOMEM; in ehea_init_port_res()
1493 init_attr->low_lat_rq1 = 1; in ehea_init_port_res()
1494 init_attr->signalingtype = 1; /* generate CQE if specified in WQE */ in ehea_init_port_res()
1495 init_attr->rq_count = 3; in ehea_init_port_res()
1496 init_attr->qp_token = queue_token; in ehea_init_port_res()
1497 init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq; in ehea_init_port_res()
1498 init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1; in ehea_init_port_res()
1499 init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2; in ehea_init_port_res()
1500 init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3; in ehea_init_port_res()
1501 init_attr->wqe_size_enc_sq = EHEA_SG_SQ; in ehea_init_port_res()
1502 init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1; in ehea_init_port_res()
1503 init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2; in ehea_init_port_res()
1504 init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3; in ehea_init_port_res()
1505 init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD; in ehea_init_port_res()
1506 init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD; in ehea_init_port_res()
1507 init_attr->port_nr = port->logical_port_id; in ehea_init_port_res()
1508 init_attr->send_cq_handle = pr->send_cq->fw_handle; in ehea_init_port_res()
1509 init_attr->recv_cq_handle = pr->recv_cq->fw_handle; in ehea_init_port_res()
1510 init_attr->aff_eq_handle = port->qp_eq->fw_handle; in ehea_init_port_res()
1512 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); in ehea_init_port_res()
1513 if (!pr->qp) { in ehea_init_port_res()
1515 ret = -EIO; in ehea_init_port_res()
1521 init_attr->qp_nr, in ehea_init_port_res()
1522 init_attr->act_nr_send_wqes, in ehea_init_port_res()
1523 init_attr->act_nr_rwqes_rq1, in ehea_init_port_res()
1524 init_attr->act_nr_rwqes_rq2, in ehea_init_port_res()
1525 init_attr->act_nr_rwqes_rq3); in ehea_init_port_res()
1527 pr->sq_skba_size = init_attr->act_nr_send_wqes + 1; in ehea_init_port_res()
1529 ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size); in ehea_init_port_res()
1530 ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1); in ehea_init_port_res()
1531 ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1); in ehea_init_port_res()
1532 ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1); in ehea_init_port_res()
1536 pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10; in ehea_init_port_res()
1538 ret = -EIO; in ehea_init_port_res()
1542 atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1); in ehea_init_port_res()
1546 netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64); in ehea_init_port_res()
1553 vfree(pr->sq_skba.arr); in ehea_init_port_res()
1554 vfree(pr->rq1_skba.arr); in ehea_init_port_res()
1555 vfree(pr->rq2_skba.arr); in ehea_init_port_res()
1556 vfree(pr->rq3_skba.arr); in ehea_init_port_res()
1557 ehea_destroy_qp(pr->qp); in ehea_init_port_res()
1558 ehea_destroy_cq(pr->send_cq); in ehea_init_port_res()
1559 ehea_destroy_cq(pr->recv_cq); in ehea_init_port_res()
1560 ehea_destroy_eq(pr->eq); in ehea_init_port_res()
1569 if (pr->qp) in ehea_clean_portres()
1570 netif_napi_del(&pr->napi); in ehea_clean_portres()
1572 ret = ehea_destroy_qp(pr->qp); in ehea_clean_portres()
1575 ehea_destroy_cq(pr->send_cq); in ehea_clean_portres()
1576 ehea_destroy_cq(pr->recv_cq); in ehea_clean_portres()
1577 ehea_destroy_eq(pr->eq); in ehea_clean_portres()
1579 for (i = 0; i < pr->rq1_skba.len; i++) in ehea_clean_portres()
1580 dev_kfree_skb(pr->rq1_skba.arr[i]); in ehea_clean_portres()
1582 for (i = 0; i < pr->rq2_skba.len; i++) in ehea_clean_portres()
1583 dev_kfree_skb(pr->rq2_skba.arr[i]); in ehea_clean_portres()
1585 for (i = 0; i < pr->rq3_skba.len; i++) in ehea_clean_portres()
1586 dev_kfree_skb(pr->rq3_skba.arr[i]); in ehea_clean_portres()
1588 for (i = 0; i < pr->sq_skba.len; i++) in ehea_clean_portres()
1589 dev_kfree_skb(pr->sq_skba.arr[i]); in ehea_clean_portres()
1591 vfree(pr->rq1_skba.arr); in ehea_clean_portres()
1592 vfree(pr->rq2_skba.arr); in ehea_clean_portres()
1593 vfree(pr->rq3_skba.arr); in ehea_clean_portres()
1594 vfree(pr->sq_skba.arr); in ehea_clean_portres()
1604 u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; in write_swqe2_immediate()
1605 struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry; in write_swqe2_immediate()
1608 swqe->descriptors = 0; in write_swqe2_immediate()
1611 swqe->tx_control |= EHEA_SWQE_TSO; in write_swqe2_immediate()
1612 swqe->mss = skb_shinfo(skb)->gso_size; in write_swqe2_immediate()
1622 swqe->immediate_data_length = immediate_len; in write_swqe2_immediate()
1625 sg1entry->l_key = lkey; in write_swqe2_immediate()
1626 sg1entry->len = skb_data_size - immediate_len; in write_swqe2_immediate()
1627 sg1entry->vaddr = in write_swqe2_immediate()
1628 ehea_map_vaddr(skb->data + immediate_len); in write_swqe2_immediate()
1629 swqe->descriptors++; in write_swqe2_immediate()
1633 swqe->immediate_data_length = skb_data_size; in write_swqe2_immediate()
1644 nfrags = skb_shinfo(skb)->nr_frags; in write_swqe2_data()
1645 sg1entry = &swqe->u.immdata_desc.sg_entry; in write_swqe2_data()
1646 sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list; in write_swqe2_data()
1653 if (swqe->descriptors == 0) { in write_swqe2_data()
1655 frag = &skb_shinfo(skb)->frags[0]; in write_swqe2_data()
1658 sg1entry->l_key = lkey; in write_swqe2_data()
1659 sg1entry->len = skb_frag_size(frag); in write_swqe2_data()
1660 sg1entry->vaddr = in write_swqe2_data()
1662 swqe->descriptors++; in write_swqe2_data()
1668 frag = &skb_shinfo(skb)->frags[i]; in write_swqe2_data()
1669 sgentry = &sg_list[i - sg1entry_contains_frag_data]; in write_swqe2_data()
1671 sgentry->l_key = lkey; in write_swqe2_data()
1672 sgentry->len = skb_frag_size(frag); in write_swqe2_data()
1673 sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag)); in write_swqe2_data()
1674 swqe->descriptors++; in write_swqe2_data()
1687 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, in ehea_broadcast_reg_helper()
1688 port->logical_port_id, in ehea_broadcast_reg_helper()
1689 reg_type, port->mac_addr, 0, hcallid); in ehea_broadcast_reg_helper()
1693 ret = -EIO; in ehea_broadcast_reg_helper()
1699 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, in ehea_broadcast_reg_helper()
1700 port->logical_port_id, in ehea_broadcast_reg_helper()
1701 reg_type, port->mac_addr, 0, hcallid); in ehea_broadcast_reg_helper()
1705 ret = -EIO; in ehea_broadcast_reg_helper()
1719 if (!is_valid_ether_addr(mac_addr->sa_data)) { in ehea_set_mac_addr()
1720 ret = -EADDRNOTAVAIL; in ehea_set_mac_addr()
1727 ret = -ENOMEM; in ehea_set_mac_addr()
1731 memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN); in ehea_set_mac_addr()
1733 cb0->port_mac_addr = cb0->port_mac_addr >> 16; in ehea_set_mac_addr()
1735 hret = ehea_h_modify_ehea_port(port->adapter->handle, in ehea_set_mac_addr()
1736 port->logical_port_id, H_PORT_CB0, in ehea_set_mac_addr()
1739 ret = -EIO; in ehea_set_mac_addr()
1743 memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); in ehea_set_mac_addr()
1746 if (port->state == EHEA_PORT_UP) { in ehea_set_mac_addr()
1752 port->mac_addr = cb0->port_mac_addr << 16; in ehea_set_mac_addr()
1755 if (port->state == EHEA_PORT_UP) { in ehea_set_mac_addr()
1787 if (enable == port->promisc) in ehea_promiscuous()
1797 cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0; in ehea_promiscuous()
1799 hret = ehea_h_modify_ehea_port(port->adapter->handle, in ehea_promiscuous()
1800 port->logical_port_id, in ehea_promiscuous()
1807 port->promisc = enable; in ehea_promiscuous()
1822 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, in ehea_multicast_reg_helper()
1823 port->logical_port_id, in ehea_multicast_reg_helper()
1832 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, in ehea_multicast_reg_helper()
1833 port->logical_port_id, in ehea_multicast_reg_helper()
1842 struct ehea_mc_list *mc_entry = port->mc_list; in ehea_drop_multicast_list()
1848 list_for_each_safe(pos, temp, &(port->mc_list->list)) { in ehea_drop_multicast_list()
1851 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr, in ehea_drop_multicast_list()
1855 ret = -EIO; in ehea_drop_multicast_list()
1869 if (!port->allmulti) { in ehea_allmulti()
1875 port->allmulti = 1; in ehea_allmulti()
1885 port->allmulti = 0; in ehea_allmulti()
1902 INIT_LIST_HEAD(&ehea_mcl_entry->list); in ehea_add_multicast_entry()
1904 memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN); in ehea_add_multicast_entry()
1906 hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr, in ehea_add_multicast_entry()
1909 list_add(&ehea_mcl_entry->list, &port->mc_list->list); in ehea_add_multicast_entry()
1922 ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC)); in ehea_set_multicast_list()
1924 if (dev->flags & IFF_ALLMULTI) { in ehea_set_multicast_list()
1939 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) { in ehea_set_multicast_list()
1941 port->adapter->max_mc_mac); in ehea_set_multicast_list()
1946 ehea_add_multicast_entry(port, ha->addr); in ehea_set_multicast_list()
1955 swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC; in xmit_common()
1960 if (skb->ip_summed == CHECKSUM_PARTIAL) in xmit_common()
1961 swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM; in xmit_common()
1963 swqe->ip_start = skb_network_offset(skb); in xmit_common()
1964 swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1; in xmit_common()
1966 switch (ip_hdr(skb)->protocol) { in xmit_common()
1968 if (skb->ip_summed == CHECKSUM_PARTIAL) in xmit_common()
1969 swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM; in xmit_common()
1971 swqe->tcp_offset = swqe->ip_end + 1 + in xmit_common()
1976 if (skb->ip_summed == CHECKSUM_PARTIAL) in xmit_common()
1977 swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM; in xmit_common()
1979 swqe->tcp_offset = swqe->ip_end + 1 + in xmit_common()
1988 swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT; in ehea_xmit2()
1998 u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0]; in ehea_xmit3()
2002 if (!skb->data_len) in ehea_xmit3()
2003 skb_copy_from_linear_data(skb, imm_data, skb->len); in ehea_xmit3()
2005 skb_copy_bits(skb, 0, imm_data, skb->len); in ehea_xmit3()
2007 swqe->immediate_data_length = skb->len; in ehea_xmit3()
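ehea_xmit3() above copies the whole packet into the WQE's immediate-data area, taking the cheap linear copy when the skb has no paged fragments (data_len == 0) and the general-purpose skb_copy_bits() otherwise. That distinction in isolation:

#include <linux/skbuff.h>

/* Copy an skb's complete payload to dst, fragments included. */
static void copy_skb_payload(const struct sk_buff *skb, u8 *dst)
{
        if (!skb->data_len)     /* everything is in the linear area */
                skb_copy_from_linear_data(skb, dst, skb->len);
        else                    /* paged fragments present: general copy */
                skb_copy_bits(skb, 0, dst, skb->len);
}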
2020 pr = &port->port_res[skb_get_queue_mapping(skb)]; in ehea_start_xmit()
2023 swqe = ehea_get_swqe(pr->qp, &swqe_index); in ehea_start_xmit()
2025 atomic_dec(&pr->swqe_avail); in ehea_start_xmit()
2028 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT; in ehea_start_xmit()
2029 swqe->vlan_tag = skb_vlan_tag_get(skb); in ehea_start_xmit()
2032 pr->tx_packets++; in ehea_start_xmit()
2033 pr->tx_bytes += skb->len; in ehea_start_xmit()
2035 if (skb->len <= SWQE3_MAX_IMM) { in ehea_start_xmit()
2036 u32 sig_iv = port->sig_comp_iv; in ehea_start_xmit()
2037 u32 swqe_num = pr->swqe_id_counter; in ehea_start_xmit()
2039 swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE) in ehea_start_xmit()
2041 if (pr->swqe_ll_count >= (sig_iv - 1)) { in ehea_start_xmit()
2042 swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL, in ehea_start_xmit()
2044 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; in ehea_start_xmit()
2045 pr->swqe_ll_count = 0; in ehea_start_xmit()
2047 pr->swqe_ll_count += 1; in ehea_start_xmit()
2049 swqe->wr_id = in ehea_start_xmit()
2051 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter) in ehea_start_xmit()
2053 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index); in ehea_start_xmit()
2054 pr->sq_skba.arr[pr->sq_skba.index] = skb; in ehea_start_xmit()
2056 pr->sq_skba.index++; in ehea_start_xmit()
2057 pr->sq_skba.index &= (pr->sq_skba.len - 1); in ehea_start_xmit()
2059 lkey = pr->send_mr.lkey; in ehea_start_xmit()
2061 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; in ehea_start_xmit()
2063 pr->swqe_id_counter += 1; in ehea_start_xmit()
2066 "post swqe on QP %d\n", pr->qp->init_attr.qp_nr); in ehea_start_xmit()
2072 swqe->tx_control |= EHEA_SWQE_PURGE; in ehea_start_xmit()
2075 ehea_post_swqe(pr->qp, swqe); in ehea_start_xmit()
2077 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { in ehea_start_xmit()
2078 pr->p_stats.queue_stopped++; in ehea_start_xmit()
2088 struct ehea_adapter *adapter = port->adapter; in ehea_vlan_rx_add_vid()
2097 err = -ENOMEM; in ehea_vlan_rx_add_vid()
2101 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, in ehea_vlan_rx_add_vid()
2105 err = -EINVAL; in ehea_vlan_rx_add_vid()
2110 cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F))); in ehea_vlan_rx_add_vid()
2112 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, in ehea_vlan_rx_add_vid()
2116 err = -EINVAL; in ehea_vlan_rx_add_vid()
2126 struct ehea_adapter *adapter = port->adapter; in ehea_vlan_rx_kill_vid()
2135 err = -ENOMEM; in ehea_vlan_rx_kill_vid()
2139 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, in ehea_vlan_rx_kill_vid()
2143 err = -EINVAL; in ehea_vlan_rx_kill_vid()
2148 cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F))); in ehea_vlan_rx_kill_vid()
2150 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, in ehea_vlan_rx_kill_vid()
2154 err = -EINVAL; in ehea_vlan_rx_kill_vid()
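Lines 2110 and 2148 above treat the H_PORT_CB1 VLAN filter as an array of 64-bit words indexed MSB-first: index selects the word (presumably vid / 64, computed earlier outside the matched lines) and vid & 0x3F picks the bit counting down from bit 63. The same arithmetic as a helper; the names are illustrative:

#include <linux/types.h>

/* Set or clear VID's bit in an MSB-first array of u64 filter words. */
static void vlan_filter_update(u64 *filter, u16 vid, bool enable)
{
        unsigned int index = vid / 64;  /* which 64-bit word */
        u64 bit = 0x8000000000000000ULL >> (vid & 0x3F);        /* bit 63 down */

        if (enable)
                filter[index] |= bit;
        else
                filter[index] &= ~bit;
}

For example, vid 0 maps to bit 63 of word 0, and vid 64 to bit 63 of word 1.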
2163 int ret = -EIO; in ehea_activate_qp()
2171 ret = -ENOMEM; in ehea_activate_qp()
2175 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_activate_qp()
2182 cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED; in ehea_activate_qp()
2183 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_activate_qp()
2191 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_activate_qp()
2198 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED; in ehea_activate_qp()
2199 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_activate_qp()
2207 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_activate_qp()
2214 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND; in ehea_activate_qp()
2215 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_activate_qp()
2223 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_activate_qp()
2242 port->qp_eq = ehea_create_eq(port->adapter, eq_type, in ehea_port_res_setup()
2244 if (!port->qp_eq) { in ehea_port_res_setup()
2245 ret = -EINVAL; in ehea_port_res_setup()
2265 ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i); in ehea_port_res_setup()
2270 ret = ehea_init_port_res(port, &port->port_res[i], in ehea_port_res_setup()
2279 while (--i >= 0) in ehea_port_res_setup()
2280 ehea_clean_portres(port, &port->port_res[i]); in ehea_port_res_setup()
2283 ehea_destroy_eq(port->qp_eq); in ehea_port_res_setup()
2292 for (i = 0; i < port->num_def_qps; i++) in ehea_clean_all_portres()
2293 ret |= ehea_clean_portres(port, &port->port_res[i]); in ehea_clean_all_portres()
2295 ret |= ehea_destroy_eq(port->qp_eq); in ehea_clean_all_portres()
2302 if (adapter->active_ports) in ehea_remove_adapter_mr()
2305 ehea_rem_mr(&adapter->mr); in ehea_remove_adapter_mr()
2310 if (adapter->active_ports) in ehea_add_adapter_mr()
2313 return ehea_reg_kernel_mr(adapter, &adapter->mr); in ehea_add_adapter_mr()
2321 if (port->state == EHEA_PORT_UP) in ehea_up()
2324 ret = ehea_port_res_setup(port, port->num_def_qps); in ehea_up()
2343 for (i = 0; i < port->num_def_qps; i++) { in ehea_up()
2344 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp); in ehea_up()
2351 for (i = 0; i < port->num_def_qps; i++) { in ehea_up()
2352 ret = ehea_fill_port_res(&port->port_res[i]); in ehea_up()
2361 ret = -EIO; in ehea_up()
2365 port->state = EHEA_PORT_UP; in ehea_up()
2389 for (i = 0; i < port->num_def_qps; i++) in port_napi_disable()
2390 napi_disable(&port->port_res[i].napi); in port_napi_disable()
2397 for (i = 0; i < port->num_def_qps; i++) in port_napi_enable()
2398 napi_enable(&port->port_res[i].napi); in port_napi_enable()
2406 mutex_lock(&port->port_lock); in ehea_open()
2418 mutex_unlock(&port->port_lock); in ehea_open()
2419 schedule_delayed_work(&port->stats_work, in ehea_open()
2430 if (port->state == EHEA_PORT_DOWN) in ehea_down()
2439 port->state = EHEA_PORT_DOWN; in ehea_down()
2459 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags); in ehea_stop()
2460 cancel_work_sync(&port->reset_task); in ehea_stop()
2461 cancel_delayed_work_sync(&port->stats_work); in ehea_stop()
2462 mutex_lock(&port->port_lock); in ehea_stop()
2466 mutex_unlock(&port->port_lock); in ehea_stop()
2467 clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags); in ehea_stop()
2479 for (i = 0; i < init_attr->act_nr_send_wqes; i++) { in ehea_purge_sq()
2481 swqe->tx_control |= EHEA_SWQE_PURGE; in ehea_purge_sq()
2489 for (i = 0; i < port->num_def_qps; i++) { in ehea_flush_sq()
2490 struct ehea_port_res *pr = &port->port_res[i]; in ehea_flush_sq()
2491 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count; in ehea_flush_sq()
2494 ret = wait_event_timeout(port->swqe_avail_wq, in ehea_flush_sq()
2495 atomic_read(&pr->swqe_avail) >= swqe_max, in ehea_flush_sq()
2508 struct ehea_adapter *adapter = port->adapter; in ehea_stop_qps()
2510 int ret = -EIO; in ehea_stop_qps()
2519 ret = -ENOMEM; in ehea_stop_qps()
2523 for (i = 0; i < (port->num_def_qps); i++) { in ehea_stop_qps()
2524 struct ehea_port_res *pr = &port->port_res[i]; in ehea_stop_qps()
2525 struct ehea_qp *qp = pr->qp; in ehea_stop_qps()
2531 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_stop_qps()
2539 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8; in ehea_stop_qps()
2540 cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED; in ehea_stop_qps()
2542 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_stop_qps()
2551 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_stop_qps()
2579 struct sk_buff **skba_rq2 = pr->rq2_skba.arr; in ehea_update_rqs()
2580 struct sk_buff **skba_rq3 = pr->rq3_skba.arr; in ehea_update_rqs()
2582 u32 lkey = pr->recv_mr.lkey; in ehea_update_rqs()
2588 for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) { in ehea_update_rqs()
2590 rwqe->sg_list[0].l_key = lkey; in ehea_update_rqs()
2591 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id); in ehea_update_rqs()
2594 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); in ehea_update_rqs()
2597 for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) { in ehea_update_rqs()
2599 rwqe->sg_list[0].l_key = lkey; in ehea_update_rqs()
2600 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id); in ehea_update_rqs()
2603 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); in ehea_update_rqs()
2610 struct ehea_adapter *adapter = port->adapter; in ehea_restart_qps()
2621 ret = -ENOMEM; in ehea_restart_qps()
2625 for (i = 0; i < (port->num_def_qps); i++) { in ehea_restart_qps()
2626 struct ehea_port_res *pr = &port->port_res[i]; in ehea_restart_qps()
2627 struct ehea_qp *qp = pr->qp; in ehea_restart_qps()
2638 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_restart_qps()
2646 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8; in ehea_restart_qps()
2647 cb0->qp_ctl_reg |= H_QP_CR_ENABLED; in ehea_restart_qps()
2649 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_restart_qps()
2658 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_restart_qps()
2667 ehea_refill_rq1(pr, pr->rq1_skba.index, 0); in ehea_restart_qps()
2682 struct net_device *dev = port->netdev; in ehea_reset_port()
2685 port->resets++; in ehea_reset_port()
2686 mutex_lock(&port->port_lock); in ehea_reset_port()
2705 mutex_unlock(&port->port_lock); in ehea_reset_port()
2714 pr_info("LPAR memory changed - re-initializing driver\n"); in ehea_rereg_mrs()
2717 if (adapter->active_ports) { in ehea_rereg_mrs()
2720 struct ehea_port *port = adapter->port[i]; in ehea_rereg_mrs()
2726 dev = port->netdev; in ehea_rereg_mrs()
2728 if (dev->flags & IFF_UP) { in ehea_rereg_mrs()
2729 mutex_lock(&port->port_lock); in ehea_rereg_mrs()
2734 mutex_unlock(&port->port_lock); in ehea_rereg_mrs()
2738 mutex_unlock(&port->port_lock); in ehea_rereg_mrs()
2744 ret = ehea_rem_mr(&adapter->mr); in ehea_rereg_mrs()
2746 pr_err("unregister MR failed - driver inoperable!\n"); in ehea_rereg_mrs()
2754 if (adapter->active_ports) { in ehea_rereg_mrs()
2756 ret = ehea_reg_kernel_mr(adapter, &adapter->mr); in ehea_rereg_mrs()
2758 pr_err("register MR failed - driver inoperable!\n"); in ehea_rereg_mrs()
2764 struct ehea_port *port = adapter->port[i]; in ehea_rereg_mrs()
2767 struct net_device *dev = port->netdev; in ehea_rereg_mrs()
2769 if (dev->flags & IFF_UP) { in ehea_rereg_mrs()
2770 mutex_lock(&port->port_lock); in ehea_rereg_mrs()
2779 mutex_unlock(&port->port_lock); in ehea_rereg_mrs()
2784 pr_info("re-initializing driver complete\n"); in ehea_rereg_mrs()
2806 ret = -ENOMEM; in ehea_sense_adapter_attr()
2810 hret = ehea_h_query_ehea(adapter->handle, cb); in ehea_sense_adapter_attr()
2813 ret = -EIO; in ehea_sense_adapter_attr()
2817 adapter->max_mc_mac = cb->max_mc_mac - 1; in ehea_sense_adapter_attr()
2838 ret = -ENOMEM; in ehea_get_jumboframe_status()
2841 hret = ehea_h_query_ehea_port(port->adapter->handle, in ehea_get_jumboframe_status()
2842 port->logical_port_id, in ehea_get_jumboframe_status()
2846 if (cb4->jumbo_frame) in ehea_get_jumboframe_status()
2849 cb4->jumbo_frame = 1; in ehea_get_jumboframe_status()
2850 hret = ehea_h_modify_ehea_port(port->adapter-> in ehea_get_jumboframe_status()
2852 port-> in ehea_get_jumboframe_status()
2861 ret = -EINVAL; in ehea_get_jumboframe_status()
2873 return sprintf(buf, "%d", port->logical_port_id); in ehea_show_port_id()
2881 of_node_put(port->ofdev.dev.of_node); in logical_port_release()
2889 port->ofdev.dev.of_node = of_node_get(dn); in ehea_register_port()
2890 port->ofdev.dev.parent = &port->adapter->ofdev->dev; in ehea_register_port()
2891 port->ofdev.dev.bus = &ibmebus_bus_type; in ehea_register_port()
2893 dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++); in ehea_register_port()
2894 port->ofdev.dev.release = logical_port_release; in ehea_register_port()
2896 ret = of_device_register(&port->ofdev); in ehea_register_port()
2902 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id); in ehea_register_port()
2908 return &port->ofdev.dev; in ehea_register_port()
2911 of_device_unregister(&port->ofdev); in ehea_register_port()
2918 device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id); in ehea_unregister_port()
2919 of_device_unregister(&port->ofdev); in ehea_unregister_port()
2949 ret = -ENOMEM; in ehea_setup_single_port()
2955 mutex_init(&port->port_lock); in ehea_setup_single_port()
2956 port->state = EHEA_PORT_DOWN; in ehea_setup_single_port()
2957 port->sig_comp_iv = sq_entries / 10; in ehea_setup_single_port()
2959 port->adapter = adapter; in ehea_setup_single_port()
2960 port->netdev = dev; in ehea_setup_single_port()
2961 port->logical_port_id = logical_port_id; in ehea_setup_single_port()
2963 port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT); in ehea_setup_single_port()
2965 port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL); in ehea_setup_single_port()
2966 if (!port->mc_list) { in ehea_setup_single_port()
2967 ret = -ENOMEM; in ehea_setup_single_port()
2971 INIT_LIST_HEAD(&port->mc_list->list); in ehea_setup_single_port()
2977 netif_set_real_num_rx_queues(dev, port->num_def_qps); in ehea_setup_single_port()
2978 netif_set_real_num_tx_queues(dev, port->num_def_qps); in ehea_setup_single_port()
2987 memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); in ehea_setup_single_port()
2989 dev->netdev_ops = &ehea_netdev_ops; in ehea_setup_single_port()
2992 dev->hw_features = NETIF_F_SG | NETIF_F_TSO | in ehea_setup_single_port()
2994 dev->features = NETIF_F_SG | NETIF_F_TSO | in ehea_setup_single_port()
2998 dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA | in ehea_setup_single_port()
3000 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; in ehea_setup_single_port()
3002 /* MTU range: 68 - 9022 */ in ehea_setup_single_port()
3003 dev->min_mtu = ETH_MIN_MTU; in ehea_setup_single_port()
3004 dev->max_mtu = EHEA_MAX_PACKET_SIZE; in ehea_setup_single_port()
3006 INIT_WORK(&port->reset_task, ehea_reset_port); in ehea_setup_single_port()
3007 INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats); in ehea_setup_single_port()
3009 init_waitqueue_head(&port->swqe_avail_wq); in ehea_setup_single_port()
3010 init_waitqueue_head(&port->restart_wq); in ehea_setup_single_port()
3025 adapter->active_ports++; in ehea_setup_single_port()
3033 kfree(port->mc_list); in ehea_setup_single_port()
3046 struct ehea_adapter *adapter = port->adapter; in ehea_shutdown_single_port()
3048 cancel_work_sync(&port->reset_task); in ehea_shutdown_single_port()
3049 cancel_delayed_work_sync(&port->stats_work); in ehea_shutdown_single_port()
3050 unregister_netdev(port->netdev); in ehea_shutdown_single_port()
3052 kfree(port->mc_list); in ehea_shutdown_single_port()
3053 free_netdev(port->netdev); in ehea_shutdown_single_port()
3054 adapter->active_ports--; in ehea_shutdown_single_port()
3065 lhea_dn = adapter->ofdev->dev.of_node; in ehea_setup_ports()
3068 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", in ehea_setup_ports()
3078 return -EIO; in ehea_setup_ports()
3081 adapter->port[i] = ehea_setup_single_port(adapter, in ehea_setup_ports()
3084 if (adapter->port[i]) in ehea_setup_ports()
3085 netdev_info(adapter->port[i]->netdev, in ehea_setup_ports()
3102 lhea_dn = adapter->ofdev->dev.of_node; in ehea_get_eth_dn()
3105 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", in ehea_get_eth_dn()
3131 netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n", in ehea_probe_port()
3133 return -EINVAL; in ehea_probe_port()
3140 return -EINVAL; in ehea_probe_port()
3146 return -EIO; in ehea_probe_port()
3155 if (!adapter->port[i]) { in ehea_probe_port()
3156 adapter->port[i] = port; in ehea_probe_port()
3160 netdev_info(port->netdev, "added: (logical port id=%d)\n", in ehea_probe_port()
3164 return -EIO; in ehea_probe_port()
3184 netdev_info(port->netdev, "removed: (logical port id=%d)\n", in ehea_remove_port()
3190 if (adapter->port[i] == port) { in ehea_remove_port()
3191 adapter->port[i] = NULL; in ehea_remove_port()
3197 return -EINVAL; in ehea_remove_port()
3210 int ret = device_create_file(&dev->dev, &dev_attr_probe_port); in ehea_create_device_sysfs()
3214 ret = device_create_file(&dev->dev, &dev_attr_remove_port); in ehea_create_device_sysfs()
3221 device_remove_file(&dev->dev, &dev_attr_probe_port); in ehea_remove_device_sysfs()
3222 device_remove_file(&dev->dev, &dev_attr_remove_port); in ehea_remove_device_sysfs()
3250 fallthrough; /* re-add canceled memory block */ in ehea_mem_notifier()
3255 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) in ehea_mem_notifier()
3263 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) in ehea_mem_notifier()
3371 if (!dev || !dev->dev.of_node) { in ehea_probe_adapter()
3373 return -EINVAL; in ehea_probe_adapter()
3376 adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL); in ehea_probe_adapter()
3378 ret = -ENOMEM; in ehea_probe_adapter()
3379 dev_err(&dev->dev, "no mem for ehea_adapter\n"); in ehea_probe_adapter()
3383 list_add(&adapter->list, &adapter_list); in ehea_probe_adapter()
3385 adapter->ofdev = dev; in ehea_probe_adapter()
3387 adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle", in ehea_probe_adapter()
3390 adapter->handle = *adapter_handle; in ehea_probe_adapter()
3392 if (!adapter->handle) { in ehea_probe_adapter()
3393 dev_err(&dev->dev, "failed getting handle for adapter" in ehea_probe_adapter()
3394 " '%pOF'\n", dev->dev.of_node); in ehea_probe_adapter()
3395 ret = -ENODEV; in ehea_probe_adapter()
3399 adapter->pd = EHEA_PD_ID; in ehea_probe_adapter()
3408 dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret); in ehea_probe_adapter()
3412 adapter->neq = ehea_create_eq(adapter, in ehea_probe_adapter()
3414 if (!adapter->neq) { in ehea_probe_adapter()
3415 ret = -EIO; in ehea_probe_adapter()
3416 dev_err(&dev->dev, "NEQ creation failed\n"); in ehea_probe_adapter()
3420 tasklet_setup(&adapter->neq_tasklet, ehea_neq_tasklet); in ehea_probe_adapter()
3428 dev_err(&dev->dev, "setup_ports failed\n"); in ehea_probe_adapter()
3432 ret = ibmebus_request_irq(adapter->neq->attr.ist1, in ehea_probe_adapter()
3436 dev_err(&dev->dev, "requesting NEQ IRQ failed\n"); in ehea_probe_adapter()
3441 tasklet_hi_schedule(&adapter->neq_tasklet); in ehea_probe_adapter()
3448 if (adapter->port[i]) { in ehea_probe_adapter()
3449 ehea_shutdown_single_port(adapter->port[i]); in ehea_probe_adapter()
3450 adapter->port[i] = NULL; in ehea_probe_adapter()
3457 ehea_destroy_eq(adapter->neq); in ehea_probe_adapter()
3460 list_del(&adapter->list); in ehea_probe_adapter()
3474 if (adapter->port[i]) { in ehea_remove()
3475 ehea_shutdown_single_port(adapter->port[i]); in ehea_remove()
3476 adapter->port[i] = NULL; in ehea_remove()
3481 ibmebus_free_irq(adapter->neq->attr.ist1, adapter); in ehea_remove()
3482 tasklet_kill(&adapter->neq_tasklet); in ehea_remove()
3484 ehea_destroy_eq(adapter->neq); in ehea_remove()
3486 list_del(&adapter->list); in ehea_remove()
3500 ret = -EINVAL; in check_module_parm()
3505 ret = -EINVAL; in check_module_parm()
3510 ret = -EINVAL; in check_module_parm()
3515 ret = -EINVAL; in check_module_parm()