Lines Matching +full:supports +full:- +full:cqe

3  * Copyright (c) 2007-2013 Broadcom Corporation
47 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll); in bnx2x_add_all_napi_cnic()
57 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll); in bnx2x_add_all_napi()
74 * bnx2x_move_fp - move content of the fastpath structure.
80 Makes sure the contents of bp->fp[to].napi are kept
88 struct bnx2x_fastpath *from_fp = &bp->fp[from]; in bnx2x_move_fp()
89 struct bnx2x_fastpath *to_fp = &bp->fp[to]; in bnx2x_move_fp()
90 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from]; in bnx2x_move_fp()
91 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to]; in bnx2x_move_fp()
92 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from]; in bnx2x_move_fp()
93 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; in bnx2x_move_fp()
96 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info; in bnx2x_move_fp()
99 from_fp->napi = to_fp->napi; in bnx2x_move_fp()
103 to_fp->index = to; in bnx2x_move_fp()
108 to_fp->tpa_info = old_tpa_info; in bnx2x_move_fp()
121 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos; in bnx2x_move_fp()
122 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) * in bnx2x_move_fp()
123 (bp)->max_cos; in bnx2x_move_fp()
129 memcpy(&bp->bnx2x_txq[new_txdata_index], in bnx2x_move_fp()
130 &bp->bnx2x_txq[old_txdata_index], in bnx2x_move_fp()
132 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index]; in bnx2x_move_fp()
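
The bnx2x_move_fp() fragment above moves a fastpath structure into a new slot while keeping the destination slot's long-lived NAPI context and TPA buffer array. A minimal, hypothetical sketch of that idiom follows; the struct, field names and move_fp() helper are simplified stand-ins, not the driver's real definitions.

    struct fake_fp {
        int index;          /* logical queue index */
        void *napi;         /* long-lived, slot-bound NAPI context */
        void *tpa_info;     /* long-lived, slot-bound TPA buffer array */
        char payload[64];   /* everything that may simply be moved */
    };

    static void move_fp(struct fake_fp *fp, int from, int to)
    {
        struct fake_fp *src = &fp[from], *dst = &fp[to];
        void *saved_napi = dst->napi;       /* keep the destination's NAPI */
        void *saved_tpa  = dst->tpa_info;   /* keep the destination's TPA pool */

        *dst = *src;                        /* move the rest of the contents */
        dst->index    = to;                 /* re-label the slot */
        dst->napi     = saved_napi;
        dst->tpa_info = saved_tpa;
    }

    int main(void)
    {
        struct fake_fp fps[4] = { { .index = 0 }, { .index = 1 },
                                  { .index = 2 }, { .index = 3 } };

        move_fp(fps, 3, 1);    /* e.g. compact queues after shrinking */
        return 0;
    }
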
136 * bnx2x_fill_fw_str - Fill buffer with FW version string.
149 bnx2x_get_ext_phy_fw_version(&bp->link_params, in bnx2x_fill_fw_str()
151 strscpy(buf, bp->fw_ver, buf_len); in bnx2x_fill_fw_str()
152 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), in bnx2x_fill_fw_str()
154 (bp->common.bc_ver & 0xff0000) >> 16, in bnx2x_fill_fw_str()
155 (bp->common.bc_ver & 0xff00) >> 8, in bnx2x_fill_fw_str()
156 (bp->common.bc_ver & 0xff), in bnx2x_fill_fw_str()
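
bnx2x_fill_fw_str() appends the bootcode version, which the lines above unpack from the top three bytes of bp->common.bc_ver. A self-contained sketch of just that byte extraction (the helper name and the sample value are made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* Format a bootcode version packed as 0x00MMmmpp (major/minor/patch bytes). */
    static void format_bc_ver(char *buf, size_t len, uint32_t bc_ver)
    {
        snprintf(buf, len, " bc %d.%d.%d",
                 (int)((bc_ver & 0xff0000) >> 16),   /* major */
                 (int)((bc_ver & 0xff00) >> 8),      /* minor */
                 (int)(bc_ver & 0xff));              /* patch */
    }

    int main(void)
    {
        char buf[32];

        format_bc_ver(buf, sizeof(buf), 0x070d01);   /* -> " bc 7.13.1" */
        puts(buf);
        return 0;
    }
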
164 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
173 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer in bnx2x_shrink_eth_fp()
176 for (cos = 1; cos < bp->max_cos; cos++) { in bnx2x_shrink_eth_fp()
177 for (i = 0; i < old_eth_num - delta; i++) { in bnx2x_shrink_eth_fp()
178 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_shrink_eth_fp()
179 int new_idx = cos * (old_eth_num - delta) + i; in bnx2x_shrink_eth_fp()
181 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos], in bnx2x_shrink_eth_fp()
183 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx]; in bnx2x_shrink_eth_fp()
188 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
197 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx]; in bnx2x_free_tx_pkt()
200 struct sk_buff *skb = tx_buf->skb; in bnx2x_free_tx_pkt()
201 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; in bnx2x_free_tx_pkt()
206 prefetch(&skb->end); in bnx2x_free_tx_pkt()
208 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", in bnx2x_free_tx_pkt()
209 txdata->txq_index, idx, tx_buf, skb); in bnx2x_free_tx_pkt()
211 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; in bnx2x_free_tx_pkt()
213 nbd = le16_to_cpu(tx_start_bd->nbd) - 1; in bnx2x_free_tx_pkt()
215 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { in bnx2x_free_tx_pkt()
220 new_cons = nbd + tx_buf->first_bd; in bnx2x_free_tx_pkt()
226 --nbd; in bnx2x_free_tx_pkt()
229 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) { in bnx2x_free_tx_pkt()
231 --nbd; in bnx2x_free_tx_pkt()
236 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { in bnx2x_free_tx_pkt()
237 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; in bnx2x_free_tx_pkt()
239 --nbd; in bnx2x_free_tx_pkt()
244 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), in bnx2x_free_tx_pkt()
251 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; in bnx2x_free_tx_pkt()
252 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), in bnx2x_free_tx_pkt()
254 if (--nbd) in bnx2x_free_tx_pkt()
262 (*bytes_compl) += skb->len; in bnx2x_free_tx_pkt()
266 tx_buf->first_bd = 0; in bnx2x_free_tx_pkt()
267 tx_buf->skb = NULL; in bnx2x_free_tx_pkt()
275 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons; in bnx2x_tx_int()
279 if (unlikely(bp->panic)) in bnx2x_tx_int()
280 return -1; in bnx2x_tx_int()
283 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); in bnx2x_tx_int()
284 hw_cons = le16_to_cpu(*txdata->tx_cons_sb); in bnx2x_tx_int()
285 sw_cons = txdata->tx_pkt_cons; in bnx2x_tx_int()
297 txdata->txq_index, hw_cons, sw_cons, pkt_cons); in bnx2x_tx_int()
307 txdata->tx_pkt_cons = sw_cons; in bnx2x_tx_int()
308 txdata->tx_bd_cons = bd_cons; in bnx2x_tx_int()
322 /* Taking tx_lock() is needed to prevent re-enabling the queue in bnx2x_tx_int()
327 * stops the queue->sees fresh tx_bd_cons->releases the queue-> in bnx2x_tx_int()
328 * sends some packets consuming the whole queue again-> in bnx2x_tx_int()
335 (bp->state == BNX2X_STATE_OPEN) && in bnx2x_tx_int()
347 u16 last_max = fp->last_max_sge; in bnx2x_update_last_max_sge()
350 fp->last_max_sge = idx; in bnx2x_update_last_max_sge()
355 struct eth_end_agg_rx_cqe *cqe) in bnx2x_update_sge_prod() argument
357 struct bnx2x *bp = fp->bp; in bnx2x_update_sge_prod()
367 BIT_VEC64_CLEAR_BIT(fp->sge_mask, in bnx2x_update_sge_prod()
368 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i]))); in bnx2x_update_sge_prod()
370 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", in bnx2x_update_sge_prod()
371 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1])); in bnx2x_update_sge_prod()
374 prefetch((void *)(fp->sge_mask)); in bnx2x_update_sge_prod()
376 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1])); in bnx2x_update_sge_prod()
378 last_max = RX_SGE(fp->last_max_sge); in bnx2x_update_sge_prod()
380 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; in bnx2x_update_sge_prod()
388 if (likely(fp->sge_mask[i])) in bnx2x_update_sge_prod()
391 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; in bnx2x_update_sge_prod()
396 fp->rx_sge_prod += delta; in bnx2x_update_sge_prod()
397 /* clear page-end entries */ in bnx2x_update_sge_prod()
402 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n", in bnx2x_update_sge_prod()
403 fp->last_max_sge, fp->rx_sge_prod); in bnx2x_update_sge_prod()
407 * CQE (calculated by HW).
410 const struct eth_fast_path_rx_cqe *cqe, in bnx2x_get_rxhash() argument
413 /* Get Toeplitz hash from CQE */ in bnx2x_get_rxhash()
414 if ((bp->dev->features & NETIF_F_RXHASH) && in bnx2x_get_rxhash()
415 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) { in bnx2x_get_rxhash()
418 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE; in bnx2x_get_rxhash()
423 return le32_to_cpu(cqe->rss_hash_result); in bnx2x_get_rxhash()
431 struct eth_fast_path_rx_cqe *cqe) in bnx2x_tpa_start() argument
433 struct bnx2x *bp = fp->bp; in bnx2x_tpa_start()
434 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; in bnx2x_tpa_start()
435 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; in bnx2x_tpa_start()
436 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; in bnx2x_tpa_start()
438 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; in bnx2x_tpa_start()
439 struct sw_rx_bd *first_buf = &tpa_info->first_buf; in bnx2x_tpa_start()
442 if (tpa_info->tpa_state != BNX2X_TPA_STOP) in bnx2x_tpa_start()
446 mapping = dma_map_single(&bp->pdev->dev, in bnx2x_tpa_start()
447 first_buf->data + NET_SKB_PAD, in bnx2x_tpa_start()
448 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_tpa_start()
450 * ...if it fails - move the skb from the consumer to the producer in bnx2x_tpa_start()
455 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_tpa_start()
458 tpa_info->tpa_state = BNX2X_TPA_ERROR; in bnx2x_tpa_start()
463 prod_rx_buf->data = first_buf->data; in bnx2x_tpa_start()
466 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); in bnx2x_tpa_start()
467 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); in bnx2x_tpa_start()
473 tpa_info->parsing_flags = in bnx2x_tpa_start()
474 le16_to_cpu(cqe->pars_flags.flags); in bnx2x_tpa_start()
475 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); in bnx2x_tpa_start()
476 tpa_info->tpa_state = BNX2X_TPA_START; in bnx2x_tpa_start()
477 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd); in bnx2x_tpa_start()
478 tpa_info->placement_offset = cqe->placement_offset; in bnx2x_tpa_start()
479 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type); in bnx2x_tpa_start()
480 if (fp->mode == TPA_MODE_GRO) { in bnx2x_tpa_start()
481 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len); in bnx2x_tpa_start()
482 tpa_info->full_page = SGE_PAGES / gro_size * gro_size; in bnx2x_tpa_start()
483 tpa_info->gro_size = gro_size; in bnx2x_tpa_start()
487 fp->tpa_queue_used |= (1 << queue); in bnx2x_tpa_start()
488 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", in bnx2x_tpa_start()
489 fp->tpa_queue_used); in bnx2x_tpa_start()
499 * bnx2x_set_gro_params - compute GRO values
502 * @parsing_flags: parsing flags from the START CQE
524 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in bnx2x_set_gro_params()
527 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in bnx2x_set_gro_params()
538 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len; in bnx2x_set_gro_params()
540 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count in bnx2x_set_gro_params()
541 * to skb_shinfo(skb)->gso_segs in bnx2x_set_gro_params()
543 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs; in bnx2x_set_gro_params()
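
bnx2x_set_gro_params() computes gso_size as the payload carried by the first BD minus the protocol header length, and pre-loads NAPI_GRO_CB(skb)->count with the number of coalesced segments so tcp_gro_complete() can propagate it to gso_segs. A tiny standalone check of that arithmetic (the 54-byte header figure is just an example, not taken from a CQE):

    #include <stdio.h>
    #include <stdint.h>

    /* gso_size = payload carried by the first BD minus the header bytes. */
    static uint16_t gro_gso_size(uint16_t len_on_bd, uint16_t hdrs_len)
    {
        return len_on_bd - hdrs_len;
    }

    int main(void)
    {
        /* e.g. 1514-byte first BD, 14 + 20 + 20 = 54 bytes of headers */
        printf("gso_size = %u\n", gro_gso_size(1514, 54));   /* 1460 */
        return 0;
    }
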
549 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; in bnx2x_alloc_rx_sge()
550 struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; in bnx2x_alloc_rx_sge()
551 struct bnx2x_alloc_pool *pool = &fp->page_pool; in bnx2x_alloc_rx_sge()
554 if (!pool->page) { in bnx2x_alloc_rx_sge()
555 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); in bnx2x_alloc_rx_sge()
556 if (unlikely(!pool->page)) in bnx2x_alloc_rx_sge()
557 return -ENOMEM; in bnx2x_alloc_rx_sge()
559 pool->offset = 0; in bnx2x_alloc_rx_sge()
562 mapping = dma_map_page(&bp->pdev->dev, pool->page, in bnx2x_alloc_rx_sge()
563 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE); in bnx2x_alloc_rx_sge()
564 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_alloc_rx_sge()
566 return -ENOMEM; in bnx2x_alloc_rx_sge()
569 sw_buf->page = pool->page; in bnx2x_alloc_rx_sge()
570 sw_buf->offset = pool->offset; in bnx2x_alloc_rx_sge()
574 sge->addr_hi = cpu_to_le32(U64_HI(mapping)); in bnx2x_alloc_rx_sge()
575 sge->addr_lo = cpu_to_le32(U64_LO(mapping)); in bnx2x_alloc_rx_sge()
577 pool->offset += SGE_PAGE_SIZE; in bnx2x_alloc_rx_sge()
578 if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE) in bnx2x_alloc_rx_sge()
579 get_page(pool->page); in bnx2x_alloc_rx_sge()
581 pool->page = NULL; in bnx2x_alloc_rx_sge()
589 struct eth_end_agg_rx_cqe *cqe, in bnx2x_fill_frag_skb() argument
595 u16 len_on_bd = tpa_info->len_on_bd; in bnx2x_fill_frag_skb()
598 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd; in bnx2x_fill_frag_skb()
600 if (fp->mode == TPA_MODE_GRO) { in bnx2x_fill_frag_skb()
601 gro_size = tpa_info->gro_size; in bnx2x_fill_frag_skb()
602 full_page = tpa_info->full_page; in bnx2x_fill_frag_skb()
607 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd, in bnx2x_fill_frag_skb()
608 le16_to_cpu(cqe->pkt_len), in bnx2x_fill_frag_skb()
609 le16_to_cpu(cqe->num_of_coalesced_segs)); in bnx2x_fill_frag_skb()
613 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n", in bnx2x_fill_frag_skb()
615 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len); in bnx2x_fill_frag_skb()
617 return -EINVAL; in bnx2x_fill_frag_skb()
623 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j])); in bnx2x_fill_frag_skb()
627 if (fp->mode == TPA_MODE_GRO) in bnx2x_fill_frag_skb()
632 rx_pg = &fp->rx_page_ring[sge_idx]; in bnx2x_fill_frag_skb()
639 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_fill_frag_skb()
643 dma_unmap_page(&bp->pdev->dev, in bnx2x_fill_frag_skb()
647 if (fp->mode == TPA_MODE_LRO) in bnx2x_fill_frag_skb()
653 for (rem = frag_len; rem > 0; rem -= gro_size) { in bnx2x_fill_frag_skb()
665 skb->data_len += frag_len; in bnx2x_fill_frag_skb()
666 skb->truesize += SGE_PAGES; in bnx2x_fill_frag_skb()
667 skb->len += frag_len; in bnx2x_fill_frag_skb()
669 frag_size -= frag_len; in bnx2x_fill_frag_skb()
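
bnx2x_fill_frag_skb() converts the aggregated length left after the first BD (cqe->pkt_len - len_on_bd) into per-SGE page chunks, attaching one page per loop iteration and growing skb->len/data_len accordingly. A hedged standalone sketch of that chunking arithmetic, assuming each SGE carries at most SGE_PAGES bytes (the GRO-mode full_page variant is omitted):

    #include <stdio.h>

    #define SGE_PAGES (4096 * 2)   /* assumed aggregation page size for the example */

    /* Split an aggregated payload into per-SGE chunks the way the fill loop does. */
    static void split_into_sges(int frag_size)
    {
        int j = 0;

        while (frag_size > 0) {
            int frag_len = frag_size < SGE_PAGES ? frag_size : SGE_PAGES;

            printf("sge[%d]: %d bytes\n", j++, frag_len);
            frag_size -= frag_len;
        }
    }

    int main(void)
    {
        split_into_sges(20000);   /* 8192 + 8192 + 3616 */
        return 0;
    }
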
680 if (fp->rx_frag_size) in bnx2x_build_skb()
681 skb = build_skb(data, fp->rx_frag_size); in bnx2x_build_skb()
689 if (fp->rx_frag_size) in bnx2x_frag_free()
697 if (fp->rx_frag_size) { in bnx2x_frag_alloc()
702 return napi_alloc_frag(fp->rx_frag_size); in bnx2x_frag_alloc()
705 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask); in bnx2x_frag_alloc()
717 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), in bnx2x_gro_ip_csum()
718 iph->saddr, iph->daddr, 0); in bnx2x_gro_ip_csum()
729 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), in bnx2x_gro_ipv6_csum()
730 &iph->saddr, &iph->daddr, 0); in bnx2x_gro_ipv6_csum()
746 if (skb_shinfo(skb)->gso_size) { in bnx2x_gro_receive()
747 switch (be16_to_cpu(skb->protocol)) { in bnx2x_gro_receive()
755 netdev_WARN_ONCE(bp->dev, in bnx2x_gro_receive()
756 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", in bnx2x_gro_receive()
757 be16_to_cpu(skb->protocol)); in bnx2x_gro_receive()
761 skb_record_rx_queue(skb, fp->rx_queue); in bnx2x_gro_receive()
762 napi_gro_receive(&fp->napi, skb); in bnx2x_gro_receive()
768 struct eth_end_agg_rx_cqe *cqe, in bnx2x_tpa_stop() argument
771 struct sw_rx_bd *rx_buf = &tpa_info->first_buf; in bnx2x_tpa_stop()
772 u8 pad = tpa_info->placement_offset; in bnx2x_tpa_stop()
773 u16 len = tpa_info->len_on_bd; in bnx2x_tpa_stop()
775 u8 *new_data, *data = rx_buf->data; in bnx2x_tpa_stop()
776 u8 old_tpa_state = tpa_info->tpa_state; in bnx2x_tpa_stop()
778 tpa_info->tpa_state = BNX2X_TPA_STOP; in bnx2x_tpa_stop()
780 /* If there was an error during the handling of the TPA_START - in bnx2x_tpa_stop()
791 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), in bnx2x_tpa_stop()
792 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_tpa_stop()
798 if (pad + len > fp->rx_buf_size) { in bnx2x_tpa_stop()
800 pad, len, fp->rx_buf_size); in bnx2x_tpa_stop()
809 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type); in bnx2x_tpa_stop()
811 skb->protocol = eth_type_trans(skb, bp->dev); in bnx2x_tpa_stop()
812 skb->ip_summed = CHECKSUM_UNNECESSARY; in bnx2x_tpa_stop()
815 skb, cqe, cqe_idx)) { in bnx2x_tpa_stop()
816 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) in bnx2x_tpa_stop()
817 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag); in bnx2x_tpa_stop()
821 "Failed to allocate new pages - dropping packet!\n"); in bnx2x_tpa_stop()
826 rx_buf->data = new_data; in bnx2x_tpa_stop()
835 "Failed to allocate or map a new skb - dropping packet!\n"); in bnx2x_tpa_stop()
836 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++; in bnx2x_tpa_stop()
843 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; in bnx2x_alloc_rx_data()
844 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; in bnx2x_alloc_rx_data()
849 return -ENOMEM; in bnx2x_alloc_rx_data()
851 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD, in bnx2x_alloc_rx_data()
852 fp->rx_buf_size, in bnx2x_alloc_rx_data()
854 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_alloc_rx_data()
857 return -ENOMEM; in bnx2x_alloc_rx_data()
860 rx_buf->data = data; in bnx2x_alloc_rx_data()
863 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); in bnx2x_alloc_rx_data()
864 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); in bnx2x_alloc_rx_data()
870 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, in bnx2x_csum_validate() argument
879 if (cqe->fast_path_cqe.status_flags & in bnx2x_csum_validate()
885 if (cqe->fast_path_cqe.type_error_flags & in bnx2x_csum_validate()
888 qstats->hw_csum_err++; in bnx2x_csum_validate()
890 skb->ip_summed = CHECKSUM_UNNECESSARY; in bnx2x_csum_validate()
895 struct bnx2x *bp = fp->bp; in bnx2x_rx_int()
899 union eth_rx_cqe *cqe; in bnx2x_rx_int() local
903 if (unlikely(bp->panic)) in bnx2x_rx_int()
909 bd_cons = fp->rx_bd_cons; in bnx2x_rx_int()
910 bd_prod = fp->rx_bd_prod; in bnx2x_rx_int()
912 sw_comp_cons = fp->rx_comp_cons; in bnx2x_rx_int()
913 sw_comp_prod = fp->rx_comp_prod; in bnx2x_rx_int()
916 cqe = &fp->rx_comp_ring[comp_ring_cons]; in bnx2x_rx_int()
917 cqe_fp = &cqe->fast_path_cqe; in bnx2x_rx_int()
920 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons); in bnx2x_rx_int()
933 if (unlikely(bp->panic)) in bnx2x_rx_int()
940 /* A rmb() is required to ensure that the CQE is not read in bnx2x_rx_int()
945 * stale data. Without the barrier TPA state-machine might in bnx2x_rx_int()
947 * provided with incorrect packet description - these lead in bnx2x_rx_int()
952 cqe_fp_flags = cqe_fp->type_error_flags; in bnx2x_rx_int()
956 "CQE type %x err %x status %x queue %x vlan %x len %u\n", in bnx2x_rx_int()
958 cqe_fp_flags, cqe_fp->status_flags, in bnx2x_rx_int()
959 le32_to_cpu(cqe_fp->rss_hash_result), in bnx2x_rx_int()
960 le16_to_cpu(cqe_fp->vlan_tag), in bnx2x_rx_int()
961 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len)); in bnx2x_rx_int()
965 bnx2x_sp_event(fp, cqe); in bnx2x_rx_int()
969 rx_buf = &fp->rx_buf_ring[bd_cons]; in bnx2x_rx_int()
970 data = rx_buf->data; in bnx2x_rx_int()
977 if (fp->mode == TPA_MODE_DISABLED && in bnx2x_rx_int()
985 u16 queue = cqe_fp->queue_index; in bnx2x_rx_int()
996 queue = cqe->end_agg_cqe.queue_index; in bnx2x_rx_int()
997 tpa_info = &fp->tpa_info[queue]; in bnx2x_rx_int()
1002 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) - in bnx2x_rx_int()
1003 tpa_info->len_on_bd; in bnx2x_rx_int()
1005 if (fp->mode == TPA_MODE_GRO) in bnx2x_rx_int()
1006 pages = (frag_size + tpa_info->full_page - 1) / in bnx2x_rx_int()
1007 tpa_info->full_page; in bnx2x_rx_int()
1013 &cqe->end_agg_cqe, comp_ring_cons); in bnx2x_rx_int()
1015 if (bp->panic) in bnx2x_rx_int()
1019 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe); in bnx2x_rx_int()
1023 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len); in bnx2x_rx_int()
1024 pad = cqe_fp->placement_offset; in bnx2x_rx_int()
1025 dma_sync_single_for_cpu(&bp->pdev->dev, in bnx2x_rx_int()
1036 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++; in bnx2x_rx_int()
1043 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && in bnx2x_rx_int()
1045 skb = napi_alloc_skb(&fp->napi, len); in bnx2x_rx_int()
1049 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_rx_int()
1052 memcpy(skb->data, data + pad, len); in bnx2x_rx_int()
1057 dma_unmap_single(&bp->pdev->dev, in bnx2x_rx_int()
1059 fp->rx_buf_size, in bnx2x_rx_int()
1064 bnx2x_fp_qstats(bp, fp)-> in bnx2x_rx_int()
1072 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_rx_int()
1080 skb->protocol = eth_type_trans(skb, bp->dev); in bnx2x_rx_int()
1082 /* Set Toeplitz hash for a non-LRO skb */ in bnx2x_rx_int()
1088 if (bp->dev->features & NETIF_F_RXCSUM) in bnx2x_rx_int()
1089 bnx2x_csum_validate(skb, cqe, fp, in bnx2x_rx_int()
1092 skb_record_rx_queue(skb, fp->rx_queue); in bnx2x_rx_int()
1095 if (unlikely(cqe->fast_path_cqe.type_error_flags & in bnx2x_rx_int()
1099 if (le16_to_cpu(cqe_fp->pars_flags.flags) & in bnx2x_rx_int()
1102 le16_to_cpu(cqe_fp->vlan_tag)); in bnx2x_rx_int()
1104 napi_gro_receive(&fp->napi, skb); in bnx2x_rx_int()
1106 rx_buf->data = NULL; in bnx2x_rx_int()
1116 /* mark CQE as free */ in bnx2x_rx_int()
1123 cqe = &fp->rx_comp_ring[comp_ring_cons]; in bnx2x_rx_int()
1124 cqe_fp = &cqe->fast_path_cqe; in bnx2x_rx_int()
1127 fp->rx_bd_cons = bd_cons; in bnx2x_rx_int()
1128 fp->rx_bd_prod = bd_prod_fw; in bnx2x_rx_int()
1129 fp->rx_comp_cons = sw_comp_cons; in bnx2x_rx_int()
1130 fp->rx_comp_prod = sw_comp_prod; in bnx2x_rx_int()
1134 fp->rx_sge_prod); in bnx2x_rx_int()
1142 struct bnx2x *bp = fp->bp; in bnx2x_msix_fp_int()
1146 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n", in bnx2x_msix_fp_int()
1147 fp->index, fp->fw_sb_id, fp->igu_sb_id); in bnx2x_msix_fp_int()
1149 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); in bnx2x_msix_fp_int()
1152 if (unlikely(bp->panic)) in bnx2x_msix_fp_int()
1156 /* Handle Rx and Tx according to MSI-X vector */ in bnx2x_msix_fp_int()
1158 prefetch(fp->txdata_ptr[cos]->tx_cons_sb); in bnx2x_msix_fp_int()
1160 prefetch(&fp->sb_running_index[SM_RX_ID]); in bnx2x_msix_fp_int()
1161 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); in bnx2x_msix_fp_int()
1169 mutex_lock(&bp->port.phy_mutex); in bnx2x_acquire_phy_lock()
1178 mutex_unlock(&bp->port.phy_mutex); in bnx2x_release_phy_lock()
1184 u16 line_speed = bp->link_vars.line_speed; in bnx2x_get_mf_speed()
1187 bp->mf_config[BP_VN(bp)]); in bnx2x_get_mf_speed()
1206 * bnx2x_fill_report_data - fill link report data to report
1211 * It uses non-atomic bit operations because it is called under the mutex. in bnx2x_fill_report_data()
1220 data->line_speed = bnx2x_get_mf_speed(bp); in bnx2x_fill_report_data()
1223 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS)) in bnx2x_fill_report_data()
1225 &data->link_report_flags); in bnx2x_fill_report_data()
1229 &data->link_report_flags); in bnx2x_fill_report_data()
1232 if (bp->link_vars.duplex == DUPLEX_FULL) in bnx2x_fill_report_data()
1234 &data->link_report_flags); in bnx2x_fill_report_data()
1237 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) in bnx2x_fill_report_data()
1239 &data->link_report_flags); in bnx2x_fill_report_data()
1242 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) in bnx2x_fill_report_data()
1244 &data->link_report_flags); in bnx2x_fill_report_data()
1246 *data = bp->vf_link_vars; in bnx2x_fill_report_data()
1251 * bnx2x_link_report - report link status to OS.
1268 * __bnx2x_link_report - report link status to OS.
1279 if (bp->force_link_down) { in __bnx2x_link_report()
1280 bp->link_vars.link_up = 0; in __bnx2x_link_report()
1292 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) || in __bnx2x_link_report()
1294 &bp->last_reported_link.link_report_flags) && in __bnx2x_link_report()
1299 bp->link_cnt++; in __bnx2x_link_report()
1301 /* We are going to report new link parameters now - in __bnx2x_link_report()
1304 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data)); in __bnx2x_link_report()
1312 netif_carrier_off(bp->dev); in __bnx2x_link_report()
1313 netdev_err(bp->dev, "NIC Link is Down\n"); in __bnx2x_link_report()
1319 netif_carrier_on(bp->dev); in __bnx2x_link_report()
1336 flow = "ON - receive & transmit"; in __bnx2x_link_report()
1338 flow = "ON - receive"; in __bnx2x_link_report()
1340 flow = "ON - transmit"; in __bnx2x_link_report()
1345 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", in __bnx2x_link_report()
1357 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; in bnx2x_set_next_page_sgl()
1358 sge->addr_hi = in bnx2x_set_next_page_sgl()
1359 cpu_to_le32(U64_HI(fp->rx_sge_mapping + in bnx2x_set_next_page_sgl()
1362 sge->addr_lo = in bnx2x_set_next_page_sgl()
1363 cpu_to_le32(U64_LO(fp->rx_sge_mapping + in bnx2x_set_next_page_sgl()
1374 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; in bnx2x_free_tpa_pool()
1375 struct sw_rx_bd *first_buf = &tpa_info->first_buf; in bnx2x_free_tpa_pool()
1376 u8 *data = first_buf->data; in bnx2x_free_tpa_pool()
1382 if (tpa_info->tpa_state == BNX2X_TPA_START) in bnx2x_free_tpa_pool()
1383 dma_unmap_single(&bp->pdev->dev, in bnx2x_free_tpa_pool()
1385 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_free_tpa_pool()
1387 first_buf->data = NULL; in bnx2x_free_tpa_pool()
1396 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings_cnic()
1398 fp->rx_bd_cons = 0; in bnx2x_init_rx_rings_cnic()
1405 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, in bnx2x_init_rx_rings_cnic()
1406 fp->rx_sge_prod); in bnx2x_init_rx_rings_cnic()
1418 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings()
1421 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); in bnx2x_init_rx_rings()
1423 if (fp->mode != TPA_MODE_DISABLED) { in bnx2x_init_rx_rings()
1424 /* Fill the per-aggregation pool */ in bnx2x_init_rx_rings()
1427 &fp->tpa_info[i]; in bnx2x_init_rx_rings()
1429 &tpa_info->first_buf; in bnx2x_init_rx_rings()
1431 first_buf->data = in bnx2x_init_rx_rings()
1433 if (!first_buf->data) { in bnx2x_init_rx_rings()
1434 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n", in bnx2x_init_rx_rings()
1437 fp->mode = TPA_MODE_DISABLED; in bnx2x_init_rx_rings()
1441 tpa_info->tpa_state = BNX2X_TPA_STOP; in bnx2x_init_rx_rings()
1465 fp->mode = TPA_MODE_DISABLED; in bnx2x_init_rx_rings()
1472 fp->rx_sge_prod = ring_prod; in bnx2x_init_rx_rings()
1477 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings()
1479 fp->rx_bd_cons = 0; in bnx2x_init_rx_rings()
1486 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, in bnx2x_init_rx_rings()
1487 fp->rx_sge_prod); in bnx2x_init_rx_rings()
1495 U64_LO(fp->rx_comp_mapping)); in bnx2x_init_rx_rings()
1498 U64_HI(fp->rx_comp_mapping)); in bnx2x_init_rx_rings()
1506 struct bnx2x *bp = fp->bp; in bnx2x_free_tx_skbs_queue()
1509 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_free_tx_skbs_queue()
1512 u16 sw_prod = txdata->tx_pkt_prod; in bnx2x_free_tx_skbs_queue()
1513 u16 sw_cons = txdata->tx_pkt_cons; in bnx2x_free_tx_skbs_queue()
1522 netdev_get_tx_queue(bp->dev, in bnx2x_free_tx_skbs_queue()
1523 txdata->txq_index)); in bnx2x_free_tx_skbs_queue()
1532 bnx2x_free_tx_skbs_queue(&bp->fp[i]); in bnx2x_free_tx_skbs_cnic()
1541 bnx2x_free_tx_skbs_queue(&bp->fp[i]); in bnx2x_free_tx_skbs()
1547 struct bnx2x *bp = fp->bp; in bnx2x_free_rx_bds()
1551 if (fp->rx_buf_ring == NULL) in bnx2x_free_rx_bds()
1555 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; in bnx2x_free_rx_bds()
1556 u8 *data = rx_buf->data; in bnx2x_free_rx_bds()
1560 dma_unmap_single(&bp->pdev->dev, in bnx2x_free_rx_bds()
1562 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_free_rx_bds()
1564 rx_buf->data = NULL; in bnx2x_free_rx_bds()
1574 bnx2x_free_rx_bds(&bp->fp[j]); in bnx2x_free_rx_skbs_cnic()
1583 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_free_rx_skbs()
1587 if (fp->mode != TPA_MODE_DISABLED) in bnx2x_free_rx_skbs()
1607 u32 mf_cfg = bp->mf_config[BP_VN(bp)]; in bnx2x_update_max_mf_config()
1622 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1636 free_irq(bp->msix_table[offset].vector, bp->dev); in bnx2x_free_msix_irqs()
1638 bp->msix_table[offset].vector); in bnx2x_free_msix_irqs()
1651 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n", in bnx2x_free_msix_irqs()
1652 i, bp->msix_table[offset].vector); in bnx2x_free_msix_irqs()
1654 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); in bnx2x_free_msix_irqs()
1660 if (bp->flags & USING_MSIX_FLAG && in bnx2x_free_irq()
1661 !(bp->flags & USING_SINGLE_MSIX_FLAG)) { in bnx2x_free_irq()
1670 free_irq(bp->dev->irq, bp->dev); in bnx2x_free_irq()
1680 bp->msix_table[msix_vec].entry = msix_vec; in bnx2x_enable_msix()
1682 bp->msix_table[0].entry); in bnx2x_enable_msix()
1688 bp->msix_table[msix_vec].entry = msix_vec; in bnx2x_enable_msix()
1690 msix_vec, bp->msix_table[msix_vec].entry); in bnx2x_enable_msix()
1696 bp->msix_table[msix_vec].entry = msix_vec; in bnx2x_enable_msix()
1705 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], in bnx2x_enable_msix()
1709 * MSI-X vectors in bnx2x_enable_msix()
1711 if (rc == -ENOSPC) { in bnx2x_enable_msix()
1713 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1); in bnx2x_enable_msix()
1715 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n", in bnx2x_enable_msix()
1720 BNX2X_DEV_INFO("Using single MSI-X vector\n"); in bnx2x_enable_msix()
1721 bp->flags |= USING_SINGLE_MSIX_FLAG; in bnx2x_enable_msix()
1724 bp->num_ethernet_queues = 1; in bnx2x_enable_msix()
1725 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_enable_msix()
1727 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); in bnx2x_enable_msix()
1731 int diff = msix_vec - rc; in bnx2x_enable_msix()
1733 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc); in bnx2x_enable_msix()
1738 bp->num_ethernet_queues -= diff; in bnx2x_enable_msix()
1739 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_enable_msix()
1742 bp->num_queues); in bnx2x_enable_msix()
1745 bp->flags |= USING_MSIX_FLAG; in bnx2x_enable_msix()
1751 if (rc == -ENOMEM) in bnx2x_enable_msix()
1752 bp->flags |= DISABLE_MSI_FLAG; in bnx2x_enable_msix()
1763 rc = request_irq(bp->msix_table[offset++].vector, in bnx2x_req_msix_irqs()
1765 bp->dev->name, bp->dev); in bnx2x_req_msix_irqs()
1768 return -EBUSY; in bnx2x_req_msix_irqs()
1776 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_req_msix_irqs()
1777 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", in bnx2x_req_msix_irqs()
1778 bp->dev->name, i); in bnx2x_req_msix_irqs()
1780 rc = request_irq(bp->msix_table[offset].vector, in bnx2x_req_msix_irqs()
1781 bnx2x_msix_fp_int, 0, fp->name, fp); in bnx2x_req_msix_irqs()
1784 bp->msix_table[offset].vector, rc); in bnx2x_req_msix_irqs()
1786 return -EBUSY; in bnx2x_req_msix_irqs()
1795 netdev_info(bp->dev, in bnx2x_req_msix_irqs()
1796 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n", in bnx2x_req_msix_irqs()
1797 bp->msix_table[0].vector, in bnx2x_req_msix_irqs()
1798 0, bp->msix_table[offset].vector, in bnx2x_req_msix_irqs()
1799 i - 1, bp->msix_table[offset + i - 1].vector); in bnx2x_req_msix_irqs()
1802 netdev_info(bp->dev, in bnx2x_req_msix_irqs()
1803 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n", in bnx2x_req_msix_irqs()
1804 0, bp->msix_table[offset].vector, in bnx2x_req_msix_irqs()
1805 i - 1, bp->msix_table[offset + i - 1].vector); in bnx2x_req_msix_irqs()
1814 rc = pci_enable_msi(bp->pdev); in bnx2x_enable_msi()
1817 return -1; in bnx2x_enable_msi()
1819 bp->flags |= USING_MSI_FLAG; in bnx2x_enable_msi()
1829 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG)) in bnx2x_req_irq()
1834 if (bp->flags & USING_MSIX_FLAG) in bnx2x_req_irq()
1835 irq = bp->msix_table[0].vector; in bnx2x_req_irq()
1837 irq = bp->pdev->irq; in bnx2x_req_irq()
1839 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev); in bnx2x_req_irq()
1845 if (bp->flags & USING_MSIX_FLAG && in bnx2x_setup_irqs()
1846 !(bp->flags & USING_SINGLE_MSIX_FLAG)) { in bnx2x_setup_irqs()
1856 if (bp->flags & USING_MSI_FLAG) { in bnx2x_setup_irqs()
1857 bp->dev->irq = bp->pdev->irq; in bnx2x_setup_irqs()
1858 netdev_info(bp->dev, "using MSI IRQ %d\n", in bnx2x_setup_irqs()
1859 bp->dev->irq); in bnx2x_setup_irqs()
1861 if (bp->flags & USING_MSIX_FLAG) { in bnx2x_setup_irqs()
1862 bp->dev->irq = bp->msix_table[0].vector; in bnx2x_setup_irqs()
1863 netdev_info(bp->dev, "using MSIX IRQ %d\n", in bnx2x_setup_irqs()
1864 bp->dev->irq); in bnx2x_setup_irqs()
1909 if (netif_running(bp->dev)) { in bnx2x_netif_start()
1914 if (bp->state == BNX2X_STATE_OPEN) in bnx2x_netif_start()
1915 netif_tx_wake_all_queues(bp->dev); in bnx2x_netif_start()
1933 struct ethhdr *hdr = (struct ethhdr *)skb->data; in bnx2x_select_queue()
1934 u16 ether_type = ntohs(hdr->h_proto); in bnx2x_select_queue()
1940 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto); in bnx2x_select_queue()
1943 /* If ethertype is FCoE or FIP - use FCoE ring */ in bnx2x_select_queue()
1948 /* select a non-FCoE queue */ in bnx2x_select_queue()
1950 (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); in bnx2x_select_queue()
1956 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp); in bnx2x_set_num_queues()
1960 bp->num_ethernet_queues = 1; in bnx2x_set_num_queues()
1963 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */ in bnx2x_set_num_queues()
1964 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_set_num_queues()
1966 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); in bnx2x_set_num_queues()
1970 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1977 * bp->max_cos.
1986 * The proper configuration of skb->queue_mapping is handled by
1990 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1996 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos; in bnx2x_set_real_num_queues()
2005 rc = netif_set_real_num_tx_queues(bp->dev, tx); in bnx2x_set_real_num_queues()
2010 rc = netif_set_real_num_rx_queues(bp->dev, rx); in bnx2x_set_real_num_queues()
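
As the comment block above explains, bnx2x_set_real_num_queues() exposes BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos Tx queues to the stack, while Rx gets one queue per RSS queue; the extra FCoE queue in the sketch below is an assumption based on the CNIC/FCoE handling elsewhere in this file, not on these lines. A small arithmetic illustration:

    #include <stdio.h>

    /* Hypothetical illustration of the tx/rx netdev queue count computation. */
    static void real_num_queues(int eth_queues, int max_cos, int has_fcoe,
                                int *tx, int *rx)
    {
        *tx = eth_queues * max_cos + (has_fcoe ? 1 : 0);
        *rx = eth_queues + (has_fcoe ? 1 : 0);
    }

    int main(void)
    {
        int tx, rx;

        real_num_queues(8, 3, 1, &tx, &rx);
        printf("tx=%d rx=%d\n", tx, rx);   /* tx=25 rx=9 */
        return 0;
    }
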
2027 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_set_rx_buf_size()
2030 /* Always use a mini-jumbo MTU for the FCoE L2 ring */ in bnx2x_set_rx_buf_size()
2040 mtu = bp->dev->mtu; in bnx2x_set_rx_buf_size()
2041 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START + in bnx2x_set_rx_buf_size()
2046 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size); in bnx2x_set_rx_buf_size()
2048 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) in bnx2x_set_rx_buf_size()
2049 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; in bnx2x_set_rx_buf_size()
2051 fp->rx_frag_size = 0; in bnx2x_set_rx_buf_size()
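
bnx2x_set_rx_buf_size() derives the Rx buffer size from the MTU plus alignment/padding overhead and then picks the allocator: if the padded buffer still fits in a single page, rx_frag_size is set and the page-fragment path is used, otherwise it is zeroed and buffers come from kmalloc (see bnx2x_frag_alloc() above). A standalone sketch of that decision; the overhead and alignment constants are illustrative assumptions:

    #include <stdio.h>

    #define PAGE_SIZE_ASSUMED   4096
    #define NET_SKB_PAD_ASSUMED 64     /* illustrative padding value */
    #define ALIGN_UP(x, a)      (((x) + (a) - 1) & ~((a) - 1))

    /* Decide whether an Rx buffer of this size can come from the frag allocator. */
    static int rx_frag_size_for(int rx_buf_size)
    {
        rx_buf_size = ALIGN_UP(rx_buf_size, 64);   /* cache-line style alignment */

        if (rx_buf_size + NET_SKB_PAD_ASSUMED <= PAGE_SIZE_ASSUMED)
            return rx_buf_size + NET_SKB_PAD_ASSUMED;   /* frag path */
        return 0;                                        /* fall back to kmalloc */
    }

    int main(void)
    {
        printf("rx_buf 1600 -> frag size %d\n", rx_frag_size_for(1600));
        printf("rx_buf 9100 -> frag size %d\n", rx_frag_size_for(9100));
        return 0;
    }
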
2063 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) in bnx2x_init_rss()
2064 bp->rss_conf_obj.ind_table[i] = in bnx2x_init_rss()
2065 bp->fp->cl_id + in bnx2x_init_rss()
2070 * per-port, so if explicit configuration is needed, do it only in bnx2x_init_rss()
2073 * For 57712 and newer on the other hand it's a per-function in bnx2x_init_rss()
2076 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); in bnx2x_init_rss()
2088 * bp->multi_mode = ETH_RSS_MODE_DISABLED; in bnx2x_rss()
2103 if (rss_obj->udp_rss_v4) in bnx2x_rss()
2105 if (rss_obj->udp_rss_v6) in bnx2x_rss()
2123 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); in bnx2x_rss()
2144 func_params.f_obj = &bp->func_obj; in bnx2x_init_hw()
2161 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; in bnx2x_squeeze_objects()
2172 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags, in bnx2x_squeeze_objects()
2180 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, in bnx2x_squeeze_objects()
2186 rparam.mcast_obj = &bp->mcast_obj; in bnx2x_squeeze_objects()
2189 /* Add a DEL command... - Since we're doing a driver cleanup only, in bnx2x_squeeze_objects()
2193 netif_addr_lock_bh(bp->dev); in bnx2x_squeeze_objects()
2196 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n", in bnx2x_squeeze_objects()
2203 BNX2X_ERR("Failed to clean multi-cast object: %d\n", in bnx2x_squeeze_objects()
2205 netif_addr_unlock_bh(bp->dev); in bnx2x_squeeze_objects()
2211 netif_addr_unlock_bh(bp->dev); in bnx2x_squeeze_objects()
2217 (bp)->state = BNX2X_STATE_ERROR; \
2223 bp->cnic_loaded = false; \
2229 (bp)->state = BNX2X_STATE_ERROR; \
2230 (bp)->panic = 1; \
2231 return -EBUSY; \
2235 bp->cnic_loaded = false; \
2236 (bp)->panic = 1; \
2237 return -EBUSY; \
2243 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, in bnx2x_free_fw_stats_mem()
2244 bp->fw_stats_data_sz + bp->fw_stats_req_sz); in bnx2x_free_fw_stats_mem()
2261 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; in bnx2x_alloc_fw_stats_mem()
2264 * the VFs themselves. We don't include them in the bp->fw_stats_num as in bnx2x_alloc_fw_stats_mem()
2277 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) + in bnx2x_alloc_fw_stats_mem()
2278 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ? in bnx2x_alloc_fw_stats_mem()
2282 bp->fw_stats_num, vf_headroom, num_groups); in bnx2x_alloc_fw_stats_mem()
2283 bp->fw_stats_req_sz = sizeof(struct stats_query_header) + in bnx2x_alloc_fw_stats_mem()
2287 * stats_counter holds per-STORM counters that are incremented in bnx2x_alloc_fw_stats_mem()
2294 bp->fw_stats_data_sz = sizeof(struct per_port_stats) + in bnx2x_alloc_fw_stats_mem()
2300 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping, in bnx2x_alloc_fw_stats_mem()
2301 bp->fw_stats_data_sz + bp->fw_stats_req_sz); in bnx2x_alloc_fw_stats_mem()
2302 if (!bp->fw_stats) in bnx2x_alloc_fw_stats_mem()
2306 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; in bnx2x_alloc_fw_stats_mem()
2307 bp->fw_stats_req_mapping = bp->fw_stats_mapping; in bnx2x_alloc_fw_stats_mem()
2308 bp->fw_stats_data = (struct bnx2x_fw_stats_data *) in bnx2x_alloc_fw_stats_mem()
2309 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); in bnx2x_alloc_fw_stats_mem()
2310 bp->fw_stats_data_mapping = bp->fw_stats_mapping + in bnx2x_alloc_fw_stats_mem()
2311 bp->fw_stats_req_sz; in bnx2x_alloc_fw_stats_mem()
2314 U64_HI(bp->fw_stats_req_mapping), in bnx2x_alloc_fw_stats_mem()
2315 U64_LO(bp->fw_stats_req_mapping)); in bnx2x_alloc_fw_stats_mem()
2317 U64_HI(bp->fw_stats_data_mapping), in bnx2x_alloc_fw_stats_mem()
2318 U64_LO(bp->fw_stats_data_mapping)); in bnx2x_alloc_fw_stats_mem()
2324 return -ENOMEM; in bnx2x_alloc_fw_stats_mem()
2333 bp->fw_seq = in bnx2x_nic_load_request()
2336 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); in bnx2x_nic_load_request()
2339 bp->fw_drv_pulse_wr_seq = in bnx2x_nic_load_request()
2342 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); in bnx2x_nic_load_request()
2355 return -EBUSY; in bnx2x_nic_load_request()
2363 return -EBUSY; in bnx2x_nic_load_request()
2401 return -EBUSY; in bnx2x_compare_fw_ver()
2412 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", in bnx2x_nic_load_no_mcp()
2417 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n", in bnx2x_nic_load_no_mcp()
2434 bp->port.pmf = 1; in bnx2x_nic_load_pmf()
2436 * writing to bp->port.pmf here and reading it from the in bnx2x_nic_load_pmf()
2441 bp->port.pmf = 0; in bnx2x_nic_load_pmf()
2444 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); in bnx2x_nic_load_pmf()
2451 (bp->common.shmem2_base)) { in bnx2x_nic_load_afex_dcc()
2462 bp->afex_def_vlan_tag = -1; in bnx2x_nic_load_afex_dcc()
2466 * bnx2x_bz_fp - zero content of the fastpath structure.
2471 * Makes sure the contents of bp->fp[index].napi are kept in bnx2x_bz_fp()
2476 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_bz_fp()
2478 struct napi_struct orig_napi = fp->napi; in bnx2x_bz_fp()
2479 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; in bnx2x_bz_fp()
2482 if (fp->tpa_info) in bnx2x_bz_fp()
2483 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 * in bnx2x_bz_fp()
2488 fp->napi = orig_napi; in bnx2x_bz_fp()
2489 fp->tpa_info = orig_tpa_info; in bnx2x_bz_fp()
2490 fp->bp = bp; in bnx2x_bz_fp()
2491 fp->index = index; in bnx2x_bz_fp()
2493 fp->max_cos = bp->max_cos; in bnx2x_bz_fp()
2496 fp->max_cos = 1; in bnx2x_bz_fp()
2500 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; in bnx2x_bz_fp()
2503 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * in bnx2x_bz_fp()
2509 if (bp->dev->features & NETIF_F_LRO) in bnx2x_bz_fp()
2510 fp->mode = TPA_MODE_LRO; in bnx2x_bz_fp()
2511 else if (bp->dev->features & NETIF_F_GRO_HW) in bnx2x_bz_fp()
2512 fp->mode = TPA_MODE_GRO; in bnx2x_bz_fp()
2514 fp->mode = TPA_MODE_DISABLED; in bnx2x_bz_fp()
2519 if (bp->disable_tpa || IS_FCOE_FP(fp)) in bnx2x_bz_fp()
2520 fp->mode = TPA_MODE_DISABLED; in bnx2x_bz_fp()
2531 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n", in bnx2x_set_os_driver_state()
2541 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n"); in bnx2x_load_cnic()
2543 mutex_init(&bp->cnic_mutex); in bnx2x_load_cnic()
2583 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); in bnx2x_load_cnic()
2594 /* re-read iscsi info */ in bnx2x_load_cnic()
2598 bp->cnic_loaded = true; in bnx2x_load_cnic()
2599 if (bp->state == BNX2X_STATE_OPEN) in bnx2x_load_cnic()
2602 DP(NETIF_MSG_IFUP, "CNIC-related load ended successfully\n"); in bnx2x_load_cnic()
2617 BNX2X_ERR("CNIC-related load failed\n"); in bnx2x_load_cnic()
2635 if (unlikely(bp->panic)) { in bnx2x_nic_load()
2637 return -EPERM; in bnx2x_nic_load()
2641 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; in bnx2x_nic_load()
2644 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); in bnx2x_nic_load()
2646 &bp->last_reported_link.link_report_flags); in bnx2x_nic_load()
2655 * Also set fp->mode and txdata_ptr. in bnx2x_nic_load()
2657 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); in bnx2x_nic_load()
2660 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + in bnx2x_nic_load()
2661 bp->num_cnic_queues) * in bnx2x_nic_load()
2664 bp->fcoe_init = false; in bnx2x_nic_load()
2699 * bp->num_queues, bnx2x_set_real_num_queues() should always in bnx2x_nic_load()
2712 bnx2x_setup_tc(bp->dev, bp->max_cos); in bnx2x_nic_load()
2718 bp->nic_stopped = false; in bnx2x_nic_load()
2767 /* Init per-function objects */ in bnx2x_nic_load()
2776 bp->afex_def_vlan_tag = -1; in bnx2x_nic_load()
2778 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; in bnx2x_nic_load()
2793 rc = -EBUSY; in bnx2x_nic_load()
2812 rc = bnx2x_setup_queue(bp, &bp->fp[i], false); in bnx2x_nic_load()
2814 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false); in bnx2x_nic_load()
2829 bp->state = BNX2X_STATE_OPEN; in bnx2x_nic_load()
2835 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, in bnx2x_nic_load()
2842 if (IS_PF(bp) && bp->pending_max) { in bnx2x_nic_load()
2843 bnx2x_update_max_mf_config(bp, bp->pending_max); in bnx2x_nic_load()
2844 bp->pending_max = 0; in bnx2x_nic_load()
2847 bp->force_link_down = false; in bnx2x_nic_load()
2848 if (bp->port.pmf) { in bnx2x_nic_load()
2853 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN; in bnx2x_nic_load()
2857 /* Re-configure vlan filters */ in bnx2x_nic_load()
2865 if (bp->flags & PTP_SUPPORTED) { in bnx2x_nic_load()
2873 /* Tx queue should be only re-enabled */ in bnx2x_nic_load()
2874 netif_tx_wake_all_queues(bp->dev); in bnx2x_nic_load()
2878 netif_tx_start_all_queues(bp->dev); in bnx2x_nic_load()
2884 bp->state = BNX2X_STATE_DIAG; in bnx2x_nic_load()
2891 if (bp->port.pmf) in bnx2x_nic_load()
2897 mod_timer(&bp->timer, jiffies + bp->current_interval); in bnx2x_nic_load()
2910 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT); in bnx2x_nic_load()
2920 return -EBUSY; in bnx2x_nic_load()
2923 /* Update driver data for On-Chip MFW dump. */ in bnx2x_nic_load()
2927 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ in bnx2x_nic_load()
2928 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) in bnx2x_nic_load()
2950 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_nic_load()
2960 bp->port.pmf = 0; in bnx2x_nic_load()
2964 bp->nic_stopped = true; in bnx2x_nic_load()
2984 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_drain_tx_queues()
2987 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); in bnx2x_drain_tx_queues()
3013 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE && in bnx2x_nic_unload()
3014 (bp->state == BNX2X_STATE_CLOSED || in bnx2x_nic_unload()
3015 bp->state == BNX2X_STATE_ERROR)) { in bnx2x_nic_unload()
3023 bp->recovery_state = BNX2X_RECOVERY_DONE; in bnx2x_nic_unload()
3024 bp->is_leader = 0; in bnx2x_nic_unload()
3030 return -EINVAL; in bnx2x_nic_unload()
3034 * have not completed successfully - all resources are released. in bnx2x_nic_unload()
3037 * dev->IFF_UP flag is still on. in bnx2x_nic_unload()
3039 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR) in bnx2x_nic_unload()
3042 /* It's important to set the bp->state to the value different from in bnx2x_nic_unload()
3046 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; in bnx2x_nic_unload()
3057 netdev_reset_tc(bp->dev); in bnx2x_nic_unload()
3059 bp->rx_mode = BNX2X_RX_MODE_NONE; in bnx2x_nic_unload()
3061 del_timer_sync(&bp->timer); in bnx2x_nic_unload()
3065 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; in bnx2x_nic_unload()
3100 if (!bp->nic_stopped) { in bnx2x_nic_unload()
3109 bp->nic_stopped = true; in bnx2x_nic_unload()
3124 bp->sp_state = 0; in bnx2x_nic_unload()
3126 bp->port.pmf = 0; in bnx2x_nic_unload()
3129 bp->sp_rtnl_state = 0; in bnx2x_nic_unload()
3137 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_nic_unload()
3149 bp->state = BNX2X_STATE_CLOSED; in bnx2x_nic_unload()
3150 bp->cnic_loaded = false; in bnx2x_nic_unload()
3156 /* Check if there are pending parity attentions. If there are - set in bnx2x_nic_unload()
3185 if (!bp->pdev->pm_cap) { in bnx2x_set_power_state()
3190 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr); in bnx2x_set_power_state()
3194 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, in bnx2x_set_power_state()
3206 if (atomic_read(&bp->pdev->enable_cnt) != 1) in bnx2x_set_power_state()
3215 if (bp->wol) in bnx2x_set_power_state()
3218 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, in bnx2x_set_power_state()
3227 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state); in bnx2x_set_power_state()
3228 return -EINVAL; in bnx2x_set_power_state()
3240 struct bnx2x *bp = fp->bp; in bnx2x_poll()
3245 if (unlikely(bp->panic)) { in bnx2x_poll()
3251 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) in bnx2x_poll()
3252 bnx2x_tx_int(bp, fp->txdata_ptr[cos]); in bnx2x_poll()
3282 /* Re-enable interrupts */ in bnx2x_poll()
3284 "Update index to %d\n", fp->fp_hc_idx); in bnx2x_poll()
3285 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, in bnx2x_poll()
3286 le16_to_cpu(fp->fp_hc_idx), in bnx2x_poll()
3311 int old_len = le16_to_cpu(h_tx_bd->nbytes); in bnx2x_tx_split()
3314 h_tx_bd->nbytes = cpu_to_le16(hlen); in bnx2x_tx_split()
3317 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo); in bnx2x_tx_split()
3322 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; in bnx2x_tx_split()
3324 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), in bnx2x_tx_split()
3325 le32_to_cpu(h_tx_bd->addr_lo)) + hlen; in bnx2x_tx_split()
3327 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); in bnx2x_tx_split()
3328 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); in bnx2x_tx_split()
3329 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); in bnx2x_tx_split()
3332 tx_buf->flags |= BNX2X_TSO_SPLIT_BD; in bnx2x_tx_split()
3336 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); in bnx2x_tx_split()
3352 csum_partial(t_header - fix, fix, 0))); in bnx2x_csum_fix()
3356 csum_partial(t_header, -fix, 0))); in bnx2x_csum_fix()
3367 if (skb->ip_summed != CHECKSUM_PARTIAL) in bnx2x_xmit_type()
3373 prot = ipv6_hdr(skb)->nexthdr; in bnx2x_xmit_type()
3376 prot = ip_hdr(skb)->protocol; in bnx2x_xmit_type()
3379 if (!CHIP_IS_E1x(bp) && skb->encapsulation) { in bnx2x_xmit_type()
3380 if (inner_ip_hdr(skb)->version == 6) { in bnx2x_xmit_type()
3382 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) in bnx2x_xmit_type()
3386 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP) in bnx2x_xmit_type()
3414 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3427 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) { in bnx2x_pkt_req_lin()
3429 unsigned short lso_mss = skb_shinfo(skb)->gso_size; in bnx2x_pkt_req_lin()
3430 int wnd_size = MAX_FETCH_BD - num_tso_win_sub; in bnx2x_pkt_req_lin()
3432 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; in bnx2x_pkt_req_lin()
3444 first_bd_sz = skb_headlen(skb) - hlen; in bnx2x_pkt_req_lin()
3448 /* Calculate the first sum - it's special */ in bnx2x_pkt_req_lin()
3449 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++) in bnx2x_pkt_req_lin()
3451 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]); in bnx2x_pkt_req_lin()
3453 /* If there was data in the linear skb area - check it */ in bnx2x_pkt_req_lin()
3460 wnd_sum -= first_bd_sz; in bnx2x_pkt_req_lin()
3467 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]); in bnx2x_pkt_req_lin()
3473 wnd_sum -= in bnx2x_pkt_req_lin()
3474 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]); in bnx2x_pkt_req_lin()
3477 /* in the non-LSO case, a too fragmented packet should always in bnx2x_pkt_req_lin()
3487 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO", in bnx2x_pkt_req_lin()
3488 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); in bnx2x_pkt_req_lin()
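
bnx2x_pkt_req_lin() decides whether an LSO skb must be linearized: since the hardware fetches at most MAX_FETCH_BD descriptors per window, every run of wnd_size consecutive fragments has to carry at least one MSS of payload. A simplified standalone sketch of that sliding-window test (the real code also folds the linear head into the first window, which is omitted here; the window size and MSS in main() are example values):

    #include <stdio.h>

    /* If any run of wnd_size consecutive fragments carries less than one MSS,
     * the hardware could need more than MAX_FETCH_BD descriptors for a single
     * MSS, so the skb has to be linearized.
     */
    static int needs_linearization(const int *frag_sz, int nr_frags,
                                   int wnd_size, int lso_mss)
    {
        int wnd_sum = 0, i;

        if (nr_frags < wnd_size)
            return 0;                      /* few enough BDs - always fine */

        for (i = 0; i < wnd_size; i++)     /* first window */
            wnd_sum += frag_sz[i];
        if (wnd_sum < lso_mss)
            return 1;

        for (i = wnd_size; i < nr_frags; i++) {   /* slide the window */
            wnd_sum += frag_sz[i] - frag_sz[i - wnd_size];
            if (wnd_sum < lso_mss)
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        int frags[13] = { 256, 256, 256, 256, 256, 256, 256,
                          256, 256, 256, 256, 256, 256 };

        /* every 10-frag window holds 2560 bytes >= 1460, so no linearization */
        printf("linearize: %d\n", needs_linearization(frags, 13, 10, 1460));
        return 0;
    }
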
3495 * bnx2x_set_pbd_gso - update PBD in GSO case.
3505 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); in bnx2x_set_pbd_gso()
3506 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq); in bnx2x_set_pbd_gso()
3507 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb)); in bnx2x_set_pbd_gso()
3510 pbd->ip_id = bswab16(ip_hdr(skb)->id); in bnx2x_set_pbd_gso()
3511 pbd->tcp_pseudo_csum = in bnx2x_set_pbd_gso()
3512 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, in bnx2x_set_pbd_gso()
3513 ip_hdr(skb)->daddr, in bnx2x_set_pbd_gso()
3516 pbd->tcp_pseudo_csum = in bnx2x_set_pbd_gso()
3517 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in bnx2x_set_pbd_gso()
3518 &ipv6_hdr(skb)->daddr, in bnx2x_set_pbd_gso()
3522 pbd->global_data |= in bnx2x_set_pbd_gso()
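
For TSO, bnx2x_set_pbd_gso() seeds the parsing BD with the TCP pseudo-header checksum computed over a zero length (the firmware patches per-segment lengths itself), then applies the complement/byte-swap handling shown above (~csum_tcpudp_magic()/bswab16()). A standalone sketch of the underlying IPv4 pseudo-header sum; the addresses in main() are arbitrary examples:

    #include <stdio.h>
    #include <stdint.h>

    /* Fold a 32-bit accumulator into a 16-bit ones'-complement sum. */
    static uint16_t csum_fold32(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    /* Ones'-complement sum of the IPv4 pseudo header: saddr, daddr, proto, len.
     * Length 0 mirrors the TSO seeding; the driver then complements/byte-swaps
     * the result as required by the parsing BD.
     */
    static uint16_t pseudo_hdr_csum(uint32_t saddr, uint32_t daddr,
                                    uint8_t proto, uint16_t len)
    {
        uint32_t sum = 0;

        sum += (saddr >> 16) + (saddr & 0xffff);
        sum += (daddr >> 16) + (daddr & 0xffff);
        sum += proto;
        sum += len;

        return csum_fold32(sum);
    }

    int main(void)
    {
        /* 192.168.0.1 -> 192.168.0.2, TCP (protocol 6), length 0 */
        uint16_t sum = pseudo_hdr_csum(0xc0a80001, 0xc0a80002, 6, 0);

        printf("pseudo-header sum = 0x%04x\n", sum);
        return 0;
    }
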
3527 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3540 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) << in bnx2x_set_pbd_csum_enc()
3553 * No need to pass the UDP header length - it's a constant. in bnx2x_set_pbd_csum_enc()
3559 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3572 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << in bnx2x_set_pbd_csum_e2()
3584 * No need to pass the UDP header length - it's a constant. in bnx2x_set_pbd_csum_e2()
3594 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; in bnx2x_set_sbd_csum()
3597 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6; in bnx2x_set_sbd_csum()
3600 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; in bnx2x_set_sbd_csum()
3604 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3615 u8 hlen = (skb_network_header(skb) - skb->data) >> 1; in bnx2x_set_pbd_csum()
3618 pbd->global_data = in bnx2x_set_pbd_csum()
3620 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << in bnx2x_set_pbd_csum()
3623 pbd->ip_hlen_w = (skb_transport_header(skb) - in bnx2x_set_pbd_csum()
3626 hlen += pbd->ip_hlen_w; in bnx2x_set_pbd_csum()
3634 pbd->total_hlen_w = cpu_to_le16(hlen); in bnx2x_set_pbd_csum()
3638 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check); in bnx2x_set_pbd_csum()
3645 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb)); in bnx2x_set_pbd_csum()
3648 pbd->tcp_pseudo_csum = in bnx2x_set_pbd_csum()
3653 pbd->tcp_pseudo_csum); in bnx2x_set_pbd_csum()
3669 hlen_w = (skb_inner_transport_header(skb) - in bnx2x_update_pbds_gso_enc()
3675 pbd2->fw_ip_hdr_to_payload_w = hlen_w; in bnx2x_update_pbds_gso_enc()
3680 u32 csum = (__force u32)(~iph->check) - in bnx2x_update_pbds_gso_enc()
3681 (__force u32)iph->tot_len - in bnx2x_update_pbds_gso_enc()
3682 (__force u32)iph->frag_off; in bnx2x_update_pbds_gso_enc()
3684 outerip_len = iph->ihl << 1; in bnx2x_update_pbds_gso_enc()
3686 pbd2->fw_ip_csum_wo_len_flags_frag = in bnx2x_update_pbds_gso_enc()
3689 pbd2->fw_ip_hdr_to_payload_w = in bnx2x_update_pbds_gso_enc()
3690 hlen_w - ((sizeof(struct ipv6hdr)) >> 1); in bnx2x_update_pbds_gso_enc()
3691 pbd_e2->data.tunnel_data.flags |= in bnx2x_update_pbds_gso_enc()
3695 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq); in bnx2x_update_pbds_gso_enc()
3697 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb)); in bnx2x_update_pbds_gso_enc()
3701 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id); in bnx2x_update_pbds_gso_enc()
3703 pbd_e2->data.tunnel_data.pseudo_csum = in bnx2x_update_pbds_gso_enc()
3705 inner_ip_hdr(skb)->saddr, in bnx2x_update_pbds_gso_enc()
3706 inner_ip_hdr(skb)->daddr, in bnx2x_update_pbds_gso_enc()
3709 pbd_e2->data.tunnel_data.pseudo_csum = in bnx2x_update_pbds_gso_enc()
3711 &inner_ipv6_hdr(skb)->saddr, in bnx2x_update_pbds_gso_enc()
3712 &inner_ipv6_hdr(skb)->daddr, in bnx2x_update_pbds_gso_enc()
3716 outerip_off = (skb_network_header(skb) - skb->data) >> 1; in bnx2x_update_pbds_gso_enc()
3722 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << in bnx2x_update_pbds_gso_enc()
3725 if (ip_hdr(skb)->protocol == IPPROTO_UDP) { in bnx2x_update_pbds_gso_enc()
3727 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1; in bnx2x_update_pbds_gso_enc()
3744 if (ipv6->nexthdr == NEXTHDR_IPV6) in bnx2x_set_ipv6_ext_e2()
3776 if (unlikely(bp->panic)) in bnx2x_start_xmit()
3785 txdata = &bp->bnx2x_txq[txq_index]; in bnx2x_start_xmit()
3794 txdata->cid, fp_index, txdata_index, txdata, fp); */ in bnx2x_start_xmit()
3797 skb_shinfo(skb)->nr_frags + in bnx2x_start_xmit()
3801 if (txdata->tx_ring_size == 0) { in bnx2x_start_xmit()
3803 bnx2x_fp_qstats(bp, txdata->parent_fp); in bnx2x_start_xmit()
3804 q_stats->driver_filtered_tx_pkt++; in bnx2x_start_xmit()
3808 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; in bnx2x_start_xmit()
3817 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, in bnx2x_start_xmit()
3818 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type, in bnx2x_start_xmit()
3819 skb->len); in bnx2x_start_xmit()
3821 eth = (struct ethhdr *)skb->data; in bnx2x_start_xmit()
3824 if (unlikely(is_multicast_ether_addr(eth->h_dest))) { in bnx2x_start_xmit()
3825 if (is_broadcast_ether_addr(eth->h_dest)) in bnx2x_start_xmit()
3831 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT) in bnx2x_start_xmit()
3837 bp->lin_cnt++; in bnx2x_start_xmit()
3840 "SKB linearization failed - silently dropping this SKB\n"); in bnx2x_start_xmit()
3847 mapping = dma_map_single(&bp->pdev->dev, skb->data, in bnx2x_start_xmit()
3849 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_start_xmit()
3851 "SKB mapping failed - silently dropping this SKB\n"); in bnx2x_start_xmit()
3861 And above all, all pbd sizes are in words - NOT DWORDS! in bnx2x_start_xmit()
3864 /* get current pkt produced now - advance it just before sending packet in bnx2x_start_xmit()
3867 pkt_prod = txdata->tx_pkt_prod; in bnx2x_start_xmit()
3868 bd_prod = TX_BD(txdata->tx_bd_prod); in bnx2x_start_xmit()
3874 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; in bnx2x_start_xmit()
3875 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd; in bnx2x_start_xmit()
3878 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; in bnx2x_start_xmit()
3880 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { in bnx2x_start_xmit()
3881 if (!(bp->flags & TX_TIMESTAMPING_EN)) { in bnx2x_start_xmit()
3882 bp->eth_stats.ptp_skip_tx_ts++; in bnx2x_start_xmit()
3884 } else if (bp->ptp_tx_skb) { in bnx2x_start_xmit()
3885 bp->eth_stats.ptp_skip_tx_ts++; in bnx2x_start_xmit()
3886 netdev_err_once(bp->dev, in bnx2x_start_xmit()
3887 …"Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n… in bnx2x_start_xmit()
3889 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in bnx2x_start_xmit()
3891 bp->ptp_tx_skb = skb_get(skb); in bnx2x_start_xmit()
3892 bp->ptp_tx_start = jiffies; in bnx2x_start_xmit()
3893 schedule_work(&bp->ptp_task); in bnx2x_start_xmit()
3898 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT; in bnx2x_start_xmit()
3901 tx_buf->first_bd = txdata->tx_bd_prod; in bnx2x_start_xmit()
3902 tx_buf->skb = skb; in bnx2x_start_xmit()
3903 tx_buf->flags = 0; in bnx2x_start_xmit()
3907 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd); in bnx2x_start_xmit()
3910 tx_start_bd->vlan_or_ethertype = in bnx2x_start_xmit()
3912 tx_start_bd->bd_flags.as_bitfield |= in bnx2x_start_xmit()
3924 tx_start_bd->vlan_or_ethertype = in bnx2x_start_xmit()
3925 cpu_to_le16(ntohs(eth->h_proto)); in bnx2x_start_xmit()
3927 tx_start_bd->bd_flags.as_bitfield |= in bnx2x_start_xmit()
3930 tx_start_bd->vlan_or_ethertype = in bnx2x_start_xmit()
3936 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); in bnx2x_start_xmit()
3950 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; in bnx2x_start_xmit()
3964 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd; in bnx2x_start_xmit()
3968 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w = in bnx2x_start_xmit()
3969 (skb_inner_network_header(skb) - in bnx2x_start_xmit()
3970 skb->data) >> 1; in bnx2x_start_xmit()
3977 pbd2->global_data = cpu_to_le16(global_data); in bnx2x_start_xmit()
3980 SET_FLAG(tx_start_bd->general_data, in bnx2x_start_xmit()
3983 SET_FLAG(tx_start_bd->general_data, in bnx2x_start_xmit()
3986 tx_buf->flags |= BNX2X_HAS_SECOND_PBD; in bnx2x_start_xmit()
4002 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, in bnx2x_start_xmit()
4003 &pbd_e2->data.mac_addr.src_mid, in bnx2x_start_xmit()
4004 &pbd_e2->data.mac_addr.src_lo, in bnx2x_start_xmit()
4005 eth->h_source); in bnx2x_start_xmit()
4007 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, in bnx2x_start_xmit()
4008 &pbd_e2->data.mac_addr.dst_mid, in bnx2x_start_xmit()
4009 &pbd_e2->data.mac_addr.dst_lo, in bnx2x_start_xmit()
4010 eth->h_dest); in bnx2x_start_xmit()
4012 if (bp->flags & TX_SWITCHING) in bnx2x_start_xmit()
4014 &pbd_e2->data.mac_addr.dst_hi, in bnx2x_start_xmit()
4015 &pbd_e2->data.mac_addr.dst_mid, in bnx2x_start_xmit()
4016 &pbd_e2->data.mac_addr.dst_lo, in bnx2x_start_xmit()
4017 eth->h_dest); in bnx2x_start_xmit()
4019 /* Enforce security is always set in Stop on Error - in bnx2x_start_xmit()
4022 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, in bnx2x_start_xmit()
4023 &pbd_e2->data.mac_addr.src_mid, in bnx2x_start_xmit()
4024 &pbd_e2->data.mac_addr.src_lo, in bnx2x_start_xmit()
4025 eth->h_source); in bnx2x_start_xmit()
4033 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; in bnx2x_start_xmit()
4041 pbd_e1x->global_data |= cpu_to_le16(global_data); in bnx2x_start_xmit()
4045 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); in bnx2x_start_xmit()
4046 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); in bnx2x_start_xmit()
4047 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); in bnx2x_start_xmit()
4048 pkt_size = tx_start_bd->nbytes; in bnx2x_start_xmit()
4052 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, in bnx2x_start_xmit()
4053 le16_to_cpu(tx_start_bd->nbytes), in bnx2x_start_xmit()
4054 tx_start_bd->bd_flags.as_bitfield, in bnx2x_start_xmit()
4055 le16_to_cpu(tx_start_bd->vlan_or_ethertype)); in bnx2x_start_xmit()
4061 skb->len, hlen, skb_headlen(skb), in bnx2x_start_xmit()
4062 skb_shinfo(skb)->gso_size); in bnx2x_start_xmit()
4064 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; in bnx2x_start_xmit()
4074 (skb_shinfo(skb)->gso_size << in bnx2x_start_xmit()
4085 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data); in bnx2x_start_xmit()
4090 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in bnx2x_start_xmit()
4091 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in bnx2x_start_xmit()
4093 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, in bnx2x_start_xmit()
4095 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_start_xmit()
4099 "Unable to map page - dropping packet...\n"); in bnx2x_start_xmit()
4103 * first_bd->nbd need to be properly updated in bnx2x_start_xmit()
4106 first_bd->nbd = cpu_to_le16(nbd); in bnx2x_start_xmit()
4108 TX_BD(txdata->tx_pkt_prod), in bnx2x_start_xmit()
4114 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; in bnx2x_start_xmit()
4116 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; in bnx2x_start_xmit()
4118 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); in bnx2x_start_xmit()
4119 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); in bnx2x_start_xmit()
4120 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag)); in bnx2x_start_xmit()
4126 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo, in bnx2x_start_xmit()
4127 le16_to_cpu(tx_data_bd->nbytes)); in bnx2x_start_xmit()
4133 first_bd->nbd = cpu_to_le16(nbd); in bnx2x_start_xmit()
4147 * have to) in order to save some CPU cycles in a non-LSO in bnx2x_start_xmit()
4151 total_pkt_bd->total_pkt_bytes = pkt_size; in bnx2x_start_xmit()
4156 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, in bnx2x_start_xmit()
4157 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, in bnx2x_start_xmit()
4158 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, in bnx2x_start_xmit()
4159 le16_to_cpu(pbd_e1x->total_hlen_w)); in bnx2x_start_xmit()
4164 pbd_e2->data.mac_addr.dst_hi, in bnx2x_start_xmit()
4165 pbd_e2->data.mac_addr.dst_mid, in bnx2x_start_xmit()
4166 pbd_e2->data.mac_addr.dst_lo, in bnx2x_start_xmit()
4167 pbd_e2->data.mac_addr.src_hi, in bnx2x_start_xmit()
4168 pbd_e2->data.mac_addr.src_mid, in bnx2x_start_xmit()
4169 pbd_e2->data.mac_addr.src_lo, in bnx2x_start_xmit()
4170 pbd_e2->parsing_data); in bnx2x_start_xmit()
4173 netdev_tx_sent_queue(txq, skb->len); in bnx2x_start_xmit()
4177 txdata->tx_pkt_prod++; in bnx2x_start_xmit()
4181 * This is only applicable for weak-ordered memory model archs such in bnx2x_start_xmit()
4182 * as IA-64. The following barrier is also mandatory since FW will in bnx2x_start_xmit()
4187 txdata->tx_db.data.prod += nbd; in bnx2x_start_xmit()
4191 DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw); in bnx2x_start_xmit()
4193 txdata->tx_bd_prod += nbd; in bnx2x_start_xmit()
4200 * fp->bd_tx_cons */ in bnx2x_start_xmit()
4203 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; in bnx2x_start_xmit()
4207 txdata->tx_pkt++; in bnx2x_start_xmit()
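/*
 * Editor's note: the transmit path above advances the BD producer, issues a
 * write barrier so the descriptors are globally visible, and only then rings
 * the doorbell. A minimal, self-contained sketch of that ordering follows.
 * The struct and ring_post() helper are hypothetical illustrations, not
 * driver API; the real code uses wmb() and writes txdata->tx_db.raw to a
 * device doorbell register rather than a plain variable.
 */
#include <stdatomic.h>
#include <stdint.h>

struct tx_ring_model {
	uint16_t prod;		/* software producer index          */
	uint16_t db_prod;	/* value last written to "doorbell" */
};

static void ring_post(struct tx_ring_model *r, uint16_t nbd)
{
	r->prod += nbd;		/* publish the new BDs on the ring  */

	/*
	 * Ensure the descriptor writes are visible before the device (or,
	 * in this model, another thread) observes the new producer value.
	 * The driver uses wmb(); a release fence is the closest portable
	 * userspace analogue.
	 */
	atomic_thread_fence(memory_order_release);

	r->db_prod = r->prod;	/* "ring the doorbell"              */
}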
4248 * bnx2x_setup_tc - routine to configure net_device for multi tc
4271 if (num_tc > bp->max_cos) { in bnx2x_setup_tc()
4273 num_tc, bp->max_cos); in bnx2x_setup_tc()
4274 return -EINVAL; in bnx2x_setup_tc()
4280 return -EINVAL; in bnx2x_setup_tc()
4289 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]); in bnx2x_setup_tc()
4292 outer_prio, bp->prio_to_cos[outer_prio]); in bnx2x_setup_tc()
4306 for (cos = 0; cos < bp->max_cos; cos++) { in bnx2x_setup_tc()
4324 return -EOPNOTSUPP; in __bnx2x_setup_tc()
4326 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; in __bnx2x_setup_tc()
4328 return bnx2x_setup_tc(dev, mqprio->num_tc); in __bnx2x_setup_tc()
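/*
 * Editor's note: a standalone sketch of the priority -> traffic-class mapping
 * that bnx2x_setup_tc() programs via netdev_set_prio_tc_map(), including the
 * num_tc upper bound check. MAX_PRIO, MAX_COS and map_prio_to_tc() are
 * illustrative assumptions, not driver definitions.
 */
#include <stdio.h>

#define MAX_PRIO 8
#define MAX_COS  4

static int map_prio_to_tc(int num_tc, const unsigned char prio_to_cos[MAX_PRIO],
			  unsigned char out_map[MAX_PRIO])
{
	int prio;

	if (num_tc < 0 || num_tc > MAX_COS)
		return -1;	/* the driver rejects num_tc > bp->max_cos */

	for (prio = 0; prio < MAX_PRIO; prio++)
		out_map[prio] = prio_to_cos[prio];

	return 0;
}

int main(void)
{
	const unsigned char prio_to_cos[MAX_PRIO] = { 0, 0, 1, 1, 2, 2, 3, 3 };
	unsigned char map[MAX_PRIO];
	int p;

	if (!map_prio_to_tc(4, prio_to_cos, map)) {
		for (p = 0; p < MAX_PRIO; p++)
			printf("prio %d -> tc %u\n", p, map[p]);
	}
	return 0;
}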
4338 if (!is_valid_ether_addr(addr->sa_data)) { in bnx2x_change_mac_addr()
4340 return -EINVAL; in bnx2x_change_mac_addr()
4345 return -EINVAL; in bnx2x_change_mac_addr()
4354 eth_hw_addr_set(dev, addr->sa_data); in bnx2x_change_mac_addr()
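/*
 * Editor's note: is_valid_ether_addr() above rejects multicast/broadcast and
 * all-zero addresses before the new MAC is accepted. A minimal userspace
 * equivalent, for illustration only:
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool mac_is_valid(const uint8_t mac[6])
{
	static const uint8_t zero[6] = { 0 };

	if (mac[0] & 0x01)			/* multicast/broadcast bit set */
		return false;
	if (!memcmp(mac, zero, sizeof(zero)))	/* all-zero address */
		return false;
	return true;
}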
4368 struct bnx2x_fastpath *fp = &bp->fp[fp_index]; in bnx2x_free_fp_mem_at()
4375 fp->status_blk_mapping = 0; in bnx2x_free_fp_mem_at()
4379 BNX2X_PCI_FREE(sb->e2_sb, in bnx2x_free_fp_mem_at()
4384 BNX2X_PCI_FREE(sb->e1x_sb, in bnx2x_free_fp_mem_at()
4416 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_free_fp_mem_at()
4420 fp_index, cos, txdata->cid); in bnx2x_free_fp_mem_at()
4422 BNX2X_FREE(txdata->tx_buf_ring); in bnx2x_free_fp_mem_at()
4423 BNX2X_PCI_FREE(txdata->tx_desc_ring, in bnx2x_free_fp_mem_at()
4424 txdata->tx_desc_mapping, in bnx2x_free_fp_mem_at()
4450 (__le16 *)status_blk.e2_sb->sb.index_values; in set_sb_shortcuts()
4452 (__le16 *)status_blk.e2_sb->sb.running_index; in set_sb_shortcuts()
4455 (__le16 *)status_blk.e1x_sb->sb.index_values; in set_sb_shortcuts()
4457 (__le16 *)status_blk.e1x_sb->sb.running_index; in set_sb_shortcuts()
4465 struct bnx2x *bp = fp->bp; in bnx2x_alloc_rx_bds()
4469 fp->rx_comp_cons = 0; in bnx2x_alloc_rx_bds()
4473 * fp->eth_q_stats.rx_skb_alloc_failed = 0 in bnx2x_alloc_rx_bds()
4482 WARN_ON(ring_prod <= (i - failure_cnt)); in bnx2x_alloc_rx_bds()
4487 i - failure_cnt, fp->index); in bnx2x_alloc_rx_bds()
4489 fp->rx_bd_prod = ring_prod; in bnx2x_alloc_rx_bds()
4490 /* Limit the CQE producer by the CQE ring size */ in bnx2x_alloc_rx_bds()
4491 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, in bnx2x_alloc_rx_bds()
4494 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; in bnx2x_alloc_rx_bds()
4496 return i - failure_cnt; in bnx2x_alloc_rx_bds()
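/*
 * Editor's note: sketch of the fill loop above - try to post rx_ring_size
 * buffers, count allocation failures instead of aborting, and clamp the
 * completion-queue producer to the CQ ring size. alloc_rx_buf(), the ring
 * layout and CQ_RING_ENTRIES are hypothetical stand-ins for the driver's
 * helpers and constants.
 */
#include <stdint.h>
#include <stdlib.h>

#define CQ_RING_ENTRIES 1024	/* analogue of NUM_RCQ_RINGS * RCQ_DESC_CNT */

static int alloc_rx_buf(void **buf)
{
	*buf = malloc(2048);
	return *buf ? 0 : -1;
}

static int fill_rx_ring(void **ring, uint16_t ring_size, uint16_t *bd_prod,
			uint16_t *cq_prod, unsigned int *failures)
{
	uint16_t i, prod = 0;

	*failures = 0;
	for (i = 0; i < ring_size; i++) {
		if (alloc_rx_buf(&ring[prod])) {
			(*failures)++;	/* keep going; report at the end */
			continue;
		}
		prod++;
	}

	*bd_prod = prod;
	/* the CQE producer may never exceed the CQE ring size */
	*cq_prod = prod < CQ_RING_ENTRIES ? prod : CQ_RING_ENTRIES;

	return i - *failures;		/* buffers actually posted */
}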
4507 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; in bnx2x_set_next_page_rx_cq()
4508 nextpg->addr_hi = in bnx2x_set_next_page_rx_cq()
4509 cpu_to_le32(U64_HI(fp->rx_comp_mapping + in bnx2x_set_next_page_rx_cq()
4511 nextpg->addr_lo = in bnx2x_set_next_page_rx_cq()
4512 cpu_to_le32(U64_LO(fp->rx_comp_mapping + in bnx2x_set_next_page_rx_cq()
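/*
 * Editor's note: the completion queue is a chain of pages; the last
 * descriptor slot of each page holds the DMA address of the next page,
 * split into hi/lo 32-bit halves as in the lines above. A minimal model,
 * with the structure invented for illustration and the little-endian
 * conversion (cpu_to_le32) omitted:
 */
#include <stdint.h>

struct next_page_desc {
	uint32_t addr_hi;
	uint32_t addr_lo;
};

static void chain_pages(struct next_page_desc *last_slot_of_page,
			uint64_t next_page_dma)
{
	last_slot_of_page->addr_hi = (uint32_t)(next_page_dma >> 32);
	last_slot_of_page->addr_lo = (uint32_t)(next_page_dma & 0xffffffffu);
}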
4520 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_alloc_fp_mem_at()
4525 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) { in bnx2x_alloc_fp_mem_at()
4527 bp->rx_ring_size = rx_ring_size; in bnx2x_alloc_fp_mem_at()
4528 } else if (!bp->rx_ring_size) { in bnx2x_alloc_fp_mem_at()
4543 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : in bnx2x_alloc_fp_mem_at()
4546 bp->rx_ring_size = rx_ring_size; in bnx2x_alloc_fp_mem_at()
4547 } else /* if rx_ring_size specified - use it */ in bnx2x_alloc_fp_mem_at()
4548 rx_ring_size = bp->rx_ring_size; in bnx2x_alloc_fp_mem_at()
4558 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), in bnx2x_alloc_fp_mem_at()
4560 if (!sb->e2_sb) in bnx2x_alloc_fp_mem_at()
4563 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), in bnx2x_alloc_fp_mem_at()
4565 if (!sb->e1x_sb) in bnx2x_alloc_fp_mem_at()
4580 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_alloc_fp_mem_at()
4586 txdata->tx_buf_ring = kcalloc(NUM_TX_BD, in bnx2x_alloc_fp_mem_at()
4589 if (!txdata->tx_buf_ring) in bnx2x_alloc_fp_mem_at()
4591 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping, in bnx2x_alloc_fp_mem_at()
4593 if (!txdata->tx_desc_ring) in bnx2x_alloc_fp_mem_at()
4649 * Min size is different for OOO, TPA and non-TPA queues in bnx2x_alloc_fp_mem_at()
4651 if (ring_size < (fp->mode == TPA_MODE_DISABLED ? in bnx2x_alloc_fp_mem_at()
4655 return -ENOMEM; in bnx2x_alloc_fp_mem_at()
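/*
 * Editor's note: sketch of the RX ring sizing policy above - use the
 * user-requested size if one was set, otherwise take a derived default,
 * clamp it to the maximum, and fail the allocation if the result is below
 * the minimum for the queue type (TPA vs. non-TPA). The constants are
 * placeholders, not the driver's values.
 */
#define MIN_RX_TPA	  72	/* placeholder for MIN_RX_SIZE_TPA    */
#define MIN_RX_NONTPA	  10	/* placeholder for MIN_RX_SIZE_NONTPA */
#define MAX_RX		4096	/* placeholder for MAX_RX_AVAIL       */

static int pick_rx_ring_size(int requested, int derived_default, int tpa_enabled)
{
	int size = requested ? requested : derived_default;
	int min = tpa_enabled ? MIN_RX_TPA : MIN_RX_NONTPA;

	if (size > MAX_RX)
		size = MAX_RX;
	if (size < min)
		return -1;	/* caller treats this as -ENOMEM */
	return size;
}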
4668 return -ENOMEM; in bnx2x_alloc_fp_mem_cnic()
4677 /* 1. Allocate FP for leading - fatal if error in bnx2x_alloc_fp_mem()
4678 * 2. Allocate RSS - fix number of queues if error in bnx2x_alloc_fp_mem()
4683 return -ENOMEM; in bnx2x_alloc_fp_mem()
4692 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; in bnx2x_alloc_fp_mem()
4703 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); in bnx2x_alloc_fp_mem()
4704 bp->num_ethernet_queues -= delta; in bnx2x_alloc_fp_mem()
4705 bp->num_queues = bp->num_ethernet_queues + in bnx2x_alloc_fp_mem()
4706 bp->num_cnic_queues; in bnx2x_alloc_fp_mem()
4708 bp->num_queues + delta, bp->num_queues); in bnx2x_alloc_fp_mem()
4718 for (i = 0; i < bp->fp_array_size; i++) in bnx2x_free_mem_bp()
4719 kfree(bp->fp[i].tpa_info); in bnx2x_free_mem_bp()
4720 kfree(bp->fp); in bnx2x_free_mem_bp()
4721 kfree(bp->sp_objs); in bnx2x_free_mem_bp()
4722 kfree(bp->fp_stats); in bnx2x_free_mem_bp()
4723 kfree(bp->bnx2x_txq); in bnx2x_free_mem_bp()
4724 kfree(bp->msix_table); in bnx2x_free_mem_bp()
4725 kfree(bp->ilt); in bnx2x_free_mem_bp()
4738 * The biggest MSI-X table we might need is as a maximum number of fast in bnx2x_alloc_mem_bp()
4741 msix_table_size = bp->igu_sb_cnt; in bnx2x_alloc_mem_bp()
4748 bp->fp_array_size = fp_array_size; in bnx2x_alloc_mem_bp()
4749 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size); in bnx2x_alloc_mem_bp()
4751 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL); in bnx2x_alloc_mem_bp()
4754 for (i = 0; i < bp->fp_array_size; i++) { in bnx2x_alloc_mem_bp()
4762 bp->fp = fp; in bnx2x_alloc_mem_bp()
4765 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs), in bnx2x_alloc_mem_bp()
4767 if (!bp->sp_objs) in bnx2x_alloc_mem_bp()
4771 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats), in bnx2x_alloc_mem_bp()
4773 if (!bp->fp_stats) in bnx2x_alloc_mem_bp()
4781 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata), in bnx2x_alloc_mem_bp()
4783 if (!bp->bnx2x_txq) in bnx2x_alloc_mem_bp()
4790 bp->msix_table = tbl; in bnx2x_alloc_mem_bp()
4796 bp->ilt = ilt; in bnx2x_alloc_mem_bp()
4801 return -ENOMEM; in bnx2x_alloc_mem_bp()
4818 if (bp->link_params.num_phys <= 1) in bnx2x_get_cur_phy_idx()
4821 if (bp->link_vars.link_up) { in bnx2x_get_cur_phy_idx()
4824 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && in bnx2x_get_cur_phy_idx()
4825 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) in bnx2x_get_cur_phy_idx()
4829 switch (bnx2x_phy_selection(&bp->link_params)) { in bnx2x_get_cur_phy_idx()
4853 if (bp->link_params.multi_phy_config & in bnx2x_get_link_cfg_idx()
4867 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_fcoe_get_wwn()
4871 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi, in bnx2x_fcoe_get_wwn()
4872 cp->fcoe_wwn_node_name_lo); in bnx2x_fcoe_get_wwn()
4875 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi, in bnx2x_fcoe_get_wwn()
4876 cp->fcoe_wwn_port_name_lo); in bnx2x_fcoe_get_wwn()
4879 BNX2X_ERR("Wrong WWN type requested - %d\n", type); in bnx2x_fcoe_get_wwn()
4880 return -EINVAL; in bnx2x_fcoe_get_wwn()
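/*
 * Editor's note: HILO_U64() above combines two 32-bit firmware words into a
 * single 64-bit WWN. An equivalent standalone helper, for illustration:
 */
#include <stdint.h>

static inline uint64_t hilo_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}
/* e.g. hilo_u64(0x20000025, 0xb500a018) == 0x20000025b500a018 */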
4892 if (pci_num_vf(bp->pdev)) { in bnx2x_change_mtu()
4894 return -EPERM; in bnx2x_change_mtu()
4897 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { in bnx2x_change_mtu()
4899 return -EAGAIN; in bnx2x_change_mtu()
4906 dev->mtu = new_mtu; in bnx2x_change_mtu()
4909 dev->features &= ~NETIF_F_GRO_HW; in bnx2x_change_mtu()
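/*
 * Editor's note: hardware GRO is only retained if the new MTU still allows
 * aggregation; otherwise NETIF_F_GRO_HW is cleared as above. A hedged sketch
 * of that check - mtu_allows_gro() mirrors bnx2x_mtu_allows_gro() in spirit,
 * but the threshold used here is an illustrative assumption, not the
 * driver's formula.
 */
#include <stdbool.h>

#define FEAT_GRO_HW	(1u << 0)

static bool mtu_allows_gro(int mtu)
{
	/* assumption: at least two full frames must fit in an SGE buffer */
	return mtu <= 4096 / 2;
}

static unsigned int fix_features_for_mtu(unsigned int features, int new_mtu)
{
	if (!mtu_allows_gro(new_mtu))
		features &= ~FEAT_GRO_HW;
	return features;
}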
4922 if (pci_num_vf(bp->pdev)) { in bnx2x_fix_features()
4923 netdev_features_t changed = dev->features ^ features; in bnx2x_fix_features()
4928 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) { in bnx2x_fix_features()
4930 features |= dev->features & NETIF_F_RXCSUM; in bnx2x_fix_features()
4935 features |= dev->features & NETIF_F_LOOPBACK; in bnx2x_fix_features()
4943 if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu)) in bnx2x_fix_features()
4954 netdev_features_t changes = features ^ dev->features; in bnx2x_set_features()
4959 if (!pci_num_vf(bp->pdev)) { in bnx2x_set_features()
4961 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { in bnx2x_set_features()
4962 bp->link_params.loopback_mode = LOOPBACK_BMAC; in bnx2x_set_features()
4966 if (bp->link_params.loopback_mode != LOOPBACK_NONE) { in bnx2x_set_features()
4967 bp->link_params.loopback_mode = LOOPBACK_NONE; in bnx2x_set_features()
4980 if (bp->recovery_state == BNX2X_RECOVERY_DONE) { in bnx2x_set_features()
4981 dev->features = features; in bnx2x_set_features()
4998 if (!bp->panic) in bnx2x_tx_timeout()
5016 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); in bnx2x_suspend()
5017 return -ENODEV; in bnx2x_suspend()
5045 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); in bnx2x_resume()
5046 return -ENODEV; in bnx2x_resume()
5050 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { in bnx2x_resume()
5052 return -EAGAIN; in bnx2x_resume()
5082 cxt->ustorm_ag_context.cdu_usage = in bnx2x_set_ctx_validation()
5086 cxt->xstorm_ag_context.cdu_reserved = in bnx2x_set_ctx_validation()
5136 set_bit(flag, &bp->sp_rtnl_state); in bnx2x_schedule_sp_rtnl()
5140 schedule_delayed_work(&bp->sp_rtnl_task, 0); in bnx2x_schedule_sp_rtnl()
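/*
 * Editor's note: bnx2x_schedule_sp_rtnl() records the requested action in a
 * state bitmask and kicks a delayed work item that later runs it under rtnl.
 * Below is a userspace model of that "set a flag, wake a worker" handshake;
 * the pthread plumbing is an illustrative analogue of set_bit() +
 * schedule_delayed_work(), not the kernel mechanism itself.
 */
#include <pthread.h>
#include <stdatomic.h>

static atomic_ulong sp_state;
static pthread_cond_t sp_wake = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t sp_lock = PTHREAD_MUTEX_INITIALIZER;

static void schedule_sp_task(unsigned int flag)
{
	/* record which action the worker should perform (set_bit analogue) */
	atomic_fetch_or(&sp_state, 1ul << flag);

	/* wake the worker thread (schedule_delayed_work analogue) */
	pthread_mutex_lock(&sp_lock);
	pthread_cond_signal(&sp_wake);
	pthread_mutex_unlock(&sp_lock);
}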