Lines Matching full:ss

197 	struct myri10ge_slice_state *ss;  member
915 struct myri10ge_slice_state *ss; in myri10ge_reset() local
943 bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry); in myri10ge_reset()
1000 ss = &mgp->ss[i]; in myri10ge_reset()
1001 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus); in myri10ge_reset()
1002 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus); in myri10ge_reset()
1011 ss = &mgp->ss[i]; in myri10ge_reset()
1012 ss->irq_claim = in myri10ge_reset()
1032 ss = &mgp->ss[i]; in myri10ge_reset()
1034 ss->dca_tag = (__iomem __be32 *) in myri10ge_reset()
1037 ss->dca_tag = NULL; in myri10ge_reset()
1046 ss = &mgp->ss[i]; in myri10ge_reset()
1048 memset(ss->rx_done.entry, 0, bytes); in myri10ge_reset()
1049 ss->tx.req = 0; in myri10ge_reset()
1050 ss->tx.done = 0; in myri10ge_reset()
1051 ss->tx.pkt_start = 0; in myri10ge_reset()
1052 ss->tx.pkt_done = 0; in myri10ge_reset()
1053 ss->rx_big.cnt = 0; in myri10ge_reset()
1054 ss->rx_small.cnt = 0; in myri10ge_reset()
1055 ss->rx_done.idx = 0; in myri10ge_reset()
1056 ss->rx_done.cnt = 0; in myri10ge_reset()
1057 ss->tx.wake_queue = 0; in myri10ge_reset()
1058 ss->tx.stop_queue = 0; in myri10ge_reset()
1085 myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag) in myri10ge_write_dca() argument
1087 ss->cached_dca_tag = tag; in myri10ge_write_dca()
1088 put_be32(htonl(tag), ss->dca_tag); in myri10ge_write_dca()
1091 static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss) in myri10ge_update_dca() argument
1096 if (cpu != ss->cpu) { in myri10ge_update_dca()
1097 tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu); in myri10ge_update_dca()
1098 if (ss->cached_dca_tag != tag) in myri10ge_update_dca()
1099 myri10ge_write_dca(ss, cpu, tag); in myri10ge_update_dca()
1100 ss->cpu = cpu; in myri10ge_update_dca()
1110 if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled) in myri10ge_setup_dca()
1126 mgp->ss[i].cpu = -1; in myri10ge_setup_dca()
1127 mgp->ss[i].cached_dca_tag = -1; in myri10ge_setup_dca()
1128 myri10ge_update_dca(&mgp->ss[i]); in myri10ge_setup_dca()
1305 myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) in myri10ge_rx_done() argument
1307 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_rx_done()
1317 rx = &ss->rx_small; in myri10ge_rx_done()
1320 rx = &ss->rx_big; in myri10ge_rx_done()
1329 skb = napi_get_frags(&ss->napi); in myri10ge_rx_done()
1331 ss->stats.rx_dropped++; in myri10ge_rx_done()
1367 skb_record_rx_queue(skb, ss - &mgp->ss[0]); in myri10ge_rx_done()
1369 napi_gro_frags(&ss->napi); in myri10ge_rx_done()
1375 myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index) in myri10ge_tx_done() argument
1377 struct pci_dev *pdev = ss->mgp->pdev; in myri10ge_tx_done()
1378 struct myri10ge_tx_buf *tx = &ss->tx; in myri10ge_tx_done()
1397 ss->stats.tx_bytes += skb->len; in myri10ge_tx_done()
1398 ss->stats.tx_packets++; in myri10ge_tx_done()
1414 dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss); in myri10ge_tx_done()
1424 if ((ss->mgp->dev->real_num_tx_queues > 1) && in myri10ge_tx_done()
1437 ss->mgp->running == MYRI10GE_ETH_RUNNING) { in myri10ge_tx_done()
1444 myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) in myri10ge_clean_rx_done() argument
1446 struct myri10ge_rx_done *rx_done = &ss->rx_done; in myri10ge_clean_rx_done()
1447 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_clean_rx_done()
1461 rx_ok = myri10ge_rx_done(ss, length, checksum); in myri10ge_clean_rx_done()
1470 ss->stats.rx_packets += rx_packets; in myri10ge_clean_rx_done()
1471 ss->stats.rx_bytes += rx_bytes; in myri10ge_clean_rx_done()
1474 if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh) in myri10ge_clean_rx_done()
1475 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, in myri10ge_clean_rx_done()
1477 if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh) in myri10ge_clean_rx_done()
1478 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); in myri10ge_clean_rx_done()
1485 struct mcp_irq_data *stats = mgp->ss[0].fw_stats; in myri10ge_check_statblock()
1520 struct myri10ge_slice_state *ss = in myri10ge_poll() local
1525 if (ss->mgp->dca_enabled) in myri10ge_poll()
1526 myri10ge_update_dca(ss); in myri10ge_poll()
1529 work_done = myri10ge_clean_rx_done(ss, budget); in myri10ge_poll()
1533 put_be32(htonl(3), ss->irq_claim); in myri10ge_poll()
1540 struct myri10ge_slice_state *ss = arg; in myri10ge_intr() local
1541 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_intr()
1542 struct mcp_irq_data *stats = ss->fw_stats; in myri10ge_intr()
1543 struct myri10ge_tx_buf *tx = &ss->tx; in myri10ge_intr()
1549 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) { in myri10ge_intr()
1550 napi_schedule(&ss->napi); in myri10ge_intr()
1561 napi_schedule(&ss->napi); in myri10ge_intr()
1578 myri10ge_tx_done(ss, (int)send_done_count); in myri10ge_intr()
1591 if (ss == mgp->ss) in myri10ge_intr()
1594 put_be32(htonl(3), ss->irq_claim + 1); in myri10ge_intr()
1705 ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1; in myri10ge_get_ringparam()
1706 ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1; in myri10ge_get_ringparam()
1708 ring->tx_max_pending = mgp->ss[0].tx.mask + 1; in myri10ge_get_ringparam()
1786 struct myri10ge_slice_state *ss; in myri10ge_get_ethtool_stats() local
1807 data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL); in myri10ge_get_ethtool_stats()
1813 ss = &mgp->ss[0]; in myri10ge_get_ethtool_stats()
1814 data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up); in myri10ge_get_ethtool_stats()
1815 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow); in myri10ge_get_ethtool_stats()
1817 (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered); in myri10ge_get_ethtool_stats()
1818 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause); in myri10ge_get_ethtool_stats()
1819 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy); in myri10ge_get_ethtool_stats()
1820 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32); in myri10ge_get_ethtool_stats()
1821 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered); in myri10ge_get_ethtool_stats()
1823 (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered); in myri10ge_get_ethtool_stats()
1824 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt); in myri10ge_get_ethtool_stats()
1825 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun); in myri10ge_get_ethtool_stats()
1826 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer); in myri10ge_get_ethtool_stats()
1827 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer); in myri10ge_get_ethtool_stats()
1830 ss = &mgp->ss[slice]; in myri10ge_get_ethtool_stats()
1832 data[i++] = (unsigned int)ss->tx.pkt_start; in myri10ge_get_ethtool_stats()
1833 data[i++] = (unsigned int)ss->tx.pkt_done; in myri10ge_get_ethtool_stats()
1834 data[i++] = (unsigned int)ss->tx.req; in myri10ge_get_ethtool_stats()
1835 data[i++] = (unsigned int)ss->tx.done; in myri10ge_get_ethtool_stats()
1836 data[i++] = (unsigned int)ss->rx_small.cnt; in myri10ge_get_ethtool_stats()
1837 data[i++] = (unsigned int)ss->rx_big.cnt; in myri10ge_get_ethtool_stats()
1838 data[i++] = (unsigned int)ss->tx.wake_queue; in myri10ge_get_ethtool_stats()
1839 data[i++] = (unsigned int)ss->tx.stop_queue; in myri10ge_get_ethtool_stats()
1840 data[i++] = (unsigned int)ss->tx.linearized; in myri10ge_get_ethtool_stats()
1928 static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss) in myri10ge_allocate_rings() argument
1930 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_allocate_rings()
1939 slice = ss - mgp->ss; in myri10ge_allocate_rings()
1951 ss->tx.mask = tx_ring_entries - 1; in myri10ge_allocate_rings()
1952 ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1; in myri10ge_allocate_rings()
1959 * sizeof(*ss->tx.req_list); in myri10ge_allocate_rings()
1960 ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
1961 if (ss->tx.req_bytes == NULL) in myri10ge_allocate_rings()
1965 ss->tx.req_list = (struct mcp_kreq_ether_send *) in myri10ge_allocate_rings()
1966 ALIGN((unsigned long)ss->tx.req_bytes, 8); in myri10ge_allocate_rings()
1967 ss->tx.queue_active = 0; in myri10ge_allocate_rings()
1969 bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow); in myri10ge_allocate_rings()
1970 ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
1971 if (ss->rx_small.shadow == NULL) in myri10ge_allocate_rings()
1974 bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow); in myri10ge_allocate_rings()
1975 ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
1976 if (ss->rx_big.shadow == NULL) in myri10ge_allocate_rings()
1981 bytes = tx_ring_entries * sizeof(*ss->tx.info); in myri10ge_allocate_rings()
1982 ss->tx.info = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
1983 if (ss->tx.info == NULL) in myri10ge_allocate_rings()
1986 bytes = rx_ring_entries * sizeof(*ss->rx_small.info); in myri10ge_allocate_rings()
1987 ss->rx_small.info = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
1988 if (ss->rx_small.info == NULL) in myri10ge_allocate_rings()
1991 bytes = rx_ring_entries * sizeof(*ss->rx_big.info); in myri10ge_allocate_rings()
1992 ss->rx_big.info = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
1993 if (ss->rx_big.info == NULL) in myri10ge_allocate_rings()
1997 ss->rx_big.cnt = 0; in myri10ge_allocate_rings()
1998 ss->rx_small.cnt = 0; in myri10ge_allocate_rings()
1999 ss->rx_big.fill_cnt = 0; in myri10ge_allocate_rings()
2000 ss->rx_small.fill_cnt = 0; in myri10ge_allocate_rings()
2001 ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; in myri10ge_allocate_rings()
2002 ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; in myri10ge_allocate_rings()
2003 ss->rx_small.watchdog_needed = 0; in myri10ge_allocate_rings()
2004 ss->rx_big.watchdog_needed = 0; in myri10ge_allocate_rings()
2006 ss->rx_small.fill_cnt = ss->rx_small.mask + 1; in myri10ge_allocate_rings()
2008 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, in myri10ge_allocate_rings()
2012 if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) { in myri10ge_allocate_rings()
2014 slice, ss->rx_small.fill_cnt); in myri10ge_allocate_rings()
2018 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); in myri10ge_allocate_rings()
2019 if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) { in myri10ge_allocate_rings()
2021 slice, ss->rx_big.fill_cnt); in myri10ge_allocate_rings()
2028 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { in myri10ge_allocate_rings()
2029 int idx = i & ss->rx_big.mask; in myri10ge_allocate_rings()
2030 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], in myri10ge_allocate_rings()
2032 put_page(ss->rx_big.info[idx].page); in myri10ge_allocate_rings()
2037 ss->rx_small.fill_cnt = ss->rx_small.cnt; in myri10ge_allocate_rings()
2038 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { in myri10ge_allocate_rings()
2039 int idx = i & ss->rx_small.mask; in myri10ge_allocate_rings()
2040 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], in myri10ge_allocate_rings()
2042 put_page(ss->rx_small.info[idx].page); in myri10ge_allocate_rings()
2045 kfree(ss->rx_big.info); in myri10ge_allocate_rings()
2048 kfree(ss->rx_small.info); in myri10ge_allocate_rings()
2051 kfree(ss->tx.info); in myri10ge_allocate_rings()
2054 kfree(ss->rx_big.shadow); in myri10ge_allocate_rings()
2057 kfree(ss->rx_small.shadow); in myri10ge_allocate_rings()
2060 kfree(ss->tx.req_bytes); in myri10ge_allocate_rings()
2061 ss->tx.req_bytes = NULL; in myri10ge_allocate_rings()
2062 ss->tx.req_list = NULL; in myri10ge_allocate_rings()
2068 static void myri10ge_free_rings(struct myri10ge_slice_state *ss) in myri10ge_free_rings() argument
2070 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_free_rings()
2076 if (ss->tx.req_list == NULL) in myri10ge_free_rings()
2079 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { in myri10ge_free_rings()
2080 idx = i & ss->rx_big.mask; in myri10ge_free_rings()
2081 if (i == ss->rx_big.fill_cnt - 1) in myri10ge_free_rings()
2082 ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; in myri10ge_free_rings()
2083 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], in myri10ge_free_rings()
2085 put_page(ss->rx_big.info[idx].page); in myri10ge_free_rings()
2089 ss->rx_small.fill_cnt = ss->rx_small.cnt; in myri10ge_free_rings()
2090 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { in myri10ge_free_rings()
2091 idx = i & ss->rx_small.mask; in myri10ge_free_rings()
2092 if (i == ss->rx_small.fill_cnt - 1) in myri10ge_free_rings()
2093 ss->rx_small.info[idx].page_offset = in myri10ge_free_rings()
2095 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], in myri10ge_free_rings()
2097 put_page(ss->rx_small.info[idx].page); in myri10ge_free_rings()
2099 tx = &ss->tx; in myri10ge_free_rings()
2110 ss->stats.tx_dropped++; in myri10ge_free_rings()
2125 kfree(ss->rx_big.info); in myri10ge_free_rings()
2127 kfree(ss->rx_small.info); in myri10ge_free_rings()
2129 kfree(ss->tx.info); in myri10ge_free_rings()
2131 kfree(ss->rx_big.shadow); in myri10ge_free_rings()
2133 kfree(ss->rx_small.shadow); in myri10ge_free_rings()
2135 kfree(ss->tx.req_bytes); in myri10ge_free_rings()
2136 ss->tx.req_bytes = NULL; in myri10ge_free_rings()
2137 ss->tx.req_list = NULL; in myri10ge_free_rings()
2143 struct myri10ge_slice_state *ss; in myri10ge_request_irq() local
2175 ss = &mgp->ss[i]; in myri10ge_request_irq()
2176 snprintf(ss->irq_desc, sizeof(ss->irq_desc), in myri10ge_request_irq()
2179 myri10ge_intr, 0, ss->irq_desc, in myri10ge_request_irq()
2180 ss); in myri10ge_request_irq()
2187 &mgp->ss[i]); in myri10ge_request_irq()
2196 mgp->dev->name, &mgp->ss[0]); in myri10ge_request_irq()
2213 free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]); in myri10ge_free_irq()
2215 free_irq(pdev->irq, &mgp->ss[0]); in myri10ge_free_irq()
2226 struct myri10ge_slice_state *ss; in myri10ge_get_txrx() local
2229 ss = &mgp->ss[slice]; in myri10ge_get_txrx()
2235 ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *) in myri10ge_get_txrx()
2241 ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *) in myri10ge_get_txrx()
2246 ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *) in myri10ge_get_txrx()
2249 ss->tx.send_go = (__iomem __be32 *) in myri10ge_get_txrx()
2251 ss->tx.send_stop = (__iomem __be32 *) in myri10ge_get_txrx()
2260 struct myri10ge_slice_state *ss; in myri10ge_set_stats() local
2263 ss = &mgp->ss[slice]; in myri10ge_set_stats()
2264 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus); in myri10ge_set_stats()
2265 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus); in myri10ge_set_stats()
2269 dma_addr_t bus = ss->fw_stats_bus; in myri10ge_set_stats()
2288 struct myri10ge_slice_state *ss; in myri10ge_open() local
2382 ss = &mgp->ss[slice]; in myri10ge_open()
2389 status = myri10ge_allocate_rings(ss); in myri10ge_open()
2404 napi_enable(&(ss)->napi); in myri10ge_open()
2452 napi_disable(&mgp->ss[slice].napi); in myri10ge_open()
2455 myri10ge_free_rings(&mgp->ss[i]); in myri10ge_open()
2474 if (mgp->ss[0].tx.req_bytes == NULL) in myri10ge_close()
2480 napi_disable(&mgp->ss[i].napi); in myri10ge_close()
2501 myri10ge_free_rings(&mgp->ss[i]); in myri10ge_close()
2617 struct myri10ge_slice_state *ss; in myri10ge_xmit() local
2632 ss = &mgp->ss[queue]; in myri10ge_xmit()
2634 tx = &ss->tx; in myri10ge_xmit()
2711 ss->stats.tx_dropped += 1; in myri10ge_xmit()
2876 ss->stats.tx_dropped += 1; in myri10ge_xmit()
2886 struct myri10ge_slice_state *ss; in myri10ge_sw_tso() local
2911 ss = &mgp->ss[skb_get_queue_mapping(skb)]; in myri10ge_sw_tso()
2913 ss->stats.tx_dropped += 1; in myri10ge_sw_tso()
2925 slice_stats = &mgp->ss[i].stats; in myri10ge_get_stats()
3336 myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed, in myri10ge_check_slice() argument
3339 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_check_slice()
3340 int slice = ss - mgp->ss; in myri10ge_check_slice()
3342 if (ss->tx.req != ss->tx.done && in myri10ge_check_slice()
3343 ss->tx.done == ss->watchdog_tx_done && in myri10ge_check_slice()
3344 ss->watchdog_tx_req != ss->watchdog_tx_done) { in myri10ge_check_slice()
3353 slice, ss->tx.queue_active, ss->tx.req, in myri10ge_check_slice()
3354 ss->tx.done, ss->tx.pkt_start, in myri10ge_check_slice()
3355 ss->tx.pkt_done, in myri10ge_check_slice()
3356 (int)ntohl(mgp->ss[slice].fw_stats-> in myri10ge_check_slice()
3359 ss->stuck = 1; in myri10ge_check_slice()
3362 if (ss->watchdog_tx_done != ss->tx.done || in myri10ge_check_slice()
3363 ss->watchdog_rx_done != ss->rx_done.cnt) { in myri10ge_check_slice()
3366 ss->watchdog_tx_done = ss->tx.done; in myri10ge_check_slice()
3367 ss->watchdog_tx_req = ss->tx.req; in myri10ge_check_slice()
3368 ss->watchdog_rx_done = ss->rx_done.cnt; in myri10ge_check_slice()
3379 struct myri10ge_slice_state *ss; in myri10ge_watchdog() local
3430 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause); in myri10ge_watchdog()
3432 ss = mgp->ss; in myri10ge_watchdog()
3433 if (ss->stuck) { in myri10ge_watchdog()
3434 myri10ge_check_slice(ss, &reset_needed, in myri10ge_watchdog()
3437 ss->stuck = 0; in myri10ge_watchdog()
3470 struct myri10ge_slice_state *ss; in myri10ge_watchdog_timer() local
3477 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause); in myri10ge_watchdog_timer()
3482 ss = &mgp->ss[i]; in myri10ge_watchdog_timer()
3483 if (ss->rx_small.watchdog_needed) { in myri10ge_watchdog_timer()
3484 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, in myri10ge_watchdog_timer()
3487 if (ss->rx_small.fill_cnt - ss->rx_small.cnt >= in myri10ge_watchdog_timer()
3489 ss->rx_small.watchdog_needed = 0; in myri10ge_watchdog_timer()
3491 if (ss->rx_big.watchdog_needed) { in myri10ge_watchdog_timer()
3492 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, in myri10ge_watchdog_timer()
3494 if (ss->rx_big.fill_cnt - ss->rx_big.cnt >= in myri10ge_watchdog_timer()
3496 ss->rx_big.watchdog_needed = 0; in myri10ge_watchdog_timer()
3498 myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt, in myri10ge_watchdog_timer()
3523 struct myri10ge_slice_state *ss; in myri10ge_free_slices() local
3528 if (mgp->ss == NULL) in myri10ge_free_slices()
3532 ss = &mgp->ss[i]; in myri10ge_free_slices()
3533 if (ss->rx_done.entry != NULL) { in myri10ge_free_slices()
3535 sizeof(*ss->rx_done.entry); in myri10ge_free_slices()
3537 ss->rx_done.entry, ss->rx_done.bus); in myri10ge_free_slices()
3538 ss->rx_done.entry = NULL; in myri10ge_free_slices()
3540 if (ss->fw_stats != NULL) { in myri10ge_free_slices()
3541 bytes = sizeof(*ss->fw_stats); in myri10ge_free_slices()
3543 ss->fw_stats, ss->fw_stats_bus); in myri10ge_free_slices()
3544 ss->fw_stats = NULL; in myri10ge_free_slices()
3546 __netif_napi_del(&ss->napi); in myri10ge_free_slices()
3548 /* Wait till napi structs are no longer used, and then free ss. */ in myri10ge_free_slices()
3550 kfree(mgp->ss); in myri10ge_free_slices()
3551 mgp->ss = NULL; in myri10ge_free_slices()
3556 struct myri10ge_slice_state *ss; in myri10ge_alloc_slices() local
3561 bytes = sizeof(*mgp->ss) * mgp->num_slices; in myri10ge_alloc_slices()
3562 mgp->ss = kzalloc(bytes, GFP_KERNEL); in myri10ge_alloc_slices()
3563 if (mgp->ss == NULL) { in myri10ge_alloc_slices()
3568 ss = &mgp->ss[i]; in myri10ge_alloc_slices()
3569 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); in myri10ge_alloc_slices()
3570 ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, in myri10ge_alloc_slices()
3571 &ss->rx_done.bus, in myri10ge_alloc_slices()
3573 if (ss->rx_done.entry == NULL) in myri10ge_alloc_slices()
3575 bytes = sizeof(*ss->fw_stats); in myri10ge_alloc_slices()
3576 ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes, in myri10ge_alloc_slices()
3577 &ss->fw_stats_bus, in myri10ge_alloc_slices()
3579 if (ss->fw_stats == NULL) in myri10ge_alloc_slices()
3581 ss->mgp = mgp; in myri10ge_alloc_slices()
3582 ss->dev = mgp->dev; in myri10ge_alloc_slices()
3583 netif_napi_add(ss->dev, &ss->napi, myri10ge_poll, in myri10ge_alloc_slices()
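 
A pattern worth noting in the matches above is the pointer-difference idiom "ss - mgp->ss" (see the hits in myri10ge_rx_done(), myri10ge_tx_done(), myri10ge_allocate_rings() and myri10ge_check_slice()): the per-slice state lives in one array hung off the device-private struct, and subtracting the array base from a slice pointer recovers the slice/queue index. The sketch below is illustrative only and is not the driver's code; the struct names (priv, slice_state), field names and slice_index() are invented for the example, and only the back-pointer plus pointer-subtraction idiom is taken from the listing.

	/*
	 * Minimal standalone sketch of the "ss - mgp->ss" idiom.
	 * Invented types; only the idiom itself mirrors the driver.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct priv;                          /* forward declaration for the back-pointer */

	struct slice_state {
		struct priv *mgp;             /* back-pointer to the owning device state */
		unsigned int rx_done_cnt;     /* stand-in for the per-slice counters */
	};

	struct priv {
		struct slice_state *ss;       /* array of per-slice state, one per queue */
		int num_slices;
	};

	static int slice_index(const struct slice_state *ss)
	{
		/* Subtracting the array base yields the slice number. */
		return (int)(ss - ss->mgp->ss);
	}

	int main(void)
	{
		struct priv mgp;
		int i;

		mgp.num_slices = 4;
		mgp.ss = calloc(mgp.num_slices, sizeof(*mgp.ss));
		if (mgp.ss == NULL)
			return 1;
		for (i = 0; i < mgp.num_slices; i++)
			mgp.ss[i].mgp = &mgp;     /* each slice points back at its device */

		printf("slice %d\n", slice_index(&mgp.ss[2]));   /* prints "slice 2" */
		free(mgp.ss);
		return 0;
	}

The same arithmetic is what lets the driver map a slice pointer to a netdev queue, e.g. netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss) and skb_record_rx_queue(skb, ss - &mgp->ss[0]) in the matches above.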