// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "rx_common.h"
#include "tx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
#include "siena_sriov.h"
#include "io.h"
#include "workarounds.h"

/* Falcon-architecture (SFC9000-family) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events.  The code identifies the event type and the
 * data identifies the queue/channel it refers to; the two are combined
 * into a single magic value by _EFX_CHANNEL_MAGIC() below.
 */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

/* Pack an 8-bit data field below the event code */
#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
/* Recover the event code from a magic value */
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

/* Write one qword entry of the hardware buffer table at @index */
static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Return true if @a and @b differ in any bit position selected by @mask */
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

/* Walk @regs, setting and clearing each testable (masked) bit in
 * isolation and reading the register back to confirm the write stuck.
 * The original register contents are restored after each register is
 * tested.  Returns 0 on success or -EIO on the first mismatch.
 */
int efx_farch_test_registers(struct efx_nic *efx,
			     const struct efx_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0;
	int i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		/* Restore the register's original value */
		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/
/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_WARN_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC.  Each table entry maps one
	 * EFX_BUF_SIZE chunk; the hardware takes the DMA address in 4K
	 * units (hence the >> 12).
	 */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	/* Nothing was mapped for this buffer */
	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	/* Clear the whole buffer ID range with a single hardware command */
	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
/* Allocate and zero DMA-coherent memory for a special buffer and claim a
 * range of buffer table IDs for it.  Returns 0 or -ENOMEM.  Does not
 * touch the hardware buffer table (see efx_init_special_buffer()).
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
#ifdef CONFIG_SFC_SIENA_SRIOV
	struct siena_nic_data *nic_data = efx->nic_data;
#endif
	len = ALIGN(len, EFX_BUF_SIZE);

	if (efx_siena_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EFX_BUF_SIZE;
	/* Hardware requires 4K alignment of the DMA address */
	BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SIENA_SRIOV
	/* Must not grow into the buffer table region reserved for VFs */
	BUG_ON(efx_siena_sriov_enabled(efx) &&
	       nic_data->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}

/* Release the DMA memory backing a special buffer (no-op if unallocated) */
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	efx_siena_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
					  const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	/* The descriptor itself occupies the low qword of the register */
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}


/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	tx_queue->xmit_pending = false;
	/* Nothing new to write */
	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
		return;

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Option descriptors are not supported on this architecture */
		EFX_WARN_ON_ONCE_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		/* Push the first new descriptor inline with the doorbell */
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_farch_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_farch_notify_tx_desc(tx_queue);
	}
}

/* Limit @len so that a descriptor starting at @dma_addr does not cross a
 * 4K (EFX_PAGE_SIZE) boundary.
 */
unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
				    dma_addr_t dma_addr, unsigned int len)
{
	/* Don't cross 4K boundaries with descriptors. */
	unsigned int limit = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	len = min(limit, len);

	return len;
}


/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	/* Derive checksum/priority type from the queue label bits */
	tx_queue->type = ((tx_queue->label & 1) ? EFX_TXQ_TYPE_OUTER_CSUM : 0) |
			 ((tx_queue->label & 2) ? EFX_TXQ_TYPE_HIGHPRI : 0);
	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

/* Map the TX descriptor ring and push its configuration to the NIC */
void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
	int csum = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->label,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	/* Checksum offload is disabled for non-csum queue types */
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, !csum);

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	EFX_POPULATE_OWORD_1(reg,
			     FRF_BZ_TX_PACE,
			     (tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
			     FFE_BZ_TX_PACE_OFF :
			     FFE_BZ_TX_PACE_RESERVED);
	efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);

	tx_queue->tso_version = 1;
}

/* Ask the hardware to flush (drain) a TX descriptor queue */
static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

/* Disable a TX queue on the NIC and unmap its descriptor ring */
void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	/* Build a descriptor for each buffer added since the last notify */
	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb(); /* Descriptors must be visible before the doorbell write */
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

/* Allocate hardware resources for an RX queue */
int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

/* Map the RX descriptor ring and push its configuration to the NIC */
void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool jumbo_en;

	/* For kernel-mode queues in Siena, the JUMBO flag enables scatter. */
	jumbo_en = efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, true,
			      FRF_AZ_RX_ISCSI_HDIG_EN, true,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

/* Ask the hardware to flush (drain) an RX descriptor queue */
static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

/* Disable an RX queue on the NIC and unmap its descriptor ring */
void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/
/* efx_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_farch_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_farch_flush_queues() */
	smp_mb();

	/* Wake when everything has drained, or when there is room in the
	 * flush fifo and more RX flushes are waiting to be issued.
	 */
	return (atomic_read(&efx->active_queues) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

/* Read back every TX queue's descriptor pointer table entry to check
 * whether its flush actually completed.  Returns true if all flushes
 * completed.  Queues whose flush completed without a completion event
 * are drained via a driver-generated magic event.
 */
static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				/* Still flushing, or never flushed */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment active_queues as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_farch_magic_event(channel,
						      EFX_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return i;
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel.
 */
*/ 64686094f7fSBen Hutchings static int efx_farch_do_flush(struct efx_nic *efx) 64786094f7fSBen Hutchings { 64886094f7fSBen Hutchings unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */ 64986094f7fSBen Hutchings struct efx_channel *channel; 65086094f7fSBen Hutchings struct efx_rx_queue *rx_queue; 65186094f7fSBen Hutchings struct efx_tx_queue *tx_queue; 65286094f7fSBen Hutchings int rc = 0; 65386094f7fSBen Hutchings 65486094f7fSBen Hutchings efx_for_each_channel(channel, efx) { 65586094f7fSBen Hutchings efx_for_each_channel_tx_queue(tx_queue, channel) { 65686094f7fSBen Hutchings efx_farch_flush_tx_queue(tx_queue); 65786094f7fSBen Hutchings } 65886094f7fSBen Hutchings efx_for_each_channel_rx_queue(rx_queue, channel) { 65986094f7fSBen Hutchings rx_queue->flush_pending = true; 66086094f7fSBen Hutchings atomic_inc(&efx->rxq_flush_pending); 66186094f7fSBen Hutchings } 66286094f7fSBen Hutchings } 66386094f7fSBen Hutchings 6643881d8abSAlexandre Rames while (timeout && atomic_read(&efx->active_queues) > 0) { 66586094f7fSBen Hutchings /* If SRIOV is enabled, then offload receive queue flushing to 66686094f7fSBen Hutchings * the firmware (though we will still have to poll for 66786094f7fSBen Hutchings * completion). If that fails, fall back to the old scheme. 
66886094f7fSBen Hutchings */ 669327c685eSShradha Shah if (efx_siena_sriov_enabled(efx)) { 6704d49e5cdSMartin Habets rc = efx_siena_mcdi_flush_rxqs(efx); 67186094f7fSBen Hutchings if (!rc) 67286094f7fSBen Hutchings goto wait; 67386094f7fSBen Hutchings } 67486094f7fSBen Hutchings 67586094f7fSBen Hutchings /* The hardware supports four concurrent rx flushes, each of 67686094f7fSBen Hutchings * which may need to be retried if there is an outstanding 67786094f7fSBen Hutchings * descriptor fetch 67886094f7fSBen Hutchings */ 67986094f7fSBen Hutchings efx_for_each_channel(channel, efx) { 68086094f7fSBen Hutchings efx_for_each_channel_rx_queue(rx_queue, channel) { 68186094f7fSBen Hutchings if (atomic_read(&efx->rxq_flush_outstanding) >= 68286094f7fSBen Hutchings EFX_RX_FLUSH_COUNT) 68386094f7fSBen Hutchings break; 68486094f7fSBen Hutchings 68586094f7fSBen Hutchings if (rx_queue->flush_pending) { 68686094f7fSBen Hutchings rx_queue->flush_pending = false; 68786094f7fSBen Hutchings atomic_dec(&efx->rxq_flush_pending); 68886094f7fSBen Hutchings atomic_inc(&efx->rxq_flush_outstanding); 68986094f7fSBen Hutchings efx_farch_flush_rx_queue(rx_queue); 69086094f7fSBen Hutchings } 69186094f7fSBen Hutchings } 69286094f7fSBen Hutchings } 69386094f7fSBen Hutchings 69486094f7fSBen Hutchings wait: 69586094f7fSBen Hutchings timeout = wait_event_timeout(efx->flush_wq, 69686094f7fSBen Hutchings efx_farch_flush_wake(efx), 69786094f7fSBen Hutchings timeout); 69886094f7fSBen Hutchings } 69986094f7fSBen Hutchings 7003881d8abSAlexandre Rames if (atomic_read(&efx->active_queues) && 70186094f7fSBen Hutchings !efx_check_tx_flush_complete(efx)) { 70286094f7fSBen Hutchings netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " 7033881d8abSAlexandre Rames "(rx %d+%d)\n", atomic_read(&efx->active_queues), 70486094f7fSBen Hutchings atomic_read(&efx->rxq_flush_outstanding), 70586094f7fSBen Hutchings atomic_read(&efx->rxq_flush_pending)); 70686094f7fSBen Hutchings rc = -ETIMEDOUT; 70786094f7fSBen 
Hutchings 7083881d8abSAlexandre Rames atomic_set(&efx->active_queues, 0); 70986094f7fSBen Hutchings atomic_set(&efx->rxq_flush_pending, 0); 71086094f7fSBen Hutchings atomic_set(&efx->rxq_flush_outstanding, 0); 71186094f7fSBen Hutchings } 71286094f7fSBen Hutchings 71386094f7fSBen Hutchings return rc; 71486094f7fSBen Hutchings } 71586094f7fSBen Hutchings 71686094f7fSBen Hutchings int efx_farch_fini_dmaq(struct efx_nic *efx) 71786094f7fSBen Hutchings { 71886094f7fSBen Hutchings struct efx_channel *channel; 71986094f7fSBen Hutchings struct efx_tx_queue *tx_queue; 72086094f7fSBen Hutchings struct efx_rx_queue *rx_queue; 72186094f7fSBen Hutchings int rc = 0; 72286094f7fSBen Hutchings 72386094f7fSBen Hutchings /* Do not attempt to write to the NIC during EEH recovery */ 72486094f7fSBen Hutchings if (efx->state != STATE_RECOVERY) { 72586094f7fSBen Hutchings /* Only perform flush if DMA is enabled */ 72686094f7fSBen Hutchings if (efx->pci_dev->is_busmaster) { 72786094f7fSBen Hutchings efx->type->prepare_flush(efx); 72886094f7fSBen Hutchings rc = efx_farch_do_flush(efx); 72986094f7fSBen Hutchings efx->type->finish_flush(efx); 73086094f7fSBen Hutchings } 73186094f7fSBen Hutchings 73286094f7fSBen Hutchings efx_for_each_channel(channel, efx) { 73386094f7fSBen Hutchings efx_for_each_channel_rx_queue(rx_queue, channel) 73486094f7fSBen Hutchings efx_farch_rx_fini(rx_queue); 73586094f7fSBen Hutchings efx_for_each_channel_tx_queue(tx_queue, channel) 73686094f7fSBen Hutchings efx_farch_tx_fini(tx_queue); 73786094f7fSBen Hutchings } 73886094f7fSBen Hutchings } 73986094f7fSBen Hutchings 74086094f7fSBen Hutchings return rc; 74186094f7fSBen Hutchings } 74286094f7fSBen Hutchings 743e283546cSEdward Cree /* Reset queue and flush accounting after FLR 744e283546cSEdward Cree * 745e283546cSEdward Cree * One possible cause of FLR recovery is that DMA may be failing (eg. 
if bus 746e283546cSEdward Cree * mastering was disabled), in which case we don't receive (RXQ) flush 747e283546cSEdward Cree * completion events. This means that efx->rxq_flush_outstanding remained at 4 748e283546cSEdward Cree * after the FLR; also, efx->active_queues was non-zero (as no flush completion 749e283546cSEdward Cree * events were received, and we didn't go through efx_check_tx_flush_complete()) 75071ad88f6SMartin Habets * If we don't fix this up, on the next call to efx_siena_realloc_channels() we 75171ad88f6SMartin Habets * won't flush any RX queues because efx->rxq_flush_outstanding is at the limit 75271ad88f6SMartin Habets * of 4 for batched flush requests; and the efx->active_queues gets messed up 75371ad88f6SMartin Habets * because we keep incrementing for the newly initialised queues, but it never 75471ad88f6SMartin Habets * went to zero previously. Then we get a timeout every time we try to restart 75571ad88f6SMartin Habets * the queues, as it doesn't go back to zero when we should be flushing the 75671ad88f6SMartin Habets * queues. 757e283546cSEdward Cree */ 758e283546cSEdward Cree void efx_farch_finish_flr(struct efx_nic *efx) 759e283546cSEdward Cree { 760e283546cSEdward Cree atomic_set(&efx->rxq_flush_pending, 0); 761e283546cSEdward Cree atomic_set(&efx->rxq_flush_outstanding, 0); 762e283546cSEdward Cree atomic_set(&efx->active_queues, 0); 763e283546cSEdward Cree } 764e283546cSEdward Cree 765e283546cSEdward Cree 76686094f7fSBen Hutchings /************************************************************************** 76786094f7fSBen Hutchings * 76886094f7fSBen Hutchings * Event queue processing 76986094f7fSBen Hutchings * Event queues are processed by per-channel tasklets. 
77086094f7fSBen Hutchings * 77186094f7fSBen Hutchings **************************************************************************/ 77286094f7fSBen Hutchings 77386094f7fSBen Hutchings /* Update a channel's event queue's read pointer (RPTR) register 77486094f7fSBen Hutchings * 77586094f7fSBen Hutchings * This writes the EVQ_RPTR_REG register for the specified channel's 77686094f7fSBen Hutchings * event queue. 77786094f7fSBen Hutchings */ 77886094f7fSBen Hutchings void efx_farch_ev_read_ack(struct efx_channel *channel) 77986094f7fSBen Hutchings { 78086094f7fSBen Hutchings efx_dword_t reg; 78186094f7fSBen Hutchings struct efx_nic *efx = channel->efx; 78286094f7fSBen Hutchings 78386094f7fSBen Hutchings EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, 78486094f7fSBen Hutchings channel->eventq_read_ptr & channel->eventq_mask); 78586094f7fSBen Hutchings 78686094f7fSBen Hutchings /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size 78786094f7fSBen Hutchings * of 4 bytes, but it is really 16 bytes just like later revisions. 
78886094f7fSBen Hutchings */ 78986094f7fSBen Hutchings efx_writed(efx, ®, 79086094f7fSBen Hutchings efx->type->evq_rptr_tbl_base + 79186094f7fSBen Hutchings FR_BZ_EVQ_RPTR_STEP * channel->channel); 79286094f7fSBen Hutchings } 79386094f7fSBen Hutchings 79486094f7fSBen Hutchings /* Use HW to insert a SW defined event */ 79586094f7fSBen Hutchings void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, 79686094f7fSBen Hutchings efx_qword_t *event) 79786094f7fSBen Hutchings { 79886094f7fSBen Hutchings efx_oword_t drv_ev_reg; 79986094f7fSBen Hutchings 80086094f7fSBen Hutchings BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || 80186094f7fSBen Hutchings FRF_AZ_DRV_EV_DATA_WIDTH != 64); 80286094f7fSBen Hutchings drv_ev_reg.u32[0] = event->u32[0]; 80386094f7fSBen Hutchings drv_ev_reg.u32[1] = event->u32[1]; 80486094f7fSBen Hutchings drv_ev_reg.u32[2] = 0; 80586094f7fSBen Hutchings drv_ev_reg.u32[3] = 0; 80686094f7fSBen Hutchings EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq); 80786094f7fSBen Hutchings efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV); 80886094f7fSBen Hutchings } 80986094f7fSBen Hutchings 81086094f7fSBen Hutchings static void efx_farch_magic_event(struct efx_channel *channel, u32 magic) 81186094f7fSBen Hutchings { 81286094f7fSBen Hutchings efx_qword_t event; 81386094f7fSBen Hutchings 81486094f7fSBen Hutchings EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE, 81586094f7fSBen Hutchings FSE_AZ_EV_CODE_DRV_GEN_EV, 81686094f7fSBen Hutchings FSF_AZ_DRV_GEN_EV_MAGIC, magic); 81786094f7fSBen Hutchings efx_farch_generate_event(channel->efx, channel->channel, &event); 81886094f7fSBen Hutchings } 81986094f7fSBen Hutchings 82086094f7fSBen Hutchings /* Handle a transmit completion event 82186094f7fSBen Hutchings * 82286094f7fSBen Hutchings * The NIC batches TX completion events; the message we receive is of 82386094f7fSBen Hutchings * the form "complete all TX events up to this index". 
82486094f7fSBen Hutchings */ 8255227ecccSBert Kenward static void 82686094f7fSBen Hutchings efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) 82786094f7fSBen Hutchings { 82886094f7fSBen Hutchings unsigned int tx_ev_desc_ptr; 82986094f7fSBen Hutchings unsigned int tx_ev_q_label; 83086094f7fSBen Hutchings struct efx_tx_queue *tx_queue; 83186094f7fSBen Hutchings struct efx_nic *efx = channel->efx; 83286094f7fSBen Hutchings 8336aa7de05SMark Rutland if (unlikely(READ_ONCE(efx->reset_pending))) 8345227ecccSBert Kenward return; 83586094f7fSBen Hutchings 83686094f7fSBen Hutchings if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { 83786094f7fSBen Hutchings /* Transmit completion */ 83886094f7fSBen Hutchings tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); 83986094f7fSBen Hutchings tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 84083b09a18SEdward Cree tx_queue = channel->tx_queue + 84183b09a18SEdward Cree (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL); 84271ad88f6SMartin Habets efx_siena_xmit_done(tx_queue, tx_ev_desc_ptr); 84386094f7fSBen Hutchings } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { 84486094f7fSBen Hutchings /* Rewrite the FIFO write pointer */ 84586094f7fSBen Hutchings tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 84683b09a18SEdward Cree tx_queue = channel->tx_queue + 84783b09a18SEdward Cree (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL); 84886094f7fSBen Hutchings 84986094f7fSBen Hutchings netif_tx_lock(efx->net_dev); 85086094f7fSBen Hutchings efx_farch_notify_tx_desc(tx_queue); 85186094f7fSBen Hutchings netif_tx_unlock(efx->net_dev); 852ab3b8250SBen Hutchings } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) { 85371ad88f6SMartin Habets efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR); 85486094f7fSBen Hutchings } else { 85586094f7fSBen Hutchings netif_err(efx, tx_err, efx->net_dev, 85686094f7fSBen Hutchings "channel %d unexpected TX event " 85786094f7fSBen 
Hutchings EFX_QWORD_FMT"\n", channel->channel, 85886094f7fSBen Hutchings EFX_QWORD_VAL(*event)); 85986094f7fSBen Hutchings } 86086094f7fSBen Hutchings } 86186094f7fSBen Hutchings 86286094f7fSBen Hutchings /* Detect errors included in the rx_evt_pkt_ok bit. */ 86386094f7fSBen Hutchings static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue, 86486094f7fSBen Hutchings const efx_qword_t *event) 86586094f7fSBen Hutchings { 86686094f7fSBen Hutchings struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 86786094f7fSBen Hutchings struct efx_nic *efx = rx_queue->efx; 86886094f7fSBen Hutchings bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; 86986094f7fSBen Hutchings bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; 8705a6681e2SEdward Cree bool rx_ev_frm_trunc, rx_ev_tobe_disc; 87186094f7fSBen Hutchings bool rx_ev_other_err, rx_ev_pause_frm; 87286094f7fSBen Hutchings 87386094f7fSBen Hutchings rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); 87486094f7fSBen Hutchings rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, 87586094f7fSBen Hutchings FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); 87686094f7fSBen Hutchings rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, 87786094f7fSBen Hutchings FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); 87886094f7fSBen Hutchings rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, 87986094f7fSBen Hutchings FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); 88086094f7fSBen Hutchings rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); 88186094f7fSBen Hutchings rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); 88286094f7fSBen Hutchings rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); 88386094f7fSBen Hutchings 88486094f7fSBen Hutchings /* Every error apart from tobe_disc and pause_frm */ 8855a6681e2SEdward Cree rx_ev_other_err = (rx_ev_tcp_udp_chksum_err | 88686094f7fSBen Hutchings rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | 88786094f7fSBen Hutchings rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); 
88886094f7fSBen Hutchings 88986094f7fSBen Hutchings /* Count errors that are not in MAC stats. Ignore expected 89086094f7fSBen Hutchings * checksum errors during self-test. */ 89186094f7fSBen Hutchings if (rx_ev_frm_trunc) 89286094f7fSBen Hutchings ++channel->n_rx_frm_trunc; 89386094f7fSBen Hutchings else if (rx_ev_tobe_disc) 89486094f7fSBen Hutchings ++channel->n_rx_tobe_disc; 89586094f7fSBen Hutchings else if (!efx->loopback_selftest) { 89686094f7fSBen Hutchings if (rx_ev_ip_hdr_chksum_err) 89786094f7fSBen Hutchings ++channel->n_rx_ip_hdr_chksum_err; 89886094f7fSBen Hutchings else if (rx_ev_tcp_udp_chksum_err) 89986094f7fSBen Hutchings ++channel->n_rx_tcp_udp_chksum_err; 90086094f7fSBen Hutchings } 90186094f7fSBen Hutchings 90286094f7fSBen Hutchings /* TOBE_DISC is expected on unicast mismatches; don't print out an 90386094f7fSBen Hutchings * error message. FRM_TRUNC indicates RXDP dropped the packet due 90486094f7fSBen Hutchings * to a FIFO overflow. 90586094f7fSBen Hutchings */ 90686094f7fSBen Hutchings #ifdef DEBUG 90786094f7fSBen Hutchings if (rx_ev_other_err && net_ratelimit()) { 90886094f7fSBen Hutchings netif_dbg(efx, rx_err, efx->net_dev, 90986094f7fSBen Hutchings " RX queue %d unexpected RX event " 910edd96fa0SEdward Cree EFX_QWORD_FMT "%s%s%s%s%s%s%s\n", 91186094f7fSBen Hutchings efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), 91286094f7fSBen Hutchings rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", 91386094f7fSBen Hutchings rx_ev_ip_hdr_chksum_err ? 91486094f7fSBen Hutchings " [IP_HDR_CHKSUM_ERR]" : "", 91586094f7fSBen Hutchings rx_ev_tcp_udp_chksum_err ? 91686094f7fSBen Hutchings " [TCP_UDP_CHKSUM_ERR]" : "", 91786094f7fSBen Hutchings rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", 91886094f7fSBen Hutchings rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", 91986094f7fSBen Hutchings rx_ev_tobe_disc ? " [TOBE_DISC]" : "", 92086094f7fSBen Hutchings rx_ev_pause_frm ? 
" [PAUSE]" : ""); 92186094f7fSBen Hutchings } 92235ff765fSEdward Cree #else 92335ff765fSEdward Cree (void) rx_ev_other_err; 92486094f7fSBen Hutchings #endif 92586094f7fSBen Hutchings 9266978729fSEdward Cree if (efx->net_dev->features & NETIF_F_RXALL) 9276978729fSEdward Cree /* don't discard frame for CRC error */ 9286978729fSEdward Cree rx_ev_eth_crc_err = false; 9296978729fSEdward Cree 93086094f7fSBen Hutchings /* The frame must be discarded if any of these are true. */ 9315a6681e2SEdward Cree return (rx_ev_eth_crc_err | rx_ev_frm_trunc | 93286094f7fSBen Hutchings rx_ev_tobe_disc | rx_ev_pause_frm) ? 93386094f7fSBen Hutchings EFX_RX_PKT_DISCARD : 0; 93486094f7fSBen Hutchings } 93586094f7fSBen Hutchings 93686094f7fSBen Hutchings /* Handle receive events that are not in-order. Return true if this 93786094f7fSBen Hutchings * can be handled as a partial packet discard, false if it's more 93886094f7fSBen Hutchings * serious. 93986094f7fSBen Hutchings */ 94086094f7fSBen Hutchings static bool 94186094f7fSBen Hutchings efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) 94286094f7fSBen Hutchings { 94386094f7fSBen Hutchings struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 94486094f7fSBen Hutchings struct efx_nic *efx = rx_queue->efx; 94586094f7fSBen Hutchings unsigned expected, dropped; 94686094f7fSBen Hutchings 94786094f7fSBen Hutchings if (rx_queue->scatter_n && 94886094f7fSBen Hutchings index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & 94986094f7fSBen Hutchings rx_queue->ptr_mask)) { 95086094f7fSBen Hutchings ++channel->n_rx_nodesc_trunc; 95186094f7fSBen Hutchings return true; 95286094f7fSBen Hutchings } 95386094f7fSBen Hutchings 95486094f7fSBen Hutchings expected = rx_queue->removed_count & rx_queue->ptr_mask; 95586094f7fSBen Hutchings dropped = (index - expected) & rx_queue->ptr_mask; 95686094f7fSBen Hutchings netif_info(efx, rx_err, efx->net_dev, 95786094f7fSBen Hutchings "dropped %d events (index=%d 
expected=%d)\n", 95886094f7fSBen Hutchings dropped, index, expected); 95986094f7fSBen Hutchings 96071ad88f6SMartin Habets efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE); 96186094f7fSBen Hutchings return false; 96286094f7fSBen Hutchings } 96386094f7fSBen Hutchings 96486094f7fSBen Hutchings /* Handle a packet received event 96586094f7fSBen Hutchings * 96686094f7fSBen Hutchings * The NIC gives a "discard" flag if it's a unicast packet with the 96786094f7fSBen Hutchings * wrong destination address 96886094f7fSBen Hutchings * Also "is multicast" and "matches multicast filter" flags can be used to 96986094f7fSBen Hutchings * discard non-matching multicast packets. 97086094f7fSBen Hutchings */ 97186094f7fSBen Hutchings static void 97286094f7fSBen Hutchings efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) 97386094f7fSBen Hutchings { 97486094f7fSBen Hutchings unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; 97586094f7fSBen Hutchings unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; 97686094f7fSBen Hutchings unsigned expected_ptr; 97786094f7fSBen Hutchings bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; 97886094f7fSBen Hutchings u16 flags; 97986094f7fSBen Hutchings struct efx_rx_queue *rx_queue; 98086094f7fSBen Hutchings struct efx_nic *efx = channel->efx; 98186094f7fSBen Hutchings 9826aa7de05SMark Rutland if (unlikely(READ_ONCE(efx->reset_pending))) 98386094f7fSBen Hutchings return; 98486094f7fSBen Hutchings 98586094f7fSBen Hutchings rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); 98686094f7fSBen Hutchings rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); 98786094f7fSBen Hutchings WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != 98886094f7fSBen Hutchings channel->channel); 98986094f7fSBen Hutchings 99086094f7fSBen Hutchings rx_queue = efx_channel_get_rx_queue(channel); 99186094f7fSBen Hutchings 99286094f7fSBen Hutchings rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); 99386094f7fSBen Hutchings expected_ptr = 
((rx_queue->removed_count + rx_queue->scatter_n) & 99486094f7fSBen Hutchings rx_queue->ptr_mask); 99586094f7fSBen Hutchings 99686094f7fSBen Hutchings /* Check for partial drops and other errors */ 99786094f7fSBen Hutchings if (unlikely(rx_ev_desc_ptr != expected_ptr) || 99886094f7fSBen Hutchings unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { 99986094f7fSBen Hutchings if (rx_ev_desc_ptr != expected_ptr && 100086094f7fSBen Hutchings !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr)) 100186094f7fSBen Hutchings return; 100286094f7fSBen Hutchings 100386094f7fSBen Hutchings /* Discard all pending fragments */ 100486094f7fSBen Hutchings if (rx_queue->scatter_n) { 100571ad88f6SMartin Habets efx_siena_rx_packet( 100686094f7fSBen Hutchings rx_queue, 100786094f7fSBen Hutchings rx_queue->removed_count & rx_queue->ptr_mask, 100886094f7fSBen Hutchings rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); 100986094f7fSBen Hutchings rx_queue->removed_count += rx_queue->scatter_n; 101086094f7fSBen Hutchings rx_queue->scatter_n = 0; 101186094f7fSBen Hutchings } 101286094f7fSBen Hutchings 101386094f7fSBen Hutchings /* Return if there is no new fragment */ 101486094f7fSBen Hutchings if (rx_ev_desc_ptr != expected_ptr) 101586094f7fSBen Hutchings return; 101686094f7fSBen Hutchings 101786094f7fSBen Hutchings /* Discard new fragment if not SOP */ 101886094f7fSBen Hutchings if (!rx_ev_sop) { 101971ad88f6SMartin Habets efx_siena_rx_packet( 102086094f7fSBen Hutchings rx_queue, 102186094f7fSBen Hutchings rx_queue->removed_count & rx_queue->ptr_mask, 102286094f7fSBen Hutchings 1, 0, EFX_RX_PKT_DISCARD); 102386094f7fSBen Hutchings ++rx_queue->removed_count; 102486094f7fSBen Hutchings return; 102586094f7fSBen Hutchings } 102686094f7fSBen Hutchings } 102786094f7fSBen Hutchings 102886094f7fSBen Hutchings ++rx_queue->scatter_n; 102986094f7fSBen Hutchings if (rx_ev_cont) 103086094f7fSBen Hutchings return; 103186094f7fSBen Hutchings 103286094f7fSBen Hutchings rx_ev_byte_cnt = 
EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); 103386094f7fSBen Hutchings rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); 103486094f7fSBen Hutchings rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); 103586094f7fSBen Hutchings 103686094f7fSBen Hutchings if (likely(rx_ev_pkt_ok)) { 103786094f7fSBen Hutchings /* If packet is marked as OK then we can rely on the 103886094f7fSBen Hutchings * hardware checksum and classification. 103986094f7fSBen Hutchings */ 104086094f7fSBen Hutchings flags = 0; 104186094f7fSBen Hutchings switch (rx_ev_hdr_type) { 104286094f7fSBen Hutchings case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: 104386094f7fSBen Hutchings flags |= EFX_RX_PKT_TCP; 1044df561f66SGustavo A. R. Silva fallthrough; 104586094f7fSBen Hutchings case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: 104686094f7fSBen Hutchings flags |= EFX_RX_PKT_CSUMMED; 1047df561f66SGustavo A. R. Silva fallthrough; 104886094f7fSBen Hutchings case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: 104986094f7fSBen Hutchings case FSE_AZ_RX_EV_HDR_TYPE_OTHER: 105086094f7fSBen Hutchings break; 105186094f7fSBen Hutchings } 105286094f7fSBen Hutchings } else { 105386094f7fSBen Hutchings flags = efx_farch_handle_rx_not_ok(rx_queue, event); 105486094f7fSBen Hutchings } 105586094f7fSBen Hutchings 105686094f7fSBen Hutchings /* Detect multicast packets that didn't match the filter */ 105786094f7fSBen Hutchings rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); 105886094f7fSBen Hutchings if (rx_ev_mcast_pkt) { 105986094f7fSBen Hutchings unsigned int rx_ev_mcast_hash_match = 106086094f7fSBen Hutchings EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); 106186094f7fSBen Hutchings 106286094f7fSBen Hutchings if (unlikely(!rx_ev_mcast_hash_match)) { 106386094f7fSBen Hutchings ++channel->n_rx_mcast_mismatch; 106486094f7fSBen Hutchings flags |= EFX_RX_PKT_DISCARD; 106586094f7fSBen Hutchings } 106686094f7fSBen Hutchings } 106786094f7fSBen Hutchings 106886094f7fSBen Hutchings channel->irq_mod_score 
+= 2; 106986094f7fSBen Hutchings 107086094f7fSBen Hutchings /* Handle received packet */ 107171ad88f6SMartin Habets efx_siena_rx_packet(rx_queue, 107286094f7fSBen Hutchings rx_queue->removed_count & rx_queue->ptr_mask, 107386094f7fSBen Hutchings rx_queue->scatter_n, rx_ev_byte_cnt, flags); 107486094f7fSBen Hutchings rx_queue->removed_count += rx_queue->scatter_n; 107586094f7fSBen Hutchings rx_queue->scatter_n = 0; 107686094f7fSBen Hutchings } 107786094f7fSBen Hutchings 107886094f7fSBen Hutchings /* If this flush done event corresponds to a &struct efx_tx_queue, then 107986094f7fSBen Hutchings * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue 108086094f7fSBen Hutchings * of all transmit completions. 108186094f7fSBen Hutchings */ 108286094f7fSBen Hutchings static void 108386094f7fSBen Hutchings efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) 108486094f7fSBen Hutchings { 108586094f7fSBen Hutchings struct efx_tx_queue *tx_queue; 10865b1faa92SEdward Cree struct efx_channel *channel; 108786094f7fSBen Hutchings int qid; 108886094f7fSBen Hutchings 108986094f7fSBen Hutchings qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); 109012804793SEdward Cree if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) { 10915b1faa92SEdward Cree channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL); 10925b1faa92SEdward Cree tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL); 10935b1faa92SEdward Cree if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) 109486094f7fSBen Hutchings efx_farch_magic_event(tx_queue->channel, 109586094f7fSBen Hutchings EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue)); 109686094f7fSBen Hutchings } 109786094f7fSBen Hutchings } 109886094f7fSBen Hutchings 109986094f7fSBen Hutchings /* If this flush done event corresponds to a &struct efx_rx_queue: If the flush 1100dbedd44eSJoe Perches * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add 110186094f7fSBen 
Hutchings * the RX queue back to the mask of RX queues in need of flushing. 110286094f7fSBen Hutchings */ 110386094f7fSBen Hutchings static void 110486094f7fSBen Hutchings efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) 110586094f7fSBen Hutchings { 110686094f7fSBen Hutchings struct efx_channel *channel; 110786094f7fSBen Hutchings struct efx_rx_queue *rx_queue; 110886094f7fSBen Hutchings int qid; 110986094f7fSBen Hutchings bool failed; 111086094f7fSBen Hutchings 111186094f7fSBen Hutchings qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); 111286094f7fSBen Hutchings failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); 111386094f7fSBen Hutchings if (qid >= efx->n_channels) 111486094f7fSBen Hutchings return; 111586094f7fSBen Hutchings channel = efx_get_channel(efx, qid); 111686094f7fSBen Hutchings if (!efx_channel_has_rx_queue(channel)) 111786094f7fSBen Hutchings return; 111886094f7fSBen Hutchings rx_queue = efx_channel_get_rx_queue(channel); 111986094f7fSBen Hutchings 112086094f7fSBen Hutchings if (failed) { 112186094f7fSBen Hutchings netif_info(efx, hw, efx->net_dev, 112286094f7fSBen Hutchings "RXQ %d flush retry\n", qid); 112386094f7fSBen Hutchings rx_queue->flush_pending = true; 112486094f7fSBen Hutchings atomic_inc(&efx->rxq_flush_pending); 112586094f7fSBen Hutchings } else { 112686094f7fSBen Hutchings efx_farch_magic_event(efx_rx_queue_channel(rx_queue), 112786094f7fSBen Hutchings EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)); 112886094f7fSBen Hutchings } 112986094f7fSBen Hutchings atomic_dec(&efx->rxq_flush_outstanding); 113086094f7fSBen Hutchings if (efx_farch_flush_wake(efx)) 113186094f7fSBen Hutchings wake_up(&efx->flush_wq); 113286094f7fSBen Hutchings } 113386094f7fSBen Hutchings 113486094f7fSBen Hutchings static void 113586094f7fSBen Hutchings efx_farch_handle_drain_event(struct efx_channel *channel) 113686094f7fSBen Hutchings { 113786094f7fSBen Hutchings struct efx_nic *efx = channel->efx; 113886094f7fSBen Hutchings 
11393881d8abSAlexandre Rames WARN_ON(atomic_read(&efx->active_queues) == 0); 11403881d8abSAlexandre Rames atomic_dec(&efx->active_queues); 114186094f7fSBen Hutchings if (efx_farch_flush_wake(efx)) 114286094f7fSBen Hutchings wake_up(&efx->flush_wq); 114386094f7fSBen Hutchings } 114486094f7fSBen Hutchings 114586094f7fSBen Hutchings static void efx_farch_handle_generated_event(struct efx_channel *channel, 114686094f7fSBen Hutchings efx_qword_t *event) 114786094f7fSBen Hutchings { 114886094f7fSBen Hutchings struct efx_nic *efx = channel->efx; 114986094f7fSBen Hutchings struct efx_rx_queue *rx_queue = 115086094f7fSBen Hutchings efx_channel_has_rx_queue(channel) ? 115186094f7fSBen Hutchings efx_channel_get_rx_queue(channel) : NULL; 115286094f7fSBen Hutchings unsigned magic, code; 115386094f7fSBen Hutchings 115486094f7fSBen Hutchings magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); 115586094f7fSBen Hutchings code = _EFX_CHANNEL_MAGIC_CODE(magic); 115686094f7fSBen Hutchings 115786094f7fSBen Hutchings if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) { 115886094f7fSBen Hutchings channel->event_test_cpu = raw_smp_processor_id(); 115986094f7fSBen Hutchings } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) { 116086094f7fSBen Hutchings /* The queue must be empty, so we won't receive any rx 116186094f7fSBen Hutchings * events, so efx_process_channel() won't refill the 116286094f7fSBen Hutchings * queue. 
/* Handle a hardware "driver" event on @channel.
 *
 * Driver events carry a sub-code (FSF_AZ_DRIVER_EV_SUBCODE) and sub-data
 * (typically a queue number).  Flush-done events feed the queue-flush state
 * machine (and, with SR-IOV, the VF flush tracking); descriptor fetch errors
 * and RX_RESET trigger a scheduled reset; the remaining codes are purely
 * informational and only logged.
 */
static void
efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_tx_flush_done(efx, event);
#ifdef CONFIG_SFC_SIENA_SRIOV
		/* A VF's TX queue may have flushed; let the SR-IOV code check */
		efx_siena_sriov_tx_flush_done(efx, event);
#endif
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_rx_flush_done(efx, event);
#ifdef CONFIG_SFC_SIENA_SRIOV
		efx_siena_sriov_rx_flush_done(efx, event);
#endif
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		/* Hardware signalled an RX reset; this is fatal for the port */
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		/* Queues below EFX_VI_BASE belong to the PF; above are VFs */
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		}
#ifdef CONFIG_SFC_SIENA_SRIOV
		else
			efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
#endif
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
				  " TX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		}
#ifdef CONFIG_SFC_SIENA_SRIOV
		else
			efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
#endif
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}
/* Process events on @channel's event queue, up to @budget RX events.
 *
 * Events are consumed in ring order starting at eventq_read_ptr; each one
 * is cleared (set to all-ones, the "empty" pattern) before being dispatched
 * by its FSF_AZ_EV_CODE.  Only RX events count against the NAPI budget.
 * The read pointer is written back once at the end, so a partial run
 * resumes where it left off.
 *
 * Returns the number of RX events processed (the NAPI "spent" count).
 */
int efx_farch_ev_process(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int spent = 0;

	if (budget <= 0)
		return spent;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_farch_handle_rx_event(channel, &event);
			/* Only RX events are budgeted */
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			efx_farch_handle_tx_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_farch_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_farch_handle_driver_event(channel, &event);
			break;
#ifdef CONFIG_SFC_SIENA_SRIOV
		case FSE_CZ_EV_CODE_USER_EV:
			efx_siena_sriov_event(channel, &event);
			break;
#endif
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_siena_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			/* Handled by the NIC-type hook if present; otherwise
			 * fall through and report it as unknown.
			 */
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			fallthrough;
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
Hutchings break; 13147fa8d547SShradha Shah #endif 131586094f7fSBen Hutchings case FSE_CZ_EV_CODE_MCDI_EV: 13164d49e5cdSMartin Habets efx_siena_mcdi_process_event(channel, &event); 131786094f7fSBen Hutchings break; 131886094f7fSBen Hutchings case FSE_AZ_EV_CODE_GLOBAL_EV: 131986094f7fSBen Hutchings if (efx->type->handle_global_event && 132086094f7fSBen Hutchings efx->type->handle_global_event(channel, &event)) 132186094f7fSBen Hutchings break; 1322df561f66SGustavo A. R. Silva fallthrough; 132386094f7fSBen Hutchings default: 132486094f7fSBen Hutchings netif_err(channel->efx, hw, channel->efx->net_dev, 132586094f7fSBen Hutchings "channel %d unknown event type %d (data " 132686094f7fSBen Hutchings EFX_QWORD_FMT ")\n", channel->channel, 132786094f7fSBen Hutchings ev_code, EFX_QWORD_VAL(event)); 132886094f7fSBen Hutchings } 132986094f7fSBen Hutchings } 133086094f7fSBen Hutchings 133186094f7fSBen Hutchings out: 133286094f7fSBen Hutchings channel->eventq_read_ptr = read_ptr; 133386094f7fSBen Hutchings return spent; 133486094f7fSBen Hutchings } 133586094f7fSBen Hutchings 133686094f7fSBen Hutchings /* Allocate buffer table entries for event queue */ 133786094f7fSBen Hutchings int efx_farch_ev_probe(struct efx_channel *channel) 133886094f7fSBen Hutchings { 133986094f7fSBen Hutchings struct efx_nic *efx = channel->efx; 134086094f7fSBen Hutchings unsigned entries; 134186094f7fSBen Hutchings 134286094f7fSBen Hutchings entries = channel->eventq_mask + 1; 134386094f7fSBen Hutchings return efx_alloc_special_buffer(efx, &channel->eventq, 134486094f7fSBen Hutchings entries * sizeof(efx_qword_t)); 134586094f7fSBen Hutchings } 134686094f7fSBen Hutchings 1347261e4d96SJon Cooper int efx_farch_ev_init(struct efx_channel *channel) 134886094f7fSBen Hutchings { 134986094f7fSBen Hutchings efx_oword_t reg; 135086094f7fSBen Hutchings struct efx_nic *efx = channel->efx; 135186094f7fSBen Hutchings 135286094f7fSBen Hutchings netif_dbg(efx, hw, efx->net_dev, 135386094f7fSBen Hutchings "channel %d 
/* Initialise @channel's event queue on the hardware.
 *
 * Sequence matters: disable the per-queue timer, pin (map) the special
 * buffer, pre-fill it with all-ones ("no event" pattern), then enable the
 * queue by writing its size and buffer base to the EVQ pointer table.
 *
 * Always returns 0 (kept for symmetry with other ev_init implementations).
 */
int efx_farch_ev_init(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	/* Disable the timer for this queue before enabling the queue */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_CZ_TIMER_Q_EN, 1,
			     FRF_CZ_HOST_NOTIFY_MODE, 0,
			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	return 0;
}
®, FR_BZ_TIMER_TBL, channel->channel); 139086094f7fSBen Hutchings 139186094f7fSBen Hutchings /* Unpin event queue */ 139286094f7fSBen Hutchings efx_fini_special_buffer(efx, &channel->eventq); 139386094f7fSBen Hutchings } 139486094f7fSBen Hutchings 139586094f7fSBen Hutchings /* Free buffers backing event queue */ 139686094f7fSBen Hutchings void efx_farch_ev_remove(struct efx_channel *channel) 139786094f7fSBen Hutchings { 139886094f7fSBen Hutchings efx_free_special_buffer(channel->efx, &channel->eventq); 139986094f7fSBen Hutchings } 140086094f7fSBen Hutchings 140186094f7fSBen Hutchings 140286094f7fSBen Hutchings void efx_farch_ev_test_generate(struct efx_channel *channel) 140386094f7fSBen Hutchings { 140486094f7fSBen Hutchings efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel)); 140586094f7fSBen Hutchings } 140686094f7fSBen Hutchings 140786094f7fSBen Hutchings void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue) 140886094f7fSBen Hutchings { 140986094f7fSBen Hutchings efx_farch_magic_event(efx_rx_queue_channel(rx_queue), 141086094f7fSBen Hutchings EFX_CHANNEL_MAGIC_FILL(rx_queue)); 141186094f7fSBen Hutchings } 141286094f7fSBen Hutchings 141386094f7fSBen Hutchings /************************************************************************** 141486094f7fSBen Hutchings * 141586094f7fSBen Hutchings * Hardware interrupts 141686094f7fSBen Hutchings * The hardware interrupt handler does very little work; all the event 141786094f7fSBen Hutchings * queue processing is carried out by per-channel tasklets. 
141886094f7fSBen Hutchings * 141986094f7fSBen Hutchings **************************************************************************/ 142086094f7fSBen Hutchings 142186094f7fSBen Hutchings /* Enable/disable/generate interrupts */ 142286094f7fSBen Hutchings static inline void efx_farch_interrupts(struct efx_nic *efx, 142386094f7fSBen Hutchings bool enabled, bool force) 142486094f7fSBen Hutchings { 142586094f7fSBen Hutchings efx_oword_t int_en_reg_ker; 142686094f7fSBen Hutchings 142786094f7fSBen Hutchings EFX_POPULATE_OWORD_3(int_en_reg_ker, 142886094f7fSBen Hutchings FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, 142986094f7fSBen Hutchings FRF_AZ_KER_INT_KER, force, 143086094f7fSBen Hutchings FRF_AZ_DRV_INT_EN_KER, enabled); 143186094f7fSBen Hutchings efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); 143286094f7fSBen Hutchings } 143386094f7fSBen Hutchings 143486094f7fSBen Hutchings void efx_farch_irq_enable_master(struct efx_nic *efx) 143586094f7fSBen Hutchings { 143686094f7fSBen Hutchings EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); 143786094f7fSBen Hutchings wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ 143886094f7fSBen Hutchings 143986094f7fSBen Hutchings efx_farch_interrupts(efx, true, false); 144086094f7fSBen Hutchings } 144186094f7fSBen Hutchings 144286094f7fSBen Hutchings void efx_farch_irq_disable_master(struct efx_nic *efx) 144386094f7fSBen Hutchings { 144486094f7fSBen Hutchings /* Disable interrupts */ 144586094f7fSBen Hutchings efx_farch_interrupts(efx, false, false); 144686094f7fSBen Hutchings } 144786094f7fSBen Hutchings 144886094f7fSBen Hutchings /* Generate a test interrupt 144986094f7fSBen Hutchings * Interrupt must already have been enabled, otherwise nasty things 145086094f7fSBen Hutchings * may happen. 
145186094f7fSBen Hutchings */ 1452942e298eSJon Cooper int efx_farch_irq_test_generate(struct efx_nic *efx) 145386094f7fSBen Hutchings { 145486094f7fSBen Hutchings efx_farch_interrupts(efx, true, true); 1455942e298eSJon Cooper return 0; 145686094f7fSBen Hutchings } 145786094f7fSBen Hutchings 145886094f7fSBen Hutchings /* Process a fatal interrupt 145986094f7fSBen Hutchings * Disable bus mastering ASAP and schedule a reset 146086094f7fSBen Hutchings */ 146186094f7fSBen Hutchings irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx) 146286094f7fSBen Hutchings { 146386094f7fSBen Hutchings efx_oword_t *int_ker = efx->irq_status.addr; 146486094f7fSBen Hutchings efx_oword_t fatal_intr; 146586094f7fSBen Hutchings int error, mem_perr; 146686094f7fSBen Hutchings 146786094f7fSBen Hutchings efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); 146886094f7fSBen Hutchings error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); 146986094f7fSBen Hutchings 147086094f7fSBen Hutchings netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " 147186094f7fSBen Hutchings EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), 147286094f7fSBen Hutchings EFX_OWORD_VAL(fatal_intr), 147386094f7fSBen Hutchings error ? 
"disabling bus mastering" : "no recognised error"); 147486094f7fSBen Hutchings 147586094f7fSBen Hutchings /* If this is a memory parity error dump which blocks are offending */ 147686094f7fSBen Hutchings mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || 147786094f7fSBen Hutchings EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); 147886094f7fSBen Hutchings if (mem_perr) { 147986094f7fSBen Hutchings efx_oword_t reg; 148086094f7fSBen Hutchings efx_reado(efx, ®, FR_AZ_MEM_STAT); 148186094f7fSBen Hutchings netif_err(efx, hw, efx->net_dev, 148286094f7fSBen Hutchings "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", 148386094f7fSBen Hutchings EFX_OWORD_VAL(reg)); 148486094f7fSBen Hutchings } 148586094f7fSBen Hutchings 148686094f7fSBen Hutchings /* Disable both devices */ 148786094f7fSBen Hutchings pci_clear_master(efx->pci_dev); 148886094f7fSBen Hutchings efx_farch_irq_disable_master(efx); 148986094f7fSBen Hutchings 149086094f7fSBen Hutchings /* Count errors and reset or disable the NIC accordingly */ 149186094f7fSBen Hutchings if (efx->int_error_count == 0 || 149286094f7fSBen Hutchings time_after(jiffies, efx->int_error_expire)) { 149386094f7fSBen Hutchings efx->int_error_count = 0; 149486094f7fSBen Hutchings efx->int_error_expire = 149586094f7fSBen Hutchings jiffies + EFX_INT_ERROR_EXPIRE * HZ; 149686094f7fSBen Hutchings } 149786094f7fSBen Hutchings if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { 149886094f7fSBen Hutchings netif_err(efx, hw, efx->net_dev, 149986094f7fSBen Hutchings "SYSTEM ERROR - reset scheduled\n"); 150071ad88f6SMartin Habets efx_siena_schedule_reset(efx, RESET_TYPE_INT_ERROR); 150186094f7fSBen Hutchings } else { 150286094f7fSBen Hutchings netif_err(efx, hw, efx->net_dev, 150386094f7fSBen Hutchings "SYSTEM ERROR - max number of errors seen." 
150486094f7fSBen Hutchings "NIC will be disabled\n"); 150571ad88f6SMartin Habets efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE); 150686094f7fSBen Hutchings } 150786094f7fSBen Hutchings 150886094f7fSBen Hutchings return IRQ_HANDLED; 150986094f7fSBen Hutchings } 151086094f7fSBen Hutchings 151186094f7fSBen Hutchings /* Handle a legacy interrupt 151286094f7fSBen Hutchings * Acknowledges the interrupt and schedule event queue processing. 151386094f7fSBen Hutchings */ 151486094f7fSBen Hutchings irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) 151586094f7fSBen Hutchings { 151686094f7fSBen Hutchings struct efx_nic *efx = dev_id; 15176aa7de05SMark Rutland bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); 151886094f7fSBen Hutchings efx_oword_t *int_ker = efx->irq_status.addr; 151986094f7fSBen Hutchings irqreturn_t result = IRQ_NONE; 152086094f7fSBen Hutchings struct efx_channel *channel; 152186094f7fSBen Hutchings efx_dword_t reg; 152286094f7fSBen Hutchings u32 queues; 152386094f7fSBen Hutchings int syserr; 152486094f7fSBen Hutchings 152586094f7fSBen Hutchings /* Read the ISR which also ACKs the interrupts */ 152686094f7fSBen Hutchings efx_readd(efx, ®, FR_BZ_INT_ISR0); 152786094f7fSBen Hutchings queues = EFX_EXTRACT_DWORD(reg, 0, 31); 152886094f7fSBen Hutchings 152986094f7fSBen Hutchings /* Legacy interrupts are disabled too late by the EEH kernel 153086094f7fSBen Hutchings * code. Disable them earlier. 153186094f7fSBen Hutchings * If an EEH error occurred, the read will have returned all ones. 
/* Handle a legacy (INTx) interrupt.
 *
 * Reads FR_BZ_INT_ISR0, which both reports and acknowledges the pending
 * per-queue interrupt bits.  Because the line may be shared with other
 * devices, IRQ_HANDLED is returned only when the read shows work (with a
 * single-shot allowance for the hardware quirk where the ISR reads zero
 * once — SF bug 15783).
 *
 * NOTE(review): "&reg" appears mojibaked as "®" in this extracted copy of
 * the original; the intended token is the address of the local @reg.
 */
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Legacy interrupts are disabled too late by the EEH kernel
	 * code. Disable them earlier.
	 * If an EEH error occurred, the read will have returned all ones.
	 */
	if (EFX_DWORD_IS_ALL_ONES(reg) && efx_siena_try_recovery(efx) &&
	    !efx->eeh_disabled_legacy_irq) {
		disable_irq_nosync(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = true;
	}

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level) && soft_enabled) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				if (queues & 1)
					efx_schedule_channel_irq(channel);
				queues >>= 1;
			}
		}
		result = IRQ_HANDLED;

	} else {
		efx_qword_t *event;

		/* Legacy ISR read can return zero once (SF bug 15783) */

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				event = efx_event(channel,
						  channel->eventq_read_ptr);
				if (efx_event_present(event))
					efx_schedule_channel_irq(channel);
				else
					efx_farch_ev_read_ack(channel);
			}
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return result;
}
/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt. This routine schedules event
 * queue processing. No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Interrupts may fire before the driver is ready for them */
	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Handle non-event-queue sources: only the designated irq_level
	 * vector carries the fatal-interrupt indication.
	 */
	if (context->index == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(efx->channel[context->index]);

	return IRQ_HANDLED;
}
162786094f7fSBen Hutchings * This maps from the hash value of the packet to RXQ 162886094f7fSBen Hutchings */ 162986094f7fSBen Hutchings void efx_farch_rx_push_indir_table(struct efx_nic *efx) 163086094f7fSBen Hutchings { 163186094f7fSBen Hutchings size_t i = 0; 163286094f7fSBen Hutchings efx_dword_t dword; 163386094f7fSBen Hutchings 163442356d9aSEdward Cree BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != 163586094f7fSBen Hutchings FR_BZ_RX_INDIRECTION_TBL_ROWS); 163686094f7fSBen Hutchings 163786094f7fSBen Hutchings for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { 163886094f7fSBen Hutchings EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, 163942356d9aSEdward Cree efx->rss_context.rx_indir_table[i]); 164086094f7fSBen Hutchings efx_writed(efx, &dword, 164186094f7fSBen Hutchings FR_BZ_RX_INDIRECTION_TBL + 164286094f7fSBen Hutchings FR_BZ_RX_INDIRECTION_TBL_STEP * i); 164386094f7fSBen Hutchings } 164486094f7fSBen Hutchings } 164586094f7fSBen Hutchings 1646a707d188SEdward Cree void efx_farch_rx_pull_indir_table(struct efx_nic *efx) 1647a707d188SEdward Cree { 1648a707d188SEdward Cree size_t i = 0; 1649a707d188SEdward Cree efx_dword_t dword; 1650a707d188SEdward Cree 165142356d9aSEdward Cree BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != 1652a707d188SEdward Cree FR_BZ_RX_INDIRECTION_TBL_ROWS); 1653a707d188SEdward Cree 1654a707d188SEdward Cree for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { 1655a707d188SEdward Cree efx_readd(efx, &dword, 1656a707d188SEdward Cree FR_BZ_RX_INDIRECTION_TBL + 1657a707d188SEdward Cree FR_BZ_RX_INDIRECTION_TBL_STEP * i); 165842356d9aSEdward Cree efx->rss_context.rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE); 1659a707d188SEdward Cree } 1660a707d188SEdward Cree } 1661a707d188SEdward Cree 166286094f7fSBen Hutchings /* Looks at available SRAM resources and works out how many queues we 166386094f7fSBen Hutchings * can support, and where things like descriptor caches should live. 
/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 *
 * With SR-IOV enabled, any buffer-table space left between the PF's
 * queues and the descriptor caches is divided among VFs; the VF count
 * is clamped down if the free space (or the VI address range) cannot
 * accommodate the requested number.
 */
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count, total_tx_channels;
#ifdef CONFIG_SFC_SIENA_SRIOV
	struct siena_nic_data *nic_data;
	unsigned buftbl_min;
#endif

	total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels;
	/* One VI per channel, or per TX queue, whichever needs more */
	vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL);

#ifdef CONFIG_SFC_SIENA_SRIOV
	nic_data = efx->nic_data;
	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
		       total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_DMAQ_SIZE +
		       efx->n_channels * EFX_MAX_EVQ_SIZE)
		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx)) {
			unsigned vi_dc_entries, buftbl_free;
			unsigned entries_per_vf, vf_limit;

			nic_data->vf_buftbl_base = buftbl_min;

			vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
			vi_count = max(vi_count, EFX_VI_BASE);
			/* Buffer-table space left over for VFs */
			buftbl_free = (sram_lim_qw - buftbl_min -
				       vi_count * vi_dc_entries);

			entries_per_vf = ((vi_dc_entries +
					   EFX_VF_BUFTBL_PER_VI) *
					  efx_vf_size(efx));
			/* Limited by both free buffer table and VI space */
			vf_limit = min(buftbl_free / entries_per_vf,
				       (1024U - EFX_VI_BASE) >> efx->vi_scale);

			if (efx->vf_count > vf_limit) {
				netif_err(efx, probe, efx->net_dev,
					  "Reducing VF count from %d to %d\n",
					  efx->vf_count, vf_limit);
				efx->vf_count = vf_limit;
			}
			vi_count += efx->vf_count * efx_vf_size(efx);
		}
	}
#endif

	/* Descriptor caches are placed at the top of SRAM, TX above RX */
	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}
172286094f7fSBen Hutchings } 172386094f7fSBen Hutchings 172486094f7fSBen Hutchings u32 efx_farch_fpga_ver(struct efx_nic *efx) 172586094f7fSBen Hutchings { 172686094f7fSBen Hutchings efx_oword_t altera_build; 172786094f7fSBen Hutchings efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); 172886094f7fSBen Hutchings return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); 172986094f7fSBen Hutchings } 173086094f7fSBen Hutchings 173186094f7fSBen Hutchings void efx_farch_init_common(struct efx_nic *efx) 173286094f7fSBen Hutchings { 173386094f7fSBen Hutchings efx_oword_t temp; 173486094f7fSBen Hutchings 173586094f7fSBen Hutchings /* Set positions of descriptor caches in SRAM. */ 173686094f7fSBen Hutchings EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); 173786094f7fSBen Hutchings efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); 173886094f7fSBen Hutchings EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); 173986094f7fSBen Hutchings efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); 174086094f7fSBen Hutchings 174186094f7fSBen Hutchings /* Set TX descriptor cache size. */ 174286094f7fSBen Hutchings BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); 174386094f7fSBen Hutchings EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); 174486094f7fSBen Hutchings efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); 174586094f7fSBen Hutchings 174686094f7fSBen Hutchings /* Set RX descriptor cache size. Set low watermark to size-8, as 174786094f7fSBen Hutchings * this allows most efficient prefetching. 
/* One-time common hardware initialisation for Falcon-architecture NICs:
 * places the descriptor caches in SRAM (using the bases computed by
 * efx_farch_dimension_resources()), programs the fatal-interrupt mask and
 * INT_KER address, and sets the TX datapath workaround/tuning bits.
 */
void efx_farch_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size. Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	/* The register holds a *disable* mask, so invert before writing */
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	EFX_POPULATE_OWORD_4(temp,
			     /* Default values */
			     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
			     FRF_BZ_TX_PACE_SB_AF, 0xb,
			     FRF_BZ_TX_PACE_FB_BASE, 0,
			     /* Allow large pace values in the fast bin. */
			     FRF_BZ_TX_PACE_BIN_TH,
			     FFE_BZ_TX_PACE_RESERVED);
	efx_writeo(efx, &temp, FR_BZ_TX_PACE);
}
*/ 1835add72477SBen Hutchings #define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5 1836add72477SBen Hutchings 1837add72477SBen Hutchings enum efx_farch_filter_type { 1838add72477SBen Hutchings EFX_FARCH_FILTER_TCP_FULL = 0, 1839add72477SBen Hutchings EFX_FARCH_FILTER_TCP_WILD, 1840add72477SBen Hutchings EFX_FARCH_FILTER_UDP_FULL, 1841add72477SBen Hutchings EFX_FARCH_FILTER_UDP_WILD, 1842add72477SBen Hutchings EFX_FARCH_FILTER_MAC_FULL = 4, 1843add72477SBen Hutchings EFX_FARCH_FILTER_MAC_WILD, 1844add72477SBen Hutchings EFX_FARCH_FILTER_UC_DEF = 8, 1845add72477SBen Hutchings EFX_FARCH_FILTER_MC_DEF, 1846add72477SBen Hutchings EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */ 1847add72477SBen Hutchings }; 1848add72477SBen Hutchings 1849add72477SBen Hutchings enum efx_farch_filter_table_id { 1850add72477SBen Hutchings EFX_FARCH_FILTER_TABLE_RX_IP = 0, 1851add72477SBen Hutchings EFX_FARCH_FILTER_TABLE_RX_MAC, 1852add72477SBen Hutchings EFX_FARCH_FILTER_TABLE_RX_DEF, 1853add72477SBen Hutchings EFX_FARCH_FILTER_TABLE_TX_MAC, 1854add72477SBen Hutchings EFX_FARCH_FILTER_TABLE_COUNT, 1855add72477SBen Hutchings }; 1856add72477SBen Hutchings 1857add72477SBen Hutchings enum efx_farch_filter_index { 1858add72477SBen Hutchings EFX_FARCH_FILTER_INDEX_UC_DEF, 1859add72477SBen Hutchings EFX_FARCH_FILTER_INDEX_MC_DEF, 1860add72477SBen Hutchings EFX_FARCH_FILTER_SIZE_RX_DEF, 1861add72477SBen Hutchings }; 1862add72477SBen Hutchings 1863add72477SBen Hutchings struct efx_farch_filter_spec { 1864add72477SBen Hutchings u8 type:4; 1865add72477SBen Hutchings u8 priority:4; 1866add72477SBen Hutchings u8 flags; 1867add72477SBen Hutchings u16 dmaq_id; 1868add72477SBen Hutchings u32 data[3]; 1869add72477SBen Hutchings }; 1870add72477SBen Hutchings 1871add72477SBen Hutchings struct efx_farch_filter_table { 1872add72477SBen Hutchings enum efx_farch_filter_table_id id; 1873add72477SBen Hutchings u32 offset; /* address of table relative to BAR */ 1874add72477SBen Hutchings unsigned size; /* 
number of entries */ 1875add72477SBen Hutchings unsigned step; /* step between entries */ 1876add72477SBen Hutchings unsigned used; /* number currently used */ 1877add72477SBen Hutchings unsigned long *used_bitmap; 1878add72477SBen Hutchings struct efx_farch_filter_spec *spec; 1879add72477SBen Hutchings unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT]; 1880add72477SBen Hutchings }; 1881add72477SBen Hutchings 1882add72477SBen Hutchings struct efx_farch_filter_state { 1883fc7a6c28SEdward Cree struct rw_semaphore lock; /* Protects table contents */ 1884add72477SBen Hutchings struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT]; 1885add72477SBen Hutchings }; 1886add72477SBen Hutchings 1887add72477SBen Hutchings static void 1888add72477SBen Hutchings efx_farch_filter_table_clear_entry(struct efx_nic *efx, 1889add72477SBen Hutchings struct efx_farch_filter_table *table, 1890add72477SBen Hutchings unsigned int filter_idx); 1891add72477SBen Hutchings 1892add72477SBen Hutchings /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit 1893add72477SBen Hutchings * key derived from the n-tuple. The initial LFSR state is 0xffff. */ 1894add72477SBen Hutchings static u16 efx_farch_filter_hash(u32 key) 1895add72477SBen Hutchings { 1896add72477SBen Hutchings u16 tmp; 1897add72477SBen Hutchings 1898add72477SBen Hutchings /* First 16 rounds */ 1899add72477SBen Hutchings tmp = 0x1fff ^ key >> 16; 1900add72477SBen Hutchings tmp = tmp ^ tmp >> 3 ^ tmp >> 6; 1901add72477SBen Hutchings tmp = tmp ^ tmp >> 9; 1902add72477SBen Hutchings /* Last 16 rounds */ 1903add72477SBen Hutchings tmp = tmp ^ tmp << 13 ^ key; 1904add72477SBen Hutchings tmp = tmp ^ tmp >> 3 ^ tmp >> 6; 1905add72477SBen Hutchings return tmp ^ tmp >> 9; 1906add72477SBen Hutchings } 1907add72477SBen Hutchings 1908add72477SBen Hutchings /* To allow for hash collisions, filter search continues at these 1909add72477SBen Hutchings * increments from the first possible entry selected by the hash. 
*/ 1910add72477SBen Hutchings static u16 efx_farch_filter_increment(u32 key) 1911add72477SBen Hutchings { 1912add72477SBen Hutchings return key * 2 - 1; 1913add72477SBen Hutchings } 1914add72477SBen Hutchings 1915add72477SBen Hutchings static enum efx_farch_filter_table_id 1916add72477SBen Hutchings efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec) 1917add72477SBen Hutchings { 1918add72477SBen Hutchings BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != 1919add72477SBen Hutchings (EFX_FARCH_FILTER_TCP_FULL >> 2)); 1920add72477SBen Hutchings BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != 1921add72477SBen Hutchings (EFX_FARCH_FILTER_TCP_WILD >> 2)); 1922add72477SBen Hutchings BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != 1923add72477SBen Hutchings (EFX_FARCH_FILTER_UDP_FULL >> 2)); 1924add72477SBen Hutchings BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != 1925add72477SBen Hutchings (EFX_FARCH_FILTER_UDP_WILD >> 2)); 1926add72477SBen Hutchings BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != 1927add72477SBen Hutchings (EFX_FARCH_FILTER_MAC_FULL >> 2)); 1928add72477SBen Hutchings BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != 1929add72477SBen Hutchings (EFX_FARCH_FILTER_MAC_WILD >> 2)); 1930add72477SBen Hutchings BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC != 1931add72477SBen Hutchings EFX_FARCH_FILTER_TABLE_RX_MAC + 2); 1932add72477SBen Hutchings return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 
2 : 0); 1933add72477SBen Hutchings } 1934add72477SBen Hutchings 1935add72477SBen Hutchings static void efx_farch_filter_push_rx_config(struct efx_nic *efx) 1936add72477SBen Hutchings { 1937add72477SBen Hutchings struct efx_farch_filter_state *state = efx->filter_state; 1938add72477SBen Hutchings struct efx_farch_filter_table *table; 1939add72477SBen Hutchings efx_oword_t filter_ctl; 1940add72477SBen Hutchings 1941add72477SBen Hutchings efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); 1942add72477SBen Hutchings 1943add72477SBen Hutchings table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; 1944add72477SBen Hutchings EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, 1945add72477SBen Hutchings table->search_limit[EFX_FARCH_FILTER_TCP_FULL] + 1946add72477SBen Hutchings EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); 1947add72477SBen Hutchings EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, 1948add72477SBen Hutchings table->search_limit[EFX_FARCH_FILTER_TCP_WILD] + 1949add72477SBen Hutchings EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); 1950add72477SBen Hutchings EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, 1951add72477SBen Hutchings table->search_limit[EFX_FARCH_FILTER_UDP_FULL] + 1952add72477SBen Hutchings EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); 1953add72477SBen Hutchings EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, 1954add72477SBen Hutchings table->search_limit[EFX_FARCH_FILTER_UDP_WILD] + 1955add72477SBen Hutchings EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); 1956add72477SBen Hutchings 1957add72477SBen Hutchings table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; 1958add72477SBen Hutchings if (table->size) { 1959add72477SBen Hutchings EFX_SET_OWORD_FIELD( 1960add72477SBen Hutchings filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, 1961add72477SBen Hutchings table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + 1962add72477SBen Hutchings EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); 1963add72477SBen Hutchings EFX_SET_OWORD_FIELD( 1964add72477SBen 
Hutchings filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, 1965add72477SBen Hutchings table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + 1966add72477SBen Hutchings EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); 1967add72477SBen Hutchings } 1968add72477SBen Hutchings 1969add72477SBen Hutchings table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; 1970add72477SBen Hutchings if (table->size) { 1971add72477SBen Hutchings EFX_SET_OWORD_FIELD( 1972add72477SBen Hutchings filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID, 1973add72477SBen Hutchings table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id); 1974add72477SBen Hutchings EFX_SET_OWORD_FIELD( 1975add72477SBen Hutchings filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED, 1976add72477SBen Hutchings !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & 1977add72477SBen Hutchings EFX_FILTER_FLAG_RX_RSS)); 1978add72477SBen Hutchings EFX_SET_OWORD_FIELD( 1979add72477SBen Hutchings filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID, 1980add72477SBen Hutchings table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id); 1981add72477SBen Hutchings EFX_SET_OWORD_FIELD( 1982add72477SBen Hutchings filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, 1983add72477SBen Hutchings !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & 1984add72477SBen Hutchings EFX_FILTER_FLAG_RX_RSS)); 1985add72477SBen Hutchings 1986add72477SBen Hutchings /* There is a single bit to enable RX scatter for all 1987add72477SBen Hutchings * unmatched packets. Only set it if scatter is 1988add72477SBen Hutchings * enabled in both filter specs. 
1989add72477SBen Hutchings */ 1990add72477SBen Hutchings EFX_SET_OWORD_FIELD( 1991add72477SBen Hutchings filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1992add72477SBen Hutchings !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & 1993add72477SBen Hutchings table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & 1994add72477SBen Hutchings EFX_FILTER_FLAG_RX_SCATTER)); 19955a6681e2SEdward Cree } else { 1996add72477SBen Hutchings /* We don't expose 'default' filters because unmatched 1997add72477SBen Hutchings * packets always go to the queue number found in the 1998add72477SBen Hutchings * RSS table. But we still need to set the RX scatter 1999add72477SBen Hutchings * bit here. 2000add72477SBen Hutchings */ 2001add72477SBen Hutchings EFX_SET_OWORD_FIELD( 2002add72477SBen Hutchings filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 2003add72477SBen Hutchings efx->rx_scatter); 2004add72477SBen Hutchings } 2005add72477SBen Hutchings 2006add72477SBen Hutchings efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); 2007add72477SBen Hutchings } 2008add72477SBen Hutchings 2009add72477SBen Hutchings static void efx_farch_filter_push_tx_limits(struct efx_nic *efx) 2010add72477SBen Hutchings { 2011add72477SBen Hutchings struct efx_farch_filter_state *state = efx->filter_state; 2012add72477SBen Hutchings struct efx_farch_filter_table *table; 2013add72477SBen Hutchings efx_oword_t tx_cfg; 2014add72477SBen Hutchings 2015add72477SBen Hutchings efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG); 2016add72477SBen Hutchings 2017add72477SBen Hutchings table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; 2018add72477SBen Hutchings if (table->size) { 2019add72477SBen Hutchings EFX_SET_OWORD_FIELD( 2020add72477SBen Hutchings tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, 2021add72477SBen Hutchings table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + 2022add72477SBen Hutchings EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); 2023add72477SBen Hutchings EFX_SET_OWORD_FIELD( 2024add72477SBen Hutchings tx_cfg, 
FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, 2025add72477SBen Hutchings table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + 2026add72477SBen Hutchings EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); 2027add72477SBen Hutchings } 2028add72477SBen Hutchings 2029add72477SBen Hutchings efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG); 2030add72477SBen Hutchings } 2031add72477SBen Hutchings 2032add72477SBen Hutchings static int 2033add72477SBen Hutchings efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec, 2034add72477SBen Hutchings const struct efx_filter_spec *gen_spec) 2035add72477SBen Hutchings { 2036add72477SBen Hutchings bool is_full = false; 2037add72477SBen Hutchings 203842356d9aSEdward Cree if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && gen_spec->rss_context) 2039add72477SBen Hutchings return -EINVAL; 2040add72477SBen Hutchings 2041add72477SBen Hutchings spec->priority = gen_spec->priority; 2042add72477SBen Hutchings spec->flags = gen_spec->flags; 2043add72477SBen Hutchings spec->dmaq_id = gen_spec->dmaq_id; 2044add72477SBen Hutchings 2045add72477SBen Hutchings switch (gen_spec->match_flags) { 2046add72477SBen Hutchings case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | 2047add72477SBen Hutchings EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | 2048add72477SBen Hutchings EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT): 2049add72477SBen Hutchings is_full = true; 2050df561f66SGustavo A. R. 
Silva fallthrough; 2051add72477SBen Hutchings case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | 2052add72477SBen Hutchings EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): { 2053add72477SBen Hutchings __be32 rhost, host1, host2; 2054add72477SBen Hutchings __be16 rport, port1, port2; 2055add72477SBen Hutchings 2056e01b16a7SEdward Cree EFX_WARN_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX)); 2057add72477SBen Hutchings 2058add72477SBen Hutchings if (gen_spec->ether_type != htons(ETH_P_IP)) 2059add72477SBen Hutchings return -EPROTONOSUPPORT; 2060add72477SBen Hutchings if (gen_spec->loc_port == 0 || 2061add72477SBen Hutchings (is_full && gen_spec->rem_port == 0)) 2062add72477SBen Hutchings return -EADDRNOTAVAIL; 2063add72477SBen Hutchings switch (gen_spec->ip_proto) { 2064add72477SBen Hutchings case IPPROTO_TCP: 2065add72477SBen Hutchings spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL : 2066add72477SBen Hutchings EFX_FARCH_FILTER_TCP_WILD); 2067add72477SBen Hutchings break; 2068add72477SBen Hutchings case IPPROTO_UDP: 2069add72477SBen Hutchings spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL : 2070add72477SBen Hutchings EFX_FARCH_FILTER_UDP_WILD); 2071add72477SBen Hutchings break; 2072add72477SBen Hutchings default: 2073add72477SBen Hutchings return -EPROTONOSUPPORT; 2074add72477SBen Hutchings } 2075add72477SBen Hutchings 2076add72477SBen Hutchings /* Filter is constructed in terms of source and destination, 2077add72477SBen Hutchings * with the odd wrinkle that the ports are swapped in a UDP 2078add72477SBen Hutchings * wildcard filter. We need to convert from local and remote 2079add72477SBen Hutchings * (= zero for wildcard) addresses. 2080add72477SBen Hutchings */ 2081add72477SBen Hutchings rhost = is_full ? gen_spec->rem_host[0] : 0; 2082add72477SBen Hutchings rport = is_full ? 
gen_spec->rem_port : 0; 2083add72477SBen Hutchings host1 = rhost; 2084add72477SBen Hutchings host2 = gen_spec->loc_host[0]; 2085add72477SBen Hutchings if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) { 2086add72477SBen Hutchings port1 = gen_spec->loc_port; 2087add72477SBen Hutchings port2 = rport; 2088add72477SBen Hutchings } else { 2089add72477SBen Hutchings port1 = rport; 2090add72477SBen Hutchings port2 = gen_spec->loc_port; 2091add72477SBen Hutchings } 2092add72477SBen Hutchings spec->data[0] = ntohl(host1) << 16 | ntohs(port1); 2093add72477SBen Hutchings spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; 2094add72477SBen Hutchings spec->data[2] = ntohl(host2); 2095add72477SBen Hutchings 2096add72477SBen Hutchings break; 2097add72477SBen Hutchings } 2098add72477SBen Hutchings 2099add72477SBen Hutchings case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: 2100add72477SBen Hutchings is_full = true; 2101df561f66SGustavo A. R. Silva fallthrough; 2102add72477SBen Hutchings case EFX_FILTER_MATCH_LOC_MAC: 2103add72477SBen Hutchings spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL : 2104add72477SBen Hutchings EFX_FARCH_FILTER_MAC_WILD); 2105add72477SBen Hutchings spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0; 2106add72477SBen Hutchings spec->data[1] = (gen_spec->loc_mac[2] << 24 | 2107add72477SBen Hutchings gen_spec->loc_mac[3] << 16 | 2108add72477SBen Hutchings gen_spec->loc_mac[4] << 8 | 2109add72477SBen Hutchings gen_spec->loc_mac[5]); 2110add72477SBen Hutchings spec->data[2] = (gen_spec->loc_mac[0] << 8 | 2111add72477SBen Hutchings gen_spec->loc_mac[1]); 2112add72477SBen Hutchings break; 2113add72477SBen Hutchings 2114add72477SBen Hutchings case EFX_FILTER_MATCH_LOC_MAC_IG: 2115add72477SBen Hutchings spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ? 
2116add72477SBen Hutchings EFX_FARCH_FILTER_MC_DEF : 2117add72477SBen Hutchings EFX_FARCH_FILTER_UC_DEF); 2118add72477SBen Hutchings memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */ 2119add72477SBen Hutchings break; 2120add72477SBen Hutchings 2121add72477SBen Hutchings default: 2122add72477SBen Hutchings return -EPROTONOSUPPORT; 2123add72477SBen Hutchings } 2124add72477SBen Hutchings 2125add72477SBen Hutchings return 0; 2126add72477SBen Hutchings } 2127add72477SBen Hutchings 2128add72477SBen Hutchings static void 2129add72477SBen Hutchings efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, 2130add72477SBen Hutchings const struct efx_farch_filter_spec *spec) 2131add72477SBen Hutchings { 2132add72477SBen Hutchings bool is_full = false; 2133add72477SBen Hutchings 2134add72477SBen Hutchings /* *gen_spec should be completely initialised, to be consistent 2135add72477SBen Hutchings * with efx_filter_init_{rx,tx}() and in case we want to copy 2136add72477SBen Hutchings * it back to userland. 2137add72477SBen Hutchings */ 2138add72477SBen Hutchings memset(gen_spec, 0, sizeof(*gen_spec)); 2139add72477SBen Hutchings 2140add72477SBen Hutchings gen_spec->priority = spec->priority; 2141add72477SBen Hutchings gen_spec->flags = spec->flags; 2142add72477SBen Hutchings gen_spec->dmaq_id = spec->dmaq_id; 2143add72477SBen Hutchings 2144add72477SBen Hutchings switch (spec->type) { 2145add72477SBen Hutchings case EFX_FARCH_FILTER_TCP_FULL: 2146add72477SBen Hutchings case EFX_FARCH_FILTER_UDP_FULL: 2147add72477SBen Hutchings is_full = true; 2148df561f66SGustavo A. R. 
Silva fallthrough; 2149add72477SBen Hutchings case EFX_FARCH_FILTER_TCP_WILD: 2150add72477SBen Hutchings case EFX_FARCH_FILTER_UDP_WILD: { 2151add72477SBen Hutchings __be32 host1, host2; 2152add72477SBen Hutchings __be16 port1, port2; 2153add72477SBen Hutchings 2154add72477SBen Hutchings gen_spec->match_flags = 2155add72477SBen Hutchings EFX_FILTER_MATCH_ETHER_TYPE | 2156add72477SBen Hutchings EFX_FILTER_MATCH_IP_PROTO | 2157add72477SBen Hutchings EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; 2158add72477SBen Hutchings if (is_full) 2159add72477SBen Hutchings gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST | 2160add72477SBen Hutchings EFX_FILTER_MATCH_REM_PORT); 2161add72477SBen Hutchings gen_spec->ether_type = htons(ETH_P_IP); 2162add72477SBen Hutchings gen_spec->ip_proto = 2163add72477SBen Hutchings (spec->type == EFX_FARCH_FILTER_TCP_FULL || 2164add72477SBen Hutchings spec->type == EFX_FARCH_FILTER_TCP_WILD) ? 2165add72477SBen Hutchings IPPROTO_TCP : IPPROTO_UDP; 2166add72477SBen Hutchings 2167add72477SBen Hutchings host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); 2168add72477SBen Hutchings port1 = htons(spec->data[0]); 2169add72477SBen Hutchings host2 = htonl(spec->data[2]); 2170add72477SBen Hutchings port2 = htons(spec->data[1] >> 16); 2171add72477SBen Hutchings if (spec->flags & EFX_FILTER_FLAG_TX) { 2172add72477SBen Hutchings gen_spec->loc_host[0] = host1; 2173add72477SBen Hutchings gen_spec->rem_host[0] = host2; 2174add72477SBen Hutchings } else { 2175add72477SBen Hutchings gen_spec->loc_host[0] = host2; 2176add72477SBen Hutchings gen_spec->rem_host[0] = host1; 2177add72477SBen Hutchings } 2178add72477SBen Hutchings if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^ 2179add72477SBen Hutchings (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) { 2180add72477SBen Hutchings gen_spec->loc_port = port1; 2181add72477SBen Hutchings gen_spec->rem_port = port2; 2182add72477SBen Hutchings } else { 2183add72477SBen Hutchings gen_spec->loc_port = port2; 
2184add72477SBen Hutchings gen_spec->rem_port = port1; 2185add72477SBen Hutchings } 2186add72477SBen Hutchings 2187add72477SBen Hutchings break; 2188add72477SBen Hutchings } 2189add72477SBen Hutchings 2190add72477SBen Hutchings case EFX_FARCH_FILTER_MAC_FULL: 2191add72477SBen Hutchings is_full = true; 2192df561f66SGustavo A. R. Silva fallthrough; 2193add72477SBen Hutchings case EFX_FARCH_FILTER_MAC_WILD: 2194add72477SBen Hutchings gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC; 2195add72477SBen Hutchings if (is_full) 2196add72477SBen Hutchings gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; 2197add72477SBen Hutchings gen_spec->loc_mac[0] = spec->data[2] >> 8; 2198add72477SBen Hutchings gen_spec->loc_mac[1] = spec->data[2]; 2199add72477SBen Hutchings gen_spec->loc_mac[2] = spec->data[1] >> 24; 2200add72477SBen Hutchings gen_spec->loc_mac[3] = spec->data[1] >> 16; 2201add72477SBen Hutchings gen_spec->loc_mac[4] = spec->data[1] >> 8; 2202add72477SBen Hutchings gen_spec->loc_mac[5] = spec->data[1]; 2203add72477SBen Hutchings gen_spec->outer_vid = htons(spec->data[0]); 2204add72477SBen Hutchings break; 2205add72477SBen Hutchings 2206add72477SBen Hutchings case EFX_FARCH_FILTER_UC_DEF: 2207add72477SBen Hutchings case EFX_FARCH_FILTER_MC_DEF: 2208add72477SBen Hutchings gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG; 2209add72477SBen Hutchings gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF; 2210add72477SBen Hutchings break; 2211add72477SBen Hutchings 2212add72477SBen Hutchings default: 2213add72477SBen Hutchings WARN_ON(1); 2214add72477SBen Hutchings break; 2215add72477SBen Hutchings } 2216add72477SBen Hutchings } 2217add72477SBen Hutchings 2218add72477SBen Hutchings static void 2219b59e6ef8SBen Hutchings efx_farch_filter_init_rx_auto(struct efx_nic *efx, 22208803e150SBen Hutchings struct efx_farch_filter_spec *spec) 2221add72477SBen Hutchings { 2222add72477SBen Hutchings /* If there's only one channel then disable RSS for non VF 2223add72477SBen 
Hutchings * traffic, thereby allowing VFs to use RSS when the PF can't. 2224add72477SBen Hutchings */ 22257665d1abSBen Hutchings spec->priority = EFX_FILTER_PRI_AUTO; 22267665d1abSBen Hutchings spec->flags = (EFX_FILTER_FLAG_RX | 2227f1c2ef40SBert Kenward (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) | 2228add72477SBen Hutchings (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); 2229add72477SBen Hutchings spec->dmaq_id = 0; 2230add72477SBen Hutchings } 2231add72477SBen Hutchings 2232add72477SBen Hutchings /* Build a filter entry and return its n-tuple key. */ 2233add72477SBen Hutchings static u32 efx_farch_filter_build(efx_oword_t *filter, 2234add72477SBen Hutchings struct efx_farch_filter_spec *spec) 2235add72477SBen Hutchings { 2236add72477SBen Hutchings u32 data3; 2237add72477SBen Hutchings 2238add72477SBen Hutchings switch (efx_farch_filter_spec_table_id(spec)) { 2239add72477SBen Hutchings case EFX_FARCH_FILTER_TABLE_RX_IP: { 2240add72477SBen Hutchings bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL || 2241add72477SBen Hutchings spec->type == EFX_FARCH_FILTER_UDP_WILD); 2242add72477SBen Hutchings EFX_POPULATE_OWORD_7( 2243add72477SBen Hutchings *filter, 2244add72477SBen Hutchings FRF_BZ_RSS_EN, 2245add72477SBen Hutchings !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), 2246add72477SBen Hutchings FRF_BZ_SCATTER_EN, 2247add72477SBen Hutchings !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), 2248add72477SBen Hutchings FRF_BZ_TCP_UDP, is_udp, 2249add72477SBen Hutchings FRF_BZ_RXQ_ID, spec->dmaq_id, 2250add72477SBen Hutchings EFX_DWORD_2, spec->data[2], 2251add72477SBen Hutchings EFX_DWORD_1, spec->data[1], 2252add72477SBen Hutchings EFX_DWORD_0, spec->data[0]); 2253add72477SBen Hutchings data3 = is_udp; 2254add72477SBen Hutchings break; 2255add72477SBen Hutchings } 2256add72477SBen Hutchings 2257add72477SBen Hutchings case EFX_FARCH_FILTER_TABLE_RX_MAC: { 2258add72477SBen Hutchings bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; 2259add72477SBen 
Hutchings EFX_POPULATE_OWORD_7( 2260add72477SBen Hutchings *filter, 2261add72477SBen Hutchings FRF_CZ_RMFT_RSS_EN, 2262add72477SBen Hutchings !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), 2263add72477SBen Hutchings FRF_CZ_RMFT_SCATTER_EN, 2264add72477SBen Hutchings !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), 2265add72477SBen Hutchings FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id, 2266add72477SBen Hutchings FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, 2267add72477SBen Hutchings FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2], 2268add72477SBen Hutchings FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1], 2269add72477SBen Hutchings FRF_CZ_RMFT_VLAN_ID, spec->data[0]); 2270add72477SBen Hutchings data3 = is_wild; 2271add72477SBen Hutchings break; 2272add72477SBen Hutchings } 2273add72477SBen Hutchings 2274add72477SBen Hutchings case EFX_FARCH_FILTER_TABLE_TX_MAC: { 2275add72477SBen Hutchings bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; 2276add72477SBen Hutchings EFX_POPULATE_OWORD_5(*filter, 2277add72477SBen Hutchings FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id, 2278add72477SBen Hutchings FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, 2279add72477SBen Hutchings FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2], 2280add72477SBen Hutchings FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1], 2281add72477SBen Hutchings FRF_CZ_TMFT_VLAN_ID, spec->data[0]); 2282add72477SBen Hutchings data3 = is_wild | spec->dmaq_id << 1; 2283add72477SBen Hutchings break; 2284add72477SBen Hutchings } 2285add72477SBen Hutchings 2286add72477SBen Hutchings default: 2287add72477SBen Hutchings BUG(); 2288add72477SBen Hutchings } 2289add72477SBen Hutchings 2290add72477SBen Hutchings return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3; 2291add72477SBen Hutchings } 2292add72477SBen Hutchings 2293add72477SBen Hutchings static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left, 2294add72477SBen Hutchings const struct efx_farch_filter_spec *right) 2295add72477SBen Hutchings { 2296add72477SBen Hutchings if (left->type != right->type || 2297add72477SBen 
Hutchings memcmp(left->data, right->data, sizeof(left->data))) 2298add72477SBen Hutchings return false; 2299add72477SBen Hutchings 2300add72477SBen Hutchings if (left->flags & EFX_FILTER_FLAG_TX && 2301add72477SBen Hutchings left->dmaq_id != right->dmaq_id) 2302add72477SBen Hutchings return false; 2303add72477SBen Hutchings 2304add72477SBen Hutchings return true; 2305add72477SBen Hutchings } 2306add72477SBen Hutchings 2307add72477SBen Hutchings /* 2308add72477SBen Hutchings * Construct/deconstruct external filter IDs. At least the RX filter 2309add72477SBen Hutchings * IDs must be ordered by matching priority, for RX NFC semantics. 2310add72477SBen Hutchings * 2311add72477SBen Hutchings * Deconstruction needs to be robust against invalid IDs so that 2312add72477SBen Hutchings * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can 2313add72477SBen Hutchings * accept user-provided IDs. 2314add72477SBen Hutchings */ 2315add72477SBen Hutchings 2316add72477SBen Hutchings #define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5 2317add72477SBen Hutchings 2318add72477SBen Hutchings static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = { 2319add72477SBen Hutchings [EFX_FARCH_FILTER_TCP_FULL] = 0, 2320add72477SBen Hutchings [EFX_FARCH_FILTER_UDP_FULL] = 0, 2321add72477SBen Hutchings [EFX_FARCH_FILTER_TCP_WILD] = 1, 2322add72477SBen Hutchings [EFX_FARCH_FILTER_UDP_WILD] = 1, 2323add72477SBen Hutchings [EFX_FARCH_FILTER_MAC_FULL] = 2, 2324add72477SBen Hutchings [EFX_FARCH_FILTER_MAC_WILD] = 3, 2325add72477SBen Hutchings [EFX_FARCH_FILTER_UC_DEF] = 4, 2326add72477SBen Hutchings [EFX_FARCH_FILTER_MC_DEF] = 4, 2327add72477SBen Hutchings }; 2328add72477SBen Hutchings 2329add72477SBen Hutchings static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = { 2330add72477SBen Hutchings EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */ 2331add72477SBen Hutchings EFX_FARCH_FILTER_TABLE_RX_IP, 2332add72477SBen Hutchings 
EFX_FARCH_FILTER_TABLE_RX_MAC, 2333add72477SBen Hutchings EFX_FARCH_FILTER_TABLE_RX_MAC, 2334add72477SBen Hutchings EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */ 2335add72477SBen Hutchings EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */ 2336add72477SBen Hutchings EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */ 2337add72477SBen Hutchings }; 2338add72477SBen Hutchings 2339add72477SBen Hutchings #define EFX_FARCH_FILTER_INDEX_WIDTH 13 2340add72477SBen Hutchings #define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1) 2341add72477SBen Hutchings 2342add72477SBen Hutchings static inline u32 2343add72477SBen Hutchings efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec, 2344add72477SBen Hutchings unsigned int index) 2345add72477SBen Hutchings { 2346add72477SBen Hutchings unsigned int range; 2347add72477SBen Hutchings 2348add72477SBen Hutchings range = efx_farch_filter_type_match_pri[spec->type]; 2349add72477SBen Hutchings if (!(spec->flags & EFX_FILTER_FLAG_RX)) 2350add72477SBen Hutchings range += EFX_FARCH_FILTER_MATCH_PRI_COUNT; 2351add72477SBen Hutchings 2352add72477SBen Hutchings return range << EFX_FARCH_FILTER_INDEX_WIDTH | index; 2353add72477SBen Hutchings } 2354add72477SBen Hutchings 2355add72477SBen Hutchings static inline enum efx_farch_filter_table_id 2356add72477SBen Hutchings efx_farch_filter_id_table_id(u32 id) 2357add72477SBen Hutchings { 2358add72477SBen Hutchings unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH; 2359add72477SBen Hutchings 2360add72477SBen Hutchings if (range < ARRAY_SIZE(efx_farch_filter_range_table)) 2361add72477SBen Hutchings return efx_farch_filter_range_table[range]; 2362add72477SBen Hutchings else 2363add72477SBen Hutchings return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */ 2364add72477SBen Hutchings } 2365add72477SBen Hutchings 2366add72477SBen Hutchings static inline unsigned int efx_farch_filter_id_index(u32 id) 2367add72477SBen Hutchings { 2368add72477SBen Hutchings return 
id & EFX_FARCH_FILTER_INDEX_MASK;
}

/* Scan the match-priority ranges from the top down and return the first
 * non-empty table's limit, encoded as (range << INDEX_WIDTH | size);
 * returns 0 if every RX filter table is empty/absent.
 */
u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
	enum efx_farch_filter_table_id table_id;

	do {
		table_id = efx_farch_filter_range_table[range];
		if (state->table[table_id].size != 0)
			return range << EFX_FARCH_FILTER_INDEX_WIDTH |
				state->table[table_id].size;
	} while (range--);

	return 0;
}

/* Insert or replace a filter.  Returns the filter ID on success, or a
 * negative error code:
 *  -EINVAL if no table exists for this filter type;
 *  -EBUSY  if no free slot was found within the search limit;
 *  -EEXIST if an equal-priority matching filter exists and !replace_equal;
 *  -EPERM  if a higher-priority matching filter already exists.
 * Takes state->lock for write; may push search limits and the RX/TX
 * filter control configuration to the hardware.
 */
s32 efx_farch_filter_insert(struct efx_nic *efx,
			    struct efx_filter_spec *gen_spec,
			    bool replace_equal)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	struct efx_farch_filter_spec spec;
	efx_oword_t filter;
	int rep_index, ins_index;
	unsigned int depth = 0;
	int rc;

	rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
	if (rc)
		return rc;

	down_write(&state->lock);

	table = &state->table[efx_farch_filter_spec_table_id(&spec)];
	if (table->size == 0) {
		rc = -EINVAL;
		goto out_unlock;
	}

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: type %d search_limit=%d", __func__, spec.type,
		   table->search_limit[spec.type]);

	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
		/* One filter spec per type */
		BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
		BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
			     EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
		rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
		ins_index = rep_index;
	} else {
		/* Search concurrently for
		 * (1) a filter to be replaced (rep_index): any filter
		 *     with the same match values, up to the current
		 *     search depth for this type, and
		 * (2) the insertion point (ins_index): (1) or any
		 *     free slot before it or up to the maximum search
		 *     depth for this priority
		 * We fail if we cannot find (2).
		 *
		 * We can stop once either
		 * (a) we find (1), in which case we have definitely
		 *     found (2) as well; or
		 * (b) we have searched exhaustively for (1), and have
		 *     either found (2) or searched exhaustively for it
		 */
		u32 key = efx_farch_filter_build(&filter, &spec);
		unsigned int hash = efx_farch_filter_hash(key);
		unsigned int incr = efx_farch_filter_increment(key);
		unsigned int max_rep_depth = table->search_limit[spec.type];
		unsigned int max_ins_depth =
			spec.priority <= EFX_FILTER_PRI_HINT ?
			EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
			EFX_FARCH_FILTER_CTL_SRCH_MAX;
		unsigned int i = hash & (table->size - 1);

		ins_index = -1;
		depth = 1;

		for (;;) {
			if (!test_bit(i, table->used_bitmap)) {
				if (ins_index < 0)
					ins_index = i;
			} else if (efx_farch_filter_equal(&spec,
							  &table->spec[i])) {
				/* Case (a) */
				if (ins_index < 0)
					ins_index = i;
				rep_index = i;
				break;
			}

			if (depth >= max_rep_depth &&
			    (ins_index >= 0 || depth >= max_ins_depth)) {
				/* Case (b) */
				if (ins_index < 0) {
					rc = -EBUSY;
					goto out_unlock;
				}
				rep_index = -1;
				break;
			}

			/* Open-addressed probe: step by the key-derived
			 * increment, wrapping within the table.
			 */
			i = (i + incr) & (table->size - 1);
			++depth;
		}
	}

	/* If we found a filter to be replaced, check whether we
	 * should do so
	 */
	if (rep_index >= 0) {
		struct efx_farch_filter_spec *saved_spec =
			&table->spec[rep_index];

		if (spec.priority == saved_spec->priority && !replace_equal) {
			rc = -EEXIST;
			goto out_unlock;
		}
		if (spec.priority < saved_spec->priority) {
			rc = -EPERM;
			goto out_unlock;
		}
		/* Remember that an automatic filter was overridden, so it
		 * can be restored on removal.
		 */
		if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
		    saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
			spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
	}

	/* Insert the filter */
	if (ins_index != rep_index) {
		__set_bit(ins_index, table->used_bitmap);
		++table->used;
	}
	table->spec[ins_index] = spec;

	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
		efx_farch_filter_push_rx_config(efx);
	} else {
		if (table->search_limit[spec.type] < depth) {
			table->search_limit[spec.type] = depth;
			if (spec.flags & EFX_FILTER_FLAG_TX)
				efx_farch_filter_push_tx_limits(efx);
			else
				efx_farch_filter_push_rx_config(efx);
		}

		efx_writeo(efx, &filter,
			   table->offset + table->step * ins_index);

		/* If we were able to replace a filter by inserting
		 * at a lower depth, clear the replaced filter
		 */
		if (ins_index != rep_index && rep_index >= 0)
			efx_farch_filter_table_clear_entry(efx, table,
							   rep_index);
	}

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: filter type %d index %d rxq %u set",
		   __func__, spec.type, ins_index, spec.dmaq_id);
	rc = efx_farch_filter_make_id(&spec, ins_index);

out_unlock:
	up_write(&state->lock);
	return rc;
}

/* Clear one hardware/software filter table entry.  Caller must hold
 * state->lock for write and the entry must be in use.
 */
static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx)
{
	/* All-zeroes (static, zero-initialised) oword used to overwrite
	 * the hardware table entry.
	 */
	static efx_oword_t filter;

	EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
	BUG_ON(table->offset == 0); /* can't clear MAC default filters */

	__clear_bit(filter_idx, table->used_bitmap);
	--table->used;
	memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));

	efx_writeo(efx, &filter, table->offset + table->step * filter_idx);

	/* If this filter required a greater search depth than
	 * any other, the search limit for its type can now be
	 * decreased.  However, it is hard to determine that
	 * unless the table has become completely empty - in
	 * which case, all its search limits can be set to 0.
	 */
	if (unlikely(table->used == 0)) {
		memset(table->search_limit, 0, sizeof(table->search_limit));
		if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
			efx_farch_filter_push_tx_limits(efx);
		else
			efx_farch_filter_push_rx_config(efx);
	}
}

/* Remove the filter at @filter_idx if it is in use and has the given
 * priority.  Filters that override an automatic filter
 * (EFX_FILTER_FLAG_RX_OVER_AUTO) are re-initialised via
 * efx_farch_filter_init_rx_auto() rather than cleared.
 * Caller must hold state->lock for write.
 */
static int efx_farch_filter_remove(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx,
				   enum efx_filter_priority priority)
{
	struct efx_farch_filter_spec *spec = &table->spec[filter_idx];

	if (!test_bit(filter_idx, table->used_bitmap) ||
	    spec->priority != priority)
		return -ENOENT;

	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
		efx_farch_filter_init_rx_auto(efx, spec);
		efx_farch_filter_push_rx_config(efx);
	} else {
		efx_farch_filter_table_clear_entry(efx, table, filter_idx);
	}

	return 0;
}

/* Remove a filter given its ID, validating the encoded table and index
 * before taking the lock.
 */
int efx_farch_filter_remove_safe(struct efx_nic *efx,
				 enum efx_filter_priority priority,
				 u32 filter_id)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	int rc;

	table_id = efx_farch_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	filter_idx = efx_farch_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	down_write(&state->lock);

	rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
	up_write(&state->lock);

	return rc;
}

/* Copy the spec for a filter ID into *spec_buf, if the filter exists
 * with the given priority.  Returns -ENOENT otherwise.
 */
int efx_farch_filter_get_safe(struct efx_nic *efx,
			      enum efx_filter_priority priority,
			      u32 filter_id, struct efx_filter_spec *spec_buf)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	struct efx_farch_filter_spec *spec;
	unsigned int filter_idx;
	int rc = -ENOENT;

	down_read(&state->lock);

	table_id = efx_farch_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
		goto out_unlock;
	table = &state->table[table_id];

	filter_idx = efx_farch_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		goto out_unlock;
	spec = &table->spec[filter_idx];

	if (test_bit(filter_idx, table->used_bitmap) &&
	    spec->priority == priority) {
		efx_farch_filter_to_gen_spec(spec_buf, spec);
		rc = 0;
	}

out_unlock:
	up_read(&state->lock);
	return rc;
}

/* Remove all filters of the given priority from one table, skipping
 * automatic (EFX_FILTER_PRI_AUTO) entries.
 */
static void
efx_farch_filter_table_clear(struct efx_nic *efx,
			     enum efx_farch_filter_table_id table_id,
			     enum efx_filter_priority priority)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table = &state->table[table_id];
	unsigned int filter_idx;

	down_write(&state->lock);
	for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
		if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
			efx_farch_filter_remove(efx, table,
						filter_idx, priority);
	}
	up_write(&state->lock);
}

/* Clear all RX filters (IP, MAC and default tables) of the given
 * priority.
 */
int efx_farch_filter_clear_rx(struct efx_nic *efx,
			      enum efx_filter_priority priority)
{
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
				     priority);
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
				     priority);
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
				     priority);
	return 0;
}

/* Count RX filters in use with the given priority across all RX
 * tables.
 */
u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
				   enum efx_filter_priority priority)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	u32 count = 0;

	down_read(&state->lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority)
				++count;
		}
	}

	up_read(&state->lock);

	return count;
}

/* Fill *buf with the IDs of up to @size RX filters of the given
 * priority.  Returns the number of IDs written, or -EMSGSIZE if the
 * buffer is too small.
 */
s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
				enum efx_filter_priority priority,
				u32 *buf, u32 size)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	s32 count = 0;

	down_read(&state->lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority) {
				if (count == size) {
					count = -EMSGSIZE;
					goto out;
				}
				buf[count++] = efx_farch_filter_make_id(
					&table->spec[filter_idx], filter_idx);
			}
		}
	}

out:
	up_read(&state->lock);

	return count;
}

/* Restore filter state after reset: re-write every in-use entry of the
 * register-backed tables, then push the RX config and TX limits.
 */
void efx_farch_filter_table_restore(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	down_write(&state->lock);

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];

		/* Check whether this is a regular register table */
		if (table->step == 0)
			continue;

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap))
				continue;
			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_farch_filter_push_rx_config(efx);
	efx_farch_filter_push_tx_limits(efx);

	up_write(&state->lock);
}

/* Free all software filter-table state (bitmaps, spec arrays and the
 * state structure itself).
 */
void efx_farch_filter_table_remove(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		bitmap_free(state->table[table_id].used_bitmap);
		vfree(state->table[table_id].spec);
	}
	kfree(state);
}

/* Allocate and initialise the software filter tables, set up the
 * always-present RX default filters, and push the initial RX config.
 * Returns 0 or -ENOMEM (all partial allocations are freed on failure).
 */
int efx_farch_filter_table_probe(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state;
	struct efx_farch_filter_table *table;
	unsigned table_id;

	state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;
	efx->filter_state = state;
	init_rwsem(&state->lock);

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
	table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
	table->offset = FR_BZ_RX_FILTER_TBL0;
	table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
	table->step = FR_BZ_RX_FILTER_TBL0_STEP;

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
	table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
	table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
	table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
	table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;

	/* The RX default table has no backing registers (offset/step 0) */
	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
	table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;

	table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
	table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
	table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
	table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
	table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];
		if (table->size == 0)
			continue;
		table->used_bitmap = bitmap_zalloc(table->size, GFP_KERNEL);
		if (!table->used_bitmap)
			goto fail;
		table->spec = vzalloc(array_size(sizeof(*table->spec),
						 table->size));
		if (!table->spec)
			goto fail;
	}

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	if (table->size) {
		/* RX default filters must always exist */
		struct efx_farch_filter_spec *spec;
		unsigned i;

		for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
			spec = &table->spec[i];
			spec->type = EFX_FARCH_FILTER_UC_DEF + i;
			efx_farch_filter_init_rx_auto(efx, spec);
			__set_bit(i, table->used_bitmap);
		}
	}

	efx_farch_filter_push_rx_config(efx);

	return 0;

fail:
	efx_farch_filter_table_remove(efx);
	return -ENOMEM;
}

/* Update scatter enable flags for filters pointing to our own RX queues */
void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	down_write(&state->lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap) ||
			    table->spec[filter_idx].dmaq_id >=
			    efx->n_rx_channels)
				continue;

			if (efx->rx_scatter)
				table->spec[filter_idx].flags |=
					EFX_FILTER_FLAG_RX_SCATTER;
			else
				table->spec[filter_idx].flags &=
					~EFX_FILTER_FLAG_RX_SCATTER;

			if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
				/* Pushed by efx_farch_filter_push_rx_config() */
				continue;

			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_farch_filter_push_rx_config(efx);

	up_write(&state->lock);
}

#ifdef CONFIG_RFS_ACCEL

/* Try to expire one ARFS (HINT-priority) RX IP filter, consulting the
 * software rps_hash_table where present.  Returns true if the filter
 * was removed.
 */
bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
				     unsigned int index)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	bool ret = false, force = false;
	u16 arfs_id;

	down_write(&state->lock);
	spin_lock_bh(&efx->rps_hash_lock);
	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
	if (test_bit(index, table->used_bitmap) &&
	    table->spec[index].priority == EFX_FILTER_PRI_HINT) {
		struct efx_arfs_rule *rule = NULL;
		struct efx_filter_spec spec;

		efx_farch_filter_to_gen_spec(&spec, &table->spec[index]);
		if (!efx->rps_hash_table) {
			/* In the absence of the table, we always returned 0 to
			 * ARFS, so use the same to query it.
			 */
			arfs_id = 0;
		} else {
			rule = efx_siena_rps_hash_find(efx, &spec);
			if (!rule) {
				/* ARFS table doesn't know of this filter, remove it */
				force = true;
			} else {
				arfs_id = rule->arfs_id;
				if (!efx_siena_rps_check_rule(rule, index,
							      &force))
					goto out_unlock;
			}
		}
		if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
						 flow_id, arfs_id)) {
			if (rule)
				rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
			efx_siena_rps_hash_del(efx, &spec);
			efx_farch_filter_table_clear_entry(efx, table, index);
			ret = true;
		}
	}
out_unlock:
	spin_unlock_bh(&efx->rps_hash_lock);
	up_write(&state->lock);
	return ret;
}

#endif /* CONFIG_RFS_ACCEL */

/* Rebuild the unicast-filter flag and multicast hash table from the
 * net_device flags and multicast address list.  No-op until the
 * net_device is registered.
 */
void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct netdev_hw_addr *ha;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	u32 crc;
	int bit;

	if (!efx_dev_registered(efx))
		return;

	netif_addr_lock_bh(net_dev);

	efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);

	/* Build multicast hash table */
	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		netdev_for_each_mc_addr(ha, net_dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			__set_bit_le(bit, mc_hash);
		}

		/* Broadcast packets go through the multicast hash filter.
		 * ether_crc_le() of the broadcast address is 0xbe2612ff
		 * so we always add bit 0xff to the mask.
		 */
		__set_bit_le(0xff, mc_hash);
	}

	netif_addr_unlock_bh(net_dev);
}