/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#ifndef _ENIC_H_
#define _ENIC_H_

#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_nic.h"
#include "vnic_rss.h"
#include <linux/irq.h>
#include <net/page_pool/helpers.h>

#define DRV_NAME		"enic"
#define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"

#define ENIC_BARS_MAX		6

#define ENIC_WQ_MAX		256
#define ENIC_RQ_MAX		256

#define ENIC_WQ_NAPI_BUDGET	256

#define ENIC_AIC_LARGE_PKT_DIFF	3

/* Supported RQ completion-queue entry sizes (the suffix is the entry
 * size; _MAX is the count of supported sizes, not itself a size).
 */
enum ext_cq {
	ENIC_RQ_CQ_ENTRY_SIZE_16,
	ENIC_RQ_CQ_ENTRY_SIZE_32,
	ENIC_RQ_CQ_ENTRY_SIZE_64,
	ENIC_RQ_CQ_ENTRY_SIZE_MAX,
};

/* Per-MSI-X-vector bookkeeping: the registered handler, its device
 * name/cookie, and the desired IRQ affinity mask.
 */
struct enic_msix_entry {
	int requested;			/* nonzero once the vector is requested */
	char devname[IFNAMSIZ + 8];	/* name passed to request_irq() */
	irqreturn_t (*isr)(int, void *);
	void *devid;			/* cookie handed back to the ISR */
	cpumask_var_t affinity_mask;
};

/* Store only the lower range. Higher range is given by fw.
 */
struct enic_intr_mod_range {
	u32 small_pkt_range_start;
	u32 large_pkt_range_start;
};

/* Maps an Rx packet rate to a coalescing-range percentage for adaptive
 * interrupt moderation.
 */
struct enic_intr_mod_table {
	u32 rx_rate;
	u32 range_percent;
};

#define ENIC_MAX_LINK_SPEEDS		3
#define ENIC_LINK_SPEED_10G		10000
#define ENIC_LINK_SPEED_4G		4000
#define ENIC_LINK_40G_INDEX		2
#define ENIC_LINK_10G_INDEX		1
#define ENIC_LINK_4G_INDEX		0
#define ENIC_RX_COALESCE_RANGE_END	125
#define ENIC_AIC_TS_BREAK		100

/* Rx interrupt coalescing configuration, including the adaptive
 * (AIC) small/large packet ranges.
 */
struct enic_rx_coal {
	u32 small_pkt_range_start;
	u32 large_pkt_range_start;
	u32 range_end;
	u32 use_adaptive_rx_coalesce;	/* nonzero = adaptive coalescing on */
};

/* priv_flags */
#define ENIC_SRIOV_ENABLED		(1 << 0)

/* enic port profile set flags */
#define ENIC_PORT_REQUEST_APPLIED	(1 << 0)
#define ENIC_SET_REQUEST		(1 << 1)
#define ENIC_SET_NAME			(1 << 2)
#define ENIC_SET_INSTANCE		(1 << 3)
#define ENIC_SET_HOST			(1 << 4)

/* Worst-case number of descriptors a max-size TSO frame can split into */
#define MAX_TSO			BIT(16)
#define WQ_ENET_MAX_DESC_LEN	BIT(WQ_ENET_LEN_BITS)
#define ENIC_DESC_MAX_SPLITS	(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

/* Port-profile state; @set holds the ENIC_SET_* / ENIC_PORT_* flags above */
struct enic_port_profile {
	u32 set;
	u8 request;
	char name[PORT_PROFILE_MAX];
	u8 instance_uuid[PORT_UUID_MAX];
	u8 host_uuid[PORT_UUID_MAX];
	u8 vf_mac[ETH_ALEN];
	u8 mac_addr[ETH_ALEN];
};

/* enic_rfs_fltr_node - rfs filter node in hash table
 * @keys: IPv4 5 tuple
 * @flow_id: flow_id of clsf filter provided by kernel
 * @fltr_id: filter id of clsf filter returned by adaptor
 * @rq_id: desired rq index
 * @node: hlist_node
 */
struct enic_rfs_fltr_node {
	struct flow_keys keys;
	u32 flow_id;
	u16 fltr_id;
	u16 rq_id;
	struct hlist_node node;
};

/* enic_rfs_flw_tbl - rfs flow table
 * @max: Maximum number of filters vNIC supports
 * @free: Number of free filters available
 * @toclean: hash table index to clean next
 * @ht_head: hash table list head
 * @lock: spin lock
 * @rfs_may_expire: timer function for enic_rps_may_expire_flow
 */
struct enic_rfs_flw_tbl {
	u16 max;
	int free;

#define ENIC_RFS_FLW_BITSHIFT	(10)
#define ENIC_RFS_FLW_MASK	((1 << ENIC_RFS_FLW_BITSHIFT) - 1)
	u16 toclean:ENIC_RFS_FLW_BITSHIFT;	/* wraps within table size */
	struct hlist_head ht_head[1 << ENIC_RFS_FLW_BITSHIFT];
	spinlock_t lock;
	struct timer_list rfs_may_expire;
};

/* VXLAN offload capability/state negotiated with the adapter */
struct vxlan_offload {
	u16 vxlan_udp_port_number;
	u8 patch_level;
	u8 flags;
};

struct enic_wq_stats {
	u64 packets;		/* pkts queued for Tx */
	u64 stopped;		/* Tx ring almost full, queue stopped */
	u64 wake;		/* Tx ring no longer full, queue woken up */
	u64 tso;		/* non-encap tso pkt */
	u64 encap_tso;		/* encap tso pkt */
	u64 encap_csum;		/* encap HW csum */
	u64 csum_partial;	/* skb->ip_summed = CHECKSUM_PARTIAL */
	u64 csum_none;		/* HW csum not required */
	u64 bytes;		/* bytes queued for Tx */
	u64 add_vlan;		/* HW adds vlan tag */
	u64 cq_work;		/* Tx completions processed */
	u64 cq_bytes;		/* Tx bytes processed */
	u64 null_pkt;		/* skb length <= 0 */
	u64 skb_linear_fail;	/* linearize failures */
	u64 desc_full_awake;	/* TX ring full while queue awake */
};

struct enic_rq_stats {
	u64 packets;			/* pkts received */
	u64 bytes;			/* bytes received */
	u64 l4_rss_hash;		/* hashed on l4 */
	u64 l3_rss_hash;		/* hashed on l3 */
	u64 csum_unnecessary;		/* HW verified csum */
	u64 csum_unnecessary_encap;	/* HW verified csum on encap packet */
	u64 vlan_stripped;		/* HW stripped vlan */
	u64 napi_complete;		/* napi complete intr reenabled */
	u64 napi_repoll;		/* napi poll again */
	u64 bad_fcs;			/* bad pkts */
	u64 pkt_truncated;		/* truncated pkts */
	u64 no_skb;			/* out of skbs */
	u64 desc_skip;			/* Rx pkt went into later buffer */
	u64 pp_alloc_fail;		/* page pool alloc failure */
};

/* One transmit queue; cacheline-aligned to avoid false sharing between
 * queues.
 */
struct enic_wq {
	spinlock_t lock;		/* spinlock for wq */
	struct vnic_wq vwq;
	struct enic_wq_stats stats;
} ____cacheline_aligned;

/* One receive queue with its page pool; cacheline-aligned like enic_wq */
struct enic_rq {
	struct vnic_rq vrq;
	struct enic_rq_stats stats;
	struct page_pool *pool;
} ____cacheline_aligned;

/* Per-instance private data structure */
struct enic {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct vnic_enet_config config;
	struct vnic_dev_bar bar[ENIC_BARS_MAX];
	struct vnic_dev *vdev;
	struct timer_list notify_timer;
	struct work_struct reset;
	struct work_struct tx_hang_reset;
	struct work_struct change_mtu_work;
	struct msix_entry *msix_entry;
	struct enic_msix_entry *msix;
	u32 msg_enable;
	spinlock_t devcmd_lock;
	u8 mac_addr[ETH_ALEN];
	unsigned int flags;
	unsigned int priv_flags;	/* ENIC_SRIOV_ENABLED etc. */
	unsigned int mc_count;
	unsigned int uc_count;
	u32 port_mtu;
	struct enic_rx_coal rx_coalesce_setting;
	u32 rx_coalesce_usecs;
	u32 tx_coalesce_usecs;
#ifdef CONFIG_PCI_IOV
	u16 num_vfs;
#endif
	spinlock_t enic_api_lock;
	bool enic_api_busy;
	struct enic_port_profile *pp;

	/* work (Tx) queue resources */
	struct enic_wq *wq;
	unsigned int wq_avail;
	unsigned int wq_count;
	u16 loop_enable;
	u16 loop_tag;

	/* receive queue resources */
	struct enic_rq *rq;
	unsigned int rq_avail;
	unsigned int rq_count;
	struct vxlan_offload vxlan;
	struct napi_struct *napi;

	/* interrupt resources */
	struct vnic_intr *intr;
	unsigned int intr_avail;
	unsigned int intr_count;
	u32 __iomem *legacy_pba;		/* memory-mapped */

	/* completion queue resources */
	struct vnic_cq *cq;
	unsigned int cq_avail;
	unsigned int cq_count;
	struct enic_rfs_flw_tbl rfs_h;
	u8 rss_key[ENIC_RSS_LEN];
	struct vnic_gen_stats gen_stats;
	enum ext_cq ext_cq;
};

/* Recover the netdev backing a vnic_dev (vdev->priv holds the enic) */
static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
{
	struct enic *enic = vdev->priv;

	return enic->netdev;
}

/* wrapper functions for kernel log, keyed off the vnic_dev
 */
#define vdev_err(vdev, fmt, ...) \
	dev_err(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)
#define vdev_warn(vdev, fmt, ...) \
	dev_warn(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)
#define vdev_info(vdev, fmt, ...) \
	dev_info(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)

#define vdev_neterr(vdev, fmt, ...) \
	netdev_err(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)
#define vdev_netwarn(vdev, fmt, ...) \
	netdev_warn(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)
#define vdev_netinfo(vdev, fmt, ...) \
	netdev_info(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)

static inline struct device *enic_get_dev(struct enic *enic)
{
	return &(enic->pdev->dev);
}

/* CQ index for receive queue @rq: RQ CQs occupy indices [0, rq_count) */
static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
	return rq;
}

/* CQ index for work queue @wq: WQ CQs follow all RQ CQs */
static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;
}

static inline unsigned int enic_msix_rq_intr(struct enic *enic,
					     unsigned int rq)
{
	return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
}

static inline unsigned int enic_msix_wq_intr(struct enic *enic,
					     unsigned int wq)
{
	return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
}

/* MSIX interrupts are organized as the error interrupt, then the notify
 * interrupt followed by all the I/O interrupts. The error interrupt needs
 * to fit in 7 bits due to hardware constraints
 */
#define ENIC_MSIX_RESERVED_INTR	2
#define ENIC_MSIX_ERR_INTR	0
#define ENIC_MSIX_NOTIFY_INTR	1
#define ENIC_MSIX_IO_INTR_BASE	ENIC_MSIX_RESERVED_INTR
#define ENIC_MSIX_MIN_INTR	(ENIC_MSIX_RESERVED_INTR + 2)

/* INTx legacy mode uses fixed interrupt slots instead */
#define ENIC_LEGACY_IO_INTR	0
#define ENIC_LEGACY_ERR_INTR	1
#define ENIC_LEGACY_NOTIFY_INTR	2

static inline unsigned int enic_msix_err_intr(struct enic *enic)
{
	return ENIC_MSIX_ERR_INTR;
}

static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
	return ENIC_MSIX_NOTIFY_INTR;
}

/* True if @intr is the error interrupt for the current interrupt mode
 * (MSI mode has no dedicated error interrupt).
 */
static inline bool enic_is_err_intr(struct enic *enic, int intr)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		return intr == ENIC_LEGACY_ERR_INTR;
	case VNIC_DEV_INTR_MODE_MSIX:
		return intr == enic_msix_err_intr(enic);
	case VNIC_DEV_INTR_MODE_MSI:
	default:
		return false;
	}
}

/* True if @intr is the notify interrupt for the current interrupt mode
 * (MSI mode has no dedicated notify interrupt).
 */
static inline bool enic_is_notify_intr(struct enic *enic, int intr)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		return intr == ENIC_LEGACY_NOTIFY_INTR;
	case VNIC_DEV_INTR_MODE_MSIX:
		return intr == enic_msix_notify_intr(enic);
	case VNIC_DEV_INTR_MODE_MSI:
	default:
		return false;
	}
}

/* Check a DMA mapping; on failure log (ratelimited), bump the
 * dma_map_error counter, and return -ENOMEM. Returns 0 on success.
 */
static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
{
	if (unlikely(dma_mapping_error(&enic->pdev->dev, dma_addr))) {
		net_warn_ratelimited("%s: PCI dma mapping failed!\n",
				     enic->netdev->name);
		enic->gen_stats.dma_map_error++;

		return -ENOMEM;
	}

	return 0;
}

void enic_reset_addr_lists(struct enic *enic);
int enic_sriov_enabled(struct enic *enic);
int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
void enic_set_ethtool_ops(struct net_device *netdev);
int __enic_set_rsskey(struct enic *enic);
void enic_ext_cq(struct enic *enic);

#endif /* _ENIC_H_ */