1 /*****************************************************************************
2
3 Copyright (c) 2001-2017, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 *****************************************************************************/
33
34 #include "opt_inet.h"
35 #include "opt_inet6.h"
36 #include "opt_rss.h"
37
38 #include "ixgbe.h"
39 #include "ixgbe_sriov.h"
40 #include "ifdi_if.h"
41
42 #include <net/netmap.h>
43 #include <dev/netmap/netmap_kern.h>
44
45 /************************************************************************
46 * Driver version
47 ************************************************************************/
48 static const char ixgbe_driver_version[] = "5.0.1-k";
49
50 /************************************************************************
51 * PCI Device ID Table
52 *
53 * Used by probe to select devices to load on
54 * Last field stores an index into ixgbe_strings
55 * Last entry must be all 0s
56 *
57 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58 ************************************************************************/
59 static const pci_vendor_info_t ixgbe_vendor_info_array[] =
60 {
61 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,
62 "Intel(R) 82598EB AF (Dual Fiber)"),
63 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,
64 "Intel(R) 82598EB AF (Fiber)"),
65 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,
66 "Intel(R) 82598EB AT (CX4)"),
67 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,
68 "Intel(R) 82598EB AT"),
69 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,
70 "Intel(R) 82598EB AT2"),
71 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
72 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,
73 "Intel(R) 82598EB AF DA (Dual Fiber)"),
74 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,
75 "Intel(R) 82598EB AT (Dual CX4)"),
76 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,
77 "Intel(R) 82598EB AF (Dual Fiber LR)"),
78 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,
79 "Intel(R) 82598EB AF (Dual Fiber SR)"),
80 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,
81 "Intel(R) 82598EB LOM"),
82 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,
83 "Intel(R) X520 82599 (KX4)"),
84 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,
85 "Intel(R) X520 82599 (KX4 Mezzanine)"),
86 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,
87 "Intel(R) X520 82599ES (SFI/SFP+)"),
88 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,
89 "Intel(R) X520 82599 (XAUI/BX4)"),
90 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,
91 "Intel(R) X520 82599 (Dual CX4)"),
92 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,
93 "Intel(R) X520-T 82599 LOM"),
94 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS,
95 "Intel(R) X520 82599 LS"),
96 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,
97 "Intel(R) X520 82599 (Combined Backplane)"),
98 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,
99 "Intel(R) X520 82599 (Backplane w/FCoE)"),
100 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,
101 "Intel(R) X520 82599 (Dual SFP+)"),
102 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,
103 "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
104 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,
105 "Intel(R) X520-1 82599EN (SFP+)"),
106 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,
107 "Intel(R) X520-4 82599 (Quad SFP+)"),
108 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,
109 "Intel(R) X520-Q1 82599 (QSFP+)"),
110 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,
111 "Intel(R) X540-AT2"),
112 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
113 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
114 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
115 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,
116 "Intel(R) X552 (KR Backplane)"),
117 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,
118 "Intel(R) X552 (KX4 Backplane)"),
119 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,
120 "Intel(R) X552/X557-AT (10GBASE-T)"),
121 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,
122 "Intel(R) X552 (1000BASE-T)"),
123 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP,
124 "Intel(R) X552 (SFP+)"),
125 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR,
126 "Intel(R) X553 (KR Backplane)"),
127 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L,
128 "Intel(R) X553 L (KR Backplane)"),
129 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP,
130 "Intel(R) X553 (SFP+)"),
131 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N,
132 "Intel(R) X553 N (SFP+)"),
133 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII,
134 "Intel(R) X553 (1GbE SGMII)"),
135 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L,
136 "Intel(R) X553 L (1GbE SGMII)"),
137 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T,
138 "Intel(R) X553/X557-AT (10GBASE-T)"),
139 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T,
140 "Intel(R) X553 (1GbE)"),
141 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L,
142 "Intel(R) X553 L (1GbE)"),
143 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS,
144 "Intel(R) X540-T2 (Bypass)"),
145 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS,
146 "Intel(R) X520 82599 (Bypass)"),
147 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_BACKPLANE,
148 "Intel(R) E610 (Backplane)"),
149 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SFP,
150 "Intel(R) E610 (SFP)"),
151 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_2_5G_T,
152 "Intel(R) E610 (2.5 GbE)"),
153 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_10G_T,
154 "Intel(R) E610 (10 GbE)"),
155 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SGMII,
156 "Intel(R) E610 (SGMII)"),
157 /* required last entry */
158 PVID_END
159 };
160
161 static void *ixgbe_register(device_t);
162 static int ixgbe_if_attach_pre(if_ctx_t);
163 static int ixgbe_if_attach_post(if_ctx_t);
164 static int ixgbe_if_detach(if_ctx_t);
165 static int ixgbe_if_shutdown(if_ctx_t);
166 static int ixgbe_if_suspend(if_ctx_t);
167 static int ixgbe_if_resume(if_ctx_t);
168
169 static void ixgbe_if_stop(if_ctx_t);
170 void ixgbe_if_enable_intr(if_ctx_t);
171 static void ixgbe_if_disable_intr(if_ctx_t);
172 static void ixgbe_link_intr_enable(if_ctx_t);
173 static int ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
174 static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
175 static int ixgbe_if_media_change(if_ctx_t);
176 static int ixgbe_if_msix_intr_assign(if_ctx_t, int);
177 static int ixgbe_if_mtu_set(if_ctx_t, uint32_t);
178 static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
179 static void ixgbe_if_multi_set(if_ctx_t);
180 static int ixgbe_if_promisc_set(if_ctx_t, int);
181 static int ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
182 int);
183 static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
184 int);
185 static void ixgbe_if_queues_free(if_ctx_t);
186 static void ixgbe_if_timer(if_ctx_t, uint16_t);
187 static const char *ixgbe_link_speed_to_str(u32 link_speed);
188 static void ixgbe_if_update_admin_status(if_ctx_t);
189 static void ixgbe_if_vlan_register(if_ctx_t, u16);
190 static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
191 static int ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
192 static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
193 int ixgbe_intr(void *);
194
195 static int ixgbe_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
196
197 /************************************************************************
198 * Function prototypes
199 ************************************************************************/
200 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
201
202 static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
203 static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
204 static void ixgbe_add_device_sysctls(if_ctx_t);
205 static int ixgbe_allocate_pci_resources(if_ctx_t);
206 static int ixgbe_setup_low_power_mode(if_ctx_t);
207
208 static void ixgbe_config_dmac(struct ixgbe_softc *);
209 static void ixgbe_configure_ivars(struct ixgbe_softc *);
210 static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
211 static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
212 static bool ixgbe_sfp_probe(if_ctx_t);
213
214 static void ixgbe_free_pci_resources(if_ctx_t);
215
216 static int ixgbe_msix_link(void *);
217 static int ixgbe_msix_que(void *);
218 static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
219 static void ixgbe_initialize_receive_units(if_ctx_t);
220 static void ixgbe_initialize_transmit_units(if_ctx_t);
221
222 static int ixgbe_setup_interface(if_ctx_t);
223 static void ixgbe_init_device_features(struct ixgbe_softc *);
224 static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
225 static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
226 static void ixgbe_print_fw_version(if_ctx_t);
227 static void ixgbe_add_media_types(if_ctx_t);
228 static void ixgbe_update_stats_counters(struct ixgbe_softc *);
229 static void ixgbe_config_link(if_ctx_t);
230 static void ixgbe_get_slot_info(struct ixgbe_softc *);
231 static void ixgbe_fw_mode_timer(void *);
232 static void ixgbe_check_wol_support(struct ixgbe_softc *);
233 static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
234 static void ixgbe_disable_rx_drop(struct ixgbe_softc *);
235
236 static void ixgbe_add_hw_stats(struct ixgbe_softc *);
237 static int ixgbe_set_flowcntl(struct ixgbe_softc *, int);
238 static int ixgbe_set_advertise(struct ixgbe_softc *, int);
239 static int ixgbe_get_default_advertise(struct ixgbe_softc *);
240 static void ixgbe_setup_vlan_hw_support(if_ctx_t);
241 static void ixgbe_config_gpie(struct ixgbe_softc *);
242 static void ixgbe_config_delay_values(struct ixgbe_softc *);
243
244 static void ixgbe_add_debug_sysctls(struct ixgbe_softc *sc);
245 static void ixgbe_add_debug_dump_sysctls(struct ixgbe_softc *sc);
246 static int ixgbe_debug_dump_ioctl(struct ixgbe_softc *sc, struct ifdrv *ifd);
247 static u8 ixgbe_debug_dump_print_cluster(struct ixgbe_softc *sc,
248 struct sbuf *sbuf, u8 cluster_id);
249 static int ixgbe_nvm_access_ioctl(struct ixgbe_softc *sc, struct ifdrv *ifd);
250
251 /* Sysctl handlers */
252 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
253 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
254 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
255 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
256 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
257 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
258 static int ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
259 #ifdef IXGBE_DEBUG
260 static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
261 static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
262 #endif
263 static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
264 static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
265 static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
266 static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
267 static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
268 static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
269 static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
270 static int ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS);
271
272 static int ixgbe_sysctl_debug_dump_set_clusters(SYSCTL_HANDLER_ARGS);
273 static int ixgbe_sysctl_dump_debug_dump(SYSCTL_HANDLER_ARGS);
274
275 /* Deferred interrupt tasklets */
276 static void ixgbe_handle_msf(void *);
277 static void ixgbe_handle_mod(void *);
278 static void ixgbe_handle_phy(void *);
279 static void ixgbe_handle_fw_event(void *);
280
281 static int ixgbe_enable_lse(struct ixgbe_softc *sc);
282 static int ixgbe_disable_lse(struct ixgbe_softc *sc);
283
284 /************************************************************************
285 * FreeBSD Device Interface Entry Points
286 ************************************************************************/
287 static device_method_t ix_methods[] = {
288 /* Device interface */
289 DEVMETHOD(device_register, ixgbe_register),
290 DEVMETHOD(device_probe, iflib_device_probe),
291 DEVMETHOD(device_attach, iflib_device_attach),
292 DEVMETHOD(device_detach, iflib_device_detach),
293 DEVMETHOD(device_shutdown, iflib_device_shutdown),
294 DEVMETHOD(device_suspend, iflib_device_suspend),
295 DEVMETHOD(device_resume, iflib_device_resume),
296 #ifdef PCI_IOV
297 DEVMETHOD(pci_iov_init, iflib_device_iov_init),
298 DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
299 DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
300 #endif /* PCI_IOV */
301 DEVMETHOD_END
302 };
303
304 static driver_t ix_driver = {
305 "ix", ix_methods, sizeof(struct ixgbe_softc),
306 };
307
308 DRIVER_MODULE(ix, pci, ix_driver, 0, 0);
309 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
310 MODULE_DEPEND(ix, pci, 1, 1, 1);
311 MODULE_DEPEND(ix, ether, 1, 1, 1);
312 MODULE_DEPEND(ix, iflib, 1, 1, 1);
313
314 static device_method_t ixgbe_if_methods[] = {
315 DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre),
316 DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post),
317 DEVMETHOD(ifdi_detach, ixgbe_if_detach),
318 DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown),
319 DEVMETHOD(ifdi_suspend, ixgbe_if_suspend),
320 DEVMETHOD(ifdi_resume, ixgbe_if_resume),
321 DEVMETHOD(ifdi_init, ixgbe_if_init),
322 DEVMETHOD(ifdi_stop, ixgbe_if_stop),
323 DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign),
324 DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr),
325 DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr),
326 DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable),
327 DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
328 DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable),
329 DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc),
330 DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc),
331 DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free),
332 DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status),
333 DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set),
334 DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set),
335 DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set),
336 DEVMETHOD(ifdi_media_status, ixgbe_if_media_status),
337 DEVMETHOD(ifdi_media_change, ixgbe_if_media_change),
338 DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set),
339 DEVMETHOD(ifdi_timer, ixgbe_if_timer),
340 DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register),
341 DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister),
342 DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
343 DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
344 DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
345 DEVMETHOD(ifdi_priv_ioctl, ixgbe_if_priv_ioctl),
346 #ifdef PCI_IOV
347 DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
348 DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
349 DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add),
350 #endif /* PCI_IOV */
351 DEVMETHOD_END
352 };
353
354 /*
355 * TUNEABLE PARAMETERS:
356 */
357
358 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
359 "IXGBE driver parameters");
360 static driver_t ixgbe_if_driver = {
361 "ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
362 };
363
364 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
365 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
366 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
367
368 /* Flow control setting, default to full */
369 static int ixgbe_flow_control = ixgbe_fc_full;
370 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
371 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
372
373 /* Advertise Speed, default to 0 (auto) */
374 static int ixgbe_advertise_speed = 0;
375 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
376 &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
377
378 /*
379 * Smart speed setting, default to on
380 * this only works as a compile option
381 * right now as its during attach, set
382 * this to 'ixgbe_smart_speed_off' to
383 * disable.
384 */
385 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
386
387 /*
388 * MSI-X should be the default for best performance,
389 * but this allows it to be forced off for testing.
390 */
391 static int ixgbe_enable_msix = 1;
392 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix,
393 0,
394 "Enable MSI-X interrupts");
395
396 /*
397 * Defining this on will allow the use
398 * of unsupported SFP+ modules, note that
399 * doing so you are on your own :)
400 */
401 static int allow_unsupported_sfp = false;
402 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
403 &allow_unsupported_sfp, 0,
404 "Allow unsupported SFP modules...use at your own risk");
405
406 /*
407 * Not sure if Flow Director is fully baked,
408 * so we'll default to turning it off.
409 */
410 static int ixgbe_enable_fdir = 0;
411 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir,
412 0,
413 "Enable Flow Director");
414
415 /* Receive-Side Scaling */
416 static int ixgbe_enable_rss = 1;
417 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss,
418 0,
419 "Enable Receive-Side Scaling (RSS)");
420
421 /*
422 * AIM: Adaptive Interrupt Moderation
423 * which means that the interrupt rate
424 * is varied over time based on the
425 * traffic for that interrupt vector
426 */
427 static int ixgbe_enable_aim = false;
428 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim,
429 0,
430 "Enable adaptive interrupt moderation");
431
432 #if 0
433 /* Keep running tab on them for sanity check */
434 static int ixgbe_total_ports;
435 #endif
436
437 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
438
439 /*
440 * For Flow Director: this is the number of TX packets we sample
441 * for the filter pool, this means every 20th packet will be probed.
442 *
443 * This feature can be disabled by setting this to 0.
444 */
445 static int atr_sample_rate = 20;
446
447 extern struct if_txrx ixgbe_txrx;
448
449 static struct if_shared_ctx ixgbe_sctx_init = {
450 .isc_magic = IFLIB_MAGIC,
451 .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
452 .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
453 .isc_tx_maxsegsize = PAGE_SIZE,
454 .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
455 .isc_tso_maxsegsize = PAGE_SIZE,
456 .isc_rx_maxsize = PAGE_SIZE*4,
457 .isc_rx_nsegments = 1,
458 .isc_rx_maxsegsize = PAGE_SIZE*4,
459 .isc_nfl = 1,
460 .isc_ntxqs = 1,
461 .isc_nrxqs = 1,
462
463 .isc_admin_intrcnt = 1,
464 .isc_vendor_info = ixgbe_vendor_info_array,
465 .isc_driver_version = ixgbe_driver_version,
466 .isc_driver = &ixgbe_if_driver,
467 .isc_flags = IFLIB_TSO_INIT_IP,
468
469 .isc_nrxd_min = {MIN_RXD},
470 .isc_ntxd_min = {MIN_TXD},
471 .isc_nrxd_max = {MAX_RXD},
472 .isc_ntxd_max = {MAX_TXD},
473 .isc_nrxd_default = {DEFAULT_RXD},
474 .isc_ntxd_default = {DEFAULT_TXD},
475 };
476
477 /************************************************************************
478 * ixgbe_if_tx_queues_alloc
479 ************************************************************************/
480 static int
ixgbe_if_tx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int ntxqs,int ntxqsets)481 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
482 int ntxqs, int ntxqsets)
483 {
484 struct ixgbe_softc *sc = iflib_get_softc(ctx);
485 if_softc_ctx_t scctx = sc->shared;
486 struct ix_tx_queue *que;
487 int i, j, error;
488
489 MPASS(sc->num_tx_queues > 0);
490 MPASS(sc->num_tx_queues == ntxqsets);
491 MPASS(ntxqs == 1);
492
493 /* Allocate queue structure memory */
494 sc->tx_queues =
495 (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) *
496 ntxqsets, M_IXGBE, M_NOWAIT | M_ZERO);
497 if (!sc->tx_queues) {
498 device_printf(iflib_get_dev(ctx),
499 "Unable to allocate TX ring memory\n");
500 return (ENOMEM);
501 }
502
503 for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
504 struct tx_ring *txr = &que->txr;
505
506 /* In case SR-IOV is enabled, align the index properly */
507 txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
508
509 txr->sc = que->sc = sc;
510
511 /* Allocate report status array */
512 txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
513 scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
514 if (txr->tx_rsq == NULL) {
515 error = ENOMEM;
516 goto fail;
517 }
518 for (j = 0; j < scctx->isc_ntxd[0]; j++)
519 txr->tx_rsq[j] = QIDX_INVALID;
520 /* get virtual and physical address of the hardware queues */
521 txr->tail = IXGBE_TDT(txr->me);
522 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
523 txr->tx_paddr = paddrs[i];
524
525 txr->bytes = 0;
526 txr->total_packets = 0;
527
528 /* Set the rate at which we sample packets */
529 if (sc->feat_en & IXGBE_FEATURE_FDIR)
530 txr->atr_sample = atr_sample_rate;
531
532 }
533
534 device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
535 sc->num_tx_queues);
536
537 return (0);
538
539 fail:
540 ixgbe_if_queues_free(ctx);
541
542 return (error);
543 } /* ixgbe_if_tx_queues_alloc */
544
545 /************************************************************************
546 * ixgbe_if_rx_queues_alloc
547 ************************************************************************/
548 static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int nrxqs,int nrxqsets)549 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
550 int nrxqs, int nrxqsets)
551 {
552 struct ixgbe_softc *sc = iflib_get_softc(ctx);
553 struct ix_rx_queue *que;
554 int i;
555
556 MPASS(sc->num_rx_queues > 0);
557 MPASS(sc->num_rx_queues == nrxqsets);
558 MPASS(nrxqs == 1);
559
560 /* Allocate queue structure memory */
561 sc->rx_queues =
562 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
563 M_IXGBE, M_NOWAIT | M_ZERO);
564 if (!sc->rx_queues) {
565 device_printf(iflib_get_dev(ctx),
566 "Unable to allocate TX ring memory\n");
567 return (ENOMEM);
568 }
569
570 for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
571 struct rx_ring *rxr = &que->rxr;
572
573 /* In case SR-IOV is enabled, align the index properly */
574 rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
575
576 rxr->sc = que->sc = sc;
577
578 /* get the virtual and physical address of the hw queues */
579 rxr->tail = IXGBE_RDT(rxr->me);
580 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
581 rxr->rx_paddr = paddrs[i];
582 rxr->bytes = 0;
583 rxr->que = que;
584 }
585
586 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
587 sc->num_rx_queues);
588
589 return (0);
590 } /* ixgbe_if_rx_queues_alloc */
591
592 /************************************************************************
593 * ixgbe_if_queues_free
594 ************************************************************************/
595 static void
ixgbe_if_queues_free(if_ctx_t ctx)596 ixgbe_if_queues_free(if_ctx_t ctx)
597 {
598 struct ixgbe_softc *sc = iflib_get_softc(ctx);
599 struct ix_tx_queue *tx_que = sc->tx_queues;
600 struct ix_rx_queue *rx_que = sc->rx_queues;
601 int i;
602
603 if (tx_que != NULL) {
604 for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
605 struct tx_ring *txr = &tx_que->txr;
606 if (txr->tx_rsq == NULL)
607 break;
608
609 free(txr->tx_rsq, M_IXGBE);
610 txr->tx_rsq = NULL;
611 }
612
613 free(sc->tx_queues, M_IXGBE);
614 sc->tx_queues = NULL;
615 }
616 if (rx_que != NULL) {
617 free(sc->rx_queues, M_IXGBE);
618 sc->rx_queues = NULL;
619 }
620 } /* ixgbe_if_queues_free */
621
622 /************************************************************************
623 * ixgbe_initialize_rss_mapping
624 ************************************************************************/
/*
 * Program the hardware RSS redirection table (RETA/ERETA), the RSS key
 * registers (RSSRK) and the hash-type selection (MRQC) for this adapter.
 * When the kernel RSS feature is enabled the key, indirection entries and
 * hash config come from the stack; otherwise a random key and a simple
 * round-robin queue mapping are used.
 */
static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 reta = 0, mrqc, rss_key[10];	/* 10 x 32 bits = 40-byte RSS key */
	int queue_id, table_size, index_mult;
	int i, j;
	u32 rss_hash_config;

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 uses a strided queue index encoding in RETA. */
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
	case ixgbe_mac_E610:
		/* These MACs have a 512-entry table (128 RETA + 384 ERETA). */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles 0..num_rx_queues-1 for the round-robin mapping. */
		if (j == sc->num_rx_queues)
			j = 0;

		if (sc->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % sc->num_rx_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 *
		 * Accumulate four 8-bit entries, then write the 32-bit
		 * register once every fourth iteration.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw,
				    IXGBE_ERETA((i >> 2) - 32), reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (sc->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4 |
		    RSS_HASHTYPE_RSS_TCP_IPV4 |
		    RSS_HASHTYPE_RSS_IPV6 |
		    RSS_HASHTYPE_RSS_TCP_IPV6 |
		    RSS_HASHTYPE_RSS_IPV6_EX |
		    RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the stack's hash-type bits into MRQC field enables. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	/* Merge in any SR-IOV multiqueue mode bits. */
	mrqc |= ixgbe_get_mrqc(sc->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
734
735 /************************************************************************
736 * ixgbe_initialize_receive_units - Setup receive registers and features.
737 ************************************************************************/
738 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
739
/*
 * Program the receive side of the adapter: broadcast/filter control,
 * jumbo-frame enable, per-queue descriptor ring base/length registers,
 * SRRCTL buffer sizing and drop policy, packet-split types, RSS, and
 * the RX checksum offload register.  Receives are disabled for the
 * duration of the setup.
 */
static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ixgbe_hw *hw = &sc->hw;
	if_t ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que;
	int i, j;
	u32 bufsz, fctrl, srrctl, rxcsum;
	u32 hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598-only: discard pause frames, pass MAC control frames */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (if_getmtu(ifp) > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Receive buffer size in SRRCTL units, rounded up to 1KB. */
	bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Setup the Base and Length of the Rx Descriptor Ring */
	for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64 rdba = rxr->rx_paddr;

		/* Hardware queue index (may differ from i under SR-IOV). */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (sc->num_rx_queues > 1 &&
		    sc->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
		/* Packet-split types to recognize (not on 82598). */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
		    IXGBE_PSRTYPE_UDPHDR |
		    IXGBE_PSRTYPE_IPV4HDR |
		    IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(sc);

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (if_getcapenable(ifp) & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
849
850 /************************************************************************
851 * ixgbe_initialize_transmit_units - Enable transmit units.
852 ************************************************************************/
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
	    i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64 tdba = txr->tx_paddr;	/* DMA base of the descriptor ring */
		u32 txctrl = 0;
		int j = txr->me;	/* hardware queue index for this ring */

		/* Program ring base (split low/high) and byte length. */
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(txr->me);

		/* Reset software report-status tracking for this ring. */
		txr->tx_rs_cidx = txr->tx_rs_pidx;
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl =
			    IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		/* Clear descriptor write-back relaxed ordering. */
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j),
			    txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Non-82598 MACs need an explicit transmit DMA enable. */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		/* Select the TX queue mapping for the active SR-IOV mode. */
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(sc->iov_mode));
		/* Re-enable the arbiter. */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

} /* ixgbe_initialize_transmit_units */
933
934 static int
ixgbe_check_fw_api_version(struct ixgbe_softc * sc)935 ixgbe_check_fw_api_version(struct ixgbe_softc *sc)
936 {
937 struct ixgbe_hw *hw = &sc->hw;
938 if (hw->api_maj_ver > IXGBE_FW_API_VER_MAJOR) {
939 device_printf(sc->dev,
940 "The driver for the device stopped because the NVM "
941 "image is newer than expected. You must install the "
942 "most recent version of the network driver.\n");
943 return (EOPNOTSUPP);
944 } else if (hw->api_maj_ver == IXGBE_FW_API_VER_MAJOR &&
945 hw->api_min_ver > (IXGBE_FW_API_VER_MINOR + 2)) {
946 device_printf(sc->dev,
947 "The driver for the device detected a newer version of "
948 "the NVM image than expected. Please install the most "
949 "recent version of the network driver.\n");
950 } else if (hw->api_maj_ver < IXGBE_FW_API_VER_MAJOR ||
951 hw->api_min_ver < IXGBE_FW_API_VER_MINOR - 2) {
952 device_printf(sc->dev,
953 "The driver for the device detected an older version "
954 "of the NVM image than expected. "
955 "Please update the NVM image.\n");
956 }
957 return (0);
958 }
959
960 /************************************************************************
961 * ixgbe_register
962 ************************************************************************/
963 static void *
ixgbe_register(device_t dev)964 ixgbe_register(device_t dev)
965 {
966 return (&ixgbe_sctx_init);
967 } /* ixgbe_register */
968
969 /************************************************************************
970 * ixgbe_if_attach_pre - Device initialization routine, part 1
971 *
972 * Called when the driver is being loaded.
973 * Identifies the type of hardware, initializes the hardware,
974 * and initializes iflib structures.
975 *
976 * return 0 on success, positive on failure
977 ************************************************************************/
978 static int
ixgbe_if_attach_pre(if_ctx_t ctx)979 ixgbe_if_attach_pre(if_ctx_t ctx)
980 {
981 struct ixgbe_softc *sc;
982 device_t dev;
983 if_softc_ctx_t scctx;
984 struct ixgbe_hw *hw;
985 int error = 0;
986 u32 ctrl_ext;
987 size_t i;
988
989 INIT_DEBUGOUT("ixgbe_attach: begin");
990
991 /* Allocate, clear, and link in our adapter structure */
992 dev = iflib_get_dev(ctx);
993 sc = iflib_get_softc(ctx);
994 sc->hw.back = sc;
995 sc->ctx = ctx;
996 sc->dev = dev;
997 scctx = sc->shared = iflib_get_softc_ctx(ctx);
998 sc->media = iflib_get_media(ctx);
999 hw = &sc->hw;
1000
1001 /* Determine hardware revision */
1002 hw->vendor_id = pci_get_vendor(dev);
1003 hw->device_id = pci_get_device(dev);
1004 hw->revision_id = pci_get_revid(dev);
1005 hw->subsystem_vendor_id = pci_get_subvendor(dev);
1006 hw->subsystem_device_id = pci_get_subdevice(dev);
1007
1008 /* Do base PCI setup - map BAR0 */
1009 if (ixgbe_allocate_pci_resources(ctx)) {
1010 device_printf(dev, "Allocation of PCI resources failed\n");
1011 return (ENXIO);
1012 }
1013
1014 /* let hardware know driver is loaded */
1015 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1016 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
1017 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1018
1019 /*
1020 * Initialize the shared code
1021 */
1022 if (ixgbe_init_shared_code(hw) != 0) {
1023 device_printf(dev, "Unable to initialize the shared code\n");
1024 error = ENXIO;
1025 goto err_pci;
1026 }
1027
1028 if (hw->mac.type == ixgbe_mac_E610)
1029 ixgbe_init_aci(hw);
1030
1031 sc->do_debug_dump = false;
1032
1033 if (hw->mac.ops.fw_recovery_mode &&
1034 hw->mac.ops.fw_recovery_mode(hw)) {
1035 device_printf(dev,
1036 "Firmware recovery mode detected. Limiting "
1037 "functionality.\nRefer to the Intel(R) Ethernet Adapters "
1038 "and Devices User Guide for details on firmware recovery "
1039 "mode.");
1040 error = ENOSYS;
1041 goto err_pci;
1042 }
1043
1044 /* 82598 Does not support SR-IOV, initialize everything else */
1045 if (hw->mac.type >= ixgbe_mac_82599_vf) {
1046 for (i = 0; i < sc->num_vfs; i++)
1047 hw->mbx.ops[i].init_params(hw);
1048 }
1049
1050 hw->allow_unsupported_sfp = allow_unsupported_sfp;
1051
1052 if (hw->mac.type != ixgbe_mac_82598EB)
1053 hw->phy.smart_speed = ixgbe_smart_speed;
1054
1055 ixgbe_init_device_features(sc);
1056
1057 /* Enable WoL (if supported) */
1058 ixgbe_check_wol_support(sc);
1059
1060 /* Verify adapter fan is still functional (if applicable) */
1061 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
1062 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
1063 ixgbe_check_fan_failure(sc, esdp, false);
1064 }
1065
1066 /* Ensure SW/FW semaphore is free */
1067 ixgbe_init_swfw_semaphore(hw);
1068
1069 /* Enable EEE power saving */
1070 if (sc->feat_en & IXGBE_FEATURE_EEE)
1071 hw->mac.ops.setup_eee(hw, true);
1072
1073 /* Set an initial default flow control value */
1074 hw->fc.requested_mode = ixgbe_flow_control;
1075
1076 hw->phy.reset_if_overtemp = true;
1077 error = ixgbe_reset_hw(hw);
1078 hw->phy.reset_if_overtemp = false;
1079 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
1080 /*
1081 * No optics in this port, set up
1082 * so the timer routine will probe
1083 * for later insertion.
1084 */
1085 sc->sfp_probe = true;
1086 error = 0;
1087 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1088 device_printf(dev, "Unsupported SFP+ module detected!\n");
1089 error = EIO;
1090 goto err_pci;
1091 } else if (error) {
1092 device_printf(dev, "Hardware initialization failed\n");
1093 error = EIO;
1094 goto err_pci;
1095 }
1096
1097 /* Make sure we have a good EEPROM before we read from it */
1098 if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
1099 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
1100 error = EIO;
1101 goto err_pci;
1102 }
1103
1104 error = ixgbe_start_hw(hw);
1105 switch (error) {
1106 case IXGBE_ERR_EEPROM_VERSION:
1107 device_printf(dev,
1108 "This device is a pre-production adapter/LOM. Please be"
1109 " aware there may be issues associated with your"
1110 " hardware.\nIf you are experiencing problems please"
1111 " contact your Intel or hardware representative who"
1112 " provided you with this hardware.\n");
1113 break;
1114 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1115 device_printf(dev, "Unsupported SFP+ Module\n");
1116 error = EIO;
1117 goto err_pci;
1118 case IXGBE_ERR_SFP_NOT_PRESENT:
1119 device_printf(dev, "No SFP+ Module found\n");
1120 /* falls thru */
1121 default:
1122 break;
1123 }
1124
1125 /* Check the FW API version */
1126 if (hw->mac.type == ixgbe_mac_E610 && ixgbe_check_fw_api_version(sc)) {
1127 error = EIO;
1128 goto err_pci;
1129 }
1130
1131 /* Most of the iflib initialization... */
1132
1133 iflib_set_mac(ctx, hw->mac.addr);
1134 switch (sc->hw.mac.type) {
1135 case ixgbe_mac_X550:
1136 case ixgbe_mac_X550EM_x:
1137 case ixgbe_mac_X550EM_a:
1138 scctx->isc_rss_table_size = 512;
1139 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
1140 break;
1141 default:
1142 scctx->isc_rss_table_size = 128;
1143 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
1144 }
1145
1146 /* Allow legacy interrupts */
1147 ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
1148
1149 scctx->isc_txqsizes[0] =
1150 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
1151 sizeof(u32), DBA_ALIGN),
1152 scctx->isc_rxqsizes[0] =
1153 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
1154 DBA_ALIGN);
1155
1156 /* XXX */
1157 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1158 CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
1159 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
1160 scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
1161 } else {
1162 scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
1163 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
1164 }
1165
1166 scctx->isc_msix_bar = pci_msix_table_bar(dev);
1167
1168 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
1169 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
1170 scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
1171
1172 scctx->isc_txrx = &ixgbe_txrx;
1173
1174 scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
1175
1176 return (0);
1177
1178 err_pci:
1179 ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
1180 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1181 IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
1182 ixgbe_free_pci_resources(ctx);
1183
1184 if (hw->mac.type == ixgbe_mac_E610)
1185 ixgbe_shutdown_aci(hw);
1186
1187 return (error);
1188 } /* ixgbe_if_attach_pre */
1189
1190 /*********************************************************************
1191 * ixgbe_if_attach_post - Device initialization routine, part 2
1192 *
1193 * Called during driver load, but after interrupts and
1194 * resources have been allocated and configured.
1195 * Sets up some data structures not relevant to iflib.
1196 *
1197 * return 0 on success, positive on failure
1198 *********************************************************************/
static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixgbe_softc *sc;
	struct ixgbe_hw *hw;
	int error = 0;

	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	hw = &sc->hw;

	/* Legacy (INTx) interrupts only work on parts that advertise it. */
	if (sc->intr_type == IFLIB_INTR_LEGACY &&
	    (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
		device_printf(dev, "Device does not support legacy interrupts");
		error = ENXIO;
		goto err;
	}

	/* Allocate multicast array memory. */
	sc->mta = malloc(sizeof(*sc->mta) * MAX_NUM_MULTICAST_ADDRESSES,
	    M_IXGBE, M_NOWAIT);
	if (sc->mta == NULL) {
		device_printf(dev,
		    "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err;
	}

	/* hw.ix defaults init */
	ixgbe_set_advertise(sc, ixgbe_advertise_speed);

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, true);

	ixgbe_initialize_iov(sc);

	/*
	 * NOTE(review): error paths past this point leave sc->mta
	 * allocated — presumably freed by the detach path; confirm.
	 */
	error = ixgbe_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto err;
	}

	ixgbe_if_update_admin_status(ctx);

	/* Initialize statistics */
	ixgbe_update_stats_counters(sc);
	ixgbe_add_hw_stats(sc);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(sc);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass sc.
	 */
	ixgbe_bypass_init(sc);

	/* Display NVM and Option ROM versions */
	ixgbe_print_fw_version(ctx);

	/* Set an initial dmac value */
	sc->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	sc->advertise = ixgbe_get_default_advertise(sc);

	/* Expose SR-IOV config schemas when the hardware supports it. */
	if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(ctx);

	/* Init recovery mode timer and state variable */
	if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
		sc->recovery_mode = 0;

		/* Set up the timer callout */
		callout_init(&sc->fw_mode_timer, true);

		/* Start the task */
		callout_reset(&sc->fw_mode_timer, hz, ixgbe_fw_mode_timer, sc);
	}

	return (0);
err:
	return (error);
} /* ixgbe_if_attach_post */
1289
1290 /************************************************************************
1291 * ixgbe_check_wol_support
1292 *
1293 * Checks whether the adapter's ports are capable of
1294 * Wake On LAN by reading the adapter's NVM.
1295 *
1296 * Sets each port's hw->wol_enabled value depending
1297 * on the value read here.
1298 ************************************************************************/
1299 static void
ixgbe_check_wol_support(struct ixgbe_softc * sc)1300 ixgbe_check_wol_support(struct ixgbe_softc *sc)
1301 {
1302 struct ixgbe_hw *hw = &sc->hw;
1303 u16 dev_caps = 0;
1304
1305 /* Find out WoL support for port */
1306 sc->wol_support = hw->wol_enabled = 0;
1307 ixgbe_get_device_caps(hw, &dev_caps);
1308 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1309 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1310 hw->bus.func == 0))
1311 sc->wol_support = hw->wol_enabled = 1;
1312
1313 /* Save initial wake up filter configuration */
1314 sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1315
1316 return;
1317 } /* ixgbe_check_wol_support */
1318
1319 /************************************************************************
1320 * ixgbe_setup_interface
1321 *
1322 * Setup networking device structure and register an interface.
1323 ************************************************************************/
1324 static int
ixgbe_setup_interface(if_ctx_t ctx)1325 ixgbe_setup_interface(if_ctx_t ctx)
1326 {
1327 if_t ifp = iflib_get_ifp(ctx);
1328 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1329
1330 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1331
1332 if_setbaudrate(ifp, IF_Gbps(10));
1333
1334 sc->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
1335
1336 sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);
1337
1338 ixgbe_add_media_types(ctx);
1339
1340 /* Autoselect media by default */
1341 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
1342
1343 return (0);
1344 } /* ixgbe_setup_interface */
1345
1346 /************************************************************************
1347 * ixgbe_if_get_counter
1348 ************************************************************************/
1349 static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx,ift_counter cnt)1350 ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1351 {
1352 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1353 if_t ifp = iflib_get_ifp(ctx);
1354
1355 switch (cnt) {
1356 case IFCOUNTER_IPACKETS:
1357 return (sc->ipackets);
1358 case IFCOUNTER_OPACKETS:
1359 return (sc->opackets);
1360 case IFCOUNTER_IBYTES:
1361 return (sc->ibytes);
1362 case IFCOUNTER_OBYTES:
1363 return (sc->obytes);
1364 case IFCOUNTER_IMCASTS:
1365 return (sc->imcasts);
1366 case IFCOUNTER_OMCASTS:
1367 return (sc->omcasts);
1368 case IFCOUNTER_COLLISIONS:
1369 return (0);
1370 case IFCOUNTER_IQDROPS:
1371 return (sc->iqdrops);
1372 case IFCOUNTER_IERRORS:
1373 return (sc->ierrors);
1374 default:
1375 return (if_get_counter_default(ifp, cnt));
1376 }
1377 } /* ixgbe_if_get_counter */
1378
1379 /************************************************************************
1380 * ixgbe_if_i2c_req
1381 ************************************************************************/
1382 static int
ixgbe_if_i2c_req(if_ctx_t ctx,struct ifi2creq * req)1383 ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1384 {
1385 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1386 struct ixgbe_hw *hw = &sc->hw;
1387 int i;
1388
1389 if (hw->phy.ops.read_i2c_byte == NULL)
1390 return (ENXIO);
1391 for (i = 0; i < req->len; i++)
1392 hw->phy.ops.read_i2c_byte(hw, req->offset + i,
1393 req->dev_addr, &req->data[i]);
1394 return (0);
1395 } /* ixgbe_if_i2c_req */
1396
1397 /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be
1398 * reinitialized
1399 * @ctx: iflib context
1400 * @event: event code to check
1401 *
1402 * Defaults to returning false for unknown events.
1403 *
1404 * @returns true if iflib needs to reinit the interface
1405 */
1406 static bool
ixgbe_if_needs_restart(if_ctx_t ctx __unused,enum iflib_restart_event event)1407 ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1408 {
1409 switch (event) {
1410 case IFLIB_RESTART_VLAN_CONFIG:
1411 default:
1412 return (false);
1413 }
1414 }
1415
1416 /************************************************************************
1417 * ixgbe_if_priv_ioctl - Ioctl handler for driver
1418 *
1419 * Handler for custom driver specific ioctls
1420 *
1421 * return 0 on success, positive on failure
1422 ************************************************************************/
1423 static int
ixgbe_if_priv_ioctl(if_ctx_t ctx,u_long command,caddr_t data)1424 ixgbe_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
1425 {
1426 struct ixgbe_softc *sc = iflib_get_softc(ctx);
1427 struct ifdrv *ifd;
1428 device_t dev = sc->dev;
1429
1430 /* Make sure the command type is valid */
1431 switch (command) {
1432 case SIOCSDRVSPEC:
1433 case SIOCGDRVSPEC:
1434 /* Accepted commands */
1435 break;
1436 case SIOCGPRIVATE_0:
1437 /*
1438 * Although we do not support this ioctl command, it's expected
1439 * that iflib will forward it to the IFDI_PRIV_IOCTL handler.
1440 * Do not print a message in this case.
1441 */
1442 return (ENOTSUP);
1443 default:
1444 /*
1445 * If we get a different command for this function, it's
1446 * definitely unexpected, so log a message indicating what
1447 * command we got for debugging purposes.
1448 */
1449 device_printf(dev,
1450 "%s: unexpected ioctl command %08lx\n",
1451 __func__, command);
1452 return (EINVAL);
1453 }
1454
1455 ifd = (struct ifdrv *)data;
1456
1457 switch (ifd->ifd_cmd) {
1458 case IXGBE_NVM_ACCESS:
1459 IOCTL_DEBUGOUT("ioctl: NVM ACCESS");
1460 return (ixgbe_nvm_access_ioctl(sc, ifd));
1461 case IXGBE_DEBUG_DUMP:
1462 IOCTL_DEBUGOUT("ioctl: DEBUG DUMP");
1463 return (ixgbe_debug_dump_ioctl(sc, ifd));
1464 default:
1465 IOCTL_DEBUGOUT1(
1466 "ioctl: UNKNOWN SIOC(S|G)DRVSPEC (0x%X) command\n",
1467 (int)ifd->ifd_cmd);
1468 return (EINVAL);
1469 }
1470
1471 return (0);
1472 }
1473
1474 /************************************************************************
1475 * ixgbe_nvm_access_ioctl
1476 *
1477 * Handles an NVM access ioctl request
1478 ************************************************************************/
static int
ixgbe_nvm_access_ioctl(struct ixgbe_softc *sc, struct ifdrv *ifd)
{
	struct ixgbe_nvm_access_data *data;
	struct ixgbe_nvm_access_cmd *cmd;
	struct ixgbe_hw *hw = &sc->hw;
	size_t ifd_len = ifd->ifd_len;
	size_t malloc_len;
	device_t dev = sc->dev;
	u8 *nvm_buffer;
	s32 error = 0;

	/*
	 * ifioctl forwards SIOCxDRVSPEC to iflib without conducting
	 * a privilege check. Subsequently, iflib passes the ioctl to the driver
	 * without verifying privileges. To prevent non-privileged threads from
	 * accessing this interface, perform a privilege check at this point.
	 */
	error = priv_check(curthread, PRIV_DRIVER);
	if (error)
		return (error);

	/* The user buffer must hold at least the command header. */
	if (ifd_len < sizeof(*cmd)) {
		device_printf(dev,
		    "%s: ifdrv length is too small. Got %zu, "
		    "but expected %zu\n",
		    __func__, ifd_len, sizeof(*cmd));
		return (EINVAL);
	}

	if (ifd->ifd_data == NULL) {
		device_printf(dev, "%s: No ifd data buffer.\n",
		    __func__);
		return (EINVAL);
	}

	/* Allocate enough for the user's data or cmd+data, whichever is larger. */
	malloc_len = max(ifd_len, sizeof(*data) + sizeof(*cmd));

	nvm_buffer = (u8 *)malloc(malloc_len, M_IXGBE, M_ZERO | M_NOWAIT);
	if (!nvm_buffer)
		return (ENOMEM);

	/* Copy the NVM access command and data in from user space */
	error = copyin(ifd->ifd_data, nvm_buffer, ifd_len);
	if (error) {
		device_printf(dev, "%s: Failed to copy data in, error: %d\n",
		    __func__, error);
		goto cleanup_free_nvm_buffer;
	}

	/*
	 * The NVM command structure is immediately followed by data which
	 * varies in size based on the command.
	 */
	cmd = (struct ixgbe_nvm_access_cmd *)nvm_buffer;
	data = (struct ixgbe_nvm_access_data *)
	    (nvm_buffer + sizeof(struct ixgbe_nvm_access_cmd));

	/* Handle the NVM access request */
	error = ixgbe_handle_nvm_access(hw, cmd, data);
	if (error) {
		device_printf(dev, "%s: NVM access request failed, error %d\n",
		    __func__, error);
	}

	/*
	 * Copy the possibly modified contents of the handled request out.
	 * NOTE(review): this runs even when ixgbe_handle_nvm_access()
	 * failed, so the response fields reach user space; the copyout
	 * result then replaces 'error', discarding the handler's error
	 * code — presumably intentional (the status travels in the
	 * copied-out command), but confirm.
	 */
	error = copyout(nvm_buffer, ifd->ifd_data, ifd_len);
	if (error) {
		device_printf(dev, "%s: Copying response back to "
		    "user space failed, error %d\n",
		    __func__, error);
		goto cleanup_free_nvm_buffer;
	}

cleanup_free_nvm_buffer:
	free(nvm_buffer, M_IXGBE);
	return (error);
}
1557
1558 /************************************************************************
1559 * ixgbe_debug_dump_ioctl
1560 *
1561 * Makes debug dump of internal FW/HW data.
1562 ************************************************************************/
1563 static int
ixgbe_debug_dump_ioctl(struct ixgbe_softc * sc,struct ifdrv * ifd)1564 ixgbe_debug_dump_ioctl(struct ixgbe_softc *sc, struct ifdrv *ifd)
1565 {
1566 struct ixgbe_debug_dump_cmd *dd_cmd;
1567 struct ixgbe_hw *hw = &sc->hw;
1568 size_t ifd_len = ifd->ifd_len;
1569 device_t dev = sc->dev;
1570 s32 error = 0;
1571
1572 if (!(sc->feat_en & IXGBE_FEATURE_DBG_DUMP))
1573 return (ENODEV);
1574
1575 /* Data returned from ACI command */
1576 u16 ret_buf_size = 0;
1577 u16 ret_next_cluster = 0;
1578 u16 ret_next_table = 0;
1579 u32 ret_next_index = 0;
1580
1581 /*
1582 * ifioctl forwards SIOCxDRVSPEC to iflib without conducting
1583 * a privilege check. Subsequently, iflib passes the ioctl to the driver
1584 * without verifying privileges. To prevent non-privileged threads from
1585 * accessing this interface, perform a privilege check at this point.
1586 */
1587 error = priv_check(curthread, PRIV_DRIVER);
1588 if (error)
1589 return (error);
1590
1591 if (ifd_len < sizeof(*dd_cmd)) {
1592 device_printf(dev,
1593 "%s: ifdrv length is too small. Got %zu, "
1594 "but expected %zu\n",
1595 __func__, ifd_len, sizeof(*dd_cmd));
1596 return (EINVAL);
1597 }
1598
1599 if (ifd->ifd_data == NULL) {
1600 device_printf(dev, "%s: No ifd data buffer.\n",
1601 __func__);
1602 return (EINVAL);
1603 }
1604
1605 dd_cmd = (struct ixgbe_debug_dump_cmd *)malloc(ifd_len, M_IXGBE,
1606 M_NOWAIT | M_ZERO);
1607 if (!dd_cmd) {
1608 error = -ENOMEM;
1609 goto out;
1610 }
1611 /* copy data from userspace */
1612 error = copyin(ifd->ifd_data, dd_cmd, ifd_len);
1613 if (error) {
1614 device_printf(dev, "%s: Failed to copy data in, error: %d\n",
1615 __func__, error);
1616 goto out;
1617 }
1618
1619 /* ACI command requires buf_size arg to be grater than 0 */
1620 if (dd_cmd->data_size == 0) {
1621 device_printf(dev, "%s: data_size must be greater than 0\n",
1622 __func__);
1623 error = EINVAL;
1624 goto out;
1625 }
1626
1627 /* Zero the data buffer memory space */
1628 memset(dd_cmd->data, 0, ifd_len - sizeof(*dd_cmd));
1629
1630 error = ixgbe_aci_get_internal_data(hw, dd_cmd->cluster_id,
1631 dd_cmd->table_id, dd_cmd->offset, dd_cmd->data, dd_cmd->data_size,
1632 &ret_buf_size, &ret_next_cluster, &ret_next_table, &ret_next_index);
1633 if (error) {
1634 device_printf(dev,
1635 "%s: Failed to get internal FW/HW data, error: %d\n",
1636 __func__, error);
1637 goto out;
1638 }
1639
1640 dd_cmd->cluster_id = ret_next_cluster;
1641 dd_cmd->table_id = ret_next_table;
1642 dd_cmd->offset = ret_next_index;
1643 dd_cmd->data_size = ret_buf_size;
1644
1645 error = copyout(dd_cmd, ifd->ifd_data, ifd->ifd_len);
1646 if (error) {
1647 device_printf(dev,
1648 "%s: Failed to copy data out, error: %d\n",
1649 __func__, error);
1650 }
1651
1652 out:
1653 free(dd_cmd, M_IXGBE);
1654
1655 return (error);
1656 }
1657
1658 /************************************************************************
1659 * ixgbe_add_media_types
1660 ************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	u64 layer;

	/* Cache the supported physical-layer bitmask for later use. */
	layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_5000BASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);

	/* X550 always supports 2.5G/5G regardless of the reported layer. */
	if (hw->mac.type == ixgbe_mac_X550) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
		ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
	}

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
		    NULL);
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	}

	/* Multispeed fiber modules also support the 1G rate. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0,
			    NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0,
			    NULL);
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
	/* Backplane media types have direct defines on newer systems. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
		ifmedia_add( sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
	/* Without IFM_ETH_XTYPE, map backplane types to nearest equivalents. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		device_printf(dev, "Media supported: 2500baseKX\n");
		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
	}
#endif
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
		device_printf(dev, "Media supported: 1000baseBX\n");
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_BX, 0, NULL);
	}

	/* 82598AT also supports 1G copper (full duplex and generic). */
	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	/* Autoselect is always available. */
	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */
1757
1758 /************************************************************************
1759 * ixgbe_is_sfp
1760 ************************************************************************/
1761 static inline bool
ixgbe_is_sfp(struct ixgbe_hw * hw)1762 ixgbe_is_sfp(struct ixgbe_hw *hw)
1763 {
1764 switch (hw->mac.type) {
1765 case ixgbe_mac_82598EB:
1766 if (hw->phy.type == ixgbe_phy_nl)
1767 return (true);
1768 return (false);
1769 case ixgbe_mac_82599EB:
1770 switch (hw->mac.ops.get_media_type(hw)) {
1771 case ixgbe_media_type_fiber:
1772 case ixgbe_media_type_fiber_qsfp:
1773 return (true);
1774 default:
1775 return (false);
1776 }
1777 case ixgbe_mac_X550EM_x:
1778 case ixgbe_mac_X550EM_a:
1779 case ixgbe_mac_E610:
1780 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1781 return (true);
1782 return (false);
1783 default:
1784 return (false);
1785 }
1786 } /* ixgbe_is_sfp */
1787
1788 /************************************************************************
1789 * ixgbe_config_link
1790 ************************************************************************/
static void
ixgbe_config_link(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 autoneg, err = 0;
	bool sfp, negotiate;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		/*
		 * Pluggable optics: defer to the admin task's module
		 * handler rather than configuring the link directly.
		 */
		sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		iflib_admin_intr_deferred(ctx);
	} else {
		/* Fixed media: query current link state first. */
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &sc->link_speed,
			    &sc->link_up, false);
		if (err)
			return;
		autoneg = hw->phy.autoneg_advertised;
		/* No advertised speeds set: fall back to hardware caps. */
		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			return;

		if (hw->mac.type == ixgbe_mac_X550 &&
		    hw->phy.autoneg_advertised == 0) {
			/*
			 * 2.5G and 5G autonegotiation speeds on X550
			 * are disabled by default due to reported
			 * interoperability issues with some switches.
			 *
			 * The second condition checks if any operations
			 * involving setting autonegotiation speeds have
			 * been performed prior to this ixgbe_config_link()
			 * call.
			 *
			 * If hw->phy.autoneg_advertised does not
			 * equal 0, this means that the user might have
			 * set autonegotiation speeds via the sysctl
			 * before bringing the interface up. In this
			 * case, we should not disable 2.5G and 5G
			 * since that speeds might be selected by the
			 * user.
			 *
			 * Otherwise (i.e. if hw->phy.autoneg_advertised
			 * is set to 0), it is the first time we set
			 * autonegotiation preferences and the default
			 * set of speeds should exclude 2.5G and 5G.
			 */
			autoneg &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
			    IXGBE_LINK_SPEED_5GB_FULL);
		}

		if (hw->mac.type == ixgbe_mac_E610) {
			/* E610 needs PHY init plus firmware link events. */
			hw->phy.ops.init(hw);
			err = ixgbe_enable_lse(sc);
			if (err)
				device_printf(sc->dev,
				    "Failed to enable Link Status Event, "
				    "error: %d", err);
		}

		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    sc->link_up);
	}
} /* ixgbe_config_link */
1860
/************************************************************************
 * ixgbe_update_stats_counters - Update board statistics counters.
 *
 *   Accumulates the hardware statistics registers into the softc's
 *   ixgbe_hw_stats (the "+=" pattern reflects clear-on-read counters)
 *   and mirrors selected totals into the iflib/ifnet counters.
 ************************************************************************/
static void
ixgbe_update_stats_counters(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ixgbe_hw_stats *stats = &sc->stats.pf;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u32 lxoffrxc;
	u64 total_missed_rx = 0;

	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));

	/* Per-queue packet/descriptor counters (first 16 queues). */
	for (int i = 0; i < 16; i++) {
		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/*
	 * Hardware workaround, gprc counts missed packets.
	 * NOTE(review): missed_rx (and total_missed_rx below) are never
	 * incremented in this function, so this subtraction and the
	 * IQDROPS export are currently no-ops — confirm intent.
	 */
	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	stats->gprc -= missed_rx;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* 64-bit octet counters: combine low and high halves. */
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
		stats->lxoffrxc += lxoffrxc;
	} else {
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		stats->lxoffrxc += lxoffrxc;
		/* 82598 only has a counter in the high register */
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * For watchdog management we need to know if we have been paused
	 * during the last interval, so capture that here.
	 */
	if (lxoffrxc)
		sc->shared->isc_pause_frames = 1;

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc += bprc;
	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		stats->mprc -= bprc;

	/* Receive size-histogram counters. */
	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc += lxoff;
	total = lxon + lxoff;

	/*
	 * Transmitted flow-control frames are counted as packets by the
	 * hardware; subtract them so gptc/mptc/ptc64 reflect data frames
	 * only (and gotc their octets, at minimum frame size).
	 */
	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	stats->gptc -= total;
	stats->mptc -= total;
	stats->ptc64 -= total;
	stats->gotc -= total * ETHER_MIN_LEN;

	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	IXGBE_SET_IPACKETS(sc, stats->gprc);
	IXGBE_SET_OPACKETS(sc, stats->gptc);
	IXGBE_SET_IBYTES(sc, stats->gorc);
	IXGBE_SET_OBYTES(sc, stats->gotc);
	IXGBE_SET_IMCASTS(sc, stats->mprc);
	IXGBE_SET_OMCASTS(sc, stats->mptc);
	IXGBE_SET_COLLISIONS(sc, 0);
	/* total_missed_rx is always 0 here — see NOTE(review) above. */
	IXGBE_SET_IQDROPS(sc, total_missed_rx);

	/*
	 * Aggregate following types of errors as RX errors:
	 * - CRC error count,
	 * - illegal byte error count,
	 * - missed packets count,
	 * - length error count,
	 * - undersized packets count,
	 * - fragmented packets count,
	 * - oversized packets count,
	 * - jabber count.
	 */
	IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc +
	    stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc +
	    stats->roc + stats->rjc);
} /* ixgbe_update_stats_counters */
2002
2003 /************************************************************************
2004 * ixgbe_add_hw_stats
2005 *
2006 * Add sysctl variables, one per statistic, to the system.
2007 ************************************************************************/
2008 static void
ixgbe_add_hw_stats(struct ixgbe_softc * sc)2009 ixgbe_add_hw_stats(struct ixgbe_softc *sc)
2010 {
2011 device_t dev = iflib_get_dev(sc->ctx);
2012 struct ix_rx_queue *rx_que;
2013 struct ix_tx_queue *tx_que;
2014 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2015 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2016 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2017 struct ixgbe_hw_stats *stats = &sc->stats.pf;
2018 struct sysctl_oid *stat_node, *queue_node;
2019 struct sysctl_oid_list *stat_list, *queue_list;
2020 int i;
2021
2022 #define QUEUE_NAME_LEN 32
2023 char namebuf[QUEUE_NAME_LEN];
2024
2025 /* Driver Statistics */
2026 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
2027 CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets");
2028 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2029 CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
2030 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
2031 CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
2032
2033 for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
2034 i++, tx_que++) {
2035 struct tx_ring *txr = &tx_que->txr;
2036 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
2037 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
2038 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
2039 queue_list = SYSCTL_CHILDREN(queue_node);
2040
2041 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
2042 CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
2043 ixgbe_sysctl_tdh_handler, "IU",
2044 "Transmit Descriptor Head");
2045 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
2046 CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
2047 ixgbe_sysctl_tdt_handler, "IU",
2048 "Transmit Descriptor Tail");
2049 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2050 CTLFLAG_RD, &txr->tso_tx, "TSO");
2051 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2052 CTLFLAG_RD, &txr->total_packets,
2053 "Queue Packets Transmitted");
2054 }
2055
2056 for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
2057 i++, rx_que++) {
2058 struct rx_ring *rxr = &rx_que->rxr;
2059 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
2060 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
2061 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
2062 queue_list = SYSCTL_CHILDREN(queue_node);
2063
2064 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
2065 CTLTYPE_UINT | CTLFLAG_RW,
2066 &sc->rx_queues[i], 0,
2067 ixgbe_sysctl_interrupt_rate_handler, "IU",
2068 "Interrupt Rate");
2069 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2070 CTLFLAG_RD, &(sc->rx_queues[i].irqs),
2071 "irqs on this queue");
2072 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
2073 CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
2074 ixgbe_sysctl_rdh_handler, "IU",
2075 "Receive Descriptor Head");
2076 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
2077 CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
2078 ixgbe_sysctl_rdt_handler, "IU",
2079 "Receive Descriptor Tail");
2080 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2081 CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
2082 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2083 CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
2084 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
2085 CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
2086 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
2087 CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
2088 }
2089
2090 /* MAC stats get their own sub node */
2091 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
2092 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
2093 stat_list = SYSCTL_CHILDREN(stat_node);
2094
2095 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs",
2096 CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS);
2097 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
2098 CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
2099 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
2100 CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
2101 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
2102 CTLFLAG_RD, &stats->errbc, "Byte Errors");
2103 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
2104 CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
2105 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
2106 CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
2107 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
2108 CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
2109 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
2110 CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
2111 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
2112 CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
2113
2114 /* Flow Control stats */
2115 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
2116 CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
2117 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
2118 CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
2119 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
2120 CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
2121 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
2122 CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
2123
2124 /* Packet Reception Stats */
2125 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
2126 CTLFLAG_RD, &stats->tor, "Total Octets Received");
2127 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
2128 CTLFLAG_RD, &stats->gorc, "Good Octets Received");
2129 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
2130 CTLFLAG_RD, &stats->tpr, "Total Packets Received");
2131 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
2132 CTLFLAG_RD, &stats->gprc, "Good Packets Received");
2133 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
2134 CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
2135 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
2136 CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
2137 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
2138 CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
2139 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
2140 CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
2141 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
2142 CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
2143 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
2144 CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
2145 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
2146 CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
2147 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
2148 CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
2149 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
2150 CTLFLAG_RD, &stats->ruc, "Receive Undersized");
2151 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
2152 CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
2153 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
2154 CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
2155 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
2156 CTLFLAG_RD, &stats->rjc, "Received Jabber");
2157 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
2158 CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
2159 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
2160 CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
2161 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
2162 CTLFLAG_RD, &stats->xec, "Checksum Errors");
2163
2164 /* Packet Transmission Stats */
2165 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
2166 CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
2167 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
2168 CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
2169 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
2170 CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
2171 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
2172 CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
2173 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
2174 CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
2175 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
2176 CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
2177 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
2178 CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
2179 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
2180 CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
2181 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
2182 CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
2183 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
2184 CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
2185 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
2186 CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
2187 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
2188 CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
2189 } /* ixgbe_add_hw_stats */
2190
2191 /************************************************************************
2192 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2193 *
2194 * Retrieves the TDH value from the hardware
2195 ************************************************************************/
2196 static int
ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)2197 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
2198 {
2199 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
2200 int error;
2201 unsigned int val;
2202
2203 if (!txr)
2204 return (0);
2205
2206
2207 if (atomic_load_acq_int(&txr->sc->recovery_mode))
2208 return (EPERM);
2209
2210 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
2211 error = sysctl_handle_int(oidp, &val, 0, req);
2212 if (error || !req->newptr)
2213 return error;
2214
2215 return (0);
2216 } /* ixgbe_sysctl_tdh_handler */
2217
2218 /************************************************************************
2219 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2220 *
2221 * Retrieves the TDT value from the hardware
2222 ************************************************************************/
2223 static int
ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)2224 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
2225 {
2226 struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
2227 int error;
2228 unsigned int val;
2229
2230 if (!txr)
2231 return (0);
2232
2233 if (atomic_load_acq_int(&txr->sc->recovery_mode))
2234 return (EPERM);
2235
2236 val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
2237 error = sysctl_handle_int(oidp, &val, 0, req);
2238 if (error || !req->newptr)
2239 return error;
2240
2241 return (0);
2242 } /* ixgbe_sysctl_tdt_handler */
2243
2244 /************************************************************************
2245 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2246 *
2247 * Retrieves the RDH value from the hardware
2248 ************************************************************************/
2249 static int
ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)2250 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
2251 {
2252 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
2253 int error;
2254 unsigned int val;
2255
2256 if (!rxr)
2257 return (0);
2258
2259 if (atomic_load_acq_int(&rxr->sc->recovery_mode))
2260 return (EPERM);
2261
2262 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
2263 error = sysctl_handle_int(oidp, &val, 0, req);
2264 if (error || !req->newptr)
2265 return error;
2266
2267 return (0);
2268 } /* ixgbe_sysctl_rdh_handler */
2269
2270 /************************************************************************
2271 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2272 *
2273 * Retrieves the RDT value from the hardware
2274 ************************************************************************/
2275 static int
ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)2276 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
2277 {
2278 struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
2279 int error;
2280 unsigned int val;
2281
2282 if (!rxr)
2283 return (0);
2284
2285 if (atomic_load_acq_int(&rxr->sc->recovery_mode))
2286 return (EPERM);
2287
2288 val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
2289 error = sysctl_handle_int(oidp, &val, 0, req);
2290 if (error || !req->newptr)
2291 return error;
2292
2293 return (0);
2294 } /* ixgbe_sysctl_rdt_handler */
2295
2296 /************************************************************************
2297 * ixgbe_if_vlan_register
2298 *
2299 * Run via vlan config EVENT, it enables us to use the
2300 * HW Filter table since we can get the vlan id. This
2301 * just creates the entry in the soft version of the
2302 * VFTA, init will repopulate the real table.
2303 ************************************************************************/
2304 static void
ixgbe_if_vlan_register(if_ctx_t ctx,u16 vtag)2305 ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
2306 {
2307 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2308 u16 index, bit;
2309
2310 index = (vtag >> 5) & 0x7F;
2311 bit = vtag & 0x1F;
2312 sc->shadow_vfta[index] |= (1 << bit);
2313 ++sc->num_vlans;
2314 ixgbe_setup_vlan_hw_support(ctx);
2315 } /* ixgbe_if_vlan_register */
2316
2317 /************************************************************************
2318 * ixgbe_if_vlan_unregister
2319 *
2320 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2321 ************************************************************************/
2322 static void
ixgbe_if_vlan_unregister(if_ctx_t ctx,u16 vtag)2323 ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
2324 {
2325 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2326 u16 index, bit;
2327
2328 index = (vtag >> 5) & 0x7F;
2329 bit = vtag & 0x1F;
2330 sc->shadow_vfta[index] &= ~(1 << bit);
2331 --sc->num_vlans;
2332 /* Re-init to load the changes */
2333 ixgbe_setup_vlan_hw_support(ctx);
2334 } /* ixgbe_if_vlan_unregister */
2335
2336 /************************************************************************
2337 * ixgbe_setup_vlan_hw_support
2338 ************************************************************************/
2339 static void
ixgbe_setup_vlan_hw_support(if_ctx_t ctx)2340 ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
2341 {
2342 if_t ifp = iflib_get_ifp(ctx);
2343 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2344 struct ixgbe_hw *hw = &sc->hw;
2345 struct rx_ring *rxr;
2346 int i;
2347 u32 ctrl;
2348
2349
2350 /*
2351 * We get here thru init_locked, meaning
2352 * a soft reset, this has already cleared
2353 * the VFTA and other state, so if there
2354 * have been no vlan's registered do nothing.
2355 */
2356 if (sc->num_vlans == 0 ||
2357 (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) {
2358 /* Clear the vlan hw flag */
2359 for (i = 0; i < sc->num_rx_queues; i++) {
2360 rxr = &sc->rx_queues[i].rxr;
2361 /* On 82599 the VLAN enable is per/queue in RXDCTL */
2362 if (hw->mac.type != ixgbe_mac_82598EB) {
2363 ctrl = IXGBE_READ_REG(hw,
2364 IXGBE_RXDCTL(rxr->me));
2365 ctrl &= ~IXGBE_RXDCTL_VME;
2366 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
2367 ctrl);
2368 }
2369 rxr->vtag_strip = false;
2370 }
2371 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2372 /* Enable the Filter Table if enabled */
2373 ctrl |= IXGBE_VLNCTRL_CFIEN;
2374 ctrl &= ~IXGBE_VLNCTRL_VFE;
2375 if (hw->mac.type == ixgbe_mac_82598EB)
2376 ctrl &= ~IXGBE_VLNCTRL_VME;
2377 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2378 return;
2379 }
2380
2381 /* Setup the queues for vlans */
2382 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
2383 for (i = 0; i < sc->num_rx_queues; i++) {
2384 rxr = &sc->rx_queues[i].rxr;
2385 /* On 82599 the VLAN enable is per/queue in RXDCTL */
2386 if (hw->mac.type != ixgbe_mac_82598EB) {
2387 ctrl = IXGBE_READ_REG(hw,
2388 IXGBE_RXDCTL(rxr->me));
2389 ctrl |= IXGBE_RXDCTL_VME;
2390 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
2391 ctrl);
2392 }
2393 rxr->vtag_strip = true;
2394 }
2395 }
2396
2397 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
2398 return;
2399 /*
2400 * A soft reset zero's out the VFTA, so
2401 * we need to repopulate it now.
2402 */
2403 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2404 if (sc->shadow_vfta[i] != 0)
2405 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
2406 sc->shadow_vfta[i]);
2407
2408 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2409 /* Enable the Filter Table if enabled */
2410 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) {
2411 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2412 ctrl |= IXGBE_VLNCTRL_VFE;
2413 }
2414 if (hw->mac.type == ixgbe_mac_82598EB)
2415 ctrl |= IXGBE_VLNCTRL_VME;
2416 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2417 } /* ixgbe_setup_vlan_hw_support */
2418
/************************************************************************
 * ixgbe_get_slot_info
 *
 * Get the width and transaction speed of
 * the slot this adapter is plugged into.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct ixgbe_softc *sc)
{
	device_t dev = iflib_get_dev(sc->ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int bus_info_valid = true;
	u32 offset;
	u16 link;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		/* Must read slot info from the parent bridge instead. */
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	/* NOTE: 'dev' is re-pointed at ancestors from here on; the
	 * display path below then prints via that ancestor device. */
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = false;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register */
	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_16000) ? "16.0GT/s" :
	    (hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
	    "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	    "Unknown"));

	if (bus_info_valid) {
		/* Warn when the slot cannot supply full bandwidth. */
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev,
			    "PCI-Express bandwidth available for this card"
			    " is not sufficient for optimal performance.\n");
			device_printf(dev,
			    "For optimal performance a x8 PCIE, or x4 PCIE"
			    " Gen2 slot is required.\n");
		}
		/* The quad-port adapter needs a faster slot (Gen3 x8). */
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev,
			    "PCI-Express bandwidth available for this card"
			    " is not sufficient for optimal performance.\n");
			device_printf(dev,
			    "For optimal performance a x8 PCIE Gen3 slot is"
			    " required.\n");
		}
	} else
		device_printf(dev,
		    "Unable to determine slot speed/width. The speed/width"
		    " reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
2527
/************************************************************************
 * ixgbe_if_msix_intr_assign
 *
 * Setup MSI-X Interrupt resources and handlers
 ************************************************************************/
static int
ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct ix_tx_queue *tx_que;
	int error, rid, vector = 0;
	char buf[16];

	/* Admin Que is vector 0*/
	rid = vector + 1;
	/* One MSI-X vector (rid = vector + 1) per RX queue. */
	for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me,
		    buf);

		if (error) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate que int %d err: %d",
			    i,error);
			/*
			 * NOTE(review): shrinks the queue count so the
			 * cleanup below frees i + 1 irqs — this includes
			 * the queue whose allocation just failed; confirm
			 * iflib_irq_free() is safe on an unallocated irq.
			 */
			sc->num_rx_queues = i + 1;
			goto fail;
		}

		rx_que->msix = vector;
	}
	/* TX queues share the RX vectors (round-robin by index). */
	for (int i = 0; i < sc->num_tx_queues; i++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		tx_que = &sc->tx_queues[i];
		tx_que->msix = i % sc->num_rx_queues;
		iflib_softirq_alloc_generic(ctx,
		    &sc->rx_queues[tx_que->msix].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
	}
	/* Last vector is the admin/link interrupt. */
	rid = vector + 1;
	error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
	    IFLIB_INTR_ADMIN, ixgbe_msix_link, sc, 0, "aq");
	if (error) {
		device_printf(iflib_get_dev(ctx),
		    "Failed to register admin handler");
		return (error);
	}

	sc->vector = vector;

	return (0);
fail:
	/* Unwind: release the admin irq and all allocated queue irqs. */
	iflib_irq_free(ctx, &sc->irq);
	rx_que = sc->rx_queues;
	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);

	return (error);
} /* ixgbe_if_msix_intr_assign */
2590
2591 static inline void
ixgbe_perform_aim(struct ixgbe_softc * sc,struct ix_rx_queue * que)2592 ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
2593 {
2594 uint32_t newitr = 0;
2595 struct rx_ring *rxr = &que->rxr;
2596 /* FIXME struct tx_ring *txr = ... ->txr; */
2597
2598 /*
2599 * Do Adaptive Interrupt Moderation:
2600 * - Write out last calculated setting
2601 * - Calculate based on average size over
2602 * the last interval.
2603 */
2604 if (que->eitr_setting) {
2605 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(que->msix),
2606 que->eitr_setting);
2607 }
2608
2609 que->eitr_setting = 0;
2610 /* Idle, do nothing */
2611 if (rxr->bytes == 0) {
2612 /* FIXME && txr->bytes == 0 */
2613 return;
2614 }
2615
2616 if ((rxr->bytes) && (rxr->packets))
2617 newitr = rxr->bytes / rxr->packets;
2618 /* FIXME for transmit accounting
2619 * if ((txr->bytes) && (txr->packets))
2620 * newitr = txr->bytes/txr->packets;
2621 * if ((rxr->bytes) && (rxr->packets))
2622 * newitr = max(newitr, (rxr->bytes / rxr->packets));
2623 */
2624
2625 newitr += 24; /* account for hardware frame, crc */
2626 /* set an upper boundary */
2627 newitr = min(newitr, 3000);
2628
2629 /* Be nice to the mid range */
2630 if ((newitr > 300) && (newitr < 1200)) {
2631 newitr = (newitr / 3);
2632 } else {
2633 newitr = (newitr / 2);
2634 }
2635
2636 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2637 newitr |= newitr << 16;
2638 } else {
2639 newitr |= IXGBE_EITR_CNT_WDIS;
2640 }
2641
2642 /* save for next interrupt */
2643 que->eitr_setting = newitr;
2644
2645 /* Reset state */
2646 /* FIXME txr->bytes = 0; */
2647 /* FIXME txr->packets = 0; */
2648 rxr->bytes = 0;
2649 rxr->packets = 0;
2650
2651 return;
2652 }
2653
2654 /*********************************************************************
2655 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2656 **********************************************************************/
2657 static int
ixgbe_msix_que(void * arg)2658 ixgbe_msix_que(void *arg)
2659 {
2660 struct ix_rx_queue *que = arg;
2661 struct ixgbe_softc *sc = que->sc;
2662 if_t ifp = iflib_get_ifp(que->sc->ctx);
2663
2664 /* Protect against spurious interrupts */
2665 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
2666 return (FILTER_HANDLED);
2667
2668 ixgbe_disable_queue(sc, que->msix);
2669 ++que->irqs;
2670
2671 /* Check for AIM */
2672 if (sc->enable_aim) {
2673 ixgbe_perform_aim(sc, que);
2674 }
2675
2676 return (FILTER_SCHEDULE_THREAD);
2677 } /* ixgbe_msix_que */
2678
2679 /************************************************************************
2680 * ixgbe_media_status - Media Ioctl callback
2681 *
2682 * Called whenever the user queries the status of
2683 * the interface using ifconfig.
2684 ************************************************************************/
static void
ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_if_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report "valid but inactive" and stop. */
	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = sc->phy_layer;

	/* BASE-T copper layers: map negotiated speed to the -T media. */
	if (layer & IXGBE_PHYSICAL_LAYERS_BASE_T_ALL)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* SFP+ direct-attach copper (passive or active). */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	/* Long-reach optics. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach optics. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.  Without IFM_ETH_XTYPE the KR/KX backplane
	 * layers are reported via the closest pre-existing subtypes.
	 */
#ifndef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
#else
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (sc->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
#endif

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
} /* ixgbe_media_status */
2840
2841 /************************************************************************
2842 * ixgbe_media_change - Media Ioctl callback
2843 *
2844 * Called when the user changes speed/duplex using
2845 * media/mediopt option with ifconfig.
2846 ************************************************************************/
static int
ixgbe_if_media_change(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_if_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media is managed by firmware; refuse manual changes. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	/* Translate the requested media subtype into a link-speed mask. */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
	case IFM_10G_T:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_10G_LRM:
	case IFM_10G_LR:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
#ifndef IFM_ETH_XTYPE
	case IFM_1000_CX: /* KX */
#else
	case IFM_1000_KX:
#endif
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_BX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_1000_T:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_10G_TWINAX:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = true;
	hw->mac.ops.setup_link(hw, speed, true);
	/*
	 * Record the selection in the sysctl "advertise_speed" bit
	 * encoding: 0x1=100M, 0x2=1G, 0x4=10G, 0x8=10M, 0x10=2.5G,
	 * 0x20=5G.
	 */
	sc->advertise =
	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
	    ((speed & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
	    ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
	    ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
	    ((speed & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
	    ((speed & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);

	return (0);

invalid:
	device_printf(iflib_get_dev(ctx), "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_if_media_change */
2937
2938 /************************************************************************
2939 * ixgbe_set_promisc
2940 ************************************************************************/
2941 static int
ixgbe_if_promisc_set(if_ctx_t ctx,int flags)2942 ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
2943 {
2944 struct ixgbe_softc *sc = iflib_get_softc(ctx);
2945 if_t ifp = iflib_get_ifp(ctx);
2946 u32 rctl;
2947 int mcnt = 0;
2948
2949 rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
2950 rctl &= (~IXGBE_FCTRL_UPE);
2951 if (if_getflags(ifp) & IFF_ALLMULTI)
2952 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2953 else {
2954 mcnt = min(if_llmaddr_count(ifp),
2955 MAX_NUM_MULTICAST_ADDRESSES);
2956 }
2957 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2958 rctl &= (~IXGBE_FCTRL_MPE);
2959 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2960
2961 if (if_getflags(ifp) & IFF_PROMISC) {
2962 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2963 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2964 } else if (if_getflags(ifp) & IFF_ALLMULTI) {
2965 rctl |= IXGBE_FCTRL_MPE;
2966 rctl &= ~IXGBE_FCTRL_UPE;
2967 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, rctl);
2968 }
2969 return (0);
2970 } /* ixgbe_if_promisc_set */
2971
2972 /************************************************************************
2973 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2974 ************************************************************************/
2975 static int
ixgbe_msix_link(void * arg)2976 ixgbe_msix_link(void *arg)
2977 {
2978 struct ixgbe_softc *sc = arg;
2979 struct ixgbe_hw *hw = &sc->hw;
2980 u32 eicr, eicr_mask;
2981 s32 retval;
2982
2983 ++sc->link_irq;
2984
2985 /* Pause other interrupts */
2986 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2987
2988 /* First get the cause */
2989 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2990 /* Be sure the queue bits are not cleared */
2991 eicr &= ~IXGBE_EICR_RTX_QUEUE;
2992 /* Clear interrupt with write */
2993 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2994
2995 /* Link status change */
2996 if (eicr & IXGBE_EICR_LSC) {
2997 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2998 sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
2999 }
3000
3001 if (eicr & IXGBE_EICR_FW_EVENT) {
3002 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FW_EVENT);
3003 sc->task_requests |= IXGBE_REQUEST_TASK_FWEVENT;
3004 }
3005
3006 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
3007 if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
3008 (eicr & IXGBE_EICR_FLOW_DIR)) {
3009 /* This is probably overkill :) */
3010 if (!atomic_cmpset_int(&sc->fdir_reinit, 0, 1))
3011 return (FILTER_HANDLED);
3012 /* Disable the interrupt */
3013 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
3014 sc->task_requests |= IXGBE_REQUEST_TASK_FDIR;
3015 } else
3016 if (eicr & IXGBE_EICR_ECC) {
3017 device_printf(iflib_get_dev(sc->ctx),
3018 "Received ECC Err, initiating reset\n");
3019 hw->mac.flags |=
3020 ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3021 ixgbe_reset_hw(hw);
3022 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3023 IXGBE_EICR_ECC);
3024 }
3025
3026 /* Check for over temp condition */
3027 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3028 switch (sc->hw.mac.type) {
3029 case ixgbe_mac_X550EM_a:
3030 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3031 break;
3032 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3033 IXGBE_EICR_GPI_SDP0_X550EM_a);
3034 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3035 IXGBE_EICR_GPI_SDP0_X550EM_a);
3036 retval = hw->phy.ops.check_overtemp(hw);
3037 if (retval != IXGBE_ERR_OVERTEMP)
3038 break;
3039 device_printf(iflib_get_dev(sc->ctx),
3040 "\nCRITICAL: OVER TEMP!!"
3041 " PHY IS SHUT DOWN!!\n");
3042 device_printf(iflib_get_dev(sc->ctx),
3043 "System shutdown required!\n");
3044 break;
3045 default:
3046 if (!(eicr & IXGBE_EICR_TS))
3047 break;
3048 retval = hw->phy.ops.check_overtemp(hw);
3049 if (retval != IXGBE_ERR_OVERTEMP)
3050 break;
3051 device_printf(iflib_get_dev(sc->ctx),
3052 "\nCRITICAL: OVER TEMP!!"
3053 " PHY IS SHUT DOWN!!\n");
3054 device_printf(iflib_get_dev(sc->ctx),
3055 "System shutdown required!\n");
3056 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3057 IXGBE_EICR_TS);
3058 break;
3059 }
3060 }
3061
3062 /* Check for VF message */
3063 if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
3064 (eicr & IXGBE_EICR_MAILBOX)) {
3065 sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
3066 }
3067 }
3068
3069 /*
3070 * On E610, the firmware handles PHY configuration, so
3071 * there is no need to perform any SFP-specific tasks.
3072 */
3073 if (hw->mac.type != ixgbe_mac_E610 && ixgbe_is_sfp(hw)) {
3074 /* Pluggable optics-related interrupt */
3075 if (hw->mac.type >= ixgbe_mac_X540)
3076 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3077 else
3078 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3079
3080 if (eicr & eicr_mask) {
3081 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3082 sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
3083 }
3084
3085 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3086 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3087 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3088 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3089 sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
3090 }
3091 }
3092
3093 /* Check for fan failure */
3094 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3095 ixgbe_check_fan_failure(sc, eicr, true);
3096 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3097 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3098 }
3099
3100 /* External PHY interrupt */
3101 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3102 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3103 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3104 sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
3105 }
3106
3107 return (sc->task_requests != 0) ?
3108 FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
3109 } /* ixgbe_msix_link */
3110
3111 /************************************************************************
3112 * ixgbe_sysctl_interrupt_rate_handler
3113 ************************************************************************/
3114 static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)3115 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
3116 {
3117 struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
3118 int error;
3119 unsigned int reg, usec, rate;
3120
3121 if (atomic_load_acq_int(&que->sc->recovery_mode))
3122 return (EPERM);
3123
3124 reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
3125 usec = ((reg & 0x0FF8) >> 3);
3126 if (usec > 0)
3127 rate = 500000 / usec;
3128 else
3129 rate = 0;
3130 error = sysctl_handle_int(oidp, &rate, 0, req);
3131 if (error || !req->newptr)
3132 return error;
3133 reg &= ~0xfff; /* default, no limitation */
3134 ixgbe_max_interrupt_rate = 0;
3135 if (rate > 0 && rate < 500000) {
3136 if (rate < 1000)
3137 rate = 1000;
3138 ixgbe_max_interrupt_rate = rate;
3139 reg |= ((4000000/rate) & 0xff8);
3140 }
3141 IXGBE_WRITE_REG(&que->sc->hw, IXGBE_EITR(que->msix), reg);
3142
3143 return (0);
3144 } /* ixgbe_sysctl_interrupt_rate_handler */
3145
3146 /************************************************************************
3147 * ixgbe_debug_dump_print_cluster
3148 ************************************************************************/
static u8
ixgbe_debug_dump_print_cluster(struct ixgbe_softc *sc, struct sbuf *sbuf,
    u8 cluster_id)
{
	/*
	 * Dump one FW debug cluster into 'sbuf' by paging through its
	 * tables via repeated ACI "get internal data" commands.
	 * Returns cluster_id + 1 (the next cluster to dump), or 0 when
	 * the transfer buffer cannot be allocated.
	 */
	u16 data_buf_size = IXGBE_ACI_MAX_BUFFER_SIZE;
	device_t dev = sc->dev;
	struct ixgbe_hw *hw = &sc->hw;
	const u8 reserved_buf[8] = {};
	int max_aci_calls = 1000;	/* hard cap against a FW that never terminates */
	int error, counter = 0;
	u8 *data_buf;

	/* Input parameters / loop variables */
	u16 table_id = 0;
	u32 offset = 0;

	/* Data returned from ACI command */
	u16 ret_buf_size = 0;
	u16 ret_next_cluster = 0;
	u16 ret_next_table = 0;
	u32 ret_next_index = 0;

	data_buf = (u8 *)malloc(data_buf_size, M_IXGBE, M_NOWAIT | M_ZERO);
	if (!data_buf)
		return (0);

	DEBUGOUT2("%s: dumping cluster id (relative) %d\n",
	    __func__, cluster_id);

	do {
		DEBUGOUT3("table_id 0x%04x offset 0x%08x buf_size %d\n",
		    table_id, offset, data_buf_size);

		/* Fetch the next chunk; FW tells us where to resume. */
		error = ixgbe_aci_get_internal_data(hw, cluster_id, table_id,
		    offset, data_buf, data_buf_size, &ret_buf_size,
		    &ret_next_cluster, &ret_next_table, &ret_next_index);
		if (error) {
			device_printf(dev,
			    "%s: Failed to get internal FW/HW data, error: %d, "
			    "last aci status: %d\n",
			    __func__, error, hw->aci.last_status);
			break;
		}

		DEBUGOUT3("ret_table_id 0x%04x ret_offset 0x%08x "
		    "ret_buf_size %d\n",
		    ret_next_table, ret_next_index, ret_buf_size);

		/*
		 * Emit a fixed binary record header (all fields widened
		 * to u32) followed by the raw table data.
		 */
		/* Print cluster id */
		u32 print_cluster_id = (u32)cluster_id;
		sbuf_bcat(sbuf, &print_cluster_id, sizeof(print_cluster_id));
		/* Print table id */
		u32 print_table_id = (u32)table_id;
		sbuf_bcat(sbuf, &print_table_id, sizeof(print_table_id));
		/* Print table length */
		u32 print_table_length = (u32)ret_buf_size;
		sbuf_bcat(sbuf, &print_table_length,
		    sizeof(print_table_length));
		/* Print current offset */
		u32 print_curr_offset = offset;
		sbuf_bcat(sbuf, &print_curr_offset, sizeof(print_curr_offset));
		/* Print reserved bytes */
		sbuf_bcat(sbuf, reserved_buf, sizeof(reserved_buf));
		/* Print data */
		sbuf_bcat(sbuf, data_buf, ret_buf_size);

		/* Prepare for the next loop spin */
		memset(data_buf, 0, data_buf_size);

		/*
		 * FW signals end-of-table with an all-ones index and
		 * end-of-cluster with an all-ones table id as well.
		 */
		bool last_index = (ret_next_index == 0xffffffff);
		bool last_table = ((ret_next_table == 0xff ||
		    ret_next_table == 0xffff) &&
		    last_index);

		if (last_table) {
			/* End of the cluster */
			DEBUGOUT1("End of the cluster ID %d\n", cluster_id);
			break;
		} else if (last_index) {
			/* End of the table */
			table_id = ret_next_table;
			offset = 0;
		} else {
			/* More data left in the table */
			offset = ret_next_index;
		}
	} while (++counter < max_aci_calls);

	if (counter >= max_aci_calls)
		device_printf(dev, "Exceeded nr of ACI calls for cluster %d\n",
		    cluster_id);

	free(data_buf, M_IXGBE);

	return (++cluster_id);
} /* ixgbe_print_debug_dump_cluster */
3245
3246 /************************************************************************
3247 * ixgbe_sysctl_debug_dump_set_clusters
3248 *
3249 * Sets the cluster to dump from FW when Debug Dump requested.
3250 ************************************************************************/
3251 static int
ixgbe_sysctl_debug_dump_set_clusters(SYSCTL_HANDLER_ARGS)3252 ixgbe_sysctl_debug_dump_set_clusters(SYSCTL_HANDLER_ARGS)
3253 {
3254 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
3255 u32 clusters = sc->debug_dump_cluster_mask;
3256 device_t dev = sc->dev;
3257 int error;
3258
3259 error = sysctl_handle_32(oidp, &clusters, 0, req);
3260 if ((error) || !req->newptr)
3261 return (error);
3262
3263 if (clusters & ~(IXGBE_DBG_DUMP_VALID_CLUSTERS_MASK)) {
3264 device_printf(dev,
3265 "%s: Unrecognized parameter: %u\n",
3266 __func__, clusters);
3267 sc->debug_dump_cluster_mask =
3268 IXGBE_ACI_DBG_DUMP_CLUSTER_ID_INVALID;
3269 return (EINVAL);
3270 }
3271
3272 sc->debug_dump_cluster_mask = clusters;
3273
3274 return (0);
3275 } /* ixgbe_sysctl_debug_dump_set_clusters */
3276
3277 /************************************************************************
3278 * ixgbe_sysctl_dump_debug_dump
3279 ************************************************************************/
static int
ixgbe_sysctl_dump_debug_dump(SYSCTL_HANDLER_ARGS)
{
	/*
	 * Two-phase sysctl: writing "1" arms the dump (do_debug_dump);
	 * the following read performs it and streams the binary dump
	 * back to userland, then disarms.
	 */
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	device_t dev = sc->dev;
	struct sbuf *sbuf;
	int error = 0;

	UNREFERENCED_PARAMETER(arg2);

	if (!sc->do_debug_dump) {
		/* Size probe while disarmed: report zero length. */
		if (req->oldptr == NULL && req->newptr == NULL) {
			error = SYSCTL_OUT(req, 0, 0);
			return (error);
		}

		/* Only a single-character command ("1") is accepted. */
		char input_buf[2] = "";
		error = sysctl_handle_string(oidp, input_buf,
		    sizeof(input_buf), req);
		if ((error) || (req->newptr == NULL))
			return (error);

		if (input_buf[0] == '1') {
			/* Refuse to arm if the cluster mask was poisoned. */
			if (sc->debug_dump_cluster_mask ==
			    IXGBE_ACI_DBG_DUMP_CLUSTER_ID_INVALID) {
				device_printf(dev,
				    "Debug Dump failed because an invalid "
				    "cluster was specified.\n");
				return (EINVAL);
			}

			sc->do_debug_dump = true;
			return (0);
		}

		return (EINVAL);
	}

	/* Caller just wants the upper bound for size */
	if (req->oldptr == NULL && req->newptr == NULL) {
		size_t est_output_len = IXGBE_DBG_DUMP_BASE_SIZE;
		/* NOTE(review): 0x2 appears to be a cluster needing extra
		 * headroom — TODO confirm which cluster bit this is. */
		if (sc->debug_dump_cluster_mask & 0x2)
			est_output_len += IXGBE_DBG_DUMP_BASE_SIZE;
		error = SYSCTL_OUT(req, 0, est_output_len);
		return (error);
	}

	/* NOTE(review): sbuf_new_for_sysctl() result is not checked for
	 * NULL before use — confirm whether a failure path is needed. */
	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	sbuf_clear_flags(sbuf, SBUF_INCLUDENUL);

	DEBUGOUT("FW Debug Dump running...\n");

	if (sc->debug_dump_cluster_mask) {
		/* Dump exactly the clusters selected in the mask. */
		for (u8 id = 0; id <= IXGBE_ACI_DBG_DUMP_CLUSTER_ID_MAX; id++) {
			if (sc->debug_dump_cluster_mask & BIT(id)) {
				DEBUGOUT1("Dumping cluster ID %u...\n", id);
				ixgbe_debug_dump_print_cluster(sc, sbuf, id);
			}
		}
	} else {
		/* No mask set: walk every cluster in sequence. */
		u8 next_cluster_id = 0;
		do {
			DEBUGOUT1("Dumping cluster ID %u...\n",
			    next_cluster_id);
			next_cluster_id = ixgbe_debug_dump_print_cluster(sc,
			    sbuf, next_cluster_id);
		} while (next_cluster_id != 0 &&
		    next_cluster_id <= IXGBE_ACI_DBG_DUMP_CLUSTER_ID_MAX);
	}

	sbuf_finish(sbuf);
	sbuf_delete(sbuf);

	/* Disarm: the next read without a fresh "1" returns nothing. */
	sc->do_debug_dump = false;

	return (error);
} /* ixgbe_sysctl_dump_debug_dump */
3357
3358 /************************************************************************
3359 * ixgbe_add_debug_dump_sysctls
3360 ************************************************************************/
3361 static void
ixgbe_add_debug_dump_sysctls(struct ixgbe_softc * sc)3362 ixgbe_add_debug_dump_sysctls(struct ixgbe_softc *sc)
3363 {
3364 struct sysctl_oid_list *debug_list, *dump_list;
3365 struct sysctl_oid *dump_node;
3366 struct sysctl_ctx_list *ctx;
3367 device_t dev = sc->dev;
3368
3369 ctx = device_get_sysctl_ctx(dev);
3370 debug_list = SYSCTL_CHILDREN(sc->debug_sysctls);
3371
3372 dump_node = SYSCTL_ADD_NODE(ctx, debug_list, OID_AUTO, "dump",
3373 CTLFLAG_RD, NULL, "Internal FW/HW Dump");
3374 dump_list = SYSCTL_CHILDREN(dump_node);
3375
3376 SYSCTL_ADD_PROC(ctx, dump_list, OID_AUTO, "clusters",
3377 CTLTYPE_U32 | CTLFLAG_RW, sc, 0,
3378 ixgbe_sysctl_debug_dump_set_clusters, "SU",
3379 IXGBE_SYSCTL_DESC_DEBUG_DUMP_SET_CLUSTER);
3380
3381 SYSCTL_ADD_PROC(ctx, dump_list, OID_AUTO, "dump",
3382 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
3383 ixgbe_sysctl_dump_debug_dump, "",
3384 IXGBE_SYSCTL_DESC_DUMP_DEBUG_DUMP);
3385 } /* ixgbe_add_debug_dump_sysctls */
3386
3387 static void
ixgbe_add_debug_sysctls(struct ixgbe_softc * sc)3388 ixgbe_add_debug_sysctls(struct ixgbe_softc *sc)
3389 {
3390 struct sysctl_oid_list *ctx_list;
3391 struct sysctl_ctx_list *ctx;
3392 device_t dev = sc->dev;
3393
3394 ctx = device_get_sysctl_ctx(dev);
3395 ctx_list = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3396
3397 sc->debug_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "debug",
3398 CTLFLAG_RD, NULL, "Debug Sysctls");
3399
3400 if (sc->feat_en & IXGBE_FEATURE_DBG_DUMP)
3401 ixgbe_add_debug_dump_sysctls(sc);
3402 } /* ixgbe_add_debug_sysctls */
3403
3404 /************************************************************************
3405 * ixgbe_add_device_sysctls
3406 ************************************************************************/
static void
ixgbe_add_device_sysctls(if_ctx_t ctx)
{
	/* Register all per-device sysctl knobs under dev.ix.N. */
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx_list;

	ctx_list = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/* Sysctls for all devices */
	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
	    CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, ixgbe_sysctl_flowcntl, "I",
	    IXGBE_SYSCTL_DESC_SET_FC);

	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
	    CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, ixgbe_sysctl_advertise, "I",
	    IXGBE_SYSCTL_DESC_ADV_SPEED);

	/* AIM default comes from the loader tunable. */
	sc->enable_aim = ixgbe_enable_aim;
	SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
	    &sc->enable_aim, 0, "Interrupt Moderation");

	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");

	/* arg2 selects which TSO segment's flag mask is addressed. */
	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
	    "tso_tcp_flags_mask_first_segment",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
	    "TSO TCP flags mask for first segment");

	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
	    "tso_tcp_flags_mask_middle_segment",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 1, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
	    "TSO TCP flags mask for middle segment");

	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
	    "tso_tcp_flags_mask_last_segment",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 2, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
	    "TSO TCP flags mask for last segment");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
	    CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, ixgbe_sysctl_power_state,
	    "I", "PCI Power State");

	SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
#endif
	/* for X550 series devices */
	/* NOTE(review): CTLTYPE_U16 paired with format "I" (int) — TODO
	 * confirm the intended width for the dmac sysctl. */
	if (hw->mac.type >= ixgbe_mac_X550)
		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
		    CTLTYPE_U16 | CTLFLAG_RW,
		    sc, 0, ixgbe_sysctl_dmac,
		    "I", "DMA Coalesce");

	/* for WoL-capable devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
		    CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    ixgbe_sysctl_wol_enable, "I",
		    "Enable/Disable Wake on LAN");

		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
		    CTLTYPE_U32 | CTLFLAG_RW,
		    sc, 0, ixgbe_sysctl_wufc,
		    "I", "Enable/Disable Wake Up Filters");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		struct sysctl_oid *phy_node;
		struct sysctl_oid_list *phy_list;

		phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "External PHY sysctls");
		phy_list = SYSCTL_CHILDREN(phy_node);

		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
		    CTLTYPE_U16 | CTLFLAG_RD,
		    sc, 0, ixgbe_sysctl_phy_temp,
		    "I", "Current External PHY Temperature (Celsius)");

		SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
		    "overtemp_occurred",
		    CTLTYPE_U16 | CTLFLAG_RD, sc, 0,
		    ixgbe_sysctl_phy_overtemp_occurred, "I",
		    "External PHY High Temperature Event Occurred");
	}

	if (sc->feat_cap & IXGBE_FEATURE_EEE) {
		SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
		    CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
	}

	ixgbe_add_debug_sysctls(sc);
} /* ixgbe_add_device_sysctls */
3517
3518 /************************************************************************
3519 * ixgbe_allocate_pci_resources
3520 ************************************************************************/
3521 static int
ixgbe_allocate_pci_resources(if_ctx_t ctx)3522 ixgbe_allocate_pci_resources(if_ctx_t ctx)
3523 {
3524 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3525 device_t dev = iflib_get_dev(ctx);
3526 int rid;
3527
3528 rid = PCIR_BAR(0);
3529 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
3530 RF_ACTIVE);
3531
3532 if (!(sc->pci_mem)) {
3533 device_printf(dev,
3534 "Unable to allocate bus resource: memory\n");
3535 return (ENXIO);
3536 }
3537
3538 /* Save bus_space values for READ/WRITE_REG macros */
3539 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
3540 sc->osdep.mem_bus_space_handle =
3541 rman_get_bushandle(sc->pci_mem);
3542 /* Set hw values for shared code */
3543 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
3544
3545 return (0);
3546 } /* ixgbe_allocate_pci_resources */
3547
3548 /************************************************************************
3549 * ixgbe_detach - Device removal routine
3550 *
3551 * Called when the driver is being removed.
3552 * Stops the adapter and deallocates all the resources
3553 * that were allocated for driver operation.
3554 *
3555 * return 0 on success, positive on failure
3556 ************************************************************************/
static int
ixgbe_if_detach(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	u32 ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* SR-IOV VFs must be destroyed before the PF can go away. */
	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/* Stop the port and arm LPLU/WoL as configured. */
	ixgbe_setup_low_power_mode(ctx);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Wait out any pending FW-recovery-mode timer callback. */
	callout_drain(&sc->fw_mode_timer);

	/* E610: tear down link-status events and the admin command interface. */
	if (sc->hw.mac.type == ixgbe_mac_E610) {
		ixgbe_disable_lse(sc);
		ixgbe_shutdown_aci(&sc->hw);
	}

	ixgbe_free_pci_resources(ctx);

	/* Multicast address table allocated at attach time. */
	free(sc->mta, M_IXGBE);

	return (0);
} /* ixgbe_if_detach */
3591
3592 /************************************************************************
3593 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3594 *
3595 * Prepare the adapter/port for LPLU and/or WoL
3596 ************************************************************************/
static int
ixgbe_setup_low_power_mode(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	s32 error = 0;

	/* No wake-on-LAN: the PHY can be fully powered down. */
	if (!hw->wol_enabled)
		ixgbe_set_phy_power(hw, false);

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous
		 * wakeup events from waking us up immediately after we
		 * suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

		/* X550EM baseT adapters need a special LPLU flow */
		/* Hold off PHY resets while entering low-power link-up. */
		hw->phy.reset_disable = true;
		ixgbe_if_stop(ctx);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev, "Error entering LPLU: %d\n",
			    error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_if_stop(ctx);
	}

	return error;
} /* ixgbe_setup_low_power_mode */
3647
3648 /************************************************************************
3649 * ixgbe_shutdown - Shutdown entry point
3650 ************************************************************************/
3651 static int
ixgbe_if_shutdown(if_ctx_t ctx)3652 ixgbe_if_shutdown(if_ctx_t ctx)
3653 {
3654 int error = 0;
3655
3656 INIT_DEBUGOUT("ixgbe_shutdown: begin");
3657
3658 error = ixgbe_setup_low_power_mode(ctx);
3659
3660 return (error);
3661 } /* ixgbe_if_shutdown */
3662
3663 /************************************************************************
3664 * ixgbe_suspend
3665 *
3666 * From D0 to D3
3667 ************************************************************************/
3668 static int
ixgbe_if_suspend(if_ctx_t ctx)3669 ixgbe_if_suspend(if_ctx_t ctx)
3670 {
3671 int error = 0;
3672
3673 INIT_DEBUGOUT("ixgbe_suspend: begin");
3674
3675 error = ixgbe_setup_low_power_mode(ctx);
3676
3677 return (error);
3678 } /* ixgbe_if_suspend */
3679
3680 /************************************************************************
3681 * ixgbe_resume
3682 *
3683 * From D3 to D0
3684 ************************************************************************/
static int
ixgbe_if_resume(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 wus;

	INIT_DEBUGOUT("ixgbe_resume: begin");

	/* Read & clear WUS register */
	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
	if (wus)
		device_printf(dev, "Woken up by (WUS): %#010x\n",
		    IXGBE_READ_REG(hw, IXGBE_WUS));
	/* Write-1-to-clear all wake-up status bits. */
	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
	/* And clear WUFC until next low-power transition */
	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);

	/*
	 * Required after D3->D0 transition;
	 * will re-advertise all previous advertised speeds
	 */
	if (if_getflags(ifp) & IFF_UP)
		ixgbe_if_init(ctx);

	/* Always succeeds. */
	return (0);
} /* ixgbe_if_resume */
3714
3715 /************************************************************************
3716 * ixgbe_if_mtu_set - Ioctl mtu entry point
3717 *
3718 * Return 0 on success, EINVAL on failure
3719 ************************************************************************/
3720 static int
ixgbe_if_mtu_set(if_ctx_t ctx,uint32_t mtu)3721 ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
3722 {
3723 struct ixgbe_softc *sc = iflib_get_softc(ctx);
3724 int error = 0;
3725
3726 IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)");
3727
3728 if (mtu > IXGBE_MAX_MTU) {
3729 error = EINVAL;
3730 } else {
3731 sc->max_frame_size = mtu + IXGBE_MTU_HDR;
3732 }
3733
3734 return error;
3735 } /* ixgbe_if_mtu_set */
3736
3737 /************************************************************************
3738 * ixgbe_if_crcstrip_set
3739 ************************************************************************/
static void
ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	/* crc stripping is set in two places:
	 * IXGBE_HLREG0 (modified on init_locked and hw reset)
	 * IXGBE_RDRXCTL (set by the original driver in
	 * ixgbe_setup_hw_rsc() called in init_locked.
	 * We disable the setting when netmap is compiled in).
	 * We update the values here, but also in ixgbe.c because
	 * init_locked sometimes is called outside our control.
	 *
	 * onoff: nonzero when entering netmap mode, zero when leaving;
	 * crcstrip: requested CRC-strip state while in netmap mode.
	 */
	uint32_t hl, rxc;

	hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
#ifdef NETMAP
	if (netmap_verbose)
		D("%s read HLREG 0x%x rxc 0x%x",
		    onoff ? "enter" : "exit", hl, rxc);
#endif
	/* hw requirements ... */
	rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
	rxc |= IXGBE_RDRXCTL_RSCACKC;
	if (onoff && !crcstrip) {
		/* keep the crc. Fast rx */
		hl &= ~IXGBE_HLREG0_RXCRCSTRP;
		rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
	} else {
		/* reset default mode */
		hl |= IXGBE_HLREG0_RXCRCSTRP;
		rxc |= IXGBE_RDRXCTL_CRCSTRIP;
	}
#ifdef NETMAP
	if (netmap_verbose)
		D("%s write HLREG 0x%x rxc 0x%x",
		    onoff ? "enter" : "exit", hl, rxc);
#endif
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
} /* ixgbe_if_crcstrip_set */
3782
3783 /*********************************************************************
3784 * ixgbe_if_init - Init entry point
3785 *
3786 * Used in two ways: It is used by the stack as an init
3787 * entry point in network interface structure. It is also
3788 * used by the driver as a hw/sw initialization routine to
3789 * get to a consistent state.
3790 *
3791 * Return 0 on success, positive on failure
3792 **********************************************************************/
void
ixgbe_if_init(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_rx_queue *rx_que;
	struct ix_tx_queue *tx_que;
	u32 txdctl, mhadd;
	u32 rxdctl, rxctrl;
	u32 ctrl_ext;

	int i, j, err;

	INIT_DEBUGOUT("ixgbe_if_init: begin");

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(sc);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Full MAC reset/init via shared code. */
	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(sc);

	ixgbe_initialize_transmit_units(ctx);

	/* Setup Multicast table */
	ixgbe_if_multi_set(ctx);

	/* Determine the correct mbuf pool, based on frame size */
	sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);

	/* Configure RX settings */
	ixgbe_initialize_receive_units(ctx);

	/*
	 * Initialize variable holding task enqueue requests
	 * from MSI-X interrupts
	 */
	sc->task_requests = 0;

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(sc);

	/* Set MTU size */
	if (if_getmtu(ifp) > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
	    i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
	    i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;

		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll up to 10ms for the queue-enable bit to latch. */
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Set up MSI/MSI-X routing */
	if (ixgbe_enable_msix) {
		ixgbe_configure_ivars(sc);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else { /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(sc, 0, 0, 0);
		ixgbe_set_ivar(sc, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(sc);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			/* Abort init; link cannot be brought up. */
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->vector), IXGBE_LINK_ITR);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, true);

	/* Config/Enable Link */
	ixgbe_config_link(ctx);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(sc);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(ctx);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(sc);

	/* And now turn on interrupts */
	ixgbe_if_enable_intr(ctx);

	/* Enable the use of the MBX by the VF's */
	if (sc->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

} /* ixgbe_init_locked */
3970
3971 /************************************************************************
3972 * ixgbe_set_ivar
3973 *
3974 * Setup the correct IVAR register for a particular MSI-X interrupt
3975 * (yes this is all very magic and confusing :)
3976 * - entry is the register array entry
3977 * - vector is the MSI-X vector for this queue
3978 * - type is RX/TX/MISC
3979 ************************************************************************/
3980 static void
ixgbe_set_ivar(struct ixgbe_softc * sc,u8 entry,u8 vector,s8 type)3981 ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
3982 {
3983 struct ixgbe_hw *hw = &sc->hw;
3984 u32 ivar, index;
3985
3986 vector |= IXGBE_IVAR_ALLOC_VAL;
3987
3988 switch (hw->mac.type) {
3989 case ixgbe_mac_82598EB:
3990 if (type == -1)
3991 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3992 else
3993 entry += (type * 64);
3994 index = (entry >> 2) & 0x1F;
3995 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3996 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3997 ivar |= (vector << (8 * (entry & 0x3)));
3998 IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3999 break;
4000 case ixgbe_mac_82599EB:
4001 case ixgbe_mac_X540:
4002 case ixgbe_mac_X550:
4003 case ixgbe_mac_X550EM_x:
4004 case ixgbe_mac_X550EM_a:
4005 case ixgbe_mac_E610:
4006 if (type == -1) { /* MISC IVAR */
4007 index = (entry & 1) * 8;
4008 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4009 ivar &= ~(0xFF << index);
4010 ivar |= (vector << index);
4011 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4012 } else { /* RX/TX IVARS */
4013 index = (16 * (entry & 1)) + (8 * type);
4014 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4015 ivar &= ~(0xFF << index);
4016 ivar |= (vector << index);
4017 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4018 }
4019 default:
4020 break;
4021 }
4022 } /* ixgbe_set_ivar */
4023
4024 /************************************************************************
4025 * ixgbe_configure_ivars
4026 ************************************************************************/
4027 static void
ixgbe_configure_ivars(struct ixgbe_softc * sc)4028 ixgbe_configure_ivars(struct ixgbe_softc *sc)
4029 {
4030 struct ix_rx_queue *rx_que = sc->rx_queues;
4031 struct ix_tx_queue *tx_que = sc->tx_queues;
4032 u32 newitr;
4033
4034 if (ixgbe_max_interrupt_rate > 0)
4035 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4036 else {
4037 /*
4038 * Disable DMA coalescing if interrupt moderation is
4039 * disabled.
4040 */
4041 sc->dmac = 0;
4042 newitr = 0;
4043 }
4044
4045 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
4046 struct rx_ring *rxr = &rx_que->rxr;
4047
4048 /* First the RX queue entry */
4049 ixgbe_set_ivar(sc, rxr->me, rx_que->msix, 0);
4050
4051 /* Set an Initial EITR value */
4052 IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(rx_que->msix), newitr);
4053 }
4054 for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
4055 struct tx_ring *txr = &tx_que->txr;
4056
4057 /* ... and the TX */
4058 ixgbe_set_ivar(sc, txr->me, tx_que->msix, 1);
4059 }
4060 /* For the Link interrupt */
4061 ixgbe_set_ivar(sc, 1, sc->vector, -1);
4062 } /* ixgbe_configure_ivars */
4063
4064 /************************************************************************
4065 * ixgbe_config_gpie
4066 ************************************************************************/
4067 static void
ixgbe_config_gpie(struct ixgbe_softc * sc)4068 ixgbe_config_gpie(struct ixgbe_softc *sc)
4069 {
4070 struct ixgbe_hw *hw = &sc->hw;
4071 u32 gpie;
4072
4073 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4074
4075 if (sc->intr_type == IFLIB_INTR_MSIX) {
4076 /* Enable Enhanced MSI-X mode */
4077 gpie |= IXGBE_GPIE_MSIX_MODE |
4078 IXGBE_GPIE_EIAME |
4079 IXGBE_GPIE_PBA_SUPPORT |
4080 IXGBE_GPIE_OCD;
4081 }
4082
4083 /* Fan Failure Interrupt */
4084 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
4085 gpie |= IXGBE_SDP1_GPIEN;
4086
4087 /* Thermal Sensor Interrupt */
4088 if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4089 gpie |= IXGBE_SDP0_GPIEN_X540;
4090
4091 /* Link detection */
4092 switch (hw->mac.type) {
4093 case ixgbe_mac_82599EB:
4094 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4095 break;
4096 case ixgbe_mac_X550EM_x:
4097 case ixgbe_mac_X550EM_a:
4098 gpie |= IXGBE_SDP0_GPIEN_X540;
4099 break;
4100 default:
4101 break;
4102 }
4103
4104 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4105
4106 } /* ixgbe_config_gpie */
4107
4108 /************************************************************************
4109 * ixgbe_config_delay_values
4110 *
4111 * Requires sc->max_frame_size to be set.
4112 ************************************************************************/
4113 static void
ixgbe_config_delay_values(struct ixgbe_softc * sc)4114 ixgbe_config_delay_values(struct ixgbe_softc *sc)
4115 {
4116 struct ixgbe_hw *hw = &sc->hw;
4117 u32 rxpb, frame, size, tmp;
4118
4119 frame = sc->max_frame_size;
4120
4121 /* Calculate High Water */
4122 switch (hw->mac.type) {
4123 case ixgbe_mac_X540:
4124 case ixgbe_mac_X550:
4125 case ixgbe_mac_X550EM_x:
4126 case ixgbe_mac_X550EM_a:
4127 tmp = IXGBE_DV_X540(frame, frame);
4128 break;
4129 default:
4130 tmp = IXGBE_DV(frame, frame);
4131 break;
4132 }
4133 size = IXGBE_BT2KB(tmp);
4134 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4135 hw->fc.high_water[0] = rxpb - size;
4136
4137 /* Now calculate Low Water */
4138 switch (hw->mac.type) {
4139 case ixgbe_mac_X540:
4140 case ixgbe_mac_X550:
4141 case ixgbe_mac_X550EM_x:
4142 case ixgbe_mac_X550EM_a:
4143 tmp = IXGBE_LOW_DV_X540(frame);
4144 break;
4145 default:
4146 tmp = IXGBE_LOW_DV(frame);
4147 break;
4148 }
4149 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4150
4151 hw->fc.pause_time = IXGBE_FC_PAUSE;
4152 hw->fc.send_xon = true;
4153 } /* ixgbe_config_delay_values */
4154
4155 /************************************************************************
4156 * ixgbe_set_multi - Multicast Update
4157 *
4158 * Called whenever multicast address list is updated.
4159 ************************************************************************/
4160 static u_int
ixgbe_mc_filter_apply(void * arg,struct sockaddr_dl * sdl,u_int idx)4161 ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
4162 {
4163 struct ixgbe_softc *sc = arg;
4164 struct ixgbe_mc_addr *mta = sc->mta;
4165
4166 if (idx == MAX_NUM_MULTICAST_ADDRESSES)
4167 return (0);
4168 bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4169 mta[idx].vmdq = sc->pool;
4170
4171 return (1);
4172 } /* ixgbe_mc_filter_apply */
4173
4174 static void
ixgbe_if_multi_set(if_ctx_t ctx)4175 ixgbe_if_multi_set(if_ctx_t ctx)
4176 {
4177 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4178 struct ixgbe_mc_addr *mta;
4179 if_t ifp = iflib_get_ifp(ctx);
4180 u8 *update_ptr;
4181 u32 fctrl;
4182 u_int mcnt;
4183
4184 IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
4185
4186 mta = sc->mta;
4187 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4188
4189 mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
4190 sc);
4191
4192 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
4193 update_ptr = (u8 *)mta;
4194 ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt,
4195 ixgbe_mc_array_itr, true);
4196 }
4197
4198 fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
4199
4200 if (if_getflags(ifp) & IFF_PROMISC)
4201 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4202 else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
4203 if_getflags(ifp) & IFF_ALLMULTI) {
4204 fctrl |= IXGBE_FCTRL_MPE;
4205 fctrl &= ~IXGBE_FCTRL_UPE;
4206 } else
4207 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4208
4209 IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
4210 } /* ixgbe_if_multi_set */
4211
4212 /************************************************************************
4213 * ixgbe_mc_array_itr
4214 *
4215 * An iterator function needed by the multicast shared code.
4216 * It feeds the shared code routine the addresses in the
4217 * array of ixgbe_set_multi() one by one.
4218 ************************************************************************/
4219 static u8 *
ixgbe_mc_array_itr(struct ixgbe_hw * hw,u8 ** update_ptr,u32 * vmdq)4220 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4221 {
4222 struct ixgbe_mc_addr *mta;
4223
4224 mta = (struct ixgbe_mc_addr *)*update_ptr;
4225 *vmdq = mta->vmdq;
4226
4227 *update_ptr = (u8*)(mta + 1);
4228
4229 return (mta->addr);
4230 } /* ixgbe_mc_array_itr */
4231
4232 /************************************************************************
4233 * ixgbe_local_timer - Timer routine
4234 *
4235 * Checks for link status, updates statistics,
4236 * and runs the watchdog check.
4237 ************************************************************************/
4238 static void
ixgbe_if_timer(if_ctx_t ctx,uint16_t qid)4239 ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
4240 {
4241 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4242
4243 if (qid != 0)
4244 return;
4245
4246 /* Check for pluggable optics */
4247 if (sc->sfp_probe)
4248 if (!ixgbe_sfp_probe(ctx))
4249 return; /* Nothing to do */
4250
4251 ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
4252
4253 /* Fire off the adminq task */
4254 iflib_admin_intr_deferred(ctx);
4255
4256 } /* ixgbe_if_timer */
4257
4258 /************************************************************************
4259 * ixgbe_fw_mode_timer - FW mode timer routine
4260 ************************************************************************/
static void
ixgbe_fw_mode_timer(void *arg)
{
	struct ixgbe_softc *sc = arg;
	struct ixgbe_hw *hw = &sc->hw;

	if (ixgbe_fw_recovery_mode(hw)) {
		/* 0->1 latch: warn and stop only on the transition into
		 * recovery mode, not on every subsequent poll. */
		if (atomic_cmpset_acq_int(&sc->recovery_mode, 0, 1)) {
			/* Firmware error detected, entering recovery mode */
			device_printf(sc->dev,
			    "Firmware recovery mode detected. Limiting"
			    " functionality. Refer to the Intel(R) Ethernet"
			    " Adapters and Devices User Guide for details on"
			    " firmware recovery mode.\n");

			if (hw->adapter_stopped == FALSE)
				ixgbe_if_stop(sc->ctx);
		}
	} else
		/* Not (or no longer) in recovery mode: clear the latch. */
		atomic_cmpset_acq_int(&sc->recovery_mode, 1, 0);


	/* Re-arm: poll the FW state roughly once per second. */
	callout_reset(&sc->fw_mode_timer, hz,
	    ixgbe_fw_mode_timer, sc);
} /* ixgbe_fw_mode_timer */
4286
4287 /************************************************************************
4288 * ixgbe_sfp_probe
4289 *
4290 * Determine if a port had optics inserted.
4291 ************************************************************************/
4292 static bool
ixgbe_sfp_probe(if_ctx_t ctx)4293 ixgbe_sfp_probe(if_ctx_t ctx)
4294 {
4295 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4296 struct ixgbe_hw *hw = &sc->hw;
4297 device_t dev = iflib_get_dev(ctx);
4298 bool result = false;
4299
4300 if ((hw->phy.type == ixgbe_phy_nl) &&
4301 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4302 s32 ret = hw->phy.ops.identify_sfp(hw);
4303 if (ret)
4304 goto out;
4305 ret = hw->phy.ops.reset(hw);
4306 sc->sfp_probe = false;
4307 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4308 device_printf(dev,
4309 "Unsupported SFP+ module detected!");
4310 device_printf(dev,
4311 "Reload driver with supported module.\n");
4312 goto out;
4313 } else
4314 device_printf(dev, "SFP+ module detected!\n");
4315 /* We now have supported optics */
4316 result = true;
4317 }
4318 out:
4319
4320 return (result);
4321 } /* ixgbe_sfp_probe */
4322
4323 /************************************************************************
4324 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4325 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(ctx);
	u32 err, cage_full = 0;

	/* Crosstalk fix: verify the SFP cage is actually occupied (via
	 * the mac-specific SDP presence pin) before touching the module. */
	if (sc->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			goto handle_mod_out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto handle_mod_out;
	}

	/* 82598 resets the PHY; newer MACs program the SFP setup. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		goto handle_mod_out;
	}
	/* Module is good: request the multispeed-fiber follow-up task. */
	sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
	return;

handle_mod_out:
	/* Failure: make sure no stale MSF request remains queued. */
	sc->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
} /* ixgbe_handle_mod */
4377
4378
4379 /************************************************************************
4380 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4381 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 autoneg;
	bool negotiate;

	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	sc->phy_layer = ixgbe_get_supported_physical_layer(hw);

	autoneg = hw->phy.autoneg_advertised;
	/* NOTE(review): `negotiate` is only written by
	 * get_link_capabilities(); if autoneg is already nonzero it stays
	 * uninitialized, but it is also never read afterwards here. */
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, true);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(sc->media);
	ixgbe_add_media_types(sc->ctx);
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
4405
4406 /************************************************************************
4407 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4408 ************************************************************************/
4409 static void
ixgbe_handle_phy(void * context)4410 ixgbe_handle_phy(void *context)
4411 {
4412 if_ctx_t ctx = context;
4413 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4414 struct ixgbe_hw *hw = &sc->hw;
4415 int error;
4416
4417 error = hw->phy.ops.handle_lasi(hw);
4418 if (error == IXGBE_ERR_OVERTEMP)
4419 device_printf(sc->dev,
4420 "CRITICAL: EXTERNAL PHY OVER TEMP!!"
4421 " PHY will downshift to lower power state!\n");
4422 else if (error)
4423 device_printf(sc->dev,
4424 "Error handling LASI interrupt: %d\n", error);
4425 } /* ixgbe_handle_phy */
4426
4427 /************************************************************************
4428 * ixgbe_enable_lse - enable link status events
4429 *
4430 * Sets mask and enables link status events
4431 ************************************************************************/
s32 ixgbe_enable_lse(struct ixgbe_softc *sc)
{
	s32 error;

	/* Complement of the events of interest (up/down, media absent,
	 * module qualification failure, PHY FW load failure).
	 * NOTE(review): mask polarity assumed from this inversion —
	 * confirm against ixgbe_configure_lse()'s contract. */
	u16 mask = ~((u16)(IXGBE_ACI_LINK_EVENT_UPDOWN |
	    IXGBE_ACI_LINK_EVENT_MEDIA_NA |
	    IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL |
	    IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL));

	error = ixgbe_configure_lse(&sc->hw, TRUE, mask);
	if (error)
		return (error);

	/* Remember the active mask so ixgbe_disable_lse() can undo it. */
	sc->lse_mask = mask;
	return (IXGBE_SUCCESS);
} /* ixgbe_enable_lse */
4448
4449 /************************************************************************
4450 * ixgbe_disable_lse - disable link status events
4451 ************************************************************************/
ixgbe_disable_lse(struct ixgbe_softc * sc)4452 s32 ixgbe_disable_lse(struct ixgbe_softc *sc)
4453 {
4454 s32 error;
4455
4456 error = ixgbe_configure_lse(&sc->hw, false, sc->lse_mask);
4457 if (error)
4458 return (error);
4459
4460 sc->lse_mask = 0;
4461 return (IXGBE_SUCCESS);
4462 } /* ixgbe_disable_lse */
4463
4464 /************************************************************************
4465 * ixgbe_handle_fw_event - Tasklet for MSI-X Link Status Event interrupts
4466 ************************************************************************/
static void
ixgbe_handle_fw_event(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct ixgbe_aci_event event;
	bool pending = false;
	s32 error;

	/* Task context: M_NOWAIT so we never sleep in the allocator. */
	event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE;
	event.msg_buf = malloc(event.buf_len, M_IXGBE, M_ZERO | M_NOWAIT);
	if (!event.msg_buf) {
		device_printf(sc->dev, "Can not allocate buffer for "
		    "event message\n");
		return;
	}

	/* Drain the FW event queue until no more events are pending. */
	do {
		error = ixgbe_aci_get_event(hw, &event, &pending);
		if (error) {
			device_printf(sc->dev, "Error getting event from "
			    "FW:%d\n", error);
			break;
		}

		switch (le16toh(event.desc.opcode)) {
		case ixgbe_aci_opc_get_link_status:
			/* Defer to the admin task's LSC handling. */
			sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
			break;

		case ixgbe_aci_opc_temp_tca_event:
			/* Thermal critical alarm: stop the adapter. */
			if (hw->adapter_stopped == FALSE)
				ixgbe_if_stop(ctx);
			device_printf(sc->dev,
			    "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
			device_printf(sc->dev, "System shutdown required!\n");
			break;

		default:
			device_printf(sc->dev,
			    "Unknown FW event captured, opcode=0x%04X\n",
			    le16toh(event.desc.opcode));
			break;
		}
	} while (pending);

	free(event.msg_buf, M_IXGBE);
} /* ixgbe_handle_fw_event */
4516
4517 /************************************************************************
4518 * ixgbe_if_stop - Stop the hardware
4519 *
4520 * Disables all traffic on the adapter by issuing a
4521 * global reset on the MAC and deallocates TX/RX buffers.
4522 ************************************************************************/
static void
ixgbe_if_stop(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	INIT_DEBUGOUT("ixgbe_if_stop: begin\n");

	ixgbe_reset_hw(hw);
	/* Cleared deliberately so the following stop_adapter call runs
	 * its full sequence (presumably reset_hw may leave it set —
	 * NOTE(review): confirm against shared-code behavior). */
	hw->adapter_stopped = false;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	sc->link_up = false;
	ixgbe_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_if_stop */
4548
4549 /************************************************************************
4550 * ixgbe_link_speed_to_str - Convert link speed to string
4551 *
4552 * Helper function to convert link speed constants to human-readable
4553 * string representations in conventional Gbps or Mbps.
4554 ************************************************************************/
4555 static const char *
ixgbe_link_speed_to_str(u32 link_speed)4556 ixgbe_link_speed_to_str(u32 link_speed)
4557 {
4558 switch (link_speed) {
4559 case IXGBE_LINK_SPEED_10GB_FULL:
4560 return "10 Gbps";
4561 case IXGBE_LINK_SPEED_5GB_FULL:
4562 return "5 Gbps";
4563 case IXGBE_LINK_SPEED_2_5GB_FULL:
4564 return "2.5 Gbps";
4565 case IXGBE_LINK_SPEED_1GB_FULL:
4566 return "1 Gbps";
4567 case IXGBE_LINK_SPEED_100_FULL:
4568 return "100 Mbps";
4569 case IXGBE_LINK_SPEED_10_FULL:
4570 return "10 Mbps";
4571 default:
4572 return "Unknown";
4573 }
4574 } /* ixgbe_link_speed_to_str */
4575
4576 /************************************************************************
4577 * ixgbe_update_link_status - Update OS on link state
4578 *
4579 * Note: Only updates the OS on the cached link state.
4580 * The real check of the hardware only happens with
4581 * a link interrupt.
4582 ************************************************************************/
static void
ixgbe_if_update_admin_status(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);

	if (sc->link_up) {
		/* Only act on a down->up transition of the cached state. */
		if (sc->link_active == false) {
			if (bootverbose)
				device_printf(dev,
				    "Link is up %s Full Duplex\n",
				    ixgbe_link_speed_to_str(sc->link_speed));
			sc->link_active = true;

			/* If link speed is <= 1Gbps and EEE is enabled,
			 * log info.
			 */
			if (sc->hw.mac.type == ixgbe_mac_E610 &&
			    (sc->feat_en & IXGBE_FEATURE_EEE) &&
			    sc->link_speed <= IXGBE_LINK_SPEED_1GB_FULL) {
				device_printf(sc->dev,
				    "Energy Efficient Ethernet (EEE) feature "
				    "is not supported on link speeds equal to "
				    "or below 1Gbps. EEE is supported on "
				    "speeds above 1Gbps.\n");
			}

			/* Update any Flow Control changes */
			ixgbe_fc_enable(&sc->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(sc);
			iflib_link_state_change(ctx, LINK_STATE_UP,
			    ixgbe_link_speed_to_baudrate(sc->link_speed));

			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(sc);
		}
	} else { /* Link down */
		/* Only act on an up->down transition. */
		if (sc->link_active == true) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			sc->link_active = false;
			if (sc->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(sc);
		}
	}

	/* Handle task requests from msix_link() */
	if (sc->task_requests & IXGBE_REQUEST_TASK_FWEVENT)
		ixgbe_handle_fw_event(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
		ixgbe_handle_mod(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
		ixgbe_handle_msf(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_MBX)
		ixgbe_handle_mbx(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_FDIR)
		ixgbe_reinit_fdir(ctx);
	if (sc->task_requests & IXGBE_REQUEST_TASK_PHY)
		ixgbe_handle_phy(ctx);
	/* All outstanding requests consumed. */
	sc->task_requests = 0;

	ixgbe_update_stats_counters(sc);
} /* ixgbe_if_update_admin_status */
4648
4649 /************************************************************************
4650 * ixgbe_config_dmac - Configure DMA Coalescing
4651 ************************************************************************/
4652 static void
ixgbe_config_dmac(struct ixgbe_softc * sc)4653 ixgbe_config_dmac(struct ixgbe_softc *sc)
4654 {
4655 struct ixgbe_hw *hw = &sc->hw;
4656 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4657
4658 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4659 return;
4660
4661 if (dcfg->watchdog_timer ^ sc->dmac ||
4662 dcfg->link_speed ^ sc->link_speed) {
4663 dcfg->watchdog_timer = sc->dmac;
4664 dcfg->fcoe_en = false;
4665 dcfg->link_speed = sc->link_speed;
4666 dcfg->num_tcs = 1;
4667
4668 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4669 dcfg->watchdog_timer, dcfg->link_speed);
4670
4671 hw->mac.ops.dmac_config(hw);
4672 }
4673 } /* ixgbe_config_dmac */
4674
/************************************************************************
 * ixgbe_if_enable_intr - Enable all configured interrupt causes
 ************************************************************************/
void
ixgbe_if_enable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_rx_queue *que = sc->rx_queues;
	u32 mask, fwsm;

	/* Start from all causes except the per-queue RX/TX bits. */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the MAC-specific "other" causes. */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some scs */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_E610:
		/* E610 reports firmware events through a dedicated cause. */
		mask |= IXGBE_EIMS_FW_EVENT;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (sc->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (sc->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (sc->intr_type == IFLIB_INTR_MSIX) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		mask &= ~IXGBE_EIMS_FW_EVENT;
		/* Mailbox causes are re-armed by the admin task instead. */
		if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < sc->num_rx_queues; i++, que++)
		ixgbe_enable_queue(sc, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_if_enable_intr */
4763
4764 /************************************************************************
4765 * ixgbe_if_disable_intr
4766 ************************************************************************/
4767 static void
ixgbe_if_disable_intr(if_ctx_t ctx)4768 ixgbe_if_disable_intr(if_ctx_t ctx)
4769 {
4770 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4771
4772 if (sc->intr_type == IFLIB_INTR_MSIX)
4773 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
4774 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
4775 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
4776 } else {
4777 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
4778 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
4779 IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
4780 }
4781 IXGBE_WRITE_FLUSH(&sc->hw);
4782
4783 } /* ixgbe_if_disable_intr */
4784
4785 /************************************************************************
4786 * ixgbe_link_intr_enable
4787 ************************************************************************/
4788 static void
ixgbe_link_intr_enable(if_ctx_t ctx)4789 ixgbe_link_intr_enable(if_ctx_t ctx)
4790 {
4791 struct ixgbe_hw *hw =
4792 &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;
4793
4794 /* Re-enable other interrupts */
4795 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
4796 } /* ixgbe_link_intr_enable */
4797
4798 /************************************************************************
4799 * ixgbe_if_rx_queue_intr_enable
4800 ************************************************************************/
4801 static int
ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx,uint16_t rxqid)4802 ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
4803 {
4804 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4805 struct ix_rx_queue *que = &sc->rx_queues[rxqid];
4806
4807 ixgbe_enable_queue(sc, que->msix);
4808
4809 return (0);
4810 } /* ixgbe_if_rx_queue_intr_enable */
4811
4812 /************************************************************************
4813 * ixgbe_enable_queue
4814 ************************************************************************/
4815 static void
ixgbe_enable_queue(struct ixgbe_softc * sc,u32 vector)4816 ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
4817 {
4818 struct ixgbe_hw *hw = &sc->hw;
4819 u64 queue = 1ULL << vector;
4820 u32 mask;
4821
4822 if (hw->mac.type == ixgbe_mac_82598EB) {
4823 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
4824 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4825 } else {
4826 mask = (queue & 0xFFFFFFFF);
4827 if (mask)
4828 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
4829 mask = (queue >> 32);
4830 if (mask)
4831 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
4832 }
4833 } /* ixgbe_enable_queue */
4834
4835 /************************************************************************
4836 * ixgbe_disable_queue
4837 ************************************************************************/
4838 static void
ixgbe_disable_queue(struct ixgbe_softc * sc,u32 vector)4839 ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
4840 {
4841 struct ixgbe_hw *hw = &sc->hw;
4842 u64 queue = 1ULL << vector;
4843 u32 mask;
4844
4845 if (hw->mac.type == ixgbe_mac_82598EB) {
4846 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
4847 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
4848 } else {
4849 mask = (queue & 0xFFFFFFFF);
4850 if (mask)
4851 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
4852 mask = (queue >> 32);
4853 if (mask)
4854 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
4855 }
4856 } /* ixgbe_disable_queue */
4857
/************************************************************************
 * ixgbe_intr - Legacy Interrupt Service Routine
 *
 * Single-vector (INTx/MSI) handler: admin-type causes are flagged in
 * sc->task_requests for the admin task; queue work is deferred to the
 * iflib thread via the FILTER_SCHEDULE_THREAD return.
 ************************************************************************/
int
ixgbe_intr(void *arg)
{
	struct ixgbe_softc *sc = arg;
	struct ix_rx_queue *que = sc->rx_queues;
	struct ixgbe_hw *hw = &sc->hw;
	if_ctx_t ctx = sc->ctx;
	u32 eicr, eicr_mask;

	/* Reading EICR acknowledges the pending causes. */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (eicr == 0) {
		/* Spurious interrupt: re-enable and report handled. */
		ixgbe_if_enable_intr(ctx);
		return (FILTER_HANDLED);
	}

	/* Check for fan failure */
	if ((sc->feat_en & IXGBE_FEATURE_FAN_FAIL) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(sc->dev,
		    "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS,
		    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		/* Mask LSC until the admin task has processed it. */
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		iflib_admin_intr_deferred(ctx);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: request the MOD task. */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			sc->task_requests |= IXGBE_REQUEST_TASK_MOD;
		}

		/* 82599 multi-speed fiber: request the MSF task. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			sc->task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
	}

	return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */
4921
4922 /************************************************************************
4923 * ixgbe_free_pci_resources
4924 ************************************************************************/
4925 static void
ixgbe_free_pci_resources(if_ctx_t ctx)4926 ixgbe_free_pci_resources(if_ctx_t ctx)
4927 {
4928 struct ixgbe_softc *sc = iflib_get_softc(ctx);
4929 struct ix_rx_queue *que = sc->rx_queues;
4930 device_t dev = iflib_get_dev(ctx);
4931
4932 /* Release all MSI-X queue resources */
4933 if (sc->intr_type == IFLIB_INTR_MSIX)
4934 iflib_irq_free(ctx, &sc->irq);
4935
4936 if (que != NULL) {
4937 for (int i = 0; i < sc->num_rx_queues; i++, que++) {
4938 iflib_irq_free(ctx, &que->que_irq);
4939 }
4940 }
4941
4942 if (sc->pci_mem != NULL)
4943 bus_release_resource(dev, SYS_RES_MEMORY,
4944 rman_get_rid(sc->pci_mem), sc->pci_mem);
4945 } /* ixgbe_free_pci_resources */
4946
4947 /************************************************************************
4948 * ixgbe_sysctl_flowcntl
4949 *
4950 * SYSCTL wrapper around setting Flow Control
4951 ************************************************************************/
4952 static int
ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)4953 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4954 {
4955 struct ixgbe_softc *sc;
4956 int error, fc;
4957
4958 sc = (struct ixgbe_softc *)arg1;
4959 fc = sc->hw.fc.requested_mode;
4960
4961 error = sysctl_handle_int(oidp, &fc, 0, req);
4962 if ((error) || (req->newptr == NULL))
4963 return (error);
4964
4965 /* Don't bother if it's not changed */
4966 if (fc == sc->hw.fc.current_mode)
4967 return (0);
4968
4969 return ixgbe_set_flowcntl(sc, fc);
4970 } /* ixgbe_sysctl_flowcntl */
4971
4972 /************************************************************************
4973 * ixgbe_set_flowcntl - Set flow control
4974 *
4975 * Flow control values:
4976 * 0 - off
4977 * 1 - rx pause
4978 * 2 - tx pause
4979 * 3 - full
4980 ************************************************************************/
4981 static int
ixgbe_set_flowcntl(struct ixgbe_softc * sc,int fc)4982 ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
4983 {
4984 switch (fc) {
4985 case ixgbe_fc_rx_pause:
4986 case ixgbe_fc_tx_pause:
4987 case ixgbe_fc_full:
4988 if (sc->num_rx_queues > 1)
4989 ixgbe_disable_rx_drop(sc);
4990 break;
4991 case ixgbe_fc_none:
4992 if (sc->num_rx_queues > 1)
4993 ixgbe_enable_rx_drop(sc);
4994 break;
4995 default:
4996 return (EINVAL);
4997 }
4998
4999 sc->hw.fc.requested_mode = fc;
5000
5001 /* Don't autoneg if forcing a value */
5002 sc->hw.fc.disable_fc_autoneg = true;
5003 ixgbe_fc_enable(&sc->hw);
5004
5005 return (0);
5006 } /* ixgbe_set_flowcntl */
5007
5008 /************************************************************************
5009 * ixgbe_enable_rx_drop
5010 *
5011 * Enable the hardware to drop packets when the buffer is
5012 * full. This is useful with multiqueue, so that no single
5013 * queue being full stalls the entire RX engine. We only
5014 * enable this when Multiqueue is enabled AND Flow Control
5015 * is disabled.
5016 ************************************************************************/
5017 static void
ixgbe_enable_rx_drop(struct ixgbe_softc * sc)5018 ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
5019 {
5020 struct ixgbe_hw *hw = &sc->hw;
5021 struct rx_ring *rxr;
5022 u32 srrctl;
5023
5024 for (int i = 0; i < sc->num_rx_queues; i++) {
5025 rxr = &sc->rx_queues[i].rxr;
5026 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5027 srrctl |= IXGBE_SRRCTL_DROP_EN;
5028 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5029 }
5030
5031 /* enable drop for each vf */
5032 for (int i = 0; i < sc->num_vfs; i++) {
5033 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5034 (IXGBE_QDE_WRITE |
5035 (i << IXGBE_QDE_IDX_SHIFT) |
5036 IXGBE_QDE_ENABLE));
5037 }
5038 } /* ixgbe_enable_rx_drop */
5039
5040 /************************************************************************
5041 * ixgbe_disable_rx_drop
5042 ************************************************************************/
5043 static void
ixgbe_disable_rx_drop(struct ixgbe_softc * sc)5044 ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
5045 {
5046 struct ixgbe_hw *hw = &sc->hw;
5047 struct rx_ring *rxr;
5048 u32 srrctl;
5049
5050 for (int i = 0; i < sc->num_rx_queues; i++) {
5051 rxr = &sc->rx_queues[i].rxr;
5052 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5053 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5054 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5055 }
5056
5057 /* disable drop for each vf */
5058 for (int i = 0; i < sc->num_vfs; i++) {
5059 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5060 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5061 }
5062 } /* ixgbe_disable_rx_drop */
5063
5064 /************************************************************************
5065 * ixgbe_sysctl_advertise
5066 *
5067 * SYSCTL wrapper around setting advertised speed
5068 ************************************************************************/
5069 static int
ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)5070 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
5071 {
5072 struct ixgbe_softc *sc;
5073 int error, advertise;
5074
5075 sc = (struct ixgbe_softc *)arg1;
5076 if (atomic_load_acq_int(&sc->recovery_mode))
5077 return (EPERM);
5078
5079 advertise = sc->advertise;
5080
5081 error = sysctl_handle_int(oidp, &advertise, 0, req);
5082 if ((error) || (req->newptr == NULL))
5083 return (error);
5084
5085 return ixgbe_set_advertise(sc, advertise);
5086 } /* ixgbe_sysctl_advertise */
5087
5088 /************************************************************************
5089 * ixgbe_set_advertise - Control advertised link speed
5090 *
5091 * Flags:
5092 * 0x1 - advertise 100 Mb
5093 * 0x2 - advertise 1G
5094 * 0x4 - advertise 10G
5095 * 0x8 - advertise 10 Mb (yes, Mb)
5096 * 0x10 - advertise 2.5G (disabled by default)
5097 * 0x20 - advertise 5G (disabled by default)
5098 *
5099 ************************************************************************/
5100 static int
ixgbe_set_advertise(struct ixgbe_softc * sc,int advertise)5101 ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
5102 {
5103 device_t dev = iflib_get_dev(sc->ctx);
5104 struct ixgbe_hw *hw;
5105 ixgbe_link_speed speed = 0;
5106 ixgbe_link_speed link_caps = 0;
5107 s32 err = IXGBE_NOT_IMPLEMENTED;
5108 bool negotiate = false;
5109
5110 /* Checks to validate new value */
5111 if (sc->advertise == advertise) /* no change */
5112 return (0);
5113
5114 hw = &sc->hw;
5115
5116 /* No speed changes for backplane media */
5117 if (hw->phy.media_type == ixgbe_media_type_backplane)
5118 return (ENODEV);
5119
5120 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5121 (hw->phy.multispeed_fiber))) {
5122 device_printf(dev,
5123 "Advertised speed can only be set on copper or multispeed"
5124 " fiber media types.\n");
5125 return (EINVAL);
5126 }
5127
5128 if (advertise < 0x1 || advertise > 0x3F) {
5129 device_printf(dev,
5130 "Invalid advertised speed; valid modes are 0x1 through"
5131 " 0x3F\n");
5132 return (EINVAL);
5133 }
5134
5135 if (hw->mac.ops.get_link_capabilities) {
5136 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5137 &negotiate);
5138 if (err != IXGBE_SUCCESS) {
5139 device_printf(dev,
5140 "Unable to determine supported advertise speeds"
5141 "\n");
5142 return (ENODEV);
5143 }
5144 }
5145
5146 /* Set new value and report new advertised mode */
5147 if (advertise & 0x1) {
5148 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5149 device_printf(dev,
5150 "Interface does not support 100Mb advertised"
5151 " speed\n");
5152 return (EINVAL);
5153 }
5154 speed |= IXGBE_LINK_SPEED_100_FULL;
5155 }
5156 if (advertise & 0x2) {
5157 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5158 device_printf(dev,
5159 "Interface does not support 1Gb advertised speed"
5160 "\n");
5161 return (EINVAL);
5162 }
5163 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5164 }
5165 if (advertise & 0x4) {
5166 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5167 device_printf(dev,
5168 "Interface does not support 10Gb advertised speed"
5169 "\n");
5170 return (EINVAL);
5171 }
5172 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5173 }
5174 if (advertise & 0x8) {
5175 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5176 device_printf(dev,
5177 "Interface does not support 10Mb advertised speed"
5178 "\n");
5179 return (EINVAL);
5180 }
5181 speed |= IXGBE_LINK_SPEED_10_FULL;
5182 }
5183 if (advertise & 0x10) {
5184 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5185 device_printf(dev,
5186 "Interface does not support 2.5G advertised speed"
5187 "\n");
5188 return (EINVAL);
5189 }
5190 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5191 }
5192 if (advertise & 0x20) {
5193 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5194 device_printf(dev,
5195 "Interface does not support 5G advertised speed"
5196 "\n");
5197 return (EINVAL);
5198 }
5199 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5200 }
5201
5202 hw->mac.autotry_restart = true;
5203 hw->mac.ops.setup_link(hw, speed, true);
5204 sc->advertise = advertise;
5205
5206 return (0);
5207 } /* ixgbe_set_advertise */
5208
5209 /************************************************************************
5210 * ixgbe_get_default_advertise - Get default advertised speed settings
5211 *
5212 * Formatted for sysctl usage.
5213 * Flags:
5214 * 0x1 - advertise 100 Mb
5215 * 0x2 - advertise 1G
5216 * 0x4 - advertise 10G
5217 * 0x8 - advertise 10 Mb (yes, Mb)
5218 * 0x10 - advertise 2.5G (disabled by default)
5219 * 0x20 - advertise 5G (disabled by default)
5220 ************************************************************************/
5221 static int
ixgbe_get_default_advertise(struct ixgbe_softc * sc)5222 ixgbe_get_default_advertise(struct ixgbe_softc *sc)
5223 {
5224 struct ixgbe_hw *hw = &sc->hw;
5225 int speed;
5226 ixgbe_link_speed link_caps = 0;
5227 s32 err;
5228 bool negotiate = false;
5229
5230 /*
5231 * Advertised speed means nothing unless it's copper or
5232 * multi-speed fiber
5233 */
5234 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5235 !(hw->phy.multispeed_fiber))
5236 return (0);
5237
5238 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5239 if (err != IXGBE_SUCCESS)
5240 return (0);
5241
5242 if (hw->mac.type == ixgbe_mac_X550) {
5243 /*
5244 * 2.5G and 5G autonegotiation speeds on X550
5245 * are disabled by default due to reported
5246 * interoperability issues with some switches.
5247 */
5248 link_caps &= ~(IXGBE_LINK_SPEED_2_5GB_FULL |
5249 IXGBE_LINK_SPEED_5GB_FULL);
5250 }
5251
5252 speed =
5253 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) |
5254 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) |
5255 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5256 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) |
5257 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x1 : 0) |
5258 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0);
5259
5260 return speed;
5261 } /* ixgbe_get_default_advertise */
5262
5263 /************************************************************************
5264 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5265 *
5266 * Control values:
5267 * 0/1 - off / on (use default value of 1000)
5268 *
5269 * Legal timer values are:
5270 * 50,100,250,500,1000,2000,5000,10000
5271 *
5272 * Turning off interrupt moderation will also turn this off.
5273 ************************************************************************/
5274 static int
ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)5275 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
5276 {
5277 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
5278 if_t ifp = iflib_get_ifp(sc->ctx);
5279 int error;
5280 u16 newval;
5281
5282 newval = sc->dmac;
5283 error = sysctl_handle_16(oidp, &newval, 0, req);
5284 if ((error) || (req->newptr == NULL))
5285 return (error);
5286
5287 switch (newval) {
5288 case 0:
5289 /* Disabled */
5290 sc->dmac = 0;
5291 break;
5292 case 1:
5293 /* Enable and use default */
5294 sc->dmac = 1000;
5295 break;
5296 case 50:
5297 case 100:
5298 case 250:
5299 case 500:
5300 case 1000:
5301 case 2000:
5302 case 5000:
5303 case 10000:
5304 /* Legal values - allow */
5305 sc->dmac = newval;
5306 break;
5307 default:
5308 /* Do nothing, illegal value */
5309 return (EINVAL);
5310 }
5311
5312 /* Re-initialize hardware if it's already running */
5313 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
5314 if_init(ifp, ifp);
5315
5316 return (0);
5317 } /* ixgbe_sysctl_dmac */
5318
5319 #ifdef IXGBE_DEBUG
5320 /************************************************************************
5321 * ixgbe_sysctl_power_state
5322 *
5323 * Sysctl to test power states
5324 * Values:
5325 * 0 - set device to D0
5326 * 3 - set device to D3
5327 * (none) - get current device power state
5328 ************************************************************************/
5329 static int
ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)5330 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
5331 {
5332 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
5333 device_t dev = sc->dev;
5334 int curr_ps, new_ps, error = 0;
5335
5336 curr_ps = new_ps = pci_get_powerstate(dev);
5337
5338 error = sysctl_handle_int(oidp, &new_ps, 0, req);
5339 if ((error) || (req->newptr == NULL))
5340 return (error);
5341
5342 if (new_ps == curr_ps)
5343 return (0);
5344
5345 if (new_ps == 3 && curr_ps == 0)
5346 error = DEVICE_SUSPEND(dev);
5347 else if (new_ps == 0 && curr_ps == 3)
5348 error = DEVICE_RESUME(dev);
5349 else
5350 return (EINVAL);
5351
5352 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5353
5354 return (error);
5355 } /* ixgbe_sysctl_power_state */
5356 #endif
5357
5358 /************************************************************************
5359 * ixgbe_sysctl_wol_enable
5360 *
5361 * Sysctl to enable/disable the WoL capability,
5362 * if supported by the adapter.
5363 *
5364 * Values:
5365 * 0 - disabled
5366 * 1 - enabled
5367 ************************************************************************/
5368 static int
ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)5369 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
5370 {
5371 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
5372 struct ixgbe_hw *hw = &sc->hw;
5373 int new_wol_enabled;
5374 int error = 0;
5375
5376 new_wol_enabled = hw->wol_enabled;
5377 error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
5378 if ((error) || (req->newptr == NULL))
5379 return (error);
5380 new_wol_enabled = !!(new_wol_enabled);
5381 if (new_wol_enabled == hw->wol_enabled)
5382 return (0);
5383
5384 if (new_wol_enabled > 0 && !sc->wol_support)
5385 return (ENODEV);
5386 else
5387 hw->wol_enabled = new_wol_enabled;
5388
5389 return (0);
5390 } /* ixgbe_sysctl_wol_enable */
5391
5392 /************************************************************************
5393 * ixgbe_sysctl_wufc - Wake Up Filter Control
5394 *
5395 * Sysctl to enable/disable the types of packets that the
5396 * adapter will wake up on upon receipt.
5397 * Flags:
5398 * 0x1 - Link Status Change
5399 * 0x2 - Magic Packet
5400 * 0x4 - Direct Exact
5401 * 0x8 - Directed Multicast
5402 * 0x10 - Broadcast
5403 * 0x20 - ARP/IPv4 Request Packet
5404 * 0x40 - Direct IPv4 Packet
5405 * 0x80 - Direct IPv6 Packet
5406 *
5407 * Settings not listed above will cause the sysctl to return an error.
5408 ************************************************************************/
5409 static int
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)5410 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
5411 {
5412 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
5413 int error = 0;
5414 u32 new_wufc;
5415
5416 new_wufc = sc->wufc;
5417
5418 error = sysctl_handle_32(oidp, &new_wufc, 0, req);
5419 if ((error) || (req->newptr == NULL))
5420 return (error);
5421 if (new_wufc == sc->wufc)
5422 return (0);
5423
5424 if (new_wufc & 0xffffff00)
5425 return (EINVAL);
5426
5427 new_wufc &= 0xff;
5428 new_wufc |= (0xffffff & sc->wufc);
5429 sc->wufc = new_wufc;
5430
5431 return (0);
5432 } /* ixgbe_sysctl_wufc */
5433
5434 #ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config - Dump the RSS redirection table
 *
 * Debug-only sysctl: reads the RETA/ERETA registers and formats their
 * contents into an sbuf returned to the caller.
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	struct ixgbe_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0, reta_size;
	u32 reg;

	/* Register access is not allowed while in recovery mode. */
	if (atomic_load_acq_int(&sc->recovery_mode))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			/* First 32 entries live in the RETA registers... */
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			/* ...the remainder in the extended ERETA bank. */
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (0);
} /* ixgbe_sysctl_print_rss_config */
5492 #endif /* IXGBE_DEBUG */
5493
5494 /************************************************************************
5495 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5496 *
5497 * For X552/X557-AT devices using an external PHY
5498 ************************************************************************/
5499 static int
ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)5500 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
5501 {
5502 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
5503 struct ixgbe_hw *hw = &sc->hw;
5504 u16 reg;
5505
5506 if (atomic_load_acq_int(&sc->recovery_mode))
5507 return (EPERM);
5508
5509 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5510 device_printf(iflib_get_dev(sc->ctx),
5511 "Device has no supported external thermal sensor.\n");
5512 return (ENODEV);
5513 }
5514
5515 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5516 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5517 device_printf(iflib_get_dev(sc->ctx),
5518 "Error reading from PHY's current temperature register"
5519 "\n");
5520 return (EAGAIN);
5521 }
5522
5523 /* Shift temp for output */
5524 reg = reg >> 8;
5525
5526 return (sysctl_handle_16(oidp, NULL, reg, req));
5527 } /* ixgbe_sysctl_phy_temp */
5528
5529 /************************************************************************
5530 * ixgbe_sysctl_phy_overtemp_occurred
5531 *
5532 * Reports (directly from the PHY) whether the current PHY
5533 * temperature is over the overtemp threshold.
5534 ************************************************************************/
5535 static int
ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)5536 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
5537 {
5538 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
5539 struct ixgbe_hw *hw = &sc->hw;
5540 u16 reg;
5541
5542 if (atomic_load_acq_int(&sc->recovery_mode))
5543 return (EPERM);
5544
5545 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5546 device_printf(iflib_get_dev(sc->ctx),
5547 "Device has no supported external thermal sensor.\n");
5548 return (ENODEV);
5549 }
5550
5551 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5552 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) {
5553 device_printf(iflib_get_dev(sc->ctx),
5554 "Error reading from PHY's temperature status register\n");
5555 return (EAGAIN);
5556 }
5557
5558 /* Get occurrence bit */
5559 reg = !!(reg & 0x4000);
5560
5561 return (sysctl_handle_16(oidp, 0, reg, req));
5562 } /* ixgbe_sysctl_phy_overtemp_occurred */
5563
/************************************************************************
 * ixgbe_sysctl_eee_state
 *
 * Sysctl to set EEE power saving feature
 * Values:
 *   0      - disable EEE
 *   1      - enable EEE
 *   (none) - get current device EEE state
 ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
	device_t dev = sc->dev;
	if_t ifp = iflib_get_ifp(sc->ctx);
	int curr_eee, new_eee, error = 0;
	s32 retval;

	/* Configuration changes are not allowed in recovery mode. */
	if (atomic_load_acq_int(&sc->recovery_mode))
		return (EPERM);

	curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);

	error = sysctl_handle_int(oidp, &new_eee, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(sc->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	/* If link speed is <= 1Gbps and EEE is being enabled, log info */
	if (sc->hw.mac.type == ixgbe_mac_E610 &&
	    new_eee &&
	    sc->link_speed <= IXGBE_LINK_SPEED_1GB_FULL) {
		device_printf(dev,
		    "Energy Efficient Ethernet (EEE) feature is not "
		    "supported on link speeds equal to or below 1Gbps. "
		    "EEE is supported on speeds above 1Gbps.\n");
		return (EINVAL);
	}

	retval = ixgbe_setup_eee(&sc->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-neg */
	if_init(ifp, ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		sc->feat_en |= IXGBE_FEATURE_EEE;
	else
		sc->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */
5633
5634 static int
ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS)5635 ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS)
5636 {
5637 struct ixgbe_softc *sc;
5638 u32 reg, val, shift;
5639 int error, mask;
5640
5641 sc = oidp->oid_arg1;
5642 switch (oidp->oid_arg2) {
5643 case 0:
5644 reg = IXGBE_DTXTCPFLGL;
5645 shift = 0;
5646 break;
5647 case 1:
5648 reg = IXGBE_DTXTCPFLGL;
5649 shift = 16;
5650 break;
5651 case 2:
5652 reg = IXGBE_DTXTCPFLGH;
5653 shift = 0;
5654 break;
5655 default:
5656 return (EINVAL);
5657 break;
5658 }
5659 val = IXGBE_READ_REG(&sc->hw, reg);
5660 mask = (val >> shift) & 0xfff;
5661 error = sysctl_handle_int(oidp, &mask, 0, req);
5662 if (error != 0 || req->newptr == NULL)
5663 return (error);
5664 if (mask < 0 || mask > 0xfff)
5665 return (EINVAL);
5666 val = (val & ~(0xfff << shift)) | (mask << shift);
5667 IXGBE_WRITE_REG(&sc->hw, reg, val);
5668 return (0);
5669 }
5670
5671 /************************************************************************
5672 * ixgbe_init_device_features
5673 ************************************************************************/
5674 static void
ixgbe_init_device_features(struct ixgbe_softc * sc)5675 ixgbe_init_device_features(struct ixgbe_softc *sc)
5676 {
5677 s32 error;
5678
5679 sc->feat_cap = IXGBE_FEATURE_NETMAP |
5680 IXGBE_FEATURE_RSS |
5681 IXGBE_FEATURE_MSI |
5682 IXGBE_FEATURE_MSIX |
5683 IXGBE_FEATURE_LEGACY_IRQ;
5684
5685 /* Set capabilities first... */
5686 switch (sc->hw.mac.type) {
5687 case ixgbe_mac_82598EB:
5688 if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
5689 sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
5690 break;
5691 case ixgbe_mac_X540:
5692 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
5693 sc->feat_cap |= IXGBE_FEATURE_FDIR;
5694 if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
5695 (sc->hw.bus.func == 0))
5696 sc->feat_cap |= IXGBE_FEATURE_BYPASS;
5697 break;
5698 case ixgbe_mac_X550:
5699 sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
5700 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
5701 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
5702 sc->feat_cap |= IXGBE_FEATURE_FDIR;
5703 break;
5704 case ixgbe_mac_X550EM_x:
5705 sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
5706 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
5707 sc->feat_cap |= IXGBE_FEATURE_FDIR;
5708 if (sc->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
5709 sc->feat_cap |= IXGBE_FEATURE_EEE;
5710 break;
5711 case ixgbe_mac_X550EM_a:
5712 sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
5713 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
5714 sc->feat_cap |= IXGBE_FEATURE_FDIR;
5715 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
5716 if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
5717 (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
5718 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
5719 sc->feat_cap |= IXGBE_FEATURE_EEE;
5720 }
5721 break;
5722 case ixgbe_mac_82599EB:
5723 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
5724 sc->feat_cap |= IXGBE_FEATURE_FDIR;
5725 if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
5726 (sc->hw.bus.func == 0))
5727 sc->feat_cap |= IXGBE_FEATURE_BYPASS;
5728 if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
5729 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
5730 break;
5731 case ixgbe_mac_E610:
5732 sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
5733 sc->feat_cap |= IXGBE_FEATURE_DBG_DUMP;
5734 error = ixgbe_get_caps(&sc->hw);
5735 if (error == 0 && sc->hw.func_caps.common_cap.eee_support != 0)
5736 sc->feat_cap |= IXGBE_FEATURE_EEE;
5737 break;
5738 default:
5739 break;
5740 }
5741
5742 /* Enabled by default... */
5743 /* Fan failure detection */
5744 if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
5745 sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
5746 /* Netmap */
5747 if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
5748 sc->feat_en |= IXGBE_FEATURE_NETMAP;
5749 /* EEE */
5750 if (sc->feat_cap & IXGBE_FEATURE_EEE)
5751 sc->feat_en |= IXGBE_FEATURE_EEE;
5752 /* Thermal Sensor */
5753 if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
5754 sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
5755 /* Recovery mode */
5756 if (sc->feat_cap & IXGBE_FEATURE_RECOVERY_MODE)
5757 sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
5758 /* FW Debug Dump */
5759 if (sc->feat_cap & IXGBE_FEATURE_DBG_DUMP)
5760 sc->feat_en |= IXGBE_FEATURE_DBG_DUMP;
5761
5762 /* Enabled via global sysctl... */
5763 /* Flow Director */
5764 if (ixgbe_enable_fdir) {
5765 if (sc->feat_cap & IXGBE_FEATURE_FDIR)
5766 sc->feat_en |= IXGBE_FEATURE_FDIR;
5767 else
5768 device_printf(sc->dev,
5769 "Device does not support Flow Director."
5770 " Leaving disabled.");
5771 }
5772 /*
5773 * Message Signal Interrupts - Extended (MSI-X)
5774 * Normal MSI is only enabled if MSI-X calls fail.
5775 */
5776 if (!ixgbe_enable_msix)
5777 sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
5778 /* Receive-Side Scaling (RSS) */
5779 if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
5780 sc->feat_en |= IXGBE_FEATURE_RSS;
5781
5782 /* Disable features with unmet dependencies... */
5783 /* No MSI-X */
5784 if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
5785 sc->feat_cap &= ~IXGBE_FEATURE_RSS;
5786 sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
5787 sc->feat_en &= ~IXGBE_FEATURE_RSS;
5788 sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
5789 }
5790 } /* ixgbe_init_device_features */
5791
5792 /************************************************************************
5793 * ixgbe_check_fan_failure
5794 ************************************************************************/
5795 static void
ixgbe_check_fan_failure(struct ixgbe_softc * sc,u32 reg,bool in_interrupt)5796 ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
5797 {
5798 u32 mask;
5799
5800 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
5801 IXGBE_ESDP_SDP1;
5802
5803 if (reg & mask)
5804 device_printf(sc->dev,
5805 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
5806 } /* ixgbe_check_fan_failure */
5807
5808 /************************************************************************
5809 * ixgbe_sbuf_fw_version
5810 ************************************************************************/
/*
 * Append the firmware/NVM/OEM/Option-ROM/eTrack version strings for 'hw'
 * to 'buf'.  Each component is emitted only if the values read back are
 * neither the all-zero "unsupported" pattern nor the all-ones "invalid"
 * pattern.  'space' separates optional trailing components.
 */
static void
ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
{
	struct ixgbe_nvm_version nvm_ver = {0};
	const char *space = "";

	ixgbe_get_nvm_version(hw, &nvm_ver); /* NVM version */
	ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
	ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack a build ID in Intel's SCM */
	ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */

	/* FW version */
	if ((nvm_ver.phy_fw_maj == 0x0 &&
	    nvm_ver.phy_fw_min == 0x0 &&
	    nvm_ver.phy_fw_id == 0x0) ||
	    (nvm_ver.phy_fw_maj == 0xF &&
	    nvm_ver.phy_fw_min == 0xFF &&
	    nvm_ver.phy_fw_id == 0xF)) {
		/* If major, minor and id numbers are set to 0,
		 * reading FW version is unsupported. If major number
		 * is set to 0xF, minor is set to 0xFF and id is set
		 * to 0xF, this means that number read is invalid. */
	} else
		sbuf_printf(buf, "fw %d.%d.%d ",
		    nvm_ver.phy_fw_maj, nvm_ver.phy_fw_min,
		    nvm_ver.phy_fw_id);

	/* NVM version */
	if ((nvm_ver.nvm_major == 0x0 &&
	    nvm_ver.nvm_minor == 0x0 &&
	    nvm_ver.nvm_id == 0x0) ||
	    (nvm_ver.nvm_major == 0xF &&
	    nvm_ver.nvm_minor == 0xFF &&
	    nvm_ver.nvm_id == 0xF)) {
		/* If major, minor and id numbers are set to 0,
		 * reading NVM version is unsupported. If major number
		 * is set to 0xF, minor is set to 0xFF and id is set
		 * to 0xF, this means that number read is invalid. */
	} else
		sbuf_printf(buf, "nvm %x.%02x.%x ",
		    nvm_ver.nvm_major, nvm_ver.nvm_minor, nvm_ver.nvm_id);

	/* OEM product version, if the NVM reported one as valid. */
	if (nvm_ver.oem_valid) {
		sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
		    nvm_ver.oem_minor, nvm_ver.oem_release);
		space = " ";
	}

	/* Option ROM version, if present. */
	if (nvm_ver.or_valid) {
		sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
		    space, nvm_ver.or_major, nvm_ver.or_build,
		    nvm_ver.or_patch);
		space = " ";
	}

	/*
	 * NOTE(review): because of the trailing "| 0xFFFFFFFF", this
	 * comparison constant collapses to 0xFFFFFFFF regardless of
	 * NVM_VER_INVALID/NVM_ETK_SHIFT — presumably the intent was just
	 * "skip an all-ones eTrack ID"; confirm against the other drivers.
	 */
	if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
	    NVM_VER_INVALID | 0xFFFFFFFF)) {
		sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
	}
} /* ixgbe_sbuf_fw_version */
5871
5872 /************************************************************************
5873 * ixgbe_print_fw_version
5874 ************************************************************************/
5875 static void
ixgbe_print_fw_version(if_ctx_t ctx)5876 ixgbe_print_fw_version(if_ctx_t ctx)
5877 {
5878 struct ixgbe_softc *sc = iflib_get_softc(ctx);
5879 struct ixgbe_hw *hw = &sc->hw;
5880 device_t dev = sc->dev;
5881 struct sbuf *buf;
5882 int error = 0;
5883
5884 buf = sbuf_new_auto();
5885 if (!buf) {
5886 device_printf(dev, "Could not allocate sbuf for output.\n");
5887 return;
5888 }
5889
5890 ixgbe_sbuf_fw_version(hw, buf);
5891
5892 error = sbuf_finish(buf);
5893 if (error)
5894 device_printf(dev, "Error finishing sbuf: %d\n", error);
5895 else if (sbuf_len(buf))
5896 device_printf(dev, "%s\n", sbuf_data(buf));
5897
5898 sbuf_delete(buf);
5899 } /* ixgbe_print_fw_version */
5900
5901 /************************************************************************
5902 * ixgbe_sysctl_print_fw_version
5903 ************************************************************************/
5904 static int
ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)5905 ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
5906 {
5907 struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
5908 struct ixgbe_hw *hw = &sc->hw;
5909 device_t dev = sc->dev;
5910 struct sbuf *buf;
5911 int error = 0;
5912
5913 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5914 if (!buf) {
5915 device_printf(dev, "Could not allocate sbuf for output.\n");
5916 return (ENOMEM);
5917 }
5918
5919 ixgbe_sbuf_fw_version(hw, buf);
5920
5921 error = sbuf_finish(buf);
5922 if (error)
5923 device_printf(dev, "Error finishing sbuf: %d\n", error);
5924
5925 sbuf_delete(buf);
5926
5927 return (0);
5928 } /* ixgbe_sysctl_print_fw_version */
5929