xref: /linux/drivers/net/ethernet/aquantia/atlantic/aq_main.c (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Atlantic Network Driver
3  *
4  * Copyright (C) 2014-2019 aQuantia Corporation
5  * Copyright (C) 2019-2020 Marvell International Ltd.
6  */
7 
8 /* File aq_main.c: Main file for aQuantia Linux driver. */
9 
10 #include "aq_main.h"
11 #include "aq_nic.h"
12 #include "aq_pci_func.h"
13 #include "aq_ethtool.h"
14 #include "aq_ptp.h"
15 #include "aq_filters.h"
16 #include "aq_hw_utils.h"
17 #include "aq_vec.h"
18 
19 #include <linux/netdevice.h>
20 #include <linux/module.h>
21 #include <linux/ip.h>
22 #include <linux/udp.h>
23 #include <net/pkt_cls.h>
24 #include <net/pkt_sched.h>
25 #include <linux/filter.h>
26 
27 MODULE_LICENSE("GPL v2");
28 MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
29 MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
30 
31 DEFINE_STATIC_KEY_FALSE(aq_xdp_locking_key);
32 EXPORT_SYMBOL(aq_xdp_locking_key);
33 
34 static const char aq_ndev_driver_name[] = AQ_CFG_DRV_NAME;
35 
36 static const struct net_device_ops aq_ndev_ops;
37 
38 static struct workqueue_struct *aq_ndev_wq;
39 
/* Defer @work onto the driver's private single-threaded workqueue. */
void aq_ndev_schedule_work(struct work_struct *work)
{
	queue_work(aq_ndev_wq, work);
}
44 
aq_ndev_alloc(void)45 struct net_device *aq_ndev_alloc(void)
46 {
47 	struct net_device *ndev = NULL;
48 	struct aq_nic_s *aq_nic = NULL;
49 
50 	ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_HW_QUEUES_MAX);
51 	if (!ndev)
52 		return NULL;
53 
54 	aq_nic = netdev_priv(ndev);
55 	aq_nic->ndev = ndev;
56 	ndev->netdev_ops = &aq_ndev_ops;
57 	ndev->ethtool_ops = &aq_ethtool_ops;
58 
59 	return ndev;
60 }
61 
aq_ndev_open(struct net_device * ndev)62 int aq_ndev_open(struct net_device *ndev)
63 {
64 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
65 	int err = 0;
66 
67 	err = aq_nic_init(aq_nic);
68 	if (err < 0)
69 		goto err_exit;
70 
71 	err = aq_reapply_rxnfc_all_rules(aq_nic);
72 	if (err < 0)
73 		goto err_exit;
74 
75 	err = aq_filters_vlans_update(aq_nic);
76 	if (err < 0)
77 		goto err_exit;
78 
79 	err = aq_nic_start(aq_nic);
80 	if (err < 0) {
81 		aq_nic_stop(aq_nic);
82 		goto err_exit;
83 	}
84 
85 err_exit:
86 	if (err < 0)
87 		aq_nic_deinit(aq_nic, true);
88 
89 	return err;
90 }
91 
aq_ndev_close(struct net_device * ndev)92 int aq_ndev_close(struct net_device *ndev)
93 {
94 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
95 	int err = 0;
96 
97 	err = aq_nic_stop(aq_nic);
98 	aq_nic_deinit(aq_nic, true);
99 
100 	return err;
101 }
102 
/* ndo_start_xmit: transmit one skb.
 *
 * When the PTP datapath is up, PTP traffic (HW-timestamp requests,
 * UDP ports 319/320, L2 ETH_P_1588 frames) is steered to the
 * dedicated PTP queue; everything else goes through the regular NIC
 * transmit path.
 */
static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	if (unlikely(aq_utils_obj_test(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP))) {
		/* Hardware adds the Timestamp for PTPv2 802.AS1
		 * and PTPv2 IPv4 UDP.
		 * We have to push even general 320 port messages to the ptp
		 * queue explicitly. This is a limitation of current firmware
		 * and hardware PTP design of the chip. Otherwise ptp stream
		 * will fail to sync
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
		    unlikely((ip_hdr(skb)->version == 4) &&
			     (ip_hdr(skb)->protocol == IPPROTO_UDP) &&
			     ((udp_hdr(skb)->dest == htons(319)) ||
			      (udp_hdr(skb)->dest == htons(320)))) ||
		    unlikely(eth_hdr(skb)->h_proto == htons(ETH_P_1588)))
			return aq_ptp_xmit(aq_nic, skb);
	}
#endif

	return aq_nic_xmit(aq_nic, skb);
}
128 
aq_ndev_change_mtu(struct net_device * ndev,int new_mtu)129 static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
130 {
131 	int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
132 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
133 	struct bpf_prog *prog;
134 	int err;
135 
136 	prog = READ_ONCE(aq_nic->xdp_prog);
137 	if (prog && !prog->aux->xdp_has_frags &&
138 	    new_frame_size > AQ_CFG_RX_FRAME_MAX) {
139 		netdev_err(ndev, "Illegal MTU %d for XDP prog without frags\n",
140 			   ndev->mtu);
141 		return -EOPNOTSUPP;
142 	}
143 
144 	err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
145 
146 	if (err < 0)
147 		goto err_exit;
148 	WRITE_ONCE(ndev->mtu, new_mtu);
149 
150 err_exit:
151 	return err;
152 }
153 
/* ndo_set_features: apply a new netdev feature set.
 *
 * Clears rxnfc/VLAN filter state when the corresponding features are
 * being turned off, reprograms offloads, and restarts the interface
 * when LRO or VLAN strip/insert settings changed (those only take
 * effect across a datapath restart).
 */
static int aq_ndev_set_features(struct net_device *ndev,
				netdev_features_t features)
{
	bool is_vlan_tx_insert = !!(features & NETIF_F_HW_VLAN_CTAG_TX);
	bool is_vlan_rx_strip = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	bool need_ndev_restart = false;
	struct aq_nic_cfg_s *aq_cfg;
	bool is_lro = false;
	int err = 0;

	aq_cfg = aq_nic_get_cfg(aq_nic);

	/* NTUPLE being turned off: drop all installed rxnfc rules. */
	if (!(features & NETIF_F_NTUPLE)) {
		if (aq_nic->ndev->features & NETIF_F_NTUPLE) {
			err = aq_clear_rxnfc_all_rules(aq_nic);
			if (unlikely(err))
				goto err_exit;
		}
	}
	/* VLAN filtering being turned off: disable the HW VLAN filter. */
	if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
		if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
			err = aq_filters_vlan_offload_off(aq_nic);
			if (unlikely(err))
				goto err_exit;
		}
	}

	aq_cfg->features = features;

	/* LRO changes are deferred to the restart below. */
	if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
		is_lro = features & NETIF_F_LRO;

		if (aq_cfg->is_lro != is_lro) {
			aq_cfg->is_lro = is_lro;
			need_ndev_restart = true;
		}
	}

	/* RX checksum offload can be reprogrammed on the fly. */
	if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM) {
		err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
							aq_cfg);

		if (unlikely(err))
			goto err_exit;
	}

	if (aq_cfg->is_vlan_rx_strip != is_vlan_rx_strip) {
		aq_cfg->is_vlan_rx_strip = is_vlan_rx_strip;
		need_ndev_restart = true;
	}
	if (aq_cfg->is_vlan_tx_insert != is_vlan_tx_insert) {
		aq_cfg->is_vlan_tx_insert = is_vlan_tx_insert;
		need_ndev_restart = true;
	}

	if (need_ndev_restart && netif_running(ndev)) {
		aq_ndev_close(ndev);
		aq_ndev_open(ndev);
	}

err_exit:
	return err;
}
218 
/* ndo_fix_features: reconcile a requested feature set with driver
 * constraints.  LRO requires RX checksum offload, and is incompatible
 * with a single-buffer (non-frags) XDP program.
 */
static netdev_features_t aq_ndev_fix_features(struct net_device *ndev,
					      netdev_features_t features)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	struct bpf_prog *prog;

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	/* "prog" already holds the attached program (READ_ONCE above);
	 * re-testing aq_nic->xdp_prog was redundant.
	 */
	prog = READ_ONCE(aq_nic->xdp_prog);
	if (prog && !prog->aux->xdp_has_frags &&
	    (features & NETIF_F_LRO)) {
		netdev_err(ndev, "LRO is not supported with single buffer XDP, disabling\n");
		features &= ~NETIF_F_LRO;
	}

	return features;
}
237 
/* ndo_set_mac_address: validate the new address, store it on the
 * netdev, then program it into the hardware.
 */
static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret < 0)
		return ret;

	return aq_nic_set_mac(aq_nic, ndev);
}
253 
/* ndo_set_rx_mode: push the current address lists and RX mode down to
 * the hardware.  The ndo returns void, so errors are ignored here.
 */
static void aq_ndev_set_multicast_settings(struct net_device *ndev)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

	(void)aq_nic_set_multicast_list(aq_nic, ndev);
}
260 
/* ndo_hwtstamp_set: validate and apply a hardware timestamping
 * configuration.  Only PTPv2 RX filtering is supported; every
 * specific PTPv2 sub-filter is widened to the generic
 * HWTSTAMP_FILTER_PTP_V2_EVENT mode that the hardware implements.
 */
static int aq_ndev_hwtstamp_set(struct net_device *netdev,
				struct kernel_hwtstamp_config *config,
				struct netlink_ext_ack *extack)
{
	struct aq_nic_s *aq_nic = netdev_priv(netdev);

	if (!IS_REACHABLE(CONFIG_PTP_1588_CLOCK) || !aq_nic->aq_ptp)
		return -EOPNOTSUPP;

	/* TX timestamping is simply on or off. */
	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		/* Report back the (wider) mode actually applied. */
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_NONE:
		break;
	default:
		return -ERANGE;
	}

	return aq_ptp_hwtstamp_config_set(aq_nic->aq_ptp, config);
}
298 
/* ndo_hwtstamp_get: report the current hardware timestamping
 * configuration, or -EOPNOTSUPP when no PTP clock is present.
 */
static int aq_ndev_hwtstamp_get(struct net_device *netdev,
				struct kernel_hwtstamp_config *config)
{
	struct aq_nic_s *aq_nic = netdev_priv(netdev);

	if (!aq_nic->aq_ptp)
		return -EOPNOTSUPP;

	aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, config);
	return 0;
}
310 
/* ndo_vlan_rx_add_vid: record @vid as active and reprogram the
 * hardware VLAN filter table.
 */
static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
				  u16 vid)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

	if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
		return -EOPNOTSUPP;

	set_bit(vid, aq_nic->active_vlans);

	return aq_filters_vlans_update(aq_nic);
}
323 
/* ndo_vlan_rx_kill_vid: mark @vid inactive and remove its filter. */
static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
				   u16 vid)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

	if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
		return -EOPNOTSUPP;

	clear_bit(vid, aq_nic->active_vlans);

	/* When no dedicated flow-steering rule existed for this VLAN,
	 * rewrite the whole VLAN filter table instead.
	 */
	if (aq_del_fvlan_by_vlan(aq_nic, vid) == -ENOENT)
		return aq_filters_vlans_update(aq_nic);

	return 0;
}
339 
/* Validate an mqprio offload request against hardware limits.
 *
 * Checks that the TC count fits the chip/driver maxima and is a power
 * of two, and that min-rate shaping is only requested on hardware
 * that supports it (Antigua).  Returns 0 or -EOPNOTSUPP.
 */
static int aq_validate_mqprio_opt(struct aq_nic_s *self,
				  struct tc_mqprio_qopt_offload *mqprio,
				  const unsigned int num_tc)
{
	const bool has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
	struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(self);
	const unsigned int tcs_max = min_t(u8, aq_nic_cfg->aq_hw_caps->tcs_max,
					   AQ_CFG_TCS_MAX);

	if (num_tc > tcs_max) {
		netdev_err(self->ndev, "Too many TCs requested\n");
		return -EOPNOTSUPP;
	}

	if (num_tc != 0 && !is_power_of_2(num_tc)) {
		netdev_err(self->ndev, "TC count should be power of 2\n");
		return -EOPNOTSUPP;
	}

	if (has_min_rate && !ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
		netdev_err(self->ndev, "Min tx rate is not supported\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
366 
/* ndo_setup_tc: configure mqprio traffic classes.
 *
 * Converts the per-TC min/max rates to the hardware's Mbps units
 * before programming the shapers, then applies the prio-to-TC
 * mapping.  Only TC_SETUP_QDISC_MQPRIO is supported.
 */
static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			   void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct aq_nic_s *aq_nic = netdev_priv(dev);
	bool has_min_rate;
	bool has_max_rate;
	int err;
	int i;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
	has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);

	err = aq_validate_mqprio_opt(aq_nic, mqprio, mqprio->qopt.num_tc);
	if (err)
		return err;

	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		if (has_max_rate) {
			u64 max_rate = mqprio->max_rate[i];

			do_div(max_rate, AQ_MBPS_DIVISOR);
			aq_nic_setup_tc_max_rate(aq_nic, i, (u32)max_rate);
		}

		if (has_min_rate) {
			u64 min_rate = mqprio->min_rate[i];

			do_div(min_rate, AQ_MBPS_DIVISOR);
			aq_nic_setup_tc_min_rate(aq_nic, i, (u32)min_rate);
		}
	}

	return aq_nic_setup_tc_mqprio(aq_nic, mqprio->qopt.num_tc,
				      mqprio->qopt.prio_tc_map);
}
406 
aq_xdp_setup(struct net_device * ndev,struct bpf_prog * prog,struct netlink_ext_ack * extack)407 static int aq_xdp_setup(struct net_device *ndev, struct bpf_prog *prog,
408 			struct netlink_ext_ack *extack)
409 {
410 	bool need_update, running = netif_running(ndev);
411 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
412 	struct bpf_prog *old_prog;
413 
414 	if (prog && !prog->aux->xdp_has_frags) {
415 		if (ndev->mtu > AQ_CFG_RX_FRAME_MAX) {
416 			NL_SET_ERR_MSG_MOD(extack,
417 					   "prog does not support XDP frags");
418 			return -EOPNOTSUPP;
419 		}
420 
421 		if (prog && ndev->features & NETIF_F_LRO) {
422 			netdev_err(ndev,
423 				   "LRO is not supported with single buffer XDP, disabling\n");
424 			ndev->features &= ~NETIF_F_LRO;
425 		}
426 	}
427 
428 	need_update = !!aq_nic->xdp_prog != !!prog;
429 	if (running && need_update)
430 		aq_ndev_close(ndev);
431 
432 	old_prog = xchg(&aq_nic->xdp_prog, prog);
433 	if (old_prog)
434 		bpf_prog_put(old_prog);
435 
436 	if (!old_prog && prog)
437 		static_branch_inc(&aq_xdp_locking_key);
438 	else if (old_prog && !prog)
439 		static_branch_dec(&aq_xdp_locking_key);
440 
441 	if (running && need_update)
442 		return aq_ndev_open(ndev);
443 
444 	return 0;
445 }
446 
aq_xdp(struct net_device * dev,struct netdev_bpf * xdp)447 static int aq_xdp(struct net_device *dev, struct netdev_bpf *xdp)
448 {
449 	switch (xdp->command) {
450 	case XDP_SETUP_PROG:
451 		return aq_xdp_setup(dev, xdp->prog, xdp->extack);
452 	default:
453 		return -EINVAL;
454 	}
455 }
456 
/* Netdev operations table shared by all aquantia interfaces. */
static const struct net_device_ops aq_ndev_ops = {
	.ndo_open = aq_ndev_open,
	.ndo_stop = aq_ndev_close,
	.ndo_start_xmit = aq_ndev_start_xmit,
	.ndo_set_rx_mode = aq_ndev_set_multicast_settings,
	.ndo_change_mtu = aq_ndev_change_mtu,
	.ndo_set_mac_address = aq_ndev_set_mac_address,
	.ndo_set_features = aq_ndev_set_features,
	.ndo_fix_features = aq_ndev_fix_features,
	.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
	.ndo_setup_tc = aq_ndo_setup_tc,
	.ndo_bpf = aq_xdp,
	.ndo_xdp_xmit = aq_xdp_xmit,
	.ndo_hwtstamp_get = aq_ndev_hwtstamp_get,
	.ndo_hwtstamp_set = aq_ndev_hwtstamp_set,
};
474 
aq_ndev_init_module(void)475 static int __init aq_ndev_init_module(void)
476 {
477 	int ret;
478 
479 	aq_ndev_wq = create_singlethread_workqueue(aq_ndev_driver_name);
480 	if (!aq_ndev_wq) {
481 		pr_err("Failed to create workqueue\n");
482 		return -ENOMEM;
483 	}
484 
485 	ret = aq_pci_func_register_driver();
486 	if (ret) {
487 		destroy_workqueue(aq_ndev_wq);
488 		return ret;
489 	}
490 
491 	return 0;
492 }
493 
aq_ndev_exit_module(void)494 static void __exit aq_ndev_exit_module(void)
495 {
496 	aq_pci_func_unregister_driver();
497 
498 	if (aq_ndev_wq) {
499 		destroy_workqueue(aq_ndev_wq);
500 		aq_ndev_wq = NULL;
501 	}
502 }
503 
504 module_init(aq_ndev_init_module);
505 module_exit(aq_ndev_exit_module);
506