// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_eswitch.h"
#include "devlink/devlink.h"
#include "devlink/port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_repr_inc_tx_stats - increment Tx statistic by one packet
 * @repr: repr to increment stats on
 * @len: length of the packet
 * @xmit_status: value returned by xmit function
 */
void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
			   int xmit_status)
{
	struct ice_repr_pcpu_stats *stats;

	if (unlikely(xmit_status != NET_XMIT_SUCCESS &&
		     xmit_status != NET_XMIT_CN)) {
		this_cpu_inc(repr->stats->tx_drops);
		return;
	}

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/**
 * ice_repr_inc_rx_stats - increment Rx statistic by one packet
 * @netdev: repr netdev to increment stats on
 * @len: length of the packet
 */
void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_repr_pcpu_stats *stats;

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/**
 * ice_repr_get_stats64 - get VF stats for VFPR use
 * @netdev: pointer to port representor netdev
 * @stats: pointer to struct where stats can be stored
 */
static void
ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_repr *repr = np->repr;
	struct ice_eth_stats *eth_stats;
	struct ice_vsi *vsi;

	if (!repr->ops.ready(repr))
		return;
	vsi = repr->src_vsi;

	ice_update_vsi_stats(vsi);
	eth_stats = &vsi->eth_stats;

	stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
			    eth_stats->tx_multicast;
	stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
			    eth_stats->rx_multicast;
	stats->tx_bytes = eth_stats->tx_bytes;
	stats->rx_bytes = eth_stats->rx_bytes;
	stats->multicast = eth_stats->rx_multicast;
	stats->tx_errors = eth_stats->tx_errors;
	stats->tx_dropped = eth_stats->tx_discards;
	stats->rx_dropped = eth_stats->rx_discards;
}

/**
 * ice_netdev_to_repr - Get port representor for given netdevice
 * @netdev: pointer to port representor netdev
 */
struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->repr;
}

/**
 * ice_repr_vf_open - Enable port representor's network interface
 * @netdev: network interface device structure
 *
 * The open entry point is called when a port representor's network
 * interface is made active by the system (IFF_UP). The corresponding
 * VF is notified about the link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_vf_open(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = true;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
}

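/**
 * ice_repr_sf_open - Enable subfunction port representor's network interface
 * @netdev: network interface device structure
 *
 * The open entry point is called when a subfunction port representor's
 * network interface is made active by the system (IFF_UP).
 *
 * Returns 0 on success
 */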
static int ice_repr_sf_open(struct net_device *netdev)
{
	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_vf_stop - Disable port representor's network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when a port representor's network
 * interface is de-activated by the system. The corresponding VF is
 * notified about the link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_vf_stop(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = false;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return 0;
}

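/**
 * ice_repr_sf_stop - Disable subfunction port representor's network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when a subfunction port representor's
 * network interface is de-activated by the system.
 *
 * Returns 0 on success
 */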
static int ice_repr_sf_stop(struct net_device *netdev)
{
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_sp_stats64 - get slow path stats for port representor
 * @dev: network interface device structure
 * @stats: netlink stats structure
 */
static int
ice_repr_sp_stats64(const struct net_device *dev,
		    struct rtnl_link_stats64 *stats)
{
	struct ice_repr *repr = ice_netdev_to_repr(dev);
	int i;

	for_each_possible_cpu(i) {
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		struct ice_repr_pcpu_stats *repr_stats;
		unsigned int start;

		repr_stats = per_cpu_ptr(repr->stats, i);
		do {
			start = u64_stats_fetch_begin(&repr_stats->syncp);
			tbytes = repr_stats->tx_bytes;
			tpkts = repr_stats->tx_packets;
			tdrops = repr_stats->tx_drops;
			rbytes = repr_stats->rx_bytes;
			rpkts = repr_stats->rx_packets;
		} while (u64_stats_fetch_retry(&repr_stats->syncp, start));

		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return 0;
}

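/**
 * ice_repr_ndo_has_offload_stats - check if offload stats are supported
 * @dev: pointer to port representor netdev
 * @attr_id: identifier of the requested statistic
 *
 * Return: true only for IFLA_OFFLOAD_XSTATS_CPU_HIT, the single offload
 * statistic exposed by port representors.
 */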
static bool
ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

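/**
 * ice_repr_ndo_get_offload_stats - get offload stats for port representor
 * @attr_id: identifier of the requested statistic
 * @dev: pointer to port representor netdev
 * @sp: pointer to struct where stats should be stored
 *
 * Return: 0 on success, -EINVAL for an unsupported @attr_id.
 */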
static int
ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
			       void *sp)
{
	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);

	return -EINVAL;
}

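/**
 * ice_repr_setup_tc_cls_flower - offload TC flower filter on representor
 * @repr: pointer to repr structure
 * @flower: pointer to flower offload structure
 *
 * Add or delete a flower classifier on the representor's source VSI.
 */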
static int
ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
			     struct flow_cls_offload *flower)
{
	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower,
					  true);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(repr->src_vsi, flower);
	default:
		return -EINVAL;
	}
}

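/**
 * ice_repr_setup_tc_block_cb - TC block callback for port representor
 * @type: type of TC offload
 * @type_data: TC offload data
 * @cb_priv: netdev private data passed when the block callback was registered
 */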
static int
ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			   void *cb_priv)
{
	struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
	struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_repr_setup_tc_cls_flower(np->repr, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(ice_repr_block_cb_list);

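/**
 * ice_repr_setup_tc - ndo callback to set up TC offloads on representor
 * @netdev: pointer to port representor netdev
 * @type: type of TC offload setup
 * @type_data: TC offload data
 */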
static int
ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple((struct flow_block_offload *)
						  type_data,
						  &ice_repr_block_cb_list,
						  ice_repr_setup_tc_block_cb,
						  np, np, true);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops ice_repr_vf_netdev_ops = {
	.ndo_get_stats64 = ice_repr_get_stats64,
	.ndo_open = ice_repr_vf_open,
	.ndo_stop = ice_repr_vf_stop,
	.ndo_start_xmit = ice_eswitch_port_start_xmit,
	.ndo_setup_tc = ice_repr_setup_tc,
	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};

static const struct net_device_ops ice_repr_sf_netdev_ops = {
	.ndo_get_stats64 = ice_repr_get_stats64,
	.ndo_open = ice_repr_sf_open,
	.ndo_stop = ice_repr_sf_stop,
	.ndo_start_xmit = ice_eswitch_port_start_xmit,
	.ndo_setup_tc = ice_repr_setup_tc,
	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};

/**
 * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
 * @netdev: pointer to netdev
 */
bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
	return netdev && (netdev->netdev_ops == &ice_repr_vf_netdev_ops ||
			  netdev->netdev_ops == &ice_repr_sf_netdev_ops);
}

/**
 * ice_repr_reg_netdev - register port representor netdev
 * @netdev: pointer to port representor netdev
 * @ops: new ops for netdev
 */
static int
ice_repr_reg_netdev(struct net_device *netdev, const struct net_device_ops *ops)
{
	eth_hw_addr_random(netdev);
	netdev->netdev_ops = ops;
	ice_set_ethtool_repr_ops(netdev);

	netdev->hw_features |= NETIF_F_HW_TC;

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return register_netdev(netdev);
}

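/**
 * ice_repr_ready_vf - check if the represented VF is ready
 * @repr: pointer to repr structure
 *
 * Return: true when the VF behind the representor is ready for configuration.
 */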
static int ice_repr_ready_vf(struct ice_repr *repr)
{
	return !ice_check_vf_ready_for_cfg(repr->vf);
}

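/**
 * ice_repr_ready_sf - check if the represented subfunction is ready
 * @repr: pointer to repr structure
 *
 * Return: true when the subfunction behind the representor is active.
 */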
static int ice_repr_ready_sf(struct ice_repr *repr)
{
	return repr->sf->active;
}

/**
 * ice_repr_destroy - remove representor from VF
 * @repr: pointer to representor structure
 */
void ice_repr_destroy(struct ice_repr *repr)
{
	free_percpu(repr->stats);
	free_netdev(repr->netdev);
	kfree(repr);
}

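/**
 * ice_repr_rem_vf - unroll VF port representor setup
 * @repr: pointer to representor structure
 *
 * Detach the source VSI from the eswitch, re-allow LLDP Tx from the VF,
 * unregister the representor netdev, destroy its devlink port and switch
 * the VF back to the default virtchnl ops.
 */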
static void ice_repr_rem_vf(struct ice_repr *repr)
{
	ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
	ice_pass_vf_tx_lldp(repr->src_vsi, true);
	unregister_netdev(repr->netdev);
	ice_devlink_destroy_vf_port(repr->vf);
	ice_virtchnl_set_dflt_ops(repr->vf);
}

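/**
 * ice_repr_rem_sf - unroll SF port representor setup
 * @repr: pointer to representor structure
 *
 * Unregister the representor netdev and destroy its devlink port.
 */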
static void ice_repr_rem_sf(struct ice_repr *repr)
{
	unregister_netdev(repr->netdev);
	ice_devlink_destroy_sf_port(repr->sf);
}

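/**
 * ice_repr_set_tx_topology - export Tx scheduling topology to devlink-rate
 * @pf: pointer to PF structure
 * @devlink: pointer to devlink instance of the PF
 */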
static void ice_repr_set_tx_topology(struct ice_pf *pf, struct devlink *devlink)
{
	/* only export if ADQ and DCB disabled and eswitch enabled */
	if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
	    !ice_is_switchdev_running(pf))
		return;

	ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
}

/**
 * ice_repr_create - add representor for generic VSI
 * @src_vsi: pointer to VSI structure of device to represent
 */
static struct ice_repr *ice_repr_create(struct ice_vsi *src_vsi)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	int err;

	repr = kzalloc(sizeof(*repr), GFP_KERNEL);
	if (!repr)
		return ERR_PTR(-ENOMEM);

	repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
	if (!repr->netdev) {
		err = -ENOMEM;
		goto err_alloc;
	}

	repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats);
	if (!repr->stats) {
		err = -ENOMEM;
		goto err_stats;
	}

	repr->src_vsi = src_vsi;
	repr->id = src_vsi->vsi_num;
	np = netdev_priv(repr->netdev);
	np->repr = repr;

	repr->netdev->min_mtu = ETH_MIN_MTU;
	repr->netdev->max_mtu = ICE_MAX_MTU;

	SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(src_vsi->back));

	return repr;

err_stats:
	free_netdev(repr->netdev);
err_alloc:
	kfree(repr);
	return ERR_PTR(err);
}

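/**
 * ice_repr_add_vf - set up port representor for VF
 * @repr: pointer to representor structure
 *
 * Create a devlink port for the VF, register the representor netdev, block
 * LLDP Tx from the VF, attach the source VSI to the eswitch and switch the
 * VF to the representor virtchnl ops.
 *
 * Return: 0 on success, negative error code otherwise.
 */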
static int ice_repr_add_vf(struct ice_repr *repr)
{
	struct ice_vf *vf = repr->vf;
	struct devlink *devlink;
	int err;

	err = ice_devlink_create_vf_port(vf);
	if (err)
		return err;

	SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
	err = ice_repr_reg_netdev(repr->netdev, &ice_repr_vf_netdev_ops);
	if (err)
		goto err_netdev;

	err = ice_drop_vf_tx_lldp(repr->src_vsi, true);
	if (err)
		goto err_drop_lldp;

	err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
	if (err)
		goto err_cfg_vsi;

	ice_virtchnl_set_repr_ops(vf);

	devlink = priv_to_devlink(vf->pf);
	ice_repr_set_tx_topology(vf->pf, devlink);

	return 0;

err_cfg_vsi:
	ice_pass_vf_tx_lldp(repr->src_vsi, true);
err_drop_lldp:
	unregister_netdev(repr->netdev);
err_netdev:
	ice_devlink_destroy_vf_port(vf);
	return err;
}

/**
 * ice_repr_create_vf - add representor for VF VSI
 * @vf: VF to create port representor on
 *
 * Set the correct representor type for the VF and its function pointers.
 *
 * Return: created port representor on success, error otherwise
 */
struct ice_repr *ice_repr_create_vf(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_repr *repr;

	if (!vsi)
		return ERR_PTR(-EINVAL);

	repr = ice_repr_create(vsi);
	if (IS_ERR(repr))
		return repr;

	repr->type = ICE_REPR_TYPE_VF;
	repr->vf = vf;
	repr->ops.add = ice_repr_add_vf;
	repr->ops.rem = ice_repr_rem_vf;
	repr->ops.ready = ice_repr_ready_vf;

	ether_addr_copy(repr->parent_mac, vf->hw_lan_addr);

	return repr;
}

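/**
 * ice_repr_add_sf - set up port representor for SF
 * @repr: pointer to representor structure
 *
 * Create a devlink port for the subfunction and register the representor
 * netdev.
 *
 * Return: 0 on success, negative error code otherwise.
 */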
static int ice_repr_add_sf(struct ice_repr *repr)
{
	struct ice_dynamic_port *sf = repr->sf;
	int err;

	err = ice_devlink_create_sf_port(sf);
	if (err)
		return err;

	SET_NETDEV_DEVLINK_PORT(repr->netdev, &sf->devlink_port);
	err = ice_repr_reg_netdev(repr->netdev, &ice_repr_sf_netdev_ops);
	if (err)
		goto err_netdev;

	ice_repr_set_tx_topology(sf->vsi->back, priv_to_devlink(sf->vsi->back));

	return 0;

err_netdev:
	ice_devlink_destroy_sf_port(sf);
	return err;
}

/**
 * ice_repr_create_sf - add representor for SF VSI
 * @sf: SF to create port representor on
 *
 * Set the correct representor type for the SF and its function pointers.
 *
 * Return: created port representor on success, error otherwise
 */
struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf)
{
	struct ice_repr *repr = ice_repr_create(sf->vsi);

	if (IS_ERR(repr))
		return repr;

	repr->type = ICE_REPR_TYPE_SF;
	repr->sf = sf;
	repr->ops.add = ice_repr_add_sf;
	repr->ops.rem = ice_repr_rem_sf;
	repr->ops.ready = ice_repr_ready_sf;

	ether_addr_copy(repr->parent_mac, sf->hw_addr);

	return repr;
}

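/**
 * ice_repr_get - get port representor by ID
 * @pf: pointer to PF structure
 * @id: representor ID used as the eswitch xarray index
 *
 * Return: pointer to the port representor, or NULL if it doesn't exist.
 */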
struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id)
{
	return xa_load(&pf->eswitch.reprs, id);
}

/**
 * ice_repr_start_tx_queues - start Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_start_tx_queues(struct ice_repr *repr)
{
	netif_carrier_on(repr->netdev);
	netif_tx_start_all_queues(repr->netdev);
}

/**
 * ice_repr_stop_tx_queues - stop Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_stop_tx_queues(struct ice_repr *repr)
{
	netif_carrier_off(repr->netdev);
	netif_tx_stop_all_queues(repr->netdev);
}