xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h (revision 37a93dd5c49b5fda807fd204edf2547c3493319c)
1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #ifndef __MLX5_LAG_H__
5 #define __MLX5_LAG_H__
6 
7 #include <linux/debugfs.h>
8 
9 #define MLX5_LAG_MAX_HASH_BUCKETS 16
10 #include "mlx5_core.h"
11 #include "mp.h"
12 #include "port_sel.h"
13 #include "mpesw.h"
14 
/* Slot indices for the two functions of a LAG pair (see mlx5_lag::pf[]). */
enum {
	MLX5_LAG_P1,
	MLX5_LAG_P2,
};
19 
/* Bit numbers for mlx5_lag::state_flags (tested via test_bit()). */
enum {
	MLX5_LAG_FLAG_NDEVS_READY,
};
23 
/* Bit numbers for mlx5_lag::mode_flags, refining the active mode
 * (hash-based port selection, shared FDB, native FDB selection mode).
 */
enum {
	MLX5_LAG_MODE_FLAG_HASH_BASED,
	MLX5_LAG_MODE_FLAG_SHARED_FDB,
	MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
};
29 
/* Operational mode of the LAG device. MLX5_LAG_MODE_NONE means LAG is
 * inactive (see __mlx5_lag_is_active()).
 */
enum mlx5_lag_mode {
	MLX5_LAG_MODE_NONE,
	MLX5_LAG_MODE_ROCE,
	MLX5_LAG_MODE_SRIOV,
	MLX5_LAG_MODE_MULTIPATH,
	MLX5_LAG_MODE_MPESW,
};
37 
/* Per-function (per-port) state tracked by the LAG device. */
struct lag_func {
	struct mlx5_core_dev *dev;
	struct net_device    *netdev;
	bool has_drop;			/* NOTE(review): presumably a drop rule is installed on this port — confirm in lag.c */
	struct mlx5_nb port_change_nb;	/* notifier block; name suggests port-change events — TODO confirm */
};
44 
/* Used for collection of netdev event info. */
struct lag_tracker {
	enum   netdev_lag_tx_type           tx_type;
	struct netdev_lag_lower_state_info  netdev_state[MLX5_MAX_PORTS];
	unsigned int is_bonded:1;	/* bond master reported active bonding */
	unsigned int has_inactive:1;	/* at least one lower dev is inactive — TODO confirm */
	enum netdev_lag_hash hash_type;
	u32 bond_speed_mbps;		/* aggregate bond speed, megabits/sec */
};
54 
/* LAG data of a ConnectX card.
 * It serves both its phys functions.
 */
struct mlx5_lag {
	enum mlx5_lag_mode        mode;		/* current mode; NONE = inactive */
	unsigned long		  mode_flags;	/* MLX5_LAG_MODE_FLAG_* bits */
	unsigned long		  state_flags;	/* MLX5_LAG_FLAG_* bits */
	u8			  ports;
	u8			  buckets;
	int			  mode_changes_in_progress;
	/* virtual-to-physical port map, ports * buckets entries */
	u8			  v2p_map[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS];
	struct kref               ref;
	struct lag_func           pf[MLX5_MAX_PORTS];
	struct lag_tracker        tracker;
	struct workqueue_struct   *wq;
	struct delayed_work       bond_work;
	struct work_struct        speed_update_work;
	struct notifier_block     nb;
	possible_net_t net;
	struct lag_mp             lag_mp;
	struct mlx5_lag_port_sel  port_sel;
	/* Protect lag fields/state changes */
	struct mutex		  lock;
	struct lag_mpesw	  lag_mpesw;
};
80 
81 static inline struct mlx5_lag *
mlx5_lag_dev(struct mlx5_core_dev * dev)82 mlx5_lag_dev(struct mlx5_core_dev *dev)
83 {
84 	return dev->priv.lag;
85 }
86 
87 static inline bool
__mlx5_lag_is_active(struct mlx5_lag * ldev)88 __mlx5_lag_is_active(struct mlx5_lag *ldev)
89 {
90 	return ldev->mode != MLX5_LAG_MODE_NONE;
91 }
92 
93 static inline bool
mlx5_lag_is_ready(struct mlx5_lag * ldev)94 mlx5_lag_is_ready(struct mlx5_lag *ldev)
95 {
96 	return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
97 }
98 
/* Core LAG control-path API (implemented in lag.c). */
bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev);
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev);
void mlx5_modify_lag(struct mlx5_lag *ldev,
		     struct lag_tracker *tracker);
int mlx5_activate_lag(struct mlx5_lag *ldev,
		      struct lag_tracker *tracker,
		      enum mlx5_lag_mode mode,
		      bool shared_fdb);
/* Map @ndev to its slot in ldev->pf[]; presumably negative when absent — TODO confirm */
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
				struct net_device *ndev);

char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
			   u8 *ports, int *num_enabled);

/* debugfs setup/teardown and LAG device lifecycle helpers. */
void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev);
void mlx5_ldev_remove_debugfs(struct dentry *dbg);
void mlx5_disable_lag(struct mlx5_lag *ldev);
void mlx5_lag_remove_devices(struct mlx5_lag *ldev);
int mlx5_deactivate_lag(struct mlx5_lag *ldev);
void mlx5_lag_add_devices(struct mlx5_lag *ldev);
struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev);
121 
#ifdef CONFIG_MLX5_ESWITCH
void mlx5_lag_set_vports_agg_speed(struct mlx5_lag *ldev);
void mlx5_lag_reset_vports_speed(struct mlx5_lag *ldev);
#else
/* Without eswitch support, vport speed updates are no-ops. */
static inline void mlx5_lag_set_vports_agg_speed(struct mlx5_lag *ldev) {}
static inline void mlx5_lag_reset_vports_speed(struct mlx5_lag *ldev) {}
#endif
129 
mlx5_lag_is_supported(struct mlx5_core_dev * dev)130 static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
131 {
132 	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
133 	    !MLX5_CAP_GEN(dev, lag_master) ||
134 	    MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
135 	    mlx5_get_dev_index(dev) >= MLX5_MAX_PORTS ||
136 	    MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
137 		return false;
138 	return true;
139 }
140 
/* Iterate @i over the function slots of @ldev starting at @start_index,
 * advancing via mlx5_get_next_ldev_func(); the loop ends when the
 * returned index reaches MLX5_MAX_PORTS. The comma-operator condition
 * updates tmp and i before the bound check each pass.
 */
#define mlx5_ldev_for_each(i, start_index, ldev) \
	for (int tmp = start_index; tmp = mlx5_get_next_ldev_func(ldev, tmp), \
	     i = tmp, tmp < MLX5_MAX_PORTS; tmp++)

/* Reverse iteration from @start_index down to @end_index (inclusive),
 * stepping via mlx5_get_pre_ldev_func().
 */
#define mlx5_ldev_for_each_reverse(i, start_index, end_index, ldev)      \
	for (int tmp = start_index, tmp1 = end_index; \
	     tmp = mlx5_get_pre_ldev_func(ldev, tmp, tmp1), \
	     i = tmp, tmp >= tmp1; tmp--)
149 
/* Iteration/counting helpers backing the mlx5_ldev_for_each*() macros. */
int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx);
int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx);
int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq);
int mlx5_lag_num_devs(struct mlx5_lag *ldev);
int mlx5_lag_num_netdevs(struct mlx5_lag *ldev);
155 #endif /* __MLX5_LAG_H__ */
156