/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _MLX5_FS_CORE_
#define _MLX5_FS_CORE_

#include <linux/refcount.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <linux/llist.h>
#include <steering/sws/fs_dr.h>
#include <steering/hws/fs_hws.h>

#define FDB_TC_MAX_CHAIN 3
#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1)

/* The index of the last real chain (FT) + 1, as chain zero is valid as well */
#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)

#define FDB_TC_MAX_PRIO 16
#define FDB_TC_LEVELS_PER_PRIO 2

struct mlx5_flow_definer {
	enum mlx5_flow_namespace_type ns_type;
	u32 id;
};

enum mlx5_flow_resource_owner {
	MLX5_FLOW_RESOURCE_OWNER_FW,
	MLX5_FLOW_RESOURCE_OWNER_SW,
};

struct mlx5_modify_hdr {
	enum mlx5_flow_namespace_type ns_type;
	enum mlx5_flow_resource_owner owner;
	union {
		struct mlx5_fs_dr_action fs_dr_action;
		struct mlx5_fs_hws_action fs_hws_action;
		u32 id;
	};
};

struct mlx5_pkt_reformat {
	enum mlx5_flow_namespace_type ns_type;
	int reformat_type; /* from mlx5_ifc */
	enum mlx5_flow_resource_owner owner;
	union {
		struct mlx5_fs_dr_action fs_dr_action;
		struct mlx5_fs_hws_action fs_hws_action;
		u32 id;
	};
};

/* FS_TYPE_PRIO_CHAINS is a PRIO that will have namespaces only,
 * and those are in parallel to one another when going over them to connect
 * a new flow table. Meaning the last flow table in a TYPE_PRIO prio in one
 * parallel namespace will not automatically connect to the first flow table
 * found in any prio in any next namespace, but will skip the entire
 * containing TYPE_PRIO_CHAINS prio.
 *
 * This is used to implement tc chains: each chain of prios is a different
 * namespace inside a containing TYPE_PRIO_CHAINS prio.
 */
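/* Illustrative sketch (for documentation only, not part of this header):
 * with two tc chains, the tree looks roughly like
 *
 *	TYPE_PRIO_CHAINS prio
 *	  namespace (chain 0): prio0 -> FT_a -> FT_b
 *	  namespace (chain 1): prio0 -> FT_c
 *
 * FT_b, the last table of chain 0, is not connected to FT_c; the connection
 * skips past the whole containing TYPE_PRIO_CHAINS prio instead.
 */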
enum fs_node_type {
	FS_TYPE_NAMESPACE,
	FS_TYPE_PRIO,
	FS_TYPE_PRIO_CHAINS,
	FS_TYPE_FLOW_TABLE,
	FS_TYPE_FLOW_GROUP,
	FS_TYPE_FLOW_ENTRY,
	FS_TYPE_FLOW_DEST
};

enum fs_flow_table_type {
	FS_FT_NIC_RX = 0x0,
	FS_FT_NIC_TX = 0x1,
	FS_FT_ESW_EGRESS_ACL = 0x2,
	FS_FT_ESW_INGRESS_ACL = 0x3,
	FS_FT_FDB = 0x4,
	FS_FT_SNIFFER_RX = 0x5,
	FS_FT_SNIFFER_TX = 0x6,
	FS_FT_RDMA_RX = 0x7,
	FS_FT_RDMA_TX = 0x8,
	FS_FT_PORT_SEL = 0x9,
	FS_FT_FDB_RX = 0xa,
	FS_FT_FDB_TX = 0xb,
	FS_FT_RDMA_TRANSPORT_RX = 0xd,
	FS_FT_RDMA_TRANSPORT_TX = 0xe,
	FS_FT_MAX_TYPE = FS_FT_RDMA_TRANSPORT_TX,
};

enum fs_flow_table_op_mod {
	FS_FT_OP_MOD_NORMAL,
	FS_FT_OP_MOD_LAG_DEMUX,
};

enum fs_fte_status {
	FS_FTE_STATUS_EXISTING = 1UL << 0,
};

enum mlx5_flow_steering_mode {
	MLX5_FLOW_STEERING_MODE_DMFS,
	MLX5_FLOW_STEERING_MODE_SMFS,
	MLX5_FLOW_STEERING_MODE_HMFS,
};

enum mlx5_flow_steering_capabilty {
	MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX = 1UL << 0,
	MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX = 1UL << 1,
	MLX5_FLOW_STEERING_CAP_MATCH_RANGES = 1UL << 2,
	MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH = 1UL << 3,
};

struct mlx5_flow_steering {
	struct mlx5_core_dev *dev;
	enum mlx5_flow_steering_mode mode;
	struct kmem_cache *fgs_cache;
	struct kmem_cache *ftes_cache;
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_root_namespace *fdb_root_ns;
	struct mlx5_flow_namespace **fdb_sub_ns;
	struct mlx5_flow_root_namespace **esw_egress_root_ns;
	struct mlx5_flow_root_namespace **esw_ingress_root_ns;
	struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
	struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
	struct mlx5_flow_root_namespace *rdma_rx_root_ns;
	struct mlx5_flow_root_namespace *rdma_tx_root_ns;
	struct mlx5_flow_root_namespace *egress_root_ns;
	struct mlx5_flow_root_namespace *port_sel_root_ns;
	int esw_egress_acl_vports;
	int esw_ingress_acl_vports;
	struct mlx5_flow_root_namespace **rdma_transport_rx_root_ns;
	struct mlx5_flow_root_namespace **rdma_transport_tx_root_ns;
	int rdma_transport_rx_vports;
	int rdma_transport_tx_vports;
};

struct fs_node {
	struct list_head list;
	struct list_head children;
	enum fs_node_type type;
	struct fs_node *parent;
	struct fs_node *root;
	/* lock the node for writing and traversing */
	struct rw_semaphore lock;
	refcount_t refcount;
	bool active;
	void (*del_hw_func)(struct fs_node *);
	void (*del_sw_func)(struct fs_node *);
	atomic_t version;
};
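/* Illustrative note (a sketch for documentation only, not part of this
 * header): every steering object below embeds a struct fs_node, and the
 * objects form a tree through node.parent/node.children:
 *
 *	namespace -> prio -> flow table -> flow group -> fte -> rule/dest
 *
 * The containing object is recovered from a node pointer with the
 * fs_get_obj() macro defined later in this file.
 */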
struct mlx5_flow_rule {
	struct fs_node node;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_destination dest_attr;
	/* next_ft should be accessed under chain_lock and only if the
	 * destination type is FWD_NEXT_FT.
	 */
	struct list_head next_ft;
	u32 sw_action;
};

struct mlx5_flow_handle {
	int num_rules;
	struct mlx5_flow_rule *rule[] __counted_by(num_rules);
};

/* Type of children is mlx5_flow_group */
struct mlx5_flow_table {
	struct fs_node node;
	union {
		struct mlx5_fs_dr_table fs_dr_table;
		struct mlx5_fs_hws_table fs_hws_table;
	};
	u32 id;
	u16 vport;
	unsigned int max_fte;
	unsigned int level;
	enum fs_flow_table_type type;
	enum fs_flow_table_op_mod op_mod;
	struct {
		bool active;
		unsigned int required_groups;
		unsigned int group_size;
		unsigned int num_groups;
		unsigned int max_fte;
	} autogroup;
	/* Protect fwd_rules */
	struct mutex lock;
	/* FWD rules that point on this flow table */
	struct list_head fwd_rules;
	u32 flags;
	struct rhltable fgs_hash;
	enum mlx5_flow_table_miss_action def_miss_action;
	struct mlx5_flow_namespace *ns;
};

struct mlx5_ft_underlay_qp {
	struct list_head list;
	u32 qpn;
};

#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_e00
/* Calculate the fte_match_param length without the reserved length.
 * Make sure the reserved field is the last one.
 */
#define MLX5_ST_SZ_DW_MATCH_PARAM \
	((MLX5_BYTE_OFF(fte_match_param, MLX5_FTE_MATCH_PARAM_RESERVED) / sizeof(u32)) + \
	 BUILD_BUG_ON_ZERO(MLX5_ST_SZ_BYTES(fte_match_param) != \
			   MLX5_FLD_SZ_BYTES(fte_match_param, \
					     MLX5_FTE_MATCH_PARAM_RESERVED) + \
			   MLX5_BYTE_OFF(fte_match_param, \
					 MLX5_FTE_MATCH_PARAM_RESERVED)))

struct fs_fte_action {
	int modify_mask;
	u32 dests_size;
	u32 fwd_dests;
	struct mlx5_flow_context flow_context;
	struct mlx5_flow_act action;
};

struct fs_fte_dup {
	struct list_head children;
	struct fs_fte_action act_dests;
};

/* Type of children is mlx5_flow_rule */
struct fs_fte {
	struct fs_node node;
	union {
		struct mlx5_fs_dr_rule fs_dr_rule;
		struct mlx5_fs_hws_rule fs_hws_rule;
	};
	u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
	struct fs_fte_action act_dests;
	struct fs_fte_dup *dup;
	u32 index;
	enum fs_fte_status status;
	struct rhash_head hash;
};

/* Type of children is mlx5_flow_table/namespace */
struct fs_prio {
	struct fs_node node;
	unsigned int num_levels;
	unsigned int start_level;
	unsigned int prio;
	unsigned int num_ft;
};

/* Type of children is fs_prio */
struct mlx5_flow_namespace {
	/* parent == NULL => root ns */
	struct fs_node node;
	enum mlx5_flow_table_miss_action def_miss_action;
};

struct mlx5_flow_group_mask {
	u8 match_criteria_enable;
	u32 match_criteria[MLX5_ST_SZ_DW_MATCH_PARAM];
};

/* Type of children is fs_fte */
struct mlx5_flow_group {
	struct fs_node node;
	union {
		struct mlx5_fs_dr_matcher fs_dr_matcher;
		struct mlx5_fs_hws_matcher fs_hws_matcher;
	};
	struct mlx5_flow_group_mask mask;
	u32 start_index;
	u32 max_ftes;
	struct ida fte_allocator;
	u32 id;
	struct rhashtable ftes_hash;
	struct rhlist_head hash;
};
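/* Illustrative note (an interpretation of the fields above, not a statement
 * from this header): a group covers the FTE slots in
 * [start_index, start_index + max_ftes); fte_allocator hands out a slot
 * within the group, ftes_hash looks up the group's FTEs by match value,
 * and 'hash' links the group into the owning table's fgs_hash rhltable.
 */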
struct mlx5_flow_root_namespace {
	struct mlx5_flow_namespace ns;
	enum mlx5_flow_steering_mode mode;
	union {
		struct mlx5_fs_dr_domain fs_dr_domain;
		struct mlx5_fs_hws_context fs_hws_context;
	};
	enum fs_flow_table_type table_type;
	struct mlx5_core_dev *dev;
	struct mlx5_flow_table *root_ft;
	/* Should be held when chaining flow tables */
	struct mutex chain_lock;
	struct list_head underlay_qpns;
	const struct mlx5_flow_cmds *cmds;
};

enum mlx5_fc_type {
	MLX5_FC_TYPE_ACQUIRED = 0,
	MLX5_FC_TYPE_LOCAL,
};

struct mlx5_fc_cache {
	u64 packets;
	u64 bytes;
	u64 lastuse;
};

struct mlx5_fc {
	u32 id;
	bool aging;
	enum mlx5_fc_type type;
	struct mlx5_fc_bulk *bulk;
	struct mlx5_fc_cache cache;
	/* last{packets,bytes} are used for calculating deltas since last reading. */
	u64 lastpackets;
	u64 lastbytes;
};

struct mlx5_fc_bulk {
	struct mlx5_fs_bulk fs_bulk;
	u32 base_id;
	struct mlx5_fs_hws_data hws_data;
	struct mlx5_fc fcs[];
};

u32 mlx5_fc_get_base_id(struct mlx5_fc *counter);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
			      struct delayed_work *dwork,
			      unsigned long delay);
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
				      unsigned long interval);

const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);

int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_root_namespace *peer_ns,
				 u16 peer_vhca_id);

int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
				 enum mlx5_flow_steering_mode mode);

int mlx5_fs_core_alloc(struct mlx5_core_dev *dev);
void mlx5_fs_core_free(struct mlx5_core_dev *dev);
int mlx5_fs_core_init(struct mlx5_core_dev *dev);
void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);

int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports);
void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);

u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type);

struct mlx5_flow_root_namespace *find_root(struct fs_node *node);

#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); }

#define fs_list_for_each_entry(pos, root) \
	list_for_each_entry(pos, root, node.list)

#define fs_list_for_each_entry_safe(pos, tmp, root) \
	list_for_each_entry_safe(pos, tmp, root, node.list)

#define fs_for_each_ns_or_ft_reverse(pos, prio) \
	list_for_each_entry_reverse(pos, &(prio)->node.children, list)

#define fs_for_each_ns_or_ft(pos, prio) \
	list_for_each_entry(pos, (&(prio)->node.children), list)

#define fs_for_each_prio(pos, ns) \
	fs_list_for_each_entry(pos, &(ns)->node.children)

#define fs_for_each_ns(pos, prio) \
	fs_list_for_each_entry(pos, &(prio)->node.children)

#define fs_for_each_ft(pos, prio) \
	fs_list_for_each_entry(pos, &(prio)->node.children)

#define fs_for_each_ft_safe(pos, tmp, prio) \
	fs_list_for_each_entry_safe(pos, tmp, &(prio)->node.children)

#define fs_for_each_fg(pos, ft) \
	fs_list_for_each_entry(pos, &(ft)->node.children)

#define fs_for_each_fte(pos, fg) \
	fs_list_for_each_entry(pos, &(fg)->node.children)

#define fs_for_each_dst(pos, fte) \
	fs_list_for_each_entry(pos, &(fte)->node.children)
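/* Illustrative usage (a minimal sketch, not part of this header): walking a
 * flow table's groups and their FTEs with the helpers above, assuming the
 * relevant node locks are held by the caller:
 *
 *	struct mlx5_flow_group *fg;
 *	struct fs_fte *fte;
 *
 *	fs_for_each_fg(fg, ft) {		// children of a table are groups
 *		fs_for_each_fte(fte, fg)	// children of a group are FTEs
 *			pr_debug("fte %u in fg %u\n", fte->index, fg->id);
 *	}
 *
 * fs_get_obj() goes the other way: given a struct fs_node *node embedded in
 * a flow table, "fs_get_obj(ft, node);" recovers the containing
 * struct mlx5_flow_table via container_of().
 */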
#define MLX5_CAP_FLOWTABLE_TYPE(mdev, cap, type) ( \
	(type == FS_FT_NIC_RX) ? MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) : \
	(type == FS_FT_NIC_TX) ? MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) : \
	(type == FS_FT_ESW_EGRESS_ACL) ? MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) : \
	(type == FS_FT_ESW_INGRESS_ACL) ? MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) : \
	(type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
	(type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \
	(type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
	(type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
	(type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \
	(type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
	(type == FS_FT_FDB_RX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
	(type == FS_FT_FDB_TX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
	(type == FS_FT_RDMA_TRANSPORT_RX) ? MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(mdev, cap) : \
	(type == FS_FT_RDMA_TRANSPORT_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(mdev, cap) : \
	(BUILD_BUG_ON_ZERO(FS_FT_RDMA_TRANSPORT_TX != FS_FT_MAX_TYPE)) \
	)

#endif