1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
3
4 #ifndef HWS_BWC_H_
5 #define HWS_BWC_H_
6
/* log2 of the initial matcher table size. */
#define MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG 1
/* log2 growth step applied when a matcher is resized. */
#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
/* Occupancy percentage above which a matcher rehash is triggered. */
#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70
/* Burst threshold used by the rehash decision logic.
 * NOTE(review): exact semantics defined in the .c file — confirm there.
 */
#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32

/* Max number of AT attach operations for the same matcher.
 * When the limit is reached, a larger buffer is allocated for the ATs.
 */
#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 8

/* Max number of actions per BWC rule. */
#define MLX5HWS_BWC_MAX_ACTS 16

/* Timeout for polling queue completions (presumably seconds — verify
 * against the users of this constant in the .c file).
 */
#define MLX5HWS_BWC_POLLING_TIMEOUT 60
20
/* Role of a BWC matcher: standalone, or part of a complex matcher. */
enum mlx5hws_bwc_matcher_type {
	/* Standalone bwc matcher. */
	MLX5HWS_BWC_MATCHER_SIMPLE,
	/* The first matcher of a complex matcher. When rules are inserted into
	 * a matcher of this type, they are split into subrules and inserted
	 * into their corresponding submatchers.
	 */
	MLX5HWS_BWC_MATCHER_COMPLEX_FIRST,
	/* A submatcher that is part of a complex matcher. For most purposes
	 * these are treated as simple matchers, except when it comes to moving
	 * rules during resize.
	 */
	MLX5HWS_BWC_MATCHER_COMPLEX_SUBMATCHER,
};
35
struct mlx5hws_bwc_matcher_complex_data;

/* Per-direction (RX/TX) size state of a BWC matcher. */
struct mlx5hws_bwc_matcher_size {
	/* log2 of the current table size for this direction. */
	u8 size_log;
	/* Number of rules currently inserted in this direction. */
	atomic_t num_of_rules;
	/* Set when the occupancy/burst thresholds indicate a rehash
	 * is needed for this direction.
	 */
	atomic_t rehash_required;
};
43
/* BWC (backward-compatible) matcher: wraps an HWS matcher together with
 * its templates and resize/rehash bookkeeping.
 */
struct mlx5hws_bwc_matcher {
	/* Underlying HWS matcher. */
	struct mlx5hws_matcher *matcher;
	/* Match template describing the match fields. */
	struct mlx5hws_match_template *mt;
	/* Array of action templates; grows when the attach limit is hit. */
	struct mlx5hws_action_template **at;
	/* Extra state for complex matchers; presumably NULL for simple
	 * matchers — confirm in the .c file.
	 */
	struct mlx5hws_bwc_matcher_complex_data *complex;
	/* Number of action templates currently in use. */
	u8 num_of_at;
	/* Allocated capacity of the 'at' array. */
	u8 size_of_at_array;
	/* Simple / complex-first / complex-submatcher role. */
	enum mlx5hws_bwc_matcher_type matcher_type;
	/* Matcher priority within its table. */
	u32 priority;
	/* Per-direction size state. */
	struct mlx5hws_bwc_matcher_size rx_size;
	struct mlx5hws_bwc_matcher_size tx_size;
	/* Per-queue rule lists (array indexed by BWC queue —
	 * NOTE(review): confirm indexing against the .c file).
	 */
	struct list_head *rules;
};
57
/* A single BWC rule; for complex matchers, rules are chained into
 * subrules via 'next_subrule'.
 */
struct mlx5hws_bwc_rule {
	/* Owning matcher. */
	struct mlx5hws_bwc_matcher *bwc_matcher;
	/* Underlying HWS rule. */
	struct mlx5hws_rule *rule;
	/* Next subrule of a complex rule, or presumably NULL for the last /
	 * for simple rules.
	 */
	struct mlx5hws_bwc_rule *next_subrule;
	/* Per-subrule data for complex matchers. */
	struct mlx5hws_bwc_complex_subrule_data *subrule_data;
	/* Flow source attribute the rule was created with. */
	u32 flow_source;
	/* Index of the BWC queue this rule was inserted through. */
	u16 bwc_queue_idx;
	/* Skip insertion in the RX/TX direction respectively. */
	bool skip_rx;
	bool skip_tx;
	/* Node in the matcher's per-queue rule list. */
	struct list_head list_node;
};
69
/* Create a simple BWC matcher on 'table' with the given priority, match
 * mask and expected action types. Returns 0 on success, negative errno
 * style error otherwise (confirm convention in the .c file).
 */
int
mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
				  struct mlx5hws_table *table,
				  u32 priority,
				  u8 match_criteria_enable,
				  struct mlx5hws_match_parameters *mask,
				  enum mlx5hws_action_type action_types[]);

/* Destroy a matcher created by mlx5hws_bwc_matcher_create_simple(). */
int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher);

/* Allocate a rule object bound to 'bwc_matcher'; free with
 * mlx5hws_bwc_rule_free().
 */
struct mlx5hws_bwc_rule *mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher);

/* Free a rule object obtained from mlx5hws_bwc_rule_alloc(). */
void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule);

/* Insert a previously allocated rule with the given match values and
 * actions, through BWC queue 'bwc_queue_idx'.
 */
int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
				   u32 *match_param,
				   struct mlx5hws_rule_action rule_actions[],
				   u32 flow_source,
				   u16 bwc_queue_idx);

/* Remove a rule inserted by mlx5hws_bwc_rule_create_simple(). */
int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule);

/* Fill 'rule_attr' with the queue id and flow source used for rule
 * operations on 'bwc_matcher'.
 */
void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
				u16 bwc_queue_idx,
				u32 flow_source,
				struct mlx5hws_rule_attr *rule_attr);

/* Poll queue 'queue_id' for completions, decrementing '*pending_rules'
 * as they arrive; when 'drain' is set, presumably waits until all
 * pending completions are consumed — confirm in the .c file.
 */
int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
			   u16 queue_id,
			   u32 *pending_rules,
			   bool drain);
101
mlx5hws_bwc_queues(struct mlx5hws_context * ctx)102 static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
103 {
104 /* Besides the control queue, half of the queues are
105 * regular HWS queues, and the other half are BWC queues.
106 */
107 if (mlx5hws_context_bwc_supported(ctx))
108 return (ctx->queues - 1) / 2;
109 return 0;
110 }
111
/* Map a BWC queue index to its absolute queue id.
 *
 * BWC queues occupy the second half of the queue range, so the id is
 * the index offset by the number of regular HWS queues (which equals
 * the number of BWC queues).
 */
static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
{
	u16 first_bwc_queue_id = mlx5hws_bwc_queues(ctx);

	return first_bwc_queue_id + idx;
}
116
117 #endif /* HWS_BWC_H_ */
118