1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/bitops.h>
6 #include <linux/spinlock.h>
7
8 #include "spectrum_cnt.h"
9
/* A contiguous slice of the shared HW counter pool dedicated to one user
 * (flow counters, RIF counters). Sizing comes from devlink resources;
 * occupancy is tracked per sub-pool and reported back through devlink.
 */
struct mlxsw_sp_counter_sub_pool {
	u64 size; /* number of pool entries owned by this sub-pool */
	unsigned int base_index; /* first entry index in the pool usage bitmap */
	enum mlxsw_res_id entry_size_res_id; /* fw resource holding entry size */
	const char *resource_name; /* devlink resource name */
	u64 resource_id; /* devlink resource id */
	unsigned int entry_size; /* pool entries consumed per allocation */
	unsigned int bank_count; /* static bank budget used for sizing */
	atomic_t active_entries_count; /* occupancy exposed via devlink occ_get */
};
20
/* Top-level counter pool: one usage bitmap covering all sub-pools, plus the
 * per-sub-pool bookkeeping appended as a flexible array.
 */
struct mlxsw_sp_counter_pool {
	u64 pool_size; /* total entries, from the COUNTERS devlink resource */
	unsigned long *usage; /* Usage bitmap */
	spinlock_t counter_pool_lock; /* Protects counter pool allocations */
	atomic_t active_entries_count; /* pool-wide occupancy for devlink */
	unsigned int sub_pools_count;
	struct mlxsw_sp_counter_sub_pool sub_pools[] __counted_by(sub_pools_count);
};
29
/* Static template for the sub-pools, copied into the runtime pool at init.
 * Only the per-sub-pool constants live here; size, base_index and entry_size
 * are filled in from firmware/devlink resources at pool init time.
 */
static const struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
	[MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
		.entry_size_res_id = MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
		.resource_name = MLXSW_SP_RESOURCE_NAME_COUNTERS_FLOW,
		.resource_id = MLXSW_SP_RESOURCE_COUNTERS_FLOW,
		.bank_count = 6,
	},
	[MLXSW_SP_COUNTER_SUB_POOL_RIF] = {
		.entry_size_res_id = MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC,
		.resource_name = MLXSW_SP_RESOURCE_NAME_COUNTERS_RIF,
		.resource_id = MLXSW_SP_RESOURCE_COUNTERS_RIF,
		.bank_count = 2,
	}
};
44
mlxsw_sp_counter_sub_pool_occ_get(void * priv)45 static u64 mlxsw_sp_counter_sub_pool_occ_get(void *priv)
46 {
47 const struct mlxsw_sp_counter_sub_pool *sub_pool = priv;
48
49 return atomic_read(&sub_pool->active_entries_count);
50 }
51
mlxsw_sp_counter_sub_pools_init(struct mlxsw_sp * mlxsw_sp)52 static int mlxsw_sp_counter_sub_pools_init(struct mlxsw_sp *mlxsw_sp)
53 {
54 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
55 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
56 struct mlxsw_sp_counter_sub_pool *sub_pool;
57 unsigned int base_index = 0;
58 enum mlxsw_res_id res_id;
59 int err;
60 int i;
61
62 for (i = 0; i < pool->sub_pools_count; i++) {
63 sub_pool = &pool->sub_pools[i];
64 res_id = sub_pool->entry_size_res_id;
65
66 if (!mlxsw_core_res_valid(mlxsw_sp->core, res_id))
67 return -EIO;
68 sub_pool->entry_size = mlxsw_core_res_get(mlxsw_sp->core,
69 res_id);
70 err = devl_resource_size_get(devlink,
71 sub_pool->resource_id,
72 &sub_pool->size);
73 if (err)
74 goto err_resource_size_get;
75
76 devl_resource_occ_get_register(devlink,
77 sub_pool->resource_id,
78 mlxsw_sp_counter_sub_pool_occ_get,
79 sub_pool);
80
81 sub_pool->base_index = base_index;
82 base_index += sub_pool->size;
83 atomic_set(&sub_pool->active_entries_count, 0);
84 }
85 return 0;
86
87 err_resource_size_get:
88 for (i--; i >= 0; i--) {
89 sub_pool = &pool->sub_pools[i];
90
91 devl_resource_occ_get_unregister(devlink,
92 sub_pool->resource_id);
93 }
94 return err;
95 }
96
mlxsw_sp_counter_sub_pools_fini(struct mlxsw_sp * mlxsw_sp)97 static void mlxsw_sp_counter_sub_pools_fini(struct mlxsw_sp *mlxsw_sp)
98 {
99 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
100 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
101 struct mlxsw_sp_counter_sub_pool *sub_pool;
102 int i;
103
104 for (i = 0; i < pool->sub_pools_count; i++) {
105 sub_pool = &pool->sub_pools[i];
106
107 WARN_ON(atomic_read(&sub_pool->active_entries_count));
108 devl_resource_occ_get_unregister(devlink,
109 sub_pool->resource_id);
110 }
111 }
112
mlxsw_sp_counter_pool_occ_get(void * priv)113 static u64 mlxsw_sp_counter_pool_occ_get(void *priv)
114 {
115 const struct mlxsw_sp_counter_pool *pool = priv;
116
117 return atomic_read(&pool->active_entries_count);
118 }
119
mlxsw_sp_counter_pool_init(struct mlxsw_sp * mlxsw_sp)120 int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
121 {
122 unsigned int sub_pools_count = ARRAY_SIZE(mlxsw_sp_counter_sub_pools);
123 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
124 struct mlxsw_sp_counter_pool *pool;
125 int err;
126
127 pool = kzalloc_flex(*pool, sub_pools, sub_pools_count);
128 if (!pool)
129 return -ENOMEM;
130 mlxsw_sp->counter_pool = pool;
131 pool->sub_pools_count = sub_pools_count;
132 memcpy(pool->sub_pools, mlxsw_sp_counter_sub_pools,
133 flex_array_size(pool, sub_pools, pool->sub_pools_count));
134 spin_lock_init(&pool->counter_pool_lock);
135 atomic_set(&pool->active_entries_count, 0);
136
137 err = devl_resource_size_get(devlink, MLXSW_SP_RESOURCE_COUNTERS,
138 &pool->pool_size);
139 if (err)
140 goto err_pool_resource_size_get;
141 devl_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
142 mlxsw_sp_counter_pool_occ_get, pool);
143
144 pool->usage = bitmap_zalloc(pool->pool_size, GFP_KERNEL);
145 if (!pool->usage) {
146 err = -ENOMEM;
147 goto err_usage_alloc;
148 }
149
150 err = mlxsw_sp_counter_sub_pools_init(mlxsw_sp);
151 if (err)
152 goto err_sub_pools_init;
153
154 return 0;
155
156 err_sub_pools_init:
157 bitmap_free(pool->usage);
158 err_usage_alloc:
159 devl_resource_occ_get_unregister(devlink,
160 MLXSW_SP_RESOURCE_COUNTERS);
161 err_pool_resource_size_get:
162 kfree(pool);
163 return err;
164 }
165
mlxsw_sp_counter_pool_fini(struct mlxsw_sp * mlxsw_sp)166 void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
167 {
168 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
169 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
170
171 mlxsw_sp_counter_sub_pools_fini(mlxsw_sp);
172 WARN_ON(!bitmap_empty(pool->usage, pool->pool_size));
173 WARN_ON(atomic_read(&pool->active_entries_count));
174 bitmap_free(pool->usage);
175 devl_resource_occ_get_unregister(devlink,
176 MLXSW_SP_RESOURCE_COUNTERS);
177 kfree(pool);
178 }
179
mlxsw_sp_counter_alloc(struct mlxsw_sp * mlxsw_sp,enum mlxsw_sp_counter_sub_pool_id sub_pool_id,unsigned int * p_counter_index)180 int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
181 enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
182 unsigned int *p_counter_index)
183 {
184 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
185 struct mlxsw_sp_counter_sub_pool *sub_pool;
186 unsigned int entry_index;
187 unsigned int stop_index;
188 int i, err;
189
190 sub_pool = &pool->sub_pools[sub_pool_id];
191 stop_index = sub_pool->base_index + sub_pool->size;
192 entry_index = sub_pool->base_index;
193
194 spin_lock(&pool->counter_pool_lock);
195 entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
196 if (entry_index == stop_index) {
197 err = -ENOBUFS;
198 goto err_alloc;
199 }
200 /* The sub-pools can contain non-integer number of entries
201 * so we must check for overflow
202 */
203 if (entry_index + sub_pool->entry_size > stop_index) {
204 err = -ENOBUFS;
205 goto err_alloc;
206 }
207 for (i = 0; i < sub_pool->entry_size; i++)
208 __set_bit(entry_index + i, pool->usage);
209 spin_unlock(&pool->counter_pool_lock);
210
211 *p_counter_index = entry_index;
212 atomic_add(sub_pool->entry_size, &sub_pool->active_entries_count);
213 atomic_add(sub_pool->entry_size, &pool->active_entries_count);
214 return 0;
215
216 err_alloc:
217 spin_unlock(&pool->counter_pool_lock);
218 return err;
219 }
220
mlxsw_sp_counter_free(struct mlxsw_sp * mlxsw_sp,enum mlxsw_sp_counter_sub_pool_id sub_pool_id,unsigned int counter_index)221 void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
222 enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
223 unsigned int counter_index)
224 {
225 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
226 struct mlxsw_sp_counter_sub_pool *sub_pool;
227 int i;
228
229 if (WARN_ON(counter_index >= pool->pool_size))
230 return;
231 sub_pool = &pool->sub_pools[sub_pool_id];
232 spin_lock(&pool->counter_pool_lock);
233 for (i = 0; i < sub_pool->entry_size; i++)
234 __clear_bit(counter_index + i, pool->usage);
235 spin_unlock(&pool->counter_pool_lock);
236 atomic_sub(sub_pool->entry_size, &sub_pool->active_entries_count);
237 atomic_sub(sub_pool->entry_size, &pool->active_entries_count);
238 }
239
mlxsw_sp_counter_resources_register(struct mlxsw_core * mlxsw_core)240 int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core)
241 {
242 static struct devlink_resource_size_params size_params;
243 struct devlink *devlink = priv_to_devlink(mlxsw_core);
244 const struct mlxsw_sp_counter_sub_pool *sub_pool;
245 unsigned int total_bank_config;
246 u64 sub_pool_size;
247 u64 base_index;
248 u64 pool_size;
249 u64 bank_size;
250 int err;
251 int i;
252
253 if (!MLXSW_CORE_RES_VALID(mlxsw_core, COUNTER_POOL_SIZE) ||
254 !MLXSW_CORE_RES_VALID(mlxsw_core, COUNTER_BANK_SIZE))
255 return -EIO;
256
257 pool_size = MLXSW_CORE_RES_GET(mlxsw_core, COUNTER_POOL_SIZE);
258 bank_size = MLXSW_CORE_RES_GET(mlxsw_core, COUNTER_BANK_SIZE);
259
260 devlink_resource_size_params_init(&size_params, pool_size,
261 pool_size, bank_size,
262 DEVLINK_RESOURCE_UNIT_ENTRY);
263 err = devl_resource_register(devlink,
264 MLXSW_SP_RESOURCE_NAME_COUNTERS,
265 pool_size,
266 MLXSW_SP_RESOURCE_COUNTERS,
267 DEVLINK_RESOURCE_ID_PARENT_TOP,
268 &size_params);
269 if (err)
270 return err;
271
272 /* Allocation is based on bank count which should be
273 * specified for each sub pool statically.
274 */
275 total_bank_config = 0;
276 base_index = 0;
277 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
278 sub_pool = &mlxsw_sp_counter_sub_pools[i];
279 sub_pool_size = sub_pool->bank_count * bank_size;
280 /* The last bank can't be fully used */
281 if (base_index + sub_pool_size > pool_size)
282 sub_pool_size = pool_size - base_index;
283 base_index += sub_pool_size;
284
285 devlink_resource_size_params_init(&size_params, sub_pool_size,
286 sub_pool_size, bank_size,
287 DEVLINK_RESOURCE_UNIT_ENTRY);
288 err = devl_resource_register(devlink,
289 sub_pool->resource_name,
290 sub_pool_size,
291 sub_pool->resource_id,
292 MLXSW_SP_RESOURCE_COUNTERS,
293 &size_params);
294 if (err)
295 return err;
296 total_bank_config += sub_pool->bank_count;
297 }
298
299 /* Check config is valid, no bank over subscription */
300 if (WARN_ON(total_bank_config > div64_u64(pool_size, bank_size) + 1))
301 return -EINVAL;
302
303 return 0;
304 }
305