xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c (revision 37a93dd5c49b5fda807fd204edf2547c3493319c)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
3 
4 #include <mlx5_core.h>
5 #include "fs_pool.h"
6 
mlx5_fs_bulk_bitmap_alloc(struct mlx5_core_dev * dev,struct mlx5_fs_bulk * fs_bulk)7 int mlx5_fs_bulk_bitmap_alloc(struct mlx5_core_dev *dev,
8 			      struct mlx5_fs_bulk *fs_bulk)
9 {
10 	int i;
11 
12 	fs_bulk->bitmask = kvcalloc(BITS_TO_LONGS(fs_bulk->bulk_len),
13 				    sizeof(unsigned long), GFP_KERNEL);
14 	if (!fs_bulk->bitmask)
15 		return -ENOMEM;
16 
17 	for (i = 0; i < fs_bulk->bulk_len; i++)
18 		set_bit(i, fs_bulk->bitmask);
19 
20 	return 0;
21 }
22 
/* Record how many entries this bulk provides; the allocation bitmap is
 * created separately via mlx5_fs_bulk_bitmap_alloc().
 */
void mlx5_fs_bulk_init(struct mlx5_fs_bulk *fs_bulk, int bulk_len)
{
	fs_bulk->bulk_len = bulk_len;
}
27 
/* Free the bitmap allocated by mlx5_fs_bulk_bitmap_alloc(). */
void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk)
{
	kvfree(fs_bulk->bitmask);
}
32 
/* Number of indices still available in @bulk (set bits mean free). */
int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk)
{
	return bitmap_weight(bulk->bitmask, bulk->bulk_len);
}
37 
mlx5_fs_bulk_acquire_index(struct mlx5_fs_bulk * fs_bulk,struct mlx5_fs_pool_index * pool_index)38 static int mlx5_fs_bulk_acquire_index(struct mlx5_fs_bulk *fs_bulk,
39 				      struct mlx5_fs_pool_index *pool_index)
40 {
41 	int free_index = find_first_bit(fs_bulk->bitmask, fs_bulk->bulk_len);
42 
43 	WARN_ON_ONCE(!pool_index || !fs_bulk);
44 	if (free_index >= fs_bulk->bulk_len)
45 		return -ENOSPC;
46 
47 	clear_bit(free_index, fs_bulk->bitmask);
48 	pool_index->fs_bulk = fs_bulk;
49 	pool_index->index = free_index;
50 	return 0;
51 }
52 
/* Return @index to @fs_bulk's free set.
 * Returns -EINVAL if the bit is already set (double release), 0 otherwise.
 */
static int mlx5_fs_bulk_release_index(struct mlx5_fs_bulk *fs_bulk, int index)
{
	if (test_bit(index, fs_bulk->bitmask))
		return -EINVAL;

	set_bit(index, fs_bulk->bitmask);
	return 0;
}
61 
/* Initialize an empty pool. All three bulk lists start empty; bulks are
 * created lazily on the first mlx5_fs_pool_acquire_index() call.
 * @ops must provide bulk_create, bulk_destroy and update_threshold.
 */
void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev,
		       const struct mlx5_fs_pool_ops *ops, void *pool_ctx)
{
	WARN_ON_ONCE(!ops || !ops->bulk_destroy || !ops->bulk_create ||
		     !ops->update_threshold);
	pool->dev = dev;
	pool->pool_ctx = pool_ctx;
	mutex_init(&pool->pool_lock);
	INIT_LIST_HEAD(&pool->fully_used);
	INIT_LIST_HEAD(&pool->partially_used);
	INIT_LIST_HEAD(&pool->unused);
	pool->available_units = 0;
	pool->used_units = 0;
	pool->threshold = 0;
	pool->ops = ops;
}
78 
mlx5_fs_pool_cleanup(struct mlx5_fs_pool * pool)79 void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool)
80 {
81 	struct mlx5_core_dev *dev = pool->dev;
82 	struct mlx5_fs_bulk *bulk;
83 	struct mlx5_fs_bulk *tmp;
84 
85 	list_for_each_entry_safe(bulk, tmp, &pool->fully_used, pool_list)
86 		pool->ops->bulk_destroy(dev, bulk);
87 	list_for_each_entry_safe(bulk, tmp, &pool->partially_used, pool_list)
88 		pool->ops->bulk_destroy(dev, bulk);
89 	list_for_each_entry_safe(bulk, tmp, &pool->unused, pool_list)
90 		pool->ops->bulk_destroy(dev, bulk);
91 }
92 
93 static struct mlx5_fs_bulk *
mlx5_fs_pool_alloc_new_bulk(struct mlx5_fs_pool * fs_pool)94 mlx5_fs_pool_alloc_new_bulk(struct mlx5_fs_pool *fs_pool)
95 {
96 	struct mlx5_core_dev *dev = fs_pool->dev;
97 	struct mlx5_fs_bulk *new_bulk;
98 
99 	new_bulk = fs_pool->ops->bulk_create(dev, fs_pool->pool_ctx);
100 	if (new_bulk)
101 		fs_pool->available_units += new_bulk->bulk_len;
102 	fs_pool->ops->update_threshold(fs_pool);
103 	return new_bulk;
104 }
105 
106 static void
mlx5_fs_pool_free_bulk(struct mlx5_fs_pool * fs_pool,struct mlx5_fs_bulk * bulk)107 mlx5_fs_pool_free_bulk(struct mlx5_fs_pool *fs_pool, struct mlx5_fs_bulk *bulk)
108 {
109 	struct mlx5_core_dev *dev = fs_pool->dev;
110 
111 	fs_pool->available_units -= bulk->bulk_len;
112 	fs_pool->ops->bulk_destroy(dev, bulk);
113 	fs_pool->ops->update_threshold(fs_pool);
114 }
115 
116 static int
mlx5_fs_pool_acquire_from_list(struct list_head * src_list,struct list_head * next_list,bool move_non_full_bulk,struct mlx5_fs_pool_index * pool_index)117 mlx5_fs_pool_acquire_from_list(struct list_head *src_list,
118 			       struct list_head *next_list,
119 			       bool move_non_full_bulk,
120 			       struct mlx5_fs_pool_index *pool_index)
121 {
122 	struct mlx5_fs_bulk *fs_bulk;
123 	int err;
124 
125 	if (list_empty(src_list))
126 		return -ENODATA;
127 
128 	fs_bulk = list_first_entry(src_list, struct mlx5_fs_bulk, pool_list);
129 	err = mlx5_fs_bulk_acquire_index(fs_bulk, pool_index);
130 	if (move_non_full_bulk || mlx5_fs_bulk_get_free_amount(fs_bulk) == 0)
131 		list_move(&fs_bulk->pool_list, next_list);
132 	return err;
133 }
134 
/* Reserve one index from the pool and report it via @pool_index.
 * Search order: partially used bulks first (to pack bulks densely), then
 * unused bulks, and finally a newly created bulk.
 * Returns 0 on success or -ENOENT if a new bulk could not be created.
 */
int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool,
			       struct mlx5_fs_pool_index *pool_index)
{
	struct mlx5_fs_bulk *new_bulk;
	int err;

	mutex_lock(&fs_pool->pool_lock);

	/* A bulk that becomes full migrates to fully_used; a bulk taken
	 * from unused always migrates to partially_used.
	 */
	err = mlx5_fs_pool_acquire_from_list(&fs_pool->partially_used,
					     &fs_pool->fully_used, false,
					     pool_index);
	if (err)
		err = mlx5_fs_pool_acquire_from_list(&fs_pool->unused,
						     &fs_pool->partially_used,
						     true, pool_index);
	if (err) {
		new_bulk = mlx5_fs_pool_alloc_new_bulk(fs_pool);
		if (!new_bulk) {
			err = -ENOENT;
			goto out;
		}
		/* A freshly created bulk has every index free, so this
		 * acquire cannot fail; the WARN guards that invariant.
		 */
		err = mlx5_fs_bulk_acquire_index(new_bulk, pool_index);
		WARN_ON_ONCE(err);
		list_add(&new_bulk->pool_list, &fs_pool->partially_used);
	}
	fs_pool->available_units--;
	fs_pool->used_units++;

out:
	mutex_unlock(&fs_pool->pool_lock);
	return err;
}
167 
/* Return the index described by @pool_index to its bulk and rebalance the
 * pool's bulk lists. A bulk that gains its first free index moves to
 * partially_used; a bulk that becomes entirely free is either destroyed
 * (when the pool holds more free units than its threshold) or parked on
 * the unused list for reuse.
 * Returns 0 on success or -EINVAL on a double release.
 */
int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool,
			       struct mlx5_fs_pool_index *pool_index)
{
	struct mlx5_fs_bulk *bulk = pool_index->fs_bulk;
	int bulk_free_amount;
	int err;

	mutex_lock(&fs_pool->pool_lock);

	/* NOTE(review): the error return only reports a double release; if
	 * that WARN-worthy case were handled at the bulk level this could
	 * return void.
	 */
	err = mlx5_fs_bulk_release_index(bulk, pool_index->index);
	if (err)
		goto unlock;

	fs_pool->available_units++;
	fs_pool->used_units--;

	/* Both conditions below can hold when bulk_len == 1: the bulk is
	 * first moved to partially_used, then unlinked and freed/parked.
	 */
	bulk_free_amount = mlx5_fs_bulk_get_free_amount(bulk);
	if (bulk_free_amount == 1)
		list_move_tail(&bulk->pool_list, &fs_pool->partially_used);
	if (bulk_free_amount == bulk->bulk_len) {
		list_del(&bulk->pool_list);
		if (fs_pool->available_units > fs_pool->threshold)
			mlx5_fs_pool_free_bulk(fs_pool, bulk);
		else
			list_add(&bulk->pool_list, &fs_pool->unused);
	}

unlock:
	mutex_unlock(&fs_pool->pool_lock);
	return err;
}
200