Lines matching "pool" in drivers/infiniband/sw/rxe/rxe_pool.c (free-text search). Non-matching lines are elided by the search; gaps are reconstructed only where the surrounding code makes them unambiguous, and are otherwise marked "/* ... */".

static inline const char *pool_name(struct rxe_pool *pool)
{
        return rxe_type_info[pool->type].name;
}

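rxe_type_info[] supplies the per-type parameters used throughout this file (name, allocation size, flags, index range, key layout, cleanup hook). Its definition is not among the hits; a plausible shape for the descriptor, inferred only from the accesses in this listing, is:

/* Inferred from the accesses in this file; field order and any
 * members beyond these are assumptions, not taken from the source.
 */
struct rxe_type_info {
        const char              *name;
        size_t                  size;           /* bytes per element, used by rxe_alloc() */
        enum rxe_pool_flags     flags;
        u32                     max_index;      /* only meaningful for indexed pools */
        u32                     min_index;
        size_t                  key_offset;     /* only meaningful for keyed pools */
        size_t                  key_size;
        void                    (*cleanup)(struct rxe_pool_entry *elem);
};

The index and key fields only matter for pools whose flags request indexing or keying, which matches how rxe_pool_init() consumes them below.
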
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
        size_t size;

        if ((max - min + 1) < pool->max_elem)
                return -EINVAL;         /* index range cannot cover max_elem */

        pool->max_index = max;
        pool->min_index = min;

        size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
        pool->table = kmalloc(size, GFP_KERNEL);
        if (!pool->table)
                return -ENOMEM;

        pool->table_size = size;
        bitmap_zero(pool->table, max - min + 1);
        return 0;
}

int rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
                  enum rxe_elem_type type, unsigned int max_elem)
{
        int err = 0;
        size_t size = rxe_type_info[type].size;

        memset(pool, 0, sizeof(*pool));

        pool->rxe = rxe;
        pool->type = type;
        pool->max_elem = max_elem;
        pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
        pool->flags = rxe_type_info[type].flags;
        pool->tree = RB_ROOT;
        pool->cleanup = rxe_type_info[type].cleanup;

        atomic_set(&pool->num_elem, 0);
        kref_init(&pool->ref_cnt);
        rwlock_init(&pool->pool_lock);

        if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
                err = rxe_pool_init_index(pool,
                                          rxe_type_info[type].max_index,
                                          rxe_type_info[type].min_index);
                if (err)
                        goto out;
        }

        if (rxe_type_info[type].flags & RXE_POOL_KEY) {
                pool->key_offset = rxe_type_info[type].key_offset;
                pool->key_size = rxe_type_info[type].key_size;
        }

        pool->state = RXE_POOL_STATE_VALID;

out:
        return err;
}

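For context, a device would initialize one pool per object type during setup. A sketch of a plausible call site (the qp_pool and attr.max_qp names follow the driver's usual conventions but are assumptions here, not taken from the hits):

/* Hypothetical call site: size the QP pool from the device limits. */
static int init_qp_pool(struct rxe_dev *rxe)
{
        return rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP,
                             rxe->attr.max_qp);
}
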
static void rxe_pool_release(struct kref *kref)
{
        struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);

        pool->state = RXE_POOL_STATE_INVALID;
        kfree(pool->table);
}

static void rxe_pool_put(struct rxe_pool *pool)
{
        kref_put(&pool->ref_cnt, rxe_pool_release);
}

void rxe_pool_cleanup(struct rxe_pool *pool)
{
        unsigned long flags;

        write_lock_irqsave(&pool->pool_lock, flags);
        pool->state = RXE_POOL_STATE_INVALID;
        if (atomic_read(&pool->num_elem) > 0)
                pr_warn("%s pool destroyed with unfree'd elem\n",
                        pool_name(pool));
        write_unlock_irqrestore(&pool->pool_lock, flags);

        rxe_pool_put(pool);
}

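Teardown is the mirror of rxe_pool_init(): rxe_pool_cleanup() invalidates the pool and drops the reference taken by kref_init(), so the index bitmap is only kfree()d in rxe_pool_release() once the last element has released its own pool reference. A sketch of a plausible call site (function name assumed):

/* Hypothetical device-teardown path. */
static void dealloc_qp_pool(struct rxe_dev *rxe)
{
        rxe_pool_cleanup(&rxe->qp_pool);
}
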
static u32 alloc_index(struct rxe_pool *pool)
{
        u32 index;
        u32 range = pool->max_index - pool->min_index + 1;

        /* next-fit: resume scanning where the last allocation ended */
        index = find_next_zero_bit(pool->table, range, pool->last);
        if (index >= range)
                index = find_first_zero_bit(pool->table, range);

        WARN_ON_ONCE(index >= range);   /* callers bound num_elem, so a free bit exists */
        set_bit(index, pool->table);
        pool->last = index;
        return index + pool->min_index;
}

static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
        struct rb_node **link = &pool->tree.rb_node;
        struct rb_node *parent = NULL;

        /* ... descend on elem->index vs. new->index, tracking parent
         * (elided by the search; see the insertion sketch after
         * insert_key() below) ... */

        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, &pool->tree);
}

static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
        struct rb_node **link = &pool->tree.rb_node;

        /* ... same descent, ordered by a three-way compare on the
         * embedded key: ... */
        cmp = memcmp((u8 *)elem + pool->key_offset,
                     (u8 *)new + pool->key_offset, pool->key_size);
        /* ... */

        rb_insert_color(&new->node, &pool->tree);
}

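The descent elided from both insert functions is the standard rbtree insertion walk; a minimal sketch of what insert_key() almost certainly does, assuming the usual rb_entry()/rb_link_node() idiom (the duplicate-key handling is an assumption). insert_index() has the same shape with an integer compare on elem->index:

/* Sketch of the elided walk: find the link to hang the new node on,
 * keeping the comparison order consistent with the lookup side.
 */
static void insert_key_sketch(struct rxe_pool *pool,
                              struct rxe_pool_entry *new)
{
        struct rb_node **link = &pool->tree.rb_node;
        struct rb_node *parent = NULL;
        struct rxe_pool_entry *elem;
        int cmp;

        while (*link) {
                parent = *link;
                elem = rb_entry(parent, struct rxe_pool_entry, node);

                cmp = memcmp((u8 *)elem + pool->key_offset,
                             (u8 *)new + pool->key_offset, pool->key_size);
                if (cmp == 0)
                        return;         /* assumed: refuse duplicate keys */

                link = (cmp > 0) ? &(*link)->rb_left : &(*link)->rb_right;
        }

        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, &pool->tree);
}
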
void rxe_add_key(void *arg, void *key)
{
        struct rxe_pool_entry *elem = arg;
        struct rxe_pool *pool = elem->pool;
        unsigned long flags;

        write_lock_irqsave(&pool->pool_lock, flags);
        memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
        insert_key(pool, elem);
        write_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_drop_key(void *arg)
{
        struct rxe_pool_entry *elem = arg;
        struct rxe_pool *pool = elem->pool;
        unsigned long flags;

        write_lock_irqsave(&pool->pool_lock, flags);
        rb_erase(&elem->node, &pool->tree);
        write_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_add_index(void *arg)
{
        struct rxe_pool_entry *elem = arg;
        struct rxe_pool *pool = elem->pool;
        unsigned long flags;

        write_lock_irqsave(&pool->pool_lock, flags);
        elem->index = alloc_index(pool);
        insert_index(pool, elem);
        write_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_drop_index(void *arg)
{
        struct rxe_pool_entry *elem = arg;
        struct rxe_pool *pool = elem->pool;
        unsigned long flags;

        write_lock_irqsave(&pool->pool_lock, flags);
        clear_bit(elem->index - pool->min_index, pool->table);
        rb_erase(&elem->node, &pool->tree);
        write_unlock_irqrestore(&pool->pool_lock, flags);
}

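rxe_add_index() and rxe_drop_index() bracket the window in which an object is findable by index: add assigns elem->index and publishes it in the tree, drop retires the same bitmap bit and tree node, each as one atomic step under the pool lock. A sketch of the assumed pairing in an object's lifecycle (both function names are hypothetical):

/* Hypothetical pairing: elem->index is stable between these calls. */
static void publish_object(struct rxe_pool_entry *pelem)
{
        rxe_add_index(pelem);           /* now visible to rxe_pool_get_index() */
}

static void unpublish_object(struct rxe_pool_entry *pelem)
{
        rxe_drop_index(pelem);          /* bit and tree node retired together */
}
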
void *rxe_alloc(struct rxe_pool *pool)
{
        struct rxe_pool_entry *elem;
        unsigned long flags;

        might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));

        read_lock_irqsave(&pool->pool_lock, flags);
        if (pool->state != RXE_POOL_STATE_VALID) {
                read_unlock_irqrestore(&pool->pool_lock, flags);
                return NULL;
        }
        kref_get(&pool->ref_cnt);
        read_unlock_irqrestore(&pool->pool_lock, flags);

        if (!ib_device_try_get(&pool->rxe->ib_dev))
                goto out_put_pool;

        if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
                goto out_cnt;

        elem = kzalloc(rxe_type_info[pool->type].size,
                       (pool->flags & RXE_POOL_ATOMIC) ?
                       GFP_ATOMIC : GFP_KERNEL);
        if (!elem)
                goto out_cnt;

        elem->pool = pool;
        kref_init(&elem->ref_cnt);

        return elem;

out_cnt:
        atomic_dec(&pool->num_elem);
        ib_device_put(&pool->rxe->ib_dev);
out_put_pool:
        rxe_pool_put(pool);
        return NULL;
}

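A caller of rxe_alloc() gets back a zeroed element that already holds one kref and pins both the pool and the ib_device; a typical consumer would then make it findable by index. A sketch (names beyond those in the listing are assumptions):

/* Hypothetical consumer: allocate a pool-managed object and publish it. */
static struct rxe_pool_entry *create_indexed_object(struct rxe_pool *pool)
{
        struct rxe_pool_entry *elem = rxe_alloc(pool);

        if (!elem)
                return NULL;

        rxe_add_index(elem);            /* assigns elem->index */
        return elem;                    /* carries the initial reference */
}
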
int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
{
        unsigned long flags;

        might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));

        read_lock_irqsave(&pool->pool_lock, flags);
        if (pool->state != RXE_POOL_STATE_VALID) {
                read_unlock_irqrestore(&pool->pool_lock, flags);
                return -EINVAL;
        }
        kref_get(&pool->ref_cnt);
        read_unlock_irqrestore(&pool->pool_lock, flags);

        if (!ib_device_try_get(&pool->rxe->ib_dev))
                goto out_put_pool;

        if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
                goto out_cnt;

        elem->pool = pool;
        kref_init(&elem->ref_cnt);

        return 0;

out_cnt:
        atomic_dec(&pool->num_elem);
        ib_device_put(&pool->rxe->ib_dev);
out_put_pool:
        rxe_pool_put(pool);
        return -EINVAL;
}

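rxe_add_to_pool() is the rxe_alloc() variant for objects whose memory the RDMA core allocates (the RXE_POOL_NO_ALLOC case handled in rxe_elem_release() below): the pool entry is embedded in a core-allocated structure and only linked in here. A sketch of the assumed embedding (the struct is illustrative, not the driver's exact layout):

/* Assumed embedding for a core-allocated object such as a PD. */
struct rxe_pd_sketch {
        struct ib_pd            ibpd;   /* allocated by the RDMA core */
        struct rxe_pool_entry   pelem;  /* linked via rxe_add_to_pool() */
};

static int attach_pd(struct rxe_pool *pool, struct rxe_pd_sketch *pd)
{
        return rxe_add_to_pool(pool, &pd->pelem);
}
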
void rxe_elem_release(struct kref *kref)
{
        struct rxe_pool_entry *elem =
                container_of(kref, struct rxe_pool_entry, ref_cnt);
        struct rxe_pool *pool = elem->pool;

        if (pool->cleanup)
                pool->cleanup(elem);

        if (!(pool->flags & RXE_POOL_NO_ALLOC))
                kfree(elem);

        atomic_dec(&pool->num_elem);
        ib_device_put(&pool->rxe->ib_dev);
        rxe_pool_put(pool);
}

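Nothing in these hits decrements an element's kref; rxe_elem_release() is reached through the driver's reference-drop helper. The rxe_drop_ref() name does exist in the driver, but this exact expansion is a reconstruction:

/* Assumed expansion: the final put on an element's embedded kref
 * (pelem is the embedded struct rxe_pool_entry) fires the release.
 */
#define rxe_drop_ref(obj) \
        kref_put(&(obj)->pelem.ref_cnt, rxe_elem_release)
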
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
        struct rb_node *node = NULL;
        struct rxe_pool_entry *elem = NULL;
        unsigned long flags;

        read_lock_irqsave(&pool->pool_lock, flags);

        if (pool->state != RXE_POOL_STATE_VALID)
                goto out;

        node = pool->tree.rb_node;
        /* ... descend on elem->index, kref_get() on a hit
         * (see the lookup sketch below) ... */
out:
        read_unlock_irqrestore(&pool->pool_lock, flags);
        return node ? elem : NULL;
}

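The elided descent is a plain binary search over the index tree; a sketch, including the kref_get() that must pin a match before the read lock is released (the return convention is assumed):

/* Sketch of the elided body of rxe_pool_get_index(). */
static struct rxe_pool_entry *get_index_sketch(struct rb_node *node, u32 index)
{
        struct rxe_pool_entry *elem;

        while (node) {
                elem = rb_entry(node, struct rxe_pool_entry, node);

                if (elem->index > index)
                        node = node->rb_left;
                else if (elem->index < index)
                        node = node->rb_right;
                else {
                        kref_get(&elem->ref_cnt);       /* pin before unlock */
                        return elem;
                }
        }
        return NULL;
}
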
void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
        struct rb_node *node = NULL;
        struct rxe_pool_entry *elem = NULL;
        unsigned long flags;

        read_lock_irqsave(&pool->pool_lock, flags);

        if (pool->state != RXE_POOL_STATE_VALID)
                goto out;

        node = pool->tree.rb_node;
        /* ... descend on a three-way key compare: ... */
        cmp = memcmp((u8 *)elem + pool->key_offset,
                     key, pool->key_size);
        /* ... kref_get() on a match (see the key-lookup sketch below) ... */
out:
        read_unlock_irqrestore(&pool->pool_lock, flags);
        return node ? elem : NULL;
}

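rxe_pool_get_key() differs from the index lookup only in its comparator: the three-way memcmp() on the embedded key steers the same descent. A sketch under the same assumptions as above:

/* Sketch of the elided descent in rxe_pool_get_key(). */
static struct rxe_pool_entry *get_key_sketch(struct rxe_pool *pool,
                                             struct rb_node *node, void *key)
{
        struct rxe_pool_entry *elem;
        int cmp;

        while (node) {
                elem = rb_entry(node, struct rxe_pool_entry, node);

                cmp = memcmp((u8 *)elem + pool->key_offset,
                             key, pool->key_size);
                if (cmp > 0)
                        node = node->rb_left;
                else if (cmp < 0)
                        node = node->rb_right;
                else {
                        kref_get(&elem->ref_cnt);       /* pin before unlock */
                        return elem;
                }
        }
        return NULL;
}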