Lines Matching refs:pool

194 struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
200 spin_lock_irqsave(&pool->clean_lock, flags);
201 ret = llist_del_first(&pool->clean_list);
202 spin_unlock_irqrestore(&pool->clean_lock, flags);
205 if (pool->pool_type == RDS_IB_MR_8K_POOL)
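
These fragments appear to come from the RDS RDMA memory-registration (MR) pool code in the Linux kernel, most likely net/rds/ib_rdma.c. The lines at 194-205 are rds_ib_reuse_mr(), which pops one already-clean MR off pool->clean_list under pool->clean_lock and bumps a per-pool-size statistic. A sketch of the full function, reassembled around the quoted lines; the llnode member and the rds_ib_stats_inc() counter names are reconstructions from memory, not visible above:

struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
        struct rds_ib_mr *ibmr = NULL;
        struct llist_node *ret;
        unsigned long flags;

        /* clean_list is an llist; llist_del_first() from several CPUs at
         * once is not safe, hence the clean_lock spinlock around it. */
        spin_lock_irqsave(&pool->clean_lock, flags);
        ret = llist_del_first(&pool->clean_list);
        spin_unlock_irqrestore(&pool->clean_lock, flags);

        if (ret) {
                ibmr = llist_entry(ret, struct rds_ib_mr, llnode);      /* member name assumed */
                if (pool->pool_type == RDS_IB_MR_8K_POOL)
                        rds_ib_stats_inc(s_ib_rdma_mr_8k_reuse);        /* counter names assumed */
                else
                        rds_ib_stats_inc(s_ib_rdma_mr_1m_reuse);
        }

        return ibmr;
}

The lock protects only the consumer side: producers can still push onto clean_list locklessly, which is why the splice-back in the flush path (lines 417-420 below) takes the same clean_lock.
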
271 struct rds_ib_mr_pool *pool = ibmr->pool;
273 atomic_sub(pinned, &pool->free_pinned);
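
Lines 271-273 sit in the MR teardown path: once an MR's pages have been unmapped and unpinned, the pinned-page count it contributed is subtracted from its owning pool. A minimal sketch of the surrounding function; the names rds_ib_teardown_mr() and __rds_ib_teardown_mr() are assumptions about this file's structure, not visible in the fragments:

void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        unsigned int pinned = ibmr->sg_len;

        __rds_ib_teardown_mr(ibmr);     /* helper name assumed: unmaps and unpins the sg list */
        if (pinned) {
                struct rds_ib_mr_pool *pool = ibmr->pool;

                atomic_sub(pinned, &pool->free_pinned);
        }
        ibmr->sg_len = 0;
}
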
277 static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
281 item_count = atomic_read(&pool->item_count);
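
rds_ib_flush_goal() (lines 277-281) decides how many MRs a flush should actually destroy: everything the pool is tracking when the caller asked for free_all, nothing otherwise. A sketch consistent with the two quoted lines; the early-return shape is an assumption:

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
        unsigned int item_count;

        item_count = atomic_read(&pool->item_count);
        if (free_all)
                return item_count;      /* teardown: free every MR we are tracking */

        return 0;                       /* normal flush: no hard goal */
}
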
333 * Flush our pool of MRs.
338 int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
348 if (pool->pool_type == RDS_IB_MR_8K_POOL)
355 while (!mutex_trylock(&pool->flush_lock)) {
356 ibmr = rds_ib_reuse_mr(pool);
359 finish_wait(&pool->flush_wait, &wait);
363 prepare_to_wait(&pool->flush_wait, &wait,
365 if (llist_empty(&pool->clean_list))
368 ibmr = rds_ib_reuse_mr(pool);
371 finish_wait(&pool->flush_wait, &wait);
375 finish_wait(&pool->flush_wait, &wait);
377 mutex_lock(&pool->flush_lock);
380 ibmr = rds_ib_reuse_mr(pool);
390 dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
391 dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
395 spin_lock_irqsave(&pool->clean_lock, flags);
396 llist_append_to_list(&pool->clean_list, &unmap_list);
397 spin_unlock_irqrestore(&pool->clean_lock, flags);
400 free_goal = rds_ib_flush_goal(pool, free_all);
417 spin_lock_irqsave(&pool->clean_lock, flags);
419 &pool->clean_list);
420 spin_unlock_irqrestore(&pool->clean_lock, flags);
424 atomic_sub(unpinned, &pool->free_pinned);
425 atomic_sub(dirty_to_clean, &pool->dirty_count);
426 atomic_sub(nfreed, &pool->item_count);
429 mutex_unlock(&pool->flush_lock);
430 if (waitqueue_active(&pool->flush_wait))
431 wake_up(&pool->flush_wait);
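
rds_ib_flush_mr_pool() (lines 333-431) is the heart of the pool: it drains the dirty drop_list and free_list onto a private unmap_list, unmaps and possibly destroys those MRs, splices the survivors back onto clean_list, and then repairs the pool counters. Callers that only need one reusable MR pass ibmr_ret and, rather than blocking on flush_lock, keep retrying rds_ib_reuse_mr() and sleeping on flush_wait until a clean MR appears or the lock frees up. A condensed, hedged sketch of that flow; local variable names are guesses, and the unmap/destroy step in the middle is left as a comment because its FRMR-specific details are not visible in the fragments above:

int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
                         int free_all, struct rds_ib_mr **ibmr_ret)
{
        struct rds_ib_mr *ibmr;
        LIST_HEAD(unmap_list);
        unsigned long unpinned = 0;
        unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;
        unsigned long flags;

        if (ibmr_ret) {
                DEFINE_WAIT(wait);

                /* Opportunistic path (lines 355-375): while somebody else
                 * holds flush_lock, try to grab a clean MR, sleeping on
                 * flush_wait whenever clean_list is empty. */
                while (!mutex_trylock(&pool->flush_lock)) {
                        ibmr = rds_ib_reuse_mr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                return 0;
                        }
                        prepare_to_wait(&pool->flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (llist_empty(&pool->clean_list))
                                schedule();
                        ibmr = rds_ib_reuse_mr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                return 0;
                        }
                }
                finish_wait(&pool->flush_wait, &wait);
        } else {
                mutex_lock(&pool->flush_lock);
        }

        if (ibmr_ret) {
                ibmr = rds_ib_reuse_mr(pool);
                if (ibmr) {
                        *ibmr_ret = ibmr;
                        goto out;
                }
        }

        /* Drain both dirty lists onto a private list we can walk safely. */
        dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
        dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
        if (free_all) {
                spin_lock_irqsave(&pool->clean_lock, flags);
                llist_append_to_list(&pool->clean_list, &unmap_list);
                spin_unlock_irqrestore(&pool->clean_lock, flags);
        }

        free_goal = rds_ib_flush_goal(pool, free_all);

        /* ... unmap/invalidate every MR on unmap_list here, destroying up
         * to free_goal of them while accumulating nfreed and unpinned; the
         * survivors are spliced back onto pool->clean_list under
         * pool->clean_lock (lines 417-420 above). */

        atomic_sub(unpinned, &pool->free_pinned);
        atomic_sub(dirty_to_clean, &pool->dirty_count);
        atomic_sub(nfreed, &pool->item_count);

out:
        mutex_unlock(&pool->flush_lock);
        if (waitqueue_active(&pool->flush_wait))
                wake_up(&pool->flush_wait);
        return 0;
}

The wake_up() at the end is what releases any allocator that parked itself on flush_wait in the opportunistic path above.
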
436 struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
442 ibmr = rds_ib_reuse_mr(pool);
446 if (atomic_inc_return(&pool->item_count) <= pool->max_items)
449 atomic_dec(&pool->item_count);
452 if (pool->pool_type == RDS_IB_MR_8K_POOL)
460 if (pool->pool_type == RDS_IB_MR_8K_POOL)
465 rds_ib_flush_mr_pool(pool, 0, &ibmr);
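
rds_ib_try_reuse_ibmr() (lines 436-465) is the allocation front end: grab a clean MR if one exists, otherwise reserve a slot in item_count for a new one; if the pool is full, back off, record a statistic, and run a synchronous flush in the hope of stealing a freshly cleaned MR. A sketch of the loop; the statistic names and the exact give-up return after repeated failures are assumptions:

struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
{
        struct rds_ib_mr *ibmr = NULL;
        int iter = 0;

        while (1) {
                ibmr = rds_ib_reuse_mr(pool);
                if (ibmr)
                        return ibmr;

                /* Reserve room for a brand-new MR if the pool has space. */
                if (atomic_inc_return(&pool->item_count) <= pool->max_items)
                        break;

                atomic_dec(&pool->item_count);

                if (++iter > 2) {
                        /* Pool depleted; the -EAGAIN return is an assumption
                         * not visible in the fragments above. */
                        if (pool->pool_type == RDS_IB_MR_8K_POOL)
                                rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
                        else
                                rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
                        return ERR_PTR(-EAGAIN);
                }

                /* There are dirty MRs around: flush and try to steal one. */
                if (pool->pool_type == RDS_IB_MR_8K_POOL)
                        rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
                else
                        rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);

                rds_ib_flush_mr_pool(pool, 0, &ibmr);
                if (ibmr)
                        return ibmr;
        }

        /* NULL tells the caller to allocate a fresh MR; its slot in
         * item_count was reserved by the atomic_inc_return() above. */
        return ibmr;
}
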
475 struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
477 rds_ib_flush_mr_pool(pool, 0, NULL);
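
The delayed work queued throughout this file runs rds_ib_mr_pool_flush_worker() (lines 475-477), which resolves its pool from the work struct and performs a background, non-exclusive flush. The two quoted lines are essentially the whole function; only the static declaration and layout are assumed:

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
        struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool,
                                                   flush_worker.work);

        rds_ib_flush_mr_pool(pool, 0, NULL);
}
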
483 struct rds_ib_mr_pool *pool = ibmr->pool;
498 /* Return it to the pool's free list */
501 atomic_add(ibmr->sg_len, &pool->free_pinned);
502 atomic_inc(&pool->dirty_count);
505 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
506 atomic_read(&pool->dirty_count) >= pool->max_items / 5)
507 queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
511 rds_ib_flush_mr_pool(pool, 0, NULL);
517 &pool->flush_worker, 10);
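
rds_ib_free_mr() (lines 483-517) is the release path: the MR goes back onto one of the pool's dirty llists, the pinned-page and dirty counters grow, and a delayed flush is scheduled once too much memory is pinned or a fifth of the pool is dirty. When the caller requires invalidation, the flush runs synchronously unless we are in interrupt context, in which case it is deferred to the workqueue. A condensed, hedged sketch; the trans_private signature and the rds_ib_free_frmr_list() helper name are assumptions, and device refcounting is omitted:

void rds_ib_free_mr(void *trans_private, int invalidate)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_mr_pool *pool = ibmr->pool;

        /* Return it to the pool's free list */
        rds_ib_free_frmr_list(ibmr);            /* helper name assumed */

        atomic_add(ibmr->sg_len, &pool->free_pinned);
        atomic_inc(&pool->dirty_count);

        /* If we've pinned too many pages, request a flush. */
        if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
            atomic_read(&pool->dirty_count) >= pool->max_items / 5)
                queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

        if (invalidate) {
                if (likely(!in_interrupt())) {
                        rds_ib_flush_mr_pool(pool, 0, NULL);
                } else {
                        /* Can't flush from interrupt context; let the
                         * workqueue do it shortly. */
                        queue_delayed_work(rds_ib_mr_wq,
                                           &pool->flush_worker, 10);
                }
        }
}
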
630 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
632 cancel_delayed_work_sync(&pool->flush_worker);
633 rds_ib_flush_mr_pool(pool, 1, NULL);
634 WARN_ON(atomic_read(&pool->item_count));
635 WARN_ON(atomic_read(&pool->free_pinned));
636 kfree(pool);
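
rds_ib_destroy_mr_pool() (lines 630-636) is almost fully visible above: cancel any pending delayed flush, run one final exclusive flush that frees everything (free_all = 1), warn if anything is still accounted for, and free the pool. Reassembled here for readability:

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
        cancel_delayed_work_sync(&pool->flush_worker);
        rds_ib_flush_mr_pool(pool, 1, NULL);    /* free_all: empty every list */
        WARN_ON(atomic_read(&pool->item_count));
        WARN_ON(atomic_read(&pool->free_pinned));
        kfree(pool);
}
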
642 struct rds_ib_mr_pool *pool;
644 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
645 if (!pool)
648 pool->pool_type = pool_type;
649 init_llist_head(&pool->free_list);
650 init_llist_head(&pool->drop_list);
651 init_llist_head(&pool->clean_list);
652 spin_lock_init(&pool->clean_lock);
653 mutex_init(&pool->flush_lock);
654 init_waitqueue_head(&pool->flush_wait);
655 INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
659 pool->max_pages = RDS_MR_1M_MSG_SIZE + 1;
660 pool->max_items = rds_ibdev->max_1m_mrs;
663 pool->max_pages = RDS_MR_8K_MSG_SIZE + 1;
664 pool->max_items = rds_ibdev->max_8k_mrs;
667 pool->max_free_pinned = pool->max_items * pool->max_pages / 4;
668 pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
670 return pool;
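
The constructor at lines 642-670 allocates the pool, initialises its llists, locks, waitqueue and delayed work, then sizes it: the 8K and 1M pools differ only in max_pages (message size plus one page to allow for unaligned buffers) and in which per-device MR limit they consume, with max_free_pinned and max_items_soft derived from those. A sketch of the whole function; the name rds_ib_create_mr_pool(), the signature, and the error return are assumptions, while the field assignments mirror the quoted lines:

struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
                                             int pool_type)
{
        struct rds_ib_mr_pool *pool;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);        /* error convention assumed */

        pool->pool_type = pool_type;
        init_llist_head(&pool->free_list);
        init_llist_head(&pool->drop_list);
        init_llist_head(&pool->clean_list);
        spin_lock_init(&pool->clean_lock);
        mutex_init(&pool->flush_lock);
        init_waitqueue_head(&pool->flush_wait);
        INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

        if (pool_type == RDS_IB_MR_1M_POOL) {
                /* +1 allows for unaligned MRs */
                pool->max_pages = RDS_MR_1M_MSG_SIZE + 1;
                pool->max_items = rds_ibdev->max_1m_mrs;
        } else {
                /* pool_type == RDS_IB_MR_8K_POOL */
                pool->max_pages = RDS_MR_8K_MSG_SIZE + 1;
                pool->max_items = rds_ibdev->max_8k_mrs;
        }

        pool->max_free_pinned = pool->max_items * pool->max_pages / 4;
        pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;

        return pool;
}
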
682 * had their pools freed. As each pool is freed its work struct is waited on,
683 * so the pool flushing work queue should be idle by the time we get here.
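
The trailing comment fragment (lines 682-683) belongs to module teardown: by the time it runs, every device has already had its pools destroyed, and rds_ib_destroy_mr_pool() cancels and waits for each pool's delayed work, so the shared rds_ib_mr_wq workqueue is idle and can simply be torn down. A minimal sketch, assuming the cleanup function is the one that owns the workqueue (the name rds_ib_mr_exit() is an assumption):

void rds_ib_mr_exit(void)               /* function name assumed */
{
        destroy_workqueue(rds_ib_mr_wq);
}
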