Lines matching refs:pool (kernel/cgroup/dmem.c)

106 static void free_cg_pool(struct dmem_cgroup_pool_state *pool)
108 list_del(&pool->region_node);
109 kfree(pool);
113 set_resource_min(struct dmem_cgroup_pool_state *pool, u64 val)
115 page_counter_set_min(&pool->cnt, val);
119 set_resource_low(struct dmem_cgroup_pool_state *pool, u64 val)
121 page_counter_set_low(&pool->cnt, val);
125 set_resource_max(struct dmem_cgroup_pool_state *pool, u64 val)
127 page_counter_set_max(&pool->cnt, val);
130 static u64 get_resource_low(struct dmem_cgroup_pool_state *pool)
132 return pool ? READ_ONCE(pool->cnt.low) : 0;
135 static u64 get_resource_min(struct dmem_cgroup_pool_state *pool)
137 return pool ? READ_ONCE(pool->cnt.min) : 0;
140 static u64 get_resource_max(struct dmem_cgroup_pool_state *pool)
142 return pool ? READ_ONCE(pool->cnt.max) : PAGE_COUNTER_MAX;
145 static u64 get_resource_current(struct dmem_cgroup_pool_state *pool)
147 return pool ? page_counter_read(&pool->cnt) : 0;
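
The helpers above are thin wrappers around the page_counter API. The listing does not include reset_all_resource_limits() itself (it is only called at lines 164 and 343 below), but given these wrappers its likely shape is the sketch below: clear both protections and lift the cap back to PAGE_COUNTER_MAX.

	static void reset_all_resource_limits(struct dmem_cgroup_pool_state *pool)
	{
		/* No protection and no cap by default. */
		set_resource_min(pool, 0);
		set_resource_low(pool, 0);
		set_resource_max(pool, PAGE_COUNTER_MAX);
	}
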
160 struct dmem_cgroup_pool_state *pool;
163 list_for_each_entry_rcu(pool, &dmemcs->pools, css_node)
164 reset_all_resource_limits(pool);
171 struct dmem_cgroup_pool_state *pool, *next;
174 list_for_each_entry_safe(pool, next, &dmemcs->pools, css_node) {
176 * The pool is dead and all references are 0,
179 list_del(&pool->css_node);
180 free_cg_pool(pool);
201 struct dmem_cgroup_pool_state *pool;
203 list_for_each_entry_rcu(pool, &dmemcs->pools, css_node, spin_is_locked(&dmemcg_lock))
204 if (pool->region == region)
205 return pool;
210 static struct dmem_cgroup_pool_state *pool_parent(struct dmem_cgroup_pool_state *pool)
212 if (!pool->cnt.parent)
215 return container_of(pool->cnt.parent, typeof(*pool), cnt);
225 struct dmem_cgroup_pool_state *pool, *found_pool;
235 list_for_each_entry_rcu(pool, &dmemcg_iter->pools, css_node) {
236 if (pool->region == limit_pool->region) {
237 found_pool = pool;
255 * @limit_pool: The pool for which we hit limits
256 * @test_pool: The pool to test for eviction
271 struct dmem_cgroup_pool_state *pool = test_pool;
275 /* Can always evict from current pool, despite limits */
283 for (pool = test_pool; pool && limit_pool != pool; pool = pool_parent(pool))
286 if (!pool)
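
The fragment above shows the hierarchy check: evicting from the limiting pool itself is always allowed, otherwise @test_pool must sit below @limit_pool in the pool hierarchy (walked via pool_parent()). A minimal caller could look like the following sketch; dmem_obj, the lru list and the eviction step are hypothetical driver-side names, not part of the dmem cgroup API.

	#include <linux/cgroup_dmem.h>
	#include <linux/list.h>

	/* Hypothetical per-buffer driver state, for illustration only. */
	struct dmem_obj {
		struct list_head lru_node;
		struct dmem_cgroup_pool_state *pool; /* from dmem_cgroup_try_charge() */
	};

	static void my_evict_for(struct list_head *lru,
				 struct dmem_cgroup_pool_state *limit_pool)
	{
		struct dmem_obj *obj, *tmp;
		bool ignore_low = false, hit_low = false;

	retry:
		list_for_each_entry_safe(obj, tmp, lru, lru_node) {
			if (!dmem_cgroup_state_evict_valuable(limit_pool, obj->pool,
							      ignore_low, &hit_low))
				continue;
			/* driver-specific eviction of obj would go here */
		}

		/* Retry once, this time allowed to dig into the low watermark. */
		if (!ignore_low && hit_low) {
			ignore_low = true;
			goto retry;
		}
	}
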
324 struct dmem_cgroup_pool_state *pool, *ppool = NULL;
327 pool = kzalloc(sizeof(*pool), GFP_NOWAIT);
328 if (!pool)
331 pool = *allocpool;
335 pool->region = region;
336 pool->cs = dmemcs;
341 page_counter_init(&pool->cnt,
343 reset_all_resource_limits(pool);
345 list_add_tail_rcu(&pool->css_node, &dmemcs->pools);
346 list_add_tail(&pool->region_node, &region->pools);
349 pool->inited = true;
351 pool->inited = ppool ? ppool->inited : false;
352 return pool;
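
For reference, the fields touched in alloc_pool_single() imply roughly the following layout of struct dmem_cgroup_pool_state. This is reconstructed from the accesses in this listing only; the real definition in kernel/cgroup/dmem.c may carry additional members (e.g. an RCU head).

	struct dmem_cgroup_pool_state {
		struct dmem_cgroup_region *region;	/* pool->region, line 335 */
		struct dmemcg_state *cs;		/* pool->cs, line 336 */
		struct list_head css_node;		/* linked into dmemcs->pools, line 345 */
		struct list_head region_node;		/* linked into region->pools, line 346 */
		struct page_counter cnt;		/* charging and limits, lines 115-147 */
		bool inited;				/* parent links fixed up, lines 349-351 */
	};
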
359 struct dmem_cgroup_pool_state *pool, *ppool, *retpool;
363 * Recursively create pool, we may not initialize yet on
367 pool = find_cg_pool_locked(p, region);
368 if (!pool)
369 pool = alloc_pool_single(p, region, allocpool);
371 if (IS_ERR(pool))
372 return pool;
374 if (p == dmemcs && pool->inited)
375 return pool;
377 if (pool->inited)
381 retpool = pool = find_cg_pool_locked(dmemcs, region);
383 if (pool->inited)
390 pool->cnt.parent = &ppool->cnt;
391 pool->inited = true;
393 pool = ppool;
402 struct dmem_cgroup_pool_state *pool, *next;
404 list_for_each_entry_safe(pool, next, &region->pools, region_node)
405 free_cg_pool(pool);
436 struct dmem_cgroup_pool_state *pool =
437 container_of(entry, typeof(*pool), region_node);
439 list_del_rcu(&pool->css_node);
513 * @pool: &dmem_cgroup_pool_state
515 * Called to drop a reference to the limiting pool returned by
518 void dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool)
520 if (pool)
521 css_put(&pool->cs->css);
528 struct dmem_cgroup_pool_state *pool, *allocpool = NULL;
532 pool = find_cg_pool_locked(cg, region);
533 if (pool && !READ_ONCE(pool->inited))
534 pool = NULL;
537 while (!pool) {
540 pool = get_cg_pool_locked(cg, region, &allocpool);
542 pool = ERR_PTR(-ENODEV);
545 if (pool == ERR_PTR(-ENOMEM)) {
546 pool = NULL;
552 pool = NULL;
559 return pool;
563 * dmem_cgroup_uncharge() - Uncharge a pool.
564 * @pool: Pool to uncharge.
568 * Must be called with the returned pool as argument,
571 void dmem_cgroup_uncharge(struct dmem_cgroup_pool_state *pool, u64 size)
573 if (!pool)
576 page_counter_uncharge(&pool->cnt, size);
577 css_put(&pool->cs->css);
585 * @ret_pool: On successful allocation, the pool that is charged.
586 * @ret_limit_pool: On a failed allocation, the limiting pool.
594 * will be set to the pool for which the limit is hit. This can be used for
605 struct dmem_cgroup_pool_state *pool;
619 pool = get_cg_pool_unlocked(cg, region);
620 if (IS_ERR(pool)) {
621 ret = PTR_ERR(pool);
625 if (!page_counter_try_charge(&pool->cnt, size, &fail)) {
635 *ret_pool = pool;
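
Read together, the charge path suggests the driver-side pattern sketched below. The helper names and error handling are illustrative only; what the listing confirms is the pairing of @ret_pool with dmem_cgroup_uncharge() and of @ret_limit_pool with dmem_cgroup_pool_state_put().

	#include <linux/cgroup_dmem.h>

	/* Hypothetical wrapper: charge @size bytes of @region to the current cgroup. */
	static int my_charge(struct dmem_cgroup_region *region, u64 size,
			     struct dmem_cgroup_pool_state **ret_pool)
	{
		struct dmem_cgroup_pool_state *limit_pool = NULL;
		int ret;

		ret = dmem_cgroup_try_charge(region, size, ret_pool, &limit_pool);
		if (ret == -EAGAIN && limit_pool) {
			/*
			 * A limit was hit. The driver may evict buffers from pools
			 * for which dmem_cgroup_state_evict_valuable(limit_pool, ...)
			 * returns true and then retry the charge; the reference
			 * must be dropped either way.
			 */
			dmem_cgroup_pool_state_put(limit_pool);
		}
		return ret;
	}

	/* On free (or if the allocation itself fails), undo the charge. */
	static void my_uncharge(struct dmem_cgroup_pool_state *pool, u64 size)
	{
		dmem_cgroup_uncharge(pool, size);
	}
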
682 struct dmem_cgroup_pool_state *pool = NULL;
713 pool = get_cg_pool_unlocked(dmemcs, region);
714 if (IS_ERR(pool)) {
715 err = PTR_ERR(pool);
720 apply(pool, new_limit);
738 struct dmem_cgroup_pool_state *pool = find_cg_pool_locked(dmemcs, region);
743 val = fn(pool);
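
These two helpers back the per-cgroup dmem.{min,low,max,current} files: the write path parses a region name plus value and applies it via the set_resource_*() callbacks, while the show path prints one "region value" line per registered region, with "max" standing in for PAGE_COUNTER_MAX. As an illustration only (region names are chosen by the registering driver), dmem.max might read back as:

	drm/0000:03:00.0/vram0 1073741824
	drm/0000:03:00.0/stolen max
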