Lines Matching defs:memcg

713 	struct mem_cgroup *memcg;
717 * concurrent memcg offlining:
720 * new entry will be reparented to memcg's parent's list_lru.
722 * new entry will be added directly to memcg's parent's list_lru.
727 memcg = mem_cgroup_from_entry(entry);
729 list_lru_add(list_lru, &entry->lru, nid, memcg);
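
The fragments above (lines 713-729) belong to zswap's LRU add path. A minimal sketch of the enclosing function, reconstructed around the listed lines; the function name zswap_lru_add() and the entry_to_nid() helper are assumptions, not part of the listing:

    static void zswap_lru_add(struct list_lru *list_lru,
                              struct zswap_entry *entry)
    {
        int nid = entry_to_nid(entry);  /* assumed helper */
        struct mem_cgroup *memcg;

        /*
         * rcu_read_lock() is enough even against concurrent memcg
         * offlining: if list_lru_add() runs before the list_lru memcg
         * is erased, the new entry is reparented to memcg's parent's
         * list_lru; if it runs after, the new entry is added directly
         * to memcg's parent's list_lru. Either way the entry stays
         * reachable.
         */
        rcu_read_lock();
        memcg = mem_cgroup_from_entry(entry);
        /* will always succeed */
        list_lru_add(list_lru, &entry->lru, nid, memcg);
        rcu_read_unlock();
    }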
736 struct mem_cgroup *memcg;
739 memcg = mem_cgroup_from_entry(entry);
741 list_lru_del(list_lru, &entry->lru, nid, memcg);
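
Lines 736-741 are the mirrored delete path; the same RCU reasoning applies. A sketch under the same assumptions as above:

    static void zswap_lru_del(struct list_lru *list_lru,
                              struct zswap_entry *entry)
    {
        int nid = entry_to_nid(entry);  /* assumed helper */
        struct mem_cgroup *memcg;

        rcu_read_lock();
        memcg = mem_cgroup_from_entry(entry);
        /* will always succeed */
        list_lru_del(list_lru, &entry->lru, nid, memcg);
        rcu_read_unlock();
    }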
761 * This function should be called when a memcg is being offlined.
764 * of the memcg, we must check and release the reference in
768 * the reference of memcg being shrunk.
770 void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
772 /* lock out zswap shrinker walking memcg tree */
774 if (zswap_next_shrink == memcg) {
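
Lines 761-774 belong to the offline cleaner. A sketch of how the cursor hand-off plausibly looks; only the signature, the lock comment, and the if-condition appear in the listing, so the spinlock name zswap_shrink_lock and the loop body are assumptions:

    void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
    {
        /* lock out zswap shrinker walking memcg tree */
        spin_lock(&zswap_shrink_lock);          /* lock name assumed */
        if (zswap_next_shrink == memcg) {
            /*
             * Advance the cursor past the dying memcg;
             * mem_cgroup_iter() drops the reference it holds on its
             * @prev argument, so no explicit put is needed here.
             */
            do {
                zswap_next_shrink = mem_cgroup_iter(NULL,
                                                    zswap_next_shrink,
                                                    NULL);
            } while (zswap_next_shrink &&
                     !mem_cgroup_online(zswap_next_shrink));
        }
        spin_unlock(&zswap_shrink_lock);
    }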
1258 !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
1275 struct mem_cgroup *memcg = sc->memcg;
1276 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
1282 if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
1294 * For memcg, use the cgroup-wide ZSWAP stats since we don't
1295 * have them per-node and thus per-lruvec. Careful if memcg is
1296 * runtime-disabled: we can get sc->memcg == NULL, which is ok
1299 * Without memcg, use the zswap pool-wide metrics.
1302 mem_cgroup_flush_stats(memcg);
1303 nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
1304 nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
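
Line 1258 is the writeback-enabled bail-out in the shrinker's scan callback; lines 1275-1304 come from the matching count callback. A sketch of the stats selection that the comment describes; the enclosing zswap_shrinker_count() name, the pool-wide fallback counters, and the final scaling are assumptions, and the lruvec declared at line 1276 feeds a protection heuristic omitted here:

    static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
                                              struct shrink_control *sc)
    {
        struct mem_cgroup *memcg = sc->memcg;
        unsigned long nr_backing, nr_stored, nr_freeable;

        if (!zswap_shrinker_enabled ||
            !mem_cgroup_zswap_writeback_enabled(memcg))
            return 0;

        if (!mem_cgroup_disabled()) {
            /*
             * Cgroup-wide ZSWAP stats: there is no per-node (and thus
             * per-lruvec) breakdown. sc->memcg can only be NULL when
             * memcg is runtime-disabled, which the else branch covers,
             * so memcg_page_state() is safe here.
             */
            mem_cgroup_flush_stats(memcg);
            nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
            nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
        } else {
            /* without memcg, use zswap pool-wide metrics (names assumed) */
            nr_backing = zswap_total_pages();
            nr_stored = atomic_read(&zswap_stored_pages);
        }

        nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
        if (!nr_stored || !nr_freeable)
            return 0;

        /* scale the freeable objects by the effective compression ratio */
        return mult_frac(nr_freeable, nr_backing, nr_stored);
    }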
1360 static int shrink_memcg(struct mem_cgroup *memcg)
1364 if (!mem_cgroup_zswap_writeback_enabled(memcg))
1369 * reclaiming from the parent instead of the dead memcg.
1371 if (memcg && !mem_cgroup_online(memcg))
1377 shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
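
Lines 1360-1377 outline shrink_memcg(). A sketch filling the gaps between the listed lines; the shrink_memcg_cb walk callback and the exact return-value convention are assumptions:

    static int shrink_memcg(struct mem_cgroup *memcg)
    {
        int nid, shrunk = 0;

        if (!mem_cgroup_zswap_writeback_enabled(memcg))
            return -EINVAL;

        /*
         * Skip zombies: their zswap LRUs were reparented on offline,
         * so walking them would end up reclaiming from the parent
         * instead of the dead memcg.
         */
        if (memcg && !mem_cgroup_online(memcg))
            return -ENOENT;

        for_each_node_state(nid, N_NORMAL_MEMORY) {
            unsigned long nr_to_walk = 1;

            shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
                                        &shrink_memcg_cb, NULL,
                                        &nr_to_walk);
        }
        return shrunk ? 0 : -EAGAIN;
    }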
1390 struct mem_cgroup *memcg;
1405 * - No writeback-candidate memcgs found in a memcg tree walk.
1406 * - Shrinking a writeback-candidate memcg failed.
1408 * We save iteration cursor memcg into zswap_next_shrink,
1409 * which can be modified by the offline memcg cleaner
1413 * offline memcg reference in zswap_next_shrink.
1414 * We can rely on the cleaner only if we get an online memcg under the lock.
1416 * If we get an offline memcg, we cannot determine if the cleaner has
1419 * offline memcg left in zswap_next_shrink will hold the reference
1424 * Start shrinking from the next memcg after zswap_next_shrink.
1426 * advancing the cursor here overlooks one memcg, but this
1429 * If we get an online memcg, keep the extra reference in case
1432 * memcg.
1436 memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
1437 zswap_next_shrink = memcg;
1438 } while (memcg && !mem_cgroup_tryget_online(memcg));
1441 if (!memcg) {
1453 ret = shrink_memcg(memcg);
1455 mem_cgroup_put(memcg);
1458 * There are no writeback-candidate pages in the memcg.
1459 * This is not an issue as long as we can find another memcg
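
Lines 1390-1459 are the global shrink worker whose cursor protocol the comments above describe. A sketch of that protocol; the lock name, the failure cap, the error-code handling, and the loop exit condition are assumptions layered around the listed fragments:

    static void shrink_worker(struct work_struct *w)
    {
        struct mem_cgroup *memcg;
        int ret, failures = 0;

        do {
            /*
             * Advance the cursor under the lock until an online memcg
             * is pinned. mem_cgroup_iter() releases the reference on
             * its @prev, and mem_cgroup_tryget_online() takes an extra
             * one so the memcg outlives a concurrent
             * zswap_memcg_offline_cleanup() dropping the cursor's
             * reference mid-shrink.
             */
            spin_lock(&zswap_shrink_lock);          /* lock name assumed */
            do {
                memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
                zswap_next_shrink = memcg;
            } while (memcg && !mem_cgroup_tryget_online(memcg));
            spin_unlock(&zswap_shrink_lock);

            if (!memcg) {
                /* a full tree walk found no writeback candidate */
                if (++failures == MAX_RECLAIM_RETRIES)
                    break;
                continue;
            }

            ret = shrink_memcg(memcg);
            /* drop the extra reference from tryget_online */
            mem_cgroup_put(memcg);

            /*
             * No writeback-candidate pages in this memcg; not a
             * problem while some other memcg still has pages in
             * zswap, so skip it without counting a failure.
             */
            if (ret == -ENOENT)
                continue;
            if (ret && ++failures == MAX_RECLAIM_RETRIES)
                break;

            cond_resched();
        } while (zswap_above_accept_threshold());   /* exit check assumed */
    }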
1561 struct mem_cgroup *memcg = NULL;
1574 memcg = get_mem_cgroup_from_objcg(objcg);
1575 if (shrink_memcg(memcg)) {
1576 mem_cgroup_put(memcg);
1579 mem_cgroup_put(memcg);
1590 memcg = get_mem_cgroup_from_objcg(objcg);
1591 if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
1592 mem_cgroup_put(memcg);
1595 mem_cgroup_put(memcg);
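
Lines 1561-1595 sit inside zswap_store(). A sketch of its two memcg touchpoints; the surrounding control flow (the limit check, the reject/put_pool labels, and everything elided between them) is assumed:

    struct mem_cgroup *memcg = NULL;

    /* over the pool limit: write back from the offending cgroup first */
    if (objcg && zswap_is_full()) {                 /* check name assumed */
        memcg = get_mem_cgroup_from_objcg(objcg);
        if (shrink_memcg(memcg)) {
            mem_cgroup_put(memcg);
            goto reject;                            /* label assumed */
        }
        mem_cgroup_put(memcg);
    }

    /* ... pool lookup and other store steps elided ... */

    /* ensure this memcg's zswap list_lru exists before the entry is added */
    if (objcg) {
        memcg = get_mem_cgroup_from_objcg(objcg);
        if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
            mem_cgroup_put(memcg);
            goto put_pool;                          /* label assumed */
        }
        mem_cgroup_put(memcg);
    }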