/linux/mm/

memcontrol.c
    112: struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
    114:         if (!memcg)
    115:                 memcg = root_mem_cgroup;
    116:         return &memcg->vmpressure;
    135: static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
    in obj_cgroup_release():
    169:         struct mem_cgroup *memcg;
    171:         memcg = get_mem_cgroup_from_objcg(objcg);
    172:         mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
    173:         memcg1_account_kmem(memcg, -nr_pages);
    174:         if (!mem_cgroup_is_root(memcg))
    [more matches elided]
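Note: the memcg_to_vmpressure() hit above shows a recurring memcg convention — a NULL cgroup pointer is treated as the root group before a per-group field is handed out. Below is a minimal user-space sketch of that fallback pattern; struct vmpressure, struct mem_cgroup and root_cgroup are simplified stand-ins, not the kernel definitions.

#include <stdio.h>

/* Simplified stand-ins for the kernel's struct vmpressure / struct mem_cgroup. */
struct vmpressure { unsigned long scanned, reclaimed; };
struct mem_cgroup { struct vmpressure vmpressure; };

static struct mem_cgroup root_cgroup;   /* models root_mem_cgroup */

/* Fallback-to-root pattern from memcg_to_vmpressure(): NULL means "the root". */
static struct vmpressure *cgroup_to_vmpressure(struct mem_cgroup *memcg)
{
        if (!memcg)
                memcg = &root_cgroup;
        return &memcg->vmpressure;
}

int main(void)
{
        struct mem_cgroup child = { { 10, 5 } };

        /* A NULL cgroup resolves to the root's vmpressure state. */
        printf("root scanned:  %lu\n", cgroup_to_vmpressure(NULL)->scanned);
        printf("child scanned: %lu\n", cgroup_to_vmpressure(&child)->scanned);
        return 0;
}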
memcontrol-v1.h
    25: unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap);
    29: unsigned long memcg_events(struct mem_cgroup *memcg, int event);
    30: unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item);
    33: void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n);
    34: struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg);
    45: unsigned long memcg_events_local(struct mem_cgroup *memcg, int event);
    46: unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx);
    47: unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item);
    48: bool memcg1_alloc_events(struct mem_cgroup *memcg);
    49: void memcg1_free_events(struct mem_cgroup *memcg);
    [more matches elided]
memcontrol-v1.c
    56:         struct mem_cgroup *memcg;
    70:         int (*register_event)(struct mem_cgroup *memcg,
    77:         void (*unregister_event)(struct mem_cgroup *memcg,
    167: static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
    169:         unsigned long nr_pages = page_counter_read(&memcg->memory);
    170:         unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
    179: static void memcg1_update_tree(struct mem_cgroup *memcg, int nid)
    186:         if (soft_limit_excess(memcg))
    187:                 lru_gen_soft_reclaim(memcg, nid);
    198:         for (; memcg; memcg = parent_mem_cgroup(memcg)) {
    [more matches elided]
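Note: the soft_limit_excess() lines read the group's current usage and its soft limit; the return statement is not in the excerpt, but the name suggests it yields the number of pages by which usage exceeds the limit (zero when at or under it). A hedged sketch of that arithmetic, with plain variables standing in for page_counter_read() and READ_ONCE():

#include <stdio.h>

/*
 * Assumed behaviour of a soft-limit excess calculation: usage and the soft
 * limit are both page counts, and anything at or under the limit is zero
 * excess. Stand-alone illustration, not the kernel function.
 */
static unsigned long soft_limit_excess(unsigned long nr_pages,
                                       unsigned long soft_limit)
{
        return nr_pages > soft_limit ? nr_pages - soft_limit : 0;
}

int main(void)
{
        printf("%lu\n", soft_limit_excess(300, 200));   /* 100 pages over */
        printf("%lu\n", soft_limit_excess(150, 200));   /* 0, under limit */
        return 0;
}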
shrinker.c
    62: void free_shrinker_info(struct mem_cgroup *memcg)
    69:         pn = memcg->nodeinfo[nid];
    77: int alloc_shrinker_info(struct mem_cgroup *memcg)
    94:         rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
    102:        free_shrinker_info(memcg);
    106: static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
    109:        return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
    113: static int expand_one_shrinker_info(struct mem_cgroup *memcg, int new_size,
    121:        pn = memcg->nodeinfo[nid];
    122:        old = shrinker_info_protected(memcg, nid);
    [more matches elided]
shrinker_debug.c
    in shrinker_count_objects():
    19:                 struct mem_cgroup *memcg,
    30:                 .memcg = memcg,
    in shrinker_debugfs_count_show():
    51:         struct mem_cgroup *memcg;
    64:         memcg = mem_cgroup_iter(NULL, NULL, NULL);
    66:         if (memcg && !mem_cgroup_online(memcg))
    70:                         memcg_aware ? memcg : NULL,
    73:                 seq_printf(m, "%lu", mem_cgroup_ino(memcg));
    80:                         mem_cgroup_iter_break(NULL, memcg);
    85:                         mem_cgroup_iter_break(NULL, memcg);
    89:         } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
    [more matches elided]
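Note: shrinker_debugfs_count_show() walks every memory cgroup with the do/while around mem_cgroup_iter(NULL, memcg, NULL) visible above, skipping offline groups and bailing out early with mem_cgroup_iter_break(). The sketch below reproduces that iterate-and-skip shape over a plain linked list; fake_memcg, its online flag and memcg_iter() are stand-ins for the kernel's cgroup tree walk.

#include <stdio.h>
#include <stddef.h>

/* Stand-in for a memcg in a flat list; the kernel iterates a cgroup tree. */
struct fake_memcg {
        const char *name;
        int online;
        struct fake_memcg *next;
};

/* Models mem_cgroup_iter(NULL, prev, NULL): a NULL prev starts the walk. */
static struct fake_memcg *memcg_iter(struct fake_memcg *head, struct fake_memcg *prev)
{
        return prev ? prev->next : head;
}

int main(void)
{
        struct fake_memcg c = { "c", 1, NULL };
        struct fake_memcg b = { "b", 0, &c };   /* offline: skipped below */
        struct fake_memcg a = { "a", 1, &b };
        struct fake_memcg *memcg = memcg_iter(&a, NULL);

        do {
                if (!memcg->online)
                        continue;       /* mirrors the !mem_cgroup_online() check */
                printf("count objects for %s\n", memcg->name);
        } while ((memcg = memcg_iter(&a, memcg)) != NULL);

        return 0;
}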
list_lru.c
    80: lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
    87:         l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
    100:                VM_WARN_ON(!css_is_dying(&memcg->css));
    101:                memcg = parent_mem_cgroup(memcg);
    138: lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
    in list_lru_add():
    162:                  struct mem_cgroup *memcg)
    167:        l = lock_list_lru_of_memcg(lru, nid, memcg, false, false);
    174:                set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
    in list_lru_del():
    202:                  struct mem_cgroup *memcg)
    206:        l = lock_list_lru_of_memcg(lru, nid, memcg, false, false);
    [more matches elided]
vmpressure.c
    in vmpressure_parent():
    77:         struct mem_cgroup *memcg = vmpressure_to_memcg(vmpr);
    79:         memcg = parent_mem_cgroup(memcg);
    80:         if (!memcg)
    82:         return memcg_to_vmpressure(memcg);
    239: void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
    255:        vmpr = memcg_to_vmpressure(memcg);
    295:        if (!memcg || mem_cgroup_is_root(memcg))
    319:                mem_cgroup_set_socket_pressure(memcg);
    335: void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
    351:        vmpressure(gfp, memcg, true, vmpressure_win, 0);
    [more matches elided]
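Note: vmpressure_parent() steps one level up the hierarchy (parent_mem_cgroup(), NULL once the root is passed), which is what lets tree-mode pressure reporting charge reclaim activity to a group and all of its ancestors. The sketch below models that upward walk with an explicit parent pointer; the struct layout, the field names and the fixed "32 pages scanned" are illustrative assumptions, not the kernel's bookkeeping.

#include <stdio.h>
#include <stddef.h>

/* Stand-in: each cgroup keeps its own pressure counter and a parent link. */
struct mem_cgroup {
        const char *name;
        unsigned long scanned;          /* models per-group vmpressure state */
        struct mem_cgroup *parent;
};

int main(void)
{
        struct mem_cgroup root = { "root", 0, NULL  };
        struct mem_cgroup mid  = { "mid",  0, &root };
        struct mem_cgroup leaf = { "leaf", 0, &mid  };

        /* Charge one reclaim pass to the leaf and every ancestor. */
        for (struct mem_cgroup *m = &leaf; m; m = m->parent) {
                m->scanned += 32;
                printf("%s scanned=%lu\n", m->name, m->scanned);
        }
        return 0;
}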
zswap.c
    in zswap_lru_add():
    625:        struct mem_cgroup *memcg;
    639:        memcg = mem_cgroup_from_entry(entry);
    641:        list_lru_add(list_lru, &entry->lru, nid, memcg);
    in zswap_lru_del():
    648:        struct mem_cgroup *memcg;
    651:        memcg = mem_cgroup_from_entry(entry);
    653:        list_lru_del(list_lru, &entry->lru, nid, memcg);
    682: void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
    686:        if (zswap_next_shrink == memcg) {
    in zswap_shrinker_scan():
    1195:               !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
    in zswap_shrinker_count():
    1212:       struct mem_cgroup *memcg = sc->memcg;
    [more matches elided]
vmscan.c
    245: static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)
    249:        return mem_cgroup_swappiness(memcg);
    267: static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)
    in can_demote():
    345:                   struct mem_cgroup *memcg)
    359:        return mem_cgroup_node_allowed(memcg, demotion_nid);
    362: static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
    366:        if (memcg == NULL) {
    375:        if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
    384:        return can_demote(nid, sc, memcg);
    in drop_slab_node():
    430:        struct mem_cgroup *memcg = NULL;
    [more matches elided]
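Note: the can_reclaim_anon_pages() hits show the decision order for anonymous reclaim — global reclaim (memcg == NULL) is checked first, then whether the cgroup still has swap headroom via mem_cgroup_get_nr_swap_pages(), and finally whether pages can instead be demoted via can_demote(). The condensed sketch below keeps that ordering; the branches elided from the excerpt (global swap accounting, scan_control state) are reduced to plain boolean parameters here.

#include <stdbool.h>
#include <stdio.h>

/* Decision order sketched from can_reclaim_anon_pages(); inputs are stand-ins. */
static bool can_reclaim_anon_pages(bool global_reclaim,
                                   bool global_swap_available,
                                   long memcg_swap_pages,
                                   bool can_demote_to_other_node)
{
        if (global_reclaim) {
                if (global_swap_available)
                        return true;
        } else if (memcg_swap_pages > 0) {
                /* The cgroup is still below its swap limit. */
                return true;
        }

        /* No usable swap: anon pages are only reclaimable if demotion works. */
        return can_demote_to_other_node;
}

int main(void)
{
        printf("%d\n", can_reclaim_anon_pages(false, false, 128, false));  /* 1 */
        printf("%d\n", can_reclaim_anon_pages(false, false, 0, true));     /* 1 */
        printf("%d\n", can_reclaim_anon_pages(false, false, 0, false));    /* 0 */
        return 0;
}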
workingset.c
    in lru_gen_eviction():
    244:        struct mem_cgroup *memcg = folio_memcg(folio);
    249:        lruvec = mem_cgroup_lruvec(memcg, pgdat);
    257:        return pack_shadow(mem_cgroup_id(memcg), pgdat, token, workingset);
    in lru_gen_test_recent():
    269:        struct mem_cgroup *memcg;
    274:        memcg = mem_cgroup_from_id(memcg_id);
    275:        *lruvec = mem_cgroup_lruvec(memcg, pgdat);
    in workingset_refault():
    538:        struct mem_cgroup *memcg;
    560:        memcg = folio_memcg(folio);
    562:        lruvec = mem_cgroup_lruvec(memcg, pgdat);
    in count_shadow_nodes():
    674:        if (sc->memcg) {
    [more matches elided]
/linux/include/linux/

memcontrol.h
    89:         struct mem_cgroup *memcg;       /* Back pointer, we cannot */
    175:        struct mem_cgroup *memcg;
    in obj_cgroup_memcg():
    382:        return READ_ONCE(objcg->memcg);
    in get_mem_cgroup_from_objcg():
    514:        struct mem_cgroup *memcg;
    518:        memcg = obj_cgroup_memcg(objcg);
    519:        if (unlikely(!css_tryget(&memcg->css)))
    523:        return memcg;
    546: static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
    548:        return (memcg == root_mem_cgroup);
    in mem_cgroup_protection():
    557:                                  struct mem_cgroup *memcg,
    [more matches elided]
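Note: get_mem_cgroup_from_objcg() shows the read-then-tryget pattern — re-read objcg->memcg and retry whenever css_tryget() refuses a reference, because reparenting can swap the back-pointer underneath the reader. The user-space sketch below reproduces that retry loop; fake_memcg, fake_objcg, the dying flag and the inline "reparenting" are all stand-ins for what the kernel does under RCU with css reference counts.

#include <stdio.h>
#include <stdbool.h>

/* Stand-in cgroup with a liveness check, loosely modelling css_tryget(). */
struct fake_memcg {
        const char *name;
        bool dying;
};

static bool tryget(struct fake_memcg *m)
{
        return !m->dying;       /* a real tryget also takes a reference */
}

/* Stand-in obj_cgroup whose ->memcg can be switched by reparenting. */
struct fake_objcg {
        struct fake_memcg *memcg;
};

/* Retry until a reference can actually be taken on the current owner. */
static struct fake_memcg *get_memcg_from_objcg(struct fake_objcg *objcg,
                                               struct fake_memcg *parent)
{
        struct fake_memcg *memcg;

        for (;;) {
                memcg = objcg->memcg;
                if (tryget(memcg))
                        return memcg;
                objcg->memcg = parent;  /* simulate concurrent reparenting */
        }
}

int main(void)
{
        struct fake_memcg parent = { "parent", false };
        struct fake_memcg child  = { "child",  true };  /* being removed */
        struct fake_objcg objcg  = { &child };

        printf("got: %s\n", get_memcg_from_objcg(&objcg, &parent)->name);
        return 0;
}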
vmpressure.h
    33: extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
    35: extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
    39: extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
    41: extern int vmpressure_register_event(struct mem_cgroup *memcg,
    44: extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
    47: static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
    49: static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg,
list_lru.h
    82: int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
    84: void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent);
    116:                  struct mem_cgroup *memcg);
    156:                  struct mem_cgroup *memcg);
    184:                  int nid, struct mem_cgroup *memcg);
    in list_lru_shrink_count():
    190:        return list_lru_count_one(lru, sc->nid, sc->memcg);
    234:                  int nid, struct mem_cgroup *memcg,
    251:                  int nid, struct mem_cgroup *memcg,
    in list_lru_shrink_walk():
    262:        return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
    in list_lru_shrink_walk_irq():
    270:        return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
zswap.h
    33: void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
    58: static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}
swap.h
    394: extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
    592: static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
    599:        if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
    602:        return READ_ONCE(memcg->swappiness);
    643: extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
    657: static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
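Note: mem_cgroup_swappiness() returns the per-group swappiness except when cgroups are disabled or the group is the root, in which case the excerpt's truncated branch falls back to the global vm_swappiness sysctl. A small sketch of that selection; the types, the memcg_disabled parameter and the default of 60 are stand-ins for the kernel state.

#include <stdio.h>
#include <stdbool.h>

static int vm_swappiness = 60;          /* common global default */

struct fake_memcg {
        bool is_root;
        int swappiness;
};

/* Disabled cgroups and the root use the global knob; others use their own. */
static int memcg_swappiness(const struct fake_memcg *memcg, bool memcg_disabled)
{
        if (memcg_disabled || memcg->is_root)
                return vm_swappiness;
        return memcg->swappiness;
}

int main(void)
{
        struct fake_memcg root  = { true, 0 };
        struct fake_memcg child = { false, 10 };

        printf("root:  %d\n", memcg_swappiness(&root, false));     /* 60 */
        printf("child: %d\n", memcg_swappiness(&child, false));    /* 10 */
        return 0;
}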
mmzone.h
    621: void lru_gen_init_memcg(struct mem_cgroup *memcg);
    622: void lru_gen_exit_memcg(struct mem_cgroup *memcg);
    623: void lru_gen_online_memcg(struct mem_cgroup *memcg);
    624: void lru_gen_offline_memcg(struct mem_cgroup *memcg);
    625: void lru_gen_release_memcg(struct mem_cgroup *memcg);
    626: void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
    643: static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
    647: static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
    651: static inline void lru_gen_online_memcg(struct mem_cgroup *memcg)
    655: static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg)
    [more matches elided]
/linux/tools/testing/selftests/cgroup/

test_memcontrol.c
    in test_memcg_current_peak():
    248:        char *memcg;
    253:        memcg = cg_name(root, "memcg_test");
    254:        if (!memcg)
    257:        if (cg_create(memcg))
    260:        current = cg_read_long(memcg, "memory.current");
    264:        peak = cg_read_long(memcg, "memory.peak");
    268:        if (cg_run(memcg, alloc_anon_50M_check, NULL))
    271:        peak = cg_read_long(memcg, "memory.peak");
    280:        peak_fd = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);
    301:        peak_fd2 = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);
    [more matches elided]
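Note: test_memcg_current_peak() reads the memory.current and memory.peak interface files through the selftest helpers (cg_name, cg_create, cg_read_long, cg_run, cg_open). The stand-alone sketch below does the equivalent of the cg_read_long() calls with plain stdio; the cgroup path is a hypothetical example and has to point at a real cgroup v2 group before it returns anything but -1.

#include <stdio.h>
#include <stdlib.h>

/* Read one decimal value from a cgroup v2 interface file, -1 on failure. */
static long cgroup_read_long(const char *cgroup, const char *file)
{
        char path[4096], buf[64];
        long val = -1;
        FILE *f;

        snprintf(path, sizeof(path), "%s/%s", cgroup, file);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (fgets(buf, sizeof(buf), f))
                val = atol(buf);
        fclose(f);
        return val;
}

int main(void)
{
        const char *cg = "/sys/fs/cgroup/memcg_test";   /* hypothetical group */

        printf("memory.current = %ld\n", cgroup_read_long(cg, "memory.current"));
        printf("memory.peak    = %ld\n", cgroup_read_long(cg, "memory.peak"));
        return 0;
}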
/linux/include/trace/events/

memcg.h
    3: #define TRACE_SYSTEM memcg
    14:         TP_PROTO(struct mem_cgroup *memcg, int item, int val),
    16:         TP_ARGS(memcg, item, val),
    25:                 __entry->id = cgroup_id(memcg->css.cgroup);
    36:         TP_PROTO(struct mem_cgroup *memcg, int item, int val),
    38:         TP_ARGS(memcg, item, val)
    43:         TP_PROTO(struct mem_cgroup *memcg, int item, int val),
    45:         TP_ARGS(memcg, item, val)
    50:         TP_PROTO(struct mem_cgroup *memcg, int item, unsigned long val),
    52:         TP_ARGS(memcg, item, val),
    [more matches elided]
/linux/tools/mm/

show_page_info.py
    61:         memcg = slabobj_ext.objcg.memcg.value_()
    64:         memcg = objcg.memcg.value_()
    66:         memcg = cast("struct mem_cgroup *", memcg_data & ~mask)
    68:     if memcg.value_() == 0:
    70:     cgrp = memcg.css.cgroup
/linux/tools/cgroup/

memcg_slabinfo.py
    42:         memcg = container_of(css, 'struct mem_cgroup', 'css')
    43:         MEMCGS[css.cgroup.kn.id.value_()] = memcg
    170:    memcg = MEMCGS[cgroup_id]
    186:        obj_cgroups.add(memcg.objcg.value_())
    188:        memcg.objcg_list.address_of_(),
    220:        memcg.kmem_caches.address_of_(),
/linux/Documentation/admin-guide/cgroup-v1/

memcg_test.rst
    9: Because VM is getting complex (one of reasons is memcg...), memcg's behavior
    10: is complex. This is a document for memcg's internal behavior.
    61: At commit(), the page is associated with the memcg.
    114: But brief explanation of the behavior of memcg around shmem will be
    136: Each memcg has its own vector of LRUs (inactive anon, active anon,
    138: each LRU handled under a single lru_lock for that memcg and node.
    145: 9.1 Small limit to memcg.
    148: When you do test to do racy case, it's good test to set memcg's limit
    158: Historically, memcg's shmem handling was poor and we saw some amount
    248: Besides management of swap is one of complicated parts of memcg,
    [more matches elided]
/linux/tools/testing/selftests/kvm/lib/

lru_gen_util.c
    201: void lru_gen_read_memcg_stats(struct memcg_stats *stats, const char *memcg)
    209:        .name = memcg,
    232:                memcg);
    237:        print_memcg_stats(stats, memcg);
    302: void lru_gen_do_aging(struct memcg_stats *stats, const char *memcg)
    309:        lru_gen_read_memcg_stats(stats, memcg);
    325:        lru_gen_read_memcg_stats(stats, memcg);
/linux/tools/testing/selftests/kvm/include/

lru_gen_util.h
    43: void lru_gen_read_memcg_stats(struct memcg_stats *stats, const char *memcg);
    46: void lru_gen_do_aging(struct memcg_stats *stats, const char *memcg);
/linux/include/linux/sched/

mm.h
    in set_active_memcg():
    489: set_active_memcg(struct mem_cgroup *memcg)
    495:                this_cpu_write(int_active_memcg, memcg);
    498:                current->active_memcg = memcg;
    505: set_active_memcg(struct mem_cgroup *memcg)
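Note: set_active_memcg() installs a charging override, using the per-CPU int_active_memcg slot in one branch and current->active_memcg in the other (the excerpt's truncated condition distinguishes interrupt from task context). Callers conventionally save the value it returns and restore it afterwards. The sketch below shows that save/override/restore shape with a thread-local pointer standing in for both kernel slots.

#include <stdio.h>
#include <stddef.h>

struct mem_cgroup { const char *name; };

/* Thread-local stand-in for current->active_memcg / the per-CPU slot. */
static _Thread_local struct mem_cgroup *active_memcg;

/* Install an override and hand back the previous one for later restore. */
static struct mem_cgroup *set_active_memcg(struct mem_cgroup *memcg)
{
        struct mem_cgroup *old = active_memcg;

        active_memcg = memcg;
        return old;
}

int main(void)
{
        struct mem_cgroup target = { "target" };
        struct mem_cgroup *old;

        old = set_active_memcg(&target);
        /* ... allocations here would be charged to "target" ... */
        printf("charging to: %s\n", active_memcg->name);
        set_active_memcg(old);

        printf("restored: %s\n", active_memcg ? active_memcg->name : "(none)");
        return 0;
}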
/linux/Documentation/translations/zh_CN/mm/

hwpoison.rst
    119: corrupt-filter-memcg
    120: Limit injection to pages owned by the memcg, specified by the memcg's inode number.
    130:         echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg