/linux/include/linux/

mempolicy.h
     47  struct mempolicy {
     65  extern void __mpol_put(struct mempolicy *pol);
     66  static inline void mpol_put(struct mempolicy *pol)
     76  static inline int mpol_needs_cond_ref(struct mempolicy *pol)
     81  static inline void mpol_cond_put(struct mempolicy *pol)
     87  extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
     88  static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
     95  static inline void mpol_get(struct mempolicy *pol)
    101  extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
    102  static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
    [all …]
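
These wrappers carry the mempolicy reference-counting contract: mpol_get() pins a policy, mpol_put() drops an unconditional reference, and mpol_cond_put() drops one only when mpol_needs_cond_ref() says the lookup actually returned a counted reference (shared policies). A minimal sketch of that pattern, assuming the kernel's refcnt field and internal MPOL_F_SHARED flag; the example_ prefixes avoid clashing with the real inlines:

    #include <linux/mempolicy.h>

    /* Pin a policy unconditionally, as mpol_get() does. */
    static inline void example_mpol_get(struct mempolicy *pol)
    {
            if (pol)
                    atomic_inc(&pol->refcnt);
    }

    /* Drop a reference only if one was actually taken: lookups of
     * shared (MPOL_F_SHARED) policies return a counted reference,
     * lookups of task-private policies do not. */
    static inline void example_mpol_cond_put(struct mempolicy *pol)
    {
            if (pol && (pol->flags & MPOL_F_SHARED))
                    __mpol_put(pol);        /* frees when the count hits zero */
    }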
|
gfp.h
     14  struct mempolicy;
    321  struct mempolicy *mpol, pgoff_t ilx, int nid);
    334  struct mempolicy *mpol, pgoff_t ilx, int nid)  (in folio_alloc_mpol_noprof())
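
folio_alloc_mpol() (the _noprof variant above is its allocation-profiling backend) allocates a folio according to an explicit policy plus interleave index. A hedged sketch of a typical call site, modeled on shmem's allocation path, not verbatim kernel source; get_vma_policy() and the pairing with mpol_cond_put() are as in the mm entries below:

    #include <linux/gfp.h>
    #include <linux/mempolicy.h>
    #include <linux/mm.h>

    /* Illustrative caller: pair the policy lookup with the allocation
     * and a conditional put of the returned reference. */
    static struct folio *example_alloc_folio(struct vm_area_struct *vma,
                                             unsigned long addr, gfp_t gfp,
                                             int order)
    {
            struct mempolicy *mpol;
            struct folio *folio;
            pgoff_t ilx;

            mpol = get_vma_policy(vma, addr, order, &ilx);
            folio = folio_alloc_mpol(gfp, order, mpol, ilx, numa_node_id());
            mpol_cond_put(mpol);
            return folio;
    }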
|
shmem_fs.h
     73  struct mempolicy *mpol;  /* default memory policy for mappings */
|
sched.h
     69  struct mempolicy;
   1354  struct mempolicy *mempolicy;  (member of struct task_struct)
|
mm_types.h
    884  struct mempolicy *vm_policy;  /* NUMA policy for the VMA */
|
mm.h
     40  struct mempolicy;
    663  int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
    675  struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
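
These are the vm_operations_struct hooks that let a mapping supply its own NUMA policy; shmem and SysV shm implement them for real, as the /linux/mm/ and /linux/ipc/ entries below show. A hypothetical wiring with invented my_* names; note the get_policy() match above is cut off mid-signature, and the trailing parameters used here (the faulting address and an interleave-index out-parameter) follow recent kernels and should be checked against the tree in use:

    #include <linux/mm.h>
    #include <linux/mempolicy.h>

    static int my_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
    {
            /* A real backing object would stash a counted reference to @new. */
            return 0;
    }

    static struct mempolicy *my_get_policy(struct vm_area_struct *vma,
                                           unsigned long addr, pgoff_t *ilx)
    {
            *ilx = 0;       /* no special interleave index */
            return NULL;    /* NULL: fall back to the task/system policy */
    }

    static const struct vm_operations_struct my_vm_ops = {
            .set_policy = my_set_policy,
            .get_policy = my_get_policy,
    };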
|
/linux/mm/

mempolicy.c
    136  static struct mempolicy default_policy = {
    141  static struct mempolicy preferred_node_policy[MAX_NUMNODES];
    339  struct mempolicy *get_task_policy(struct task_struct *p)
    341  struct mempolicy *pol = p->mempolicy;  (in get_task_policy())
    359  int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
    360  void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
    363  static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
    376  static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
    384  static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
    402  static int mpol_set_nodemask(struct mempolicy *pol,
    [all …]
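
The matches at 339 and 341 are get_task_policy(), whose fallback chain ties the first two entries (default_policy and preferred_node_policy[]) together: the task's own policy wins, then the per-node preferred policy for the local node, then the system default. A simplified sketch, close to but not verbatim mm/mempolicy.c:

    #include <linux/mempolicy.h>
    #include <linux/sched.h>

    struct mempolicy *get_task_policy(struct task_struct *p)
    {
            struct mempolicy *pol = p->mempolicy;
            int node;

            if (pol)
                    return pol;

            node = numa_node_id();
            if (node != NUMA_NO_NODE) {
                    pol = &preferred_node_policy[node];
                    if (pol->mode)          /* not initialised early in boot */
                            return pol;
            }
            return &default_policy;
    }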
|
swap.h
      6  struct mempolicy;
    265  struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
    268  struct mempolicy *mpol, pgoff_t ilx);
    378  gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)  (in swap_cluster_readahead())
|
swap_state.c
    406  struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,  (in __read_swap_cache_async())
    523  struct mempolicy *mpol;  (in read_swap_cache_async())
    623  struct mempolicy *mpol, pgoff_t ilx)  (in swap_cluster_readahead())
    731  struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)  (in swap_vma_readahead())
    806  struct mempolicy *mpol;  (in swapin_readahead())
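
These are the swap-in paths that thread a mempolicy from the faulting VMA down to the page allocator. A simplified sketch of the dispatch in swapin_readahead() (line 806), assuming recent kernels where the function returns a folio; swap_use_vma_readahead(), swap_vma_readahead() and swap_cluster_readahead() are mm-internal helpers from this file:

    #include <linux/mempolicy.h>
    #include <linux/swap.h>

    /* Resolve the VMA policy once, hand it plus the interleave index to
     * either readahead flavour, then drop the conditional reference. */
    struct folio *example_swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                           struct vm_fault *vmf)
    {
            struct mempolicy *mpol;
            struct folio *folio;
            pgoff_t ilx;

            mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
            folio = swap_use_vma_readahead() ?
                    swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
                    swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
            mpol_cond_put(mpol);
            return folio;
    }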
|
vma.h
    104  struct mempolicy *policy;
    288  struct mempolicy *new_pol);
|
shmem.c
    116  struct mempolicy *mpol;
   1682  static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
   1694  static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
   1696  struct mempolicy *mpol = NULL;  (in shmem_get_sbmpol())
   1706  static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
   1709  static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
   1715  static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
   1721  struct mempolicy *mpol;  (in shmem_swapin_cluster())
   1866  struct mempolicy *mpol;  (in shmem_alloc_folio())
   2859  static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
   [all …]
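
shmem keeps a superblock-wide default policy (the mpol member shown in shmem_fs.h above); shmem_get_sbmpol() (lines 1694-1696) hands out a counted reference to it. A simplified sketch; the raw stat_lock spinlock follows current mm/shmem.c and may differ in older trees:

    #include <linux/mempolicy.h>
    #include <linux/shmem_fs.h>

    static struct mempolicy *example_get_sbmpol(struct shmem_sb_info *sbinfo)
    {
            struct mempolicy *mpol = NULL;

            if (sbinfo->mpol) {
                    raw_spin_lock(&sbinfo->stat_lock);
                    mpol = sbinfo->mpol;
                    mpol_get(mpol);         /* pin while the lock is held */
                    raw_spin_unlock(&sbinfo->stat_lock);
            }
            return mpol;    /* caller drops the reference with mpol_put() */
    }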
|
Makefile
     86  obj-$(CONFIG_NUMA) += mempolicy.o
|
zswap.c
   1007  struct mempolicy *mpol;  (in zswap_writeback_entry())
|
/linux/Documentation/ABI/testing/

sysfs-kernel-mm-mempolicy-weighted-interleave
      1  What:  /sys/kernel/mm/mempolicy/weighted_interleave/
      6  What:  /sys/kernel/mm/mempolicy/weighted_interleave/nodeN
     12  utilized by tasks which have set their mempolicy to
     29  What:  /sys/kernel/mm/mempolicy/weighted_interleave/auto
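
These files expose per-node weights consumed by the weighted-interleave policy mode. A hedged userspace sketch (not from the kernel tree) of a task opting in to that mode via set_mempolicy(2); the MPOL_WEIGHTED_INTERLEAVE value mirrors the uapi enum in recent kernels, and the fallback define covers older libnuma headers:

    #define _GNU_SOURCE
    #include <numaif.h>     /* set_mempolicy(); build with -lnuma */
    #include <stdio.h>

    #ifndef MPOL_WEIGHTED_INTERLEAVE
    #define MPOL_WEIGHTED_INTERLEAVE 6      /* uapi value in recent kernels */
    #endif

    int main(void)
    {
            /* Interleave across nodes 0 and 1 using the weights the sysfs
             * files above configure (per-node nodeN files, or "auto"). */
            unsigned long nodemask = (1UL << 0) | (1UL << 1);

            if (set_mempolicy(MPOL_WEIGHTED_INTERLEAVE, &nodemask,
                              8 * sizeof(nodemask)))
                    perror("set_mempolicy");
            return 0;
    }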
|
sysfs-kernel-mm-mempolicy
      1  What:  /sys/kernel/mm/mempolicy/
|
/linux/tools/testing/vma/

vma_internal.h
    248  struct mempolicy {};
    385  struct mempolicy *vm_policy;  /* NUMA policy for the VMA */
    466  int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
    478  struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
    775  static inline void mpol_put(struct mempolicy *pol)
    977  static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
|
/linux/fs/proc/

internal.h
     19  struct mempolicy;
    397  struct mempolicy *task_mempolicy;
|
/linux/Documentation/driver-api/cxl/allocation/

page-allocator.rst
     15  NUMA nodes and mempolicy
     17  Unless a task explicitly registers a mempolicy, the default memory policy
|
/linux/Documentation/translations/zh_CN/core-api/

mm-api.rst
    117  mm/mempolicy.c
|
/linux/Documentation/admin-guide/mm/

numa_memory_policy.rst
    170  structure, struct mempolicy. Details of this structure will be
    207  preferred_node member of struct mempolicy. When the internal
    256  /sys/kernel/mm/mempolicy/weighted_interleave/
    269  Without this flag, any time a mempolicy is rebound because of a
    301  mempolicy is rebound because of a change in the set of allowed
    321  if not already set, sets the node in the mempolicy nodemask.
    347  To resolve use/free races, struct mempolicy contains an atomic reference
    350  the structure back to the mempolicy kmem cache when the reference count
    427  definitions are defined in <linux/mempolicy.h>.
    478  mempolicy range. Other address ranges are ignored. A home node is the NUMA node
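
Lines 347-350 describe the reference-count release path; a simplified sketch of what they refer to, with names following mm/mempolicy.c (policy_cache is the kmem cache the document mentions), not verbatim source:

    #include <linux/mempolicy.h>
    #include <linux/slab.h>

    static struct kmem_cache *policy_cache; /* created at init in mm/mempolicy.c */

    void __mpol_put(struct mempolicy *pol)
    {
            if (!atomic_dec_and_test(&pol->refcnt))
                    return;                 /* other references remain */
            kmem_cache_free(policy_cache, pol);
    }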
|
hugetlbpage.rst
    301  sysctl or attribute. When the ``nr_hugepages`` attribute is used, mempolicy
    323  #. Regardless of mempolicy mode [see
    326  specified in the mempolicy as if "interleave" had been specified.
    342  Any of the other mempolicy modes may be used to specify a single node.
    344  #. The nodes allowed mask will be derived from any non-default task mempolicy,
    351  #. Any task mempolicy specified--e.g., using numactl--will be constrained by
    383  resources exist, regardless of the task's mempolicy or cpuset constraints.
    386  as we don't know until fault time, when the faulting task's mempolicy is
|
/linux/Documentation/core-api/

mm-api.rst
    101  .. kernel-doc:: mm/mempolicy.c
|
/linux/kernel/

fork.c
   2114  p->mempolicy = mpol_dup(p->mempolicy);  (in copy_process())
   2115  if (IS_ERR(p->mempolicy)) {  (in copy_process())
   2116  retval = PTR_ERR(p->mempolicy);  (in copy_process())
   2117  p->mempolicy = NULL;  (in copy_process())
   2484  mpol_put(p->mempolicy);  (in copy_process())
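
These matches are copy_process() duplicating the parent's task policy. The point of the pattern is that mpol_dup() reports failure through ERR_PTR(), not NULL, so the child must decode the errno and clear the field; the mpol_put() at line 2484 is the later error-path unwind. A self-contained sketch of the idiom (the example_ name is invented):

    #include <linux/err.h>
    #include <linux/mempolicy.h>
    #include <linux/sched.h>

    static int example_copy_mempolicy(struct task_struct *p)
    {
            p->mempolicy = mpol_dup(p->mempolicy);
            if (IS_ERR(p->mempolicy)) {
                    int retval = PTR_ERR(p->mempolicy);

                    p->mempolicy = NULL;    /* never leave an ERR_PTR installed */
                    return retval;
            }
            return 0;
    }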
|
/linux/ipc/

shm.c
    571  static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
    581  static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
    585  struct mempolicy *mpol = vma->vm_policy;  (in shm_get_policy())
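
SysV shared memory forwards both policy hooks to the backing shmem file, so a policy installed through one attachment is visible through the shared object. A simplified sketch of shm_get_policy() (lines 581-585), not verbatim source; shm_file_data() is ipc/shm.c's private per-file helper:

    #include <linux/mempolicy.h>
    #include <linux/mm.h>

    static struct mempolicy *example_shm_get_policy(struct vm_area_struct *vma,
                                                    unsigned long addr,
                                                    pgoff_t *ilx)
    {
            struct shm_file_data *sfd = shm_file_data(vma->vm_file);
            struct mempolicy *mpol = vma->vm_policy;

            /* Delegate to the backing shmem file's hook when it has one. */
            if (sfd->vm_ops->get_policy)
                    mpol = sfd->vm_ops->get_policy(vma, addr, ilx);
            return mpol;
    }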
|
/linux/Documentation/admin-guide/cgroup-v1/

cpusets.rst
    342  except perhaps as modified by the task's NUMA mempolicy or cpuset
    349  or slab caches to ignore the task's NUMA mempolicy and be spread
    353  is turned off, then the currently specified NUMA mempolicy once again
    631  mempolicy MPOL_BIND, and the nodes to which it was bound overlap with
|