
Searched refs:mpol (Results 1 – 13 of 13) sorted by relevance

/linux/Documentation/translations/zh_CN/filesystems/
tmpfs.rst 76 mpol=default use the process allocation policy
78 mpol=prefer:Node prefers to allocate memory from the given Node
79 mpol=bind:NodeList allows allocation only from the specified NodeList
80 mpol=interleave prefers to allocate from each node in turn
81 mpol=interleave:NodeList allocates from each node of NodeList in turn
82 mpol=local prefers to allocate memory from the local node
86 ... expressed as delimiter-separated decimal numbers. For example, mpol=bind:0-3,5,7,9-15
104 For example, mpol=bind=static:NodeList is equivalent to an allocation policy of MPOL_BIND|MPOL_F_STATIC_NODES
109 It can be added to the mount point later via "mount -o remount,mpol=Policy:NodeList MountPoint".
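For concreteness, a minimal sketch of applying one of these policies at mount time through mount(2), assuming a NUMA-enabled kernel with tmpfs and CAP_SYS_ADMIN; the mount point /mnt/numa and the NodeList 0-3 are made-up example values, not taken from the documentation:

    /* Equivalent to: mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt/numa
     * ("/mnt/numa" and "0-3" are illustrative values only). */
    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            if (mount("tmpfs", "/mnt/numa", "tmpfs", 0, "mpol=interleave:0-3")) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }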
/linux/include/linux/
mempolicy.h 123 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
125 struct vm_area_struct *vma, struct mempolicy *mpol);
144 struct mempolicy **mpol, nodemask_t **nodemask);
163 extern int mpol_parse_str(char *str, struct mempolicy **mpol);
214 struct mempolicy *mpol) in mpol_shared_policy_init() argument
260 struct mempolicy **mpol, nodemask_t **nodemask) in huge_node() argument
262 *mpol = NULL; in huge_node()
283 static inline int mpol_parse_str(char *str, struct mempolicy **mpol) in mpol_parse_str() argument
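The mpol_parse_str() declaration above is the hook tmpfs uses to turn an "mpol=" option string into a struct mempolicy. A hedged in-kernel sketch of that call pattern follows; the wrapper name example_parse_mpol_option and the -EINVAL choice are hypothetical, and it assumes the usual convention that a non-zero return means the string did not parse:

    /* Sketch only: parse a policy string such as "interleave:0-3" into a
     * mempolicy, assuming a CONFIG_NUMA + CONFIG_TMPFS kernel where
     * mpol_parse_str() is available. */
    #include <linux/errno.h>
    #include <linux/mempolicy.h>

    static int example_parse_mpol_option(char *str, struct mempolicy **out)
    {
            struct mempolicy *new = NULL;

            if (mpol_parse_str(str, &new)) /* assumed: non-zero on parse error */
                    return -EINVAL;

            *out = new;                    /* caller owns the reference */
            return 0;
    }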
gfp.h 320 struct mempolicy *mpol, pgoff_t ilx, int nid);
333 struct mempolicy *mpol, pgoff_t ilx, int nid) in folio_alloc_mpol_noprof() argument
shmem_fs.h 73 struct mempolicy *mpol; /* default memory policy for mappings */ member
/linux/mm/
swap_state.c 365 struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated, in __read_swap_cache_async() argument
400 new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id()); in __read_swap_cache_async()
486 struct mempolicy *mpol; in read_swap_cache_async() local
494 mpol = get_vma_policy(vma, addr, 0, &ilx); in read_swap_cache_async()
495 folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx, in read_swap_cache_async()
497 mpol_cond_put(mpol); in read_swap_cache_async()
571 * @mpol: NUMA memory allocation policy to be applied
586 struct mempolicy *mpol, pgoff_t ilx) in swap_cluster_readahead() argument
615 gfp_mask, mpol, ilx, &page_allocated, false); in swap_cluster_readahead()
632 folio = __read_swap_cache_async(entry, gfp_mask, mpol, il in swap_cluster_readahead()
729 swap_vma_readahead(swp_entry_t targ_entry,gfp_t gfp_mask,struct mempolicy * mpol,pgoff_t targ_ilx,struct vm_fault * vmf) swap_vma_readahead() argument
804 struct mempolicy *mpol; swapin_readahead() local
[all...]
swap.h 74 struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
77 struct mempolicy *mpol, pgoff_t ilx);
154 gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx) in swap_cluster_readahead() argument
shmem.c 116 struct mempolicy *mpol; member
1719 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) in shmem_show_mpol() argument
1723 if (!mpol || mpol->mode == MPOL_DEFAULT) in shmem_show_mpol()
1726 mpol_to_str(buffer, sizeof(buffer), mpol); in shmem_show_mpol()
1728 seq_printf(seq, ",mpol=%s", buffer); in shmem_show_mpol()
1733 struct mempolicy *mpol = NULL; in shmem_get_sbmpol() local
1734 if (sbinfo->mpol) { in shmem_get_sbmpol()
1736 mpol = sbinfo->mpol; in shmem_get_sbmpol()
1743 shmem_show_mpol(struct seq_file * seq,struct mempolicy * mpol) shmem_show_mpol() argument
1758 struct mempolicy *mpol; shmem_swapin_cluster() local
1903 struct mempolicy *mpol; shmem_alloc_folio() local
2920 shmem_set_policy(struct vm_area_struct * vma,struct mempolicy * mpol) shmem_set_policy() argument
2946 struct mempolicy *mpol; shmem_get_pgoff_policy() local
4861 struct mempolicy *mpol = NULL; shmem_reconfigure() local
4955 struct mempolicy *mpol; shmem_show_options() local
[all...]
mempolicy.c 483 /* Slow path of a mpol destructor. */
1117 * Take a refcount on the mpol, because we are about to in do_get_mempolicy()
2241 * huge_node(@vma, @addr, @gfp_flags, @mpol)
2245 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2254 struct mempolicy **mpol, nodemask_t **nodemask) in huge_node() argument
2260 *mpol = get_vma_policy(vma, addr, hstate_vma(vma)->order, &ilx); in huge_node()
2261 *nodemask = policy_nodemask(gfp_flags, *mpol, ilx, &nid); in huge_node()
2367 * @nid: Preferred node (usually numa_node_id() but @mpol may override it).
3128 * @mpol: struct mempolicy to install
3130 * Install non-NULL @mpol i
3135 mpol_shared_policy_init(struct shared_policy * sp,struct mempolicy * mpol) mpol_shared_policy_init() argument
3341 mpol_parse_str(char * str,struct mempolicy ** mpol) mpol_parse_str() argument
[all...]
hugetlb.c 1369 struct mempolicy *mpol; in dequeue_hugetlb_folio_vma() local
1382 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); in dequeue_hugetlb_folio_vma()
1384 if (mpol_is_preferred_many(mpol)) { in dequeue_hugetlb_folio_vma()
1396 mpol_cond_put(mpol); in dequeue_hugetlb_folio_vma()
2312 struct mempolicy *mpol; in alloc_buddy_hugetlb_folio_with_mpol() local
2317 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); in alloc_buddy_hugetlb_folio_with_mpol()
2318 if (mpol_is_preferred_many(mpol)) { in alloc_buddy_hugetlb_folio_with_mpol()
2329 mpol_cond_put(mpol); in alloc_buddy_hugetlb_folio_with_mpol()
2380 struct mempolicy *mpol = get_task_policy(current); in policy_mbind_nodemask() local
2386 if (mpol in policy_mbind_nodemask()
6850 struct mempolicy *mpol; alloc_hugetlb_folio_vma() local
[all...]
zswap.c 1070 struct mempolicy *mpol; in zswap_writeback_entry() local
1080 mpol = get_task_policy(current); in zswap_writeback_entry()
1081 folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol, in zswap_writeback_entry()
slub.c 4036 struct mempolicy *mpol = current->mempolicy; in __slab_alloc_node() local
4038 if (mpol) { in __slab_alloc_node()
4046 if (mpol->mode != MPOL_BIND || !slab || in __slab_alloc_node()
4047 !node_isset(slab_nid(slab), mpol->nodes)) in __slab_alloc_node()
/linux/Documentation/filesystems/
tmpfs.rst 165 mpol=default use the process allocation policy
167 mpol=prefer:Node prefers to allocate memory from the given Node
168 mpol=bind:NodeList allocates memory only from nodes in NodeList
169 mpol=interleave prefers to allocate from each node in turn
170 mpol=interleave:NodeList allocates from each node of NodeList in turn
171 mpol=local prefers to allocate memory from the local node
176 largest node numbers in the range. For example, mpol=bind:0-3,5,7,9-15
198 For example, mpol=bind=static:NodeList, is the equivalent of an
201 Note that trying to mount a tmpfs with an mpol option will fail if the
206 online, then it is advisable to omit the mpol optio
[all...]
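As noted above, if not all nodes in the NodeList are guaranteed to be online at boot it is advisable to omit mpol at mount time; the policy can then be applied later with the remount form quoted in the translated documentation above. A minimal mount(2) sketch of that remount, again with made-up mount point and NodeList values:

    /* Equivalent to: mount -o remount,mpol=bind:0-3 /mnt/numa
     * (source and fstype are ignored with MS_REMOUNT; "/mnt/numa" and
     * "0-3" are illustrative values only). */
    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            if (mount(NULL, "/mnt/numa", NULL, MS_REMOUNT, "mpol=bind:0-3")) {
                    perror("remount");
                    return 1;
            }
            return 0;
    }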
/linux/kernel/futex/
core.c 339 struct mempolicy *mpol; in __futex_key_to_node() local
345 mpol = vma_policy(vma); in __futex_key_to_node()
346 if (!mpol) in __futex_key_to_node()
349 switch (mpol->mode) { in __futex_key_to_node()
351 node = first_node(mpol->nodes); in __futex_key_to_node()
355 if (mpol->home_node != NUMA_NO_NODE) in __futex_key_to_node()
356 node = mpol->home_node; in __futex_key_to_node()