/linux/mm/

sparse.c
  510: static int __init sparse_usage_init(int nid, unsigned long map_count)  in sparse_usage_init() (argument)
  514: size = mem_section_usage_size() * map_count;  in sparse_usage_init()
  533: * And number of present sections in this node is map_count.
  537: unsigned long map_count)  in sparse_init_nid() (argument)
  543: if (sparse_usage_init(nid, map_count)) {  in sparse_init_nid()
  548: sparse_buffer_init(map_count * section_map_size(), nid);  in sparse_init_nid()
  597: unsigned long pnum_end, pnum_begin, map_count = 1;  in sparse_init() (local)
  614: map_count++;  in sparse_init()
  618: sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);  in sparse_init()
  621: map_count ...  in sparse_init()
  [all ...]
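
The sparse.c hits show map_count being accumulated per NUMA node: sparse_init() starts a run at the first present section, bumps map_count while the node stays the same, and hands the finished count to sparse_init_nid(), which sizes its usage and memmap buffers from it. Below is a minimal userspace sketch of that counting loop; section_present() and section_nid() are hypothetical stand-ins for the kernel's section walk, not real kernel helpers.

    #include <stdio.h>

    #define NR_SECTIONS 16

    /* Hypothetical stand-ins for the kernel's present-section walk. */
    static int section_present(int pnum) { return pnum % 3 != 0; }
    static int section_nid(int pnum)     { return pnum < 8 ? 0 : 1; }

    int main(void)
    {
        int pnum_begin = -1, nid_begin = -1;
        unsigned long map_count = 0;

        for (int pnum = 0; pnum < NR_SECTIONS; pnum++) {
            if (!section_present(pnum))
                continue;
            if (pnum_begin < 0) {
                /* first present section starts the current run */
                pnum_begin = pnum;
                nid_begin = section_nid(pnum);
                map_count = 1;
                continue;
            }
            if (section_nid(pnum) == nid_begin) {
                map_count++;    /* same node: extend the run */
                continue;
            }
            /* node changed: flush the finished run, then start a new one */
            printf("node %d: run starts at section %d, %lu present sections\n",
                   nid_begin, pnum_begin, map_count);
            pnum_begin = pnum;
            nid_begin = section_nid(pnum);
            map_count = 1;
        }
        if (pnum_begin >= 0)
            printf("node %d: run starts at section %d, %lu present sections\n",
                   nid_begin, pnum_begin, map_count);
        return 0;
    }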

memory-tiers.c
  35:  int map_count;  (member)
  527: if (!node_memory_types[node].map_count++)  in __init_node_memory_type()
  649: node_memory_types[node].map_count--;  in clear_node_memory_type()
  654: if (!node_memory_types[node].map_count) {  in clear_node_memory_type()
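
The memory-tiers.c hits are a plain reference count: the post-increment test runs the setup only on the 0 -> 1 transition, and the teardown only fires when the count drops back to zero. A small sketch of that first-reference/last-reference pattern follows, with made-up struct contents and no locking (which the real code of course needs).

    #include <stdio.h>

    struct node_memory_type_map {
        const char *memtype;    /* hypothetical payload */
        int map_count;          /* references taken on this node's type */
    };

    static struct node_memory_type_map node_types[4];

    static void init_node_memory_type(int node, const char *memtype)
    {
        /* post-increment: the body runs only for the 0 -> 1 transition */
        if (!node_types[node].map_count++)
            node_types[node].memtype = memtype;
    }

    static void clear_node_memory_type(int node)
    {
        node_types[node].map_count--;
        /* tear down only when the last reference is gone */
        if (!node_types[node].map_count)
            node_types[node].memtype = NULL;
    }

    int main(void)
    {
        init_node_memory_type(0, "dram");
        init_node_memory_type(0, "dram");   /* second ref: no re-init */
        clear_node_memory_type(0);          /* still referenced */
        printf("after one put: %s (count %d)\n",
               node_types[0].memtype, node_types[0].map_count);
        clear_node_memory_type(0);          /* last ref drops the type */
        printf("after last put: %p (count %d)\n",
               (void *)node_types[0].memtype, node_types[0].map_count);
        return 0;
    }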

vma.c
  355:  mm->map_count++;  in vma_complete()
  386:  mm->map_count--;  in vma_complete()
  595:  if (vma->vm_mm->map_count >= sysctl_max_map_count)  in split_vma()
  686:  if (++i > mm->map_count + 10) {  in validate_mm()
  691:  if (i != mm->map_count) {  in validate_mm()
  692:  pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);  in validate_mm()
  1269: mm->map_count -= vms->vma_count;  in vms_complete_munmap_vmas()
  1343: * Make sure that map_count on return from munmap() will  in vms_gather_munmap_vmas()
  1344: * not exceed its limit; but let map_count g...  in vms_gather_munmap_vmas()
  [all ...]
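
Across mm/, mm->map_count is the bookkeeping for how many VMAs the mm has: merges and unmaps decrement it, splits and new mappings increment it and are refused once sysctl_max_map_count would be exceeded, and validate_mm() cross-checks the counter against an actual walk of the VMA tree. The toy sketch below shows those two uses (limit check and consistency check); struct mm here is a made-up stand-in, not mm_struct.

    #include <stdio.h>

    /* Hypothetical stand-ins for mm_struct bookkeeping and the sysctl. */
    #define SYSCTL_MAX_MAP_COUNT 4

    struct mm {
        int map_count;      /* number of VMAs the mm believes it has */
        int vmas_in_tree;   /* what a walk of the VMA tree would find */
    };

    /* A split creates one extra VMA, so refuse it at the limit. */
    static int split_vma(struct mm *mm)
    {
        if (mm->map_count >= SYSCTL_MAX_MAP_COUNT)
            return -1;      /* the kernel returns -ENOMEM here */
        mm->map_count++;
        mm->vmas_in_tree++;
        return 0;
    }

    /* Cross-check the counter against an actual walk, like validate_mm(). */
    static void validate_mm(const struct mm *mm)
    {
        if (mm->vmas_in_tree != mm->map_count)
            fprintf(stderr, "map_count %d vma iterator %d\n",
                    mm->map_count, mm->vmas_in_tree);
    }

    int main(void)
    {
        struct mm mm = { .map_count = 1, .vmas_in_tree = 1 };

        while (split_vma(&mm) == 0)
            ;
        printf("stopped at %d VMAs (limit %d)\n",
               mm.map_count, SYSCTL_MAX_MAP_COUNT);
        validate_mm(&mm);
        return 0;
    }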

nommu.c
  579:  vma->vm_mm->map_count--;  in cleanup_vma_from_mm()
  1201: current->mm->map_count++;  in do_mmap()
  1319: if (mm->map_count >= sysctl_max_map_count)  in split_vma()
  1369: mm->map_count++;  in split_vma()

debug.c
  179: "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"  in dump_mm()
  207: mm->map_count,  in dump_mm()

mmap.c
  377:  if (mm->map_count > sysctl_max_map_count)  in do_mmap()
  1311: BUG_ON(count != mm->map_count);  in exit_mmap()
  1812: mm->map_count++;  in dup_mmap()

mremap.c
  1038: if (current->mm->map_count >= sysctl_max_map_count - 3)  in prep_move_vma()
  1809: if ((current->mm->map_count + 2) >= sysctl_max_map_count - 3)  in check_mremap_params()
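
Both mremap.c hits check the limit with headroom rather than exactly, because a move can split VMAs at both ends and insert new ones. The snippet below only walks through that arithmetic with the default vm.max_map_count of 65530 and a made-up current count; it illustrates the checks quoted above and is not the kernel code.

    #include <stdio.h>

    /* Illustration of the headroom checks seen in mremap.c; the map_count
     * value is invented for the example. */
    int main(void)
    {
        int sysctl_max_map_count = 65530;   /* default vm.max_map_count */
        int map_count = 65525;              /* current VMAs in this mm */

        /* prep_move_vma(): keep room for up to 3 more VMAs from splits */
        if (map_count >= sysctl_max_map_count - 3)
            printf("move refused: only %d slots left\n",
                   sysctl_max_map_count - map_count);

        /* check_mremap_params(): the operation itself may add 2 VMAs, and
         * the same 3-slot slack still has to hold afterwards; at this count
         * only this stricter up-front check trips */
        if ((map_count + 2) >= sysctl_max_map_count - 3)
            printf("mremap refused up front\n");

        return 0;
    }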

huge_memory.c
  3149: int ref_count, map_count;  in __discard_anon_folio_pmd_locked() (local)
  3169: map_count = folio_mapcount(folio);  in __discard_anon_folio_pmd_locked()
  3192: if (ref_count != map_count + 1) {  in __discard_anon_folio_pmd_locked()
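
Here the local map_count is folio_mapcount(), and the discard path only proceeds when the folio's reference count equals map_count + 1, i.e. when the only references are the page-table mappings plus the one the caller holds; anything extra means an outside pin. A toy model of that comparison, with invented field names:

    #include <stdio.h>
    #include <stdbool.h>

    /* Toy model of the check in __discard_anon_folio_pmd_locked(): field
     * names are made up, the real counts come from folio_ref_count() and
     * folio_mapcount(). */
    struct toy_folio {
        int ref_count;  /* total references */
        int map_count;  /* references coming from page-table mappings */
    };

    static bool can_discard(const struct toy_folio *folio)
    {
        /* caller's reference + one per mapping is the expected total */
        return folio->ref_count == folio->map_count + 1;
    }

    int main(void)
    {
        struct toy_folio only_mapped = { .ref_count = 3, .map_count = 2 };
        struct toy_folio pinned      = { .ref_count = 4, .map_count = 2 };

        printf("only mapped: discard %s\n",
               can_discard(&only_mapped) ? "ok" : "refused");
        printf("extra pin:   discard %s\n",
               can_discard(&pinned) ? "ok" : "refused");
        return 0;
    }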

/linux/tools/testing/vma/

vma.c
  264:  mm->map_count = 0;  in cleanup_mm()
  503:  ASSERT_EQ(mm.map_count, 4);  in test_merge_new()
  522:  ASSERT_EQ(mm.map_count, 3);  in test_merge_new()
  539:  ASSERT_EQ(mm.map_count, 3);  in test_merge_new()
  558:  ASSERT_EQ(mm.map_count, 3);  in test_merge_new()
  576:  ASSERT_EQ(mm.map_count, 2);  in test_merge_new()
  594:  ASSERT_EQ(mm.map_count, 2);  in test_merge_new()
  611:  ASSERT_EQ(mm.map_count, 1);  in test_merge_new()
  970:  ASSERT_EQ(mm.map_count, 2);  in test_vma_merge_new_with_close()
  1020: ASSERT_EQ(mm.map_count, ...  in test_merge_existing()
  [all ...]
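
The userspace VMA test harness leans on the same counter: each merge scenario asserts the expected mm.map_count afterwards, and cleanup_mm() resets it between cases. A stripped-down sketch of that assert style, assuming a hypothetical toy_mm and a trivial ASSERT_EQ macro rather than the harness's own helpers:

    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal ASSERT_EQ in the spirit of the VMA test harness; the real
     * harness drives actual merge helpers, this only shows the check. */
    #define ASSERT_EQ(a, b)                                         \
        do {                                                        \
            if ((a) != (b)) {                                       \
                fprintf(stderr, "%s:%d: %s != %s\n",                \
                        __FILE__, __LINE__, #a, #b);                \
                exit(1);                                            \
            }                                                       \
        } while (0)

    struct toy_mm { int map_count; };

    /* Pretend two adjacent, compatible VMAs were merged into one. */
    static void merge_adjacent(struct toy_mm *mm)
    {
        mm->map_count--;
    }

    int main(void)
    {
        struct toy_mm mm = { .map_count = 4 };

        merge_adjacent(&mm);
        ASSERT_EQ(mm.map_count, 3);   /* a merge must shrink the count */
        mm.map_count = 0;             /* cleanup_mm()-style reset */
        ASSERT_EQ(mm.map_count, 0);
        return 0;
    }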

vma_internal.h
  254: int map_count; /* number of VMAs */  (member)

/linux/drivers/gpu/drm/qxl/

qxl_object.c
  41:  WARN_ON_ONCE(bo->map_count > 0);  in qxl_ttm_bo_destroy()
  163: bo->map_count++;  in qxl_bo_vmap_locked()
  172: bo->map_count = 1;  in qxl_bo_vmap_locked()
  245: bo->map_count--;  in qxl_bo_vunmap_locked()
  246: if (bo->map_count > 0)  in qxl_bo_vunmap_locked()
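
qxl uses map_count to share one vmap of a buffer object: the first caller creates the mapping and sets the count to 1, later callers only increment it, vunmap tears the mapping down only when the count reaches zero, and destroy warns if anything is still mapped. A userspace sketch of that pattern with invented toy_bo types (the real code additionally relies on the BO being locked, as the *_locked names suggest):

    #include <assert.h>
    #include <stdio.h>

    struct toy_bo {
        void *vaddr;            /* NULL while unmapped */
        unsigned int map_count;
        char backing[64];       /* stands in for the real pages */
    };

    static void *toy_bo_vmap(struct toy_bo *bo)
    {
        if (bo->vaddr) {
            bo->map_count++;    /* already mapped: share it */
            return bo->vaddr;
        }
        bo->vaddr = bo->backing;    /* "create" the mapping */
        bo->map_count = 1;
        return bo->vaddr;
    }

    static void toy_bo_vunmap(struct toy_bo *bo)
    {
        bo->map_count--;
        if (bo->map_count > 0)
            return;             /* still in use elsewhere */
        bo->vaddr = NULL;       /* last user: drop the mapping */
    }

    int main(void)
    {
        struct toy_bo bo = { 0 };

        toy_bo_vmap(&bo);
        toy_bo_vmap(&bo);
        toy_bo_vunmap(&bo);
        printf("still mapped: %s\n", bo.vaddr ? "yes" : "no");
        toy_bo_vunmap(&bo);
        assert(bo.map_count == 0 && !bo.vaddr); /* destroy-time WARN_ON analogue */
        return 0;
    }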

qxl_drv.h
  82: unsigned int map_count;  (member)

/linux/drivers/gpu/drm/vmwgfx/

vmwgfx_bo.c
  363: atomic_inc(&vbo->map_count);  in vmw_bo_map_and_cache_size()
  388: int map_count;  in vmw_bo_unmap() (local)
  393: map_count = atomic_dec_return(&vbo->map_count);  in vmw_bo_unmap()
  395: if (!map_count) {  in vmw_bo_unmap()
  432: atomic_set(&vmw_bo->map_count, 0);  in vmw_bo_init()
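
vmwgfx keeps the same kind of count in an atomic_t and uses atomic_dec_return(), so the caller that drops the last map is the one that sees zero and releases the cached mapping. The sketch below uses C11 atomics as a stand-in for the kernel atomic API; all names are illustrative.

    #include <stdatomic.h>
    #include <stdio.h>

    /* atomic_fetch_sub() stands in for the kernel's atomic_dec_return():
     * whoever observes the count hitting zero tears the mapping down. */
    struct toy_vbo {
        atomic_int map_count;
        void *cached_map;   /* illustrative */
    };

    static void toy_map(struct toy_vbo *vbo)
    {
        atomic_fetch_add(&vbo->map_count, 1);
        /* mapping setup/caching would happen here */
    }

    static void toy_unmap(struct toy_vbo *vbo)
    {
        /* fetch_sub returns the old value; old == 1 means we dropped the last map */
        if (atomic_fetch_sub(&vbo->map_count, 1) == 1) {
            vbo->cached_map = NULL;
            printf("last unmap: cached mapping released\n");
        }
    }

    int main(void)
    {
        struct toy_vbo vbo;

        atomic_init(&vbo.map_count, 0);  /* like atomic_set(..., 0) at init */
        vbo.cached_map = &vbo;

        toy_map(&vbo);
        toy_map(&vbo);
        toy_unmap(&vbo);    /* one map still outstanding */
        toy_unmap(&vbo);    /* prints the release message */
        return 0;
    }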

vmwgfx_bo.h
  75: * @map_count: The number of currently active maps. Will differ from the ...
  95: atomic_t map_count;  (member)

/linux/block/partitions/

mac.h
  14: __be32 map_count; /* # blocks in partition map */  (member)

mac.c
  80: blocks_in_map = be32_to_cpu(part->map_count);  in mac_partition()
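
For Mac partition tables, map_count is on-disk data rather than a kernel counter: the first partition-map entry stores, big-endian, how many blocks the partition map occupies, and mac_partition() byte-swaps it to know how many entries to iterate. A userspace sketch of that decode, using ntohl() in place of be32_to_cpu() and a trimmed-down, made-up entry struct:

    #include <arpa/inet.h>  /* ntohl(): userspace stand-in for be32_to_cpu() */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Trimmed-down partition map entry; the real struct mac_partition has
     * many more fields. */
    struct toy_mac_partition {
        uint16_t signature;     /* 'PM' */
        uint16_t pad;
        uint32_t map_count;     /* # blocks in partition map, big-endian */
    };

    int main(void)
    {
        unsigned char block[512] = { 0 };
        struct toy_mac_partition part;
        uint32_t blocks_in_map;

        /* Fake an on-disk entry claiming a 3-block partition map. */
        block[0] = 'P'; block[1] = 'M';
        block[4] = 0x00; block[5] = 0x00; block[6] = 0x00; block[7] = 0x03;

        memcpy(&part, block, sizeof(part));
        blocks_in_map = ntohl(part.map_count);  /* be32_to_cpu() equivalent */

        printf("partition map spans %u blocks; iterate that many entries\n",
               blocks_in_map);
        return 0;
    }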

/linux/fs/xfs/

xfs_buf_item.h
  66: unsigned int xfs_buf_inval_log_space(unsigned int map_count,

xfs_buf_item.c
  167: unsigned int map_count,  in xfs_buf_inval_log_space() (argument)
  177: return ret * map_count;  in xfs_buf_inval_log_space()

/linux/drivers/video/fbdev/omap2/omapfb/

omapfb.h
  52: atomic_t map_count;  (member)

omapfb-main.c
  1071: atomic_inc(&rg->map_count);  in mmap_user_open()
  1080: atomic_dec(&rg->map_count);  in mmap_user_close()
  1117: atomic_inc(&rg->map_count);  in omapfb_mmap()
  1309: WARN_ON(atomic_read(&rg->map_count));  in omapfb_free_fbmem()
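
omapfb counts live userspace mappings of a memory region: the vm_operations open()/close() callbacks and the initial mmap() adjust map_count, the sysfs and ioctl paths below refuse to reconfigure the region while it is non-zero, and freeing memory warns if anything is still mapped. A sketch of that "busy while mapped" rule with invented names; C11 atomics stand in for atomic_t, and the error value is a placeholder rather than the driver's actual return code.

    #include <stdatomic.h>
    #include <stdio.h>

    struct toy_region {
        atomic_int map_count;
        size_t size;
    };

    /* Analogue of the vm_operations open()/close() pair plus the first mmap(). */
    static void region_vm_open(struct toy_region *rg)  { atomic_fetch_add(&rg->map_count, 1); }
    static void region_vm_close(struct toy_region *rg) { atomic_fetch_sub(&rg->map_count, 1); }

    static int region_resize(struct toy_region *rg, size_t new_size)
    {
        if (atomic_load(&rg->map_count))
            return -1;  /* placeholder for the driver's busy error */
        rg->size = new_size;
        return 0;
    }

    int main(void)
    {
        struct toy_region rg = { .size = 4096 };
        atomic_init(&rg.map_count, 0);

        region_vm_open(&rg);                /* user mmap()s the framebuffer */
        if (region_resize(&rg, 8192) < 0)
            printf("resize refused while mapped\n");
        region_vm_close(&rg);               /* mapping torn down */
        if (region_resize(&rg, 8192) == 0)
            printf("resize ok, new size %zu\n", rg.size);
        return 0;
    }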

omapfb-sysfs.c
  450: if (atomic_read(&rg->map_count)) {  in store_size()

omapfb-ioctl.c
  226: if (atomic_read(&rg->map_count)) {  in omapfb_setup_mem()

/linux/drivers/crypto/axis/

artpec6_crypto.c
  251: size_t map_count;  (member)
  522: dma->map_count = 0;  in artpec6_crypto_init_dma_operation()
  614: if (dma->map_count >= ARRAY_SIZE(dma->maps))  in artpec6_crypto_dma_map_page()
  621: map = &dma->maps[dma->map_count++];  in artpec6_crypto_dma_map_page()
  683: for (i = 0; i < dma->map_count; i++) {  in artpec6_crypto_dma_unmap_all()
  689: dma->map_count = 0;  in artpec6_crypto_dma_unmap_all()
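
In artpec6_crypto, map_count is simply the fill level of a fixed-size table of DMA mapping descriptors: mapping refuses once the table is full, and the unmap-all path walks only the first map_count entries before resetting the count. A self-contained sketch of that bookkeeping with made-up types:

    #include <stdio.h>
    #include <string.h>

    #define MAX_MAPS 4

    struct toy_map { void *addr; size_t len; };
    struct toy_dma {
        struct toy_map maps[MAX_MAPS];
        size_t map_count;   /* fill level of maps[] */
    };

    static int toy_dma_map(struct toy_dma *dma, void *addr, size_t len)
    {
        if (dma->map_count >= MAX_MAPS)
            return -1;      /* table full: refuse, as the driver does */
        dma->maps[dma->map_count].addr = addr;
        dma->maps[dma->map_count].len  = len;
        dma->map_count++;
        return 0;
    }

    static void toy_dma_unmap_all(struct toy_dma *dma)
    {
        for (size_t i = 0; i < dma->map_count; i++)
            dma->maps[i].addr = NULL;   /* real code unmaps the DMA mapping */
        dma->map_count = 0;
    }

    int main(void)
    {
        struct toy_dma dma;
        char buf[5][16];
        int mapped = 0;

        memset(&dma, 0, sizeof(dma));
        for (int i = 0; i < 5; i++)
            if (toy_dma_map(&dma, buf[i], sizeof(buf[i])) == 0)
                mapped++;

        printf("mapped %d of 5 requests (table holds %d)\n", mapped, MAX_MAPS);
        toy_dma_unmap_all(&dma);
        printf("after unmap all: map_count = %zu\n", dma.map_count);
        return 0;
    }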

/linux/include/linux/

mm_types.h
  1023: int map_count; /* number of VMAs */  (member)

/linux/kernel/

fork.c
  1041: mm->map_count = 0;  in mm_init()