/linux/arch/csky/abiv1/mmap.c
    13: ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
    14: (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
    20: * SHMLBA bytes.
    50: (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))   in arch_get_unmapped_area()
    70: info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;   in arch_get_unmapped_area()
/linux/arch/arm/mm/mmap.c
    17: ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
    18: (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
    24: * SHMLBA bytes.
    52: (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))   in arch_get_unmapped_area()
    75: info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;   in arch_get_unmapped_area()
   105: (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))   in arch_get_unmapped_area_topdown()
   126: info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;   in arch_get_unmapped_area_topdown()
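The COLOUR_ALIGN() macro quoted above (arm lines 17-18, csky lines 13-14) rounds the requested address up to an SHMLBA boundary and then adds the cache-colour bits implied by the file offset, so every mapping of a given page lands on the same virtual-cache colour. A minimal user-space sketch of the same arithmetic, assuming 4 KiB pages and the ARM value SHMLBA = 4 * PAGE_SIZE:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define SHMLBA     (4 * PAGE_SIZE)            /* ARM: 16 KiB */

    /* Same arithmetic as the kernel's COLOUR_ALIGN(addr, pgoff):
     * round addr up to an SHMLBA boundary, then add the colour bits
     * that the file offset dictates. */
    static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
    {
            unsigned long base   = (addr + SHMLBA - 1) & ~(SHMLBA - 1);
            unsigned long colour = (pgoff << PAGE_SHIFT) & (SHMLBA - 1);

            return base + colour;
    }

    int main(void)
    {
            /* pgoff 3 forces colour 0x3000 on top of the aligned base. */
            printf("0x%lx\n", colour_align(0x12345, 3));  /* prints 0x17000 */
            return 0;
    }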
/linux/arch/arm/mm/copypage-v6.c
    20: #if SHMLBA > 16384
/linux/arch/xtensa/kernel/syscall.c
    36: ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
    37: (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1))
    44: err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);   in xtensa_shmat()
    69: ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))   in arch_get_unmapped_area()
/linux/arch/sparc/kernel/sys_sparc_64.c
    85: unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);   in COLOR_ALIGN()
    86: unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);   in COLOR_ALIGN()
    96: return PAGE_MASK & (SHMLBA - 1);   in get_align_mask()
   118: ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))   in arch_get_unmapped_area()
   186: ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))   in arch_get_unmapped_area_topdown()
   410: err = do_shmat(first, ptr, (int)second, &raddr, SHMLBA);   in SYSCALL_DEFINE6()
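get_align_mask() at line 96 shows how these arch_get_unmapped_area() variants pass the colour constraint down to the generic vm_unmapped_area() search: PAGE_MASK & (SHMLBA - 1) keeps only the colour bits above page granularity. A small sketch of that mask, using hypothetical values (4 KiB pages, 16 KiB SHMLBA) rather than sparc64's real L1DCACHE_SIZE:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    #define SHMLBA     (4 * PAGE_SIZE)    /* hypothetical 16 KiB colour window */

    int main(void)
    {
            /* Only the colour bits above page granularity survive the mask;
             * vm_unmapped_area() then searches for a gap whose start has
             * those bits equal to the colour requested via align_offset. */
            unsigned long align_mask = PAGE_MASK & (SHMLBA - 1);

            printf("align_mask = 0x%lx\n", align_mask);   /* 0x3000 */
            return 0;
    }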
/linux/drivers/infiniband/sw/rxe/rxe_mmap.c
   132: rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);   in rxe_create_mmap_info()
   135: rxe->mmap_offset += ALIGN(size, SHMLBA);   in rxe_create_mmap_info()
/linux/drivers/infiniband/sw/rdmavt/mmap.c
   132: rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);   in rvt_create_mmap_info()
   134: rdi->mmap_offset += ALIGN(size, SHMLBA);   in rvt_create_mmap_info()
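Both rxe and rdmavt keep a per-device mmap_offset that always advances in SHMLBA-sized steps via the kernel's ALIGN() macro, so every object later handed to user space through mmap() starts on a colour boundary. A user-space sketch of that bookkeeping, with ALIGN() expanded to its usual power-of-two form and a hypothetical 16 KiB SHMLBA:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define SHMLBA    (4 * PAGE_SIZE)                 /* assumed 16 KiB */
    /* Kernel-style ALIGN() for power-of-two alignment. */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    static unsigned long mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);

    /* Mimics the create_mmap_info() pattern above: hand out the current
     * offset, then advance it by the object size rounded up to SHMLBA
     * so the next object also starts colour-aligned. */
    static unsigned long create_mmap_info(unsigned long size)
    {
            unsigned long offset = mmap_offset;

            mmap_offset += ALIGN(size, SHMLBA);
            return offset;
    }

    int main(void)
    {
            printf("first  = 0x%lx\n", create_mmap_info(6000));  /* 0x4000 */
            printf("second = 0x%lx\n", create_mmap_info(100));   /* 0x8000 */
            return 0;
    }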
/linux/include/asm-generic/shmparam.h
     5: #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
/linux/arch/nios2/include/asm/shmparam.h
     8: #define SHMLBA CONFIG_NIOS2_DCACHE_SIZE
/linux/arch/sh/include/asm/shmparam.h
    15: #define SHMLBA 0x4000 /* attach addr a multiple of this */
/linux/arch/x86/include/asm/shmparam.h
     5: #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
/linux/arch/mips/include/asm/shmparam.h
    11: #define SHMLBA 0x40000 /* attach addr a multiple of this */
/linux/arch/alpha/include/asm/shmparam.h
     5: #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
/linux/arch/powerpc/include/asm/shmparam.h
     5: #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
/linux/arch/csky/include/asm/shmparam.h
     6: #define SHMLBA (4 * PAGE_SIZE)
/linux/arch/parisc/include/asm/shmparam.h
    16: * granularity (SHMLBA) but have to ensure that, if two pages are
    20: #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
/linux/arch/arm/include/asm/shmparam.h
    10: #define SHMLBA (4 * PAGE_SIZE) /* attach addr a multiple of this */
    13: * Enforce SHMLBA in shmat
/linux/arch/arc/include/asm/shmparam.h
    10: #define SHMLBA (2 * PAGE_SIZE)
    12: /* Enforce SHMLBA in shmat */
/linux/arch/xtensa/include/asm/shmparam.h
    19: #define SHMLBA ((PAGE_SIZE > DCACHE_WAY_SIZE)? PAGE_SIZE : DCACHE_WAY_SIZE)
/linux/arch/sparc/include/asm/shmparam_32.h
     8: #define SHMLBA (vac_cache_size ? vac_cache_size : PAGE_SIZE)
/linux/arch/sparc/include/asm/shmparam_64.h
     9: #define SHMLBA ((PAGE_SIZE > L1DCACHE_SIZE) ? PAGE_SIZE : L1DCACHE_SIZE)
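For the virtually-indexed caches above (xtensa, sparc), SHMLBA is the larger of the page size and the cache way or L1 size, so two SHMLBA-aligned mappings of the same data can never select different cache sets. A sketch of the colour computation under a hypothetical geometry (32 KiB way, 4 KiB pages):

    #include <stdio.h>

    #define PAGE_SIZE       4096UL
    #define DCACHE_WAY_SIZE 32768UL     /* hypothetical 32 KiB way */
    #define SHMLBA ((PAGE_SIZE > DCACHE_WAY_SIZE) ? PAGE_SIZE : DCACHE_WAY_SIZE)

    /* The "colour" is the set-index bits that lie above page granularity;
     * only these can differ between two virtual mappings of one page. */
    static unsigned long cache_colour(unsigned long vaddr)
    {
            return vaddr & (DCACHE_WAY_SIZE - 1) & ~(PAGE_SIZE - 1);
    }

    int main(void)
    {
            unsigned long a = 0x10000;
            unsigned long b = a + SHMLBA;    /* one full colour window apart */

            /* Same colour, so both mappings index the same cache sets. */
            printf("0x%lx 0x%lx\n", cache_colour(a), cache_colour(b));
            return 0;
    }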
/linux/arch/csky/abiv1/inc/abi/page.h
    10: return (addr1 ^ addr2) & (SHMLBA-1);   in pages_do_alias()
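pages_do_alias() above is the matching query: two virtual addresses of the same page can alias in the data cache exactly when they differ in the colour bits below SHMLBA. A user-space sketch of the same test, assuming the csky ABI-v1 value SHMLBA = 4 * PAGE_SIZE:

    #include <stdio.h>
    #include <stdbool.h>

    #define PAGE_SIZE 4096UL
    #define SHMLBA    (4 * PAGE_SIZE)    /* csky abiv1: 16 KiB */

    /* True when the two (page-aligned) addresses fall on different
     * cache colours, i.e. when aliasing/flushing care is needed. */
    static bool pages_do_alias(unsigned long addr1, unsigned long addr2)
    {
            return ((addr1 ^ addr2) & (SHMLBA - 1)) != 0;
    }

    int main(void)
    {
            printf("%d\n", pages_do_alias(0x10000, 0x14000));  /* 0: same colour */
            printf("%d\n", pages_do_alias(0x10000, 0x15000));  /* 1: different colour */
            return 0;
    }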
/linux/ipc/syscall.c
    86: second, &raddr, SHMLBA);   in ksys_ipc()
   121: #define COMPAT_SHMLBA SHMLBA
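ksys_ipc() (and COMPAT_SHMLBA on the compat path) forwards SHMLBA into do_shmat(), where the attach-address rule documented in shmat(2) is applied: a caller-supplied address must be a multiple of SHMLBA, or SHM_RND must be set so it can be rounded down to one. A hedged user-space sketch of that policy, not the kernel's exact code:

    #include <stdio.h>
    #include <errno.h>
    #include <sys/shm.h>      /* SHMLBA and SHM_RND from the libc headers */

    /* Sketch of the attach-address policy from shmat(2): a misaligned
     * address is rounded down when SHM_RND is set, otherwise the attach
     * fails with EINVAL. */
    static long resolve_attach_addr(unsigned long addr, int shmflg)
    {
            if (addr & (SHMLBA - 1)) {
                    if (shmflg & SHM_RND)
                            return addr & ~((unsigned long)SHMLBA - 1);
                    return -EINVAL;
            }
            return addr;
    }

    int main(void)
    {
            printf("0x%lx\n", (unsigned long)resolve_attach_addr(0x12345, SHM_RND)); /* rounded down */
            printf("%ld\n", resolve_attach_addr(0x12345, 0));                        /* -22 (EINVAL) */
            return 0;
    }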
/linux/arch/arc/mm/mmap.c
    22: * SHMLBA bytes.
    38: (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))   in arch_get_unmapped_area()
/linux/arch/loongarch/mm/mmap.c
    13: #define SHM_ALIGN_MASK (SHMLBA - 1)