/linux/drivers/char/agp/

efficeon-agp.c
    efficeon_insert_memory()  (pg_start: argument)
        236: static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
        243: printk(KERN_DEBUG PFX "efficeon_insert_memory(%lx, %d)\n", pg_start, count);
        246: if ((pg_start + mem->page_count) > num_entries)
        258: int index = pg_start + i;
    efficeon_remove_memory()  (pg_start: argument)
        285: static int efficeon_remove_memory(struct agp_memory * mem, off_t pg_start, int type)
        289: printk(KERN_DEBUG PFX "efficeon_remove_memory(%lx, %d)\n", pg_start, count);
        293: if ((pg_start + mem->page_count) > num_entries)
        299: int index = pg_start + i;
generic.c
    agp_bind_memory()  (pg_start: argument)
        408: int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
        424: ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
        430: curr->pg_start = pg_start;
    agp_unbind_memory()
        460: ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);
        466: curr->pg_start = 0;
    agp_generic_insert_memory()  (pg_start: argument)
        1025: int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
        1076: if (((pg_start + mem->page_count) > num_entries) ||
        1077:     ((pg_start + mem->page_count) < pg_start))
        1080: j = pg_start;
        1082: while (j < (pg_start + mem->page_count)) {
    [all …]
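The agp_generic_insert_memory() test at lines 1076-1077 rejects a bind that would either run past the end of the GATT or wrap around when pg_start + mem->page_count is computed. A minimal, self-contained sketch of an equivalent check, written so the comparison itself cannot overflow (gatt_range_ok() and its parameters are illustrative, not kernel API):

    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/types.h>          /* off_t */

    /*
     * Equivalent of the bounds test in agp_generic_insert_memory():
     * the pages [pg_start, pg_start + page_count) must fit inside a
     * table of num_entries slots, and the sum must not wrap.
     */
    static bool gatt_range_ok(off_t pg_start, size_t page_count, size_t num_entries)
    {
            if (pg_start < 0 || (size_t)pg_start > num_entries)
                    return false;
            /* Subtract instead of add so the comparison cannot overflow. */
            return page_count <= num_entries - (size_t)pg_start;
    }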
nvidia-agp.c
    nvidia_insert_memory()  (pg_start: argument)
        202: static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
        214: if ((pg_start + mem->page_count) >
        218: for (j = pg_start; j < (pg_start + mem->page_count); j++) {
        227: for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
    nvidia_remove_memory()  (pg_start: argument)
        241: static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
        254: for (i = pg_start; i < (mem->page_count + pg_start); i++)
amd-k7-agp.c
    amd_insert_memory()  (pg_start: argument)
        285: static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
        297: if ((pg_start + mem->page_count) > num_entries)
        300: j = pg_start;
        301: while (j < (pg_start + mem->page_count)) {
        314: for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
    amd_remove_memory()  (pg_start: argument)
        327: static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
        337: for (i = pg_start; i < (mem->page_count + pg_start); i++) {
sworks-agp.c
    serverworks_insert_memory()  (pg_start: argument)
        318: off_t pg_start, int type)
        329: if ((pg_start + mem->page_count) > num_entries) {
        333: j = pg_start;
        334: while (j < (pg_start + mem->page_count)) {
        347: for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
    serverworks_remove_memory()  (pg_start: argument)
        358: static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start,
        372: for (i = pg_start; i < (mem->page_count + pg_start); i++) {
ati-agp.c
    ati_insert_memory()  (pg_start: argument)
        260: off_t pg_start, int type)
        276: if ((pg_start + mem->page_count) > num_entries)
        279: j = pg_start;
        280: while (j < (pg_start + mem->page_count)) {
        294: for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
    ati_remove_memory()  (pg_start: argument)
        307: static int ati_remove_memory(struct agp_memory * mem, off_t pg_start,
        322: for (i = pg_start; i < (mem->page_count + pg_start); i++) {
intel-gtt.c
    i810_insert_dcache_entries()  (pg_start: argument)
        214: static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
        219: if ((pg_start + mem->page_count)
        226: for (i = pg_start; i < (pg_start + mem->page_count); i++) {
    intel_gmch_gtt_insert_sg_entries()  (pg_start: argument)
        883: unsigned int pg_start,
        890: j = pg_start;
    intel_fake_agp_insert_entries()  (pg_start: argument)
        932: off_t pg_start, int type)
        944: return i810_insert_dcache_entries(mem, pg_start, type);
        949: if (pg_start + mem->page_count > intel_private.gtt_total_entries)
        968: intel_gmch_gtt_insert_sg_entries(&st, pg_start, type);
        972: intel_gmch_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
    [all …]
parisc-agp.c
    parisc_agp_insert_memory()  (pg_start: argument)
        124: parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
        136: io_pg_start = info->io_pages_per_kpage * pg_start;
    parisc_agp_remove_memory()  (pg_start: argument)
        174: parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
        184: io_pg_start = info->io_pages_per_kpage * pg_start;
uninorth-agp.c
    uninorth_insert_memory()  (pg_start: argument)
        150: static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
        172: if ((pg_start + mem->page_count) > num_entries)
        175: gp = (u32 *) &agp_bridge->gatt_table[pg_start];
    uninorth_remove_memory()  (pg_start: argument)
        200: static int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
        218: gp = (u32 *) &agp_bridge->gatt_table[pg_start];
amd64-agp.c
    amd64_insert_memory()  (pg_start: argument)
        45: static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
        64: if (((unsigned long)pg_start + mem->page_count) > num_entries)
        67: j = pg_start;
        70: while (j < (pg_start + mem->page_count)) {
        81: for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
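Across the per-chipset drivers above (nvidia, amd-k7, serverworks, ati, amd64) the insert path has the same shape: range-check pg_start against the GATT size, verify that the target slots are free, then program one entry per page starting at pg_start. A compact sketch of that control flow over simplified, illustrative types (struct agp_mem, make_pte() and the flat gatt array are stand-ins, not the kernel's structures):

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>

    struct agp_mem {
            size_t page_count;
            const uint64_t *pages;          /* physical address of each page */
    };

    /* Stand-in for a driver's PTE encoding (mask_memory() in the real code). */
    static uint32_t make_pte(uint64_t phys)
    {
            return (uint32_t)(phys | 1);    /* low bit: entry valid */
    }

    static int insert_memory(uint32_t *gatt, size_t num_entries,
                             const struct agp_mem *mem, size_t pg_start)
    {
            size_t i;

            if (pg_start > num_entries ||
                mem->page_count > num_entries - pg_start)
                    return -EINVAL;                 /* would run past the table */

            for (i = 0; i < mem->page_count; i++)   /* refuse to overwrite */
                    if (gatt[pg_start + i] != 0)
                            return -EBUSY;

            for (i = 0; i < mem->page_count; i++)   /* program the entries */
                    gatt[pg_start + i] = make_pte(mem->pages[i]);

            return 0;
    }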
/linux/include/linux/

agpgart.h
    52: off_t pg_start;    /* starting page to populate */    (member)
    58: off_t pg_start;    (member)
    81: off_t pg_start;    /* starting page to populate */    (member)
agp_backend.h
    77: off_t pg_start;    (member)
/linux/arch/alpha/kernel/

core_titan.c
    585: long pg_start;    (member)
    titan_agp_setup()
        603: aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
        605: if (aper->pg_start < 0) {
        612: aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
    titan_agp_cleanup()
        625: status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
        629: iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
        630: status = iommu_release(aper->arena, aper->pg_start,
    titan_agp_bind_memory()  (pg_start: argument)
        683: titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
        686: return iommu_bind(aper->arena, aper->pg_start + pg_start,
    titan_agp_unbind_memory()  (pg_start: argument)
        691: titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
    [all …]
core_marvel.c
    852: long pg_start;    (member)
    marvel_agp_setup()
        869: aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
        872: if (aper->pg_start < 0) {
        879: aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
    marvel_agp_cleanup()
        892: status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
        896: iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
        897: status = iommu_release(aper->arena, aper->pg_start,
    marvel_agp_bind_memory()  (pg_start: argument)
        972: marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
        975: return iommu_bind(aper->arena, aper->pg_start + pg_start,
    marvel_agp_unbind_memory()  (pg_start: argument)
        980: marvel_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
    [all …]
pci_iommu.c
    iommu_release()  (pg_start: argument)
        855: iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
        865: for(i = pg_start; i < pg_start + pg_count; i++)
        869: iommu_arena_free(arena, pg_start, pg_count);
    iommu_bind()  (pg_start: argument)
        874: iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
        887: for(j = pg_start; j < pg_start + pg_count; j++) {
        894: for(i = 0, j = pg_start; i < pg_count; i++, j++)
    iommu_unbind()  (pg_start: argument)
        903: iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
        910: p = arena->ptes + pg_start;
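core_titan.c and core_marvel.c drive these pci_iommu.c primitives in the same order: reserve a block of IOMMU pages for the AGP aperture at setup, bind caller pages at aper->pg_start + pg_start when memory is attached, and unbind/release on teardown. A hedged, user-space sketch of that call sequence; the arena_* helpers only log and are placeholders standing in for iommu_reserve()/iommu_bind()/iommu_unbind()/iommu_release():

    #include <stdio.h>

    struct arena { long next_free; };       /* toy stand-in for pci_iommu_arena */

    static long arena_reserve(struct arena *a, long pg_count)
    {
            long start = a->next_free;      /* pretend allocation */
            a->next_free += pg_count;
            return start;
    }

    static void arena_bind(struct arena *a, long pg, long n)
    {
            (void)a; printf("bind    [%ld, +%ld)\n", pg, n);
    }

    static void arena_unbind(struct arena *a, long pg, long n)
    {
            (void)a; printf("unbind  [%ld, +%ld)\n", pg, n);
    }

    static void arena_release(struct arena *a, long pg, long n)
    {
            (void)a; printf("release [%ld, +%ld)\n", pg, n);
    }

    int main(void)
    {
            struct arena arena = { .next_free = 0 };
            long aper_pg_start, aper_pg_count = 1024;

            /* setup: carve the aperture out of the IOMMU arena */
            aper_pg_start = arena_reserve(&arena, aper_pg_count);

            /* bind: map pages at an offset inside the reserved aperture */
            long pg_start = 16, pg_count = 4;
            arena_bind(&arena, aper_pg_start + pg_start, pg_count);

            /* teardown: undo in reverse order */
            arena_unbind(&arena, aper_pg_start + pg_start, pg_count);
            arena_release(&arena, aper_pg_start, aper_pg_count);
            return 0;
    }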
/linux/include/uapi/linux/

agpgart.h
    80: __kernel_off_t pg_start;    /* starting page to populate */    (member)
    103: __kernel_off_t pg_start;   /* starting page to populate */    (member)
/linux/drivers/hv/

hv_balloon.c
    handle_pg_range()  (pg_start: argument)
        835: static unsigned long handle_pg_range(unsigned long pg_start,
        838: unsigned long start_pfn = pg_start;
        847: pg_start);
    process_hot_add()  (pg_start: argument)
        915: static unsigned long process_hot_add(unsigned long pg_start,
        927: covered = pfn_covered(pg_start, pfn_cnt);
        949: ha_region->covered_start_pfn = pg_start;
        950: ha_region->covered_end_pfn = pg_start;
        963: return handle_pg_range(pg_start, pfn_cnt);
    hot_add_req()  (pg_start: local)
        972: unsigned long pg_start, pfn_cnt;
        982: pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
    [all …]
/linux/arch/arc/include/asm/

ptrace.h
    137: unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \
    138: (struct pt_regs *)(pg_start + THREAD_SIZE) - 1; \
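These two macro lines find the base of the current kernel stack by rounding the stack pointer down to a THREAD_SIZE boundary, then treat the struct pt_regs saved at the very top of that stack as the last object below pg_start + THREAD_SIZE. A user-space sketch of the same pointer arithmetic (the THREAD_SIZE value and the pt_regs layout here are illustrative, not ARC's):

    #include <stdio.h>
    #include <stdlib.h>

    #define THREAD_SIZE 8192UL              /* illustrative; must be a power of two */

    struct pt_regs { unsigned long r[32]; };        /* placeholder layout */

    int main(void)
    {
            /* Stand-in for a kernel stack: a THREAD_SIZE-aligned allocation. */
            void *stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
            if (!stack)
                    return 1;

            unsigned long sp = (unsigned long)stack + 1000;         /* somewhere inside */

            unsigned long pg_start = sp & ~(THREAD_SIZE - 1);       /* stack base */
            struct pt_regs *regs = (struct pt_regs *)(pg_start + THREAD_SIZE) - 1;

            printf("stack base %#lx, pt_regs at %p\n", pg_start, (void *)regs);
            free(stack);
            return 0;
    }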
/linux/include/drm/intel/

intel-gtt.h
    28: unsigned int pg_start,
/linux/tools/mm/

page-types.c
    174: static unsigned long pg_start[MAX_VMAS];    (variable)
    walk_task()
        767: if (pg_start[i] >= end)
        770: start = max_t(unsigned long, pg_start[i], index);
    parse_pid()
        927: pg_start[nr_vmas] = vm_start / page_size;
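page-types.c records each VMA's first page index in pg_start[] (parse_pid() divides the VMA start address by the page size), and walk_task() then clips the user-requested window [index, end) against each VMA before walking it; line 770 is the max() half of that intersection. A small illustrative sketch of the clipping step (clip_range() and its parameters are made up for this example, not the tool's API):

    #include <stdbool.h>

    /*
     * Intersect the requested page window [index, end) with one VMA's
     * page range [vma_start, vma_end).  Returns false when they do not
     * overlap, otherwise writes the clipped range to *start/*stop.
     */
    static bool clip_range(unsigned long vma_start, unsigned long vma_end,
                           unsigned long index, unsigned long end,
                           unsigned long *start, unsigned long *stop)
    {
            if (vma_start >= end || vma_end <= index)
                    return false;

            *start = vma_start > index ? vma_start : index;   /* max */
            *stop  = vma_end   < end   ? vma_end   : end;     /* min */
            return true;
    }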
/linux/drivers/misc/

fastrpc.c
    fastrpc_get_args()  (pg_start: local)
        943: u64 pg_start, pg_end;
        999: pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
        1002: pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
        1022: pg_start = (rpra[i].buf.pv & PAGE_MASK) >> PAGE_SHIFT;
        1024: pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
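fastrpc_get_args() turns a buffer's address and length into a whole number of pages: pg_start and pg_end are the page indices of the first and last byte, so the buffer occupies (pg_end - pg_start + 1) pages. A stand-alone sketch of that calculation (the PAGE_SHIFT value and the way pg_end is derived from the length are assumptions for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                    /* illustrative: 4 KiB pages */
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Bytes of whole pages covered by the buffer [ptr, ptr + len). */
    static uint64_t pages_size(uint64_t ptr, uint64_t len)
    {
            uint64_t pg_start = (ptr & PAGE_MASK) >> PAGE_SHIFT;
            uint64_t pg_end   = ((ptr + len - 1) & PAGE_MASK) >> PAGE_SHIFT;

            return (pg_end - pg_start + 1) * PAGE_SIZE;
    }

    int main(void)
    {
            /* A 100-byte buffer straddling a page boundary spans two pages. */
            printf("%llu\n", (unsigned long long)pages_size(0xfff0, 100));  /* 8192 */
            return 0;
    }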
/linux/fs/fuse/

inode.c
    fuse_reverse_inval_inode()  (pg_start: local)
        562: pgoff_t pg_start;
        577: pg_start = offset >> PAGE_SHIFT;
        583: pg_start, pg_end);
/linux/fs/f2fs/

f2fs.h
    3631: int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);