Lines matching full:chunk in drivers/gpu/drm/nouveau/nouveau_dmem.c

97 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page); in page_to_drm() local
99 return chunk->drm; in page_to_drm()
104 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page); in nouveau_dmem_page_addr() local
106 chunk->pagemap.range.start; in nouveau_dmem_page_addr()
108 return chunk->bo->offset + off; in nouveau_dmem_page_addr()
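
The matches at 97-108 come from two small accessors: page_to_drm() maps a device-private struct page back to the nouveau_drm instance that owns its chunk, and nouveau_dmem_page_addr() translates a page into its physical VRAM address. A minimal reconstruction of the latter, with the unmatched lines filled in from the upstream layout (exact context may differ by kernel version):

	unsigned long nouveau_dmem_page_addr(struct page *page)
	{
		struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
		/* Byte offset of this page within the chunk's pagemap range... */
		unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
					chunk->pagemap.range.start;

		/* ...relocated onto the pinned buffer object's VRAM offset. */
		return chunk->bo->offset + off;
	}
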
113 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page); in nouveau_dmem_page_free() local
114 struct nouveau_dmem *dmem = chunk->drm->dmem; in nouveau_dmem_page_free()
120 WARN_ON(!chunk->callocated); in nouveau_dmem_page_free()
121 chunk->callocated--; in nouveau_dmem_page_free()
123 * FIXME when chunk->callocated reach 0 we should add the chunk to in nouveau_dmem_page_free()
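
Lines 113-123 are the MEMORY_DEVICE_PRIVATE page-free callback: the page goes back onto a per-device free list and the owning chunk's allocation count drops. The FIXME at 123 is truncated by the match; upstream it continues "...a list of unused chunks so that we can truly free the chunk". A sketch, assuming the upstream free_pages list and lock fields on struct nouveau_dmem:

	static void nouveau_dmem_page_free(struct page *page)
	{
		struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
		struct nouveau_dmem *dmem = chunk->drm->dmem;

		spin_lock(&dmem->lock);
		/* Push the page onto the device-wide free list. */
		page->zone_device_data = dmem->free_pages;
		dmem->free_pages = page;

		WARN_ON(!chunk->callocated);
		chunk->callocated--;
		/*
		 * FIXME when chunk->callocated reach 0 we should add the chunk to
		 * a list of unused chunks so that we can truly free the chunk.
		 */
		spin_unlock(&dmem->lock);
	}
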
230 struct nouveau_dmem_chunk *chunk; in nouveau_dmem_chunk_alloc() local
237 chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); in nouveau_dmem_chunk_alloc()
238 if (chunk == NULL) { in nouveau_dmem_chunk_alloc()
251 chunk->drm = drm; in nouveau_dmem_chunk_alloc()
252 chunk->pagemap.type = MEMORY_DEVICE_PRIVATE; in nouveau_dmem_chunk_alloc()
253 chunk->pagemap.range.start = res->start; in nouveau_dmem_chunk_alloc()
254 chunk->pagemap.range.end = res->end; in nouveau_dmem_chunk_alloc()
255 chunk->pagemap.nr_range = 1; in nouveau_dmem_chunk_alloc()
256 chunk->pagemap.ops = &nouveau_dmem_pagemap_ops; in nouveau_dmem_chunk_alloc()
257 chunk->pagemap.owner = drm->dev; in nouveau_dmem_chunk_alloc()
261 &chunk->bo); in nouveau_dmem_chunk_alloc()
265 ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false); in nouveau_dmem_chunk_alloc()
269 ptr = memremap_pages(&chunk->pagemap, numa_node_id()); in nouveau_dmem_chunk_alloc()
276 list_add(&chunk->list, &drm->dmem->chunks); in nouveau_dmem_chunk_alloc()
279 pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT; in nouveau_dmem_chunk_alloc()
287 chunk->callocated++; in nouveau_dmem_chunk_alloc()
296 nouveau_bo_unpin(chunk->bo); in nouveau_dmem_chunk_alloc()
298 nouveau_bo_fini(chunk->bo); in nouveau_dmem_chunk_alloc()
300 release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range)); in nouveau_dmem_chunk_alloc()
302 kfree(chunk); in nouveau_dmem_chunk_alloc()
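
Lines 230-302 are the chunk allocator, and the matches imply this order of operations: allocate the descriptor, reserve a free physical address range, describe it as MEMORY_DEVICE_PRIVATE, back it with a pinned VRAM buffer object, create struct pages via memremap_pages(), then link the chunk into the device list and seed the free-page list. A condensed sketch; the resource request, error labels, and free-list seeding are assumptions based on the upstream code and may differ by version:

	static int nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
	{
		struct nouveau_dmem_chunk *chunk;
		struct resource *res;
		struct page *page;
		void *ptr;
		unsigned long i, pfn_first;
		int ret;

		chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
		if (chunk == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		/* Reserve unused physical address space for device-private pages. */
		res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
					      "nouveau_dmem");
		if (IS_ERR(res)) {
			ret = PTR_ERR(res);
			goto out_free;
		}

		chunk->drm = drm;
		chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
		chunk->pagemap.range.start = res->start;
		chunk->pagemap.range.end = res->end;
		chunk->pagemap.nr_range = 1;
		chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
		chunk->pagemap.owner = drm->dev;

		/* Back the range with VRAM, pinned for the chunk's lifetime. */
		ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
				     NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
				     &chunk->bo);
		if (ret)
			goto out_release;

		ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
		if (ret)
			goto out_bo_free;

		ptr = memremap_pages(&chunk->pagemap, numa_node_id());
		if (IS_ERR(ptr)) {
			ret = PTR_ERR(ptr);
			goto out_bo_unpin;
		}

		mutex_lock(&drm->dmem->mutex);
		list_add(&chunk->list, &drm->dmem->chunks);
		mutex_unlock(&drm->dmem->mutex);

		/* Seed the free list with all but one page; hand that one out. */
		pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
		page = pfn_to_page(pfn_first);
		spin_lock(&drm->dmem->lock);
		for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
			page->zone_device_data = drm->dmem->free_pages;
			drm->dmem->free_pages = page;
		}
		*ppage = page;
		chunk->callocated++;
		spin_unlock(&drm->dmem->lock);

		return 0;

	out_bo_unpin:
		nouveau_bo_unpin(chunk->bo);
	out_bo_free:
		nouveau_bo_fini(chunk->bo);
	out_release:
		release_mem_region(chunk->pagemap.range.start,
				   range_len(&chunk->pagemap.range));
	out_free:
		kfree(chunk);
	out:
		return ret;
	}
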
310 struct nouveau_dmem_chunk *chunk; in nouveau_dmem_page_alloc_locked() local
318 chunk = nouveau_page_to_chunk(page); in nouveau_dmem_page_alloc_locked()
319 chunk->callocated++; in nouveau_dmem_page_alloc_locked()
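
Lines 310-319 show the fast path of the per-page allocator: pop a page off the free list and bump the owning chunk's callocated count, falling back to nouveau_dmem_chunk_alloc() only when the list is empty. A sketch, again assuming the upstream free-list fields (the zone-device page initialization upstream does before returning is omitted here):

	static struct page *nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
	{
		struct nouveau_dmem_chunk *chunk;
		struct page *page = NULL;
		int ret;

		spin_lock(&drm->dmem->lock);
		if (drm->dmem->free_pages) {
			page = drm->dmem->free_pages;
			drm->dmem->free_pages = page->zone_device_data;
			chunk = nouveau_page_to_chunk(page);
			chunk->callocated++;
			spin_unlock(&drm->dmem->lock);
		} else {
			spin_unlock(&drm->dmem->lock);
			/* Slow path: grow the pool by one chunk. */
			ret = nouveau_dmem_chunk_alloc(drm, &page);
			if (ret)
				return NULL;
		}

		return page;
	}
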
342 struct nouveau_dmem_chunk *chunk; in nouveau_dmem_resume() local
349 list_for_each_entry(chunk, &drm->dmem->chunks, list) { in nouveau_dmem_resume()
350 ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false); in nouveau_dmem_resume()
360 struct nouveau_dmem_chunk *chunk; in nouveau_dmem_suspend() local
366 list_for_each_entry(chunk, &drm->dmem->chunks, list) in nouveau_dmem_suspend()
367 nouveau_bo_unpin(chunk->bo); in nouveau_dmem_suspend()
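
Lines 342-367 are the suspend/resume pair: suspend walks the chunk list and unpins each backing buffer object, resume re-pins them, both under the dmem mutex. A sketch of the resume side (the early return and the WARN_ON on pin failure are assumptions from upstream):

	void nouveau_dmem_resume(struct nouveau_drm *drm)
	{
		struct nouveau_dmem_chunk *chunk;
		int ret;

		if (drm->dmem == NULL)
			return;

		mutex_lock(&drm->dmem->mutex);
		list_for_each_entry(chunk, &drm->dmem->chunks, list) {
			ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
			/* FIXME handle pin failure */
			WARN_ON(ret);
		}
		mutex_unlock(&drm->dmem->mutex);
	}
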
372 * Evict all pages mapping a chunk.
375 nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk) in nouveau_dmem_evict_chunk() argument
377 unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT; in nouveau_dmem_evict_chunk()
386 migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT, in nouveau_dmem_evict_chunk()
400 nouveau_dmem_copy_one(chunk->drm, in nouveau_dmem_evict_chunk()
406 nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan); in nouveau_dmem_evict_chunk()
413 dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL); in nouveau_dmem_evict_chunk()
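
Lines 372-413 are the eviction helper used at teardown: every device-private page still backed by the chunk is migrated to system RAM so the VRAM and the pagemap can be torn down. The matches show the core calls; the sketch below fills in the pfn/dma arrays, the copy loop, and the fence wait from the upstream shape (the __GFP_NOFAIL allocations and the migrate_device_pages()/migrate_device_finalize() pairing are assumptions):

	static void nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
	{
		unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
		unsigned long *src_pfns, *dst_pfns;
		dma_addr_t *dma_addrs;
		struct nouveau_fence *fence;

		/* Eviction must not fail: the device is going away. */
		src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
		dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
		dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);

		/* Collect and isolate every migratable page of the chunk. */
		migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
				npages);

		for (i = 0; i < npages; i++) {
			if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
				struct page *dpage;

				dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
				dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
				/* Copy VRAM contents back into the system page. */
				nouveau_dmem_copy_one(chunk->drm,
						migrate_pfn_to_page(src_pfns[i]), dpage,
						&dma_addrs[i]);
			}
		}

		/* Wait for the copies, then commit the migration. */
		nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan);
		migrate_device_pages(src_pfns, dst_pfns, npages);
		nouveau_dmem_fence_done(&fence);
		migrate_device_finalize(src_pfns, dst_pfns, npages);
		kvfree(src_pfns);
		kvfree(dst_pfns);
		for (i = 0; i < npages; i++)
			dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i],
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
		kvfree(dma_addrs);
	}
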
420 struct nouveau_dmem_chunk *chunk, *tmp; in nouveau_dmem_fini() local
427 list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) { in nouveau_dmem_fini()
428 nouveau_dmem_evict_chunk(chunk); in nouveau_dmem_fini()
429 nouveau_bo_unpin(chunk->bo); in nouveau_dmem_fini()
430 nouveau_bo_fini(chunk->bo); in nouveau_dmem_fini()
431 WARN_ON(chunk->callocated); in nouveau_dmem_fini()
432 list_del(&chunk->list); in nouveau_dmem_fini()
433 memunmap_pages(&chunk->pagemap); in nouveau_dmem_fini()
434 release_mem_region(chunk->pagemap.range.start, in nouveau_dmem_fini()
435 range_len(&chunk->pagemap.range)); in nouveau_dmem_fini()
436 kfree(chunk); in nouveau_dmem_fini()
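
Lines 420-436 are the teardown loop, where ordering matters: each chunk is evicted first (so no device-private pages remain in use), its buffer object unpinned and freed, and only then are the struct pages and the physical range released; the WARN_ON(chunk->callocated) flags pages that leaked past eviction. A sketch of the surrounding function, assuming the upstream mutex around the list walk:

	void nouveau_dmem_fini(struct nouveau_drm *drm)
	{
		struct nouveau_dmem_chunk *chunk, *tmp;

		if (drm->dmem == NULL)
			return;

		mutex_lock(&drm->dmem->mutex);

		list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
			nouveau_dmem_evict_chunk(chunk);
			nouveau_bo_unpin(chunk->bo);
			nouveau_bo_fini(chunk->bo);
			WARN_ON(chunk->callocated);
			list_del(&chunk->list);
			memunmap_pages(&chunk->pagemap);
			release_mem_region(chunk->pagemap.range.start,
					   range_len(&chunk->pagemap.range));
			kfree(chunk);
		}

		mutex_unlock(&drm->dmem->mutex);
		kfree(drm->dmem);
		drm->dmem = NULL;
	}
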