Matched lines from drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c (partial listing; the number at the start of each line is the kernel source line number, and the trailing annotation names the containing function).
26 #include <linux/dma-fence-array.h>
50 * amdgpu_pasid_alloc - Allocate a PASID
56 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
57 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
62 int pasid = -EINVAL; in amdgpu_pasid_alloc()
64 for (bits = min(bits, 31U); bits > 0; bits--) { in amdgpu_pasid_alloc()
66 1U << (bits - 1), 1U << bits, in amdgpu_pasid_alloc()
68 if (pasid != -ENOSPC) in amdgpu_pasid_alloc()
72 if (pasid >= 0) in amdgpu_pasid_alloc()
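
The allocation loop above asks the IDA for a PASID in the range [1 << (bits - 1), 1 << bits) and, whenever that range is exhausted (-ENOSPC), retries one bit narrower so that small PASIDs stay available for devices that only support narrow ones. Below is a standalone userspace sketch of the same narrowing retry (not driver code); alloc_in_range() is a hypothetical bitmap helper standing in for the kernel IDA.

#include <stdio.h>

#define TOY_MAX_PASID (1u << 8)			/* keep the toy range small */
static unsigned char used[TOY_MAX_PASID];	/* hypothetical stand-in for the IDA */

/* Return the first free id in [lo, hi), or -1 when the range is exhausted
 * (the analogue of -ENOSPC in the real code). */
static int alloc_in_range(unsigned lo, unsigned hi)
{
	unsigned i;

	for (i = lo; i < hi; i++) {
		if (!used[i]) {
			used[i] = 1;
			return (int)i;
		}
	}
	return -1;
}

/* Mirror of the narrowing loop: try the widest range first, then halve it. */
static int toy_pasid_alloc(unsigned bits)
{
	int pasid = -1;

	for (bits = bits > 7 ? 7 : bits; bits > 0; bits--) {
		pasid = alloc_in_range(1u << (bits - 1), 1u << bits);
		if (pasid >= 0)	/* keep narrowing only while the range is full */
			break;
	}
	return pasid;
}

int main(void)
{
	printf("first 8-bit PASID:  %d\n", toy_pasid_alloc(8));
	printf("second 8-bit PASID: %d\n", toy_pasid_alloc(8));
	return 0;
}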
79 * amdgpu_pasid_free - Free a PASID
94 amdgpu_pasid_free(cb->pasid); in amdgpu_pasid_free_cb()
100 * amdgpu_pasid_free_delayed - free pasid when fences signal
130 cb->pasid = pasid; in amdgpu_pasid_free_delayed()
131 if (dma_fence_add_callback(fence, &cb->cb, in amdgpu_pasid_free_delayed()
133 amdgpu_pasid_free_cb(fence, &cb->cb); in amdgpu_pasid_free_delayed()
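
amdgpu_pasid_free_delayed() postpones the actual free until a fence signals: it embeds the PASID in a callback structure and registers it with dma_fence_add_callback(); if that call reports the fence has already signaled, the callback is invoked inline and the PASID is freed immediately. A minimal userspace sketch of that register-or-run-now pattern, with hypothetical toy_fence/toy_cb types standing in for struct dma_fence and its callback wrapper:

#include <stdbool.h>
#include <stdio.h>

struct toy_fence { bool signaled; };	/* stand-in for struct dma_fence */
struct toy_cb    { unsigned pasid; };	/* stand-in for the callback wrapper */

/* Returns false when the fence already signaled, mirroring
 * dma_fence_add_callback() returning -ENOENT in that case. */
static bool toy_fence_add_callback(struct toy_fence *f, struct toy_cb *cb)
{
	(void)cb;		/* a real implementation would queue cb here */
	return !f->signaled;
}

static void toy_pasid_free_cb(struct toy_cb *cb)
{
	printf("freeing PASID %u\n", cb->pasid);
}

static void toy_pasid_free_delayed(struct toy_fence *fence, struct toy_cb *cb,
				   unsigned pasid)
{
	cb->pasid = pasid;
	if (!toy_fence_add_callback(fence, cb))
		toy_pasid_free_cb(cb);	/* already signaled: free right away */
}

int main(void)
{
	struct toy_fence done = { .signaled = true };
	struct toy_cb cb;

	toy_pasid_free_delayed(&done, &cb, 42);	/* prints immediately */
	return 0;
}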
154 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
164 return id->current_gpu_reset_count != in amdgpu_vmid_had_gpu_reset()
165 atomic_read(&adev->gpu_reset_counter); in amdgpu_vmid_had_gpu_reset()
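
The reset check works by snapshotting adev->gpu_reset_counter into the VMID when it is handed out and comparing that snapshot against the live counter later; any difference means a GPU reset happened in between and the cached VMID state cannot be trusted. A tiny standalone illustration of that snapshot-and-compare idiom using C11 atomics and hypothetical toy_* names:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint gpu_reset_counter;	/* bumped by the (hypothetical) reset path */

struct toy_vmid { unsigned current_gpu_reset_count; };

/* Snapshot the counter when the id is handed out... */
static void toy_vmid_use(struct toy_vmid *id)
{
	id->current_gpu_reset_count = atomic_load(&gpu_reset_counter);
}

/* ...any later difference means a reset occurred since then. */
static bool toy_vmid_had_gpu_reset(const struct toy_vmid *id)
{
	return id->current_gpu_reset_count != atomic_load(&gpu_reset_counter);
}

int main(void)
{
	struct toy_vmid id;

	toy_vmid_use(&id);
	atomic_fetch_add(&gpu_reset_counter, 1);	/* simulate a GPU reset */
	return toy_vmid_had_gpu_reset(&id) ? 0 : 1;	/* expect: had a reset */
}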
170 struct amdgpu_job *job) in amdgpu_vmid_gds_switch_needed() argument
172 return id->gds_base != job->gds_base || in amdgpu_vmid_gds_switch_needed()
173 id->gds_size != job->gds_size || in amdgpu_vmid_gds_switch_needed()
174 id->gws_base != job->gws_base || in amdgpu_vmid_gds_switch_needed()
175 id->gws_size != job->gws_size || in amdgpu_vmid_gds_switch_needed()
176 id->oa_base != job->oa_base || in amdgpu_vmid_gds_switch_needed()
177 id->oa_size != job->oa_size; in amdgpu_vmid_gds_switch_needed()
180 /* Check if the id is compatible with the job */
182 struct amdgpu_job *job) in amdgpu_vmid_compatible() argument
184 return id->pd_gpu_addr == job->vm_pd_addr && in amdgpu_vmid_compatible()
185 !amdgpu_vmid_gds_switch_needed(id, job); in amdgpu_vmid_compatible()
189 * amdgpu_vmid_grab_idle - grab idle VMID
191 * @ring: ring we want to submit job to
196 * object. Returns -ENOMEM when we are out of memory.
198 static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring, in amdgpu_vmid_grab_idle() argument
202 struct amdgpu_device *adev = ring->adev; in amdgpu_vmid_grab_idle()
203 unsigned vmhub = ring->vm_hub; in amdgpu_vmid_grab_idle()
204 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; in amdgpu_vmid_grab_idle()
208 if (!dma_fence_is_signaled(ring->vmid_wait)) { in amdgpu_vmid_grab_idle()
209 *fence = dma_fence_get(ring->vmid_wait); in amdgpu_vmid_grab_idle()
210 return 0; in amdgpu_vmid_grab_idle()
213 fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL); in amdgpu_vmid_grab_idle()
215 return -ENOMEM; in amdgpu_vmid_grab_idle()
218 i = 0; in amdgpu_vmid_grab_idle()
219 list_for_each_entry((*idle), &id_mgr->ids_lru, list) { in amdgpu_vmid_grab_idle()
221 struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ? in amdgpu_vmid_grab_idle()
222 NULL : ring; in amdgpu_vmid_grab_idle()
224 fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r); in amdgpu_vmid_grab_idle()
231 if (&(*idle)->list == &id_mgr->ids_lru) { in amdgpu_vmid_grab_idle()
232 u64 fence_context = adev->vm_manager.fence_context + ring->idx; in amdgpu_vmid_grab_idle()
233 unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; in amdgpu_vmid_grab_idle()
238 for (j = 0; j < i; ++j) in amdgpu_vmid_grab_idle()
244 for (j = 0; j < i; ++j) in amdgpu_vmid_grab_idle()
247 return -ENOMEM; in amdgpu_vmid_grab_idle()
250 *fence = dma_fence_get(&array->base); in amdgpu_vmid_grab_idle()
251 dma_fence_put(ring->vmid_wait); in amdgpu_vmid_grab_idle()
252 ring->vmid_wait = &array->base; in amdgpu_vmid_grab_idle()
253 return 0; in amdgpu_vmid_grab_idle()
257 return 0; in amdgpu_vmid_grab_idle()
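
amdgpu_vmid_grab_idle() walks the LRU list and takes the first VMID whose active fences no longer block this ring; if every VMID is still busy it bundles the blocking fences into a dma_fence_array, stores it as ring->vmid_wait and hands it back so the caller can wait and retry. A simplified userspace model of "return the first idle entry, otherwise report what to wait for", with a hypothetical toy_vmid type instead of the driver structures and plain integers standing in for fences:

#include <stddef.h>
#include <stdio.h>

#define NUM_IDS 4

/* Hypothetical VMID entry: 'active' is 0 when idle, otherwise a fence handle. */
struct toy_vmid { int active; };

/*
 * Walk the LRU array front to back. On success return the index of an idle
 * entry; otherwise fill wait[] with the blocking handles and return -1 (the
 * caller would wait on them and retry, like the dma_fence_array built above).
 */
static int toy_grab_idle(const struct toy_vmid ids[NUM_IDS],
			 int wait[NUM_IDS], size_t *nwait)
{
	size_t k, n = 0;

	for (k = 0; k < NUM_IDS; k++) {
		if (!ids[k].active)
			return (int)k;		/* first idle VMID in LRU order */
		wait[n++] = ids[k].active;	/* remember what blocks this one */
	}
	*nwait = n;
	return -1;
}

int main(void)
{
	const struct toy_vmid ids[NUM_IDS] = { {7}, {0}, {9}, {0} };
	int wait[NUM_IDS];
	size_t nwait = 0;
	int idx = toy_grab_idle(ids, wait, &nwait);

	if (idx >= 0)
		printf("got idle VMID %d\n", idx);
	else
		printf("no idle VMID, %zu fences to wait for\n", nwait);
	return 0;
}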
261 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
264 * @ring: ring we want to submit job to
265 * @job: job that wants to use the VMID
272 struct amdgpu_ring *ring, in amdgpu_vmid_grab_reserved() argument
273 struct amdgpu_job *job, in amdgpu_vmid_grab_reserved() argument
277 struct amdgpu_device *adev = ring->adev; in amdgpu_vmid_grab_reserved()
278 unsigned vmhub = ring->vm_hub; in amdgpu_vmid_grab_reserved()
279 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; in amdgpu_vmid_grab_reserved()
280 uint64_t fence_context = adev->fence_context + ring->idx; in amdgpu_vmid_grab_reserved()
281 bool needs_flush = vm->use_cpu_for_update; in amdgpu_vmid_grab_reserved()
285 *id = id_mgr->reserved; in amdgpu_vmid_grab_reserved()
286 if ((*id)->owner != vm->immediate.fence_context || in amdgpu_vmid_grab_reserved()
287 !amdgpu_vmid_compatible(*id, job) || in amdgpu_vmid_grab_reserved()
288 (*id)->flushed_updates < updates || in amdgpu_vmid_grab_reserved()
289 !(*id)->last_flush || in amdgpu_vmid_grab_reserved()
290 ((*id)->last_flush->context != fence_context && in amdgpu_vmid_grab_reserved()
291 !dma_fence_is_signaled((*id)->last_flush))) { in amdgpu_vmid_grab_reserved()
295 if (adev->vm_manager.concurrent_flush) in amdgpu_vmid_grab_reserved()
296 ring = NULL; in amdgpu_vmid_grab_reserved()
299 (*id)->pd_gpu_addr = 0; in amdgpu_vmid_grab_reserved()
300 tmp = amdgpu_sync_peek_fence(&(*id)->active, ring); in amdgpu_vmid_grab_reserved()
304 return 0; in amdgpu_vmid_grab_reserved()
312 r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished); in amdgpu_vmid_grab_reserved()
316 job->vm_needs_flush = needs_flush; in amdgpu_vmid_grab_reserved()
317 job->spm_update_needed = true; in amdgpu_vmid_grab_reserved()
318 return 0; in amdgpu_vmid_grab_reserved()
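
For the reserved VMID the driver decides whether a flush is required before reuse: CPU-based page-table updates, a different owner, an incompatible page directory or GDS configuration, missed TLB updates, or an unsignaled last_flush from another context all force a flush, and without concurrent-flush support the ID's still-active fences must complete first. A condensed sketch of that decision as a pure predicate over hypothetical toy_* snapshots (not the driver structures, with the GDS fields folded away):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical snapshot of what amdgpu_vmid_grab_reserved() inspects. */
struct toy_id_state {
	uint64_t owner;			/* fence context of the last user     */
	uint64_t pd_gpu_addr;		/* page directory it was flushed for  */
	uint64_t flushed_updates;	/* TLB sequence number at last flush  */
	bool     last_flush_done;	/* last_flush signaled / same context */
};

struct toy_job_state {
	uint64_t owner;
	uint64_t vm_pd_addr;
	uint64_t tlb_seq;		/* current TLB sequence of the VM     */
	bool     cpu_update;		/* vm->use_cpu_for_update             */
};

/* True when the job may only run on this VMID after a flush. */
static bool toy_needs_flush(const struct toy_id_state *id,
			    const struct toy_job_state *job)
{
	return job->cpu_update ||
	       id->owner != job->owner ||
	       id->pd_gpu_addr != job->vm_pd_addr ||
	       id->flushed_updates < job->tlb_seq ||
	       !id->last_flush_done;
}

int main(void)
{
	struct toy_id_state id = { 1, 0x1000, 5, true };
	struct toy_job_state job = { 1, 0x1000, 6, false };	/* one missed TLB update */

	return toy_needs_flush(&id, &job) ? 0 : 1;		/* expect: needs flush */
}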
322 * amdgpu_vmid_grab_used - try to reuse a VMID
325 * @ring: ring we want to submit job to
326 * @job: job that wants to use the VMID
333 struct amdgpu_ring *ring, in amdgpu_vmid_grab_used() argument
334 struct amdgpu_job *job, in amdgpu_vmid_grab_used() argument
338 struct amdgpu_device *adev = ring->adev; in amdgpu_vmid_grab_used()
339 unsigned vmhub = ring->vm_hub; in amdgpu_vmid_grab_used()
340 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; in amdgpu_vmid_grab_used()
341 uint64_t fence_context = adev->fence_context + ring->idx; in amdgpu_vmid_grab_used()
345 job->vm_needs_flush = vm->use_cpu_for_update; in amdgpu_vmid_grab_used()
348 list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) { in amdgpu_vmid_grab_used()
349 bool needs_flush = vm->use_cpu_for_update; in amdgpu_vmid_grab_used()
352 if ((*id)->owner != vm->immediate.fence_context) in amdgpu_vmid_grab_used()
355 if (!amdgpu_vmid_compatible(*id, job)) in amdgpu_vmid_grab_used()
358 if (!(*id)->last_flush || in amdgpu_vmid_grab_used()
359 ((*id)->last_flush->context != fence_context && in amdgpu_vmid_grab_used()
360 !dma_fence_is_signaled((*id)->last_flush))) in amdgpu_vmid_grab_used()
363 if ((*id)->flushed_updates < updates) in amdgpu_vmid_grab_used()
366 if (needs_flush && !adev->vm_manager.concurrent_flush) in amdgpu_vmid_grab_used()
372 r = amdgpu_sync_fence(&(*id)->active, in amdgpu_vmid_grab_used()
373 &job->base.s_fence->finished); in amdgpu_vmid_grab_used()
377 job->vm_needs_flush |= needs_flush; in amdgpu_vmid_grab_used()
378 return 0; in amdgpu_vmid_grab_used()
382 return 0; in amdgpu_vmid_grab_used()
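
amdgpu_vmid_grab_used() walks the LRU in reverse, i.e. from the most recently used VMID backwards, and skips every candidate that belongs to another VM, is incompatible with the job, or would need a flush the hardware cannot perform concurrently; the first survivor is reused. A simplified userspace sketch of that filtered reverse walk with hypothetical toy_* fields:

#include <stdio.h>

#define NUM_IDS 4

/* Hypothetical VMID entry; the newest user sits at the tail of the LRU. */
struct toy_vmid {
	unsigned owner;			/* fence context of the owning VM   */
	unsigned long pd_addr;		/* page directory it is set up for  */
};

/*
 * Walk from most- to least-recently used (the reverse walk above) and return
 * the first entry this VM may reuse, or -1 so the caller falls back to an
 * idle VMID instead.
 */
static int toy_grab_used(const struct toy_vmid lru[NUM_IDS],
			 unsigned owner, unsigned long pd_addr)
{
	int i;

	for (i = NUM_IDS - 1; i >= 0; i--) {
		if (lru[i].owner != owner)	/* belongs to another VM */
			continue;
		if (lru[i].pd_addr != pd_addr)	/* set up for another page directory */
			continue;
		return i;
	}
	return -1;
}

int main(void)
{
	const struct toy_vmid lru[NUM_IDS] = {
		{ 1, 0x1000 }, { 2, 0x2000 }, { 1, 0x3000 }, { 3, 0x4000 },
	};

	printf("reusable VMID index: %d\n", toy_grab_used(lru, 1, 0x3000));
	return 0;
}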
386 * amdgpu_vmid_grab - allocate the next free VMID
389 * @ring: ring we want to submit job to
390 * @job: job that wants to use the VMID
395 int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, in amdgpu_vmid_grab() argument
396 struct amdgpu_job *job, struct dma_fence **fence) in amdgpu_vmid_grab() argument
398 struct amdgpu_device *adev = ring->adev; in amdgpu_vmid_grab()
399 unsigned vmhub = ring->vm_hub; in amdgpu_vmid_grab()
400 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; in amdgpu_vmid_grab()
403 int r = 0; in amdgpu_vmid_grab()
405 mutex_lock(&id_mgr->lock); in amdgpu_vmid_grab()
406 r = amdgpu_vmid_grab_idle(ring, &idle, fence); in amdgpu_vmid_grab()
410 if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) { in amdgpu_vmid_grab()
411 r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence); in amdgpu_vmid_grab()
415 r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence); in amdgpu_vmid_grab()
424 r = amdgpu_sync_fence(&id->active, in amdgpu_vmid_grab()
425 &job->base.s_fence->finished); in amdgpu_vmid_grab()
429 job->vm_needs_flush = true; in amdgpu_vmid_grab()
432 list_move_tail(&id->list, &id_mgr->ids_lru); in amdgpu_vmid_grab()
435 job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job); in amdgpu_vmid_grab()
436 if (job->vm_needs_flush) { in amdgpu_vmid_grab()
437 id->flushed_updates = amdgpu_vm_tlb_seq(vm); in amdgpu_vmid_grab()
438 dma_fence_put(id->last_flush); in amdgpu_vmid_grab()
439 id->last_flush = NULL; in amdgpu_vmid_grab()
441 job->vmid = id - id_mgr->ids; in amdgpu_vmid_grab()
442 job->pasid = vm->pasid; in amdgpu_vmid_grab()
444 id->gds_base = job->gds_base; in amdgpu_vmid_grab()
445 id->gds_size = job->gds_size; in amdgpu_vmid_grab()
446 id->gws_base = job->gws_base; in amdgpu_vmid_grab()
447 id->gws_size = job->gws_size; in amdgpu_vmid_grab()
448 id->oa_base = job->oa_base; in amdgpu_vmid_grab()
449 id->oa_size = job->oa_size; in amdgpu_vmid_grab()
450 id->pd_gpu_addr = job->vm_pd_addr; in amdgpu_vmid_grab()
451 id->owner = vm->immediate.fence_context; in amdgpu_vmid_grab()
453 trace_amdgpu_vm_grab_id(vm, ring, job); in amdgpu_vmid_grab()
456 mutex_unlock(&id_mgr->lock); in amdgpu_vmid_grab()
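
Taken together, amdgpu_vmid_grab() first waits until some VMID is idle, then prefers the reserved VMID (when the VM reserved one, or isolation is enforced on the GFX hub), then tries to reuse a compatible VMID from the LRU, and only then assigns the idle one, which always forces vm_needs_flush and refreshes the ID's bookkeeping. A compressed sketch of that priority order with hypothetical helpers; locking, fences and error handling are left out:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the three acquisition paths. */
static int try_reserved(void)			{ return 0; }
static int try_used(bool compatible_in_lru)	{ return compatible_in_lru ? 1 : -1; }
static int take_idle(void)			{ return 2; /* always succeeds here */ }

/* Priority order used above: reserved VMID, then reuse, then a fresh idle one. */
static int toy_vmid_grab(bool have_reserved, bool compatible_in_lru,
			 bool *needs_flush)
{
	int vmid;

	*needs_flush = false;
	if (have_reserved)
		return try_reserved();

	vmid = try_used(compatible_in_lru);
	if (vmid >= 0)
		return vmid;

	vmid = take_idle();
	*needs_flush = true;	/* a freshly assigned VMID always needs a flush */
	return vmid;
}

int main(void)
{
	bool flush;
	int vmid = toy_vmid_grab(false, false, &flush);

	printf("vmid=%d needs_flush=%d\n", vmid, flush);
	return 0;
}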
463 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; in amdgpu_vmid_alloc_reserved()
465 mutex_lock(&id_mgr->lock); in amdgpu_vmid_alloc_reserved()
467 ++id_mgr->reserved_use_count; in amdgpu_vmid_alloc_reserved()
468 if (!id_mgr->reserved) { in amdgpu_vmid_alloc_reserved()
471 id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, in amdgpu_vmid_alloc_reserved()
474 list_del_init(&id->list); in amdgpu_vmid_alloc_reserved()
475 id_mgr->reserved = id; in amdgpu_vmid_alloc_reserved()
478 mutex_unlock(&id_mgr->lock); in amdgpu_vmid_alloc_reserved()
479 return 0; in amdgpu_vmid_alloc_reserved()
485 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; in amdgpu_vmid_free_reserved()
487 mutex_lock(&id_mgr->lock); in amdgpu_vmid_free_reserved()
488 if (!--id_mgr->reserved_use_count) { in amdgpu_vmid_free_reserved()
490 list_add(&id_mgr->reserved->list, &id_mgr->ids_lru); in amdgpu_vmid_free_reserved()
491 id_mgr->reserved = NULL; in amdgpu_vmid_free_reserved()
494 mutex_unlock(&id_mgr->lock); in amdgpu_vmid_free_reserved()
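
amdgpu_vmid_alloc_reserved() pulls one VMID off the LRU and parks it as id_mgr->reserved; reserved_use_count lets multiple callers share that reservation, and only when the last one calls amdgpu_vmid_free_reserved() does the ID go back onto the LRU. A small standalone refcount sketch with a hypothetical toy_id_mgr:

#include <assert.h>

struct toy_id_mgr {
	unsigned reserved_use_count;
	int      reserved;	/* index of the parked VMID, -1 when none */
};

/* The first caller detaches a VMID from the LRU (index 1 here, arbitrarily). */
static void toy_alloc_reserved(struct toy_id_mgr *m)
{
	if (++m->reserved_use_count == 1)
		m->reserved = 1;
}

/* The last caller puts it back on the LRU. */
static void toy_free_reserved(struct toy_id_mgr *m)
{
	if (--m->reserved_use_count == 0)
		m->reserved = -1;
}

int main(void)
{
	struct toy_id_mgr m = { 0, -1 };

	toy_alloc_reserved(&m);
	toy_alloc_reserved(&m);		/* a second user shares the reservation */
	toy_free_reserved(&m);
	assert(m.reserved == 1);	/* still parked for the remaining user */
	toy_free_reserved(&m);
	assert(m.reserved == -1);	/* back on the LRU */
	return 0;
}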
498 * amdgpu_vmid_reset - reset VMID to zero
509 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; in amdgpu_vmid_reset()
510 struct amdgpu_vmid *id = &id_mgr->ids[vmid]; in amdgpu_vmid_reset()
512 mutex_lock(&id_mgr->lock); in amdgpu_vmid_reset()
513 id->owner = 0; in amdgpu_vmid_reset()
514 id->gds_base = 0; in amdgpu_vmid_reset()
515 id->gds_size = 0; in amdgpu_vmid_reset()
516 id->gws_base = 0; in amdgpu_vmid_reset()
517 id->gws_size = 0; in amdgpu_vmid_reset()
518 id->oa_base = 0; in amdgpu_vmid_reset()
519 id->oa_size = 0; in amdgpu_vmid_reset()
520 mutex_unlock(&id_mgr->lock); in amdgpu_vmid_reset()
524 * amdgpu_vmid_reset_all - reset all VMIDs to zero
534 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { in amdgpu_vmid_reset_all()
536 &adev->vm_manager.id_mgr[i]; in amdgpu_vmid_reset_all()
538 for (j = 1; j < id_mgr->num_ids; ++j) in amdgpu_vmid_reset_all()
544 * amdgpu_vmid_mgr_init - init the VMID manager
554 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { in amdgpu_vmid_mgr_init()
556 &adev->vm_manager.id_mgr[i]; in amdgpu_vmid_mgr_init()
558 mutex_init(&id_mgr->lock); in amdgpu_vmid_mgr_init()
559 INIT_LIST_HEAD(&id_mgr->ids_lru); in amdgpu_vmid_mgr_init()
560 id_mgr->reserved_use_count = 0; in amdgpu_vmid_mgr_init()
563 id_mgr->num_ids = adev->vm_manager.first_kfd_vmid; in amdgpu_vmid_mgr_init()
565 /* skip over VMID 0, since it is the system VM */ in amdgpu_vmid_mgr_init()
566 for (j = 1; j < id_mgr->num_ids; ++j) { in amdgpu_vmid_mgr_init()
568 amdgpu_sync_create(&id_mgr->ids[j].active); in amdgpu_vmid_mgr_init()
569 list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru); in amdgpu_vmid_mgr_init()
574 amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0)); in amdgpu_vmid_mgr_init()
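
The init loop only manages VMIDs 1 through first_kfd_vmid - 1 on each hub: VMID 0 is the system VM and everything from first_kfd_vmid upward is left to the KFD compute side. A trivial standalone sketch of that partitioning, with first_kfd_vmid chosen arbitrarily:

#include <stdio.h>

#define TOY_NUM_VMID 16

int main(void)
{
	unsigned first_kfd_vmid = 8;	/* hypothetical split; read from the device in reality */
	unsigned vmid;

	/* VMID 0 is the system VM and is never put on the LRU. */
	for (vmid = 1; vmid < first_kfd_vmid; vmid++)
		printf("graphics-managed VMID %u\n", vmid);
	for (vmid = first_kfd_vmid; vmid < TOY_NUM_VMID; vmid++)
		printf("KFD-managed VMID %u\n", vmid);
	return 0;
}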
579 * amdgpu_vmid_mgr_fini - cleanup the VMID manager
589 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { in amdgpu_vmid_mgr_fini()
591 &adev->vm_manager.id_mgr[i]; in amdgpu_vmid_mgr_fini()
593 mutex_destroy(&id_mgr->lock); in amdgpu_vmid_mgr_fini()
594 for (j = 0; j < AMDGPU_NUM_VMID; ++j) { in amdgpu_vmid_mgr_fini()
595 struct amdgpu_vmid *id = &id_mgr->ids[j]; in amdgpu_vmid_mgr_fini()
597 amdgpu_sync_free(&id->active); in amdgpu_vmid_mgr_fini()
598 dma_fence_put(id->last_flush); in amdgpu_vmid_mgr_fini()
599 dma_fence_put(id->pasid_mapping); in amdgpu_vmid_mgr_fini()