Lines matching full:pm, grouped by function

in pm_calc_rlib_size():
    static void pm_calc_rlib_size(struct packet_manager *pm,
    struct kfd_node *node = pm->dqm->dev;
    process_count = pm->dqm->processes_count;
    queue_count = pm->dqm->active_queue_count;
    compute_queue_count = pm->dqm->active_cp_queue_count;
    gws_queue_count = pm->dqm->gws_queue_count;
    if (compute_queue_count > get_cp_queues_num(pm->dqm))
    map_queue_size = pm->pmf->map_queues_size;
    *rlib_size = process_count * pm->pmf->map_process_size +
    *rlib_size += pm->pmf->runlist_size;
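
The matches only show pieces of the size math, so here is a minimal user-space sketch of how a runlist IB size can be derived from those terms. The process term and the oversubscription term come straight from the lines above; the per-queue term is an assumption inferred from map_queue_size being read, and every name below is hypothetical rather than the driver's.

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical per-packet sizes, standing in for pm->pmf->*_size. */
    struct pm_packet_sizes {
        size_t map_process_size;
        size_t map_queues_size;
        size_t runlist_size;
    };

    /*
     * Sketch of the arithmetic in pm_calc_rlib_size(): one map_process packet
     * per process, one map_queues packet per queue (assumed), plus room for a
     * chaining runlist packet when the runlist is oversubscribed.
     */
    static size_t calc_rlib_size(const struct pm_packet_sizes *s,
                                 unsigned int process_count,
                                 unsigned int queue_count,
                                 bool is_over_subscription)
    {
        size_t rlib_size = process_count * s->map_process_size +
                           queue_count * s->map_queues_size;

        if (is_over_subscription)
            rlib_size += s->runlist_size;

        return rlib_size;
    }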

in pm_allocate_runlist_ib():
    static int pm_allocate_runlist_ib(struct packet_manager *pm,
    struct kfd_node *node = pm->dqm->dev;
    if (WARN_ON(pm->allocated))
    pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
    mutex_lock(&pm->lock);
    retval = kfd_gtt_sa_allocate(node, *rl_buffer_size, &pm->ib_buffer_obj);
    *(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
    *rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;
    pm->allocated = true;
    mutex_unlock(&pm->lock);
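
A compressed sketch of the allocation flow these fragments outline: refuse a second IB while one is live, allocate, publish both the CPU and GPU views, then mark the IB allocated. This is a user-space analogue (calloc in place of kfd_gtt_sa_allocate, a plain struct in place of the GTT sub-allocation object), and the locking done under pm->lock in the driver is omitted.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for the GTT sub-allocation handle (ib_buffer_obj). */
    struct ib_buffer {
        void *cpu_ptr;
        uint64_t gpu_addr;
    };

    struct runlist_ctx {
        bool allocated;
        struct ib_buffer ib;
    };

    /*
     * Mirrors the order of operations visible in pm_allocate_runlist_ib():
     * reject double allocation, allocate the IB, hand back both mappings,
     * then mark the IB live.
     */
    static int allocate_runlist_ib(struct runlist_ctx *ctx, size_t rlib_size,
                                   void **rl_buffer, uint64_t *rl_gpu_buffer)
    {
        if (ctx->allocated)
            return -EBUSY;          /* the driver WARNs and bails out here */

        ctx->ib.cpu_ptr = calloc(1, rlib_size);
        if (!ctx->ib.cpu_ptr)
            return -ENOMEM;
        ctx->ib.gpu_addr = (uint64_t)(uintptr_t)ctx->ib.cpu_ptr; /* fake GPU address */

        *rl_buffer = ctx->ib.cpu_ptr;
        *rl_gpu_buffer = ctx->ib.gpu_addr;
        ctx->allocated = true;
        return 0;
    }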

in pm_create_runlist_ib():
    static int pm_create_runlist_ib(struct packet_manager *pm,
    struct kfd_node *node = pm->dqm->dev;
    retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
    pm->ib_size_bytes = alloc_size_bytes;
    pm->dqm->processes_count, pm->dqm->active_queue_count);
    if (processes_mapped >= pm->dqm->processes_count) {
    pm_release_ib(pm);
    retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
    inc_wptr(&rl_wptr, pm->pmf->map_process_size,
    retval = pm->pmf->map_queues(pm,
    pm->pmf->map_queues_size,
    retval = pm->pmf->map_queues(pm,
    pm->pmf->map_queues_size,
    if (!pm->is_over_subscription)
    retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
    pm->is_over_subscription = !!is_over_subscription;
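
Read together, the fragments above describe a write pointer (rl_wptr) walking the IB: one map_process packet per process, then map_queues packets for that process's queues (the two map_queues call sites cover two queue lists, collapsed into one loop here), and a chaining runlist packet appended only when the runlist is oversubscribed. A sketch of that loop shape under those assumptions; the packet sizes and emit helpers are invented for illustration.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical packet sizes in bytes, standing in for pm->pmf->*_size. */
    #define MAP_PROCESS_SIZE 64u
    #define MAP_QUEUES_SIZE  56u
    #define RUNLIST_SIZE     24u

    /* Advance the write pointer by one packet, as inc_wptr() does above;
     * the driver's helper also guards against overflowing the IB. */
    static void inc_wptr(size_t *wptr, size_t packet_bytes, size_t buf_bytes)
    {
        if (*wptr + packet_bytes <= buf_bytes)
            *wptr += packet_bytes;
    }

    /*
     * Shape of the build loop in pm_create_runlist_ib(): map each process,
     * then its queues, then chain back with a runlist packet if oversubscribed.
     */
    static size_t build_runlist(uint8_t *rl_buffer, size_t buf_bytes,
                                unsigned int nprocs, unsigned int queues_per_proc,
                                bool is_over_subscription)
    {
        size_t rl_wptr = 0;

        (void)rl_buffer;    /* the real code writes packets here via pm->pmf */

        for (unsigned int p = 0; p < nprocs; p++) {
            /* emit_map_process(&rl_buffer[rl_wptr], ...);  hypothetical */
            inc_wptr(&rl_wptr, MAP_PROCESS_SIZE, buf_bytes);

            for (unsigned int q = 0; q < queues_per_proc; q++) {
                /* emit_map_queues(&rl_buffer[rl_wptr], ...);  hypothetical */
                inc_wptr(&rl_wptr, MAP_QUEUES_SIZE, buf_bytes);
            }
        }

        if (is_over_subscription) {
            /* emit_runlist(&rl_buffer[rl_wptr], ...);  chains back to the IB */
            inc_wptr(&rl_wptr, RUNLIST_SIZE, buf_bytes);
        }

        return rl_wptr;     /* bytes written */
    }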

in pm_init():
    int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
    pm->pmf = &kfd_vi_pm_funcs;
    pm->pmf = &kfd_aldebaran_pm_funcs;
    pm->pmf = &kfd_v9_pm_funcs;
    pm->dqm = dqm;
    mutex_init(&pm->lock);
    pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
    if (!pm->priv_queue) {
    mutex_destroy(&pm->lock);
    pm->allocated = false;

in pm_uninit():
    void pm_uninit(struct packet_manager *pm)
    mutex_destroy(&pm->lock);
    kernel_queue_uninit(pm->priv_queue);
    pm->priv_queue = NULL;
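
pm_init() and pm_uninit() pair up: init selects one of the per-ASIC packet-function tables (kfd_vi_pm_funcs, kfd_aldebaran_pm_funcs or kfd_v9_pm_funcs; the version checks themselves do not match pm and are not shown), initialises the lock, and creates the HIQ kernel queue, undoing the lock if that fails; uninit reverses it. A user-space sketch of that ordering with pthread mutexes and stubbed helpers, none of which are the driver's real APIs.

    #include <pthread.h>
    #include <stddef.h>

    struct pm_funcs { int unused; };        /* stand-in for the pm->pmf table */
    struct kernel_queue { int unused; };    /* stand-in for the HIQ wrapper */

    /* Hypothetical stubs for kernel_queue_init()/_uninit() and the
     * GPU-generation check that picks the vi/aldebaran/v9 table above. */
    static const struct pm_funcs v9_funcs = { 0 };
    static const struct pm_funcs *pick_pm_funcs(void) { return &v9_funcs; }
    static struct kernel_queue *hiq_create(void) { static struct kernel_queue q; return &q; }
    static void hiq_destroy(struct kernel_queue *kq) { (void)kq; }

    struct packet_mgr {
        const struct pm_funcs *pmf;
        struct kernel_queue *priv_queue;
        pthread_mutex_t lock;
        int allocated;
    };

    static int packet_mgr_init(struct packet_mgr *pm)
    {
        pm->pmf = pick_pm_funcs();      /* per-ASIC packet builders */
        pthread_mutex_init(&pm->lock, NULL);
        pm->priv_queue = hiq_create();  /* HIQ carries the pm_send_*() packets */
        if (!pm->priv_queue) {
            pthread_mutex_destroy(&pm->lock);   /* undo init in reverse order */
            return -1;
        }
        pm->allocated = 0;
        return 0;
    }

    static void packet_mgr_uninit(struct packet_mgr *pm)
    {
        pthread_mutex_destroy(&pm->lock);
        hiq_destroy(pm->priv_queue);
        pm->priv_queue = NULL;
    }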

in pm_send_set_resources():
    int pm_send_set_resources(struct packet_manager *pm,
    struct kfd_node *node = pm->dqm->dev;
    size = pm->pmf->set_resources_size;
    mutex_lock(&pm->lock);
    kq_acquire_packet_buffer(pm->priv_queue,
    retval = pm->pmf->set_resources(pm, buffer, res);
    retval = kq_submit_packet(pm->priv_queue);
    kq_rollback_packet(pm->priv_queue);
    mutex_unlock(&pm->lock);
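
pm_send_set_resources() is the first of several helpers above (pm_send_runlist, pm_send_query_status, pm_config_dequeue_wait_counts, pm_send_unmap_queue, pm_debugfs_hang_hws) that share one skeleton: take pm->lock, reserve packet space on the HIQ with kq_acquire_packet_buffer(), build the packet through a pm->pmf callback, then kq_submit_packet() on success or kq_rollback_packet() on failure, and unlock. A hedged user-space sketch of that skeleton; the hiq_* and build_packet stubs are invented stand-ins, not the driver's signatures.

    #include <errno.h>
    #include <pthread.h>
    #include <stdint.h>

    /* Invented stand-ins for the kernel-queue (HIQ) helpers used above. */
    static uint32_t hiq_ring[64];
    static int hiq_acquire(unsigned int ndwords, uint32_t **buffer)
    {
        if (ndwords > 64)
            return -EINVAL;
        *buffer = hiq_ring;
        return 0;
    }
    static int hiq_submit(void) { return 0; }       /* ring the doorbell */
    static void hiq_rollback(void) { }              /* give the space back */
    static int build_packet(uint32_t *buffer)       /* a pm->pmf->* builder */
    {
        buffer[0] = 0;
        return 0;
    }

    static pthread_mutex_t pm_lock = PTHREAD_MUTEX_INITIALIZER;

    /* The acquire/build/submit-or-rollback skeleton shared by the pm_send_*()
     * helpers listed above. */
    static int send_one_packet(unsigned int packet_size_dwords)
    {
        uint32_t *buffer = NULL;
        int retval;

        pthread_mutex_lock(&pm_lock);

        retval = hiq_acquire(packet_size_dwords, &buffer);
        if (retval || !buffer) {
            retval = retval ? retval : -ENOMEM;
            goto out;               /* nothing reserved, nothing to roll back */
        }

        retval = build_packet(buffer);  /* e.g. pm->pmf->set_resources() */
        if (!retval)
            retval = hiq_submit();
        else
            hiq_rollback();

    out:
        pthread_mutex_unlock(&pm_lock);
        return retval;
    }

The only structural difference among the callers is which pm->pmf size field and builder they use; pm_send_runlist() additionally releases the runlist IB on its failure path, as the pm_release_ib(pm) match below shows.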

in pm_send_runlist():
    int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
    retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
    packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
    mutex_lock(&pm->lock);
    retval = kq_acquire_packet_buffer(pm->priv_queue,
    retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
    retval = kq_submit_packet(pm->priv_queue);
    mutex_unlock(&pm->lock);
    kq_rollback_packet(pm->priv_queue);
    mutex_unlock(&pm->lock);
    pm_release_ib(pm);

in pm_send_query_status():
    int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
    struct kfd_node *node = pm->dqm->dev;
    size = pm->pmf->query_status_size;
    mutex_lock(&pm->lock);
    kq_acquire_packet_buffer(pm->priv_queue,
    retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
    retval = kq_submit_packet(pm->priv_queue);
    kq_rollback_packet(pm->priv_queue);
    mutex_unlock(&pm->lock);

in pm_config_dequeue_wait_counts():
    int pm_config_dequeue_wait_counts(struct packet_manager *pm,
    struct kfd_node *node = pm->dqm->dev;
    if (!pm->pmf->config_dequeue_wait_counts ||
    !pm->pmf->config_dequeue_wait_counts_size)
    if (cmd == KFD_DEQUEUE_WAIT_INIT && (KFD_GC_VERSION(pm->dqm->dev) < IP_VERSION(9, 4, 1) ||
    KFD_GC_VERSION(pm->dqm->dev) >= IP_VERSION(10, 0, 0)))
    size = pm->pmf->config_dequeue_wait_counts_size;
    mutex_lock(&pm->lock);
    kq_acquire_packet_buffer(pm->priv_queue,
    retval = pm->pmf->config_dequeue_wait_counts(pm, buffer,
    retval = kq_submit_packet(pm->priv_queue);
    update_dqm_wait_times(pm->dqm);
    kq_rollback_packet(pm->priv_queue);
    mutex_unlock(&pm->lock);
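
The guards at the top of pm_config_dequeue_wait_counts() are almost fully visible in the matches: the per-ASIC callback and its size must exist, and a KFD_DEQUEUE_WAIT_INIT command only proceeds on GC IP versions from 9.4.1 up to (but not including) 10.0.0. A small sketch of that version-range predicate; the packed encoding below is an assumption for illustration and may not match the driver's exact IP_VERSION() shifts.

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed major/minor/revision packing, most significant first, so a
     * plain integer comparison orders versions the way the check above needs. */
    #define IP_VERSION(maj, min, rev) (((uint32_t)(maj) << 16) | ((min) << 8) | (rev))

    /* KFD_DEQUEUE_WAIT_INIT is only acted on for GC 9.4.1 .. 9.x; anything
     * older, or GC 10 and newer, takes the early-return path above. */
    static bool dequeue_wait_init_supported(uint32_t gc_version)
    {
        return gc_version >= IP_VERSION(9, 4, 1) &&
               gc_version <  IP_VERSION(10, 0, 0);
    }

On successful submission the function also refreshes the cached wait times via update_dqm_wait_times(pm->dqm), as the match above shows.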

in pm_send_unmap_queue():
    int pm_send_unmap_queue(struct packet_manager *pm,
    struct kfd_node *node = pm->dqm->dev;
    size = pm->pmf->unmap_queues_size;
    mutex_lock(&pm->lock);
    kq_acquire_packet_buffer(pm->priv_queue,
    retval = pm->pmf->unmap_queues(pm, buffer, filter, filter_param, reset);
    retval = kq_submit_packet(pm->priv_queue);
    kq_rollback_packet(pm->priv_queue);
    mutex_unlock(&pm->lock);

in pm_release_ib():
    void pm_release_ib(struct packet_manager *pm)
    mutex_lock(&pm->lock);
    if (pm->allocated) {
    kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
    pm->allocated = false;
    mutex_unlock(&pm->lock);

in pm_debugfs_runlist():
    struct packet_manager *pm = data;
    mutex_lock(&pm->lock);
    if (!pm->allocated) {
    pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);
    mutex_unlock(&pm->lock);

in pm_debugfs_hang_hws():
    int pm_debugfs_hang_hws(struct packet_manager *pm)
    struct kfd_node *node = pm->dqm->dev;
    if (!pm->priv_queue)
    size = pm->pmf->query_status_size;
    mutex_lock(&pm->lock);
    kq_acquire_packet_buffer(pm->priv_queue,
    kq_submit_packet(pm->priv_queue);
    mutex_unlock(&pm->lock);