Lines Matching +full:cs +full:- +full:out

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright 2016-2019 HabanaLabs, Ltd.
13 * hl_hw_queue_add_ptr - add to pi or ci and check if it wraps around
23 ptr &= ((HL_QUEUE_LENGTH << 1) - 1); in hl_hw_queue_add_ptr()
28 return atomic_read(ci) & ((queue_len << 1) - 1); in queue_ci_get()
33 int delta = (q->pi - queue_ci_get(&q->ci, queue_len)); in queue_free_slots()
36 return (queue_len - delta); in queue_free_slots()
38 return (abs(delta) - queue_len); in queue_free_slots()
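
The producer and consumer indices above run over twice the queue length, so a completely full ring can be told apart from an empty one. A minimal user-space sketch of the same free-slot arithmetic (QUEUE_LEN and the helper names are illustrative, not the driver's):

#include <assert.h>
#include <stdio.h>

#define QUEUE_LEN 256   /* stands in for HL_QUEUE_LENGTH; must be a power of 2 */

/* pi and ci advance modulo 2 * QUEUE_LEN; the extra bit distinguishes
 * an empty ring (pi == ci) from a full one (delta == QUEUE_LEN).
 */
static unsigned int inc_ptr(unsigned int ptr)
{
        return (ptr + 1) & ((QUEUE_LEN << 1) - 1);
}

static int free_slots(unsigned int pi, unsigned int ci)
{
        int delta = (int)pi - (int)ci;

        if (delta >= 0)
                return QUEUE_LEN - delta;
        else
                return -delta - QUEUE_LEN;
}

int main(void)
{
        unsigned int pi = 0, ci = 0;
        int i;

        assert(free_slots(pi, ci) == QUEUE_LEN);        /* empty ring */

        for (i = 0; i < QUEUE_LEN; i++)
                pi = inc_ptr(pi);
        assert(free_slots(pi, ci) == 0);                /* full ring */

        printf("free-slot arithmetic checks out\n");
        return 0;
}

With both indices kept modulo 2 * QUEUE_LEN, pi == ci means empty and a delta of QUEUE_LEN means full, which is exactly the distinction queue_free_slots() makes.
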
41 void hl_int_hw_queue_update_ci(struct hl_cs *cs) in hl_int_hw_queue_update_ci() argument
43 struct hl_device *hdev = cs->ctx->hdev; in hl_int_hw_queue_update_ci()
47 if (hdev->disabled) in hl_int_hw_queue_update_ci()
50 q = &hdev->kernel_queues[0]; in hl_int_hw_queue_update_ci()
51 for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) { in hl_int_hw_queue_update_ci()
52 if (q->queue_type == QUEUE_TYPE_INT) in hl_int_hw_queue_update_ci()
53 atomic_add(cs->jobs_in_queue_cnt[i], &q->ci); in hl_int_hw_queue_update_ci()
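
hl_int_hw_queue_update_ci() walks every internal queue and advances its consumer index by the number of jobs the completed CS had placed there. A rough user-space equivalent using C11 atomics (the structure and names below are assumptions for illustration):

#include <stdatomic.h>
#include <stdio.h>

#define MAX_QUEUES 4    /* stands in for asic_prop.max_queues */

struct int_queue {
        atomic_uint ci;
};

/* On CS completion, each internal queue's consumer index advances by the
 * number of jobs that CS had queued on it.
 */
static void update_ci(struct int_queue *queues,
                      const unsigned int jobs_in_queue_cnt[MAX_QUEUES])
{
        int i;

        for (i = 0; i < MAX_QUEUES; i++)
                atomic_fetch_add(&queues[i].ci, jobs_in_queue_cnt[i]);
}

int main(void)
{
        static struct int_queue queues[MAX_QUEUES];     /* zero-initialized */
        unsigned int jobs[MAX_QUEUES] = { 2, 0, 1, 0 };

        update_ci(queues, jobs);
        printf("queue 0 ci = %u\n", atomic_load(&queues[0].ci));
        return 0;
}
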
58 * ext_and_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a
78 bd = q->kernel_address; in ext_and_hw_queue_submit_bd()
79 bd += hl_pi_2_offset(q->pi); in ext_and_hw_queue_submit_bd()
80 bd->ctl = cpu_to_le32(ctl); in ext_and_hw_queue_submit_bd()
81 bd->len = cpu_to_le32(len); in ext_and_hw_queue_submit_bd()
82 bd->ptr = cpu_to_le64(ptr); in ext_and_hw_queue_submit_bd()
84 q->pi = hl_queue_inc_ptr(q->pi); in ext_and_hw_queue_submit_bd()
85 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi); in ext_and_hw_queue_submit_bd()
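
ext_and_hw_queue_submit_bd() writes the control word, length and DMA pointer of the next buffer descriptor in little-endian form, advances the producer index and rings the doorbell so the device fetches the new entry. A user-space sketch of that sequence, assuming a Linux/glibc environment for htole32()/htole64() (the descriptor layout and names are simplified, not the ASIC's):

#include <endian.h>     /* htole32()/htole64(), glibc-specific */
#include <stdint.h>
#include <stdio.h>

#define QUEUE_LEN 256   /* stands in for HL_QUEUE_LENGTH; power of 2 */

/* simplified little-endian buffer descriptor */
struct bd {
        uint32_t ctl;
        uint32_t len;
        uint64_t ptr;
};

struct queue {
        struct bd ring[QUEUE_LEN];
        unsigned int pi;        /* runs modulo 2 * QUEUE_LEN */
};

static void ring_doorbell(unsigned int qid, unsigned int pi)
{
        /* in the driver this is an ASIC callback writing pi to a register */
        printf("doorbell: queue %u, pi %u\n", qid, pi);
}

static void submit_bd(struct queue *q, uint32_t ctl, uint32_t len, uint64_t dma_addr)
{
        /* pi & (QUEUE_LEN - 1) maps the doubled-range pi onto a ring slot,
         * the role hl_pi_2_offset() plays in the driver
         */
        struct bd *bd = &q->ring[q->pi & (QUEUE_LEN - 1)];

        bd->ctl = htole32(ctl);
        bd->len = htole32(len);
        bd->ptr = htole64(dma_addr);

        q->pi = (q->pi + 1) & ((QUEUE_LEN << 1) - 1);
        ring_doorbell(0, q->pi);
}

int main(void)
{
        static struct queue q;

        submit_bd(&q, 0x1, 64, 0x1000);
        submit_bd(&q, 0x2, 128, 0x2000);
        return 0;
}
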
89 * ext_queue_sanity_checks - perform some sanity checks on external queue
99 * - Make sure we have enough space in the h/w queue
100 * - Make sure we have enough space in the completion queue
101 * - Reserve space in the completion queue (needs to be reversed if there
111 &hdev->completion_queue[q->cq_id].free_slots_cnt; in ext_queue_sanity_checks()
118 dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n", in ext_queue_sanity_checks()
119 q->hw_queue_id, num_of_entries); in ext_queue_sanity_checks()
120 return -EAGAIN; in ext_queue_sanity_checks()
126 * Add -1 to counter (decrement) unless counter was already 0 in ext_queue_sanity_checks()
131 if (atomic_add_negative(num_of_entries * -1, free_slots)) { in ext_queue_sanity_checks()
132 dev_dbg(hdev->dev, "No space for %d on CQ %d\n", in ext_queue_sanity_checks()
133 num_of_entries, q->hw_queue_id); in ext_queue_sanity_checks()
135 return -EAGAIN; in ext_queue_sanity_checks()
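
ext_queue_sanity_checks() reserves completion-queue slots up front with atomic_add_negative(); if the counter would have gone negative, the reservation is presumably returned before the -EAGAIN (the rollback line itself is not among the matches here). A small C11 sketch of that reserve-or-undo pattern (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Reserve 'n' completion-queue slots: subtract, and roll back if the
 * counter would go negative.
 */
static bool reserve_cq_slots(atomic_int *free_slots, int n)
{
        /* fetch_sub returns the previous value */
        if (atomic_fetch_sub(free_slots, n) - n < 0) {
                atomic_fetch_add(free_slots, n);   /* undo the reservation */
                return false;                      /* caller returns -EAGAIN */
        }
        return true;
}

int main(void)
{
        atomic_int free_slots = 4;

        printf("reserve 3: %s\n", reserve_cq_slots(&free_slots, 3) ? "ok" : "busy");
        printf("reserve 3: %s\n", reserve_cq_slots(&free_slots, 3) ? "ok" : "busy");
        printf("free slots left: %d\n", atomic_load(&free_slots));
        return 0;
}

The kernel's atomic_add_negative(-n, v) adds -n and reports whether the result went negative, which is what the fetch_sub()-based check models here.
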
143 * int_queue_sanity_checks - perform some sanity checks on internal queue
152 * - Make sure we have enough space in the h/w queue
161 if (num_of_entries > q->int_queue_len) { in int_queue_sanity_checks()
162 dev_err(hdev->dev, in int_queue_sanity_checks()
164 q->hw_queue_id, num_of_entries); in int_queue_sanity_checks()
165 return -ENOMEM; in int_queue_sanity_checks()
169 free_slots_cnt = queue_free_slots(q, q->int_queue_len); in int_queue_sanity_checks()
172 dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n", in int_queue_sanity_checks()
173 q->hw_queue_id, num_of_entries); in int_queue_sanity_checks()
174 return -EAGAIN; in int_queue_sanity_checks()
181 * hw_queue_sanity_checks() - Make sure we have enough space in the h/w queue
187 * more than once per CS for the same queue
199 dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n", in hw_queue_sanity_checks()
200 q->hw_queue_id, num_of_entries); in hw_queue_sanity_checks()
201 return -EAGAIN; in hw_queue_sanity_checks()
208 * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
221 struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id]; in hl_hw_queue_send_cb_no_cmpl()
232 if (q->queue_type != QUEUE_TYPE_CPU) in hl_hw_queue_send_cb_no_cmpl()
233 hdev->asic_funcs->hw_queues_lock(hdev); in hl_hw_queue_send_cb_no_cmpl()
235 if (hdev->disabled) { in hl_hw_queue_send_cb_no_cmpl()
236 rc = -EPERM; in hl_hw_queue_send_cb_no_cmpl()
237 goto out; in hl_hw_queue_send_cb_no_cmpl()
245 if (q->queue_type != QUEUE_TYPE_HW) { in hl_hw_queue_send_cb_no_cmpl()
248 goto out; in hl_hw_queue_send_cb_no_cmpl()
253 out: in hl_hw_queue_send_cb_no_cmpl()
254 if (q->queue_type != QUEUE_TYPE_CPU) in hl_hw_queue_send_cb_no_cmpl()
255 hdev->asic_funcs->hw_queues_unlock(hdev); in hl_hw_queue_send_cb_no_cmpl()
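
hl_hw_queue_send_cb_no_cmpl() takes the global hardware-queues lock unless it targets the CPU queue (which is serialized elsewhere), rejects the submission if the device is disabled, and routes every exit through a single out: label that drops the lock. A user-space sketch of that lock / check / goto-out pattern, using a pthread mutex (names are illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t hw_queues_lock = PTHREAD_MUTEX_INITIALIZER;
static bool device_disabled;

static int send_cb_no_cmpl(bool is_cpu_queue)
{
        int rc = 0;

        if (!is_cpu_queue)
                pthread_mutex_lock(&hw_queues_lock);

        if (device_disabled) {
                rc = -EPERM;
                goto out;
        }

        /* ... sanity checks and descriptor submission would go here ... */

out:
        if (!is_cpu_queue)
                pthread_mutex_unlock(&hw_queues_lock);
        return rc;
}

int main(void)
{
        printf("submit: %d\n", send_cb_no_cmpl(false));
        device_disabled = true;
        printf("submit while disabled: %d\n", send_cb_no_cmpl(false));
        return 0;
}
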
261 * ext_queue_schedule_job - submit a JOB to an external queue
270 struct hl_device *hdev = job->cs->ctx->hdev; in ext_queue_schedule_job()
271 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; in ext_queue_schedule_job()
284 ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK); in ext_queue_schedule_job()
286 cb = job->patched_cb; in ext_queue_schedule_job()
287 len = job->job_cb_size; in ext_queue_schedule_job()
288 ptr = cb->bus_address; in ext_queue_schedule_job()
291 ((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT) in ext_queue_schedule_job()
303 cq = &hdev->completion_queue[q->cq_id]; in ext_queue_schedule_job()
304 cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry); in ext_queue_schedule_job()
306 hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len, in ext_queue_schedule_job()
309 q->msi_vec, in ext_queue_schedule_job()
310 job->contains_dma_pkt); in ext_queue_schedule_job()
312 q->shadow_queue[hl_pi_2_offset(q->pi)] = job; in ext_queue_schedule_job()
314 cq->pi = hl_cq_inc_ptr(cq->pi); in ext_queue_schedule_job()
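
ext_queue_schedule_job() records the job pointer in a host-side shadow queue at the same offset as the hardware ring slot, so the completion handler can map a finished CQ entry back to its job; it also advances the completion queue's own producer index. A simplified sketch of that shadow-queue bookkeeping (the struct layout and names are assumptions):

#include <stdio.h>

#define QUEUE_LEN 8     /* stands in for HL_QUEUE_LENGTH; power of 2 */

struct job {
        int id;
};

/* Host-side array, parallel to the hardware ring, remembering which job
 * occupies each slot.
 */
struct ext_queue {
        struct job *shadow[QUEUE_LEN];
        unsigned int pi;        /* modulo 2 * QUEUE_LEN */
        unsigned int ci;        /* modulo 2 * QUEUE_LEN */
};

static void schedule_job(struct ext_queue *q, struct job *job)
{
        q->shadow[q->pi & (QUEUE_LEN - 1)] = job;
        q->pi = (q->pi + 1) & ((QUEUE_LEN << 1) - 1);
        /* the driver would also build the BD and ring the doorbell here */
}

static struct job *complete_one(struct ext_queue *q)
{
        struct job *job = q->shadow[q->ci & (QUEUE_LEN - 1)];

        q->ci = (q->ci + 1) & ((QUEUE_LEN << 1) - 1);
        return job;
}

int main(void)
{
        struct ext_queue q = { 0 };
        struct job a = { .id = 1 }, b = { .id = 2 };

        schedule_job(&q, &a);
        schedule_job(&q, &b);
        printf("completed job %d\n", complete_one(&q)->id);
        printf("completed job %d\n", complete_one(&q)->id);
        return 0;
}
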
320 * int_queue_schedule_job - submit a JOB to an internal queue
329 struct hl_device *hdev = job->cs->ctx->hdev; in int_queue_schedule_job()
330 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; in int_queue_schedule_job()
335 bd.len = cpu_to_le32(job->job_cb_size); in int_queue_schedule_job()
336 bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb); in int_queue_schedule_job()
338 pi = q->kernel_address + (q->pi & (q->int_queue_len - 1)) * sizeof(bd); in int_queue_schedule_job()
340 q->pi++; in int_queue_schedule_job()
341 q->pi &= ((q->int_queue_len << 1) - 1); in int_queue_schedule_job()
343 hdev->asic_funcs->pqe_write(hdev, pi, &bd); in int_queue_schedule_job()
345 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi); in int_queue_schedule_job()
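
For internal queues the ring lives in on-device memory, so int_queue_schedule_job() computes the byte address of the next slot from the mapped base and hands the descriptor to an ASIC-specific pqe_write() callback before ringing the doorbell. A sketch of that address arithmetic, with a plain memcpy() standing in for the callback (sizes and names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct bd {
        uint32_t ctl;
        uint32_t len;
        uint64_t ptr;
};

#define INT_QUEUE_LEN 16        /* power of 2; the real length comes from the ASIC */

static uint8_t ring[INT_QUEUE_LEN * sizeof(struct bd)];  /* stands in for device memory */

static void pqe_write(void *slot, const struct bd *bd)
{
        /* the driver delegates this to asic_funcs->pqe_write() */
        memcpy(slot, bd, sizeof(*bd));
}

int main(void)
{
        unsigned int pi = 0;
        struct bd bd = { .ctl = 0, .len = 64, .ptr = 0x1000 };

        /* slot address = base + (pi masked to the ring) * descriptor size */
        uint8_t *slot = ring + (pi & (INT_QUEUE_LEN - 1)) * sizeof(bd);

        pi = (pi + 1) & ((INT_QUEUE_LEN << 1) - 1);
        pqe_write(slot, &bd);

        printf("wrote BD at offset %td, new pi %u\n", slot - ring, pi);
        return 0;
}
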
349 * hw_queue_schedule_job - submit a JOB to a H/W queue
358 struct hl_device *hdev = job->cs->ctx->hdev; in hw_queue_schedule_job()
359 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; in hw_queue_schedule_job()
369 offset = job->cs->sequence & (hdev->asic_prop.max_pending_cs - 1); in hw_queue_schedule_job()
371 ((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK); in hw_queue_schedule_job()
373 len = job->job_cb_size; in hw_queue_schedule_job()
381 if (job->patched_cb) in hw_queue_schedule_job()
382 ptr = job->patched_cb->bus_address; in hw_queue_schedule_job()
383 else if (job->is_kernel_allocated_cb) in hw_queue_schedule_job()
384 ptr = job->user_cb->bus_address; in hw_queue_schedule_job()
386 ptr = (u64) (uintptr_t) job->user_cb; in hw_queue_schedule_job()
392 * init_signal_wait_cs - initialize a signal/wait CS
393 * @cs: pointer to the signal/wait CS
397 static void init_signal_wait_cs(struct hl_cs *cs) in init_signal_wait_cs() argument
399 struct hl_ctx *ctx = cs->ctx; in init_signal_wait_cs()
400 struct hl_device *hdev = ctx->hdev; in init_signal_wait_cs()
403 container_of(cs->fence, struct hl_cs_compl, base_fence); in init_signal_wait_cs()
409 /* There is only one job in a signal/wait CS */ in init_signal_wait_cs()
410 job = list_first_entry(&cs->job_list, struct hl_cs_job, in init_signal_wait_cs()
412 q_idx = job->hw_queue_id; in init_signal_wait_cs()
413 hw_queue = &hdev->kernel_queues[q_idx]; in init_signal_wait_cs()
415 if (cs->type & CS_TYPE_SIGNAL) { in init_signal_wait_cs()
416 hw_sob = &hw_queue->hw_sob[hw_queue->curr_sob_offset]; in init_signal_wait_cs()
418 cs_cmpl->hw_sob = hw_sob; in init_signal_wait_cs()
419 cs_cmpl->sob_val = hw_queue->next_sob_val++; in init_signal_wait_cs()
421 dev_dbg(hdev->dev, in init_signal_wait_cs()
423 cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx); in init_signal_wait_cs()
425 hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb, in init_signal_wait_cs()
426 cs_cmpl->hw_sob->sob_id); in init_signal_wait_cs()
428 kref_get(&hw_sob->kref); in init_signal_wait_cs()
431 if (hw_queue->next_sob_val == HL_MAX_SOB_VAL) { in init_signal_wait_cs()
437 kref_put(&hw_sob->kref, hl_sob_reset_error); in init_signal_wait_cs()
438 hw_queue->next_sob_val = 1; in init_signal_wait_cs()
440 hw_queue->curr_sob_offset = in init_signal_wait_cs()
441 (hw_queue->curr_sob_offset + 1) % in init_signal_wait_cs()
444 dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n", in init_signal_wait_cs()
445 hw_queue->curr_sob_offset, q_idx); in init_signal_wait_cs()
447 } else if (cs->type & CS_TYPE_WAIT) { in init_signal_wait_cs()
450 signal_cs_cmpl = container_of(cs->signal_fence, in init_signal_wait_cs()
454 /* copy the SOB id and value of the signal CS */ in init_signal_wait_cs()
455 cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob; in init_signal_wait_cs()
456 cs_cmpl->sob_val = signal_cs_cmpl->sob_val; in init_signal_wait_cs()
458 dev_dbg(hdev->dev, in init_signal_wait_cs()
460 cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, in init_signal_wait_cs()
461 hw_queue->base_mon_id, q_idx); in init_signal_wait_cs()
463 hdev->asic_funcs->gen_wait_cb(hdev, job->patched_cb, in init_signal_wait_cs()
464 cs_cmpl->hw_sob->sob_id, in init_signal_wait_cs()
465 cs_cmpl->sob_val, in init_signal_wait_cs()
466 hw_queue->base_mon_id, in init_signal_wait_cs()
469 kref_get(&cs_cmpl->hw_sob->kref); in init_signal_wait_cs()
473 * wait CS was submitted. in init_signal_wait_cs()
476 hl_fence_put(cs->signal_fence); in init_signal_wait_cs()
477 cs->signal_fence = NULL; in init_signal_wait_cs()
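
For signal/wait command submissions each queue cycles through a small group of reserved sync objects (SOBs): a signal CS takes the next value of the current SOB, and once that SOB is about to saturate the queue rotates to the next reserved SOB and restarts the count at 1; a wait CS copies the SOB id/value pair of the signal it depends on and arms a monitor on it. A user-space sketch of that bookkeeping (HL_MAX_SOB_VAL's real value is not shown in this listing, so the constant below is an assumption):

#include <stdio.h>

#define RSVD_SOBS    2          /* stands in for HL_RSVD_SOBS */
#define MAX_SOB_VAL  (1 << 15)  /* illustrative stand-in for HL_MAX_SOB_VAL */

struct sob_state {
        unsigned int base_sob_id;
        unsigned int curr_sob_offset;
        unsigned int next_sob_val;
};

/* A signal CS consumes the next value of the current SOB; when the SOB is
 * about to saturate, the queue rotates to the next reserved SOB and starts
 * counting from 1 again.
 */
static void take_signal(struct sob_state *s,
                        unsigned int *sob_id, unsigned int *sob_val)
{
        *sob_id = s->base_sob_id + s->curr_sob_offset;
        *sob_val = s->next_sob_val++;

        if (s->next_sob_val == MAX_SOB_VAL) {
                s->curr_sob_offset = (s->curr_sob_offset + 1) % RSVD_SOBS;
                s->next_sob_val = 1;
        }
}

int main(void)
{
        struct sob_state s = { .base_sob_id = 100, .next_sob_val = 1 };
        unsigned int id, val;

        take_signal(&s, &id, &val);
        printf("signal: SOB %u -> %u\n", id, val);      /* SOB 100 -> 1 */

        /* a matching wait CS would copy (id, val) and arm a monitor on it */
        return 0;
}
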
482 * hl_hw_queue_schedule_cs - schedule a command submission
483 * @cs: pointer to the CS
485 int hl_hw_queue_schedule_cs(struct hl_cs *cs) in hl_hw_queue_schedule_cs() argument
487 struct hl_ctx *ctx = cs->ctx; in hl_hw_queue_schedule_cs()
488 struct hl_device *hdev = ctx->hdev; in hl_hw_queue_schedule_cs()
494 hdev->asic_funcs->hw_queues_lock(hdev); in hl_hw_queue_schedule_cs()
497 ctx->cs_counters.device_in_reset_drop_cnt++; in hl_hw_queue_schedule_cs()
498 dev_err(hdev->dev, in hl_hw_queue_schedule_cs()
499 "device is disabled or in reset, CS rejected!\n"); in hl_hw_queue_schedule_cs()
500 rc = -EPERM; in hl_hw_queue_schedule_cs()
501 goto out; in hl_hw_queue_schedule_cs()
504 max_queues = hdev->asic_prop.max_queues; in hl_hw_queue_schedule_cs()
506 q = &hdev->kernel_queues[0]; in hl_hw_queue_schedule_cs()
508 if (cs->jobs_in_queue_cnt[i]) { in hl_hw_queue_schedule_cs()
509 switch (q->queue_type) { in hl_hw_queue_schedule_cs()
512 cs->jobs_in_queue_cnt[i], true); in hl_hw_queue_schedule_cs()
516 cs->jobs_in_queue_cnt[i]); in hl_hw_queue_schedule_cs()
520 cs->jobs_in_queue_cnt[i]); in hl_hw_queue_schedule_cs()
523 dev_err(hdev->dev, "Queue type %d is invalid\n", in hl_hw_queue_schedule_cs()
524 q->queue_type); in hl_hw_queue_schedule_cs()
525 rc = -EINVAL; in hl_hw_queue_schedule_cs()
530 ctx->cs_counters.queue_full_drop_cnt++; in hl_hw_queue_schedule_cs()
534 if (q->queue_type == QUEUE_TYPE_EXT) in hl_hw_queue_schedule_cs()
539 if ((cs->type == CS_TYPE_SIGNAL) || (cs->type == CS_TYPE_WAIT)) in hl_hw_queue_schedule_cs()
540 init_signal_wait_cs(cs); in hl_hw_queue_schedule_cs()
542 spin_lock(&hdev->hw_queues_mirror_lock); in hl_hw_queue_schedule_cs()
543 list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list); in hl_hw_queue_schedule_cs()
545 /* Queue TDR if the CS is the first entry and if timeout is wanted */ in hl_hw_queue_schedule_cs()
546 if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) && in hl_hw_queue_schedule_cs()
547 (list_first_entry(&hdev->hw_queues_mirror_list, in hl_hw_queue_schedule_cs()
548 struct hl_cs, mirror_node) == cs)) { in hl_hw_queue_schedule_cs()
549 cs->tdr_active = true; in hl_hw_queue_schedule_cs()
550 schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies); in hl_hw_queue_schedule_cs()
551 spin_unlock(&hdev->hw_queues_mirror_lock); in hl_hw_queue_schedule_cs()
553 spin_unlock(&hdev->hw_queues_mirror_lock); in hl_hw_queue_schedule_cs()
556 if (!hdev->cs_active_cnt++) { in hl_hw_queue_schedule_cs()
559 ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx]; in hl_hw_queue_schedule_cs()
560 ts->busy_to_idle_ts = ktime_set(0, 0); in hl_hw_queue_schedule_cs()
561 ts->idle_to_busy_ts = ktime_get(); in hl_hw_queue_schedule_cs()
564 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) in hl_hw_queue_schedule_cs()
565 switch (job->queue_type) { in hl_hw_queue_schedule_cs()
579 cs->submitted = true; in hl_hw_queue_schedule_cs()
581 goto out; in hl_hw_queue_schedule_cs()
584 q = &hdev->kernel_queues[0]; in hl_hw_queue_schedule_cs()
586 if ((q->queue_type == QUEUE_TYPE_EXT) && in hl_hw_queue_schedule_cs()
587 (cs->jobs_in_queue_cnt[i])) { in hl_hw_queue_schedule_cs()
589 &hdev->completion_queue[i].free_slots_cnt; in hl_hw_queue_schedule_cs()
590 atomic_add(cs->jobs_in_queue_cnt[i], free_slots); in hl_hw_queue_schedule_cs()
591 cq_cnt--; in hl_hw_queue_schedule_cs()
595 out: in hl_hw_queue_schedule_cs()
596 hdev->asic_funcs->hw_queues_unlock(hdev); in hl_hw_queue_schedule_cs()
602 * hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue
609 struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id]; in hl_hw_queue_inc_ci_kernel()
611 atomic_inc(&q->ci); in hl_hw_queue_inc_ci_kernel()
621 p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, in ext_and_cpu_queue_init()
623 &q->bus_address); in ext_and_cpu_queue_init()
625 p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, in ext_and_cpu_queue_init()
627 &q->bus_address, in ext_and_cpu_queue_init()
630 return -ENOMEM; in ext_and_cpu_queue_init()
632 q->kernel_address = p; in ext_and_cpu_queue_init()
634 q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH, in ext_and_cpu_queue_init()
635 sizeof(*q->shadow_queue), in ext_and_cpu_queue_init()
637 if (!q->shadow_queue) { in ext_and_cpu_queue_init()
638 dev_err(hdev->dev, in ext_and_cpu_queue_init()
640 q->hw_queue_id); in ext_and_cpu_queue_init()
641 rc = -ENOMEM; in ext_and_cpu_queue_init()
646 atomic_set(&q->ci, 0); in ext_and_cpu_queue_init()
647 q->pi = 0; in ext_and_cpu_queue_init()
653 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, in ext_and_cpu_queue_init()
655 q->kernel_address); in ext_and_cpu_queue_init()
657 hdev->asic_funcs->asic_dma_free_coherent(hdev, in ext_and_cpu_queue_init()
659 q->kernel_address, in ext_and_cpu_queue_init()
660 q->bus_address); in ext_and_cpu_queue_init()
669 p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id, in int_queue_init()
670 &q->bus_address, &q->int_queue_len); in int_queue_init()
672 dev_err(hdev->dev, in int_queue_init()
674 q->hw_queue_id); in int_queue_init()
675 return -EFAULT; in int_queue_init()
678 q->kernel_address = p; in int_queue_init()
679 q->pi = 0; in int_queue_init()
680 atomic_set(&q->ci, 0); in int_queue_init()
699 p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, in hw_queue_init()
701 &q->bus_address, in hw_queue_init()
704 return -ENOMEM; in hw_queue_init()
706 q->kernel_address = p; in hw_queue_init()
709 atomic_set(&q->ci, 0); in hw_queue_init()
710 q->pi = 0; in hw_queue_init()
717 struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx]; in sync_stream_queue_init()
718 struct asic_fixed_properties *prop = &hdev->asic_prop; in sync_stream_queue_init()
720 int sob, queue_idx = hdev->sync_stream_queue_idx++; in sync_stream_queue_init()
722 hw_queue->base_sob_id = in sync_stream_queue_init()
723 prop->sync_stream_first_sob + queue_idx * HL_RSVD_SOBS; in sync_stream_queue_init()
724 hw_queue->base_mon_id = in sync_stream_queue_init()
725 prop->sync_stream_first_mon + queue_idx * HL_RSVD_MONS; in sync_stream_queue_init()
726 hw_queue->next_sob_val = 1; in sync_stream_queue_init()
727 hw_queue->curr_sob_offset = 0; in sync_stream_queue_init()
730 hw_sob = &hw_queue->hw_sob[sob]; in sync_stream_queue_init()
731 hw_sob->hdev = hdev; in sync_stream_queue_init()
732 hw_sob->sob_id = hw_queue->base_sob_id + sob; in sync_stream_queue_init()
733 hw_sob->q_idx = q_idx; in sync_stream_queue_init()
734 kref_init(&hw_sob->kref); in sync_stream_queue_init()
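
sync_stream_queue_init() carves per-queue slices out of the global SOB and monitor pools: each sync-stream queue gets HL_RSVD_SOBS consecutive SOB ids and HL_RSVD_MONS monitor ids, offset by its allocation order. A tiny sketch of that partitioning (the pool bases and reservation sizes are made-up values, not the ASIC's):

#include <stdio.h>

#define RSVD_SOBS 2     /* per-queue slice; stands in for HL_RSVD_SOBS */
#define RSVD_MONS 1     /* stands in for HL_RSVD_MONS */

int main(void)
{
        unsigned int first_sob = 32, first_mon = 16;
        unsigned int queue_idx;

        for (queue_idx = 0; queue_idx < 3; queue_idx++)
                printf("queue %u: SOBs %u..%u, monitor %u\n",
                       queue_idx,
                       first_sob + queue_idx * RSVD_SOBS,
                       first_sob + queue_idx * RSVD_SOBS + RSVD_SOBS - 1,
                       first_mon + queue_idx * RSVD_MONS);
        return 0;
}
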
740 struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx]; in sync_stream_queue_reset()
743 * In case we got here due to a stuck CS, the refcnt might be bigger in sync_stream_queue_reset()
746 kref_init(&hw_queue->hw_sob[hw_queue->curr_sob_offset].kref); in sync_stream_queue_reset()
747 hw_queue->curr_sob_offset = 0; in sync_stream_queue_reset()
748 hw_queue->next_sob_val = 1; in sync_stream_queue_reset()
752 * queue_init - main initialization function for H/W queue object
758 * Allocate dma-able memory for the queue and initialize fields
766 q->hw_queue_id = hw_queue_id; in queue_init()
768 switch (q->queue_type) { in queue_init()
782 q->valid = 0; in queue_init()
785 dev_crit(hdev->dev, "wrong queue type %d during init\n", in queue_init()
786 q->queue_type); in queue_init()
787 rc = -EINVAL; in queue_init()
791 if (q->supports_sync_stream) in queue_init()
792 sync_stream_queue_init(hdev, q->hw_queue_id); in queue_init()
797 q->valid = 1; in queue_init()
803 * queue_fini - destroy queue
812 if (!q->valid) in queue_fini()
833 if (q->queue_type == QUEUE_TYPE_INT) in queue_fini()
836 kfree(q->shadow_queue); in queue_fini()
838 if (q->queue_type == QUEUE_TYPE_CPU) in queue_fini()
839 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, in queue_fini()
841 q->kernel_address); in queue_fini()
843 hdev->asic_funcs->asic_dma_free_coherent(hdev, in queue_fini()
845 q->kernel_address, in queue_fini()
846 q->bus_address); in queue_fini()
851 struct asic_fixed_properties *asic = &hdev->asic_prop; in hl_hw_queues_create()
855 hdev->kernel_queues = kcalloc(asic->max_queues, in hl_hw_queues_create()
856 sizeof(*hdev->kernel_queues), GFP_KERNEL); in hl_hw_queues_create()
858 if (!hdev->kernel_queues) { in hl_hw_queues_create()
859 dev_err(hdev->dev, "Not enough memory for H/W queues\n"); in hl_hw_queues_create()
860 return -ENOMEM; in hl_hw_queues_create()
864 for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues; in hl_hw_queues_create()
865 i < asic->max_queues ; i++, q_ready_cnt++, q++) { in hl_hw_queues_create()
867 q->queue_type = asic->hw_queues_props[i].type; in hl_hw_queues_create()
868 q->supports_sync_stream = in hl_hw_queues_create()
869 asic->hw_queues_props[i].supports_sync_stream; in hl_hw_queues_create()
872 dev_err(hdev->dev, in hl_hw_queues_create()
881 for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++) in hl_hw_queues_create()
884 kfree(hdev->kernel_queues); in hl_hw_queues_create()
892 u32 max_queues = hdev->asic_prop.max_queues; in hl_hw_queues_destroy()
895 for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) in hl_hw_queues_destroy()
898 kfree(hdev->kernel_queues); in hl_hw_queues_destroy()
904 u32 max_queues = hdev->asic_prop.max_queues; in hl_hw_queue_reset()
907 for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) { in hl_hw_queue_reset()
908 if ((!q->valid) || in hl_hw_queue_reset()
909 ((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU))) in hl_hw_queue_reset()
911 q->pi = 0; in hl_hw_queue_reset()
912 atomic_set(&q->ci, 0); in hl_hw_queue_reset()
914 if (q->supports_sync_stream) in hl_hw_queue_reset()
915 sync_stream_queue_reset(hdev, q->hw_queue_id); in hl_hw_queue_reset()