Lines matching refs:ipc (cross-reference hits for the ipc identifier in the ivpu driver's IPC code; the leading number on each line is the source line number)
23 struct ivpu_ipc_hdr ipc;
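
Line 23 embeds the IPC header as the first member of the TX buffer, so the VPU address of a slot is also the VPU address of its header, which is what lets lines 90-105 write the header through tx_buf->ipc. A reduced sketch of that layout follows; the field names mirror the writes at lines 99-105, but the widths and ordering are assumptions, not the firmware ABI:

#include <linux/types.h>

/* Hypothetical reduced header; widths/ordering assumed, not the real ABI. */
struct my_ipc_hdr {
        u32 data_addr;  /* VPU address of the JSM payload (line 99) */
        u32 data_size;  /* payload size in bytes (line 101) */
        u32 channel;    /* consumer channel id (line 102) */
        u8  src_node;   /* 0 = host (line 103) */
        u8  dst_node;   /* 1 = VPU (line 104) */
        u8  status;     /* FREE -> ALLOCATED handshake (lines 90, 105) */
};

struct my_ipc_tx_buf {
        struct my_ipc_hdr ipc;  /* header first, as at line 23 */
        /* the JSM request payload follows in the real buffer */
};
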
60 struct ivpu_ipc_info *ipc = vdev->ipc;
62 ivpu_bo_free(ipc->mem_rx);
63 ivpu_bo_free(ipc->mem_tx);
70 struct ivpu_ipc_info *ipc = vdev->ipc;
75 tx_buf_vpu_addr = gen_pool_alloc(ipc->mm_tx, sizeof(*tx_buf));
82 tx_buf = ivpu_to_cpu_addr(ipc->mem_tx, tx_buf_vpu_addr);
84 gen_pool_free(ipc->mm_tx, tx_buf_vpu_addr, sizeof(*tx_buf));
90 if (tx_buf->ipc.status != IVPU_IPC_HDR_FREE)
99 tx_buf->ipc.data_addr = jsm_vpu_addr;
101 tx_buf->ipc.data_size = sizeof(*req);
102 tx_buf->ipc.channel = cons->channel;
103 tx_buf->ipc.src_node = 0;
104 tx_buf->ipc.dst_node = 1;
105 tx_buf->ipc.status = IVPU_IPC_HDR_ALLOCATED;
110 req->request_id = atomic_inc_return(&ipc->request_id);
118 ivpu_ipc_msg_dump(vdev, "TX", &tx_buf->ipc, tx_buf_vpu_addr);
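
Lines 70-118 are the TX slot life cycle: carve a slot out of a genalloc pool that hands back VPU addresses, translate it to a CPU pointer, verify the firmware has marked the slot free, fill the header, and stamp the request with a fresh id for reply matching. A minimal sketch of that sequence, reusing the my_ipc_tx_buf sketch above; my_to_cpu_addr() and the MY_HDR_* values are stand-ins for ivpu_to_cpu_addr() and the IVPU_IPC_HDR_* codes:

#include <linux/genalloc.h>
#include <linux/errno.h>
#include <linux/atomic.h>

#define MY_HDR_FREE             0       /* assumed values */
#define MY_HDR_ALLOCATED        1

/* Stand-in for ivpu_to_cpu_addr(): map a VPU address inside the TX
 * buffer object back to its kernel virtual address. */
struct my_ipc_tx_buf *my_to_cpu_addr(unsigned long vpu_addr);

static int my_tx_prepare(struct gen_pool *mm_tx, atomic_t *request_id,
                         u32 channel, u32 payload_addr, u32 payload_size,
                         u32 *id)
{
        struct my_ipc_tx_buf *tx_buf;
        unsigned long vpu_addr;

        vpu_addr = gen_pool_alloc(mm_tx, sizeof(*tx_buf));      /* cf. line 75 */
        if (!vpu_addr)
                return -ENOMEM;

        tx_buf = my_to_cpu_addr(vpu_addr);                      /* cf. line 82 */
        if (tx_buf->ipc.status != MY_HDR_FREE) {                /* cf. line 90 */
                gen_pool_free(mm_tx, vpu_addr, sizeof(*tx_buf)); /* cf. line 84 */
                return -EBUSY;
        }

        tx_buf->ipc.data_addr = payload_addr;
        tx_buf->ipc.data_size = payload_size;
        tx_buf->ipc.channel = channel;
        tx_buf->ipc.src_node = 0;       /* host */
        tx_buf->ipc.dst_node = 1;       /* VPU */
        tx_buf->ipc.status = MY_HDR_ALLOCATED;

        *id = atomic_inc_return(request_id);    /* cf. line 110 */
        return 0;
}
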
125 struct ivpu_ipc_info *ipc = vdev->ipc;
128 gen_pool_free(ipc->mm_tx, vpu_addr, sizeof(struct ivpu_ipc_tx_buf));
140 struct ivpu_ipc_info *ipc = vdev->ipc;
143 lockdep_assert_held(&ipc->cons_lock);
152 atomic_inc(&ipc->rx_msg_count);
159 list_add_tail(&rx_msg->link, &ipc->cb_msg_list);
173 atomic_dec(&vdev->ipc->rx_msg_count);
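
RX messages are accounted with a bare atomic counter: incremented when a message is queued (line 152), decremented when it is consumed (line 173), and checked against a soft cap in the IRQ handler (line 439) so a stuck consumer cannot pin unbounded memory. A sketch of that back-pressure check; MY_MAX_RX_MSG and its value stand in for IPC_MAX_RX_MSG:

#include <linux/atomic.h>

#define MY_MAX_RX_MSG   16      /* assumed cap */

static atomic_t rx_msg_count = ATOMIC_INIT(0);

/* Called before queueing a message (cf. line 439); the read and the
 * increment are not one atomic step, so this is a soft cap. */
static bool my_rx_msg_charge(void)
{
        if (atomic_read(&rx_msg_count) > MY_MAX_RX_MSG)
                return false;           /* over the cap: drop the message */
        atomic_inc(&rx_msg_count);      /* cf. line 152 */
        return true;
}

static void my_rx_msg_release(void)
{
        atomic_dec(&rx_msg_count);      /* cf. line 173 */
}
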
180 struct ivpu_ipc_info *ipc = vdev->ipc;
192 spin_lock_irq(&ipc->cons_lock);
193 list_add_tail(&cons->link, &ipc->cons_list);
194 spin_unlock_irq(&ipc->cons_lock);
199 struct ivpu_ipc_info *ipc = vdev->ipc;
202 spin_lock_irq(&ipc->cons_lock);
204 spin_unlock_irq(&ipc->cons_lock);
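
Consumer registration and removal (lines 180-204) is a spinlock-protected list: the list is walked from the hard-IRQ handler, so process context must take the lock with interrupts disabled while linking and unlinking. A minimal sketch of that pattern with assumed names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_consumer {
        struct list_head link;
        u32 channel;    /* which IPC channel this consumer claims */
};

static LIST_HEAD(cons_list);
static DEFINE_SPINLOCK(cons_lock);

static void my_consumer_add(struct my_consumer *cons)
{
        /* _irq form: the IRQ handler also takes cons_lock (line 447). */
        spin_lock_irq(&cons_lock);
        list_add_tail(&cons->link, &cons_list); /* cf. line 193 */
        spin_unlock_irq(&cons_lock);
}

static void my_consumer_del(struct my_consumer *cons)
{
        spin_lock_irq(&cons_lock);
        list_del(&cons->link);  /* cf. lines 202-204 */
        spin_unlock_irq(&cons_lock);
}
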
216 struct ivpu_ipc_info *ipc = vdev->ipc;
219 mutex_lock(&ipc->lock);
221 if (!ipc->on) {
234 mutex_unlock(&ipc->lock);
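
Lines 216-234 gate the send path on ipc->on under ipc->lock; enable() and disable() (lines 548-565) flip the flag under the same mutex, so once disable() returns no caller can slip past the check. A minimal sketch of that gate; the -EAGAIN return is an assumption about the bail-out at line 221:

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(my_ipc_lock);
static bool my_ipc_on;

static int my_ipc_send(void)
{
        int ret = 0;

        mutex_lock(&my_ipc_lock);
        if (!my_ipc_on) {       /* cf. line 221 */
                ret = -EAGAIN;  /* assumed errno for "IPC is down" */
                goto unlock;
        }
        /* ... prepare the TX slot and ring the doorbell ... */
unlock:
        mutex_unlock(&my_ipc_lock);
        return ret;
}

static void my_ipc_enable(void)
{
        mutex_lock(&my_ipc_lock);       /* cf. lines 550-552 */
        my_ipc_on = true;
        mutex_unlock(&my_ipc_lock);
}
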
401 struct ivpu_ipc_info *ipc = vdev->ipc;
420 ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr);
429 jsm_msg = ivpu_to_cpu_addr(ipc->mem_rx, ipc_hdr->data_addr);
439 if (atomic_read(&ipc->rx_msg_count) > IPC_MAX_RX_MSG) {
447 spin_lock_irqsave(&ipc->cons_lock, flags);
448 list_for_each_entry(cons, &ipc->cons_list, link) {
455 spin_unlock_irqrestore(&ipc->cons_lock, flags);
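
Lines 401-455 are the hard-IRQ RX path: the header and JSM payload VPU addresses are translated back into the RX buffer's CPU mapping (lines 420, 429), the message is dropped if the soft cap is exceeded (line 439), and the consumer list is then walked under cons_lock so the first consumer registered on the matching channel claims the message. A dispatch sketch over the consumer sketch above:

/* Runs in hard-IRQ context, hence the irqsave lock form (line 447). */
static void my_dispatch(u32 channel, void *msg)
{
        struct my_consumer *cons;
        unsigned long flags;
        bool claimed = false;

        spin_lock_irqsave(&cons_lock, flags);
        list_for_each_entry(cons, &cons_list, link) {   /* cf. line 448 */
                if (cons->channel == channel) {
                        /* queue msg on the consumer, wake its waiter */
                        claimed = true;
                        break;
                }
        }
        spin_unlock_irqrestore(&cons_lock, flags);

        if (!claimed) {
                /* no taker: return the slot to the firmware */
        }
}
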
469 struct ivpu_ipc_info *ipc = vdev->ipc;
475 spin_lock_irq(&ipc->cons_lock);
476 list_splice_tail_init(&ipc->cb_msg_list, &cb_msg_list);
477 spin_unlock_irq(&ipc->cons_lock);
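
The threaded half of the handler (lines 469-477) drains callback messages with a splice: the shared cb_msg_list is emptied onto a stack-local head while holding cons_lock, and the callbacks then run with the lock dropped, so no callback ever executes under the spinlock. Sketch, with an assumed my_rx_msg carrier type:

struct my_rx_msg {
        struct list_head link;
        /* payload pointers ... */
};

static LIST_HEAD(cb_msg_list);

static void my_irq_thread_drain(void)
{
        struct my_rx_msg *msg, *tmp;
        LIST_HEAD(local);

        spin_lock_irq(&cons_lock);
        list_splice_tail_init(&cb_msg_list, &local);    /* cf. line 476 */
        spin_unlock_irq(&cons_lock);

        list_for_each_entry_safe(msg, tmp, &local, link) {
                list_del(&msg->link);
                /* run the registered callback, then free msg and
                 * decrement the RX counter (my_rx_msg_release above) */
        }
}
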
487 struct ivpu_ipc_info *ipc = vdev->ipc;
490 ipc->mem_tx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
491 if (!ipc->mem_tx) {
496 ipc->mem_rx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
497 if (!ipc->mem_rx) {
503 ipc->mm_tx = devm_gen_pool_create(vdev->drm.dev, __ffs(IVPU_IPC_ALIGNMENT),
505 if (IS_ERR(ipc->mm_tx)) {
506 ret = PTR_ERR(ipc->mm_tx);
507 ivpu_err(vdev, "Failed to create gen pool, %pe\n", ipc->mm_tx);
511 ret = gen_pool_add(ipc->mm_tx, ipc->mem_tx->vpu_addr, ivpu_bo_size(ipc->mem_tx), -1);
517 spin_lock_init(&ipc->cons_lock);
518 INIT_LIST_HEAD(&ipc->cons_list);
519 INIT_LIST_HEAD(&ipc->cb_msg_list);
520 ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
522 ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
529 ivpu_bo_free(ipc->mem_rx);
531 ivpu_bo_free(ipc->mem_tx);
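
Init (lines 487-531) creates two 16 KiB write-combined, CPU-mappable buffer objects shared with the firmware and overlays a device-managed genalloc pool on the TX buffer's VPU address range; because the pool is seeded with VPU addresses, gen_pool_alloc() returns VPU addresses directly, which is why line 82 needs a VPU-to-CPU translation. A pool-setup sketch using the stock genalloc API (buffer-object creation is driver-specific and elided):

#include <linux/genalloc.h>
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/err.h>

/* align must be a power of two; __ffs(align) gives the pool's minimum
 * allocation order, as at line 503. vpu_addr/size describe the TX range. */
static int my_pool_init(struct device *dev, unsigned long align,
                        unsigned long vpu_addr, size_t size,
                        struct gen_pool **out)
{
        struct gen_pool *pool;
        int ret;

        pool = devm_gen_pool_create(dev, __ffs(align), -1, "my-ipc-tx");
        if (IS_ERR(pool))
                return PTR_ERR(pool);   /* cf. lines 505-507 */

        ret = gen_pool_add(pool, vpu_addr, size, -1);   /* cf. line 511 */
        if (ret)
                return ret;

        *out = pool;
        return 0;
}

Because the pool is device-managed, it is released with the device, which matches the listing's error paths at lines 529-531 freeing only the two buffer objects and never the pool.
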
537 struct ivpu_ipc_info *ipc = vdev->ipc;
539 drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cons_list));
540 drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
541 drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
548 struct ivpu_ipc_info *ipc = vdev->ipc;
550 mutex_lock(&ipc->lock);
551 ipc->on = true;
552 mutex_unlock(&ipc->lock);
557 struct ivpu_ipc_info *ipc = vdev->ipc;
561 drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
563 mutex_lock(&ipc->lock);
564 ipc->on = false;
565 mutex_unlock(&ipc->lock);
567 spin_lock_irq(&ipc->cons_lock);
568 list_for_each_entry_safe(cons, c, &ipc->cons_list, link) {
577 spin_unlock_irq(&ipc->cons_lock);
579 drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
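
Disable (lines 557-579) is the teardown ordering that makes the gate sketch above sound: first flip ipc->on off under the mutex so no new sender enters, then walk the consumer list under cons_lock and abort every waiter so in-flight receivers fail fast; the WARN at line 579 then checks that no RX message survived. A sketch over the earlier consumer globals; the per-consumer abort is a placeholder:

static void my_ipc_disable(void)
{
        struct my_consumer *cons, *c;

        mutex_lock(&my_ipc_lock);       /* cf. lines 563-565 */
        my_ipc_on = false;              /* no new senders past this point */
        mutex_unlock(&my_ipc_lock);

        /* _safe walk (cf. line 568): aborting a consumer may unlink
         * messages queued on it while we iterate. */
        spin_lock_irq(&cons_lock);
        list_for_each_entry_safe(cons, c, &cons_list, link) {
                /* mark cons aborted, drop its queued messages and
                 * wake anyone blocked waiting on it */
        }
        spin_unlock_irq(&cons_lock);
}
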
584 struct ivpu_ipc_info *ipc = vdev->ipc;
586 mutex_lock(&ipc->lock);
587 drm_WARN_ON(&vdev->drm, ipc->on);
589 memset(ivpu_bo_vaddr(ipc->mem_tx), 0, ivpu_bo_size(ipc->mem_tx));
590 memset(ivpu_bo_vaddr(ipc->mem_rx), 0, ivpu_bo_size(ipc->mem_rx));
593 mutex_unlock(&ipc->lock);