Lines Matching full:send

64  *      | 0x0000 | H2G `CTB Descriptor`_ (send)                  |      |
68 * | 0x1000 | H2G `CT Buffer`_ (send) | n*4K |
80 * of the receive buffer (relative to the send) to ensure a G2H response
142 spin_lock_init(&ct->ctbs.send.lock); in intel_guc_ct_init_early()
218 static int ct_register_buffer(struct intel_guc_ct *ct, bool send, in ct_register_buffer() argument
223 err = intel_guc_self_cfg64(ct_to_guc(ct), send ? in ct_register_buffer()
230 err = intel_guc_self_cfg64(ct_to_guc(ct), send ? in ct_register_buffer()
237 err = intel_guc_self_cfg32(ct_to_guc(ct), send ? in ct_register_buffer()
244 send ? "SEND" : "RECV", ERR_PTR(err)); in ct_register_buffer()
284 /* store pointers to desc and cmds for send ctb */ in intel_guc_ct_init()
289 CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "send", in intel_guc_ct_init()
293 guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size, resv_space); in intel_guc_ct_init()
344 /* blob should start with send descriptor */ in intel_guc_ct_enable()
346 GEM_BUG_ON(blob != ct->ctbs.send.desc); in intel_guc_ct_enable()
349 guc_ct_buffer_reset(&ct->ctbs.send); in intel_guc_ct_enable()
363 desc = base + ptrdiff(ct->ctbs.send.desc, blob); in intel_guc_ct_enable()
364 cmds = base + ptrdiff(ct->ctbs.send.cmds, blob); in intel_guc_ct_enable()
365 size = ct->ctbs.send.size * 4; in intel_guc_ct_enable()
435 struct intel_guc_ct_buffer *ctb = &ct->ctbs.send; in ct_write()
530 * For each sent request, GuC shall send back CT response message.
575 struct guc_ct_buffer_desc *send = ct->ctbs.send.desc; in ct_deadlocked() local
576 struct guc_ct_buffer_desc *recv = ct->ctbs.recv.desc; in ct_deadlocked()
580 send->status, recv->status); in ct_deadlocked()
582 atomic_read(&ct->ctbs.send.space) * 4); in ct_deadlocked()
583 CT_ERROR(ct, "Head: %u (Dwords)\n", ct->ctbs.send.desc->head); in ct_deadlocked()
584 CT_ERROR(ct, "Tail: %u (Dwords)\n", ct->ctbs.send.desc->tail); in ct_deadlocked()
591 ct->ctbs.send.broken = true; in ct_deadlocked()
610 lockdep_assert_held(&ct->ctbs.send.lock); in g2h_reserve_space()
625 struct intel_guc_ct_buffer *ctb = &ct->ctbs.send; in h2g_has_room()
654 lockdep_assert_held(&ct->ctbs.send.lock); in has_room_nb()
685 struct intel_guc_ct_buffer *ctb = &ct->ctbs.send; in ct_send_nb()
718 struct intel_guc_ct_buffer *ctb = &ct->ctbs.send; in ct_send()
832 * Command Transport (CT) buffer based GuC send function.
844 WARN(!uc->reset_in_progress, "Unexpected send: action=%#x\n", *action); in intel_guc_ct_send()
848 if (unlikely(ct->ctbs.send.broken)) in intel_guc_ct_send()
860 CT_DEBUG(ct, "send action %#x returned %d (%#x)\n", in intel_guc_ct_send()
1222 * CTB processing in the below workqueue can send CTBs which creates a in ct_handle_event()
1367 atomic_read(&ct->ctbs.send.space) * 4); in intel_guc_ct_print_info()
1369 ct->ctbs.send.desc->head); in intel_guc_ct_print_info()
1371 ct->ctbs.send.desc->tail); in intel_guc_ct_print_info()