| /linux/drivers/infiniband/sw/rxe/ |
| rxe_queue.h
|     53  enum queue_type {  enum
|     70  enum queue_type type;
|     86  unsigned int elem_size, enum queue_type type);
|    101  enum queue_type type)  in queue_get_producer()
|    128  enum queue_type type)  in queue_get_consumer()
|    154  static inline int queue_empty(struct rxe_queue *q, enum queue_type type)  in queue_empty()
|    162  static inline int queue_full(struct rxe_queue *q, enum queue_type type)  in queue_full()
|    171  enum queue_type type)  in queue_count()
|    180  enum queue_type type)  in queue_advance_producer()
|    216  enum queue_type type)  in queue_advance_consumer()
|    [all …]
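The rxe header above exposes a small ring API in which every accessor takes an enum queue_type alongside the queue pointer. Below is a minimal userspace-style sketch of that shape only; the enum values, struct layout, and index handling are placeholders, not the rxe implementation (the real driver uses the type to decide which index lives in memory shared with the peer).

    /*
     * Illustrative sketch, not rxe code: a ring whose helpers mirror the
     * queue_empty()/queue_full()/queue_advance_producer() API shape above.
     */
    #include <stdbool.h>

    enum queue_type {                 /* hypothetical values */
            QUEUE_TYPE_TO_USER,
            QUEUE_TYPE_FROM_USER,
    };

    struct ring {
            unsigned int producer;
            unsigned int consumer;
            unsigned int log2_num_elem;
    };

    static inline unsigned int ring_mask(const struct ring *q)
    {
            return (1u << q->log2_num_elem) - 1;
    }

    static inline bool ring_empty(const struct ring *q, enum queue_type type)
    {
            /* the type would normally pick which index needs a fresh load */
            (void)type;
            return q->producer == q->consumer;
    }

    static inline bool ring_full(const struct ring *q, enum queue_type type)
    {
            (void)type;
            return ((q->producer + 1) & ring_mask(q)) == q->consumer;
    }

    static inline void ring_advance_producer(struct ring *q, enum queue_type type)
    {
            (void)type;
            q->producer = (q->producer + 1) & ring_mask(q);
    }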
|
| rxe_queue.c
|     56  unsigned int elem_size, enum queue_type type)  in rxe_queue_init()
|    113  enum queue_type type = q->type;  in resize_finish()
|
| rxe_cq.c
|     47  enum queue_type type;  in rxe_cq_from_init()
|
| /linux/drivers/gpu/drm/amd/amdgpu/ |
| mes_userqueue.c
|    161  queue_input.queue_type = queue->queue_type;  in mes_userq_map()
|    224  int queue_type)  in mes_userq_detect_and_reset() argument
|    243  input.queue_type = queue_type;  in mes_userq_detect_and_reset()
|    246  r = amdgpu_mes_detect_and_reset_hung_queues(adev, queue_type, false,  in mes_userq_detect_and_reset()
|    253  if (queue->queue_type == queue_type) {  in mes_userq_detect_and_reset()
|    280  struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];  in mes_userq_mqd_create()
|    309  if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {  in mes_userq_mqd_create()
|    339  } else if (queue->queue_type == AMDGPU_HW_IP_GFX) {  in mes_userq_mqd_create()
|    382  } else if (queue->queue_type == AMDGPU_HW_IP_DMA) {  in mes_userq_mqd_create()
|
| amdgpu_userq.c
|    187  switch (queue->queue_type) {  in amdgpu_userq_start_hang_detect_work()
|    343  adev->userq_funcs[queue->queue_type];  in amdgpu_userq_preempt_helper()
|    368  adev->userq_funcs[queue->queue_type];  in amdgpu_userq_restore_helper()
|    388  adev->userq_funcs[queue->queue_type];  in amdgpu_userq_unmap_helper()
|    414  adev->userq_funcs[queue->queue_type];  in amdgpu_userq_map_helper()
|    453  const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];  in amdgpu_userq_cleanup()
|    590  switch (db_info->queue_type) {  in amdgpu_userq_get_doorbell_index()
|    598  db_info->queue_type);  in amdgpu_userq_get_doorbell_index()
|    654  atomic_dec(&uq_mgr->userq_count[queue->queue_type]);  in amdgpu_userq_destroy()
|    736  seq_printf(m, "queue_type: %d\n", queue->queue_type);  in amdgpu_mqd_info_read()
|    [all …]
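The amdgpu_userq.c matches repeatedly look up a per-device funcs table indexed by queue->queue_type before calling into it. A sketch of that dispatch pattern follows; the struct, enum, and function names are stand-ins, not the amdgpu definitions.

    /* Illustrative ops-table dispatch keyed by queue_type (hypothetical names). */
    enum { HW_IP_GFX, HW_IP_COMPUTE, HW_IP_DMA, HW_IP_MAX };

    struct user_queue;

    struct userq_funcs {
            int (*map)(struct user_queue *q);
            int (*unmap)(struct user_queue *q);
    };

    struct user_queue {
            int queue_type;          /* index into the funcs table */
    };

    static int map_queue(const struct userq_funcs *funcs_tbl[HW_IP_MAX],
                         struct user_queue *q)
    {
            const struct userq_funcs *f = funcs_tbl[q->queue_type];

            if (!f || !f->map)
                    return -1;       /* this IP block has no user-queue support */
            return f->map(q);
    }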
|
| amdgpu_userq.h
|     57  int queue_type;  member
|     91  int queue_type);
|    111  uint32_t queue_type;  member
|
| mes_v11_0.c
|    282  static int convert_to_mes_queue_type(int queue_type)  in convert_to_mes_queue_type() argument
|    284  if (queue_type == AMDGPU_RING_TYPE_GFX)  in convert_to_mes_queue_type()
|    286  else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)  in convert_to_mes_queue_type()
|    288  else if (queue_type == AMDGPU_RING_TYPE_SDMA)  in convert_to_mes_queue_type()
|    347  mes_add_queue_pkt.queue_type =  in mes_v11_0_add_hw_queue()
|    348  convert_to_mes_queue_type(input->queue_type);  in mes_v11_0_add_hw_queue()
|    393  static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_type,  in mes_v11_0_reset_queue_mmio() argument
|    403  if (queue_type == AMDGPU_RING_TYPE_GFX) {  in mes_v11_0_reset_queue_mmio()
|    436  } else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {  in mes_v11_0_reset_queue_mmio()
|    456  } else if (queue_type == AMDGPU_RING_TYPE_SDMA) {  in mes_v11_0_reset_queue_mmio()
|    [all …]
|
| mes_v12_0.c
|    271  static int convert_to_mes_queue_type(int queue_type)  in convert_to_mes_queue_type() argument
|    273  if (queue_type == AMDGPU_RING_TYPE_GFX)  in convert_to_mes_queue_type()
|    275  else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)  in convert_to_mes_queue_type()
|    277  else if (queue_type == AMDGPU_RING_TYPE_SDMA)  in convert_to_mes_queue_type()
|    279  else if (queue_type == AMDGPU_RING_TYPE_MES)  in convert_to_mes_queue_type()
|    334  mes_add_queue_pkt.queue_type =  in mes_v12_0_add_hw_queue()
|    335  convert_to_mes_queue_type(input->queue_type);  in mes_v12_0_add_hw_queue()
|    416  static int mes_v12_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_type,  in mes_v12_0_reset_queue_mmio() argument
|    426  if (queue_type == AMDGPU_RING_TYPE_GFX) {  in mes_v12_0_reset_queue_mmio()
|    459  } else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {  in mes_v12_0_reset_queue_mmio()
|    [all …]
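Both MES versions above carry the same helper that translates the driver-side ring type into the firmware's queue type before it is written into the add-queue packet. A self-contained sketch of that mapping follows, using placeholder enum values rather than the real AMDGPU_RING_TYPE_*/MES constants.

    /* Sketch of a ring-type to firmware queue-type conversion (placeholder enums). */
    enum ring_type { RING_TYPE_GFX, RING_TYPE_COMPUTE, RING_TYPE_SDMA };
    enum mes_qtype { MES_QTYPE_GFX, MES_QTYPE_COMPUTE, MES_QTYPE_SDMA, MES_QTYPE_MAX };

    static int convert_to_mes_qtype(int ring_type)
    {
            if (ring_type == RING_TYPE_GFX)
                    return MES_QTYPE_GFX;
            else if (ring_type == RING_TYPE_COMPUTE)
                    return MES_QTYPE_COMPUTE;
            else if (ring_type == RING_TYPE_SDMA)
                    return MES_QTYPE_SDMA;
            return MES_QTYPE_MAX;    /* unknown ring type */
    }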
|
| /linux/drivers/accel/habanalabs/common/ |
| hw_queue.c
|     53  if (!hdev->asic_prop.max_queues || q->queue_type == QUEUE_TYPE_HW)  in hl_hw_queue_update_ci()
|     62  if (!cs_needs_completion(cs) || q->queue_type == QUEUE_TYPE_INT)  in hl_hw_queue_update_ci()
|    257  if (q->queue_type != QUEUE_TYPE_HW) {  in hl_hw_queue_send_cb_no_cmpl()
|    666  switch (q->queue_type) {  in hl_hw_queue_schedule_cs()
|    683  q->queue_type);  in hl_hw_queue_schedule_cs()
|    695  if (q->queue_type == QUEUE_TYPE_EXT)  in hl_hw_queue_schedule_cs()
|    773  switch (job->queue_type) {  in hl_hw_queue_schedule_cs()
|    796  if ((q->queue_type == QUEUE_TYPE_EXT) &&  in hl_hw_queue_schedule_cs()
|   1003  switch (q->queue_type) {  in queue_init()
|   1021  q->queue_type);  in queue_init()
|    [all …]
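The habanalabs matches show the submission path switching on queue_type (external, internal, or HW queue) and treating only some kinds as needing the full completion machinery. A stripped-down sketch of that switch, with stand-in names rather than the driver's QUEUE_TYPE_* symbols and handlers:

    /* Illustrative per-queue-type scheduling split (hypothetical names). */
    enum queue_kind { QUEUE_KIND_EXT, QUEUE_KIND_INT, QUEUE_KIND_HW };

    struct job { enum queue_kind queue_type; };

    static int submit_ext(struct job *j) { (void)j; return 0; }
    static int submit_int(struct job *j) { (void)j; return 0; }
    static int submit_hw(struct job *j)  { (void)j; return 0; }

    static int schedule_job(struct job *j)
    {
            switch (j->queue_type) {
            case QUEUE_KIND_EXT:
                    return submit_ext(j);   /* assumed to need completion handling */
            case QUEUE_KIND_INT:
                    return submit_int(j);
            case QUEUE_KIND_HW:
                    return submit_hw(j);
            default:
                    return -1;              /* unknown queue type */
            }
    }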
|
| command_submission.c
|    302  return (job->queue_type == QUEUE_TYPE_EXT);  in is_cb_patched()
|    331  parser.queue_type = job->queue_type;  in cs_parser()
|    384  (job->queue_type == QUEUE_TYPE_HW || job->queue_type == QUEUE_TYPE_INT)) {  in hl_complete_job()
|    412  (job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW)) {  in hl_complete_job()
|   1205  enum hl_queue_type *queue_type,  in validate_queue_index() argument
|   1270  *queue_type = hw_queue_prop->type;  in validate_queue_index()
|   1301  enum hl_queue_type queue_type, bool is_kernel_allocated_cb)  in hl_cs_allocate_job() argument
|   1313  job->queue_type = queue_type;  in hl_cs_allocate_job()
|   1319  if (job->queue_type == QUEUE_TYPE_EXT)  in hl_cs_allocate_job()
|   1543  enum hl_queue_type queue_type;  in cs_ioctl_default() local
|    [all …]
|
| /linux/tools/net/ynl/ynltool/ |
| qstats.c
|     42  if (qs->_present.queue_type)  in print_json_qstats()
|     44  netdev_queue_type_str(qs->queue_type));  in print_json_qstats()
|    162  if (qs->_present.queue_type && qs->_present.queue_id)  in print_plain_qstats()
|    164  netdev_queue_type_str(qs->queue_type),  in print_plain_qstats()
|    413  if (qa->queue_type != qb->queue_type)  in cmp_ifindex_type()
|    414  return qa->queue_type - qb->queue_type;  in cmp_ifindex_type()
|    467  enum netdev_queue_type type = sorted[i]->queue_type;  in do_balance()
|    475  sorted[j]->queue_type == type; j++)  in do_balance()
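The qstats.c matches suggest the tool sorts queue statistics so that entries with the same queue_type become adjacent before do_balance() walks runs of equal type. A sketch of such a comparator; the struct layout and field set here are hypothetical, not ynltool's generated types.

    /* Illustrative qsort comparator grouping queues by (ifindex, type, id). */
    #include <stdlib.h>

    struct qstat {
            int ifindex;
            int queue_type;
            int queue_id;
    };

    static int cmp_ifindex_type(const void *a, const void *b)
    {
            const struct qstat *qa = a, *qb = b;

            if (qa->ifindex != qb->ifindex)
                    return qa->ifindex - qb->ifindex;
            if (qa->queue_type != qb->queue_type)
                    return qa->queue_type - qb->queue_type;
            return qa->queue_id - qb->queue_id;
    }

    /* usage: qsort(stats, n, sizeof(stats[0]), cmp_ifindex_type); */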
|
| /linux/drivers/staging/octeon/ |
| ethernet-tx.c
|    134  enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;  in cvm_oct_xmit() local
|    191  queue_type = QUEUE_DROP;  in cvm_oct_xmit()
|    397  queue_type = QUEUE_CORE;  in cvm_oct_xmit()
|    400  queue_type = QUEUE_HW;  in cvm_oct_xmit()
|    420  queue_type = QUEUE_DROP;  in cvm_oct_xmit()
|    435  queue_type = QUEUE_DROP;  in cvm_oct_xmit()
|    440  switch (queue_type) {  in cvm_oct_xmit()
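Here queue_type is a function-local enum: cvm_oct_xmit() first classifies the packet (core queue, hardware queue, or drop) and then acts on the result in a single switch. A compressed illustration of that structure; the surrounding conditions are invented, only the classify-then-switch shape comes from the matches.

    /* Illustrative classify-then-dispatch pattern (invented conditions). */
    enum { QUEUE_CORE, QUEUE_HW, QUEUE_DROP };

    static int xmit_packet(int hw_ring_free, int can_queue_on_core)
    {
            int queue_type;

            if (hw_ring_free)
                    queue_type = QUEUE_HW;      /* hand the packet to hardware */
            else if (can_queue_on_core)
                    queue_type = QUEUE_CORE;    /* defer to a per-core software queue */
            else
                    queue_type = QUEUE_DROP;    /* no room anywhere */

            switch (queue_type) {
            case QUEUE_HW:
                    return 0;
            case QUEUE_CORE:
                    return 1;
            case QUEUE_DROP:
            default:
                    return -1;
            }
    }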
|
| /linux/drivers/net/wireless/ath/ath5k/ |
| qcu.c
|    203  ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,  in ath5k_hw_setup_tx_queue() argument
|    214  switch (queue_type) {  in ath5k_hw_setup_tx_queue()
|    226  switch (queue_type) {  in ath5k_hw_setup_tx_queue()
|    248  ah->ah_txq[queue].tqi_type = queue_type;  in ath5k_hw_setup_tx_queue()
|    251  queue_info->tqi_type = queue_type;  in ath5k_hw_setup_tx_queue()
|
| /linux/drivers/scsi/qla2xxx/ |
| qla_tmpl.h
|    135  uint8_t queue_type;  member
|    196  uint8_t queue_type;  member
|
| qla_tmpl.c
|    329  uint type = ent->t263.queue_type;  in qla27xx_fwdt_entry_t263()
|    365  ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {  in qla27xx_fwdt_entry_t263()
|    633  ulong type = ent->t274.queue_type;  in qla27xx_fwdt_entry_t274()
|    664  ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {  in qla27xx_fwdt_entry_t274()
|
| /linux/drivers/net/ethernet/ibm/ehea/ |
| ehea_phyp.c
|    416  const u8 queue_type, const u64 resource_handle,  in ehea_h_register_rpage() argument
|    422  | EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type);  in ehea_h_register_rpage()
|    500  const u8 pagesize, const u8 queue_type,  in ehea_h_register_rpage_mr() argument
|    509  queue_type, mr_handle,  in ehea_h_register_rpage_mr()
|
| ehea_phyp.h
|    376  const u8 queue_type,
|    397  const u8 pagesize, const u8 queue_type,
|
| /linux/drivers/gpu/drm/amd/include/ |
| mes_v11_api_def.h
|    295  enum MES_QUEUE_TYPE queue_type;  member
|    359  enum MES_QUEUE_TYPE queue_type;  member
|    475  enum MES_QUEUE_TYPE queue_type;  member
|
| /linux/drivers/gpu/drm/amd/amdkfd/ |
| kfd_pm4_headers_vi.h
|     67  enum mes_set_resources_queue_type_enum queue_type:3;  member
|    238  enum mes_map_queues_queue_type_vi_enum queue_type:3;  member
|
| kfd_pm4_headers_ai.h
|     68  enum mes_set_resources_queue_type_enum queue_type:3;  member
|    289  enum mes_map_queues_queue_type_enum queue_type:3;  member
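In both KFD PM4 headers, queue_type is a 3-bit bitfield inside a packet descriptor dword. A sketch of how such a field might be packed and encoded; the union name, neighbouring bits, and field positions are invented for illustration and are not the actual PM4 layout.

    /* Illustrative 3-bit queue_type field inside a packed 32-bit dword. */
    #include <stdint.h>

    union map_queues_dword {
            struct {
                    uint32_t reserved0  : 4;
                    uint32_t queue_type : 3;   /* one of up to 8 queue classes */
                    uint32_t reserved1  : 25;
            } bits;
            uint32_t u32;
    };

    static uint32_t encode_queue_type(uint32_t qt)
    {
            union map_queues_dword d = { .u32 = 0 };

            d.bits.queue_type = qt & 0x7;      /* field is only 3 bits wide */
            return d.u32;
    }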
|
| /linux/drivers/media/platform/qcom/iris/ |
| iris_hfi_queue.h
|    128  u16 queue_type;  member
|
| iris_hfi_queue.c
|     67  if (queue->queue_type == IFACEQ_MSGQ_ID)  in iris_hfi_queue_read()
|    205  iface_q->qhdr->queue_type = queue_id;  in iris_hfi_queue_set_header()
|
| /linux/drivers/infiniband/hw/irdma/ |
| protos.h
|     76  u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type);
|
| /linux/drivers/net/ethernet/huawei/hinic/ |
| hinic_hw_qp_ctxt.h
|    150  u16 queue_type;  member
|
| /linux/drivers/infiniband/hw/mana/ |
| qp.c
|    407  static u32 mana_ib_queue_size(struct ib_qp_init_attr *attr, u32 queue_type)  in mana_ib_queue_size() argument
|    414  if (queue_type == MANA_UD_SEND_QUEUE)  in mana_ib_queue_size()
|    428  static enum gdma_queue_type mana_ib_queue_type(struct ib_qp_init_attr *attr, u32 queue_type)  in mana_ib_queue_type() argument
|    435  if (queue_type == MANA_UD_SEND_QUEUE)  in mana_ib_queue_type()
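The mana matches show both the queue size and the underlying GDMA queue type being selected based on whether the caller asked for the UD send queue. A rough sketch of that selection; the constants, enum values, and sizing formula below are made up and do not reflect the mana_ib helpers.

    /* Illustrative size/type selection keyed off a send-vs-receive queue flag. */
    #include <stdint.h>

    enum gdma_q { GDMA_SQ, GDMA_RQ };            /* hypothetical */
    #define UD_SEND_QUEUE  0
    #define UD_RECV_QUEUE  1

    static uint32_t pick_queue_size(uint32_t max_wr, uint32_t queue_type)
    {
            /* send queues are assumed here to need larger work-queue entries */
            uint32_t wqe_size = (queue_type == UD_SEND_QUEUE) ? 512 : 256;

            return max_wr * wqe_size;
    }

    static enum gdma_q pick_queue_type(uint32_t queue_type)
    {
            return (queue_type == UD_SEND_QUEUE) ? GDMA_SQ : GDMA_RQ;
    }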
|