Lines Matching +full:supports +full:- +full:cqe
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
11 /* Timeout in microseconds */
49 /* Abort - canceled by the driver */
73 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) { in ena_com_mem_addr_set()
74 pr_err("DMA address has more bits that the device supports\n"); in ena_com_mem_addr_set()
75 return -EINVAL; in ena_com_mem_addr_set()
78 ena_addr->mem_addr_low = lower_32_bits(addr); in ena_com_mem_addr_set()
79 ena_addr->mem_addr_high = (u16)upper_32_bits(addr); in ena_com_mem_addr_set()
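/*
 * Editor's sketch (not part of ena_com.c): the width check and 32-bit split
 * done by ena_com_mem_addr_set() above, in plain C. genmask_ull() models
 * GENMASK_ULL(h, l) from <linux/bits.h> (bits h..l set); lower_32_bits()/
 * upper_32_bits() become plain casts and shifts.
 */
#include <stdint.h>

static inline uint64_t genmask_ull(unsigned int h, unsigned int l)
{
	return (~0ULL << l) & (~0ULL >> (63 - h));
}

static int set_mem_addr(uint64_t addr, unsigned int dma_addr_bits,
			uint32_t *lo, uint16_t *hi)
{
	/* any bit at or above dma_addr_bits cannot be expressed to the device */
	if ((addr & genmask_ull(dma_addr_bits - 1, 0)) != addr)
		return -1;

	*lo = (uint32_t)addr;		/* lower_32_bits(addr) */
	*hi = (uint16_t)(addr >> 32);	/* (u16)upper_32_bits(addr) */
	return 0;
}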
86 struct ena_com_admin_sq *sq = &admin_queue->sq; in ena_com_admin_init_sq()
87 u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth); in ena_com_admin_init_sq()
89 sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, in ena_com_admin_init_sq()
90 &sq->dma_addr, GFP_KERNEL); in ena_com_admin_init_sq()
92 if (!sq->entries) { in ena_com_admin_init_sq()
94 return -ENOMEM; in ena_com_admin_init_sq()
97 sq->head = 0; in ena_com_admin_init_sq()
98 sq->tail = 0; in ena_com_admin_init_sq()
99 sq->phase = 1; in ena_com_admin_init_sq()
101 sq->db_addr = NULL; in ena_com_admin_init_sq()
108 struct ena_com_admin_cq *cq = &admin_queue->cq; in ena_com_admin_init_cq()
109 u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth); in ena_com_admin_init_cq()
111 cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, in ena_com_admin_init_cq()
112 &cq->dma_addr, GFP_KERNEL); in ena_com_admin_init_cq()
114 if (!cq->entries) { in ena_com_admin_init_cq()
116 return -ENOMEM; in ena_com_admin_init_cq()
119 cq->head = 0; in ena_com_admin_init_cq()
120 cq->phase = 1; in ena_com_admin_init_cq()
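/*
 * Editor's sketch: the initial ring state shared by the admin SQ and CQ
 * above. phase starts at 1 because dma_alloc_coherent() returns zeroed
 * memory: a descriptor whose phase flag reads 0 is stale, and the first
 * lap of writes tags entries with phase 1 so they are recognized as new.
 */
#include <stdint.h>

struct ring_state {
	uint16_t head;
	uint16_t tail;
	uint8_t phase;
};

static void ring_reset(struct ring_state *r)
{
	r->head = 0;
	r->tail = 0;
	r->phase = 1;
}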
128 struct ena_com_aenq *aenq = &ena_dev->aenq; in ena_com_admin_init_aenq()
132 ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; in ena_com_admin_init_aenq()
134 aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, in ena_com_admin_init_aenq()
135 &aenq->dma_addr, GFP_KERNEL); in ena_com_admin_init_aenq()
137 if (!aenq->entries) { in ena_com_admin_init_aenq()
139 return -ENOMEM; in ena_com_admin_init_aenq()
142 aenq->head = aenq->q_depth; in ena_com_admin_init_aenq()
143 aenq->phase = 1; in ena_com_admin_init_aenq()
145 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr); in ena_com_admin_init_aenq()
146 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr); in ena_com_admin_init_aenq()
148 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF); in ena_com_admin_init_aenq()
149 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF); in ena_com_admin_init_aenq()
152 aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; in ena_com_admin_init_aenq()
156 writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF); in ena_com_admin_init_aenq()
160 return -EINVAL; in ena_com_admin_init_aenq()
163 aenq->aenq_handlers = aenq_handlers; in ena_com_admin_init_aenq()
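/*
 * Editor's sketch: how a caps register such as aenq_caps is packed before
 * the writel() above: queue depth in one bit-field, entry size in another.
 * The field positions used here are illustrative; the real masks and
 * shifts come from ena_regs_defs.h.
 */
#include <stdint.h>

static uint32_t pack_queue_caps(uint16_t depth, uint16_t entry_size)
{
	uint32_t caps = 0;

	caps |= depth & 0xFFFFu;				/* depth field */
	caps |= ((uint32_t)entry_size << 16) & 0xFFFF0000u;	/* entry-size field */
	return caps;
}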
171 comp_ctx->occupied = false; in comp_ctxt_release()
172 atomic_dec(&queue->outstanding_cmds); in comp_ctxt_release()
178 if (unlikely(command_id >= admin_queue->q_depth)) { in get_comp_ctxt()
180 command_id, admin_queue->q_depth); in get_comp_ctxt()
184 if (unlikely(!admin_queue->comp_ctx)) { in get_comp_ctxt()
189 if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) { in get_comp_ctxt()
195 atomic_inc(&admin_queue->outstanding_cmds); in get_comp_ctxt()
196 admin_queue->comp_ctx[command_id].occupied = true; in get_comp_ctxt()
199 return &admin_queue->comp_ctx[command_id]; in get_comp_ctxt()
213 queue_size_mask = admin_queue->q_depth - 1; in __ena_com_submit_admin_cmd()
215 tail_masked = admin_queue->sq.tail & queue_size_mask; in __ena_com_submit_admin_cmd()
218 cnt = (u16)atomic_read(&admin_queue->outstanding_cmds); in __ena_com_submit_admin_cmd()
219 if (cnt >= admin_queue->q_depth) { in __ena_com_submit_admin_cmd()
221 admin_queue->stats.out_of_space++; in __ena_com_submit_admin_cmd()
222 return ERR_PTR(-ENOSPC); in __ena_com_submit_admin_cmd()
225 cmd_id = admin_queue->curr_cmd_id; in __ena_com_submit_admin_cmd()
227 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase & in __ena_com_submit_admin_cmd()
230 cmd->aq_common_descriptor.command_id |= cmd_id & in __ena_com_submit_admin_cmd()
235 return ERR_PTR(-EINVAL); in __ena_com_submit_admin_cmd()
237 comp_ctx->status = ENA_CMD_SUBMITTED; in __ena_com_submit_admin_cmd()
238 comp_ctx->comp_size = (u32)comp_size_in_bytes; in __ena_com_submit_admin_cmd()
239 comp_ctx->user_cqe = comp; in __ena_com_submit_admin_cmd()
240 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode; in __ena_com_submit_admin_cmd()
242 reinit_completion(&comp_ctx->wait_event); in __ena_com_submit_admin_cmd()
244 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes); in __ena_com_submit_admin_cmd()
246 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) & in __ena_com_submit_admin_cmd()
249 admin_queue->sq.tail++; in __ena_com_submit_admin_cmd()
250 admin_queue->stats.submitted_cmd++; in __ena_com_submit_admin_cmd()
252 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0)) in __ena_com_submit_admin_cmd()
253 admin_queue->sq.phase = !admin_queue->sq.phase; in __ena_com_submit_admin_cmd()
255 writel(admin_queue->sq.tail, admin_queue->sq.db_addr); in __ena_com_submit_admin_cmd()
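/*
 * Editor's sketch: the tail/phase bookkeeping performed just before the
 * doorbell write above. The ring depth is a power of two, so a masked tail
 * of 0 after the increment means the producer wrapped and starts a new
 * lap, which is signalled to the consumer by flipping the phase bit.
 */
#include <stdint.h>

static void sq_produce(uint16_t *tail, uint8_t *phase, uint16_t depth)
{
	(*tail)++;
	if (((*tail) & (depth - 1)) == 0)
		*phase = (uint8_t)!*phase;
}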
262 size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx); in ena_com_init_comp_ctxt()
266 admin_queue->comp_ctx = in ena_com_init_comp_ctxt()
267 devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL); in ena_com_init_comp_ctxt()
268 if (unlikely(!admin_queue->comp_ctx)) { in ena_com_init_comp_ctxt()
270 return -ENOMEM; in ena_com_init_comp_ctxt()
273 for (i = 0; i < admin_queue->q_depth; i++) { in ena_com_init_comp_ctxt()
276 init_completion(&comp_ctx->wait_event); in ena_com_init_comp_ctxt()
291 spin_lock_irqsave(&admin_queue->q_lock, flags); in ena_com_submit_admin_cmd()
292 if (unlikely(!admin_queue->running_state)) { in ena_com_submit_admin_cmd()
293 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_submit_admin_cmd()
294 return ERR_PTR(-ENODEV); in ena_com_submit_admin_cmd()
301 admin_queue->running_state = false; in ena_com_submit_admin_cmd()
302 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_submit_admin_cmd()
314 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); in ena_com_init_io_sq()
316 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits; in ena_com_init_io_sq()
317 io_sq->desc_entry_size = in ena_com_init_io_sq()
318 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? in ena_com_init_io_sq()
322 size = io_sq->desc_entry_size * io_sq->q_depth; in ena_com_init_io_sq()
324 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { in ena_com_init_io_sq()
325 dev_node = dev_to_node(ena_dev->dmadev); in ena_com_init_io_sq()
326 set_dev_node(ena_dev->dmadev, ctx->numa_node); in ena_com_init_io_sq()
327 io_sq->desc_addr.virt_addr = in ena_com_init_io_sq()
328 dma_alloc_coherent(ena_dev->dmadev, size, in ena_com_init_io_sq()
329 &io_sq->desc_addr.phys_addr, in ena_com_init_io_sq()
331 set_dev_node(ena_dev->dmadev, dev_node); in ena_com_init_io_sq()
332 if (!io_sq->desc_addr.virt_addr) { in ena_com_init_io_sq()
333 io_sq->desc_addr.virt_addr = in ena_com_init_io_sq()
334 dma_alloc_coherent(ena_dev->dmadev, size, in ena_com_init_io_sq()
335 &io_sq->desc_addr.phys_addr, in ena_com_init_io_sq()
339 if (!io_sq->desc_addr.virt_addr) { in ena_com_init_io_sq()
341 return -ENOMEM; in ena_com_init_io_sq()
345 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { in ena_com_init_io_sq()
347 io_sq->bounce_buf_ctrl.buffer_size = in ena_com_init_io_sq()
348 ena_dev->llq_info.desc_list_entry_size; in ena_com_init_io_sq()
349 io_sq->bounce_buf_ctrl.buffers_num = in ena_com_init_io_sq()
351 io_sq->bounce_buf_ctrl.next_to_use = 0; in ena_com_init_io_sq()
353 size = io_sq->bounce_buf_ctrl.buffer_size * in ena_com_init_io_sq()
354 io_sq->bounce_buf_ctrl.buffers_num; in ena_com_init_io_sq()
356 dev_node = dev_to_node(ena_dev->dmadev); in ena_com_init_io_sq()
357 set_dev_node(ena_dev->dmadev, ctx->numa_node); in ena_com_init_io_sq()
358 io_sq->bounce_buf_ctrl.base_buffer = in ena_com_init_io_sq()
359 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); in ena_com_init_io_sq()
360 set_dev_node(ena_dev->dmadev, dev_node); in ena_com_init_io_sq()
361 if (!io_sq->bounce_buf_ctrl.base_buffer) in ena_com_init_io_sq()
362 io_sq->bounce_buf_ctrl.base_buffer = in ena_com_init_io_sq()
363 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); in ena_com_init_io_sq()
365 if (!io_sq->bounce_buf_ctrl.base_buffer) { in ena_com_init_io_sq()
367 return -ENOMEM; in ena_com_init_io_sq()
370 memcpy(&io_sq->llq_info, &ena_dev->llq_info, in ena_com_init_io_sq()
371 sizeof(io_sq->llq_info)); in ena_com_init_io_sq()
374 io_sq->llq_buf_ctrl.curr_bounce_buf = in ena_com_init_io_sq()
375 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); in ena_com_init_io_sq()
376 memset(io_sq->llq_buf_ctrl.curr_bounce_buf, in ena_com_init_io_sq()
377 0x0, io_sq->llq_info.desc_list_entry_size); in ena_com_init_io_sq()
378 io_sq->llq_buf_ctrl.descs_left_in_line = in ena_com_init_io_sq()
379 io_sq->llq_info.descs_num_before_header; in ena_com_init_io_sq()
380 io_sq->disable_meta_caching = in ena_com_init_io_sq()
381 io_sq->llq_info.disable_meta_caching; in ena_com_init_io_sq()
383 if (io_sq->llq_info.max_entries_in_tx_burst > 0) in ena_com_init_io_sq()
384 io_sq->entries_in_tx_burst_left = in ena_com_init_io_sq()
385 io_sq->llq_info.max_entries_in_tx_burst; in ena_com_init_io_sq()
388 io_sq->tail = 0; in ena_com_init_io_sq()
389 io_sq->next_to_comp = 0; in ena_com_init_io_sq()
390 io_sq->phase = 1; in ena_com_init_io_sq()
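/*
 * Editor's sketch of the allocation pattern used twice above: try the
 * queue's preferred NUMA node first, then retry with no node preference.
 * alloc_on_node() is a hypothetical stand-in for the real code's
 * set_dev_node() + dma_alloc_coherent()/devm_kzalloc() pairing.
 */
#include <stddef.h>

static void *alloc_queue_mem(size_t size, int preferred_node,
			     void *(*alloc_on_node)(size_t size, int node))
{
	void *buf = alloc_on_node(size, preferred_node);

	if (!buf)
		buf = alloc_on_node(size, -1);	/* -1: any node */
	return buf;
}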
402 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr)); in ena_com_init_io_cq()
405 io_cq->cdesc_entry_size_in_bytes = in ena_com_init_io_cq()
406 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? in ena_com_init_io_cq()
410 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; in ena_com_init_io_cq()
412 prev_node = dev_to_node(ena_dev->dmadev); in ena_com_init_io_cq()
413 set_dev_node(ena_dev->dmadev, ctx->numa_node); in ena_com_init_io_cq()
414 io_cq->cdesc_addr.virt_addr = in ena_com_init_io_cq()
415 dma_alloc_coherent(ena_dev->dmadev, size, in ena_com_init_io_cq()
416 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); in ena_com_init_io_cq()
417 set_dev_node(ena_dev->dmadev, prev_node); in ena_com_init_io_cq()
418 if (!io_cq->cdesc_addr.virt_addr) { in ena_com_init_io_cq()
419 io_cq->cdesc_addr.virt_addr = in ena_com_init_io_cq()
420 dma_alloc_coherent(ena_dev->dmadev, size, in ena_com_init_io_cq()
421 &io_cq->cdesc_addr.phys_addr, in ena_com_init_io_cq()
425 if (!io_cq->cdesc_addr.virt_addr) { in ena_com_init_io_cq()
427 return -ENOMEM; in ena_com_init_io_cq()
430 io_cq->phase = 1; in ena_com_init_io_cq()
431 io_cq->head = 0; in ena_com_init_io_cq()
437 struct ena_admin_acq_entry *cqe) in ena_com_handle_single_admin_completion() argument
442 cmd_id = cqe->acq_common_descriptor.command & in ena_com_handle_single_admin_completion()
448 admin_queue->running_state = false; in ena_com_handle_single_admin_completion()
452 comp_ctx->status = ENA_CMD_COMPLETED; in ena_com_handle_single_admin_completion()
453 comp_ctx->comp_status = cqe->acq_common_descriptor.status; in ena_com_handle_single_admin_completion()
455 if (comp_ctx->user_cqe) in ena_com_handle_single_admin_completion()
456 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size); in ena_com_handle_single_admin_completion()
458 if (!admin_queue->polling) in ena_com_handle_single_admin_completion()
459 complete(&comp_ctx->wait_event); in ena_com_handle_single_admin_completion()
464 struct ena_admin_acq_entry *cqe = NULL; in ena_com_handle_admin_completion() local
469 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1); in ena_com_handle_admin_completion()
470 phase = admin_queue->cq.phase; in ena_com_handle_admin_completion()
472 cqe = &admin_queue->cq.entries[head_masked]; in ena_com_handle_admin_completion()
475 while ((READ_ONCE(cqe->acq_common_descriptor.flags) & in ena_com_handle_admin_completion()
481 ena_com_handle_single_admin_completion(admin_queue, cqe); in ena_com_handle_admin_completion()
485 if (unlikely(head_masked == admin_queue->q_depth)) { in ena_com_handle_admin_completion()
490 cqe = &admin_queue->cq.entries[head_masked]; in ena_com_handle_admin_completion()
493 admin_queue->cq.head += comp_num; in ena_com_handle_admin_completion()
494 admin_queue->cq.phase = phase; in ena_com_handle_admin_completion()
495 admin_queue->sq.head += comp_num; in ena_com_handle_admin_completion()
496 admin_queue->stats.completed_cmd += comp_num; in ena_com_handle_admin_completion()
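/*
 * Editor's sketch: the phase-driven consumption loop above, reduced to its
 * core (the READ_ONCE()/barrier details of the real code are omitted). An
 * entry belongs to the current lap when its phase flag matches the ring's
 * phase; reaching the end of the ring resets the index and flips the
 * expected phase.
 */
#include <stdint.h>

struct cqe_model {
	uint8_t flags;	/* bit 0: phase */
};

static uint16_t consume_ring(const struct cqe_model *entries, uint16_t depth,
			     uint16_t *head, uint8_t *phase)
{
	uint16_t masked = *head & (depth - 1);
	uint16_t done = 0;

	while ((entries[masked].flags & 1) == *phase) {
		/* process entries[masked] here */
		done++;
		masked++;
		if (masked == depth) {
			masked = 0;
			*phase = (uint8_t)!*phase;
		}
	}
	*head += done;
	return done;
}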
508 return -ENOMEM; in ena_com_comp_status_to_errno()
510 return -EOPNOTSUPP; in ena_com_comp_status_to_errno()
515 return -EINVAL; in ena_com_comp_status_to_errno()
517 return -EAGAIN; in ena_com_comp_status_to_errno()
520 return -EINVAL; in ena_com_comp_status_to_errno()
538 timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout); in ena_com_wait_and_process_admin_cq_polling()
541 spin_lock_irqsave(&admin_queue->q_lock, flags); in ena_com_wait_and_process_admin_cq_polling()
543 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_wait_and_process_admin_cq_polling()
545 if (comp_ctx->status != ENA_CMD_SUBMITTED) in ena_com_wait_and_process_admin_cq_polling()
551 spin_lock_irqsave(&admin_queue->q_lock, flags); in ena_com_wait_and_process_admin_cq_polling()
552 admin_queue->stats.no_completion++; in ena_com_wait_and_process_admin_cq_polling()
553 admin_queue->running_state = false; in ena_com_wait_and_process_admin_cq_polling()
554 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_wait_and_process_admin_cq_polling()
556 ret = -ETIME; in ena_com_wait_and_process_admin_cq_polling()
561 admin_queue->ena_dev->ena_min_poll_delay_us); in ena_com_wait_and_process_admin_cq_polling()
564 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { in ena_com_wait_and_process_admin_cq_polling()
566 spin_lock_irqsave(&admin_queue->q_lock, flags); in ena_com_wait_and_process_admin_cq_polling()
567 admin_queue->stats.aborted_cmd++; in ena_com_wait_and_process_admin_cq_polling()
568 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_wait_and_process_admin_cq_polling()
569 ret = -ENODEV; in ena_com_wait_and_process_admin_cq_polling()
573 WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", in ena_com_wait_and_process_admin_cq_polling()
574 comp_ctx->status); in ena_com_wait_and_process_admin_cq_polling()
576 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); in ena_com_wait_and_process_admin_cq_polling()
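/*
 * Editor's sketch: the overall shape of the polling wait above, as a
 * userspace model. The real code measures time in jiffies and backs off
 * with ena_delay_exponential_backoff_us(); the 5 ms cap below mirrors the
 * driver's bounded poll delay (an assumption of this sketch).
 */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000u + (uint64_t)ts.tv_nsec / 1000u;
}

static int poll_until(bool (*done)(void *arg), void *arg,
		      uint64_t timeout_us, uint64_t delay_us)
{
	uint64_t deadline = now_us() + timeout_us;

	while (!done(arg)) {
		if (now_us() > deadline)
			return -1;	/* -ETIME in the driver */
		nanosleep(&(struct timespec){ .tv_nsec = (long)(delay_us * 1000) },
			  NULL);
		delay_us *= 2;		/* exponential backoff */
		if (delay_us > 5000)
			delay_us = 5000;
	}
	return 0;
}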
593 struct ena_com_llq_info *llq_info = &ena_dev->llq_info; in ena_com_set_llq()
597 admin_queue = &ena_dev->admin_queue; in ena_com_set_llq()
602 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl; in ena_com_set_llq()
603 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl; in ena_com_set_llq()
604 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header; in ena_com_set_llq()
605 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl; in ena_com_set_llq()
627 struct ena_com_llq_info *llq_info = &ena_dev->llq_info; in ena_com_config_llq_info()
634 supported_feat = llq_features->header_location_ctrl_supported; in ena_com_config_llq_info()
636 if (likely(supported_feat & llq_default_cfg->llq_header_location)) { in ena_com_config_llq_info()
637 llq_info->header_location_ctrl = in ena_com_config_llq_info()
638 llq_default_cfg->llq_header_location; in ena_com_config_llq_info()
642 return -EINVAL; in ena_com_config_llq_info()
645 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) { in ena_com_config_llq_info()
646 supported_feat = llq_features->descriptors_stride_ctrl_supported; in ena_com_config_llq_info()
647 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) { in ena_com_config_llq_info()
648 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl; in ena_com_config_llq_info()
651 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; in ena_com_config_llq_info()
653 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY; in ena_com_config_llq_info()
657 return -EINVAL; in ena_com_config_llq_info()
661 llq_default_cfg->llq_stride_ctrl, supported_feat, in ena_com_config_llq_info()
662 llq_info->desc_stride_ctrl); in ena_com_config_llq_info()
665 llq_info->desc_stride_ctrl = 0; in ena_com_config_llq_info()
668 supported_feat = llq_features->entry_size_ctrl_supported; in ena_com_config_llq_info()
669 if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) { in ena_com_config_llq_info()
670 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size; in ena_com_config_llq_info()
671 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value; in ena_com_config_llq_info()
674 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B; in ena_com_config_llq_info()
675 llq_info->desc_list_entry_size = 128; in ena_com_config_llq_info()
677 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B; in ena_com_config_llq_info()
678 llq_info->desc_list_entry_size = 192; in ena_com_config_llq_info()
680 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B; in ena_com_config_llq_info()
681 llq_info->desc_list_entry_size = 256; in ena_com_config_llq_info()
685 return -EINVAL; in ena_com_config_llq_info()
689 llq_default_cfg->llq_ring_entry_size, supported_feat, in ena_com_config_llq_info()
690 llq_info->desc_list_entry_size); in ena_com_config_llq_info()
692 if (unlikely(llq_info->desc_list_entry_size & 0x7)) { in ena_com_config_llq_info()
696 pr_err("Illegal entry size %d\n", llq_info->desc_list_entry_size); in ena_com_config_llq_info()
697 return -EINVAL; in ena_com_config_llq_info()
700 if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) in ena_com_config_llq_info()
701 llq_info->descs_per_entry = llq_info->desc_list_entry_size / in ena_com_config_llq_info()
704 llq_info->descs_per_entry = 1; in ena_com_config_llq_info()
706 supported_feat = llq_features->desc_num_before_header_supported; in ena_com_config_llq_info()
707 if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) { in ena_com_config_llq_info()
708 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header; in ena_com_config_llq_info()
711 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; in ena_com_config_llq_info()
713 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1; in ena_com_config_llq_info()
715 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4; in ena_com_config_llq_info()
717 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8; in ena_com_config_llq_info()
721 return -EINVAL; in ena_com_config_llq_info()
725 llq_default_cfg->llq_num_decs_before_header, in ena_com_config_llq_info()
726 supported_feat, llq_info->descs_num_before_header); in ena_com_config_llq_info()
729 llq_accel_mode_get = llq_features->accel_mode.u.get; in ena_com_config_llq_info()
731 llq_info->disable_meta_caching = in ena_com_config_llq_info()
736 llq_info->max_entries_in_tx_burst = in ena_com_config_llq_info()
738 llq_default_cfg->llq_ring_entry_size_value; in ena_com_config_llq_info()
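/*
 * Editor's sketch: the per-attribute selection policy that
 * ena_com_config_llq_info() applies above: take the driver's default when
 * the device reports it as supported, otherwise walk a fixed preference
 * order, and fail only when nothing usable is advertised.
 */
#include <stdint.h>

static int pick_supported(uint32_t supported, uint32_t preferred,
			  const uint32_t *fallbacks, unsigned int n,
			  uint32_t *out)
{
	unsigned int i;

	if (supported & preferred) {
		*out = preferred;
		return 0;
	}
	for (i = 0; i < n; i++) {
		if (supported & fallbacks[i]) {
			*out = fallbacks[i];
			return 0;
		}
	}
	return -1;	/* -EINVAL in the driver */
}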
753 wait_for_completion_timeout(&comp_ctx->wait_event, in ena_com_wait_and_process_admin_cq_interrupts()
755 admin_queue->completion_timeout)); in ena_com_wait_and_process_admin_cq_interrupts()
760 * 2) There is completion but the driver didn't get any MSI-X interrupt. in ena_com_wait_and_process_admin_cq_interrupts()
762 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) { in ena_com_wait_and_process_admin_cq_interrupts()
763 spin_lock_irqsave(&admin_queue->q_lock, flags); in ena_com_wait_and_process_admin_cq_interrupts()
765 admin_queue->stats.no_completion++; in ena_com_wait_and_process_admin_cq_interrupts()
766 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_wait_and_process_admin_cq_interrupts()
768 if (comp_ctx->status == ENA_CMD_COMPLETED) { in ena_com_wait_and_process_admin_cq_interrupts()
769 …pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d),… in ena_com_wait_and_process_admin_cq_interrupts()
770 comp_ctx->cmd_opcode, in ena_com_wait_and_process_admin_cq_interrupts()
771 admin_queue->auto_polling ? "ON" : "OFF"); in ena_com_wait_and_process_admin_cq_interrupts()
773 if (admin_queue->auto_polling) in ena_com_wait_and_process_admin_cq_interrupts()
774 admin_queue->polling = true; in ena_com_wait_and_process_admin_cq_interrupts()
777 comp_ctx->cmd_opcode, comp_ctx->status); in ena_com_wait_and_process_admin_cq_interrupts()
783 if (!admin_queue->polling) { in ena_com_wait_and_process_admin_cq_interrupts()
784 admin_queue->running_state = false; in ena_com_wait_and_process_admin_cq_interrupts()
785 ret = -ETIME; in ena_com_wait_and_process_admin_cq_interrupts()
790 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); in ena_com_wait_and_process_admin_cq_interrupts()
802 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; in ena_com_reg_bar_read32()
804 mmio_read->read_resp; in ena_com_reg_bar_read32()
807 u32 timeout = mmio_read->reg_read_to; in ena_com_reg_bar_read32()
815 if (!mmio_read->readless_supported) in ena_com_reg_bar_read32()
816 return readl(ena_dev->reg_bar + offset); in ena_com_reg_bar_read32()
818 spin_lock_irqsave(&mmio_read->lock, flags); in ena_com_reg_bar_read32()
819 mmio_read->seq_num++; in ena_com_reg_bar_read32()
821 read_resp->req_id = mmio_read->seq_num + 0xDEAD; in ena_com_reg_bar_read32()
824 mmio_read_reg |= mmio_read->seq_num & in ena_com_reg_bar_read32()
827 writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); in ena_com_reg_bar_read32()
830 if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num) in ena_com_reg_bar_read32()
838 mmio_read->seq_num, offset, read_resp->req_id, in ena_com_reg_bar_read32()
839 read_resp->reg_off); in ena_com_reg_bar_read32()
844 if (read_resp->reg_off != offset) { in ena_com_reg_bar_read32()
848 ret = read_resp->reg_val; in ena_com_reg_bar_read32()
851 spin_unlock_irqrestore(&mmio_read->lock, flags); in ena_com_reg_bar_read32()
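/*
 * Editor's sketch of the readless-read handshake above: poison the DMA
 * response's req_id, write the register offset plus a sequence number to a
 * trigger register, then poll until the device writes back a response with
 * the matching req_id. The trigger-word layout here is illustrative only.
 */
#include <stdint.h>

struct mmio_resp_model {
	volatile uint16_t req_id;
	volatile uint16_t reg_off;
	volatile uint32_t reg_val;
};

static int readless_read(struct mmio_resp_model *resp,
			 volatile uint32_t *trigger, uint16_t *seq_num,
			 uint16_t offset, unsigned int tries, uint32_t *val)
{
	unsigned int i;

	(*seq_num)++;
	resp->req_id = (uint16_t)(*seq_num + 0xDEAD);	/* poison stale data */
	*trigger = ((uint32_t)offset << 16) | *seq_num;	/* illustrative layout */

	for (i = 0; i < tries; i++)
		if (resp->req_id == *seq_num)
			break;
	if (i == tries || resp->reg_off != offset)
		return -1;	/* timed out or mismatched completion */

	*val = resp->reg_val;
	return 0;
}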
857 * Polling mode - wait until the completion is available.
858 * Async mode - wait on wait queue until the completion is ready
866 if (admin_queue->polling) in ena_com_wait_and_process_admin_cq()
877 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_destroy_io_sq()
885 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) in ena_com_destroy_io_sq()
894 destroy_cmd.sq.sq_idx = io_sq->idx; in ena_com_destroy_io_sq()
903 if (unlikely(ret && (ret != -ENODEV))) in ena_com_destroy_io_sq()
915 if (io_cq->cdesc_addr.virt_addr) { in ena_com_io_queue_free()
916 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; in ena_com_io_queue_free()
918 dma_free_coherent(ena_dev->dmadev, size, in ena_com_io_queue_free()
919 io_cq->cdesc_addr.virt_addr, in ena_com_io_queue_free()
920 io_cq->cdesc_addr.phys_addr); in ena_com_io_queue_free()
922 io_cq->cdesc_addr.virt_addr = NULL; in ena_com_io_queue_free()
925 if (io_sq->desc_addr.virt_addr) { in ena_com_io_queue_free()
926 size = io_sq->desc_entry_size * io_sq->q_depth; in ena_com_io_queue_free()
928 dma_free_coherent(ena_dev->dmadev, size, in ena_com_io_queue_free()
929 io_sq->desc_addr.virt_addr, in ena_com_io_queue_free()
930 io_sq->desc_addr.phys_addr); in ena_com_io_queue_free()
932 io_sq->desc_addr.virt_addr = NULL; in ena_com_io_queue_free()
935 if (io_sq->bounce_buf_ctrl.base_buffer) { in ena_com_io_queue_free()
936 devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer); in ena_com_io_queue_free()
937 io_sq->bounce_buf_ctrl.base_buffer = NULL; in ena_com_io_queue_free()
955 return -ETIME; in wait_for_reset_state()
963 return -ETIME; in wait_for_reset_state()
965 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us); in wait_for_reset_state()
976 !(ena_dev->supported_features & feature_mask)) in ena_com_check_supported_feature_id()
995 return -EOPNOTSUPP; in ena_com_get_feature_ex()
999 admin_queue = &ena_dev->admin_queue; in ena_com_get_feature_ex()
1051 return ena_dev->rss.hash_func; in ena_com_get_current_hash_function()
1057 (ena_dev->rss).hash_key; in ena_com_hash_key_fill_default_key()
1059 netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key)); in ena_com_hash_key_fill_default_key()
1063 hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS; in ena_com_hash_key_fill_default_key()
1068 struct ena_rss *rss = &ena_dev->rss; in ena_com_hash_key_allocate()
1072 return -EOPNOTSUPP; in ena_com_hash_key_allocate()
1074 rss->hash_key = in ena_com_hash_key_allocate()
1075 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), in ena_com_hash_key_allocate()
1076 &rss->hash_key_dma_addr, GFP_KERNEL); in ena_com_hash_key_allocate()
1078 if (unlikely(!rss->hash_key)) in ena_com_hash_key_allocate()
1079 return -ENOMEM; in ena_com_hash_key_allocate()
1086 struct ena_rss *rss = &ena_dev->rss; in ena_com_hash_key_destroy()
1088 if (rss->hash_key) in ena_com_hash_key_destroy()
1089 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), in ena_com_hash_key_destroy()
1090 rss->hash_key, rss->hash_key_dma_addr); in ena_com_hash_key_destroy()
1091 rss->hash_key = NULL; in ena_com_hash_key_destroy()
1096 struct ena_rss *rss = &ena_dev->rss; in ena_com_hash_ctrl_init()
1098 rss->hash_ctrl = in ena_com_hash_ctrl_init()
1099 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), in ena_com_hash_ctrl_init()
1100 &rss->hash_ctrl_dma_addr, GFP_KERNEL); in ena_com_hash_ctrl_init()
1102 if (unlikely(!rss->hash_ctrl)) in ena_com_hash_ctrl_init()
1103 return -ENOMEM; in ena_com_hash_ctrl_init()
1110 struct ena_rss *rss = &ena_dev->rss; in ena_com_hash_ctrl_destroy()
1112 if (rss->hash_ctrl) in ena_com_hash_ctrl_destroy()
1113 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), in ena_com_hash_ctrl_destroy()
1114 rss->hash_ctrl, rss->hash_ctrl_dma_addr); in ena_com_hash_ctrl_destroy()
1115 rss->hash_ctrl = NULL; in ena_com_hash_ctrl_destroy()
1121 struct ena_rss *rss = &ena_dev->rss; in ena_com_indirect_table_allocate()
1136 return -EINVAL; in ena_com_indirect_table_allocate()
1142 rss->rss_ind_tbl = in ena_com_indirect_table_allocate()
1143 dma_alloc_coherent(ena_dev->dmadev, tbl_size, in ena_com_indirect_table_allocate()
1144 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); in ena_com_indirect_table_allocate()
1145 if (unlikely(!rss->rss_ind_tbl)) in ena_com_indirect_table_allocate()
1149 rss->host_rss_ind_tbl = in ena_com_indirect_table_allocate()
1150 devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL); in ena_com_indirect_table_allocate()
1151 if (unlikely(!rss->host_rss_ind_tbl)) in ena_com_indirect_table_allocate()
1154 rss->tbl_log_size = log_size; in ena_com_indirect_table_allocate()
1162 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, in ena_com_indirect_table_allocate()
1163 rss->rss_ind_tbl_dma_addr); in ena_com_indirect_table_allocate()
1164 rss->rss_ind_tbl = NULL; in ena_com_indirect_table_allocate()
1166 rss->tbl_log_size = 0; in ena_com_indirect_table_allocate()
1167 return -ENOMEM; in ena_com_indirect_table_allocate()
1172 struct ena_rss *rss = &ena_dev->rss; in ena_com_indirect_table_destroy()
1173 size_t tbl_size = (1ULL << rss->tbl_log_size) * in ena_com_indirect_table_destroy()
1176 if (rss->rss_ind_tbl) in ena_com_indirect_table_destroy()
1177 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, in ena_com_indirect_table_destroy()
1178 rss->rss_ind_tbl_dma_addr); in ena_com_indirect_table_destroy()
1179 rss->rss_ind_tbl = NULL; in ena_com_indirect_table_destroy()
1181 if (rss->host_rss_ind_tbl) in ena_com_indirect_table_destroy()
1182 devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl); in ena_com_indirect_table_destroy()
1183 rss->host_rss_ind_tbl = NULL; in ena_com_indirect_table_destroy()
1189 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_create_io_sq()
1199 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) in ena_com_create_io_sq()
1208 create_cmd.sq_caps_2 |= io_sq->mem_queue_type & in ena_com_create_io_sq()
1219 create_cmd.sq_depth = io_sq->q_depth; in ena_com_create_io_sq()
1221 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { in ena_com_create_io_sq()
1224 io_sq->desc_addr.phys_addr); in ena_com_create_io_sq()
1241 io_sq->idx = cmd_completion.sq_idx; in ena_com_create_io_sq()
1243 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + in ena_com_create_io_sq()
1246 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { in ena_com_create_io_sq()
1247 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar in ena_com_create_io_sq()
1250 io_sq->desc_addr.pbuf_dev_addr = in ena_com_create_io_sq()
1251 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + in ena_com_create_io_sq()
1255 pr_debug("Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth); in ena_com_create_io_sq()
1262 struct ena_rss *rss = &ena_dev->rss; in ena_com_ind_tbl_convert_to_device()
1267 for (i = 0; i < 1 << rss->tbl_log_size; i++) { in ena_com_ind_tbl_convert_to_device()
1268 qid = rss->host_rss_ind_tbl[i]; in ena_com_ind_tbl_convert_to_device()
1270 return -EINVAL; in ena_com_ind_tbl_convert_to_device()
1272 io_sq = &ena_dev->io_sq_queues[qid]; in ena_com_ind_tbl_convert_to_device()
1274 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX) in ena_com_ind_tbl_convert_to_device()
1275 return -EINVAL; in ena_com_ind_tbl_convert_to_device()
1277 rss->rss_ind_tbl[i].cq_idx = io_sq->idx; in ena_com_ind_tbl_convert_to_device()
1286 u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution; in ena_com_update_intr_delay_resolution()
1294 ena_dev->intr_moder_rx_interval = in ena_com_update_intr_delay_resolution()
1295 ena_dev->intr_moder_rx_interval * in ena_com_update_intr_delay_resolution()
1300 ena_dev->intr_moder_tx_interval = in ena_com_update_intr_delay_resolution()
1301 ena_dev->intr_moder_tx_interval * in ena_com_update_intr_delay_resolution()
1305 ena_dev->intr_delay_resolution = intr_delay_resolution; in ena_com_update_intr_delay_resolution()
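/*
 * Editor's sketch: rescaling a stored moderation interval when the device
 * reports a new delay resolution, as above. Intervals are kept in units of
 * the resolution, so the raw count is converted back through the previous
 * resolution before dividing by the new one; zero resolutions are clamped
 * to 1 to avoid dividing by (or multiplying with) zero.
 */
#include <stdint.h>

static uint32_t rescale_interval(uint32_t interval, uint16_t prev_res,
				 uint16_t new_res)
{
	if (!prev_res)
		prev_res = 1;
	if (!new_res)
		new_res = 1;
	return interval * prev_res / new_res;
}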
1324 if (comp_ctx == ERR_PTR(-ENODEV)) in ena_com_execute_admin_command()
1336 if (admin_queue->running_state) in ena_com_execute_admin_command()
1347 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_create_io_cq()
1356 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) & in ena_com_create_io_cq()
1361 create_cmd.msix_vector = io_cq->msix_vector; in ena_com_create_io_cq()
1362 create_cmd.cq_depth = io_cq->q_depth; in ena_com_create_io_cq()
1366 io_cq->cdesc_addr.phys_addr); in ena_com_create_io_cq()
1382 io_cq->idx = cmd_completion.cq_idx; in ena_com_create_io_cq()
1384 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + in ena_com_create_io_cq()
1388 io_cq->cq_head_db_reg = in ena_com_create_io_cq()
1389 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + in ena_com_create_io_cq()
1393 io_cq->numa_node_cfg_reg = in ena_com_create_io_cq()
1394 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + in ena_com_create_io_cq()
1397 pr_debug("Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); in ena_com_create_io_cq()
1409 return -EINVAL; in ena_com_get_io_handlers()
1412 *io_sq = &ena_dev->io_sq_queues[qid]; in ena_com_get_io_handlers()
1413 *io_cq = &ena_dev->io_cq_queues[qid]; in ena_com_get_io_handlers()
1420 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_abort_admin_commands()
1424 if (!admin_queue->comp_ctx) in ena_com_abort_admin_commands()
1427 for (i = 0; i < admin_queue->q_depth; i++) { in ena_com_abort_admin_commands()
1432 comp_ctx->status = ENA_CMD_ABORTED; in ena_com_abort_admin_commands()
1434 complete(&comp_ctx->wait_event); in ena_com_abort_admin_commands()
1440 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_wait_for_abort_completion()
1444 spin_lock_irqsave(&admin_queue->q_lock, flags); in ena_com_wait_for_abort_completion()
1445 while (atomic_read(&admin_queue->outstanding_cmds) != 0) { in ena_com_wait_for_abort_completion()
1446 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_wait_for_abort_completion()
1448 ena_dev->ena_min_poll_delay_us); in ena_com_wait_for_abort_completion()
1449 spin_lock_irqsave(&admin_queue->q_lock, flags); in ena_com_wait_for_abort_completion()
1451 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_wait_for_abort_completion()
1457 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_destroy_io_cq()
1464 destroy_cmd.cq_idx = io_cq->idx; in ena_com_destroy_io_cq()
1473 if (unlikely(ret && (ret != -ENODEV))) in ena_com_destroy_io_cq()
1481 return ena_dev->admin_queue.running_state; in ena_com_get_admin_running_state()
1486 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_set_admin_running_state()
1489 spin_lock_irqsave(&admin_queue->q_lock, flags); in ena_com_set_admin_running_state()
1490 ena_dev->admin_queue.running_state = state; in ena_com_set_admin_running_state()
1491 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_set_admin_running_state()
1496 u16 depth = ena_dev->aenq.q_depth; in ena_com_admin_aenq_enable()
1498 WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n"); in ena_com_admin_aenq_enable()
1503 writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); in ena_com_admin_aenq_enable()
1523 return -EOPNOTSUPP; in ena_com_set_aenq_config()
1527 admin_queue = &ena_dev->admin_queue; in ena_com_set_aenq_config()
1553 return -ETIME; in ena_com_get_dma_width()
1563 return -EINVAL; in ena_com_get_dma_width()
1566 ena_dev->dma_addr_bits = width; in ena_com_get_dma_width()
1587 return -ETIME; in ena_com_validate_version()
1611 pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n"); in ena_com_validate_version()
1612 return -1; in ena_com_validate_version()
1623 if (!admin_queue->comp_ctx) in ena_com_free_ena_admin_queue_comp_ctx()
1626 devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx); in ena_com_free_ena_admin_queue_comp_ctx()
1628 admin_queue->comp_ctx = NULL; in ena_com_free_ena_admin_queue_comp_ctx()
1633 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_admin_destroy()
1634 struct ena_com_admin_cq *cq = &admin_queue->cq; in ena_com_admin_destroy()
1635 struct ena_com_admin_sq *sq = &admin_queue->sq; in ena_com_admin_destroy()
1636 struct ena_com_aenq *aenq = &ena_dev->aenq; in ena_com_admin_destroy()
1641 size = ADMIN_SQ_SIZE(admin_queue->q_depth); in ena_com_admin_destroy()
1642 if (sq->entries) in ena_com_admin_destroy()
1643 dma_free_coherent(ena_dev->dmadev, size, sq->entries, in ena_com_admin_destroy()
1644 sq->dma_addr); in ena_com_admin_destroy()
1645 sq->entries = NULL; in ena_com_admin_destroy()
1647 size = ADMIN_CQ_SIZE(admin_queue->q_depth); in ena_com_admin_destroy()
1648 if (cq->entries) in ena_com_admin_destroy()
1649 dma_free_coherent(ena_dev->dmadev, size, cq->entries, in ena_com_admin_destroy()
1650 cq->dma_addr); in ena_com_admin_destroy()
1651 cq->entries = NULL; in ena_com_admin_destroy()
1653 size = ADMIN_AENQ_SIZE(aenq->q_depth); in ena_com_admin_destroy()
1654 if (ena_dev->aenq.entries) in ena_com_admin_destroy()
1655 dma_free_coherent(ena_dev->dmadev, size, aenq->entries, in ena_com_admin_destroy()
1656 aenq->dma_addr); in ena_com_admin_destroy()
1657 aenq->entries = NULL; in ena_com_admin_destroy()
1667 writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF); in ena_com_set_admin_polling_mode()
1668 ena_dev->admin_queue.polling = polling; in ena_com_set_admin_polling_mode()
1674 ena_dev->admin_queue.auto_polling = polling; in ena_com_set_admin_auto_polling_mode()
1679 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; in ena_com_mmio_reg_read_request_init()
1681 spin_lock_init(&mmio_read->lock); in ena_com_mmio_reg_read_request_init()
1682 mmio_read->read_resp = in ena_com_mmio_reg_read_request_init()
1683 dma_alloc_coherent(ena_dev->dmadev, in ena_com_mmio_reg_read_request_init()
1684 sizeof(*mmio_read->read_resp), in ena_com_mmio_reg_read_request_init()
1685 &mmio_read->read_resp_dma_addr, GFP_KERNEL); in ena_com_mmio_reg_read_request_init()
1686 if (unlikely(!mmio_read->read_resp)) in ena_com_mmio_reg_read_request_init()
1691 mmio_read->read_resp->req_id = 0x0; in ena_com_mmio_reg_read_request_init()
1692 mmio_read->seq_num = 0x0; in ena_com_mmio_reg_read_request_init()
1693 mmio_read->readless_supported = true; in ena_com_mmio_reg_read_request_init()
1699 return -ENOMEM; in ena_com_mmio_reg_read_request_init()
1704 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; in ena_com_set_mmio_read_mode()
1706 mmio_read->readless_supported = readless_supported; in ena_com_set_mmio_read_mode()
1711 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; in ena_com_mmio_reg_read_request_destroy()
1713 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); in ena_com_mmio_reg_read_request_destroy()
1714 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); in ena_com_mmio_reg_read_request_destroy()
1716 dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), in ena_com_mmio_reg_read_request_destroy()
1717 mmio_read->read_resp, mmio_read->read_resp_dma_addr); in ena_com_mmio_reg_read_request_destroy()
1719 mmio_read->read_resp = NULL; in ena_com_mmio_reg_read_request_destroy()
1724 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; in ena_com_mmio_reg_read_request_write_dev_addr()
1727 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr); in ena_com_mmio_reg_read_request_write_dev_addr()
1728 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr); in ena_com_mmio_reg_read_request_write_dev_addr()
1730 writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); in ena_com_mmio_reg_read_request_write_dev_addr()
1731 writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); in ena_com_mmio_reg_read_request_write_dev_addr()
1737 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_admin_init()
1745 return -ETIME; in ena_com_admin_init()
1750 return -ENODEV; in ena_com_admin_init()
1753 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH; in ena_com_admin_init()
1755 admin_queue->q_dmadev = ena_dev->dmadev; in ena_com_admin_init()
1756 admin_queue->polling = false; in ena_com_admin_init()
1757 admin_queue->curr_cmd_id = 0; in ena_com_admin_init()
1759 atomic_set(&admin_queue->outstanding_cmds, 0); in ena_com_admin_init()
1761 spin_lock_init(&admin_queue->q_lock); in ena_com_admin_init()
1775 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + in ena_com_admin_init()
1778 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr); in ena_com_admin_init()
1779 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr); in ena_com_admin_init()
1781 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF); in ena_com_admin_init()
1782 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF); in ena_com_admin_init()
1784 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr); in ena_com_admin_init()
1785 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr); in ena_com_admin_init()
1787 writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF); in ena_com_admin_init()
1788 writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF); in ena_com_admin_init()
1791 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK; in ena_com_admin_init()
1797 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK; in ena_com_admin_init()
1802 writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF); in ena_com_admin_init()
1803 writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF); in ena_com_admin_init()
1808 admin_queue->ena_dev = ena_dev; in ena_com_admin_init()
1809 admin_queue->running_state = true; in ena_com_admin_init()
1825 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) { in ena_com_create_io_queue()
1827 ctx->qid, ENA_TOTAL_NUM_QUEUES); in ena_com_create_io_queue()
1828 return -EINVAL; in ena_com_create_io_queue()
1831 io_sq = &ena_dev->io_sq_queues[ctx->qid]; in ena_com_create_io_queue()
1832 io_cq = &ena_dev->io_cq_queues[ctx->qid]; in ena_com_create_io_queue()
1838 io_cq->q_depth = ctx->queue_size; in ena_com_create_io_queue()
1839 io_cq->direction = ctx->direction; in ena_com_create_io_queue()
1840 io_cq->qid = ctx->qid; in ena_com_create_io_queue()
1842 io_cq->msix_vector = ctx->msix_vector; in ena_com_create_io_queue()
1844 io_sq->q_depth = ctx->queue_size; in ena_com_create_io_queue()
1845 io_sq->direction = ctx->direction; in ena_com_create_io_queue()
1846 io_sq->qid = ctx->qid; in ena_com_create_io_queue()
1848 io_sq->mem_queue_type = ctx->mem_queue_type; in ena_com_create_io_queue()
1850 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) in ena_com_create_io_queue()
1852 io_sq->tx_max_header_size = in ena_com_create_io_queue()
1853 min_t(u32, ena_dev->tx_max_header_size, SZ_256); in ena_com_create_io_queue()
1866 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx); in ena_com_create_io_queue()
1890 io_sq = &ena_dev->io_sq_queues[qid]; in ena_com_destroy_io_queue()
1891 io_cq = &ena_dev->io_cq_queues[qid]; in ena_com_destroy_io_queue()
1916 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr, in ena_com_get_dev_attr_feat()
1919 ena_dev->supported_features = get_resp.u.dev_attr.supported_features; in ena_com_get_dev_attr_feat()
1921 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { in ena_com_get_dev_attr_feat()
1929 return -EINVAL; in ena_com_get_dev_attr_feat()
1931 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext, in ena_com_get_dev_attr_feat()
1933 ena_dev->tx_max_header_size = in ena_com_get_dev_attr_feat()
1938 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue, in ena_com_get_dev_attr_feat()
1940 ena_dev->tx_max_header_size = in ena_com_get_dev_attr_feat()
1952 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq, in ena_com_get_dev_attr_feat()
1960 memcpy(&get_feat_ctx->offload, &get_resp.u.offload, in ena_com_get_dev_attr_feat()
1969 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, in ena_com_get_dev_attr_feat()
1971 else if (rc == -EOPNOTSUPP) in ena_com_get_dev_attr_feat()
1972 memset(&get_feat_ctx->hw_hints, 0x0, in ena_com_get_dev_attr_feat()
1973 sizeof(get_feat_ctx->hw_hints)); in ena_com_get_dev_attr_feat()
1979 memcpy(&get_feat_ctx->llq, &get_resp.u.llq, in ena_com_get_dev_attr_feat()
1981 else if (rc == -EOPNOTSUPP) in ena_com_get_dev_attr_feat()
1982 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq)); in ena_com_get_dev_attr_feat()
1991 ena_com_handle_admin_completion(&ena_dev->admin_queue); in ena_com_admin_q_comp_intr_handler()
2000 struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers; in ena_com_get_specific_aenq_cb()
2002 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group]) in ena_com_get_specific_aenq_cb()
2003 return aenq_handlers->handlers[group]; in ena_com_get_specific_aenq_cb()
2005 return aenq_handlers->unimplemented_handler; in ena_com_get_specific_aenq_cb()
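/*
 * Editor's sketch: the AENQ dispatch rule above as a standalone helper.
 * Unknown or unregistered event groups fall through to a catch-all
 * handler rather than being dropped silently.
 */
#include <stddef.h>

typedef void (*aenq_handler_fn)(void *data, const void *aenq_e);

static aenq_handler_fn aenq_get_handler(aenq_handler_fn *handlers,
					size_t num_handlers,
					aenq_handler_fn unimplemented,
					unsigned int group)
{
	if (group < num_handlers && handlers[group])
		return handlers[group];
	return unimplemented;
}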
2016 struct ena_com_aenq *aenq = &ena_dev->aenq; in ena_com_aenq_intr_handler()
2022 masked_head = aenq->head & (aenq->q_depth - 1); in ena_com_aenq_intr_handler()
2023 phase = aenq->phase; in ena_com_aenq_intr_handler()
2024 aenq_e = &aenq->entries[masked_head]; /* Get first entry */ in ena_com_aenq_intr_handler()
2025 aenq_common = &aenq_e->aenq_common_desc; in ena_com_aenq_intr_handler()
2028 while ((READ_ONCE(aenq_common->flags) & in ena_com_aenq_intr_handler()
2035 timestamp = (u64)aenq_common->timestamp_low | in ena_com_aenq_intr_handler()
2036 ((u64)aenq_common->timestamp_high << 32); in ena_com_aenq_intr_handler()
2039 aenq_common->group, aenq_common->syndrome, timestamp); in ena_com_aenq_intr_handler()
2043 aenq_common->group); in ena_com_aenq_intr_handler()
2050 if (unlikely(masked_head == aenq->q_depth)) { in ena_com_aenq_intr_handler()
2054 aenq_e = &aenq->entries[masked_head]; in ena_com_aenq_intr_handler()
2055 aenq_common = &aenq_e->aenq_common_desc; in ena_com_aenq_intr_handler()
2058 aenq->head += processed; in ena_com_aenq_intr_handler()
2059 aenq->phase = phase; in ena_com_aenq_intr_handler()
2067 writel_relaxed((u32)aenq->head, in ena_com_aenq_intr_handler()
2068 ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); in ena_com_aenq_intr_handler()
2083 return -ETIME; in ena_com_dev_reset()
2088 return -EINVAL; in ena_com_dev_reset()
2095 return -EINVAL; in ena_com_dev_reset()
2102 writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); in ena_com_dev_reset()
2115 writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); in ena_com_dev_reset()
2126 ena_dev->admin_queue.completion_timeout = timeout * 100000; in ena_com_dev_reset()
2128 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US; in ena_com_dev_reset()
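/*
 * Editor's sketch: deriving the admin completion timeout after reset, as
 * above. The device advertises the timeout in a caps-register field whose
 * unit is 100 ms, while completion_timeout is kept in microseconds, hence
 * the factor of 100000; a zero field falls back to the driver default.
 * The mask/shift parameters stand in for the ENA_REGS_CAPS_* constants.
 */
#include <stdint.h>

static uint32_t admin_timeout_us(uint32_t caps, uint32_t to_mask,
				 unsigned int to_shift, uint32_t default_us)
{
	uint32_t timeout = (caps & to_mask) >> to_shift;

	return timeout ? timeout * 100000u : default_us;
}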
2137 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd; in ena_get_dev_stats()
2138 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp; in ena_get_dev_stats()
2142 admin_queue = &ena_dev->admin_queue; in ena_get_dev_stats()
2144 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS; in ena_get_dev_stats()
2145 get_cmd->aq_common_descriptor.flags = 0; in ena_get_dev_stats()
2146 get_cmd->type = type; in ena_get_dev_stats()
2199 return -EOPNOTSUPP; in ena_com_set_dev_mtu()
2203 admin_queue = &ena_dev->admin_queue; in ena_com_set_dev_mtu()
2242 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_set_hash_function()
2243 struct ena_rss *rss = &ena_dev->rss; in ena_com_set_hash_function()
2253 return -EOPNOTSUPP; in ena_com_set_hash_function()
2262 if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) { in ena_com_set_hash_function()
2264 rss->hash_func); in ena_com_set_hash_function()
2265 return -EOPNOTSUPP; in ena_com_set_hash_function()
2274 cmd.u.flow_hash_func.init_val = rss->hash_init_val; in ena_com_set_hash_function()
2275 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func; in ena_com_set_hash_function()
2279 rss->hash_key_dma_addr); in ena_com_set_hash_function()
2285 cmd.control_buffer.length = sizeof(*rss->hash_key); in ena_com_set_hash_function()
2294 rss->hash_func, ret); in ena_com_set_hash_function()
2295 return -EINVAL; in ena_com_set_hash_function()
2308 struct ena_rss *rss = &ena_dev->rss; in ena_com_fill_hash_function()
2311 hash_key = rss->hash_key; in ena_com_fill_hash_function()
2315 return -EINVAL; in ena_com_fill_hash_function()
2319 rss->hash_key_dma_addr, in ena_com_fill_hash_function()
2320 sizeof(*rss->hash_key), 0); in ena_com_fill_hash_function()
2326 return -EOPNOTSUPP; in ena_com_fill_hash_function()
2332 if (key_len != sizeof(hash_key->key)) { in ena_com_fill_hash_function()
2334 key_len, sizeof(hash_key->key)); in ena_com_fill_hash_function()
2335 return -EINVAL; in ena_com_fill_hash_function()
2337 memcpy(hash_key->key, key, key_len); in ena_com_fill_hash_function()
2338 rss->hash_init_val = init_val; in ena_com_fill_hash_function()
2339 hash_key->key_parts = key_len / sizeof(hash_key->key[0]); in ena_com_fill_hash_function()
2343 rss->hash_init_val = init_val; in ena_com_fill_hash_function()
2347 return -EINVAL; in ena_com_fill_hash_function()
2350 old_func = rss->hash_func; in ena_com_fill_hash_function()
2351 rss->hash_func = func; in ena_com_fill_hash_function()
2356 rss->hash_func = old_func; in ena_com_fill_hash_function()
2364 struct ena_rss *rss = &ena_dev->rss; in ena_com_get_hash_function()
2369 return -EINVAL; in ena_com_get_hash_function()
2373 rss->hash_key_dma_addr, in ena_com_get_hash_function()
2374 sizeof(*rss->hash_key), 0); in ena_com_get_hash_function()
2379 rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func); in ena_com_get_hash_function()
2380 if (rss->hash_func) in ena_com_get_hash_function()
2381 rss->hash_func--; in ena_com_get_hash_function()
2383 *func = rss->hash_func; in ena_com_get_hash_function()
2391 ena_dev->rss.hash_key; in ena_com_get_hash_key()
2394 memcpy(key, hash_key->key, in ena_com_get_hash_key()
2395 (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0])); in ena_com_get_hash_key()
2404 struct ena_rss *rss = &ena_dev->rss; in ena_com_get_hash_ctrl()
2410 rss->hash_ctrl_dma_addr, in ena_com_get_hash_ctrl()
2411 sizeof(*rss->hash_ctrl), 0); in ena_com_get_hash_ctrl()
2416 *fields = rss->hash_ctrl->selected_fields[proto].fields; in ena_com_get_hash_ctrl()
2423 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_set_hash_ctrl()
2424 struct ena_rss *rss = &ena_dev->rss; in ena_com_set_hash_ctrl()
2425 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; in ena_com_set_hash_ctrl()
2434 return -EOPNOTSUPP; in ena_com_set_hash_ctrl()
2449 rss->hash_ctrl_dma_addr); in ena_com_set_hash_ctrl()
2469 struct ena_rss *rss = &ena_dev->rss; in ena_com_set_default_hash_ctrl()
2471 rss->hash_ctrl; in ena_com_set_default_hash_ctrl()
2480 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields = in ena_com_set_default_hash_ctrl()
2484 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields = in ena_com_set_default_hash_ctrl()
2488 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields = in ena_com_set_default_hash_ctrl()
2492 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields = in ena_com_set_default_hash_ctrl()
2496 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields = in ena_com_set_default_hash_ctrl()
2499 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields = in ena_com_set_default_hash_ctrl()
2502 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = in ena_com_set_default_hash_ctrl()
2505 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields = in ena_com_set_default_hash_ctrl()
2509 available_fields = hash_ctrl->selected_fields[i].fields & in ena_com_set_default_hash_ctrl()
2510 hash_ctrl->supported_fields[i].fields; in ena_com_set_default_hash_ctrl()
2511 if (available_fields != hash_ctrl->selected_fields[i].fields) { in ena_com_set_default_hash_ctrl()
2513 i, hash_ctrl->supported_fields[i].fields, in ena_com_set_default_hash_ctrl()
2514 hash_ctrl->selected_fields[i].fields); in ena_com_set_default_hash_ctrl()
2515 return -EOPNOTSUPP; in ena_com_set_default_hash_ctrl()
2532 struct ena_rss *rss = &ena_dev->rss; in ena_com_fill_hash_ctrl()
2533 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; in ena_com_fill_hash_ctrl()
2539 return -EINVAL; in ena_com_fill_hash_ctrl()
2548 supported_fields = hash_ctrl->supported_fields[proto].fields; in ena_com_fill_hash_ctrl()
2550 pr_err("Proto %d doesn't support the required fields %x. supports only: %x\n", in ena_com_fill_hash_ctrl()
2554 hash_ctrl->selected_fields[proto].fields = hash_fields; in ena_com_fill_hash_ctrl()
2568 struct ena_rss *rss = &ena_dev->rss; in ena_com_indirect_table_fill_entry()
2570 if (unlikely(entry_idx >= (1 << rss->tbl_log_size))) in ena_com_indirect_table_fill_entry()
2571 return -EINVAL; in ena_com_indirect_table_fill_entry()
2574 return -EINVAL; in ena_com_indirect_table_fill_entry()
2576 rss->host_rss_ind_tbl[entry_idx] = entry_value; in ena_com_indirect_table_fill_entry()
2583 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_indirect_table_set()
2584 struct ena_rss *rss = &ena_dev->rss; in ena_com_indirect_table_set()
2593 return -EOPNOTSUPP; in ena_com_indirect_table_set()
2608 cmd.u.ind_table.size = rss->tbl_log_size; in ena_com_indirect_table_set()
2613 rss->rss_ind_tbl_dma_addr); in ena_com_indirect_table_set()
2619 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) * in ena_com_indirect_table_set()
2636 struct ena_rss *rss = &ena_dev->rss; in ena_com_indirect_table_get()
2641 tbl_size = (1ULL << rss->tbl_log_size) * in ena_com_indirect_table_get()
2646 rss->rss_ind_tbl_dma_addr, in ena_com_indirect_table_get()
2654 for (i = 0; i < (1 << rss->tbl_log_size); i++) in ena_com_indirect_table_get()
2655 ind_tbl[i] = rss->host_rss_ind_tbl[i]; in ena_com_indirect_table_get()
2664 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); in ena_com_rss_init()
2677 else if (rc != -EOPNOTSUPP) in ena_com_rss_init()
2701 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); in ena_com_rss_destroy()
2706 struct ena_host_attribute *host_attr = &ena_dev->host_attr; in ena_com_allocate_host_info()
2708 host_attr->host_info = in ena_com_allocate_host_info()
2709 dma_alloc_coherent(ena_dev->dmadev, SZ_4K, in ena_com_allocate_host_info()
2710 &host_attr->host_info_dma_addr, GFP_KERNEL); in ena_com_allocate_host_info()
2711 if (unlikely(!host_attr->host_info)) in ena_com_allocate_host_info()
2712 return -ENOMEM; in ena_com_allocate_host_info()
2714 host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR << in ena_com_allocate_host_info()
2724 struct ena_host_attribute *host_attr = &ena_dev->host_attr; in ena_com_allocate_debug_area()
2726 host_attr->debug_area_virt_addr = in ena_com_allocate_debug_area()
2727 dma_alloc_coherent(ena_dev->dmadev, debug_area_size, in ena_com_allocate_debug_area()
2728 &host_attr->debug_area_dma_addr, GFP_KERNEL); in ena_com_allocate_debug_area()
2729 if (unlikely(!host_attr->debug_area_virt_addr)) { in ena_com_allocate_debug_area()
2730 host_attr->debug_area_size = 0; in ena_com_allocate_debug_area()
2731 return -ENOMEM; in ena_com_allocate_debug_area()
2734 host_attr->debug_area_size = debug_area_size; in ena_com_allocate_debug_area()
2741 struct ena_host_attribute *host_attr = &ena_dev->host_attr; in ena_com_delete_host_info()
2743 if (host_attr->host_info) { in ena_com_delete_host_info()
2744 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info, in ena_com_delete_host_info()
2745 host_attr->host_info_dma_addr); in ena_com_delete_host_info()
2746 host_attr->host_info = NULL; in ena_com_delete_host_info()
2752 struct ena_host_attribute *host_attr = &ena_dev->host_attr; in ena_com_delete_debug_area()
2754 if (host_attr->debug_area_virt_addr) { in ena_com_delete_debug_area()
2755 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size, in ena_com_delete_debug_area()
2756 host_attr->debug_area_virt_addr, in ena_com_delete_debug_area()
2757 host_attr->debug_area_dma_addr); in ena_com_delete_debug_area()
2758 host_attr->debug_area_virt_addr = NULL; in ena_com_delete_debug_area()
2764 struct ena_host_attribute *host_attr = &ena_dev->host_attr; in ena_com_set_host_attributes()
2776 admin_queue = &ena_dev->admin_queue; in ena_com_set_host_attributes()
2783 host_attr->debug_area_dma_addr); in ena_com_set_host_attributes()
2791 host_attr->host_info_dma_addr); in ena_com_set_host_attributes()
2797 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size; in ena_com_set_host_attributes()
2824 return -EFAULT; in ena_com_update_nonadaptive_moderation_interval()
2836 ena_dev->intr_delay_resolution, in ena_com_update_nonadaptive_moderation_interval_tx()
2837 &ena_dev->intr_moder_tx_interval); in ena_com_update_nonadaptive_moderation_interval_tx()
2844 ena_dev->intr_delay_resolution, in ena_com_update_nonadaptive_moderation_interval_rx()
2845 &ena_dev->intr_moder_rx_interval); in ena_com_update_nonadaptive_moderation_interval_rx()
2858 if (rc == -EOPNOTSUPP) { in ena_com_init_interrupt_moderation()
2876 /* Disable adaptive moderation by default - can be enabled later */ in ena_com_init_interrupt_moderation()
2884 return ena_dev->intr_moder_tx_interval; in ena_com_get_nonadaptive_moderation_interval_tx()
2889 return ena_dev->intr_moder_rx_interval; in ena_com_get_nonadaptive_moderation_interval_rx()
2896 struct ena_com_llq_info *llq_info = &ena_dev->llq_info; in ena_com_config_dev_mode()
2899 if (!llq_features->max_llq_num) { in ena_com_config_dev_mode()
2900 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; in ena_com_config_dev_mode()
2908 ena_dev->tx_max_header_size = llq_info->desc_list_entry_size - in ena_com_config_dev_mode()
2909 (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc)); in ena_com_config_dev_mode()
2911 if (unlikely(ena_dev->tx_max_header_size == 0)) { in ena_com_config_dev_mode()
2913 return -EINVAL; in ena_com_config_dev_mode()
2916 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; in ena_com_config_dev_mode()
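/*
 * Editor's sketch: the LLQ header-room computation above. Each LLQ entry
 * starts with descs_num_before_header TX descriptors; whatever remains of
 * the entry is the maximum header size that can be pushed inline, and a
 * result of zero or less indicates a broken configuration (-EINVAL in the
 * driver).
 */
#include <stddef.h>

static int llq_tx_max_header_size(size_t desc_list_entry_size,
				  size_t descs_num_before_header,
				  size_t tx_desc_size)
{
	size_t used = descs_num_before_header * tx_desc_size;

	if (used >= desc_list_entry_size)
		return -1;
	return (int)(desc_list_entry_size - used);
}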