/* Excerpt: I/O-path helpers from the Linux kernel ENA Ethernet driver (ena_eth_com.h). */
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
#define ENA_COMP_HEAD_THRESH 4
#define ENA_LLQ_HEADER		(128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
#define ENA_LLQ_LARGE_HEADER	(256UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
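/*
 * Worked numbers (assumption, not shown in this excerpt: upstream defines
 * ENA_LLQ_ENTRY_DESC_CHUNK_SIZE as 2 * sizeof(struct ena_eth_io_tx_desc),
 * i.e. 32 bytes): ENA_LLQ_HEADER then evaluates to 96 bytes and
 * ENA_LLQ_LARGE_HEADER to 224 bytes of push-header room left in a
 * 128-byte or 256-byte LLQ entry after the descriptor chunk.
 */
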
struct ena_com_tx_ctx {
	/* ... */
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;
	/* ... */
	u8 df; /* Don't fragment */
};

static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	writel(intr_reg->intr_control, io_cq->unmask_reg);
}

static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
	u16 tail, next_to_comp, cnt;

	next_to_comp = io_sq->next_to_comp;
	tail = io_sq->tail;
	cnt = tail - next_to_comp;

	return io_sq->q_depth - 1 - cnt;
}

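/*
 * Standalone userspace demonstration (not driver code) of why the u16
 * arithmetic in ena_com_free_q_entries() stays correct across counter
 * wraparound: tail and next_to_comp only ever grow, so their u16
 * difference is the in-flight count even after tail wraps past 65535.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t tail = 3;             /* wrapped: 65536 + 3 total doorbells */
	uint16_t next_to_comp = 65533; /* not yet wrapped */
	uint16_t cnt = tail - next_to_comp; /* 6 descriptors in flight */
	uint16_t q_depth = 1024;

	printf("in flight: %u, free: %u\n",
	       (unsigned)cnt, (unsigned)(q_depth - 1 - cnt));
	return 0;
}
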
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_q_entries(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. So to reduce
	 * the overhead of an exact LLQ entry count, round the division down
	 * and require two spare entries on top of it.
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_q_entries(io_sq) > temp;
}

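/*
 * Worked example: with descs_per_entry = 2 and required_buffers = 5,
 * temp = 5 / 2 + 2 = 4, so the queue is treated as full unless more than
 * 4 LLQ entries are free; the +2 deliberately overestimates so the
 * rounded-down division can never report space that does not exist.
 */
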
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}

static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}

static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	u16 num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	if (llq_info->disable_meta_caching ||
	    unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "Queue: %d num_descs: %d num_entries_needed: %d\n",
		   io_sq->qid, num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}

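/*
 * Worked example: with descs_num_before_header = 2, descs_per_entry = 2
 * and num_descs = 5, the first LLQ entry carries 2 descriptors and the
 * remaining 3 need DIV_ROUND_UP(3, 2) = 2 more entries, giving
 * num_entries_needed = 3; a doorbell is required whenever fewer than 3
 * burst entries are left.
 */
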
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "Write submission queue doorbell for queue: %d tail: %d\n",
		   io_sq->qid, tail);

	writel(tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Reset available entries in tx burst for queue %d to %d\n",
			   io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}

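/*
 * Hedged usage sketch (modeled on, but not copied from, the driver's TX
 * path; example_xmit_doorbell() is a hypothetical caller): ring the
 * doorbell early when the pending burst would not fit the remaining
 * burst credit, queue the packet, then ring again so the device sees
 * the new tail.
 */
static void example_xmit_doorbell(struct ena_com_io_sq *io_sq,
				  struct ena_com_tx_ctx *tx_ctx)
{
	if (ena_com_is_doorbell_needed(io_sq, tx_ctx))
		ena_com_write_sq_doorbell(io_sq);

	/* ... build and queue the TX descriptors for tx_ctx ... */

	ena_com_write_sq_doorbell(io_sq);
}
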
static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
{
	u16 unreported_comp, head;
	bool need_update;

	if (unlikely(io_cq->cq_head_db_reg)) {
		head = io_cq->head;
		unreported_comp = head - io_cq->last_head_update;
		need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

		if (unlikely(need_update)) {
			netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
				   "Write completion queue doorbell for queue %d: head: %d\n",
				   io_cq->qid, head);
			writel(head, io_cq->cq_head_db_reg);
			io_cq->last_head_update = head;
		}
	}

	return 0;
}

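/*
 * Worked example for the threshold above: with q_depth = 1024 and
 * ENA_COMP_HEAD_THRESH = 4, the head doorbell is written only once more
 * than 256 completions have accumulated since the last update, trading
 * per-completion MMIO writes for batching.
 */
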
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	writel(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}

static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}

static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}

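/*
 * Why the phase bit works: the queue depth is a power of two (the mask
 * arithmetic above requires it), the device tags every completion
 * descriptor it writes with the current phase, and the consumer flips
 * its expected phase once per lap. A descriptor left over from the
 * previous lap therefore still carries the old phase value and fails
 * the comparison in ena_com_tx_comp_req_id_get() below.
 */
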
static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	struct ena_eth_io_tx_cdesc *cdesc;
	u8 expected_phase, cdesc_phase;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected, it means the device hasn't updated this completion yet.
	 */
	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return -EAGAIN;

	dma_rmb();

	*req_id = READ_ONCE(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "Invalid req id %d\n", cdesc->req_id);
		return -EINVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}

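/*
 * Hedged sketch of a TX completion poll loop (patterned after the driver;
 * example_clean_tx() itself is hypothetical): drain completions until the
 * phase check returns -EAGAIN, then release the SQ space in one ack. For
 * simplicity this assumes one descriptor per completed packet; a real
 * caller would ack the total descriptor count.
 */
static int example_clean_tx(struct ena_com_io_cq *io_cq,
			    struct ena_com_io_sq *io_sq)
{
	u16 req_id;
	int polled = 0;

	while (ena_com_tx_comp_req_id_get(io_cq, &req_id) == 0) {
		/* ... unmap and free the buffers recorded under req_id ... */
		polled++;
	}

	ena_com_comp_ack(io_sq, polled);
	return polled;
}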