Lines matching +full:32 +full:-63 in drivers/net/ethernet/ibm/ehea/ehea_qmr.h (Linux kernel)

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Jan-Bernd Themann <themann@de.ibm.com>
 */
/* low PFN bits that vary within one eHEA huge page */
#define EHEA_HUGEPAGE_PFN_MASK	((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
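A quick sanity check of that arithmetic, assuming the driver's EHEA_HUGEPAGESHIFT of 34 (16 GiB huge pages) and a 4 KiB system page (PAGE_SHIFT of 12): (((u64)1 << 34) - 1) >> 12 evaluates to (1 << 22) - 1 = 0x3fffff, i.e. the mask selects the 22 PFN bits that address the 4 Mi system pages inside one huge page.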
/*
 * WQE  - Work Queue Entry
 * SWQE - Send Work Queue Entry
 * RWQE - Receive Work Queue Entry
 * CQE  - Completion Queue Entry
 * EQE  - Event Queue Entry
 * MR   - Memory Region
 */
/* bits 48-63 of the 64-bit work request ID */
#define EHEA_WR_ID_REFILL	EHEA_BMASK_IBM(48, 63)
#define SWQE2_MAX_IMM		(0xD0 - 0x30)	/* 160 bytes of immediate data */

#define SWQE_HEADER_SIZE	32
	struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES - 1];
#define EHEA_EQE_QP_TOKEN	EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_CQ_TOKEN	EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_KEY		EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_PORT_NUMBER	EHEA_BMASK_IBM(56, 63)
#define EHEA_EQE_EQ_NUMBER	EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_ID		EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_PORT_NUMBER	EHEA_BMASK_IBM(56, 63)

#define ERROR_DATA_LENGTH	EHEA_BMASK_IBM(52, 63)
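The (from, to) pairs use IBM big-endian bit numbering: bit 0 is the most significant bit of the 64-bit word and bit 63 the least, so (32, 63) names the low 32 bits and (56, 63) the lowest byte. A minimal sketch of that convention, with hypothetical helper names (the driver's real EHEA_BMASK_* helpers are defined elsewhere in the driver):

#include <stdint.h>
#include <stdio.h>

/* extract bits [from, to] (IBM numbering, bit 0 = MSB) from a 64-bit value */
static uint64_t ibm_get(unsigned int from, unsigned int to, uint64_t v)
{
	unsigned int shift = 63 - to;		/* distance above the LSB */
	unsigned int width = to - from + 1;
	uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

	return (v >> shift) & mask;
}

int main(void)
{
	uint64_t eqe = 0x1122334455667788ULL;

	/* an EHEA_EQE_QP_TOKEN-style range: the low half of the word */
	printf("bits 32-63: 0x%llx\n",
	       (unsigned long long)ibm_get(32, 63, eqe));	/* 0x55667788 */
	/* an EHEA_EQE_PORT_NUMBER-style range: the lowest byte */
	printf("bits 56-63: 0x%llx\n",
	       (unsigned long long)ibm_get(56, 63, eqe));	/* 0x88 */
	return 0;
}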
/* hw_qeit_calc(): map a byte offset within the queue, modulo one wrap,
 * to a virtual address inside the right backing page */
	if (q_offset >= queue->queue_length)
		q_offset -= queue->queue_length;
	current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
	return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];

/* hw_qeit_get(): entry at the current queue offset */
	return hw_qeit_calc(queue, queue->current_q_offset);

/* hw_qeit_inc(): advance one entry; flip the toggle bit on wrap-around */
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset >= queue->queue_length) {
		queue->current_q_offset = 0;
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
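hw_qeit_calc() splits a flat byte offset into a page index (q_offset >> EHEA_PAGESHIFT) and an intra-page offset, because the queue's backing memory is an array of separately allocated pages rather than one contiguous buffer. A self-contained sketch of the same mapping, with invented names and an assumed 4 KiB page size:

#include <stdint.h>
#include <stdlib.h>

#define TOY_PAGESHIFT	12			/* assumed 4 KiB pages */
#define TOY_PAGESIZE	(1UL << TOY_PAGESHIFT)

struct toy_queue {
	uint8_t		*pages[8];		/* non-contiguous backing pages */
	uint64_t	queue_length;		/* total bytes, page multiple */
};

/* same idea as hw_qeit_calc(): offset -> address, tolerating one wrap */
static void *toy_qeit_calc(struct toy_queue *q, uint64_t off)
{
	if (off >= q->queue_length)
		off -= q->queue_length;
	return &q->pages[off >> TOY_PAGESHIFT][off & (TOY_PAGESIZE - 1)];
}

int main(void)
{
	struct toy_queue q = { .queue_length = 2 * TOY_PAGESIZE };

	q.pages[0] = malloc(TOY_PAGESIZE);
	q.pages[1] = malloc(TOY_PAGESIZE);

	/* offset 5000 lands 904 bytes into the second page */
	uint8_t *p = toy_qeit_calc(&q, 5000);
	*p = 0xab;

	/* an offset one full queue length beyond wraps to the same entry */
	return *(uint8_t *)toy_qeit_calc(&q, 5000 + q.queue_length) != 0xab;
}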
/* hw_qeit_get_inc_valid(): an entry is live only while the top bit of
 * ->valid matches the toggle state; if so, consume it and prefetch */
	u8 valid = retvalue->valid;
	if ((valid >> 7) == (queue->toggle_state & 1)) {
		pref = hw_qeit_calc(queue, queue->current_q_offset);

/* hw_qeit_get_valid(): same check, but the entry is not consumed */
	pref = hw_qeit_calc(queue, queue->current_q_offset);
	valid = retvalue->valid;
	if (!((valid >> 7) == (queue->toggle_state & 1)))
		retvalue = NULL;

/* hw_qeit_reset(): rewind to the start of the queue */
	queue->current_q_offset = 0;
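What makes the valid-bit check work without a shared producer index: the hardware writes each entry with a phase bit, and the consumer's toggle_state flips on every wrap-around, so an entry is new exactly when the two agree. A self-contained sketch of that protocol, with invented names (the entry layout and the initial toggle value of 1 are assumptions):

#include <stdint.h>
#include <stdio.h>

#define N_ENTRIES	4

struct toy_cqe { uint8_t valid; uint8_t data; };

struct toy_ring {
	struct toy_cqe	entries[N_ENTRIES];
	unsigned int	head;			/* consumer position */
	unsigned int	toggle;			/* flips on every wrap */
};

/* producer side: write the phase bit the consumer currently expects */
static void toy_produce(struct toy_ring *r, unsigned int slot,
			unsigned int phase, uint8_t data)
{
	r->entries[slot].data = data;
	r->entries[slot].valid = (uint8_t)(phase << 7);
}

/* consumer side: mirrors hw_qeit_get_inc_valid(), minus the prefetching */
static struct toy_cqe *toy_poll(struct toy_ring *r)
{
	struct toy_cqe *e = &r->entries[r->head];

	if ((e->valid >> 7) != (r->toggle & 1))
		return NULL;			/* not written yet */
	if (++r->head == N_ENTRIES) {		/* consume and wrap */
		r->head = 0;
		r->toggle = (~r->toggle) & 1;	/* expect the other phase */
	}
	return e;
}

int main(void)
{
	struct toy_ring r = { .toggle = 1 };

	printf("empty poll: %p\n", (void *)toy_poll(&r));	/* NULL */
	toy_produce(&r, 0, 1, 42);
	printf("data: %d\n", toy_poll(&r)->data);		/* 42 */
	return 0;
}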
/* hw_qeit_eq_get_inc(): event queue variant; wraps once the offset
 * passes the last entry instead of comparing against queue_length */
	u64 last_entry_in_q = queue->queue_length - queue->qe_size;

	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset > last_entry_in_q) {
		queue->current_q_offset = 0;
		queue->toggle_state = (~queue->toggle_state) & 1;
	}

/* hw_eqit_eq_get_inc_valid(): consume the EQ entry only if its
 * phase bit matches the toggle state */
	if ((qe >> 7) == (queue->toggle_state & 1))
/* ehea_get_next_rwqe(): select receive queue 1, 2 or 3 by rq_nr */
	if (rq_nr == 1)
		queue = &qp->hw_rqueue1;
	else if (rq_nr == 2)
		queue = &qp->hw_rqueue2;
	else
		queue = &qp->hw_rqueue3;

/* ehea_get_swqe(): fetch the next send WQE and report its index */
	struct hw_queue *queue = &my_qp->hw_squeue;
	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
	wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);

/* ehea_poll_rq1(): compute the next RQ1 entry index the same way */
	struct hw_queue *queue = &qp->hw_rqueue1;
	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
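The shift turns a byte offset into an entry index: a WQE occupies 2^(7 + EHEA_SG_*) bytes, i.e. 128 bytes scaled up by the queue's SG-size constant, so shifting right by (7 + EHEA_SG_SQ) is a division by the entry size. As a worked example, if EHEA_SG_SQ were 0 (128-byte send WQEs), an offset of 0x600 would give index 0x600 >> 7 = 12.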
/* ehea_inc_cq() / ehea_inc_rq1(): consume one CQ / RQ1 entry */
	hw_qeit_inc(&cq->hw_queue);
	hw_qeit_inc(&qp->hw_rqueue1);

/* ehea_poll_cq(): next valid completion, or NULL if none is pending */
	return hw_qeit_get_valid(&my_cq->hw_queue);
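Since ehea_poll_cq() only peeks (hw_qeit_get_valid() does not advance the queue) while ehea_inc_cq() consumes, the two compose into a budgeted poll loop. A rough sketch of that usage; toy_poll_loop and handle_cqe are invented names, not driver functions, and real completion handling is omitted:

/* hypothetical: drain up to 'budget' completions from one CQ */
static int toy_poll_loop(struct ehea_cq *my_cq, int budget)
{
	struct ehea_cqe *cqe;
	int done = 0;

	while (done < budget && (cqe = ehea_poll_cq(my_cq))) {
		handle_cqe(cqe);	/* placeholder for real CQE processing */
		ehea_inc_cq(my_cq);	/* consume: advance past this CQE */
		done++;
	}
	return done;
}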