Searched refs:qpages (Results 1 – 8 of 8) sorted by relevance
/linux/drivers/net/ethernet/huawei/hinic3/
hinic3_queue_common.c
  in hinic3_queue_pages_init():
      9  void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth,
     16          qpages->pages = NULL;
     17          qpages->page_size = page_size;
     18          qpages->num_pages = max(q_depth / elem_per_page, 1);
     19          qpages->elem_size_shift = ilog2(elem_size);
     20          qpages->elem_per_pg_shift = ilog2(elem_per_page);
  in __queue_pages_free():
     24                          struct hinic3_queue_pages *qpages, u32 pg_cnt)
     29                                  qpages->pages + pg_cnt);
     31          kfree(qpages->pages);
     32          qpages->pages = NULL;
  [all …]
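The matches above show how the queue bookkeeping is derived: the element size and the elements-per-page count are stored as log2 shifts so that later index arithmetic can use shifts and masks instead of division. Below is a minimal userspace model of that derivation; the struct name, the ilog2 stand-in, and the elem_per_page computation (its source lines are not part of the match) are illustrative assumptions, not the driver's code.

/* Illustrative model of the field setup seen in hinic3_queue_pages_init().
 * struct queue_pages_model and the elem_per_page derivation are assumptions;
 * only the five assignments mirror matched lines 16-20 above.
 */
#include <stdint.h>
#include <stdio.h>

struct queue_pages_model {
        uint32_t page_size;
        uint32_t num_pages;
        uint32_t elem_size_shift;   /* log2(element size in bytes) */
        uint32_t elem_per_pg_shift; /* log2(elements per page)     */
};

static uint32_t ilog2_u32(uint32_t v)
{
        return 31 - (uint32_t)__builtin_clz(v); /* stand-in for the kernel's ilog2() */
}

static void queue_pages_init_model(struct queue_pages_model *qp, uint32_t q_depth,
                                   uint32_t page_size, uint32_t elem_size)
{
        /* assumed: elements per page follow from the page and element sizes */
        uint32_t elem_per_page = page_size / elem_size;

        qp->page_size = page_size;
        qp->num_pages = q_depth / elem_per_page;
        if (!qp->num_pages)
                qp->num_pages = 1;              /* max(q_depth / elem_per_page, 1) */
        qp->elem_size_shift = ilog2_u32(elem_size);
        qp->elem_per_pg_shift = ilog2_u32(elem_per_page);
}

int main(void)
{
        struct queue_pages_model qp;

        /* 1024-entry queue of 64-byte elements in 4 KiB pages -> 16 pages */
        queue_pages_init_model(&qp, 1024, 4096, 64);
        printf("num_pages=%u elem_per_pg_shift=%u elem_size_shift=%u\n",
               qp.num_pages, qp.elem_per_pg_shift, qp.elem_size_shift);
        return 0;
}

Storing the two shifts up front is what lets get_q_element() (see hinic3_queue_common.h below) locate any element with one shift, one mask, and one add.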
hinic3_wq.c
  in wq_init_wq_block():
     16          struct hinic3_queue_pages *qpages = &wq->qpages;
     20          wq->wq_block_paddr = qpages->pages[0].align_paddr;
     21          wq->wq_block_vaddr = qpages->pages[0].align_vaddr;
     26          if (wq->qpages.num_pages > WQ_MAX_NUM_PAGES) {
     39          for (i = 0; i < qpages->num_pages; i++)
     40                  wq->wq_block_vaddr[i] = cpu_to_be64(qpages->pages[i].align_paddr);
  in wq_alloc_pages():
     49          err = hinic3_queue_pages_alloc(hwdev, &wq->qpages, 0);
     55          hinic3_queue_pages_free(hwdev, &wq->qpages);
  in wq_free_pages():
     70          hinic3_queue_pages_free(hwdev, &wq->qpages);
  in hinic3_wq_create():
     91          hinic3_queue_pages_init(&wq->qpages, q_depth, wq_page_size, wqebb_size);
  [all …]
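In wq_init_wq_block() the first queue page doubles as a "WQ block": an indirection table of big-endian DMA addresses for every page of the work queue, so the hardware can walk a multi-page queue without requiring one physically contiguous ring. A small compilable sketch of that table fill follows; the dma_page_model type and the page-count constant are placeholders, since the driver's struct layout and the value of WQ_MAX_NUM_PAGES are not in the match.

/* Sketch of the WQ-block fill mirrored from hinic3_wq.c:39-40 above. */
#include <stdint.h>
#include <endian.h>

#define WQ_BLOCK_MAX_PAGES 512 /* placeholder for WQ_MAX_NUM_PAGES */

struct dma_page_model {
        uint64_t align_paddr; /* DMA (bus) address of the page */
        void    *align_vaddr; /* CPU mapping of the page       */
};

static int wq_block_fill_model(uint64_t *wq_block_vaddr,
                               const struct dma_page_model *pages,
                               uint32_t num_pages)
{
        uint32_t i;

        if (num_pages > WQ_BLOCK_MAX_PAGES)
                return -1; /* oversized tables are rejected, as at line 26 */

        /* the device expects big-endian page addresses in the block table */
        for (i = 0; i < num_pages; i++)
                wq_block_vaddr[i] = htobe64(pages[i].align_paddr);
        return 0;
}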
hinic3_queue_common.h
     24  void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth,
     27                               struct hinic3_queue_pages *qpages, u32 align);
     29                               struct hinic3_queue_pages *qpages);
  in get_q_element():
     36  static inline void *get_q_element(const struct hinic3_queue_pages *qpages,
     43          shift = qpages->elem_per_pg_shift;
     44          page_idx = (idx >> shift) & (qpages->num_pages - 1);
     49          ofs = elem_idx << qpages->elem_size_shift;
     50          page = qpages->pages + page_idx;
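get_q_element() turns a queue index into an address with shift-and-mask arithmetic only: the high bits of the index select a page (masked by num_pages - 1), and the low bits select a slot within that page, scaled by the element size. The intermediate lines of the function are not in the match, so the userspace model below fills them with the obvious assumption elem_idx = idx & (elem_per_page - 1); the type names and the align_vaddr member are likewise illustrative.

/* Userspace model of the index math visible in get_q_element() above. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct page_model {
        void *align_vaddr; /* CPU address of this queue page */
};

struct queue_pages_model {
        struct page_model *pages;
        uint32_t num_pages;          /* power of two in this model */
        uint32_t elem_size_shift;    /* log2(element size)         */
        uint32_t elem_per_pg_shift;  /* log2(elements per page)    */
};

static void *get_q_element_model(const struct queue_pages_model *qp, uint32_t idx)
{
        uint32_t shift = qp->elem_per_pg_shift;
        uint32_t page_idx = (idx >> shift) & (qp->num_pages - 1);  /* line 44 */
        uint32_t elem_idx = idx & ((1u << shift) - 1);             /* assumed */
        uint32_t ofs = elem_idx << qp->elem_size_shift;            /* line 49 */
        const struct page_model *page = qp->pages + page_idx;      /* line 50 */

        return (char *)page->align_vaddr + ofs;
}

int main(void)
{
        struct page_model pages[4];
        struct queue_pages_model qp = {
                .pages = pages, .num_pages = 4,
                .elem_size_shift = 6, .elem_per_pg_shift = 6,
        };
        uint32_t i;

        /* 4 pages x 64 elements of 64 bytes = 256-entry queue; tag each element */
        for (i = 0; i < 4; i++)
                pages[i].align_vaddr = calloc(64, 64);
        for (i = 0; i < 256; i++)
                *(uint32_t *)get_q_element_model(&qp, i) = i;

        /* element 130 lives in page 2 (130 >> 6), slot 2, byte offset 128 */
        printf("tag at idx 130: %u\n", *(uint32_t *)get_q_element_model(&qp, 130));
        return 0;
}

Because page_idx is masked with num_pages - 1, the lookup relies on the page count being a power of two (which holds when q_depth and elem_per_page are); callers such as the EQ and WQ helpers below then only need to keep their consumer/producer indices within the queue depth.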
hinic3_eqs.c
  in get_curr_aeq_elem():
     98          return get_q_element(&eq->qpages, eq->cons_idx, NULL);
  in get_curr_ceq_elem():
    103          return get_q_element(&eq->qpages, eq->cons_idx, NULL);
  in set_eq_ctrls():
    394          struct hinic3_queue_pages *qpages;
    400          qpages = &eq->qpages;
    401          page_size_val = ilog2(qpages->page_size / HINIC3_MIN_PAGE_SIZE);
    419          elem_size = qpages->elem_size_shift - 5;
  in ceq_elements_init():
    450                  ceqe = get_q_element(&eq->qpages, i, NULL);
  in aeq_elements_init():
    463                  aeqe = get_q_element(&eq->qpages, i, NULL);
  in alloc_eq_pages():
    481          struct hinic3_queue_pages *qpages;
    487          qpages = &eq->qpages;
  [all …]
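set_eq_ctrls() converts the qpages geometry into the encodings the event-queue control registers expect: the page size is programmed as log2 of a multiple of HINIC3_MIN_PAGE_SIZE, and the element size is the stored shift minus 5, i.e. apparently expressed relative to a 32-byte element. The short model below reproduces just those two calculations from lines 401 and 419; the 4 KiB minimum page size and the sample 64 KiB / 128 B values are assumptions for illustration.

/* Model of the two control-field encodings visible in set_eq_ctrls(). */
#include <stdint.h>
#include <stdio.h>

#define MIN_PAGE_SIZE_MODEL 4096u /* placeholder for HINIC3_MIN_PAGE_SIZE */

static uint32_t ilog2_u32(uint32_t v)
{
        return 31 - (uint32_t)__builtin_clz(v);
}

int main(void)
{
        uint32_t page_size = 65536, elem_size_shift = 7; /* 64 KiB pages, 128 B EQEs */

        /* page size programmed as log2(page_size / minimum page size) */
        uint32_t page_size_val = ilog2_u32(page_size / MIN_PAGE_SIZE_MODEL);
        /* element size programmed relative to a 32-byte (2^5) element */
        uint32_t elem_size = elem_size_shift - 5;

        printf("page_size_val=%u elem_size=%u\n", page_size_val, elem_size); /* 4, 2 */
        return 0;
}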
hinic3_wq.h
     24          struct hinic3_queue_pages qpages;
  in hinic3_wq_get_one_wqebb():
     63          return get_q_element(&wq->qpages, *pi, NULL);
  in hinic3_wq_get_first_wqe_page_addr():
     73          return wq->qpages.pages[0].align_paddr;
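The two WQ helpers are thin wrappers over qpages: one hands the caller the next WQEBB slot by index, the other exposes the DMA address of the first queue page so it can be programmed into queue contexts. A hypothetical producer-side sketch is shown below; it reuses struct queue_pages_model and get_q_element_model() from the sketch under hinic3_queue_common.h above, and everything except the get_q_element() call at line 63 (the index advance, the wrap mask, the field names) is an assumption.

/* Hypothetical producer path modeled on hinic3_wq_get_one_wqebb() above. */
#include <stdint.h>

struct queue_pages_model;                 /* from the earlier sketch */
void *get_q_element_model(const struct queue_pages_model *qp, uint32_t idx);

struct wq_model {
        struct queue_pages_model *qpages;
        uint16_t prod_idx;
        uint16_t idx_mask;                /* q_depth - 1, assuming a power of two */
};

static void *wq_get_one_wqebb_model(struct wq_model *wq, uint16_t *pi)
{
        *pi = wq->prod_idx & wq->idx_mask; /* hand the caller its slot index */
        wq->prod_idx++;                    /* assumed advance; not in the match */
        return get_q_element_model(wq->qpages, *pi);
}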
hinic3_rx.c
  in rq_associate_cqes():
    120          struct hinic3_queue_pages *qpages;
    125          qpages = &rxq->rq->wq.qpages;
    128                  rq_wqe = get_q_element(qpages, i, NULL);
  in rq_wqe_buf_set():
    141          rq_wqe = get_q_element(&rq->wq.qpages, wqe_idx, NULL);
hinic3_eqs.h
     49          struct hinic3_queue_pages qpages;
hinic3_cmdq.c
  in cmdq_read_wqe():
    116          return get_q_element(&wq->qpages, wq->cons_idx, NULL);
  in create_cmdq_wq():
    687          if (cmdqs->cmdq[HINIC3_CMDQ_SYNC].wq.qpages.num_pages >
    708          cmdqs->cmdq[cmdq_type].wq.qpages.num_pages *