Lines Matching defs:srq (mlx4 InfiniBand shared receive queue driver)
35 #include <linux/mlx4/srq.h>
42 static void *get_wqe(struct mlx4_ib_srq *srq, int n)
44 return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
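The two matched lines above (42 and 44) are the entire helper. A minimal reconstruction, assuming nothing beyond the matched fragments: wqe_shift is set to ilog2 of the descriptor size during creation, so the shift turns a WQE index into an exact byte offset.

static void *get_wqe(struct mlx4_ib_srq *srq, int n)
{
        /* wqe_shift == ilog2(desc_size), so n << wqe_shift is the byte
         * offset of WQE n inside the SRQ buffer. */
        return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}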
47 static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
50 struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;
54 event.element.srq = ibsrq;
64 "on SRQ %06x\n", type, srq->srqn);
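A hedged sketch of the event dispatch spanning lines 47-64: the mlx4 core event is translated into an ib_event and forwarded to the consumer's handler, and the warning matched on line 64 fires for unrecognized types. The MLX4_EVENT_TYPE_* and IB_EVENT_SRQ_* names are taken from the mainline mlx4 and verbs headers, not from the matched lines.

static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
{
        struct ib_event event;
        struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

        if (ibsrq->event_handler) {
                event.device      = ibsrq->device;
                event.element.srq = ibsrq;
                switch (type) {
                case MLX4_EVENT_TYPE_SRQ_LIMIT:
                        event.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        break;
                case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
                        event.event = IB_EVENT_SRQ_ERR;
                        break;
                default:
                        /* unrecognized hardware event: warn and drop */
                        pr_warn_ratelimited("Unexpected event type %d on SRQ %06x\n",
                                            type, srq->srqn);
                        return;
                }

                ibsrq->event_handler(&event, ibsrq->srq_context);
        }
}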
79 struct mlx4_ib_srq *srq = to_msrq(ib_srq);
98 mutex_init(&srq->mutex);
99 spin_lock_init(&srq->lock);
100 srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
101 srq->msrq.max_gs = init_attr->attr.max_sge;
105 srq->msrq.max_gs *
107 srq->msrq.wqe_shift = ilog2(desc_size);
109 buf_size = srq->msrq.max * desc_size;
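Lines 100-109 carry the capacity arithmetic. A sketch of the computation, in which the max(32UL, ...) floor and the mlx4_wqe_*_seg struct names are assumptions from mainline mlx4 rather than from the matched lines:

        /* One extra slot keeps head != tail while the SRQ is full, and
         * rounding to a power of two makes wraparound a simple mask. */
        srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
        srq->msrq.max_gs = init_attr->attr.max_sge;

        /* Each WQE is a next-segment header plus max_gs scatter entries,
         * rounded to a power of two so that wqe_shift is an exact log2. */
        desc_size = max(32UL,
                        roundup_pow_of_two(sizeof(struct mlx4_wqe_srq_next_seg) +
                                           srq->msrq.max_gs *
                                           sizeof(struct mlx4_wqe_data_seg)));
        srq->msrq.wqe_shift = ilog2(desc_size);

        buf_size = srq->msrq.max * desc_size;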
117 srq->umem =
119 if (IS_ERR(srq->umem))
120 return PTR_ERR(srq->umem);
123 dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE),
124 PAGE_SHIFT, &srq->mtt);
128 err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
132 err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db);
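The matched lines 117-132 belong to the userspace branch of SRQ creation. A hedged sketch of that branch, where the ib_copy_from_udata step and the err_buf/err_mtt labels are assumptions filled in around the matched calls: the consumer's buffer is pinned, translated into the HCA's MTT, and its doorbell record mapped, instead of being allocated in the kernel.

        if (udata) {
                struct mlx4_ib_create_srq ucmd;

                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
                        return -EFAULT;

                /* pin the userspace WQ buffer */
                srq->umem =
                        ib_umem_get(ib_srq->device, ucmd.buf_addr, buf_size, 0);
                if (IS_ERR(srq->umem))
                        return PTR_ERR(srq->umem);

                /* describe it to the HCA through the MTT */
                err = mlx4_mtt_init(
                        dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE),
                        PAGE_SHIFT, &srq->mtt);
                if (err)
                        goto err_buf;

                err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
                if (err)
                        goto err_mtt;

                err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db);
                if (err)
                        goto err_mtt;
        }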
136 err = mlx4_db_alloc(dev->dev, &srq->db, 0);
140 *srq->db.db = 0;
143 &srq->buf)) {
148 srq->head = 0;
149 srq->tail = srq->msrq.max - 1;
150 srq->wqe_ctr = 0;
152 for (i = 0; i < srq->msrq.max; ++i) {
153 next = get_wqe(srq, i);
155 cpu_to_be16((i + 1) & (srq->msrq.max - 1));
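Lines 148-155 initialize the kernel-allocated ring. A reconstruction following the matched fragments: every WQE is chained into a free list through its next_wqe_index field, with head and tail bracketing the free entries.

        srq->head    = 0;
        srq->tail    = srq->msrq.max - 1;
        srq->wqe_ctr = 0;

        /* link WQE i to WQE i+1, wrapping at the power-of-two size */
        for (i = 0; i < srq->msrq.max; ++i) {
                next = get_wqe(srq, i);
                next->next_wqe_index =
                        cpu_to_be16((i + 1) & (srq->msrq.max - 1));
        }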
163 err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
164 &srq->mtt);
168 err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
172 srq->wrid = kvmalloc_array(srq->msrq.max,
174 if (!srq->wrid) {
186 &srq->mtt, srq->db.dma, &srq->msrq);
190 srq->msrq.event = mlx4_ib_srq_event;
191 srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
194 if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
199 init_attr->attr.max_wr = srq->msrq.max - 1;
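A sketch of the tail of the create path (lines 186-199), with the error handling around the matched lines assumed: the firmware object gets the event callback, the SRQ number is exposed for XRC and copied back to userspace, and the rounded-up capacity is reported through init_attr.

        srq->msrq.event = mlx4_ib_srq_event;
        srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

        if (udata)
                if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
                        err = -EFAULT;
                        goto err_wrid;
                }

        /* report the real (rounded-up) capacity back to the caller */
        init_attr->attr.max_wr  = srq->msrq.max - 1;
        init_attr->attr.max_sge = srq->msrq.max_gs;

        return 0;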
205 mlx4_ib_db_unmap_user(ucontext, &srq->db);
207 kvfree(srq->wrid);
210 mlx4_mtt_cleanup(dev->dev, &srq->mtt);
213 if (!srq->umem)
214 mlx4_buf_free(dev->dev, buf_size, &srq->buf);
215 ib_umem_release(srq->umem);
219 mlx4_db_free(dev->dev, &srq->db);
228 struct mlx4_ib_srq *srq = to_msrq(ibsrq);
236 if (attr->srq_limit >= srq->msrq.max)
239 mutex_lock(&srq->mutex);
240 ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
241 mutex_unlock(&srq->mutex);
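Lines 236-241 sit inside mlx4_ib_modify_srq. A hedged sketch of the whole function, assuming the IB_SRQ_MAX_WR rejection from mainline mlx4: only arming the limit is supported (SRQs cannot be resized), the watermark must be below the queue size, and the firmware arm command is serialized by the mutex.

int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
        struct mlx4_ib_srq *srq = to_msrq(ibsrq);
        int ret;

        /* resizing SRQs is not supported */
        if (attr_mask & IB_SRQ_MAX_WR)
                return -EINVAL;

        if (attr_mask & IB_SRQ_LIMIT) {
                if (attr->srq_limit >= srq->msrq.max)
                        return -EINVAL;

                mutex_lock(&srq->mutex);
                ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
                mutex_unlock(&srq->mutex);

                if (ret)
                        return ret;
        }

        return 0;
}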
253 struct mlx4_ib_srq *srq = to_msrq(ibsrq);
257 ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark);
262 srq_attr->max_wr = srq->msrq.max - 1;
263 srq_attr->max_sge = srq->msrq.max_gs;
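A sketch of mlx4_ib_query_srq around lines 253-263, with the limit_watermark plumbing assumed: the firmware is queried for the current limit, and the capacities are reported from the rounded-up values chosen at creation.

int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
        struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
        struct mlx4_ib_srq *srq = to_msrq(ibsrq);
        int limit_watermark;
        int ret;

        ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark);
        if (ret)
                return ret;

        srq_attr->srq_limit = limit_watermark;
        srq_attr->max_wr    = srq->msrq.max - 1;
        srq_attr->max_sge   = srq->msrq.max_gs;

        return 0;
}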
268 int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
270 struct mlx4_ib_dev *dev = to_mdev(srq->device);
271 struct mlx4_ib_srq *msrq = to_msrq(srq);
293 void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
298 spin_lock(&srq->lock);
300 next = get_wqe(srq, srq->tail);
302 srq->tail = wqe_index;
304 spin_unlock(&srq->lock);
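Lines 293-304 form mlx4_ib_free_srq_wqe almost completely. A reconstruction: a completed WQE is returned to the free list by appending it after the current tail. The plain spin_lock (not _irqsave) is safe because, per the mainline comment, this runs from CQ processing with interrupts already disabled.

void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
{
        struct mlx4_wqe_srq_next_seg *next;

        /* always called with interrupts disabled (CQ polling path) */
        spin_lock(&srq->lock);

        /* append the completed WQE after the current free-list tail */
        next = get_wqe(srq, srq->tail);
        next->next_wqe_index = cpu_to_be16(wqe_index);
        srq->tail = wqe_index;

        spin_unlock(&srq->lock);
}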
310 struct mlx4_ib_srq *srq = to_msrq(ibsrq);
319 spin_lock_irqsave(&srq->lock, flags);
327 if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
333 if (unlikely(srq->head == srq->tail)) {
339 srq->wrid[srq->head] = wr->wr_id;
341 next = get_wqe(srq, srq->head);
342 srq->head = be16_to_cpu(next->next_wqe_index);
351 if (i < srq->msrq.max_gs) {
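A hedged sketch of the per-WQE posting step (lines 327-351), with the scat pointer arithmetic and the MLX4_INVALID_LKEY terminator assumed from mainline mlx4: the entry at head is popped off the free list, the caller's wr_id recorded, and the scatter list copied and terminated when short.

                if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                if (unlikely(srq->head == srq->tail)) {
                        /* free list exhausted: SRQ is full */
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                srq->wrid[srq->head] = wr->wr_id;

                /* pop the head entry off the free list */
                next      = get_wqe(srq, srq->head);
                srq->head = be16_to_cpu(next->next_wqe_index);
                scat      = (struct mlx4_wqe_data_seg *) (next + 1);

                for (i = 0; i < wr->num_sge; ++i) {
                        scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
                        scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
                        scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
                }

                if (i < srq->msrq.max_gs) {
                        /* terminate a short scatter list */
                        scat[i].byte_count = 0;
                        scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
                        scat[i].addr       = 0;
                }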
359 srq->wqe_ctr += nreq;
367 *srq->db.db = cpu_to_be32(srq->wqe_ctr);
371 spin_unlock_irqrestore(&srq->lock, flags);
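Finally, a sketch of the doorbell update matched at lines 359-371: the producer counter is published to the doorbell record only after a write memory barrier, so the HCA can never observe the new counter ahead of the WQE contents.

        if (likely(nreq)) {
                srq->wqe_ctr += nreq;

                /* make sure the WQE writes are visible before the
                 * doorbell record is updated */
                wmb();

                *srq->db.db = cpu_to_be32(srq->wqe_ctr);
        }

        spin_unlock_irqrestore(&srq->lock, flags);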