1 /*-
2 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25
26 #ifndef __MLX5_WQ_H__
27 #define __MLX5_WQ_H__
28
29 #include <dev/mlx5/mlx5_ifc.h>
30 #include <dev/mlx5/cq.h>
31
/* Creation parameters common to all work queue types. */
struct mlx5_wq_param {
	int linear;	/* NOTE(review): presumably requests a linearly
			 * (contiguously) mapped buffer — confirm against
			 * the buffer allocation in the *_create() paths. */
};
35
/*
 * Bookkeeping for a work queue: the owning device, the backing buffer
 * and the doorbell record, released together by mlx5_wq_destroy().
 */
struct mlx5_wq_ctrl {
	struct mlx5_core_dev *mdev;	/* device that owns the queue */
	struct mlx5_buf buf;		/* backing queue buffer */
	struct mlx5_db db;		/* doorbell record */
};
41
/*
 * Same bookkeeping as struct mlx5_wq_ctrl, but for a work queue backed
 * by a fragmented (non-contiguous) buffer.
 */
struct mlx5_frag_wq_ctrl {
	struct mlx5_core_dev *mdev;	/* device that owns the queue */
	struct mlx5_frag_buf frag_buf;	/* fragmented backing buffer */
	struct mlx5_db db;		/* doorbell record */
};
47
/*
 * Cyclic (ring) work queue view: entries live at a fixed power-of-two
 * stride and are addressed by a free-running counter masked with sz_m1.
 */
struct mlx5_wq_cyc {
	void *buf;	/* base of the WQE buffer */
	__be32 *db;	/* doorbell record (big endian) */
	u16 sz_m1;	/* entry count minus one; doubles as the index mask */
	u8 log_stride;	/* log2 of the WQE stride in bytes */
};
54
/* A QP's pair of cyclic work queues: receive and send. */
struct mlx5_wq_qp {
	struct mlx5_wq_cyc rq;	/* receive queue */
	struct mlx5_wq_cyc sq;	/* send queue */
};
59
/*
 * Completion queue ring. The consumer counter runs freely; the low
 * log_sz bits index the ring and the high bits count wrap-arounds
 * (used for CQE ownership detection in mlx5_cqwq_get_cqe()).
 */
struct mlx5_cqwq {
	void *buf;	/* base of the CQE buffer */
	__be32 *db;	/* doorbell record (big endian) */
	u32 sz_m1;	/* entry count minus one; doubles as the index mask */
	u32 cc;		/* consumer counter */
	u8 log_sz;	/* log2 of the number of entries */
	u8 log_stride;	/* log2 of the CQE stride in bytes */
};
68
/*
 * Linked-list work queue: each WQE carries a big-endian "next" index,
 * so consumed entries can be relinked in any order (see
 * mlx5_wq_ll_push()/mlx5_wq_ll_pop()).
 */
struct mlx5_wq_ll {
	void *buf;	/* base of the WQE buffer */
	__be32 *db;	/* doorbell record (big endian) */
	__be16 *tail_next;	/* "next" field of the current tail WQE */
	u16 sz_m1;	/* entry count minus one */
	u16 head;	/* index of the next entry to post */
	u16 wqe_ctr;	/* total posted counter, published to *db */
	u16 cur_sz;	/* number of entries currently outstanding */
	u8 log_stride;	/* log2 of the WQE stride in bytes */
};
79
/*
 * Queue constructors: each parses the firmware context (wqc/cqc),
 * allocates the backing buffer and doorbell into wq_ctrl, and fills in
 * the ring view (wq). Returns 0 on success or a negative errno.
 * The *_get_size() helpers report the entry count of a created queue.
 */
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		       void *wqc, struct mlx5_wq_cyc *wq,
		       struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);

int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		     void *cqc, struct mlx5_cqwq *wq,
		     struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);

int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		      void *wqc, struct mlx5_wq_ll *wq,
		      struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);

/* Release the buffer and doorbell owned by wq_ctrl. */
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
96
/* Map a free-running counter to a ring index (counter modulo queue size). */
static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
{
	u16 mask = wq->sz_m1;

	return (ctr & mask);
}
101
/* Return a pointer to the WQE at ring index ix. */
static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
{
	/* stride is a power of two, so the byte offset is a plain shift */
	return (char *)wq->buf + (ix << wq->log_stride);
}
106
/*
 * Wrap-safe comparison of two 16-bit consumer counters: nonzero when
 * cc1 is strictly ahead of cc2 (i.e. the wrapped distance cc1 - cc2 is
 * nonzero and less than half the counter space).
 */
static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
{
	u16 dist = cc1 - cc2;

	return dist != 0 && !(dist & 0x8000);
}
114
mlx5_cqwq_get_ci(struct mlx5_cqwq * wq)115 static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
116 {
117 return wq->cc & wq->sz_m1;
118 }
119
/* Return a pointer to the CQE at ring index ix. */
static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
{
	/* stride is a power of two, so the byte offset is a plain shift */
	return (char *)wq->buf + (ix << wq->log_stride);
}
124
mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq * wq)125 static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
126 {
127 return wq->cc >> wq->log_sz;
128 }
129
mlx5_cqwq_pop(struct mlx5_cqwq * wq)130 static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
131 {
132 wq->cc++;
133 }
134
mlx5_cqwq_update_db_record(struct mlx5_cqwq * wq)135 static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
136 {
137 *wq->db = cpu_to_be32(wq->cc & 0xffffff);
138 }
139
/*
 * Return the CQE at the current consumer index if it has been handed to
 * software, or NULL when no new completion is available.
 *
 * Ownership is decided by comparing the CQE's ownership bit with the
 * parity of the consumer counter's wrap count; a mismatch means the
 * entry still belongs to hardware.
 */
static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
{
	u32 ci = mlx5_cqwq_get_ci(wq);
	struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
	u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
	u8 sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;

	if (cqe_ownership_bit != sw_ownership_val)
		return NULL;

	/* ensure cqe content is read after cqe ownership bit */
	atomic_thread_fence_acq();

	return cqe;
}
155
mlx5_wq_ll_is_full(struct mlx5_wq_ll * wq)156 static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
157 {
158 return wq->cur_sz == wq->sz_m1;
159 }
160
mlx5_wq_ll_is_empty(struct mlx5_wq_ll * wq)161 static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
162 {
163 return !wq->cur_sz;
164 }
165
/* Post one entry: advance to head_next and bump the posted counters. */
static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
{
	wq->cur_sz++;
	wq->wqe_ctr++;
	wq->head = head_next;
}
172
/*
 * Retire entry ix: relink it after the current tail and make its own
 * "next" field (next_tail_next) the new tail link.
 */
static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix,
				  __be16 *next_tail_next)
{
	__be16 *tail = wq->tail_next;

	*tail = ix;			/* append freed entry at the tail */
	wq->tail_next = next_tail_next;	/* its next field becomes the tail link */
	wq->cur_sz--;
}
mlx5_wq_ll_update_db_record(struct mlx5_wq_ll * wq)180 static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq)
181 {
182 *wq->db = cpu_to_be32(wq->wqe_ctr);
183 }
184
/* Return a pointer to the WQE at index ix. */
static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
{
	/* stride is a power of two, so the byte offset is a plain shift */
	return (char *)wq->buf + (ix << wq->log_stride);
}
189
190 #endif /* __MLX5_WQ_H__ */
191