/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __T4_H__
#define __T4_H__

#include "common/t4_regs_values.h"
#include "common/t4_regs.h"
/*
 * FIXME: add missing defines
 */
#define SGE_PF_KDOORBELL 0x0
#define QID_MASK 0xffff8000U
#define QID_SHIFT 15
#define QID(x) ((x) << QID_SHIFT)
#define DBPRIO 0x00004000U
#define PIDX_MASK 0x00003fffU
#define PIDX_SHIFT 0
#define PIDX(x) ((x) << PIDX_SHIFT)
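
/*
 * Illustrative sketch (not part of the driver): a kernel doorbell write
 * for egress queue `qid`, advancing the producer index by `inc` entries,
 * combines the fields above roughly as follows; `db_base` is a
 * hypothetical mapped base for the PF register window:
 *
 *	u32 v = QID(qid) | PIDX(inc);
 *	writel(v, db_base + SGE_PF_KDOORBELL);
 *
 * The driver's real doorbell paths (t4_ring_sq_db() and friends, below)
 * go through the BAR2 user doorbell area instead.
 */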

#define SGE_PF_GTS 0x4
#define INGRESSQID_MASK 0xffff0000U
#define INGRESSQID_SHIFT 16
#define INGRESSQID(x) ((x) << INGRESSQID_SHIFT)
#define TIMERREG_MASK 0x0000e000U
#define TIMERREG_SHIFT 13
#define TIMERREG(x) ((x) << TIMERREG_SHIFT)
#define SEINTARM_MASK 0x00001000U
#define SEINTARM_SHIFT 12
#define SEINTARM(x) ((x) << SEINTARM_SHIFT)
#define CIDXINC_MASK 0x00000fffU
#define CIDXINC_SHIFT 0
#define CIDXINC(x) ((x) << CIDXINC_SHIFT)
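
/*
 * Illustrative sketch (not part of the driver): arming an ingress queue
 * via SGE_PF_GTS composes the fields above, e.g.
 *
 *	u32 v = INGRESSQID(iqid) | SEINTARM(1) | CIDXINC(n) | TIMERREG(7);
 *	writel(v, gts_base + SGE_PF_GTS);
 *
 * `iqid`, `n` and `gts_base` are hypothetical locals; t4_arm_cq() below
 * performs the equivalent write through the BAR2 doorbell area.
 */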
63
64 #define T4_MAX_NUM_PD 65536
65 #define T4_MAX_MR_SIZE (~0ULL)
66 #define T4_PAGESIZE_MASK 0xffffffff000 /* 4KB-8TB */
67 #define T4_FW_MAJ 0
68 #define A_PCIE_MA_SYNC 0x30b4
69
70 struct t4_status_page {
71 __be32 rsvd1; /* flit 0 - hw owns */
72 __be16 rsvd2;
73 __be16 qid;
74 __be16 cidx;
75 __be16 pidx;
76 u8 qp_err; /* flit 1 - sw owns */
77 u8 db_off;
78 u8 pad;
79 u16 host_wq_pidx;
80 u16 host_cidx;
81 u16 host_pidx;
82 };
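
/*
 * Note: the status page occupies the slot just past the last usable
 * entry of each queue, i.e. queue[size].  A minimal access sketch,
 * mirroring the helpers defined later in this header:
 *
 *	struct t4_status_page *sp =
 *	    (struct t4_status_page *)&wq->rq.queue[wq->rq.size];
 *	if (sp->qp_err)
 *		... the QP has been marked in error ...
 */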

#define T4_EQ_ENTRY_SIZE 64

#define T4_SQ_NUM_SLOTS 5
#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
			sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
			sizeof(struct fw_ri_rdma_write_wr) - \
			sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
			sizeof(struct fw_ri_rdma_write_wr) - \
			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
			sizeof(struct fw_ri_immd)) & ~31UL)
#define T4_MAX_FR_IMMD_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
#define T4_MAX_FR_DSGL 1024
#define T4_MAX_FR_DSGL_DEPTH (T4_MAX_FR_DSGL / sizeof(u64))
#define T4_MAX_FR_FW_DSGL 4096
#define T4_MAX_FR_FW_DSGL_DEPTH (T4_MAX_FR_FW_DSGL / sizeof(u64))

#define T4_RQ_NUM_SLOTS 2
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
#define T4_MAX_RECV_SGE 4

union t4_wr {
	struct fw_ri_res_wr res;
	struct fw_ri_wr ri;
	struct fw_ri_rdma_write_wr write;
	struct fw_ri_send_wr send;
	struct fw_ri_rdma_read_wr read;
	struct fw_ri_bind_mw_wr bind;
	struct fw_ri_fr_nsmr_wr fr;
	struct fw_ri_fr_nsmr_tpte_wr fr_tpte;
	struct fw_ri_inv_lstag_wr inv;
	struct t4_status_page status;
	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
};

union t4_recv_wr {
	struct fw_ri_recv_wr recv;
	struct t4_status_page status;
	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
};

static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
			       enum fw_wr_opcodes opcode, u8 flags, u8 len16)
{
	wqe->send.opcode = (u8)opcode;
	wqe->send.flags = flags;
	wqe->send.wrid = wrid;
	wqe->send.r1[0] = 0;
	wqe->send.r1[1] = 0;
	wqe->send.r1[2] = 0;
	wqe->send.len16 = len16;
}
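
/*
 * Usage sketch (illustrative): initializing the header of a send WR that
 * occupies `len16` 16-byte units, before filling in its payload.
 * FW_RI_SEND_WR is the firmware opcode assumed here:
 *
 *	union t4_wr *wqe = &wq->sq.queue[wq->sq.pidx];
 *	init_wr_hdr(wqe, wq->sq.pidx, FW_RI_SEND_WR, 0, len16);
 *
 * The actual posting paths live in the driver's QP code, not in this
 * header.
 */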

/* CQE/AE status codes */
#define T4_ERR_SUCCESS 0x0
#define T4_ERR_STAG 0x1			/* STAG invalid: either the */
					/* STAG is off limit, is 0, */
					/* or the STAG_key mismatches */
#define T4_ERR_PDID 0x2			/* PDID mismatch */
#define T4_ERR_QPID 0x3			/* QPID mismatch */
#define T4_ERR_ACCESS 0x4		/* invalid access right */
#define T4_ERR_WRAP 0x5			/* wrap error */
#define T4_ERR_BOUND 0x6		/* base and bounds violation */
#define T4_ERR_INVALIDATE_SHARED_MR 0x7	/* attempt to invalidate a */
					/* shared memory region */
#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
					/* MR with bound MWs */
#define T4_ERR_ECC 0x9			/* ECC error detected */
#define T4_ERR_ECC_PSTAG 0xA		/* ECC error detected when */
					/* reading PSTAG for a MW */
					/* Invalidate */
#define T4_ERR_PBL_ADDR_BOUND 0xB	/* pbl addr out of bounds: */
					/* software error */
#define T4_ERR_SWFLUSH 0xC		/* SW flushed */
#define T4_ERR_CRC 0x10			/* CRC error */
#define T4_ERR_MARKER 0x11		/* marker error */
#define T4_ERR_PDU_LEN_ERR 0x12		/* invalid PDU length */
#define T4_ERR_OUT_OF_RQE 0x13		/* out of RQE */
#define T4_ERR_DDP_VERSION 0x14		/* wrong DDP version */
#define T4_ERR_RDMA_VERSION 0x15	/* wrong RDMA version */
#define T4_ERR_OPCODE 0x16		/* invalid RDMA opcode */
#define T4_ERR_DDP_QUEUE_NUM 0x17	/* invalid DDP queue number */
#define T4_ERR_MSN 0x18			/* MSN error */
#define T4_ERR_TBIT 0x19		/* tag bit not set correctly */
#define T4_ERR_MO 0x1A			/* MO not 0 for TERMINATE */
					/* or READ_REQ */
#define T4_ERR_MSN_GAP 0x1B
#define T4_ERR_MSN_RANGE 0x1C
#define T4_ERR_IRD_OVERFLOW 0x1D
#define T4_ERR_RQE_ADDR_BOUND 0x1E	/* RQE addr out of bounds: */
					/* software error */
#define T4_ERR_INTERNAL_ERR 0x1F	/* internal error (opcode */
					/* mismatch) */
/*
 * CQE defs
 */
struct t4_cqe {
	__be32 header;
	__be32 len;
	union {
		struct {
			__be32 stag;
			__be32 msn;
		} rcqe;
		struct {
			u32 stag;
			u16 nada2;
			u16 cidx;
		} scqe;
		struct {
			__be32 wrid_hi;
			__be32 wrid_low;
		} gen;
		u64 drain_cookie;
	} u;
	__be64 reserved;
	__be64 bits_type_ts;
};

/* macros for flit 0 of the cqe */

#define S_CQE_QPID 12
#define M_CQE_QPID 0xFFFFF
#define G_CQE_QPID(x) (((x) >> S_CQE_QPID) & M_CQE_QPID)
#define V_CQE_QPID(x) ((x) << S_CQE_QPID)

#define S_CQE_SWCQE 11
#define M_CQE_SWCQE 0x1
#define G_CQE_SWCQE(x) (((x) >> S_CQE_SWCQE) & M_CQE_SWCQE)
#define V_CQE_SWCQE(x) ((x) << S_CQE_SWCQE)

#define S_CQE_STATUS 5
#define M_CQE_STATUS 0x1F
#define G_CQE_STATUS(x) (((x) >> S_CQE_STATUS) & M_CQE_STATUS)
#define V_CQE_STATUS(x) ((x) << S_CQE_STATUS)

#define S_CQE_TYPE 4
#define M_CQE_TYPE 0x1
#define G_CQE_TYPE(x) (((x) >> S_CQE_TYPE) & M_CQE_TYPE)
#define V_CQE_TYPE(x) ((x) << S_CQE_TYPE)

#define S_CQE_OPCODE 0
#define M_CQE_OPCODE 0xF
#define G_CQE_OPCODE(x) (((x) >> S_CQE_OPCODE) & M_CQE_OPCODE)
#define V_CQE_OPCODE(x) ((x) << S_CQE_OPCODE)

#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
#define SQ_TYPE(x) (CQE_TYPE((x)))
#define RQ_TYPE(x) (!CQE_TYPE((x)))
#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))

#define CQE_SEND_OPCODE(x) ( \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
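
/*
 * Decode sketch (illustrative): extracting the common fields of a
 * completion entry with the accessors above:
 *
 *	u32 qpid = CQE_QPID(cqe);	which QP completed
 *	int sq = SQ_TYPE(cqe);		1 = SQ completion, 0 = RQ
 *	u8 st = CQE_STATUS(cqe);	one of the T4_ERR_* codes
 *	u8 op = CQE_OPCODE(cqe);	FW_RI_* opcode
 *	u32 len = CQE_LEN(cqe);		byte length, for RQ completions
 */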

#define CQE_LEN(x) (be32_to_cpu((x)->len))

/* used for RQ completion processing */
#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))

/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
#define CQE_WRID_FR_STAG(x) (be32_to_cpu((x)->u.scqe.stag))

/* generic accessor macros */
#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)
#define CQE_DRAIN_COOKIE(x) ((x)->u.drain_cookie)

/* macros for flit 3 of the cqe */
#define S_CQE_GENBIT 63
#define M_CQE_GENBIT 0x1
#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
#define V_CQE_GENBIT(x) ((x) << S_CQE_GENBIT)

#define S_CQE_OVFBIT 62
#define M_CQE_OVFBIT 0x1
#define G_CQE_OVFBIT(x) (((x) >> S_CQE_OVFBIT) & M_CQE_OVFBIT)

#define S_CQE_IQTYPE 60
#define M_CQE_IQTYPE 0x3
#define G_CQE_IQTYPE(x) (((x) >> S_CQE_IQTYPE) & M_CQE_IQTYPE)

#define M_CQE_TS 0x0fffffffffffffffULL
#define G_CQE_TS(x) ((x) & M_CQE_TS)

#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))

struct t4_swsqe {
	u64 wr_id;
	struct t4_cqe cqe;
	int read_len;
	int opcode;
	int complete;
	int signaled;
	u16 idx;
	int flushed;
	struct timespec host_ts;
	u64 sge_ts;
};

static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
	return pgprot_writecombine(prot);
#else
	return pgprot_noncached(prot);
#endif
}

enum {
	T4_SQ_ONCHIP = (1<<0),
};

struct t4_sq {
	union t4_wr *queue;
	bus_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	unsigned long phys_addr;
	struct t4_swsqe *sw_sq;
	struct t4_swsqe *oldest_read;
	void __iomem *bar2_va;
	u64 bar2_pa;
	size_t memsize;
	u32 bar2_qid;
	u32 qid;
	u16 in_use;
	u16 size;
	u16 cidx;
	u16 pidx;
	u16 wq_pidx;
	u16 wq_pidx_inc;
	u16 flags;
	short flush_cidx;
};

struct t4_swrqe {
	u64 wr_id;
};

struct t4_rq {
	union t4_recv_wr *queue;
	bus_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	unsigned long phys_addr;
	struct t4_swrqe *sw_rq;
	void __iomem *bar2_va;
	u64 bar2_pa;
	size_t memsize;
	u32 bar2_qid;
	u32 qid;
	u32 msn;
	u32 rqt_hwaddr;
	u16 rqt_size;
	u16 in_use;
	u16 size;
	u16 cidx;
	u16 pidx;
	u16 wq_pidx;
	u16 wq_pidx_inc;
};

struct t4_wq {
	struct t4_sq sq;
	struct t4_rq rq;
	struct c4iw_rdev *rdev;
	int flushed;
};

static inline int t4_rqes_posted(struct t4_wq *wq)
{
	return wq->rq.in_use;
}

static inline int t4_rq_empty(struct t4_wq *wq)
{
	return wq->rq.in_use == 0;
}

static inline int t4_rq_full(struct t4_wq *wq)
{
	return wq->rq.in_use == (wq->rq.size - 1);
}

static inline u32 t4_rq_avail(struct t4_wq *wq)
{
	return wq->rq.size - 1 - wq->rq.in_use;
}

static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
{
	wq->rq.in_use++;
	if (++wq->rq.pidx == wq->rq.size)
		wq->rq.pidx = 0;
	wq->rq.wq_pidx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
	if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
		wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
}

static inline void t4_rq_consume(struct t4_wq *wq)
{
	wq->rq.in_use--;
	wq->rq.msn++;
	if (++wq->rq.cidx == wq->rq.size)
		wq->rq.cidx = 0;
}
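
/*
 * Flow sketch (illustrative): the RQ producer/consumer pairing.  Posting
 * a receive WR and ringing the doorbell might look like this, where
 * `len16` and `ndesc` are hypothetical locals:
 *
 *	if (t4_rq_avail(wq)) {
 *		... build the WR at wq->rq.queue[wq->rq.pidx] ...
 *		t4_rq_produce(wq, len16);
 *		t4_ring_rq_db(wq, ndesc, wqe, 0);
 *	}
 *
 * t4_rq_consume() is then called once per RQ completion.
 */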

static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq)
{
	return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
}

static inline u16 t4_rq_wq_size(struct t4_wq *wq)
{
	return wq->rq.size * T4_RQ_NUM_SLOTS;
}

static inline int t4_sq_onchip(struct t4_sq *sq)
{
	return sq->flags & T4_SQ_ONCHIP;
}

static inline int t4_sq_empty(struct t4_wq *wq)
{
	return wq->sq.in_use == 0;
}

static inline int t4_sq_full(struct t4_wq *wq)
{
	return wq->sq.in_use == (wq->sq.size - 1);
}

static inline u32 t4_sq_avail(struct t4_wq *wq)
{
	return wq->sq.size - 1 - wq->sq.in_use;
}

static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
{
	wq->sq.in_use++;
	if (++wq->sq.pidx == wq->sq.size)
		wq->sq.pidx = 0;
	wq->sq.wq_pidx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
	if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
		wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
}

static inline void t4_sq_consume(struct t4_wq *wq)
{
	BUG_ON(wq->sq.in_use < 1);
	if (wq->sq.cidx == wq->sq.flush_cidx)
		wq->sq.flush_cidx = -1;
	wq->sq.in_use--;
	if (++wq->sq.cidx == wq->sq.size)
		wq->sq.cidx = 0;
}
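
/*
 * Note on units: `pidx`/`cidx` index whole WR slots (one union t4_wr or
 * union t4_recv_wr each), while `wq_pidx` counts individual 64-byte
 * hardware descriptors; hence the DIV_ROUND_UP(len16 * 16,
 * T4_EQ_ENTRY_SIZE) conversion in the produce helpers.  For example, a
 * WR with len16 == 6 spans 96 bytes and advances wq_pidx by 2.
 */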

static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq)
{
	return wq->sq.queue[wq->sq.size].status.host_wq_pidx;
}

static inline u16 t4_sq_wq_size(struct t4_wq *wq)
{
	return wq->sq.size * T4_SQ_NUM_SLOTS;
}

/*
 * This function copies a 64-byte coalesced work request to memory
 * mapped BAR2 space.  For coalesced WRs, the SGE fetches the data
 * from the FIFO instead of from the host.
 */
static inline void pio_copy(u64 __iomem *dst, u64 *src)
{
	int count = 8;

	while (count) {
		writeq(*src, dst);
		src++;
		dst++;
		count--;
	}
}
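
/*
 * Usage sketch (illustrative): pio_copy() moves exactly eight 64-bit
 * words, i.e. one 64-byte descriptor, so it is only used when a single
 * descriptor was produced, as in t4_ring_sq_db() below:
 *
 *	pio_copy((u64 __iomem *)((u64)bar2_va + SGE_UDB_WCDOORBELL),
 *	    (u64 *)wqe);
 */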

static inline void
t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe, u8 wc)
{

	/* Flush host queue memory writes. */
	wmb();
	if (wc && inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
		CTR2(KTR_IW_CXGBE, "%s: WC wq->sq.pidx = %d",
		    __func__, wq->sq.pidx);
		pio_copy((u64 __iomem *)
		    ((u64)wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
		    (u64 *)wqe);
	} else {
		CTR2(KTR_IW_CXGBE, "%s: DB wq->sq.pidx = %d",
		    __func__, wq->sq.pidx);
		writel(V_PIDX_T5(inc) | V_QID(wq->sq.bar2_qid),
		    (void __iomem *)((u64)wq->sq.bar2_va +
		    SGE_UDB_KDOORBELL));
	}

	/* Flush user doorbell area writes. */
	wmb();
}

static inline void
t4_ring_rq_db(struct t4_wq *wq, u16 inc, union t4_recv_wr *wqe, u8 wc)
{

	/* Flush host queue memory writes. */
	wmb();
	if (wc && inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
		CTR2(KTR_IW_CXGBE, "%s: WC wq->rq.pidx = %d",
		    __func__, wq->rq.pidx);
		pio_copy((u64 __iomem *)((u64)wq->rq.bar2_va +
		    SGE_UDB_WCDOORBELL), (u64 *)wqe);
	} else {
		CTR2(KTR_IW_CXGBE, "%s: DB wq->rq.pidx = %d",
		    __func__, wq->rq.pidx);
		writel(V_PIDX_T5(inc) | V_QID(wq->rq.bar2_qid),
		    (void __iomem *)((u64)wq->rq.bar2_va +
		    SGE_UDB_KDOORBELL));
	}

	/* Flush user doorbell area writes. */
	wmb();
}

static inline int t4_wq_in_error(struct t4_wq *wq)
{
	return wq->rq.queue[wq->rq.size].status.qp_err;
}

static inline void t4_set_wq_in_error(struct t4_wq *wq)
{
	wq->rq.queue[wq->rq.size].status.qp_err = 1;
}

enum t4_cq_flags {
	CQ_ARMED = 1,
};

struct t4_cq {
	struct t4_cqe *queue;
	bus_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	struct t4_cqe *sw_queue;
	void __iomem *bar2_va;
	u64 bar2_pa;
	u32 bar2_qid;
	struct c4iw_rdev *rdev;
	size_t memsize;
	__be64 bits_type_ts;
	u32 cqid;
	u32 qid_mask;
	int vector;
	u16 size;	/* including status page */
	u16 cidx;
	u16 sw_pidx;
	u16 sw_cidx;
	u16 sw_in_use;
	u16 cidx_inc;
	u8 gen;
	u8 error;
	unsigned long flags;
};

static inline void write_gts(struct t4_cq *cq, u32 val)
{
	writel(val | V_INGRESSQID(cq->bar2_qid),
	    (void __iomem *)((u64)cq->bar2_va + SGE_UDB_GTS));
}

static inline int t4_clear_cq_armed(struct t4_cq *cq)
{
	return test_and_clear_bit(CQ_ARMED, &cq->flags);
}

static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
	u32 val;

	set_bit(CQ_ARMED, &cq->flags);
	while (cq->cidx_inc > CIDXINC_MASK) {
		val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7);
		writel(val | V_INGRESSQID(cq->bar2_qid),
		    (void __iomem *)((u64)cq->bar2_va + SGE_UDB_GTS));
		cq->cidx_inc -= CIDXINC_MASK;
	}
	val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6);
	writel(val | V_INGRESSQID(cq->bar2_qid),
	    (void __iomem *)((u64)cq->bar2_va + SGE_UDB_GTS));
	cq->cidx_inc = 0;
	return 0;
}

static inline void t4_swcq_produce(struct t4_cq *cq)
{
	cq->sw_in_use++;
	if (cq->sw_in_use == cq->size) {
		CTR2(KTR_IW_CXGBE, "%s cxgb4 sw cq overflow cqid %u",
		    __func__, cq->cqid);
		cq->error = 1;
		BUG_ON(1);
	}
	if (++cq->sw_pidx == cq->size)
		cq->sw_pidx = 0;
}

static inline void t4_swcq_consume(struct t4_cq *cq)
{
	BUG_ON(cq->sw_in_use < 1);
	cq->sw_in_use--;
	if (++cq->sw_cidx == cq->size)
		cq->sw_cidx = 0;
}

static inline void t4_hwcq_consume(struct t4_cq *cq)
{
	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
	if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == M_CIDXINC) {
		u32 val;

		val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7);
		write_gts(cq, val);
		cq->cidx_inc = 0;
	}
	if (++cq->cidx == cq->size) {
		cq->cidx = 0;
		cq->gen ^= 1;
	}
}

static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
{
	return (CQE_GENBIT(cqe) == cq->gen);
}

static inline int t4_cq_notempty(struct t4_cq *cq)
{
	return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
}

static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret;
	u16 prev_cidx;

	if (cq->cidx == 0)
		prev_cidx = cq->size - 1;
	else
		prev_cidx = cq->cidx - 1;

	if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
		ret = -EOVERFLOW;
		cq->error = 1;
		printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
		BUG_ON(1);
	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {

		/* Ensure the CQE is flushed to memory. */
		rmb();
		*cqe = &cq->queue[cq->cidx];
		ret = 0;
	} else
		ret = -ENODATA;
	return ret;
}

static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
	if (cq->sw_in_use == cq->size) {
		CTR2(KTR_IW_CXGBE, "%s cxgb4 sw cq overflow cqid %u",
		    __func__, cq->cqid);
		cq->error = 1;
		BUG_ON(1);
		return NULL;
	}
	if (cq->sw_in_use)
		return &cq->sw_queue[cq->sw_cidx];
	return NULL;
}

static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret = 0;

	if (cq->error)
		ret = -ENODATA;
	else if (cq->sw_in_use)
		*cqe = &cq->sw_queue[cq->sw_cidx];
	else
		ret = t4_next_hw_cqe(cq, cqe);
	return ret;
}
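
/*
 * Poll-loop sketch (illustrative): the intended consumption order for
 * the helpers above, roughly as the driver's CQ polling path uses them:
 *
 *	struct t4_cqe *cqe;
 *
 *	while (t4_next_cqe(cq, &cqe) == 0) {
 *		... process the CQE ...
 *		if (SW_CQE(cqe))
 *			t4_swcq_consume(cq);
 *		else
 *			t4_hwcq_consume(cq);
 *	}
 *	t4_arm_cq(cq, 0);	request the next notification
 */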

static inline int t4_cq_in_error(struct t4_cq *cq)
{
	return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
}

static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
	((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
}

struct t4_dev_status_page {
	u8 db_off;
	u8 wc_supported;
	u16 pad2;
	u32 pad3;
	u64 qp_start;
	u64 qp_size;
	u64 cq_start;
	u64 cq_size;
};
#endif /* __T4_H__ */