1 /*-
2 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
3 *
4 * Copyright (c) 2015 - 2026 Intel Corporation
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenFabrics.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35 #ifndef IRDMA_VERBS_H
36 #define IRDMA_VERBS_H
37
/* number of physical page addresses kept inline in struct irdma_mr::pgaddrmem */
#define IRDMA_MAX_SAVED_PHY_PGADDR 4
/*
 * flush-worker delays in milliseconds -- presumably used to schedule
 * irdma_qp::dwork_flush; TODO confirm against the flush worker code
 */
#define IRDMA_FLUSH_DELAY_MS 20
#define IRDMA_PERIODIC_FLUSH_MS 2000

/* single-entry P_Key table exposing only the IBTA default P_Key */
#define IRDMA_PKEY_TBL_SZ 1
#define IRDMA_DEFAULT_PKEY 0xFFFF

/* page count for the shadow area allocation */
#define IRDMA_SHADOW_PGCNT 1

/* map an irdma_device to its low-level sc_dev handle */
#define iwdev_to_idev(iwdev) (&(iwdev)->rf->sc_dev)
48
/* per-process driver state wrapping an ib_ucontext */
struct irdma_ucontext {
	struct ib_ucontext ibucontext;
	struct irdma_device *iwdev;
	struct rdma_user_mmap_entry *db_mmap_entry; /* doorbell page mmap entry */
	struct list_head cq_reg_mem_list; /* memory registered for CQs in this context */
	spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
	struct list_head qp_reg_mem_list; /* memory registered for QPs in this context */
	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
	/* FIXME: Move to kcompat ideally. Used < 4.20.0 for old disassociate flow */
	struct list_head vma_list;
	struct mutex vma_list_mutex; /* protect the vma_list */
	int abi_ver; /* user/kernel ABI version for this context */
	bool legacy_mode:1;
	bool use_raw_attrs:1;
};
64
/* protection domain: ib_pd wrapper around the HW sc_pd */
struct irdma_pd {
	struct ib_pd ibpd;
	struct irdma_sc_pd sc_pd;
	struct list_head udqp_list; /* UD QPs on this PD -- presumably linked via irdma_qp::ud_list_elem */
	spinlock_t udqp_list_lock; /* protect udqp_list */
};
71
/* overlay for holding either an IPv4 or an IPv6 socket address */
union irdma_sockaddr {
	struct sockaddr_in saddr_in;
	struct sockaddr_in6 saddr_in6;
};
76
/* address vector: resolved addressing info cached for an address handle */
struct irdma_av {
	u8 macaddr[16];
	struct ib_ah_attr attrs;
	union irdma_sockaddr sgid_addr; /* source GID in sockaddr form */
	union irdma_sockaddr dgid_addr; /* destination GID in sockaddr form */
	u8 net_type;
};
84
/* address handle: ib_ah wrapper around the HW sc_ah */
struct irdma_ah {
	struct ib_ah ibah;
	struct irdma_sc_ah sc_ah;
	struct irdma_pd *pd; /* PD the AH was created on */
	struct irdma_av av; /* cached addressing info */
	u8 sgid_index;
	union ib_gid dgid;
};
93
/* HMC PBLE reference: either a PBLE pool index or a direct DMA address */
struct irdma_hmc_pble {
	union {
		u32 idx;
		dma_addr_t addr;
	};
};
100
/* registration info for a user-mode CQ buffer */
struct irdma_cq_mr {
	struct irdma_hmc_pble cq_pbl; /* PBLE for the CQ ring */
	dma_addr_t shadow; /* DMA address of the CQ shadow area */
	bool split; /* shadow area registered separately from the CQ ring -- TODO confirm */
};
106
/* registration info for a user-mode QP buffer */
struct irdma_qp_mr {
	struct irdma_hmc_pble sq_pbl; /* PBLE for the SQ ring */
	struct irdma_hmc_pble rq_pbl; /* PBLE for the RQ ring */
	dma_addr_t shadow; /* DMA address of the QP shadow area */
	struct page *sq_page;
};
113
/* a kernel CQ buffer kept on irdma_cq::resize_list (e.g. across CQ resize) */
struct irdma_cq_buf {
	struct irdma_dma_mem kmem_buf; /* DMA memory backing this CQ ring */
	struct irdma_cq_uk cq_uk; /* user-kernel CQ state for polling this buffer */
	struct irdma_hw *hw;
	struct list_head list; /* entry on the owning CQ's resize_list */
	struct work_struct work; /* deferred work -- presumably buffer teardown; confirm with CQ code */
};
121
/* physical buffer list describing one user memory registration */
struct irdma_pbl {
	struct list_head list; /* entry on the ucontext cq/qp_reg_mem_list */
	union {
		/* interpretation depends on what the registration backs */
		struct irdma_qp_mr qp_mr;
		struct irdma_cq_mr cq_mr;
	};

	bool pbl_allocated:1; /* pble_alloc holds a PBLE pool allocation */
	bool on_list:1; /* currently linked on a registration list */
	u64 user_base; /* user-space base VA of the registration */
	struct irdma_pble_alloc pble_alloc;
	struct irdma_mr *iwmr; /* owning memory region */
};
135
/* memory region / memory window state (ibmr and ibmw share storage) */
struct irdma_mr {
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};
	struct ib_umem *region; /* pinned user memory; NULL for non-user MRs */
	int access; /* IB access flags requested at registration */
	u8 is_hwreg; /* nonzero once registered with HW -- TODO confirm semantics */
	u16 type; /* registration type -- presumably enum irdma_memreg_type */
	bool dma_mr:1;
	u32 page_cnt;
	u64 page_size;
	u64 page_msk; /* mask derived from page_size for address alignment */
	u32 npages;
	u32 stag; /* HW steering tag (lkey/rkey) */
	u64 len;
	u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR]; /* small inline page-address cache */
	struct irdma_pbl iwpbl;
};
155
/* completion queue: ib_cq wrapper around the HW sc_cq */
struct irdma_cq {
	struct ib_cq ibcq;
	struct irdma_sc_cq sc_cq;
	u32 cq_num;
	bool user_mode; /* CQ buffers live in user space */
	atomic_t armed; /* CQ has a pending notification request */
	enum irdma_cmpl_notify last_notify;
	struct irdma_dma_mem kmem; /* kernel CQ ring memory */
	struct irdma_dma_mem kmem_shadow; /* kernel CQ shadow area */
	struct completion free_cq; /* signalled when refcnt drops for destroy */
	atomic_t refcnt;
	spinlock_t lock; /* for poll cq */
	struct list_head resize_list; /* old irdma_cq_buf entries awaiting drain */
	struct irdma_cq_poll_info cur_cqe;
	struct list_head cmpl_generated; /* SW-generated completions (irdma_cmpl_gen) */
};
172
/* one software-generated completion queued on irdma_cq::cmpl_generated */
struct irdma_cmpl_gen {
	struct list_head list;
	struct irdma_cq_poll_info cpi; /* poll info reported when this CQE is consumed */
};
177
/* work item carrying the QP for deferred connection-teardown handling */
struct disconn_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
};
182
/* work item for deferred handling of a network-interface address event */
struct if_notify_work {
	struct work_struct work;
	struct irdma_device *iwdev;
	u32 ipaddr[4]; /* address in question; only ipaddr[0] meaningful for IPv4 -- TODO confirm */
	u16 vlan_id;
	bool ipv4:1; /* address family of ipaddr */
	bool ifup:1; /* interface came up (vs went down) */
};
191
struct iw_cm_id;

/* kernel-mode QP buffers (rings and WR-id tracking memory) */
struct irdma_qp_kmode {
	struct irdma_dma_mem dma_mem; /* DMA memory backing the SQ/RQ rings */
	u32 *sig_trk_mem; /* signaled-completion tracking -- TODO confirm usage */
	struct irdma_sq_uk_wr_trk_info *sq_wrid_mem; /* per-SQ-WQE wr_id tracking */
	u64 *rq_wrid_mem; /* per-RQ-WQE wr_id storage */
};
200
/* queue pair: ib_qp wrapper plus all driver-side QP state */
struct irdma_qp {
	struct ib_qp ibqp;
	struct irdma_sc_qp sc_qp;
	struct irdma_device *iwdev;
	struct irdma_cq *iwscq; /* send CQ */
	struct irdma_cq *iwrcq; /* receive CQ */
	struct irdma_pd *iwpd;
	struct rdma_user_mmap_entry *push_wqe_mmap_entry;
	struct rdma_user_mmap_entry *push_db_mmap_entry;
	struct irdma_qp_host_ctx_info ctx_info;
	/* transport-specific offload info: iWARP or RoCE, never both */
	union {
		struct irdma_iwarp_offload_info iwarp_info;
		struct irdma_roce_offload_info roce_info;
	};

	/* L4 offload info: TCP (iWARP) or UDP (RoCEv2) */
	union {
		struct irdma_tcp_offload_info tcp_info;
		struct irdma_udp_offload_info udp_info;
	};

	struct irdma_ah roce_ah;
	struct list_head teardown_entry;
	struct list_head ud_list_elem; /* entry on the PD's udqp_list -- TODO confirm */
	atomic_t refcnt;
	struct iw_cm_id *cm_id; /* iWARP connection manager id */
	struct irdma_cm_node *cm_node;
	struct delayed_work dwork_flush; /* delayed flush of pending WRs on error */
	struct ib_mr *lsmm_mr;
	atomic_t hw_mod_qp_pend; /* HW modify-QP operation outstanding */
	enum ib_qp_state ibqp_state; /* last verbs state reported/accepted */
	u32 qp_mem_size;
	u32 last_aeq; /* most recent async event code seen for this QP */
	int max_send_wr;
	int max_recv_wr;
	atomic_t close_timer_started;
	spinlock_t lock; /* serialize posting WRs to SQ/RQ */
	spinlock_t dwork_flush_lock; /* protect mod_delayed_work */
	struct irdma_qp_context *iwqp_context;
	void *pbl_vbase;
	dma_addr_t pbl_pbase;
	struct page *page;
	u8 iwarp_state;
	atomic_t flush_issued;
	u16 term_sq_flush_code; /* flush code to report on SQ after terminate */
	u16 term_rq_flush_code; /* flush code to report on RQ after terminate */
	u8 hw_iwarp_state;
	u8 hw_tcp_state;
	u8 ae_src; /* source of the last async event */
	struct irdma_qp_kmode kqp; /* kernel-mode ring buffers */
	struct irdma_dma_mem host_ctx;
	struct timer_list terminate_timer;
	struct irdma_pbl *iwpbl;
	struct ib_sge *sg_list;
	struct irdma_dma_mem q2_ctx_mem;
	struct irdma_dma_mem ietf_mem;
	struct completion free_qp; /* signalled when refcnt allows destroy */
	wait_queue_head_t waitq;
	wait_queue_head_t mod_qp_waitq; /* waiters for hw_mod_qp_pend to clear */
	u8 rts_ae_rcvd; /* RTS async event received */
	bool active_conn:1; /* this side initiated the connection */
	bool user_mode:1;
	bool hte_added:1; /* added to the hash table entry (connection lookup) */
	bool sig_all:1; /* all WRs generate completions (IB_SIGNAL_ALL_WR) */
	bool pau_mode:1;
	bool suspend_pending:1;
};
267
/* work item for deferred UD QP QoS/priority update */
struct irdma_udqs_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
	u8 user_prio; /* user priority to apply */
	bool qs_change:1; /* QS handle change required -- TODO confirm */
};
274
/* caching attribute for user mmaps of BAR space */
enum irdma_mmap_flag {
	IRDMA_MMAP_IO_NC, /* map non-cached */
	IRDMA_MMAP_IO_WC, /* map write-combined */
};
279
/* driver wrapper for an rdma_user_mmap_entry describing one BAR mapping */
struct irdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 bar_offset; /* offset of the region within the device BAR */
	u8 mmap_flag; /* enum irdma_mmap_flag caching attribute */
};
285
irdma_fw_major_ver(struct irdma_sc_dev * dev)286 static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
287 {
288 return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
289 }
290
irdma_fw_minor_ver(struct irdma_sc_dev * dev)291 static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
292 {
293 return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
294 }
295
set_ib_wc_op_sq(struct irdma_cq_poll_info * cq_poll_info,struct ib_wc * entry)296 static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
297 struct ib_wc *entry)
298 {
299 struct irdma_sc_qp *qp;
300
301 switch (cq_poll_info->op_type) {
302 case IRDMA_OP_TYPE_RDMA_WRITE:
303 case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
304 entry->opcode = IB_WC_RDMA_WRITE;
305 break;
306 case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
307 case IRDMA_OP_TYPE_RDMA_READ:
308 entry->opcode = IB_WC_RDMA_READ;
309 break;
310 case IRDMA_OP_TYPE_SEND_SOL:
311 case IRDMA_OP_TYPE_SEND_SOL_INV:
312 case IRDMA_OP_TYPE_SEND_INV:
313 case IRDMA_OP_TYPE_SEND:
314 entry->opcode = IB_WC_SEND;
315 break;
316 case IRDMA_OP_TYPE_FAST_REG_NSMR:
317 entry->opcode = IB_WC_REG_MR;
318 break;
319 case IRDMA_OP_TYPE_INV_STAG:
320 entry->opcode = IB_WC_LOCAL_INV;
321 break;
322 default:
323 qp = cq_poll_info->qp_handle;
324 irdma_dev_err(to_ibdev(qp->dev), "Invalid opcode = %d in CQE\n",
325 cq_poll_info->op_type);
326 entry->status = IB_WC_GENERAL_ERR;
327 }
328 }
329
/*
 * set_ib_wc_op_rq - map an RQ completion to the verbs WC opcode
 * @cq_poll_info: polled CQE information
 * @entry: work completion to fill in
 * @send_imm_support: device/transport supports send-with-immediate
 */
static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry, bool send_imm_support)
{
	/*
	 * iWARP does not support sendImm, so the presence of Imm data
	 * must be WriteImm.
	 */
	if (!send_imm_support) {
		entry->opcode = cq_poll_info->imm_valid ?
				IB_WC_RECV_RDMA_WITH_IMM :
				IB_WC_RECV;
		return;
	}
	/*
	 * NOTE(review): this switch compares op_type against IB_OPCODE_*
	 * wire opcodes, while set_ib_wc_op_sq() compares the same field
	 * against IRDMA_OP_TYPE_* values. Presumably RQ CQEs carry the
	 * received fabric opcode -- confirm against the CQ poll code that
	 * fills cq_poll_info->op_type for RQ completions.
	 */
	switch (cq_poll_info->op_type) {
	case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
	case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		break;
	default:
		entry->opcode = IB_WC_RECV;
	}
}
352
353 /**
354 * irdma_mcast_mac_v4 - Get the multicast MAC for an IP address
355 * @ip_addr: IPv4 address
356 * @mac: pointer to result MAC address
357 *
358 */
irdma_mcast_mac_v4(u32 * ip_addr,u8 * mac)359 static inline void irdma_mcast_mac_v4(u32 *ip_addr, u8 *mac)
360 {
361 u8 *ip = (u8 *)ip_addr;
362 unsigned char mac4[ETHER_ADDR_LEN] = {0x01, 0x00, 0x5E, ip[2] & 0x7F, ip[1],
363 ip[0]};
364
365 ether_addr_copy(mac, mac4);
366 }
367
368 /**
369 * irdma_mcast_mac_v6 - Get the multicast MAC for an IP address
370 * @ip_addr: IPv6 address
371 * @mac: pointer to result MAC address
372 *
373 */
irdma_mcast_mac_v6(u32 * ip_addr,u8 * mac)374 static inline void irdma_mcast_mac_v6(u32 *ip_addr, u8 *mac)
375 {
376 u8 *ip = (u8 *)ip_addr;
377 unsigned char mac6[ETHER_ADDR_LEN] = {0x33, 0x33, ip[3], ip[2], ip[1], ip[0]};
378
379 ether_addr_copy(mac, mac6);
380 }
381
/* user mmap entry management */
struct rdma_user_mmap_entry*
irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
			     enum irdma_mmap_flag mmap_flag, u64 *mmap_offset);
/* memory region allocation/registration helpers */
struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
				  struct ib_pd *pd, u64 virt,
				  enum irdma_memreg_type reg_type);
void irdma_free_iwmr(struct irdma_mr *iwmr);
int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access,
			       bool create_stag);
/* verbs device registration */
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
/* software-generated flush completions for QPs in error */
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
void irdma_sched_qp_flush_work(struct irdma_qp *iwqp);
void irdma_kern_flush_worker(struct work_struct *work);
void irdma_user_flush_worker(struct work_struct *work);
/* memory window allocation */
int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr);
401 #endif /* IRDMA_VERBS_H */
402