xref: /src/sys/dev/irdma/irdma_utils.c (revision 5b7aa6c7bc9db19e8bd34a5b7892fb5df2a3068b)
1 /*-
2  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
3  *
4  * Copyright (c) 2015 - 2026 Intel Corporation
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenFabrics.org BSD license below:
11  *
12  *   Redistribution and use in source and binary forms, with or
13  *   without modification, are permitted provided that the following
14  *   conditions are met:
15  *
16  *    - Redistributions of source code must retain the above
17  *	copyright notice, this list of conditions and the following
18  *	disclaimer.
19  *
20  *    - Redistributions in binary form must reproduce the above
21  *	copyright notice, this list of conditions and the following
22  *	disclaimer in the documentation and/or other materials
23  *	provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include "irdma_main.h"
36 
37 LIST_HEAD(irdma_handlers);
38 DEFINE_SPINLOCK(irdma_handler_lock);
39 
40 static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS];
41 static const struct ae_desc ae_desc_list[] = {
42 	{IRDMA_AE_AMP_UNALLOCATED_STAG, "Unallocated memory key (L-Key/R-Key)"},
43 	{IRDMA_AE_AMP_INVALID_STAG, "Invalid memory key (L-Key/R-Key)"},
44 	{IRDMA_AE_AMP_BAD_QP,
45 	"Memory protection error: Accessing Memory Window (MW) which belongs to a different QP"},
46 	{IRDMA_AE_AMP_BAD_PD,
47 	"Memory protection error: Accessing Memory Window (MW)/Memory Region (MR) which belongs to a different PD"},
48 	{IRDMA_AE_AMP_BAD_STAG_KEY, "Bad memory key (L-Key/R-Key)"},
49 	{IRDMA_AE_AMP_BAD_STAG_INDEX, "Bad memory key (L-Key/R-Key): Too large memory key index"},
50 	{IRDMA_AE_AMP_BOUNDS_VIOLATION, "Memory Window (MW)/Memory Region (MR) bounds violation"},
51 	{IRDMA_AE_AMP_RIGHTS_VIOLATION, "Memory Window (MW)/Memory Region (MR) rights violation"},
52 	{IRDMA_AE_AMP_TO_WRAP,
53 	"Memory protection error: The address within Memory Window (MW)/Memory Region (MR) wraps"},
54 	{IRDMA_AE_AMP_FASTREG_VALID_STAG,
55 	"Fastreg error: Registration to a valid MR"},
56 	{IRDMA_AE_AMP_FASTREG_MW_STAG,
57 	"Fastreg error: Registration to a valid Memory Window (MW)"},
58 	{IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS, "Fastreg error: Invalid rights"},
59 	{IRDMA_AE_AMP_FASTREG_INVALID_LENGTH, "Fastreg error: Invalid length"},
60 	{IRDMA_AE_AMP_INVALIDATE_SHARED, "Attempt to invalidate a shared MR"},
61 	{IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS,
62 	"Attempt to remotely invalidate Memory Window (MW)/Memory Region (MR) without rights"},
63 	{IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS,
64 	"Attempt to invalidate MR with a bound Memory Window (MW)"},
65 	{IRDMA_AE_AMP_MWBIND_VALID_STAG,
66 	"Attempt to bind a Memory Window (MW) with a valid MW memory key (L-Key/R-Key)"},
67 	{IRDMA_AE_AMP_MWBIND_OF_MR_STAG,
68 	"Attempt to bind a Memory Window (MW) with an MR memory key (L-Key/R-Key)"},
69 	{IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG,
70 	"Attempt to bind a Memory Window (MW) to a zero-based MR"},
71 	{IRDMA_AE_AMP_MWBIND_TO_MW_STAG,
72 	"Attempt to bind a Memory Window (MW) using an MW memory key (L-Key/R-Key) instead of an MR memory key (L-Key/R-Key)"},
73 	{IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS, "Memory Window (MW) bind error: Invalid rights"},
74 	{IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS, "Memory Window (MW) bind error: Invalid bounds"},
75 	{IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT,
76 	"Memory Window (MW) bind error: Invalid parent MR"},
77 	{IRDMA_AE_AMP_MWBIND_BIND_DISABLED,
78 	"Memory Window (MW) bind error: Disabled bind support"},
79 	{IRDMA_AE_PRIV_OPERATION_DENIED,
80 	"Denying a privileged operation on a non-privileged QP"},
81 	{IRDMA_AE_AMP_INVALIDATE_TYPE1_MW, "Memory Window (MW) error: Invalidate type 1 MW"},
82 	{IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW,
83 	"Memory Window (MW) bind error: Zero-based addressing for type 1 MW"},
84 	{IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG,
85 	"Fastreg error: Invalid host page size config"},
86 	{IRDMA_AE_AMP_MWBIND_WRONG_TYPE, "MW bind error: Wrong Memory Window (MW) type"},
87 	{IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH,
88 	"Fastreg error: Invalid request to change physical MR to virtual or vice versa"},
89 	{IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG,
90 	"Userspace Direct Access (UDA) QP xmit error: Packet length exceeds the QP MTU"},
91 	{IRDMA_AE_UDA_XMIT_BAD_PD,
92 	"Userspace Direct Access (UDA) QP xmit error: Attempt to access a different PD"},
93 	{IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT,
94 	"Userspace Direct Access (UDA) QP xmit error: Too short packet length"},
95 	{IRDMA_AE_UDA_L4LEN_INVALID,
96 	"Userspace Direct Access (UDA) error: Invalid packet length field"},
97 	{IRDMA_AE_BAD_CLOSE,
98 	"iWARP error: Data is received when QP state is closing"},
99 	{IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE,
100 	"iWARP error: FIN is received when xmit data is pending"},
101 	{IRDMA_AE_CQ_OPERATION_ERROR, "CQ overflow"},
102 	{IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO,
103 	"QP error: Attempted RDMA Read when the outbound RDMA Read queue depth is zero"},
104 	{IRDMA_AE_STAG_ZERO_INVALID,
105 	"Zero invalid memory key (L-Key/R-Key) on inbound RDMA R/W"},
106 	{IRDMA_AE_IB_RREQ_AND_Q1_FULL,
107 	"QP error: Received RDMA Read request when the inbound RDMA Read queue is full"},
108 	{IRDMA_AE_IB_INVALID_REQUEST,
109 	"QP error: Invalid operation detected by the remote peer"},
110 	{IRDMA_AE_WQE_UNEXPECTED_OPCODE,
111 	"QP error: Invalid opcode in SQ WQE"},
112 	{IRDMA_AE_WQE_INVALID_PARAMETER,
113 	"QP error: Invalid parameter in a WQE"},
114 	{IRDMA_AE_WQE_INVALID_FRAG_DATA,
115 	"QP error: Invalid fragment in a WQE"},
116 	{IRDMA_AE_IB_REMOTE_ACCESS_ERROR,
117 	"RoCEv2 error: Remote access error"},
118 	{IRDMA_AE_IB_REMOTE_OP_ERROR,
119 	"RoCEv2 error: Remote operation error"},
120 	{IRDMA_AE_WQE_LSMM_TOO_LONG, "iWARP error: Connection error"},
121 	{IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN,
122 	"iWARP error: Invalid message sequence number"},
123 	{IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER,
124 	"iWARP error: Inbound message is too long for the available buffer"},
125 	{IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION, "iWARP error: Invalid DDP protocol version"},
126 	{IRDMA_AE_DDP_UBE_INVALID_MO, "Received message with too large offset"},
127 	{IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE,
128 	"iWARP error: Inbound Send message when no receive buffer is available"},
129 	{IRDMA_AE_DDP_UBE_INVALID_QN, "iWARP error: Invalid QP number in inbound packet"},
130 	{IRDMA_AE_DDP_NO_L_BIT,
131 	"iWARP error: Last bit not set in an inbound packet which completes RDMA Read"},
132 	{IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION, "iWARP error: Invalid RDMAP protocol version"},
133 	{IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE, "QP error: Invalid opcode"},
134 	{IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST, "Inbound Read request when QP isn't enabled for RDMA Read"},
135 	{IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP,
136 	"Inbound RDMA Read response or RDMA Write when QP isn't enabled for RDMA R/W"},
137 	{IRDMA_AE_ROCE_RSP_LENGTH_ERROR, "RoCEv2 error: Received packet with incorrect length field"},
138 	{IRDMA_AE_ROCE_EMPTY_MCG, "RoCEv2 error: Multicast group has no valid members"},
139 	{IRDMA_AE_ROCE_BAD_MC_IP_ADDR, "RoCEv2 error: Multicast IP address doesn't match"},
140 	{IRDMA_AE_ROCE_BAD_MC_QPID, "RoCEv2 error: Multicast packet QP number isn't 0xffffff"},
141 	{IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH, "RoCEv2 error: Multicast packet protocol mismatch"},
142 	{IRDMA_AE_INVALID_ARP_ENTRY, "Invalid ARP entry"},
143 	{IRDMA_AE_INVALID_TCP_OPTION_RCVD, "iWARP error: Invalid TCP option"},
144 	{IRDMA_AE_STALE_ARP_ENTRY, "Stale ARP entry"},
145 	{IRDMA_AE_INVALID_AH_ENTRY, "Invalid AH entry"},
146 	{IRDMA_AE_LLP_CLOSE_COMPLETE,
147 	"iWARP event: Graceful close complete"},
148 	{IRDMA_AE_LLP_CONNECTION_RESET,
149 	"iWARP event: Received a TCP packet with a RST bit set"},
150 	{IRDMA_AE_LLP_FIN_RECEIVED,
151 	"iWARP event: Received a TCP packet with a FIN bit set"},
152 	{IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH,
153 	"iWARP error: Unable to close a gap in the TCP sequence"},
154 	{IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR, "Received an ICRC error"},
155 	{IRDMA_AE_LLP_SEGMENT_TOO_SMALL,
156 	"iWARP error: Received a packet with insufficient space for protocol headers"},
157 	{IRDMA_AE_LLP_SYN_RECEIVED,
158 	"iWARP event: Received a TCP packet with a SYN bit set"},
159 	{IRDMA_AE_LLP_TERMINATE_RECEIVED,
160 	"iWARP error: Received a terminate message"},
161 	{IRDMA_AE_LLP_TOO_MANY_RETRIES, "Connection error: The max number of retries has been reached"},
162 	{IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES,
163 	"Connection error: The max number of keepalive retries has been reached"},
164 	{IRDMA_AE_LLP_DOUBT_REACHABILITY,
165 	"Connection error: Doubt reachability (usually occurs after the max number of retries has been reached)"},
166 	{IRDMA_AE_LLP_CONNECTION_ESTABLISHED,
167 	"iWARP event: Connection established"},
168 	{IRDMA_AE_LLP_TOO_MANY_RNRS, "RoCEv2: Too many RNR NACKs"},
169 	{IRDMA_AE_RESOURCE_EXHAUSTION,
170 	"QP error: Resource exhaustion"},
171 	{IRDMA_AE_RESET_SENT,
172 	"Reset sent (as requested via Modify QP)"},
173 	{IRDMA_AE_TERMINATE_SENT,
174 	"Terminate sent (as requested via Modify QP)"},
175 	{IRDMA_AE_RESET_NOT_SENT,
176 	"Reset not sent (but requested via Modify QP)"},
177 	{IRDMA_AE_LCE_QP_CATASTROPHIC,
178 	"QP error: HW transaction resulted in catastrophic error"},
179 	{IRDMA_AE_LCE_FUNCTION_CATASTROPHIC,
180 	"PCIe function error: HW transaction resulted in catastrophic error"},
181 	{IRDMA_AE_LCE_CQ_CATASTROPHIC,
182 	"CQ error: HW transaction resulted in catastrophic error"},
183 	{IRDMA_AE_QP_SUSPEND_COMPLETE, "QP event: Suspend complete"},
184 };
185 
186 /**
187  * irdma_get_ae_desc - returns AE description
188  * @ae_id: the AE number
189  */
190 const char *
191 irdma_get_ae_desc(u16 ae_id)
192 {
193 	const char *desc = "";
194 	int i;
195 
196 	for (i = 0; i < ARRAY_SIZE(ae_desc_list); i++) {
197 		if (ae_desc_list[i].id == ae_id) {
198 			desc = ae_desc_list[i].desc;
199 			break;
200 		}
201 	}
202 	return desc;
203 }
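
/*
 * Usage sketch (illustrative only, not part of the driver): AEQ handlers
 * can turn the numeric AE id into the human-readable text above, e.g.:
 *
 *	irdma_dev_err(&iwdev->ibdev, "AE 0x%x: %s\n", info->ae_id,
 *		      irdma_get_ae_desc(info->ae_id));
 *
 * Unknown ids yield "" rather than NULL, so the result is always safe to
 * feed to a printf-style format.
 */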
204 
205 /**
206  * irdma_arp_table - manage the ARP table
207  * @rf: RDMA PCI function
208  * @ip_addr: ip address for device
209  * @mac_addr: mac address ptr
210  * @action: modify, delete or add/update
211  */
212 int
213 irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, const u8 *mac_addr,
214 		u32 action)
215 {
216 	unsigned long flags;
217 	int arp_index;
218 	u32 ip[4] = {};
219 
220 	memcpy(ip, ip_addr, sizeof(ip));
221 
222 	spin_lock_irqsave(&rf->arp_lock, flags);
223 	for (arp_index = 0; (u32)arp_index < rf->arp_table_size; arp_index++) {
224 		if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip)) &&
225 		    !rf->arp_table[arp_index].delete_pending)
226 			break;
227 	}
228 
229 	switch (action) {
230 	case IRDMA_ARP_ADD_UPDATE:	/* ARP Add or Update */
231 		if (arp_index == rf->arp_table_size) {
232 			if (irdma_alloc_rsrc(rf, rf->allocated_arps,
233 					     rf->arp_table_size,
234 					     (u32 *)&arp_index,
235 					     &rf->next_arp_index)) {
236 				arp_index = -1;
237 				break;
238 			}
239 			atomic_set(&rf->arp_table[arp_index].refcnt, 0);
240 		}
241 
242 		memcpy(rf->arp_table[arp_index].ip_addr, ip,
243 		       sizeof(rf->arp_table[arp_index].ip_addr));
244 		ether_addr_copy(rf->arp_table[arp_index].mac_addr, mac_addr);
245 		break;
246 	case IRDMA_ARP_RESOLVE:
247 		if (arp_index == rf->arp_table_size)
248 			arp_index = -1;
249 		break;
250 	case IRDMA_ARP_DELETE:
251 		if (arp_index == rf->arp_table_size) {
252 			arp_index = -1;
253 			break;
254 		}
255 
256 		if (!atomic_read(&rf->arp_table[arp_index].refcnt)) {
257 			memset(rf->arp_table[arp_index].ip_addr, 0,
258 			       sizeof(rf->arp_table[arp_index].ip_addr));
259 			eth_zero_addr(rf->arp_table[arp_index].mac_addr);
260 			irdma_free_rsrc(rf, rf->allocated_arps, arp_index);
261 			rf->arp_table[arp_index].delete_pending = false;
262 		} else {
263 			rf->arp_table[arp_index].delete_pending = true;
264 			arp_index = -1;	/* prevent immediate CQP ARP index deletion */
265 		}
266 		break;
267 	default:
268 		arp_index = -1;
269 		break;
270 	}
271 
272 	spin_unlock_irqrestore(&rf->arp_lock, flags);
273 	return arp_index;
274 }
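
/*
 * Return convention note: irdma_arp_table() returns the ARP table index
 * on success and -1 otherwise. For IRDMA_ARP_DELETE on a still-referenced
 * entry, -1 is returned and the entry is only marked delete_pending; the
 * actual free and CQP delete are deferred to irdma_put_arp(). A minimal
 * lookup sketch (hypothetical caller):
 *
 *	int idx = irdma_arp_table(rf, ip, NULL, IRDMA_ARP_RESOLVE);
 *	if (idx < 0)
 *		return -ENOENT;
 *
 * where a negative idx means no usable (non-pending) entry exists.
 */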
275 
276 static int
277 irdma_get_arp(struct irdma_pci_f *rf, u16 arp_index)
278 {
279 	unsigned long flags;
280 	u32 ip_zero[4] = {};
281 
282 	if (arp_index >= rf->arp_table_size)
283 		return -EINVAL;
284 
285 	spin_lock_irqsave(&rf->arp_lock, flags);
286 	if (!memcmp(rf->arp_table[arp_index].ip_addr, ip_zero, sizeof(ip_zero))) {
287 		spin_unlock_irqrestore(&rf->arp_lock, flags);
288 		return -EINVAL;
289 	}
290 	if (!atomic_read(&rf->arp_table[arp_index].refcnt))
291 		atomic_set(&rf->arp_table[arp_index].refcnt, 1);
292 	else
293 		atomic_inc(&rf->arp_table[arp_index].refcnt);
294 	spin_unlock_irqrestore(&rf->arp_lock, flags);
295 
296 	return 0;
297 }
298 
299 static void
300 irdma_put_arp(struct irdma_pci_f *rf, u16 arp_index)
301 {
302 	unsigned long flags;
303 
304 	if (arp_index >= rf->arp_table_size)
305 		return;
306 	spin_lock_irqsave(&rf->arp_lock, flags);
307 	if (!atomic_dec_and_test(&rf->arp_table[arp_index].refcnt)) {
308 		spin_unlock_irqrestore(&rf->arp_lock, flags);
309 		return;
310 	}
311 
312 	if (rf->arp_table[arp_index].delete_pending) {
313 		u32 ip_addr[4];
314 
315 		memcpy(ip_addr, rf->arp_table[arp_index].ip_addr,
316 		       sizeof(ip_addr));
317 		memset(rf->arp_table[arp_index].ip_addr, 0,
318 		       sizeof(rf->arp_table[arp_index].ip_addr));
319 		eth_zero_addr(rf->arp_table[arp_index].mac_addr);
320 		spin_unlock_irqrestore(&rf->arp_lock, flags);
321 		irdma_arp_cqp_op(rf, arp_index, NULL, IRDMA_ARP_DELETE);
322 		rf->arp_table[arp_index].delete_pending = false;
323 		irdma_free_rsrc(rf, rf->allocated_arps, arp_index);
324 	} else {
325 		spin_unlock_irqrestore(&rf->arp_lock, flags);
326 	}
327 }
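
/*
 * irdma_get_arp()/irdma_put_arp() implement a per-slot reference count:
 * get rejects zeroed (unused) slots, and put performs the deferred
 * delete once the count reaches zero with delete_pending set. One
 * observation (an assumption from the locking above, not a documented
 * guarantee): the CQP delete is issued after arp_lock is dropped, so the
 * slot index only becomes reusable once irdma_free_rsrc() completes.
 */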
328 
329 /**
330  * irdma_add_arp - add a new arp entry if needed and resolve it
331  * @rf: RDMA function
332  * @ip: IP address
333  * @mac: MAC address
334  */
335 int
336 irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, const u8 *mac)
337 {
338 	irdma_manage_arp_cache(rf, mac, ip, IRDMA_ARP_ADD_UPDATE);
339 
340 	return irdma_arp_table(rf, ip, NULL, IRDMA_ARP_RESOLVE);
341 }
342 
343 /**
344  * irdma_netdevice_event - system notifier for netdev events
345  * @notifier: notifier block used to retrieve the irdma device
346  * @event: event for notifier
347  * @ptr: netdev
348  */
349 int
350 irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
351 		      void *ptr)
352 {
353 	struct irdma_device *iwdev;
354 	struct ifnet *netdev = netdev_notifier_info_to_ifp(ptr);
355 
356 	iwdev = container_of(notifier, struct irdma_device, nb_netdevice_event);
357 	if (iwdev->netdev != netdev)
358 		return NOTIFY_DONE;
359 
360 	iwdev->iw_status = 1;
361 	switch (event) {
362 	case NETDEV_DOWN:
363 		iwdev->iw_status = 0;
364 		/* fallthrough */
365 	case NETDEV_UP:
366 		irdma_port_ibevent(iwdev);
367 		break;
368 	default:
369 		break;
370 	}
371 
372 	return NOTIFY_DONE;
373 }
374 
375 void
376 irdma_unregister_notifiers(struct irdma_device *iwdev)
377 {
378 	unregister_netdevice_notifier(&iwdev->nb_netdevice_event);
379 }
380 
381 int
382 irdma_register_notifiers(struct irdma_device *iwdev)
383 {
384 	int ret;
385 
386 	iwdev->nb_netdevice_event.notifier_call = irdma_netdevice_event;
387 	ret = register_netdevice_notifier(&iwdev->nb_netdevice_event);
388 	if (ret) {
389 		irdma_dev_err(&iwdev->ibdev, "register_netdevice_notifier failed\n");
390 		return ret;
391 	}
392 	return ret;
393 }
394 /**
395  * irdma_alloc_and_get_cqp_request - allocate a cqp request and take a reference
396  * @cqp: device cqp ptr
397  * @wait: whether the request will be waited on (wait mode)
398  */
399 struct irdma_cqp_request *
400 irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
401 				bool wait)
402 {
403 	struct irdma_cqp_request *cqp_request = NULL;
404 	unsigned long flags;
405 
406 	spin_lock_irqsave(&cqp->req_lock, flags);
407 	if (!list_empty(&cqp->cqp_avail_reqs)) {
408 		cqp_request = list_entry(cqp->cqp_avail_reqs.next,
409 					 struct irdma_cqp_request, list);
410 		list_del_init(&cqp_request->list);
411 	}
412 	spin_unlock_irqrestore(&cqp->req_lock, flags);
413 	if (!cqp_request) {
414 		cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
415 		if (cqp_request) {
416 			cqp_request->dynamic = true;
417 			if (wait)
418 				init_waitqueue_head(&cqp_request->waitq);
419 		}
420 	}
421 	if (!cqp_request) {
422 		irdma_debug(cqp->sc_cqp.dev, IRDMA_DEBUG_ERR, "CQP Request Fail: No Memory");
423 		return NULL;
424 	}
425 
426 	cqp_request->waiting = wait;
427 	atomic_set(&cqp_request->refcnt, 1);
428 	memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info));
429 
430 	memset(&cqp_request->info, 0, sizeof(cqp_request->info));
431 
432 	return cqp_request;
433 }
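
/*
 * The returned request carries one reference owned by the caller. The
 * canonical lifecycle, which the CQP command wrappers later in this file
 * all follow, is (sketch; <op> stands for the specific command):
 *
 *	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
 *	if (!cqp_request)
 *		return -ENOMEM;
 *	cqp_info = &cqp_request->info;
 *	cqp_info->cqp_cmd = IRDMA_OP_<op>;
 *	cqp_info->post_sq = 1;
 *	cqp_info->in.u.<op>.scratch = (uintptr_t)cqp_request;
 *	status = irdma_handle_cqp_op(rf, cqp_request);
 *	irdma_put_cqp_request(&rf->cqp, cqp_request);
 */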
434 
435 /**
436  * irdma_get_cqp_request - increase refcount for cqp_request
437  * @cqp_request: pointer to cqp_request instance
438  */
439 static inline void
440 irdma_get_cqp_request(struct irdma_cqp_request *cqp_request)
441 {
442 	atomic_inc(&cqp_request->refcnt);
443 }
444 
445 /**
446  * irdma_free_cqp_request - free cqp request
447  * @cqp: cqp ptr
448  * @cqp_request: to be put back in cqp list
449  */
450 void
451 irdma_free_cqp_request(struct irdma_cqp *cqp,
452 		       struct irdma_cqp_request *cqp_request)
453 {
454 	unsigned long flags;
455 
456 	if (cqp_request->dynamic) {
457 		kfree(cqp_request);
458 	} else {
459 		WRITE_ONCE(cqp_request->request_done, false);
460 		cqp_request->callback_fcn = NULL;
461 		cqp_request->waiting = false;
462 
463 		spin_lock_irqsave(&cqp->req_lock, flags);
464 		list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
465 		spin_unlock_irqrestore(&cqp->req_lock, flags);
466 	}
467 	wake_up(&cqp->remove_wq);
468 }
469 
470 /**
471  * irdma_put_cqp_request - dec ref count and free if 0
472  * @cqp: cqp ptr
473  * @cqp_request: to be put back in cqp list
474  */
475 void
476 irdma_put_cqp_request(struct irdma_cqp *cqp,
477 		      struct irdma_cqp_request *cqp_request)
478 {
479 	if (atomic_dec_and_test(&cqp_request->refcnt))
480 		irdma_free_cqp_request(cqp, cqp_request);
481 }
482 
483 /**
484  * irdma_free_pending_cqp_request - free pending cqp request objects
485  * @cqp: cqp ptr
486  * @cqp_request: to be put back in cqp list
487  */
488 static void
489 irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
490 			       struct irdma_cqp_request *cqp_request)
491 {
492 	cqp_request->compl_info.error = true;
493 	WRITE_ONCE(cqp_request->request_done, true);
494 
495 	if (cqp_request->waiting)
496 		wake_up(&cqp_request->waitq);
497 	wait_event_timeout(cqp->remove_wq,
498 			   atomic_read(&cqp_request->refcnt) == 1, 1000);
499 	irdma_put_cqp_request(cqp, cqp_request);
500 }
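
/*
 * Forced completion: the request is marked done with a completion error
 * so any waiter in irdma_wait_event() unblocks, then this path waits (up
 * to the timeout, given in jiffies) for other holders to drop their
 * references before putting its own.
 */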
501 
502 /**
503  * irdma_cleanup_pending_cqp_op - clean up cqp requests that never
504  * received a completion
505  * @rf: RDMA PCI function
506  */
507 void
508 irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
509 {
510 	struct irdma_sc_dev *dev = &rf->sc_dev;
511 	struct irdma_cqp *cqp = &rf->cqp;
512 	struct irdma_cqp_request *cqp_request = NULL;
513 	struct cqp_cmds_info *pcmdinfo = NULL;
514 	u32 i, pending_work, wqe_idx;
515 
516 	pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
517 	wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
518 	for (i = 0; i < pending_work; i++) {
519 		cqp_request = (struct irdma_cqp_request *)(uintptr_t)
520 		    cqp->scratch_array[wqe_idx];
521 		if (cqp_request)
522 			irdma_free_pending_cqp_request(cqp, cqp_request);
523 		wqe_idx = (wqe_idx + 1) % IRDMA_RING_SIZE(cqp->sc_cqp.sq_ring);
524 	}
525 
526 	while (!list_empty(&dev->cqp_cmd_head)) {
527 		pcmdinfo = irdma_remove_cqp_head(dev);
528 		cqp_request =
529 		    container_of(pcmdinfo, struct irdma_cqp_request, info);
530 		if (cqp_request)
531 			irdma_free_pending_cqp_request(cqp, cqp_request);
532 	}
533 }
534 
535 /**
536  * irdma_wait_event - wait for completion
537  * @rf: RDMA PCI function
538  * @cqp_request: cqp request to wait
539  * @cqp_request: cqp request to wait on
540 static int
541 irdma_wait_event(struct irdma_pci_f *rf,
542 		 struct irdma_cqp_request *cqp_request)
543 {
544 	struct irdma_cqp_timeout cqp_timeout = {0};
545 	bool cqp_error = false;
546 	int err_code = 0;
547 
548 	cqp_timeout.compl_cqp_cmds = atomic64_read(&rf->sc_dev.cqp->completed_ops);
549 	do {
550 		int wait_time_ms = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms;
551 
552 		irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
553 		if (wait_event_timeout(cqp_request->waitq,
554 				       READ_ONCE(cqp_request->request_done),
555 				       msecs_to_jiffies(wait_time_ms)))
556 			break;
557 		if (cqp_request->info.cqp_cmd_exec_status) {
558 			irdma_debug(&rf->sc_dev, IRDMA_DEBUG_CQP,
559 				    "%s (%d) cqp op error status reported: %d, %d %x %x\n",
560 				    irdma_cqp_cmd_names[cqp_request->info.cqp_cmd],
561 				    cqp_request->info.cqp_cmd,
562 				    cqp_request->info.cqp_cmd_exec_status,
563 				    cqp_request->compl_info.error,
564 				    cqp_request->compl_info.maj_err_code,
565 				    cqp_request->compl_info.min_err_code);
566 			break;
567 		}
568 
569 		irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);
570 
571 		if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
572 			continue;
573 
574 		if (!rf->reset) {
575 			rf->reset = true;
576 			rf->gen_ops.request_reset(rf);
577 		}
578 		return -ETIMEDOUT;
579 	} while (1);
580 
581 	cqp_error = cqp_request->compl_info.error;
582 	if (cqp_error) {
583 		err_code = -EIO;
584 		if (cqp_request->compl_info.maj_err_code == 0xFFFF) {
585 			if (cqp_request->compl_info.min_err_code == 0x8002) {
586 				err_code = -EBUSY;
587 			} else if (cqp_request->compl_info.min_err_code == 0x8029) {
588 				if (!rf->reset) {
589 					rf->reset = true;
590 					rf->gen_ops.request_reset(rf);
591 				}
592 			}
593 		}
594 	}
595 
596 	return err_code;
597 }
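
/*
 * Wait strategy summary for the loop above: poll the CCQ directly, then
 * sleep up to max_cqp_compl_wait_time_ms per iteration. As long as
 * irdma_check_cqp_progress() sees forward progress the wait is retried;
 * only after CQP_TIMEOUT_THRESHOLD stalled iterations is a function
 * reset requested and -ETIMEDOUT returned. Completion errors map to
 * -EIO, except maj/min 0xFFFF/0x8002 ("Invalid State"), which maps to
 * -EBUSY, and 0xFFFF/0x8029, which additionally triggers a reset.
 */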
598 
599 static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
600 	[IRDMA_OP_CEQ_DESTROY] = "Destroy CEQ Cmd",
601 	[IRDMA_OP_AEQ_DESTROY] = "Destroy AEQ Cmd",
602 	[IRDMA_OP_DELETE_ARP_CACHE_ENTRY] = "Delete ARP Cache Cmd",
603 	[IRDMA_OP_MANAGE_APBVT_ENTRY] = "Manage APBV Table Entry Cmd",
604 	[IRDMA_OP_CEQ_CREATE] = "CEQ Create Cmd",
605 	[IRDMA_OP_AEQ_CREATE] = "AEQ Create Cmd",
606 	[IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY] = "Manage Quad Hash Table Entry Cmd",
607 	[IRDMA_OP_QP_MODIFY] = "Modify QP Cmd",
608 	[IRDMA_OP_QP_UPLOAD_CONTEXT] = "Upload Context Cmd",
609 	[IRDMA_OP_CQ_CREATE] = "Create CQ Cmd",
610 	[IRDMA_OP_CQ_DESTROY] = "Destroy CQ Cmd",
611 	[IRDMA_OP_QP_CREATE] = "Create QP Cmd",
612 	[IRDMA_OP_QP_DESTROY] = "Destroy QP Cmd",
613 	[IRDMA_OP_ALLOC_STAG] = "Allocate STag Cmd",
614 	[IRDMA_OP_MR_REG_NON_SHARED] = "Register Non-Shared MR Cmd",
615 	[IRDMA_OP_DEALLOC_STAG] = "Deallocate STag Cmd",
616 	[IRDMA_OP_MW_ALLOC] = "Allocate Memory Window Cmd",
617 	[IRDMA_OP_QP_FLUSH_WQES] = "Flush QP Cmd",
618 	[IRDMA_OP_ADD_ARP_CACHE_ENTRY] = "Add ARP Cache Cmd",
619 	[IRDMA_OP_MANAGE_PUSH_PAGE] = "Manage Push Page Cmd",
620 	[IRDMA_OP_UPDATE_PE_SDS] = "Update PE SDs Cmd",
621 	[IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd",
622 	[IRDMA_OP_SUSPEND] = "Suspend QP Cmd",
623 	[IRDMA_OP_RESUME] = "Resume QP Cmd",
624 	[IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd",
625 	[IRDMA_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd",
626 	[IRDMA_OP_AH_CREATE] = "Create Address Handle Cmd",
627 	[IRDMA_OP_AH_MODIFY] = "Modify Address Handle Cmd",
628 	[IRDMA_OP_AH_DESTROY] = "Destroy Address Handle Cmd",
629 	[IRDMA_OP_MC_CREATE] = "Create Multicast Group Cmd",
630 	[IRDMA_OP_MC_DESTROY] = "Destroy Multicast Group Cmd",
631 	[IRDMA_OP_MC_MODIFY] = "Modify Multicast Group Cmd",
632 	[IRDMA_OP_STATS_ALLOCATE] = "Add Statistics Instance Cmd",
633 	[IRDMA_OP_STATS_FREE] = "Free Statistics Instance Cmd",
634 	[IRDMA_OP_STATS_GATHER] = "Gather Statistics Cmd",
635 	[IRDMA_OP_WS_ADD_NODE] = "Add Work Scheduler Node Cmd",
636 	[IRDMA_OP_WS_MODIFY_NODE] = "Modify Work Scheduler Node Cmd",
637 	[IRDMA_OP_WS_DELETE_NODE] = "Delete Work Scheduler Node Cmd",
638 	[IRDMA_OP_WS_FAILOVER_START] = "Failover Start Cmd",
639 	[IRDMA_OP_WS_FAILOVER_COMPLETE] = "Failover Complete Cmd",
640 	[IRDMA_OP_SET_UP_MAP] = "Set UP-UP Mapping Cmd",
641 	[IRDMA_OP_GEN_AE] = "Generate AE Cmd",
642 	[IRDMA_OP_QUERY_RDMA_FEATURES] = "RDMA Get Features Cmd",
643 	[IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY] = "Allocate Local MAC Entry Cmd",
644 	[IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd",
645 	[IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd",
646 	[IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd",
647 };
648 
649 static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
650 	{0xffff, 0x8002, "Invalid State"},
651 	{0xffff, 0x8006, "Flush No Wqe Pending"},
652 	{0xffff, 0x8007, "Modify QP Bad Close"},
653 	{0xffff, 0x8009, "LLP Closed"},
654 	{0xffff, 0x800a, "Reset Not Sent"},
655 	{0xffff, 0x0200, "Failover Pending"},
656 };
657 
658 /**
659  * irdma_cqp_crit_err - check if CQP error is critical
660  * @dev: pointer to dev structure
661  * @cqp_cmd: code for last CQP operation
662  * @maj_err_code: major error code
663  * @min_err_code: minor error code
664  */
665 bool
666 irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
667 		   u16 maj_err_code, u16 min_err_code)
668 {
669 	int i;
670 
671 	for (i = 0; i < ARRAY_SIZE(irdma_noncrit_err_list); ++i) {
672 		if (maj_err_code == irdma_noncrit_err_list[i].maj &&
673 		    min_err_code == irdma_noncrit_err_list[i].min) {
674 			irdma_debug(dev, IRDMA_DEBUG_CQP,
675 				    "[%s Error][%s] maj=0x%x min=0x%x\n",
676 				    irdma_noncrit_err_list[i].desc,
677 				    irdma_cqp_cmd_names[cqp_cmd], maj_err_code,
678 				    min_err_code);
679 			return false;
680 		}
681 	}
682 	return true;
683 }
684 
685 /**
686  * irdma_handle_cqp_op - process cqp command
687  * @rf: RDMA PCI function
688  * @cqp_request: cqp request to process
689  */
690 int
691 irdma_handle_cqp_op(struct irdma_pci_f *rf,
692 		    struct irdma_cqp_request *cqp_request)
693 {
694 	struct irdma_sc_dev *dev = &rf->sc_dev;
695 	struct cqp_cmds_info *info = &cqp_request->info;
696 	int status;
697 	bool put_cqp_request = true;
698 
699 	if (rf->reset)
700 		return (info->create ? -EBUSY : 0);
701 
702 	irdma_get_cqp_request(cqp_request);
703 	status = irdma_process_cqp_cmd(dev, info);
704 	if (status)
705 		goto err;
706 
707 	if (cqp_request->waiting) {
708 		put_cqp_request = false;
709 		status = irdma_wait_event(rf, cqp_request);
710 		if (status)
711 			goto err;
712 	}
713 
714 	return 0;
715 
716 err:
717 	if (irdma_cqp_crit_err(dev, info->cqp_cmd,
718 			       cqp_request->compl_info.maj_err_code,
719 			       cqp_request->compl_info.min_err_code)) {
720 		int qpn = -1;
721 
722 		if (info->cqp_cmd == IRDMA_OP_QP_CREATE)
723 			qpn = cqp_request->info.in.u.qp_create.qp->qp_uk.qp_id;
724 		else if (info->cqp_cmd == IRDMA_OP_QP_MODIFY)
725 			qpn = cqp_request->info.in.u.qp_modify.qp->qp_uk.qp_id;
726 		else if (info->cqp_cmd == IRDMA_OP_QP_DESTROY)
727 			qpn = cqp_request->info.in.u.qp_destroy.qp->qp_uk.qp_id;
728 
729 		irdma_dev_err(&rf->iwdev->ibdev,
730 			      "[%s Error] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x qpn=%d\n",
731 			      irdma_cqp_cmd_names[info->cqp_cmd], status,
732 			      cqp_request->waiting, cqp_request->compl_info.error,
733 			      cqp_request->compl_info.maj_err_code,
734 			      cqp_request->compl_info.min_err_code, qpn);
735 	}
736 
737 	if (put_cqp_request)
738 		irdma_put_cqp_request(&rf->cqp, cqp_request);
739 
740 	return status;
741 }
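
/*
 * Reference handling note (inferred from the flow above, stated as an
 * assumption): irdma_handle_cqp_op() takes an extra reference for the
 * duration of the command. Once the command is successfully posted, that
 * reference is expected to be dropped on the completion path, so it is
 * only put here when submission itself fails; the caller still owns, and
 * must put, its own reference from irdma_alloc_and_get_cqp_request().
 */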
742 
743 void
744 irdma_qp_add_ref(struct ib_qp *ibqp)
745 {
746 	struct irdma_qp *iwqp = to_iwqp(ibqp);
747 
748 	atomic_inc(&iwqp->refcnt);
749 }
750 
751 void
752 irdma_qp_rem_ref(struct ib_qp *ibqp)
753 {
754 	struct irdma_qp *iwqp = to_iwqp(ibqp);
755 	struct irdma_device *iwdev = iwqp->iwdev;
756 	unsigned long flags;
757 
758 	spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
759 	if (!atomic_dec_and_test(&iwqp->refcnt)) {
760 		spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
761 		return;
762 	}
763 
764 	iwdev->rf->qp_table[iwqp->ibqp.qp_num] = NULL;
765 	spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
766 	complete(&iwqp->free_qp);
767 }
768 
769 void
770 irdma_cq_add_ref(struct ib_cq *ibcq)
771 {
772 	struct irdma_cq *iwcq = to_iwcq(ibcq);
773 
774 	atomic_inc(&iwcq->refcnt);
775 }
776 
777 void
778 irdma_cq_rem_ref(struct ib_cq *ibcq)
779 {
780 	struct irdma_cq *iwcq = to_iwcq(ibcq);
781 	struct irdma_pci_f *rf = container_of(iwcq->sc_cq.dev, struct irdma_pci_f, sc_dev);
782 	unsigned long flags;
783 
784 	spin_lock_irqsave(&rf->cqtable_lock, flags);
785 	if (!atomic_dec_and_test(&iwcq->refcnt)) {
786 		spin_unlock_irqrestore(&rf->cqtable_lock, flags);
787 		return;
788 	}
789 
790 	WRITE_ONCE(rf->cq_table[iwcq->cq_num], NULL);
791 	spin_unlock_irqrestore(&rf->cqtable_lock, flags);
792 	complete(&iwcq->free_cq);
793 }
794 
795 struct ib_device *
796 to_ibdev(struct irdma_sc_dev *dev)
797 {
798 	return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev;
799 }
800 
801 /**
802  * irdma_get_qp - get qp address
803  * @device: iwarp device
804  * @qpn: qp number
805  */
806 struct ib_qp *
807 irdma_get_qp(struct ib_device *device, int qpn)
808 {
809 	struct irdma_device *iwdev = to_iwdev(device);
810 
811 	if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp)
812 		return NULL;
813 
814 	return &iwdev->rf->qp_table[qpn]->ibqp;
815 }
816 
817 /**
818  * irdma_remove_cqp_head - return head entry and remove
819  * @dev: device
820  */
821 void *
822 irdma_remove_cqp_head(struct irdma_sc_dev *dev)
823 {
824 	struct list_head *entry;
825 	struct list_head *list = &dev->cqp_cmd_head;
826 
827 	if (list_empty(list))
828 		return NULL;
829 
830 	entry = list->next;
831 	list_del(entry);
832 
833 	return entry;
834 }
835 
836 /**
837  * irdma_cqp_sds_cmd - create cqp command for sd
838  * @dev: hardware control device structure
839  * @sdinfo: information for sd cqp
840  *
841  */
842 int
843 irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
844 		  struct irdma_update_sds_info *sdinfo)
845 {
846 	struct irdma_cqp_request *cqp_request;
847 	struct cqp_cmds_info *cqp_info;
848 	struct irdma_pci_f *rf = dev_to_rf(dev);
849 	int status;
850 
851 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
852 	if (!cqp_request)
853 		return -ENOMEM;
854 
855 	cqp_info = &cqp_request->info;
856 	memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
857 	       sizeof(cqp_info->in.u.update_pe_sds.info));
858 	cqp_info->cqp_cmd = IRDMA_OP_UPDATE_PE_SDS;
859 	cqp_info->post_sq = 1;
860 	cqp_info->in.u.update_pe_sds.dev = dev;
861 	cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
862 
863 	status = irdma_handle_cqp_op(rf, cqp_request);
864 	irdma_put_cqp_request(&rf->cqp, cqp_request);
865 
866 	return status;
867 }
868 
869 /**
870  * irdma_cqp_qp_suspend_resume - cqp command for suspend/resume
871  * @qp: hardware control qp
872  * @op: suspend or resume
873  */
874 int
875 irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 op)
876 {
877 	struct irdma_sc_dev *dev = qp->dev;
878 	struct irdma_cqp_request *cqp_request;
879 	struct irdma_sc_cqp *cqp = dev->cqp;
880 	struct cqp_cmds_info *cqp_info;
881 	struct irdma_pci_f *rf = dev_to_rf(dev);
882 	int status;
883 
884 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
885 	if (!cqp_request)
886 		return -ENOMEM;
887 
888 	cqp_info = &cqp_request->info;
889 	cqp_info->cqp_cmd = op;
890 	cqp_info->in.u.suspend_resume.cqp = cqp;
891 	cqp_info->in.u.suspend_resume.qp = qp;
892 	cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
893 
894 	status = irdma_handle_cqp_op(rf, cqp_request);
895 	irdma_put_cqp_request(&rf->cqp, cqp_request);
896 
897 	return status;
898 }
899 
900 /**
901  * irdma_term_modify_qp - modify qp for term message
902  * @qp: hardware control qp
903  * @next_state: qp's next state
904  * @term: terminate code
905  * @term_len: length of the terminate message
906  */
907 void
908 irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term,
909 		     u8 term_len)
910 {
911 	struct irdma_qp *iwqp;
912 
913 	iwqp = qp->qp_uk.back_qp;
914 	irdma_next_iw_state(iwqp, next_state, 0, term, term_len);
915 }
916 
917 /**
918  * irdma_terminate_done - after terminate is completed
919  * @qp: hardware control qp
920  * @timeout_occurred: indicates if terminate timer expired
921  */
922 void
923 irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
924 {
925 	struct irdma_qp *iwqp;
926 	u8 hte = 0;
927 	bool first_time;
928 	unsigned long flags;
929 
930 	iwqp = qp->qp_uk.back_qp;
931 	spin_lock_irqsave(&iwqp->lock, flags);
932 	if (iwqp->hte_added) {
933 		iwqp->hte_added = 0;
934 		hte = 1;
935 	}
936 	first_time = !(qp->term_flags & IRDMA_TERM_DONE);
937 	qp->term_flags |= IRDMA_TERM_DONE;
938 	spin_unlock_irqrestore(&iwqp->lock, flags);
939 	if (first_time) {
940 		if (!timeout_occurred)
941 			irdma_terminate_del_timer(qp);
942 
943 		irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, hte, 0, 0);
944 		irdma_cm_disconn(iwqp);
945 	}
946 }
947 
948 static void
949 irdma_terminate_timeout(struct timer_list *t)
950 {
951 	struct irdma_qp *iwqp = timer_container_of(iwqp, t, terminate_timer);
952 	struct irdma_sc_qp *qp = &iwqp->sc_qp;
953 
954 	irdma_terminate_done(qp, 1);
955 	irdma_qp_rem_ref(&iwqp->ibqp);
956 }
957 
958 /**
959  * irdma_terminate_start_timer - start terminate timeout
960  * @qp: hardware control qp
961  */
962 void
963 irdma_terminate_start_timer(struct irdma_sc_qp *qp)
964 {
965 	struct irdma_qp *iwqp;
966 
967 	iwqp = qp->qp_uk.back_qp;
968 	irdma_qp_add_ref(&iwqp->ibqp);
969 	timer_setup(&iwqp->terminate_timer, irdma_terminate_timeout, 0);
970 	iwqp->terminate_timer.expires = jiffies + HZ;
971 
972 	add_timer(&iwqp->terminate_timer);
973 }
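
/*
 * Timer reference ownership: the QP reference taken here is released
 * either by irdma_terminate_timeout() when the one-second timer fires,
 * or by irdma_terminate_del_timer() when the timer is cancelled first
 * (irdma_del_timer_compat() reports whether it deactivated a pending
 * timer, so the reference is dropped exactly once).
 */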
974 
975 /**
976  * irdma_terminate_del_timer - delete terminate timeout
977  * @qp: hardware control qp
978  */
979 void
980 irdma_terminate_del_timer(struct irdma_sc_qp *qp)
981 {
982 	struct irdma_qp *iwqp;
983 	int ret;
984 
985 	iwqp = qp->qp_uk.back_qp;
986 	ret = irdma_del_timer_compat(&iwqp->terminate_timer);
987 	if (ret)
988 		irdma_qp_rem_ref(&iwqp->ibqp);
989 }
990 
991 /**
992  * irdma_cqp_query_fpm_val_cmd - send cqp command for fpm
993  * @dev: function device struct
994  * @val_mem: buffer for fpm
995  * @hmc_fn_id: function id for fpm
996  */
997 int
998 irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
999 			    struct irdma_dma_mem *val_mem, u16 hmc_fn_id)
1000 {
1001 	struct irdma_cqp_request *cqp_request;
1002 	struct cqp_cmds_info *cqp_info;
1003 	struct irdma_pci_f *rf = dev_to_rf(dev);
1004 	int status;
1005 
1006 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1007 	if (!cqp_request)
1008 		return -ENOMEM;
1009 
1010 	cqp_info = &cqp_request->info;
1011 	cqp_request->param = NULL;
1012 	cqp_info->in.u.query_fpm_val.cqp = dev->cqp;
1013 	cqp_info->in.u.query_fpm_val.fpm_val_pa = val_mem->pa;
1014 	cqp_info->in.u.query_fpm_val.fpm_val_va = val_mem->va;
1015 	cqp_info->in.u.query_fpm_val.hmc_fn_id = hmc_fn_id;
1016 	cqp_info->cqp_cmd = IRDMA_OP_QUERY_FPM_VAL;
1017 	cqp_info->post_sq = 1;
1018 	cqp_info->in.u.query_fpm_val.scratch = (uintptr_t)cqp_request;
1019 
1020 	status = irdma_handle_cqp_op(rf, cqp_request);
1021 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1022 
1023 	return status;
1024 }
1025 
1026 /**
1027  * irdma_cqp_commit_fpm_val_cmd - commit fpm values in hw
1028  * @dev: hardware control device structure
1029  * @val_mem: buffer with fpm values
1030  * @hmc_fn_id: function id for fpm
1031  */
1032 int
1033 irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
1034 			     struct irdma_dma_mem *val_mem, u16 hmc_fn_id)
1035 {
1036 	struct irdma_cqp_request *cqp_request;
1037 	struct cqp_cmds_info *cqp_info;
1038 	struct irdma_pci_f *rf = dev_to_rf(dev);
1039 	int status;
1040 
1041 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1042 	if (!cqp_request)
1043 		return -ENOMEM;
1044 
1045 	cqp_info = &cqp_request->info;
1046 	cqp_request->param = NULL;
1047 	cqp_info->in.u.commit_fpm_val.cqp = dev->cqp;
1048 	cqp_info->in.u.commit_fpm_val.fpm_val_pa = val_mem->pa;
1049 	cqp_info->in.u.commit_fpm_val.fpm_val_va = val_mem->va;
1050 	cqp_info->in.u.commit_fpm_val.hmc_fn_id = hmc_fn_id;
1051 	cqp_info->cqp_cmd = IRDMA_OP_COMMIT_FPM_VAL;
1052 	cqp_info->post_sq = 1;
1053 	cqp_info->in.u.commit_fpm_val.scratch = (uintptr_t)cqp_request;
1054 
1055 	status = irdma_handle_cqp_op(rf, cqp_request);
1056 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1057 
1058 	return status;
1059 }
1060 
1061 /**
1062  * irdma_cqp_cq_create_cmd - create a cq for the cqp
1063  * @dev: device pointer
1064  * @cq: pointer to created cq
1065  */
1066 int
1067 irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
1068 {
1069 	struct irdma_pci_f *rf = dev_to_rf(dev);
1070 	struct irdma_cqp *iwcqp = &rf->cqp;
1071 	struct irdma_cqp_request *cqp_request;
1072 	struct cqp_cmds_info *cqp_info;
1073 	int status;
1074 
1075 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
1076 	if (!cqp_request)
1077 		return -ENOMEM;
1078 
1079 	cqp_info = &cqp_request->info;
1080 	cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
1081 	cqp_info->post_sq = 1;
1082 	cqp_info->in.u.cq_create.cq = cq;
1083 	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
1084 	cqp_info->create = true;
1085 
1086 	status = irdma_handle_cqp_op(rf, cqp_request);
1087 	irdma_put_cqp_request(iwcqp, cqp_request);
1088 
1089 	return status;
1090 }
1091 
1092 /**
1093  * irdma_cqp_qp_create_cmd - create a qp for the cqp
1094  * @dev: device pointer
1095  * @qp: pointer to created qp
1096  */
1097 int
1098 irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
1099 {
1100 	struct irdma_pci_f *rf = dev_to_rf(dev);
1101 	struct irdma_cqp *iwcqp = &rf->cqp;
1102 	struct irdma_cqp_request *cqp_request;
1103 	struct cqp_cmds_info *cqp_info;
1104 	struct irdma_create_qp_info *qp_info;
1105 	int status;
1106 
1107 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
1108 	if (!cqp_request)
1109 		return -ENOMEM;
1110 
1111 	cqp_info = &cqp_request->info;
1112 	qp_info = &cqp_request->info.in.u.qp_create.info;
1113 	qp_info->cq_num_valid = true;
1114 	qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;
1115 	cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
1116 	cqp_info->post_sq = 1;
1117 	cqp_info->in.u.qp_create.qp = qp;
1118 	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
1119 	cqp_info->create = true;
1120 
1121 	status = irdma_handle_cqp_op(rf, cqp_request);
1122 	irdma_put_cqp_request(iwcqp, cqp_request);
1123 
1124 	return status;
1125 }
1126 
1127 /**
1128  * irdma_dealloc_push_page - free a push page for qp
1129  * @rf: RDMA PCI function
1130  * @iwqp: QP pointer
1131  */
1132 void
1133 irdma_dealloc_push_page(struct irdma_pci_f *rf,
1134 			struct irdma_qp *iwqp)
1135 {
1136 	struct irdma_sc_qp *qp = &iwqp->sc_qp;
1137 	struct irdma_cqp_request *cqp_request;
1138 	struct cqp_cmds_info *cqp_info;
1139 	int status;
1140 
1141 	if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX)
1142 		return;
1143 
1144 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
1145 	if (!cqp_request)
1146 		return;
1147 
1148 	cqp_info = &cqp_request->info;
1149 	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
1150 	cqp_info->post_sq = 1;
1151 	cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
1152 	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
1153 	cqp_info->in.u.manage_push_page.info.free_page = 1;
1154 	cqp_info->in.u.manage_push_page.info.push_page_type = 0;
1155 	cqp_info->in.u.manage_push_page.cqp = &rf->cqp.sc_cqp;
1156 	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
1157 
1158 	status = irdma_handle_cqp_op(rf, cqp_request);
1159 	if (!status)
1160 		qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
1161 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1162 }
1163 
1164 /**
1165  * irdma_cq_wq_destroy - send cq destroy cqp
1166  * @rf: RDMA PCI function
1167  * @cq: hardware control cq
1168  */
1169 void
1170 irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
1171 {
1172 	struct irdma_cqp_request *cqp_request;
1173 	struct cqp_cmds_info *cqp_info;
1174 
1175 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1176 	if (!cqp_request)
1177 		return;
1178 
1179 	cqp_info = &cqp_request->info;
1180 	cqp_info->cqp_cmd = IRDMA_OP_CQ_DESTROY;
1181 	cqp_info->post_sq = 1;
1182 	cqp_info->in.u.cq_destroy.cq = cq;
1183 	cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
1184 
1185 	irdma_handle_cqp_op(rf, cqp_request);
1186 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1187 }
1188 
1189 /**
1190  * irdma_hw_modify_qp_callback - handle state for modify QPs that don't wait
1191  * @cqp_request: modify QP completion
1192  */
1193 static void
1194 irdma_hw_modify_qp_callback(struct irdma_cqp_request *cqp_request)
1195 {
1196 	struct cqp_cmds_info *cqp_info;
1197 	struct irdma_qp *iwqp;
1198 
1199 	cqp_info = &cqp_request->info;
1200 	iwqp = cqp_info->in.u.qp_modify.qp->qp_uk.back_qp;
1201 	atomic_dec(&iwqp->hw_mod_qp_pend);
1202 	wake_up(&iwqp->mod_qp_waitq);
1203 }
1204 
1205 /**
1206  * irdma_hw_modify_qp - setup cqp for modify qp
1207  * @iwdev: RDMA device
1208  * @iwqp: qp ptr (user or kernel)
1209  * @info: info for modify qp
1210  * @wait: flag to wait or not for modify qp completion
1211  */
1212 int
1213 irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
1214 		   struct irdma_modify_qp_info *info, bool wait)
1215 {
1216 	int status;
1217 	struct irdma_pci_f *rf = iwdev->rf;
1218 	struct irdma_cqp_request *cqp_request;
1219 	struct cqp_cmds_info *cqp_info;
1220 	struct irdma_modify_qp_info *m_info;
1221 
1222 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
1223 	if (!cqp_request)
1224 		return -ENOMEM;
1225 
1226 	if (!wait) {
1227 		cqp_request->callback_fcn = irdma_hw_modify_qp_callback;
1228 		atomic_inc(&iwqp->hw_mod_qp_pend);
1229 	}
1230 	cqp_info = &cqp_request->info;
1231 	m_info = &cqp_info->in.u.qp_modify.info;
1232 	memcpy(m_info, info, sizeof(*m_info));
1233 	cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
1234 	cqp_info->post_sq = 1;
1235 	cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
1236 	cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
1237 	cqp_info->create = false;
1238 	status = irdma_handle_cqp_op(rf, cqp_request);
1239 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1240 	if (status) {
1241 		if (rdma_protocol_roce(&iwdev->ibdev, 1))
1242 			return status;
1243 
1244 		switch (m_info->next_iwarp_state) {
1245 			struct irdma_gen_ae_info ae_info;
1246 
1247 		case IRDMA_QP_STATE_RTS:
1248 		case IRDMA_QP_STATE_IDLE:
1249 		case IRDMA_QP_STATE_TERMINATE:
1250 		case IRDMA_QP_STATE_CLOSING:
1251 			if (info->curr_iwarp_state == IRDMA_QP_STATE_IDLE)
1252 				irdma_send_reset(iwqp->cm_node);
1253 			else
1254 				iwqp->sc_qp.term_flags = IRDMA_TERM_DONE;
1255 			if (!wait) {
1256 				ae_info.ae_code = IRDMA_AE_BAD_CLOSE;
1257 				ae_info.ae_src = 0;
1258 				irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false);
1259 			} else {
1260 				cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp,
1261 									      wait);
1262 				if (!cqp_request)
1263 					return -ENOMEM;
1264 
1265 				cqp_info = &cqp_request->info;
1266 				m_info = &cqp_info->in.u.qp_modify.info;
1267 				memcpy(m_info, info, sizeof(*m_info));
1268 				cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
1269 				cqp_info->post_sq = 1;
1270 				cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
1271 				cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
1272 				m_info->next_iwarp_state = IRDMA_QP_STATE_ERROR;
1273 				m_info->reset_tcp_conn = true;
1274 				irdma_handle_cqp_op(rf, cqp_request);
1275 				irdma_put_cqp_request(&rf->cqp, cqp_request);
1276 			}
1277 			break;
1278 		case IRDMA_QP_STATE_ERROR:
1279 		default:
1280 			break;
1281 		}
1282 	}
1283 
1284 	return status;
1285 }
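
/*
 * Error recovery above is iWARP-only (RoCE returns the failure as-is).
 * A failed transition toward RTS/IDLE/TERMINATE/CLOSING is escalated:
 * either a bad-close AE is generated (no-wait case), or a second, waited
 * modify forces the QP to IRDMA_QP_STATE_ERROR with reset_tcp_conn set,
 * tearing down the TCP connection.
 */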
1286 
1287 /**
1288  * irdma_cqp_cq_destroy_cmd - destroy the cqp cq
1289  * @dev: device pointer
1290  * @cq: pointer to cq
1291  */
1292 void
1293 irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
1294 {
1295 	struct irdma_pci_f *rf = dev_to_rf(dev);
1296 
1297 	irdma_cq_wq_destroy(rf, cq);
1298 }
1299 
1300 /**
1301  * irdma_cqp_qp_destroy_cmd - destroy a qp via cqp command
1302  * @dev: device pointer
1303  * @qp: pointer to qp
1304  */
1305 int
1306 irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
1307 {
1308 	struct irdma_pci_f *rf = dev_to_rf(dev);
1309 	struct irdma_cqp *iwcqp = &rf->cqp;
1310 	struct irdma_cqp_request *cqp_request;
1311 	struct cqp_cmds_info *cqp_info;
1312 	int status;
1313 
1314 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
1315 	if (!cqp_request)
1316 		return -ENOMEM;
1317 
1318 	cqp_info = &cqp_request->info;
1319 	cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
1320 	cqp_info->post_sq = 1;
1321 	cqp_info->in.u.qp_destroy.qp = qp;
1322 	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
1323 	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
1324 
1325 	status = irdma_handle_cqp_op(rf, cqp_request);
1326 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1327 
1328 	return status;
1329 }
1330 
1331 /**
1332  * irdma_ieq_mpa_crc_ae - generate AE for crc error
1333  * @dev: hardware control device structure
1334  * @qp: hardware control qp
1335  */
1336 void
1337 irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
1338 {
1339 	struct irdma_gen_ae_info info = {0};
1340 	struct irdma_pci_f *rf = dev_to_rf(dev);
1341 
1342 	irdma_debug(&rf->sc_dev, IRDMA_DEBUG_AEQ, "Generate MPA CRC AE\n");
1343 	info.ae_code = IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR;
1344 	info.ae_src = IRDMA_AE_SOURCE_RQ;
1345 	irdma_gen_ae(rf, qp, &info, false);
1346 }
1347 
1348 /**
1349  * irdma_ieq_get_qp - get qp based on quad in puda buffer
1350  * @dev: hardware control device structure
1351  * @buf: receive puda buffer on exception q
1352  */
1353 struct irdma_sc_qp *
1354 irdma_ieq_get_qp(struct irdma_sc_dev *dev,
1355 		 struct irdma_puda_buf *buf)
1356 {
1357 	struct irdma_qp *iwqp;
1358 	struct irdma_cm_node *cm_node;
1359 	struct irdma_device *iwdev = buf->vsi->back_vsi;
1360 	u32 loc_addr[4] = {0};
1361 	u32 rem_addr[4] = {0};
1362 	u16 loc_port, rem_port;
1363 	struct ip6_hdr *ip6h;
1364 	struct ip *iph = (struct ip *)buf->iph;
1365 	struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
1366 
1367 	if (iph->ip_v == 4) {
1368 		loc_addr[0] = ntohl(iph->ip_dst.s_addr);
1369 		rem_addr[0] = ntohl(iph->ip_src.s_addr);
1370 	} else {
1371 		ip6h = (struct ip6_hdr *)buf->iph;
1372 		irdma_copy_ip_ntohl(loc_addr, ip6h->ip6_dst.__u6_addr.__u6_addr32);
1373 		irdma_copy_ip_ntohl(rem_addr, ip6h->ip6_src.__u6_addr.__u6_addr32);
1374 	}
1375 	loc_port = ntohs(tcph->th_dport);
1376 	rem_port = ntohs(tcph->th_sport);
1377 	cm_node = irdma_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
1378 				  loc_addr, buf->vlan_valid ? buf->vlan_id : 0xFFFF);
1379 	if (!cm_node)
1380 		return NULL;
1381 
1382 	iwqp = cm_node->iwqp;
1383 	irdma_rem_ref_cmnode(cm_node);
1384 
1385 	return &iwqp->sc_qp;
1386 }
1387 
1388 /**
1389  * irdma_send_ieq_ack - ACKs for duplicate or out-of-order (OOO) partial FPDUs
1390  * @qp: qp ptr
1391  */
1392 void
1393 irdma_send_ieq_ack(struct irdma_sc_qp *qp)
1394 {
1395 	struct irdma_cm_node *cm_node = ((struct irdma_qp *)qp->qp_uk.back_qp)->cm_node;
1396 	struct irdma_puda_buf *buf = qp->pfpdu.lastrcv_buf;
1397 	struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
1398 
1399 	cm_node->tcp_cntxt.rcv_nxt = qp->pfpdu.nextseqnum;
1400 	cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->th_ack);
1401 
1402 	irdma_send_ack(cm_node);
1403 }
1404 
1405 /**
1406  * irdma_puda_ieq_get_ah_info - get AH info from IEQ buffer
1407  * @qp: qp pointer
1408  * @ah_info: AH info pointer
1409  */
1410 void
1411 irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
1412 			   struct irdma_ah_info *ah_info)
1413 {
1414 	struct irdma_puda_buf *buf = qp->pfpdu.ah_buf;
1415 	struct ip *iph;
1416 	struct ip6_hdr *ip6h;
1417 
1418 	memset(ah_info, 0, sizeof(*ah_info));
1419 	ah_info->do_lpbk = true;
1420 	ah_info->vlan_tag = buf->vlan_id;
1421 	ah_info->insert_vlan_tag = buf->vlan_valid;
1422 	ah_info->ipv4_valid = buf->ipv4;
1423 	ah_info->vsi = qp->vsi;
1424 
1425 	if (buf->smac_valid)
1426 		ether_addr_copy(ah_info->mac_addr, buf->smac);
1427 
1428 	if (buf->ipv4) {
1429 		ah_info->ipv4_valid = true;
1430 		iph = (struct ip *)buf->iph;
1431 		ah_info->hop_ttl = iph->ip_ttl;
1432 		ah_info->tc_tos = iph->ip_tos;
1433 		ah_info->dest_ip_addr[0] = ntohl(iph->ip_dst.s_addr);
1434 		ah_info->src_ip_addr[0] = ntohl(iph->ip_src.s_addr);
1435 	} else {
1436 		ip6h = (struct ip6_hdr *)buf->iph;
1437 		ah_info->hop_ttl = ip6h->ip6_hops;
1438 		ah_info->tc_tos = ip6h->ip6_vfc;
1439 		irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
1440 				    ip6h->ip6_dst.__u6_addr.__u6_addr32);
1441 		irdma_copy_ip_ntohl(ah_info->src_ip_addr,
1442 				    ip6h->ip6_src.__u6_addr.__u6_addr32);
1443 	}
1444 
1445 	ah_info->dst_arpindex = irdma_arp_table(dev_to_rf(qp->dev),
1446 						ah_info->dest_ip_addr,
1447 						NULL, IRDMA_ARP_RESOLVE);
1448 }
1449 
1450 /**
1451  * irdma_gen1_ieq_update_tcpip_info - update TCP/IP header fields in the buffer
1452  * @buf: puda to update
1453  * @len: length of buffer
1454  * @seqnum: seq number for tcp
1455  */
1456 static void
1457 irdma_gen1_ieq_update_tcpip_info(struct irdma_puda_buf *buf,
1458 				 u16 len, u32 seqnum)
1459 {
1460 	struct tcphdr *tcph;
1461 	struct ip *iph;
1462 	u16 iphlen;
1463 	u16 pktsize;
1464 	u8 *addr = buf->mem.va;
1465 
1466 	iphlen = (buf->ipv4) ? 20 : 40;
1467 	iph = (struct ip *)(addr + buf->maclen);
1468 	tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
1469 	pktsize = len + buf->tcphlen + iphlen;
1470 	iph->ip_len = htons(pktsize);
1471 	tcph->th_seq = htonl(seqnum);
1472 }
1473 
1474 /**
1475  * irdma_ieq_update_tcpip_info - update TCP/IP header fields in the buffer
1476  * @buf: puda to update
1477  * @len: length of buffer
1478  * @seqnum: seq number for tcp
1479  */
1480 void
1481 irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
1482 			    u32 seqnum)
1483 {
1484 	struct tcphdr *tcph;
1485 	u8 *addr;
1486 
1487 	if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1488 		return irdma_gen1_ieq_update_tcpip_info(buf, len, seqnum);
1489 
1490 	addr = buf->mem.va;
1491 	tcph = (struct tcphdr *)addr;
1492 	tcph->th_seq = htonl(seqnum);
1493 }
1494 
1495 /**
1496  * irdma_gen1_puda_get_tcpip_info - get tcpip info from puda
1497  * buffer
1498  * @info: to get information
1499  * @buf: puda buffer
1500  */
1501 static int
1502 irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
1503 			       struct irdma_puda_buf *buf)
1504 {
1505 	struct ip *iph;
1506 	struct ip6_hdr *ip6h;
1507 	struct tcphdr *tcph;
1508 	u16 iphlen;
1509 	u16 pkt_len;
1510 	u8 *mem = buf->mem.va;
1511 	struct ether_header *ethh = buf->mem.va;
1512 
1513 	if (ethh->ether_type == htons(0x8100)) {
1514 		info->vlan_valid = true;
1515 		buf->vlan_id = ntohs(((struct ether_vlan_header *)ethh)->evl_tag) &
1516 		    EVL_VLID_MASK;
1517 	}
1518 
1519 	buf->maclen = (info->vlan_valid) ? 18 : 14;
1520 	iphlen = (info->l3proto) ? 40 : 20;
1521 	buf->ipv4 = (info->l3proto) ? false : true;
1522 	buf->iph = mem + buf->maclen;
1523 	iph = (struct ip *)buf->iph;
1524 	buf->tcph = buf->iph + iphlen;
1525 	tcph = (struct tcphdr *)buf->tcph;
1526 
1527 	if (buf->ipv4) {
1528 		pkt_len = ntohs(iph->ip_len);
1529 	} else {
1530 		ip6h = (struct ip6_hdr *)buf->iph;
1531 		pkt_len = ntohs(ip6h->ip6_plen) + iphlen;
1532 	}
1533 
1534 	buf->totallen = pkt_len + buf->maclen;
1535 
1536 	if (info->payload_len < buf->totallen) {
1537 		irdma_debug(buf->vsi->dev, IRDMA_DEBUG_ERR,
1538 		    "payload_len = 0x%x totallen expected 0x%x\n",
1539 			    info->payload_len, buf->totallen);
1540 		return -EINVAL;
1541 	}
1542 
1543 	buf->tcphlen = tcph->th_off << 2;
1544 	buf->datalen = pkt_len - iphlen - buf->tcphlen;
1545 	buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
1546 	buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
1547 	buf->seqnum = ntohl(tcph->th_seq);
1548 
1549 	return 0;
1550 }
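
/*
 * Worked length example for the GEN_1 parse above (VLAN-tagged IPv4
 * frame carrying 1460 payload bytes, 20-byte TCP header): maclen = 18,
 * iphlen = 20, ip_len = 20 + 20 + 1460 = 1500, so totallen = 1518,
 * tcphlen = 5 << 2 = 20, datalen = 1500 - 20 - 20 = 1460 and
 * hdrlen = 18 + 20 + 20 = 58.
 */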
1551 
1552 /**
1553  * irdma_puda_get_tcpip_info - get tcpip info from puda buffer
1554  * @info: to get information
1555  * @buf: puda buffer
1556  */
1557 int
1558 irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
1559 			  struct irdma_puda_buf *buf)
1560 {
1561 	struct tcphdr *tcph;
1562 	u32 pkt_len;
1563 	u8 *mem;
1564 
1565 	if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1566 		return irdma_gen1_puda_get_tcpip_info(info, buf);
1567 
1568 	mem = buf->mem.va;
1569 	buf->vlan_valid = info->vlan_valid;
1570 	if (info->vlan_valid)
1571 		buf->vlan_id = info->vlan;
1572 
1573 	buf->ipv4 = info->ipv4;
1574 	if (buf->ipv4)
1575 		buf->iph = mem + IRDMA_IPV4_PAD;
1576 	else
1577 		buf->iph = mem;
1578 
1579 	buf->tcph = mem + IRDMA_TCP_OFFSET;
1580 	tcph = (struct tcphdr *)buf->tcph;
1581 	pkt_len = info->payload_len;
1582 	buf->totallen = pkt_len;
1583 	buf->tcphlen = tcph->th_off << 2;
1584 	buf->datalen = pkt_len - IRDMA_TCP_OFFSET - buf->tcphlen;
1585 	buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
1586 	buf->hdrlen = IRDMA_TCP_OFFSET + buf->tcphlen;
1587 	buf->seqnum = ntohl(tcph->th_seq);
1588 
1589 	if (info->smac_valid) {
1590 		ether_addr_copy(buf->smac, info->smac);
1591 		buf->smac_valid = true;
1592 	}
1593 
1594 	return 0;
1595 }
1596 
1597 /**
1598  * irdma_hw_stats_timeout - Stats timer handler that updates all HW stats
1599  * @t: timer_list pointer
1600  */
1601 static void
1602 irdma_hw_stats_timeout(struct timer_list *t)
1603 {
1604 	struct irdma_vsi_pestat *pf_devstat =
1605 	timer_container_of(pf_devstat, t, stats_timer);
1606 	struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;
1607 
1608 	if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1609 		irdma_cqp_gather_stats_cmd(sc_vsi->dev, sc_vsi->pestat, false);
1610 
1611 	mod_timer(&pf_devstat->stats_timer,
1612 		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
1613 }
1614 
1615 /**
1616  * irdma_hw_stats_start_timer - Start periodic stats timer
1617  * @vsi: vsi structure pointer
1618  */
1619 void
1620 irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi)
1621 {
1622 	struct irdma_vsi_pestat *devstat = vsi->pestat;
1623 
1624 	timer_setup(&devstat->stats_timer, irdma_hw_stats_timeout, 0);
1625 	mod_timer(&devstat->stats_timer,
1626 		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
1627 }
1628 
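/*
 * The handler re-arms itself, so the single timer_setup()/mod_timer()
 * pair above keeps stats collection running every STATS_TIMER_DELAY ms
 * until irdma_hw_stats_stop_timer() tears it down. A minimal sketch of
 * the same self-rearming pattern (hypothetical names, editor's
 * illustration):
 *
 *	static void my_timeout(struct timer_list *t)
 *	{
 *		struct my_ctx *ctx = timer_container_of(ctx, t, timer);
 *
 *		do_periodic_work(ctx);
 *		mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(1000));
 *	}
 */
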
1629 /**
1630  * irdma_hw_stats_stop_timer - Delete periodic stats timer
1631  * @vsi: pointer to vsi structure
1632  */
1633 void
1634 irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi)
1635 {
1636 	struct irdma_vsi_pestat *devstat = vsi->pestat;
1637 
1638 	del_timer_sync(&devstat->stats_timer);
1639 }
1640 
1641 /**
1642  * irdma_process_cqp_stats - Check for wrap and update stats
1643  * @cqp_request: cqp_request structure pointer
1644  */
1645 static void
1646 irdma_process_cqp_stats(struct irdma_cqp_request *cqp_request)
1647 {
1648 	struct irdma_vsi_pestat *pestat = cqp_request->param;
1649 
1650 	sc_vsi_update_stats(pestat->vsi);
1651 }
1652 
1653 /**
1654  * irdma_cqp_gather_stats_cmd - Gather stats
1655  * @dev: pointer to device structure
1656  * @pestat: pointer to stats info
1657  * @wait: true to wait for the stats gather to complete
1658  */
1659 int
1660 irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
1661 			   struct irdma_vsi_pestat *pestat, bool wait)
1662 {
1664 	struct irdma_pci_f *rf = dev_to_rf(dev);
1665 	struct irdma_cqp *iwcqp = &rf->cqp;
1666 	struct irdma_cqp_request *cqp_request;
1667 	struct cqp_cmds_info *cqp_info;
1668 	int status;
1669 
1670 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
1671 	if (!cqp_request)
1672 		return -ENOMEM;
1673 
1674 	cqp_info = &cqp_request->info;
1675 	cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
1676 	cqp_info->post_sq = 1;
1677 	cqp_info->in.u.stats_gather.info = pestat->gather_info;
1678 	cqp_info->in.u.stats_gather.scratch = (uintptr_t)cqp_request;
1679 	cqp_info->in.u.stats_gather.cqp = &rf->cqp.sc_cqp;
1680 	cqp_request->param = pestat;
1681 	if (!wait)
1682 		cqp_request->callback_fcn = irdma_process_cqp_stats;
1683 	status = irdma_handle_cqp_op(rf, cqp_request);
1684 	if (wait)
1685 		sc_vsi_update_stats(pestat->vsi);
1686 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1687 
1688 	return status;
1689 }
1690 
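/*
 * Most CQP command wrappers below follow the lifecycle shown above; a
 * condensed sketch of the pattern (editor's summary, not driver code):
 *
 *	req = irdma_alloc_and_get_cqp_request(iwcqp, wait);	// takes a ref
 *	cqp_info = &req->info;					// fill command
 *	cqp_info->cqp_cmd = <op>;
 *	cqp_info->post_sq = 1;
 *	status = irdma_handle_cqp_op(rf, req);	// posts; blocks iff 'wait'
 *	irdma_put_cqp_request(iwcqp, req);	// drops the ref
 *
 * In the non-waiting case a callback_fcn runs on completion instead, as
 * irdma_process_cqp_stats() does here.
 */
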
1691 /**
1692  * irdma_cqp_ceq_cmd - Create/Destroy CEQs beyond CEQ 0
1693  * @dev: pointer to device info
1694  * @sc_ceq: pointer to ceq structure
1695  * @op: Create or Destroy
1696  */
1697 int
1698 irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
1699 		  u8 op)
1700 {
1701 	struct irdma_cqp_request *cqp_request;
1702 	struct cqp_cmds_info *cqp_info;
1703 	struct irdma_pci_f *rf = dev_to_rf(dev);
1704 	int status;
1705 
1706 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1707 	if (!cqp_request)
1708 		return -ENOMEM;
1709 
1710 	cqp_info = &cqp_request->info;
1711 	cqp_info->post_sq = 1;
1712 	cqp_info->cqp_cmd = op;
1713 	cqp_info->in.u.ceq_create.ceq = sc_ceq;
1714 	cqp_info->in.u.ceq_create.scratch = (uintptr_t)cqp_request;
1715 
1716 	status = irdma_handle_cqp_op(rf, cqp_request);
1717 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1718 
1719 	return status;
1720 }
1721 
1722 /**
1723  * irdma_cqp_aeq_cmd - Create/Destroy AEQ
1724  * @dev: pointer to device info
1725  * @sc_aeq: pointer to aeq structure
1726  * @op: Create or Destroy
1727  */
1728 int
1729 irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
1730 		  u8 op)
1731 {
1732 	struct irdma_cqp_request *cqp_request;
1733 	struct cqp_cmds_info *cqp_info;
1734 	struct irdma_pci_f *rf = dev_to_rf(dev);
1735 	int status;
1736 
1737 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1738 	if (!cqp_request)
1739 		return -ENOMEM;
1740 
1741 	cqp_info = &cqp_request->info;
1742 	cqp_info->post_sq = 1;
1743 	cqp_info->cqp_cmd = op;
1744 	cqp_info->in.u.aeq_create.aeq = sc_aeq;
1745 	cqp_info->in.u.aeq_create.scratch = (uintptr_t)cqp_request;
1746 
1747 	status = irdma_handle_cqp_op(rf, cqp_request);
1748 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1749 
1750 	return status;
1751 }
1752 
1753 /**
1754  * irdma_cqp_ws_node_cmd - Add/modify/delete ws node
1755  * @dev: pointer to device structure
1756  * @cmd: Add, modify or delete
1757  * @node_info: pointer to ws node info
1758  */
1759 int
1760 irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
1761 		      struct irdma_ws_node_info *node_info)
1762 {
1763 	struct irdma_pci_f *rf = dev_to_rf(dev);
1764 	struct irdma_cqp *iwcqp = &rf->cqp;
1765 	struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
1766 	struct irdma_cqp_request *cqp_request;
1767 	struct cqp_cmds_info *cqp_info;
1768 	int status;
1769 	bool poll;
1770 
1771 	if (!rf->sc_dev.ceq_valid)
1772 		poll = true;
1773 	else
1774 		poll = false;
1775 
1776 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, !poll);
1777 	if (!cqp_request)
1778 		return -ENOMEM;
1779 
1780 	cqp_info = &cqp_request->info;
1781 	cqp_info->cqp_cmd = cmd;
1782 	cqp_info->post_sq = 1;
1783 	cqp_info->in.u.ws_node.info = *node_info;
1784 	cqp_info->in.u.ws_node.cqp = cqp;
1785 	cqp_info->in.u.ws_node.scratch = (uintptr_t)cqp_request;
1786 	cqp_info->create = true;
1787 	status = irdma_handle_cqp_op(rf, cqp_request);
1788 	if (status)
1789 		goto exit;
1790 
1791 	if (poll) {
1792 		struct irdma_ccq_cqe_info compl_info;
1793 
1794 		status = irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_WORK_SCHED_NODE,
1795 						       &compl_info);
1796 		node_info->qs_handle = compl_info.op_ret_val;
1797 		irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB,
1798 			    "opcode=%d, compl_info.retval=%d\n",
1799 			    compl_info.op_code, compl_info.op_ret_val);
1800 	} else {
1801 		node_info->qs_handle = cqp_request->compl_info.op_ret_val;
1802 	}
1803 
1804 exit:
1805 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1806 
1807 	return status;
1808 }
1809 
1810 /**
1811  * irdma_ah_do_cqp - perform an AH cqp operation
1812  * @rf: RDMA PCI function
1813  * @sc_ah: address handle
1814  * @cmd: AH operation
1815  * @wait: wait if true
1816  * @callback_fcn: Callback function on CQP op completion
1817  * @cb_param: parameter for callback function
1818  *
1819  * Returns 0 on success or a negative errno on failure
1820  */
1821 static int
1822 irdma_ah_do_cqp(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
1823 		bool wait,
1824 		void (*callback_fcn) (struct irdma_cqp_request *),
1825 		void *cb_param)
1826 {
1827 	struct irdma_cqp_request *cqp_request;
1828 	struct cqp_cmds_info *cqp_info;
1829 	int status;
1830 
1831 	if (cmd != IRDMA_OP_AH_CREATE && cmd != IRDMA_OP_AH_DESTROY)
1832 		return -EINVAL;
1833 
1834 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
1835 	if (!cqp_request)
1836 		return -ENOMEM;
1837 
1838 	cqp_info = &cqp_request->info;
1839 	cqp_info->cqp_cmd = cmd;
1840 	cqp_info->post_sq = 1;
1841 	if (cmd == IRDMA_OP_AH_CREATE) {
1842 		if (!wait)
1843 			irdma_get_cqp_request(cqp_request);
1844 		sc_ah->ah_info.cqp_request = cqp_request;
1845 
1846 		cqp_info->in.u.ah_create.info = sc_ah->ah_info;
1847 		cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request;
1848 		cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp;
1849 		cqp_info->create = true;
1850 	} else if (cmd == IRDMA_OP_AH_DESTROY) {
1851 		cqp_info->in.u.ah_destroy.info = sc_ah->ah_info;
1852 		cqp_info->in.u.ah_destroy.scratch = (uintptr_t)cqp_request;
1853 		cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp;
1854 	}
1855 
1856 	if (!wait) {
1857 		cqp_request->callback_fcn = callback_fcn;
1858 		cqp_request->param = cb_param;
1859 	}
1860 	status = irdma_handle_cqp_op(rf, cqp_request);
1861 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1862 
1863 	if (status)
1864 		return -ENOMEM;
1865 
1866 	if (wait)
1867 		sc_ah->ah_info.ah_valid = (cmd != IRDMA_OP_AH_DESTROY);
1868 
1869 	return 0;
1870 }
1871 
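/*
 * Reference-counting note on the function above (editor's summary): for an
 * asynchronous AH create, irdma_get_cqp_request() takes an extra reference
 * so the request outlives the irdma_put_cqp_request() call and stays
 * reachable through sc_ah->ah_info.cqp_request; the AH completion
 * callbacks (irdma_ieq_ah_cb()/irdma_ilq_ah_cb()) drop that reference when
 * !cqp_request->waiting.
 */
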
1872 int
1873 irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
1874 		bool wait,
1875 		void (*callback_fcn) (struct irdma_cqp_request *),
1876 		void *cb_param)
1877 {
1878 	int status;
1879 
1880 	if (cmd == IRDMA_OP_AH_CREATE) {
1881 		status = irdma_get_arp(rf, sc_ah->ah_info.dst_arpindex);
1882 		if (status) {
1883 			irdma_dev_err(&rf->iwdev->ibdev, "%s get_arp failed for index = %d\n",
1884 				      __func__, sc_ah->ah_info.dst_arpindex);
1885 
1886 			return -EINVAL;
1887 		}
1888 		status = irdma_ah_do_cqp(rf, sc_ah, cmd, wait, callback_fcn,
1889 					 cb_param);
1890 		if (status)
1891 			irdma_put_arp(rf, sc_ah->ah_info.dst_arpindex);
1892 	} else {
1893 		status = irdma_ah_do_cqp(rf, sc_ah, cmd, wait, callback_fcn,
1894 					 cb_param);
1895 		if (cmd == IRDMA_OP_AH_DESTROY)
1896 			irdma_put_arp(rf, sc_ah->ah_info.dst_arpindex);
1897 	}
1898 
1899 	return status;
1900 }
1901 
1902 /**
1903  * irdma_ieq_ah_cb - callback after creation of AH for IEQ
1904  * @cqp_request: pointer to cqp_request of create AH
1905  */
1906 static void
1907 irdma_ieq_ah_cb(struct irdma_cqp_request *cqp_request)
1908 {
1909 	struct irdma_sc_qp *qp = cqp_request->param;
1910 	struct irdma_sc_ah *sc_ah = qp->pfpdu.ah;
1911 	unsigned long flags;
1912 
1913 	spin_lock_irqsave(&qp->pfpdu.lock, flags);
1914 	if (!cqp_request->compl_info.op_ret_val) {
1915 		sc_ah->ah_info.ah_valid = true;
1916 		irdma_ieq_process_fpdus(qp, qp->vsi->ieq);
1917 	} else {
1918 		sc_ah->ah_info.ah_valid = false;
1919 		irdma_ieq_cleanup_qp(qp->vsi->ieq, qp);
1920 	}
1921 	spin_unlock_irqrestore(&qp->pfpdu.lock, flags);
1922 	if (!cqp_request->waiting)
1923 		irdma_put_cqp_request(sc_ah->dev->cqp->back_cqp,
1924 				      cqp_request);
1925 }
1926 
1927 /**
1928  * irdma_ilq_ah_cb - callback after creation of AH for ILQ
1929  * @cqp_request: pointer to cqp_request of create AH
1930  */
1931 static void
1932 irdma_ilq_ah_cb(struct irdma_cqp_request *cqp_request)
1933 {
1934 	struct irdma_cm_node *cm_node = cqp_request->param;
1935 	struct irdma_sc_ah *sc_ah = cm_node->ah;
1936 
1937 	sc_ah->ah_info.ah_valid = !cqp_request->compl_info.op_ret_val;
1938 	irdma_add_conn_est_qh(cm_node);
1939 	if (!cqp_request->waiting)
1940 		irdma_put_cqp_request(sc_ah->dev->cqp->back_cqp,
1941 				      cqp_request);
1942 }
1943 
1944 /**
1945  * irdma_puda_create_ah - create an AH for ILQ/IEQ QPs
1946  * @dev: device pointer
1947  * @ah_info: Address handle info
1948  * @wait: when true, wait for the operation to complete
1949  * @type: ILQ/IEQ
1950  * @cb_param: Callback param when not waiting
1951  * @ah_ret: Returned pointer to address handle if created
1953  */
1954 int
1955 irdma_puda_create_ah(struct irdma_sc_dev *dev,
1956 		     struct irdma_ah_info *ah_info, bool wait,
1957 		     enum puda_rsrc_type type, void *cb_param,
1958 		     struct irdma_sc_ah **ah_ret)
1959 {
1960 	struct irdma_sc_ah *ah;
1961 	struct irdma_pci_f *rf = dev_to_rf(dev);
1962 	int err;
1963 
1964 	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
1965 	*ah_ret = ah;
1966 	if (!ah)
1967 		return -ENOMEM;
1968 
1969 	err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah,
1970 			       &ah_info->ah_idx, &rf->next_ah);
1971 	if (err)
1972 		goto err_free;
1973 
1974 	ah->dev = dev;
1975 	ah->ah_info = *ah_info;
1976 
1977 	if (type == IRDMA_PUDA_RSRC_TYPE_ILQ)
1978 		err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
1979 				      irdma_ilq_ah_cb, cb_param);
1980 	else
1981 		err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
1982 				      irdma_ieq_ah_cb, cb_param);
1983 
1984 	if (err)
1985 		goto error;
1986 	return 0;
1987 
1988 error:
1989 	irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
1990 err_free:
1991 	kfree(ah);
1992 	*ah_ret = NULL;
1993 	return -ENOMEM;
1994 }
1995 
1996 /**
1997  * irdma_puda_free_ah - free a puda address handle
1998  * @dev: device pointer
1999  * @ah: The address handle to free
2000  */
2001 void
2002 irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
2003 {
2004 	struct irdma_pci_f *rf = dev_to_rf(dev);
2005 
2006 	if (!ah)
2007 		return;
2008 
2009 	if (ah->ah_info.ah_valid) {
2010 		irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_DESTROY, false, NULL, NULL);
2011 		irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
2012 	}
2013 
2014 	kfree(ah);
2015 }
2016 
2017 /**
2018  * irdma_prm_add_pble_mem - add memory to pble resources
2019  * @pprm: pble resource manager
2020  * @pchunk: chunk of memory to add
2021  */
2022 int
2023 irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
2024 		       struct irdma_chunk *pchunk)
2025 {
2026 	u64 sizeofbitmap;
2027 
2028 	if (pchunk->size & 0xfff)
2029 		return -EINVAL;
2030 
2031 	sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
2032 
2033 	pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
2034 	if (!pchunk->bitmapbuf)
2035 		return -ENOMEM;
2036 
2037 	pchunk->sizeofbitmap = sizeofbitmap;
2038 	/* each pble is 8 bytes hence shift by 3 */
2039 	pprm->total_pble_alloc += pchunk->size >> 3;
2040 	pprm->free_pble_cnt += pchunk->size >> 3;
2041 
2042 	return 0;
2043 }
2044 
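/*
 * Worked example (editor's illustration, assuming pble_shift == 12, i.e.
 * 4K allocation granularity): a 2 MB chunk yields
 *
 *	sizeofbitmap = (2 << 20) >> 12;		// 512 bits, one per 4K unit
 *	total_pble_alloc += (2 << 20) >> 3;	// 262144 eight-byte pbles
 *
 * The (pchunk->size & 0xfff) check above rejects chunks that are not a
 * multiple of 4K.
 */
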
2045 /**
2046  * irdma_prm_get_pbles - get pble's from prm
2047  * @pprm: pble resource manager
2048  * @chunkinfo: information about the chunk where pbles were acquired
2049  * @mem_size: size of pble memory needed
2050  * @vaddr: returns virtual address of pble memory
2051  * @fpm_addr: returns fpm address of pble memory
2052  */
2053 int
2054 irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
2055 		    struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
2056 		    u64 **vaddr, u64 *fpm_addr)
2057 {
2058 	u64 bits_needed;
2059 	u64 bit_idx = PBLE_INVALID_IDX;
2060 	struct irdma_chunk *pchunk = NULL;
2061 	struct list_head *chunk_entry = (&pprm->clist)->next;
2062 	u32 offset;
2063 	unsigned long flags;
2064 
2065 	*vaddr = NULL;
2066 	*fpm_addr = 0;
2067 
2068 	bits_needed = DIV_ROUND_UP_ULL(mem_size, BIT_ULL(pprm->pble_shift));
2069 
2070 	spin_lock_irqsave(&pprm->prm_lock, flags);
2071 	while (chunk_entry != &pprm->clist) {
2072 		pchunk = (struct irdma_chunk *)chunk_entry;
2073 		bit_idx = bitmap_find_next_zero_area(pchunk->bitmapbuf,
2074 						     pchunk->sizeofbitmap, 0,
2075 						     bits_needed, 0);
2076 		if (bit_idx < pchunk->sizeofbitmap)
2077 			break;
2078 
2079 		/* advance to the next chunk in the list */
2080 		chunk_entry = (&pchunk->list)->next;
2081 	}
2082 
2083 	if (!pchunk || bit_idx >= pchunk->sizeofbitmap) {
2084 		spin_unlock_irqrestore(&pprm->prm_lock, flags);
2085 		return -ENOMEM;
2086 	}
2087 
2088 	bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed);
2089 	offset = bit_idx << pprm->pble_shift;
2090 	*vaddr = (u64 *)((u8 *)pchunk->vaddr + offset);
2091 	*fpm_addr = pchunk->fpm_addr + offset;
2092 
2093 	chunkinfo->pchunk = pchunk;
2094 	chunkinfo->bit_idx = bit_idx;
2095 	chunkinfo->bits_used = bits_needed;
2096 	/* each pble is 8 bytes, hence the (pble_shift - 3) shift */
2097 	pprm->free_pble_cnt -= chunkinfo->bits_used << (pprm->pble_shift - 3);
2098 	spin_unlock_irqrestore(&pprm->prm_lock, flags);
2099 
2100 	return 0;
2101 }
2102 
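/*
 * Worked example (editor's illustration, again assuming pble_shift == 12):
 * a request for mem_size == 8K needs
 *
 *	bits_needed = DIV_ROUND_UP_ULL(8192, 4096);	// 2 contiguous bits
 *
 * A successful search at bit_idx == 3 returns vaddr/fpm_addr at byte
 * offset 3 << 12 == 12K into the chunk and reduces free_pble_cnt by
 * 2 << (12 - 3) == 1024 pbles.
 */
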
2103 /**
2104  * irdma_prm_return_pbles - return pbles back to prm
2105  * @pprm: pble resource manager
2106  * @chunkinfo: chunk where pble's were acquired and to be freed
2107  */
2108 void
2109 irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
2110 		       struct irdma_pble_chunkinfo *chunkinfo)
2111 {
2112 	unsigned long flags;
2113 
2114 	spin_lock_irqsave(&pprm->prm_lock, flags);
2115 	pprm->free_pble_cnt += chunkinfo->bits_used << (pprm->pble_shift - 3);
2116 	bitmap_clear(chunkinfo->pchunk->bitmapbuf, chunkinfo->bit_idx,
2117 		     chunkinfo->bits_used);
2118 	spin_unlock_irqrestore(&pprm->prm_lock, flags);
2119 }
2120 
2121 int
2122 irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t *pg_dma,
2123 		       u32 pg_cnt)
2124 {
2125 	struct page *vm_page;
2126 	int i;
2127 	u8 *addr;
2128 
2129 	addr = (u8 *)(uintptr_t)va;
2130 	for (i = 0; i < pg_cnt; i++) {
2131 		vm_page = vmalloc_to_page(addr);
2132 		if (!vm_page)
2133 			goto err;
2134 
2135 		pg_dma[i] = dma_map_page(hw_to_dev(hw), vm_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
2136 		if (dma_mapping_error(hw_to_dev(hw), pg_dma[i]))
2137 			goto err;
2138 
2139 		addr += PAGE_SIZE;
2140 	}
2141 
2142 	return 0;
2143 
2144 err:
2145 	irdma_unmap_vm_page_list(hw, pg_dma, i);
2146 	return -ENOMEM;
2147 }
2148 
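/*
 * The loop above is the standard vmalloc-to-DMA pattern: vmalloc memory is
 * only virtually contiguous, so each PAGE_SIZE slice is resolved to its
 * backing page with vmalloc_to_page() and DMA-mapped individually; on any
 * failure the already-mapped prefix (pages 0..i-1) is unwound by the
 * irdma_unmap_vm_page_list(hw, pg_dma, i) call at the err label.
 */
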
2149 void
2150 irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt)
2151 {
2152 	int i;
2153 
2154 	for (i = 0; i < pg_cnt; i++)
2155 		dma_unmap_page(hw_to_dev(hw), pg_dma[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
2156 }
2157 
2158 /**
2159  * irdma_pble_free_paged_mem - free virtual paged memory
2160  * @chunk: chunk to free with paged memory
2161  */
2162 void
2163 irdma_pble_free_paged_mem(struct irdma_chunk *chunk)
2164 {
2165 	if (!chunk->pg_cnt)
2166 		goto done;
2167 
2168 	irdma_unmap_vm_page_list(chunk->dev->hw, chunk->dmainfo.dmaaddrs,
2169 				 chunk->pg_cnt);
2170 
2171 done:
2172 	kfree(chunk->dmainfo.dmaaddrs);
2173 	chunk->dmainfo.dmaaddrs = NULL;
2174 	vfree(chunk->vaddr);
2175 	chunk->vaddr = NULL;
2176 	chunk->type = 0;
2177 }
2178 
2179 /**
2180  * irdma_pble_get_paged_mem - allocate paged memory for pbles
2181  * @chunk: chunk to add for paged memory
2182  * @pg_cnt: number of pages needed
2183  */
2184 int
2185 irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt)
2186 {
2187 	u32 size;
2188 	void *va;
2189 
2190 	chunk->dmainfo.dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);	/* 8 bytes per dma_addr_t */
2191 	if (!chunk->dmainfo.dmaaddrs)
2192 		return -ENOMEM;
2193 
2194 	size = PAGE_SIZE * pg_cnt;
2195 	va = vmalloc(size);
2196 	if (!va)
2197 		goto err;
2198 
2199 	if (irdma_map_vm_page_list(chunk->dev->hw, va, chunk->dmainfo.dmaaddrs,
2200 				   pg_cnt)) {
2201 		vfree(va);
2202 		goto err;
2203 	}
2204 	chunk->vaddr = va;
2205 	chunk->size = size;
2206 	chunk->pg_cnt = pg_cnt;
2207 	chunk->type = PBLE_SD_PAGED;
2208 
2209 	return 0;
2210 err:
2211 	kfree(chunk->dmainfo.dmaaddrs);
2212 	chunk->dmainfo.dmaaddrs = NULL;
2213 
2214 	return -ENOMEM;
2215 }
2216 
2217 /**
2218  * irdma_alloc_ws_node_id - Allocate a tx scheduler node ID
2219  * @dev: device pointer
2220  */
2221 u16
2222 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev)
2223 {
2224 	struct irdma_pci_f *rf = dev_to_rf(dev);
2225 	u32 next = 1;
2226 	u32 node_id;
2227 
2228 	if (irdma_alloc_rsrc(rf, rf->allocated_ws_nodes, rf->max_ws_node_id,
2229 			     &node_id, &next))
2230 		return IRDMA_WS_NODE_INVALID;
2231 
2232 	return (u16)node_id;
2233 }
2234 
2235 /**
2236  * irdma_free_ws_node_id - Free a tx scheduler node ID
2237  * @dev: device pointer
2238  * @node_id: Work scheduler node ID
2239  */
2240 void
2241 irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id)
2242 {
2243 	struct irdma_pci_f *rf = dev_to_rf(dev);
2244 
2245 	irdma_free_rsrc(rf, rf->allocated_ws_nodes, (u32)node_id);
2246 }
2247 
2248 /**
2249  * irdma_modify_qp_to_err - Modify a QP to error
2250  * @sc_qp: qp structure
2251  */
2252 void
2253 irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
2254 {
2255 	struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
2256 	struct ib_qp_attr attr;
2257 
2258 	if (qp->iwdev->rf->reset)
2259 		return;
2260 	attr.qp_state = IB_QPS_ERR;
2261 
2262 	if (rdma_protocol_roce(qp->ibqp.device, 1))
2263 		irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
2264 	else
2265 		irdma_modify_qp(&qp->ibqp, &attr, IB_QP_STATE, NULL);
2266 }
2267 
2268 void
2269 irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
2270 {
2271 	struct ib_event ibevent;
2272 
2273 	if (!iwqp->ibqp.event_handler)
2274 		return;
2275 
2276 	switch (event) {
2277 	case IRDMA_QP_EVENT_CATASTROPHIC:
2278 		ibevent.event = IB_EVENT_QP_FATAL;
2279 		break;
2280 	case IRDMA_QP_EVENT_ACCESS_ERR:
2281 		ibevent.event = IB_EVENT_QP_ACCESS_ERR;
2282 		break;
2283 	case IRDMA_QP_EVENT_REQ_ERR:
2284 		ibevent.event = IB_EVENT_QP_REQ_ERR;
2285 		break;
2286 	}
2287 	ibevent.device = iwqp->ibqp.device;
2288 	ibevent.element.qp = &iwqp->ibqp;
2289 	iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
2290 }
2291 
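/**
 * clear_qp_ctx_addr - scrub address words from a raw QP context
 * @ctx: raw QP context buffer
 *
 * Masks the qwords at byte offsets 272, 296 and 312 down to their
 * non-address bits and zeroes the qword at offset 368, so the debug dump
 * below does not print raw addresses.
 */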
2292 static void
2293 clear_qp_ctx_addr(__le64 *ctx)
2294 {
2295 	u64 tmp;
2296 
2297 	get_64bit_val(ctx, 272, &tmp);
2298 	tmp &= GENMASK_ULL(63, 58);
2299 	set_64bit_val(ctx, 272, tmp);
2300 
2301 	get_64bit_val(ctx, 296, &tmp);
2302 	tmp &= GENMASK_ULL(7, 0);
2303 	set_64bit_val(ctx, 296, tmp);
2304 
2305 	get_64bit_val(ctx, 312, &tmp);
2306 	tmp &= GENMASK_ULL(7, 0);
2307 	set_64bit_val(ctx, 312, tmp);
2308 
2309 	set_64bit_val(ctx, 368, 0);
2310 }
2311 
2312 /**
2313  * irdma_upload_qp_context - upload raw QP context
2314  * @rf: RDMA PCI function
2315  * @qpn: QP ID
2316  * @qp_type: QP Type
2317  * @freeze: freeze QP
2318  * @raw: raw context flag
2319  */
2320 int
2321 irdma_upload_qp_context(struct irdma_pci_f *rf, u32 qpn,
2322 			u8 qp_type, bool freeze, bool raw)
2323 {
2324 	struct irdma_dma_mem dma_mem;
2325 	struct irdma_sc_dev *dev;
2326 	struct irdma_cqp *iwcqp;
2327 	struct irdma_cqp_request *cqp_request;
2328 	struct cqp_cmds_info *cqp_info;
2329 	struct irdma_upload_context_info *info;
2330 	int ret;
2331 	u32 *ctx;
2332 
2333 	dev = &rf->sc_dev;
2334 	iwcqp = &rf->cqp;
2335 
2336 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2337 	if (!cqp_request) {
2338 		irdma_debug(dev, IRDMA_DEBUG_QP, "Could not get CQP req for QP [%u]\n", qpn);
2339 		return -EINVAL;
2340 	}
2341 	cqp_info = &cqp_request->info;
2342 	info = &cqp_info->in.u.qp_upload_context.info;
2343 	cqp_info->cqp_cmd = IRDMA_OP_QP_UPLOAD_CONTEXT;
2344 	cqp_info->post_sq = 1;
2345 	cqp_info->in.u.qp_upload_context.dev = dev;
2346 	cqp_info->in.u.qp_upload_context.scratch = (uintptr_t)cqp_request;
2347 
2348 	dma_mem.size = PAGE_SIZE;
2349 	dma_mem.va = irdma_allocate_dma_mem(dev->hw, &dma_mem, dma_mem.size, PAGE_SIZE);
2350 	if (!dma_mem.va) {
2351 		irdma_put_cqp_request(&rf->cqp, cqp_request);
2352 		irdma_debug(dev, IRDMA_DEBUG_QP, "Could not allocate buffer for QP [%u]\n", qpn);
2353 		return -ENOMEM;
2354 	}
2355 
2356 	ctx = dma_mem.va;
2357 	info->buf_pa = dma_mem.pa;
2358 	info->raw_format = raw;
2359 	info->freeze_qp = freeze;
2360 	info->qp_type = qp_type;	/* 1 is iWARP and 2 UDA */
2361 	info->qp_id = qpn;
2362 	ret = irdma_handle_cqp_op(rf, cqp_request);
2363 	if (ret)
2364 		goto error;
2365 	irdma_debug(dev, IRDMA_DEBUG_QP, "PRINT CONTEXT QP [%u]\n", info->qp_id);
2366 	{
2367 		u32 i, j;
2368 
2369 		clear_qp_ctx_addr(dma_mem.va);
2370 		for (i = 0, j = 0; i < 32; i++, j += 4)
2371 			irdma_debug(dev, IRDMA_DEBUG_QP,
2372 				    "[%u] %u:\t [%08X %08x %08X %08X]\n",
2373 				    info->qp_id, (j * 4), ctx[j], ctx[j + 1],
2374 				    ctx[j + 2], ctx[j + 3]);
2375 	}
2376 error:
2377 	irdma_put_cqp_request(iwcqp, cqp_request);
2378 	irdma_free_dma_mem(dev->hw, &dma_mem);
2379 
2380 	return ret;
2381 }
2382 
2383 static bool
2384 qp_has_unpolled_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
2385 {
2386 	struct irdma_cq_uk *cq = &iwcq->sc_cq.cq_uk;
2387 	struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
2388 	u32 cq_head = IRDMA_RING_CURRENT_HEAD(cq->cq_ring);
2389 	u64 qword3, comp_ctx;
2390 	__le64 *cqe;
2391 	u8 polarity, cq_polarity;
2392 
2393 	cq_polarity = cq->polarity;
2394 	do {
2395 		if (cq->avoid_mem_cflct)
2396 			cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
2397 		else
2398 			cqe = cq->cq_base[cq_head].buf;
2399 		get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
2400 		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
2401 
2402 		if (polarity != cq_polarity)
2403 			break;
2404 
2405 		/* Ensure CQE contents are read after valid bit is checked */
2406 		rmb();
2407 
2408 		get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
2409 		if ((struct irdma_qp_uk *)(irdma_uintptr) comp_ctx == qp)
2410 			return true;
2411 
2412 		cq_head = (cq_head + 1) % cq->cq_ring.size;
2413 		if (!cq_head)
2414 			cq_polarity ^= 1;
2415 	} while (true);
2416 
2417 	return false;
2418 }
2419 
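/*
 * Polarity walk-through for the scan above (editor's note): valid CQEs
 * carry an ownership bit that flips on every ring wrap, so the loop stops
 * at the first CQE whose IRDMA_CQ_VALID bit disagrees with the expected
 * polarity:
 *
 *	cq_head = (cq_head + 1) % cq->cq_ring.size;
 *	if (!cq_head)		// wrapped back to slot 0
 *		cq_polarity ^= 1;	// expected owner bit flips
 */
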
2420 void
2421 irdma_remove_cmpls_list(struct irdma_cq *iwcq)
2422 {
2423 	struct irdma_cmpl_gen *cmpl_node;
2424 	struct list_head *tmp_node, *list_node;
2425 
2426 	list_for_each_safe(list_node, tmp_node, &iwcq->cmpl_generated) {
2427 		cmpl_node = list_entry(list_node, struct irdma_cmpl_gen, list);
2428 		list_del(&cmpl_node->list);
2429 		kfree(cmpl_node);
2430 	}
2431 }
2432 
2433 int
2434 irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info)
2435 {
2436 	struct irdma_cmpl_gen *cmpl;
2437 
2438 	if (list_empty(&iwcq->cmpl_generated))
2439 		return -ENOENT;
2440 	cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, struct irdma_cmpl_gen, list);
2441 	list_del(&cmpl->list);
2442 	memcpy(cq_poll_info, &cmpl->cpi, sizeof(*cq_poll_info));
2443 	kfree(cmpl);
2444 
2445 	irdma_debug(iwcq->sc_cq.dev, IRDMA_DEBUG_VERBS,
2446 		    "%s: Poll artificially generated completion for QP 0x%X, op %u, wr_id=0x%lx\n",
2447 		    __func__, cq_poll_info->qp_id, cq_poll_info->op_type,
2448 		    cq_poll_info->wr_id);
2449 
2450 	return 0;
2451 }
2452 
2453 /**
2454  * irdma_set_cpi_common_values - fill in values for polling info struct
2455  * @cpi: resulting structure of cq_poll_info type
2456  * @qp: QPair
2457  * @qp_num: id of the QP
2458  */
2459 static void
2460 irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi,
2461 			    struct irdma_qp_uk *qp, u32 qp_num)
2462 {
2463 	cpi->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
2464 	cpi->error = 1;
2465 	cpi->major_err = IRDMA_FLUSH_MAJOR_ERR;
2466 	cpi->minor_err = FLUSH_GENERAL_ERR;
2467 	cpi->qp_handle = (irdma_qp_handle) (uintptr_t)qp;
2468 	cpi->qp_id = qp_num;
2469 }
2470 
2471 static inline void
2472 irdma_comp_handler(struct irdma_cq *cq)
2473 {
2474 	struct irdma_device *iwdev = to_iwdev(cq->ibcq.device);
2475 	struct irdma_ceq *ceq = &iwdev->rf->ceqlist[cq->sc_cq.ceq_id];
2476 	unsigned long flags;
2477 
2478 	if (!cq->ibcq.comp_handler)
2479 		return;
2480 
2481 	if (atomic_read(&cq->armed)) {
2482 		spin_lock_irqsave(&ceq->ce_lock, flags);
2483 		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
2484 		spin_unlock_irqrestore(&ceq->ce_lock, flags);
2485 	}
2486 }
2487 
2488 /**
2489  * irdma_generate_flush_completions - generate completion from WRs
2490  * @iwqp: pointer to QP
2491  */
2492 void
2493 irdma_generate_flush_completions(struct irdma_qp *iwqp)
2494 {
2495 	struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
2496 	struct irdma_ring *sq_ring = &qp->sq_ring;
2497 	struct irdma_ring *rq_ring = &qp->rq_ring;
2498 	struct irdma_cmpl_gen *cmpl;
2499 	__le64 *sw_wqe;
2500 	u64 wqe_qword;
2501 	u32 wqe_idx;
2502 	bool compl_generated = false;
2503 	unsigned long flags1;
2504 
2505 	spin_lock_irqsave(&iwqp->iwscq->lock, flags1);
2506 	if (!qp_has_unpolled_cqes(iwqp, iwqp->iwscq)) {
2507 		unsigned long flags2;
2508 
2509 		spin_lock_irqsave(&iwqp->lock, flags2);
2510 		while (IRDMA_RING_MORE_WORK(*sq_ring)) {
2511 			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
2512 			if (!cmpl) {
2513 				spin_unlock_irqrestore(&iwqp->lock, flags2);
2514 				spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
2515 				return;
2516 			}
2517 
2518 			wqe_idx = sq_ring->tail;
2519 			irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);
2520 
2521 			cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
2522 			cmpl->cpi.signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
2523 			sw_wqe = qp->sq_base[wqe_idx].elem;
2524 			get_64bit_val(sw_wqe, IRDMA_BYTE_24, &wqe_qword);
2525 			cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
2526 			cmpl->cpi.q_type = IRDMA_CQE_QTYPE_SQ;
2527 			/* remove the SQ WR by moving SQ tail */
2528 			IRDMA_RING_SET_TAIL(*sq_ring,
2529 					    sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
2530 
2531 			if (cmpl->cpi.op_type == IRDMAQP_OP_NOP) {
2532 				kfree(cmpl);
2533 				continue;
2534 			}
2535 			irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_DEV,
2536 				    "%s: adding wr_id = 0x%lx SQ Completion to list qp_id=%d\n",
2537 				    __func__, cmpl->cpi.wr_id, qp->qp_id);
2538 			list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
2539 			compl_generated = true;
2540 		}
2541 		spin_unlock_irqrestore(&iwqp->lock, flags2);
2542 		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
2543 		if (compl_generated) {
2544 			irdma_comp_handler(iwqp->iwscq);
2545 			compl_generated = false;
2546 		}
2547 	} else {
2548 		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
2549 		irdma_sched_qp_flush_work(iwqp);
2550 	}
2551 
2552 	spin_lock_irqsave(&iwqp->iwrcq->lock, flags1);
2553 	if (!qp_has_unpolled_cqes(iwqp, iwqp->iwrcq)) {
2554 		unsigned long flags2;
2555 
2556 		spin_lock_irqsave(&iwqp->lock, flags2);
2557 		while (IRDMA_RING_MORE_WORK(*rq_ring)) {
2558 			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
2559 			if (!cmpl) {
2560 				spin_unlock_irqrestore(&iwqp->lock, flags2);
2561 				spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
2562 				return;
2563 			}
2564 
2565 			wqe_idx = rq_ring->tail;
2566 			irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);
2567 
2568 			cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx];
2569 			cmpl->cpi.signaled = 1;
2570 			cmpl->cpi.op_type = IRDMA_OP_TYPE_REC;
2571 			cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
2572 			/* remove the RQ WR by moving RQ tail */
2573 			IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
2574 			irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_DEV,
2575 				    "%s: adding wr_id = 0x%lx RQ Completion to list qp_id=%d, wqe_idx=%d\n",
2576 				    __func__, cmpl->cpi.wr_id, qp->qp_id,
2577 				    wqe_idx);
2578 
2579 			list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated);
2580 
2581 			compl_generated = true;
2582 		}
2583 		spin_unlock_irqrestore(&iwqp->lock, flags2);
2584 		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
2585 		if (compl_generated)
2586 			irdma_comp_handler(iwqp->iwrcq);
2587 	} else {
2588 		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
2589 		irdma_sched_qp_flush_work(iwqp);
2590 	}
2591 }
2592 
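/*
 * Flow summary (editor's note): the software completions queued above on
 * cmpl_generated are consumed by irdma_generated_cmpls() during CQ
 * polling, so flushed WRs complete with IRDMA_COMPL_STATUS_FLUSHED even
 * though hardware never produced a CQE for them; any leftover entries are
 * reclaimed by irdma_remove_cmpls_list().
 */
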
2593 /**
2594  * irdma_udqp_qs_change - change qs for UD QP in a worker thread
2595  * @iwqp: QP pointer
2596  * @user_prio: new user priority value
2597  * @qs_change: when false, only the user priority changes and the QS handle need not change
2598  */
2599 static void
2600 irdma_udqp_qs_change(struct irdma_qp *iwqp, u8 user_prio, bool qs_change)
2601 {
2602 	irdma_qp_rem_qos(&iwqp->sc_qp);
2603 	if (qs_change)
2604 		iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi, iwqp->ctx_info.user_pri);
2605 
2606 	iwqp->ctx_info.user_pri = user_prio;
2607 	iwqp->sc_qp.user_pri = user_prio;
2608 
2609 	if (qs_change)
2610 		if (iwqp->sc_qp.dev->ws_add(iwqp->sc_qp.vsi, user_prio))
2611 			irdma_dev_warn(&iwqp->iwdev->ibdev,
2612 				       "WS add failed during %s, qp_id: %x user_pri: %x",
2613 				       __func__, iwqp->ibqp.qp_num, user_prio);
2614 	irdma_qp_add_qos(&iwqp->sc_qp);
2615 }
2616 
2617 void
2618 irdma_udqp_qs_worker(struct work_struct *work)
2619 {
2620 	struct irdma_udqs_work *udqs_work = container_of(work, struct irdma_udqs_work, work);
2621 
2622 	irdma_udqp_qs_change(udqs_work->iwqp, udqs_work->user_prio, udqs_work->qs_change);
2623 	if (udqs_work->qs_change)
2624 		irdma_cqp_qp_suspend_resume(&udqs_work->iwqp->sc_qp, IRDMA_OP_RESUME);
2625 	irdma_qp_rem_ref(&udqs_work->iwqp->ibqp);
2626 	kfree(udqs_work);
2627 }
2628 
2629 void
2630 irdma_chk_free_stag(struct irdma_pci_f *rf)
2631 {
2632 	struct irdma_cqp_request *cqp_request;
2633 	struct cqp_cmds_info *cqp_info;
2634 	struct irdma_dealloc_stag_info *info;
2635 
2636 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2637 	if (!cqp_request)
2638 		return;
2639 
2640 	cqp_info = &cqp_request->info;
2641 	info = &cqp_info->in.u.dealloc_stag.info;
2642 	info->stag_idx = RS_64_1(rf->chk_stag, IRDMA_CQPSQ_STAG_IDX_S);
2643 	cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
2644 	cqp_info->post_sq = 1;
2645 	cqp_info->in.u.dealloc_stag.dev = &rf->sc_dev;
2646 	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2647 	irdma_handle_cqp_op(rf, cqp_request);
2648 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2649 }
2650 
2651 void
2652 cqp_poll_worker(struct work_struct *work)
2653 {
2654 	struct delayed_work *dwork = to_delayed_work(work);
2655 	struct irdma_pci_f *rf = container_of(dwork, struct irdma_pci_f, dwork_cqp_poll);
2656 	struct irdma_mr iwmr = {};
2657 	struct irdma_pd *iwpd;
2658 
2659 	iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
2660 	if (!iwpd)
2661 		return;
2662 	iwmr.stag = rf->chk_stag;
2663 	iwmr.ibmw.type = IB_MW_TYPE_1;
2664 	iwmr.ibmr.pd = &iwpd->ibpd;
2665 	if (irdma_hw_alloc_mw(rf->iwdev, &iwmr))
2666 		goto exit;
2667 	irdma_chk_free_stag(rf);
2668 
2669 	mod_delayed_work(rf->iwdev->cleanup_wq, &rf->dwork_cqp_poll,
2670 			 msecs_to_jiffies(3000));
2671 exit:
2672 	kfree(iwpd);
2673 }
2674