/* drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c */
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
3 
4 #include "hinic3_eqs.h"
5 #include "hinic3_hwdev.h"
6 #include "hinic3_hwif.h"
7 #include "hinic3_mbox.h"
8 #include "hinic3_mgmt.h"
9 
10 #define HINIC3_MSG_TO_MGMT_MAX_LEN  2016
11 
12 #define MGMT_MAX_PF_BUF_SIZE        2048UL
13 #define MGMT_SEG_LEN_MAX            48
14 #define MGMT_ASYNC_MSG_FLAG         0x8
15 
16 #define HINIC3_MGMT_WQ_NAME         "hinic3_mgmt"
17 
18 /* Bogus sequence ID to prevent accidental match following partial message */
19 #define MGMT_BOGUS_SEQ_ID  \
20 	(MGMT_MAX_PF_BUF_SIZE / MGMT_SEG_LEN_MAX + 1)
21 
22 static void
hinic3_mgmt_resp_msg_handler(struct hinic3_msg_pf_to_mgmt * pf_to_mgmt,struct hinic3_recv_msg * recv_msg)23 hinic3_mgmt_resp_msg_handler(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt,
24 			     struct hinic3_recv_msg *recv_msg)
25 {
26 	struct device *dev = pf_to_mgmt->hwdev->dev;
27 
28 	/* Ignore async msg */
29 	if (recv_msg->msg_id & MGMT_ASYNC_MSG_FLAG)
30 		return;
31 
32 	spin_lock(&pf_to_mgmt->sync_event_lock);
33 	if (recv_msg->msg_id != pf_to_mgmt->sync_msg_id) {
34 		dev_err(dev, "msg id mismatch, send msg id: 0x%x, recv msg id: 0x%x, event state: %d\n",
35 			pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
36 			pf_to_mgmt->event_flag);
37 	} else if (pf_to_mgmt->event_flag == COMM_SEND_EVENT_START) {
38 		pf_to_mgmt->event_flag = COMM_SEND_EVENT_SUCCESS;
39 		complete(&recv_msg->recv_done);
40 	} else {
41 		dev_err(dev, "Wait timeout, send msg id: 0x%x, recv msg id: 0x%x, event state: %d\n",
42 			pf_to_mgmt->sync_msg_id, recv_msg->msg_id,
43 			pf_to_mgmt->event_flag);
44 	}
45 	spin_unlock(&pf_to_mgmt->sync_event_lock);
46 }
47 
hinic3_recv_mgmt_msg_work_handler(struct work_struct * work)48 static void hinic3_recv_mgmt_msg_work_handler(struct work_struct *work)
49 {
50 	struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
51 	struct mgmt_msg_handle_work *mgmt_work;
52 	struct mgmt_msg_head *ack_cmd;
53 
54 	mgmt_work = container_of(work, struct mgmt_msg_handle_work, work);
55 
56 	/* At the moment, we do not expect any meaningful messages but if the
57 	 * sender expects an ACK we still need to provide one with "unsupported"
58 	 * status.
59 	 */
60 	if (mgmt_work->async_mgmt_to_pf)
61 		goto out;
62 
63 	pf_to_mgmt = mgmt_work->pf_to_mgmt;
64 	ack_cmd = pf_to_mgmt->mgmt_ack_buf;
65 	memset(ack_cmd, 0, sizeof(*ack_cmd));
66 	ack_cmd->status = MGMT_STATUS_CMD_UNSUPPORTED;
67 
68 	hinic3_response_mbox_to_mgmt(pf_to_mgmt->hwdev, mgmt_work->mod,
69 				     mgmt_work->cmd, ack_cmd, sizeof(*ack_cmd),
70 				     mgmt_work->msg_id);
71 
72 out:
73 	kfree(mgmt_work->msg);
74 	kfree(mgmt_work);
75 }
76 
/* Append one mailbox segment to the message being reassembled in @recv_msg.
 * Validates segment length, ordering and message identity; sets
 * *@is_complete when the final segment has been copied.
 * Return: 0 on success, -EINVAL on any malformed or out-of-order segment.
 */
static int hinic3_recv_msg_add_seg(struct hinic3_recv_msg *recv_msg,
				   __le64 msg_header, const void *seg_data,
				   bool *is_complete)
{
	u8 seg_len, is_last, seq_id, msg_id;
	char *dst;
	u32 off;

	seg_len = MBOX_MSG_HEADER_GET(msg_header, SEG_LEN);
	is_last = MBOX_MSG_HEADER_GET(msg_header, LAST);
	seq_id = MBOX_MSG_HEADER_GET(msg_header, SEQID);
	msg_id = MBOX_MSG_HEADER_GET(msg_header, MSG_ID);

	if (seg_len > MGMT_SEG_LEN_MAX)
		return -EINVAL;

	/* Only the final segment may be shorter than the maximum. */
	if (!is_last && seg_len != MGMT_SEG_LEN_MAX)
		return -EINVAL;

	if (seq_id == 0) {
		/* First segment starts a new message. */
		recv_msg->seq_id = seq_id;
		recv_msg->msg_id = msg_id;
	} else if (seq_id != recv_msg->seq_id + 1 ||
		   msg_id != recv_msg->msg_id) {
		/* Out-of-order segment or interleaved message. */
		return -EINVAL;
	}

	off = seq_id * MGMT_SEG_LEN_MAX;
	if (off + seg_len > MGMT_MAX_PF_BUF_SIZE)
		return -EINVAL;

	dst = recv_msg->msg;
	memcpy(dst + off, seg_data, seg_len);
	recv_msg->msg_len = off + seg_len;
	recv_msg->seq_id = seq_id;
	*is_complete = !!is_last;

	return 0;
}

hinic3_init_mgmt_msg_work(struct hinic3_msg_pf_to_mgmt * pf_to_mgmt,struct hinic3_recv_msg * recv_msg)118 static void hinic3_init_mgmt_msg_work(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt,
119 				      struct hinic3_recv_msg *recv_msg)
120 {
121 	struct mgmt_msg_handle_work *mgmt_work;
122 
123 	mgmt_work = kmalloc_obj(*mgmt_work);
124 	if (!mgmt_work)
125 		return;
126 
127 	if (recv_msg->msg_len) {
128 		mgmt_work->msg = kmemdup(recv_msg->msg, recv_msg->msg_len,
129 					 GFP_KERNEL);
130 		if (!mgmt_work->msg) {
131 			kfree(mgmt_work);
132 			return;
133 		}
134 	} else {
135 		mgmt_work->msg = NULL;
136 	}
137 
138 	mgmt_work->pf_to_mgmt = pf_to_mgmt;
139 	mgmt_work->msg_len = recv_msg->msg_len;
140 	mgmt_work->msg_id = recv_msg->msg_id;
141 	mgmt_work->mod = recv_msg->mod;
142 	mgmt_work->cmd = recv_msg->cmd;
143 	mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;
144 
145 	INIT_WORK(&mgmt_work->work, hinic3_recv_mgmt_msg_work_handler);
146 	queue_work(pf_to_mgmt->workq, &mgmt_work->work);
147 }
148 
149 static void
hinic3_recv_mgmt_msg_handler(struct hinic3_msg_pf_to_mgmt * pf_to_mgmt,const u8 * header,struct hinic3_recv_msg * recv_msg)150 hinic3_recv_mgmt_msg_handler(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt,
151 			     const u8 *header,
152 			     struct hinic3_recv_msg *recv_msg)
153 {
154 	struct hinic3_hwdev *hwdev = pf_to_mgmt->hwdev;
155 	const void *seg_data;
156 	__le64 msg_header;
157 	bool is_complete;
158 	u8 dir, msg_id;
159 	int err;
160 
161 	msg_header = *(__force __le64 *)header;
162 	dir = MBOX_MSG_HEADER_GET(msg_header, DIRECTION);
163 	msg_id = MBOX_MSG_HEADER_GET(msg_header, MSG_ID);
164 	/* Don't need to get anything from hw when cmd is async */
165 	if (dir == MBOX_MSG_RESP && (msg_id & MGMT_ASYNC_MSG_FLAG))
166 		return;
167 
168 	seg_data = header + sizeof(msg_header);
169 	err = hinic3_recv_msg_add_seg(recv_msg, msg_header,
170 				      seg_data, &is_complete);
171 	if (err) {
172 		dev_err(hwdev->dev, "invalid receive segment\n");
173 		/* set seq_id to invalid seq_id */
174 		recv_msg->seq_id = MGMT_BOGUS_SEQ_ID;
175 
176 		return;
177 	}
178 
179 	if (!is_complete)
180 		return;
181 
182 	recv_msg->cmd = MBOX_MSG_HEADER_GET(msg_header, CMD);
183 	recv_msg->mod = MBOX_MSG_HEADER_GET(msg_header, MODULE);
184 	recv_msg->async_mgmt_to_pf = MBOX_MSG_HEADER_GET(msg_header, NO_ACK);
185 	recv_msg->seq_id = MGMT_BOGUS_SEQ_ID;
186 
187 	if (dir == MBOX_MSG_RESP)
188 		hinic3_mgmt_resp_msg_handler(pf_to_mgmt, recv_msg);
189 	else
190 		hinic3_init_mgmt_msg_work(pf_to_mgmt, recv_msg);
191 }
192 
alloc_recv_msg(struct hinic3_recv_msg * recv_msg)193 static int alloc_recv_msg(struct hinic3_recv_msg *recv_msg)
194 {
195 	recv_msg->seq_id = MGMT_BOGUS_SEQ_ID;
196 
197 	recv_msg->msg = kzalloc(MGMT_MAX_PF_BUF_SIZE, GFP_KERNEL);
198 	if (!recv_msg->msg)
199 		return -ENOMEM;
200 
201 	return 0;
202 }
203 
free_recv_msg(struct hinic3_recv_msg * recv_msg)204 static void free_recv_msg(struct hinic3_recv_msg *recv_msg)
205 {
206 	kfree(recv_msg->msg);
207 }
208 
alloc_msg_buf(struct hinic3_msg_pf_to_mgmt * pf_to_mgmt)209 static int alloc_msg_buf(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt)
210 {
211 	struct device *dev = pf_to_mgmt->hwdev->dev;
212 	int err;
213 
214 	err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
215 	if (err) {
216 		dev_err(dev, "Failed to allocate recv msg\n");
217 		return err;
218 	}
219 
220 	err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
221 	if (err) {
222 		dev_err(dev, "Failed to allocate resp recv msg\n");
223 		goto err_free_msg_from_mgmt;
224 	}
225 
226 	pf_to_mgmt->mgmt_ack_buf = kzalloc(MGMT_MAX_PF_BUF_SIZE, GFP_KERNEL);
227 	if (!pf_to_mgmt->mgmt_ack_buf) {
228 		err = -ENOMEM;
229 		goto err_free_resp_msg_from_mgmt;
230 	}
231 
232 	return 0;
233 
234 err_free_resp_msg_from_mgmt:
235 	free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
236 err_free_msg_from_mgmt:
237 	free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
238 
239 	return err;
240 }
241 
free_msg_buf(struct hinic3_msg_pf_to_mgmt * pf_to_mgmt)242 static void free_msg_buf(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt)
243 {
244 	kfree(pf_to_mgmt->mgmt_ack_buf);
245 
246 	free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
247 	free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
248 }
249 
hinic3_pf_to_mgmt_init(struct hinic3_hwdev * hwdev)250 int hinic3_pf_to_mgmt_init(struct hinic3_hwdev *hwdev)
251 {
252 	struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
253 	int err;
254 
255 	pf_to_mgmt = kzalloc_obj(*pf_to_mgmt);
256 	if (!pf_to_mgmt)
257 		return -ENOMEM;
258 
259 	hwdev->pf_to_mgmt = pf_to_mgmt;
260 	pf_to_mgmt->hwdev = hwdev;
261 	spin_lock_init(&pf_to_mgmt->sync_event_lock);
262 	pf_to_mgmt->workq = create_singlethread_workqueue(HINIC3_MGMT_WQ_NAME);
263 	if (!pf_to_mgmt->workq) {
264 		dev_err(hwdev->dev, "Failed to initialize MGMT workqueue\n");
265 		err = -ENOMEM;
266 		goto err_free_pf_to_mgmt;
267 	}
268 
269 	err = alloc_msg_buf(pf_to_mgmt);
270 	if (err) {
271 		dev_err(hwdev->dev, "Failed to allocate msg buffers\n");
272 		goto err_destroy_workqueue;
273 	}
274 
275 	return 0;
276 
277 err_destroy_workqueue:
278 	destroy_workqueue(pf_to_mgmt->workq);
279 err_free_pf_to_mgmt:
280 	kfree(pf_to_mgmt);
281 
282 	return err;
283 }
284 
hinic3_pf_to_mgmt_free(struct hinic3_hwdev * hwdev)285 void hinic3_pf_to_mgmt_free(struct hinic3_hwdev *hwdev)
286 {
287 	struct hinic3_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;
288 
289 	/* destroy workqueue before free related pf_to_mgmt resources in case of
290 	 * illegal resource access
291 	 */
292 	destroy_workqueue(pf_to_mgmt->workq);
293 
294 	free_msg_buf(pf_to_mgmt);
295 	kfree(pf_to_mgmt);
296 }
297 
hinic3_flush_mgmt_workq(struct hinic3_hwdev * hwdev)298 void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev)
299 {
300 	if (hwdev->aeqs)
301 		flush_workqueue(hwdev->aeqs->workq);
302 
303 	if (HINIC3_IS_PF(hwdev) && hwdev->pf_to_mgmt)
304 		flush_workqueue(hwdev->pf_to_mgmt->workq);
305 }
306 
/* Entry point for management AEQ entries. Messages sourced from a mailbox
 * are forwarded to the mailbox handler; the rest are reassembled in the
 * per-direction receive buffer (SEND carries commands, anything else is a
 * response). Fixes: read the 8-byte header once instead of dereferencing
 * @header twice, and drop the redundant "? true : false" bool.
 */
void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
				  u8 size)
{
	struct hinic3_msg_pf_to_mgmt *pf_to_mgmt;
	struct hinic3_recv_msg *recv_msg;
	__le64 msg_header;

	msg_header = *(__force __le64 *)header;

	if (MBOX_MSG_HEADER_GET(msg_header, SOURCE) == MBOX_MSG_FROM_MBOX) {
		hinic3_mbox_func_aeqe_handler(hwdev, header, size);

		return;
	}

	pf_to_mgmt = hwdev->pf_to_mgmt;

	recv_msg = MBOX_MSG_HEADER_GET(msg_header, DIRECTION) ==
		   MBOX_MSG_SEND ?
		   &pf_to_mgmt->recv_msg_from_mgmt :
		   &pf_to_mgmt->recv_resp_msg_from_mgmt;

	hinic3_recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg);
}
333