// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) Advanced Micro Devices, Inc */

#include <linux/module.h>
#include <linux/auxiliary_bus.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/bitfield.h>

#include <uapi/fwctl/fwctl.h>
#include <uapi/fwctl/pds.h>
#include <linux/fwctl.h>

#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>
#include <linux/pds/pds_auxbus.h>

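/**
 * struct pdsfc_uctx - Userspace context for an open fwctl file
 * @uctx: fwctl core context
 * @uctx_caps: device capability bits advertised to this context
 */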
struct pdsfc_uctx {
	struct fwctl_uctx uctx;
	u32 uctx_caps;
};

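/**
 * struct pdsfc_rpc_endpoint_info - Per-endpoint operations cache
 * @endpoint: firmware endpoint id
 * @operations_pa: DMA address of the cached operations list
 * @operations: operations list queried from the firmware, cached on
 *              first use of the endpoint
 * @lock: serializes the lazy query and caching of @operations
 */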
struct pdsfc_rpc_endpoint_info {
	u32 endpoint;
	dma_addr_t operations_pa;
	struct pds_fwctl_query_data *operations;
	struct mutex lock;	/* lock for endpoint info management */
};

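/**
 * struct pdsfc_dev - Device state for one pds_fwctl instance
 * @fwctl: fwctl core device
 * @padev: handle to the pds_core auxiliary device
 * @caps: capability bits reported to userspace contexts
 * @ident: identity data queried from the firmware
 * @endpoints_pa: DMA address of the endpoint list
 * @endpoints: endpoint list queried from the firmware
 * @endpoint_info: per-endpoint state, one entry per endpoint
 */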
struct pdsfc_dev {
	struct fwctl_device fwctl;
	struct pds_auxiliary_dev *padev;
	u32 caps;
	struct pds_fwctl_ident ident;
	dma_addr_t endpoints_pa;
	struct pds_fwctl_query_data *endpoints;
	struct pdsfc_rpc_endpoint_info *endpoint_info;
};

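/* Snapshot the device's capabilities into the new userspace context */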
static int pdsfc_open_uctx(struct fwctl_uctx *uctx)
{
	struct pdsfc_dev *pdsfc = container_of(uctx->fwctl, struct pdsfc_dev, fwctl);
	struct pdsfc_uctx *pdsfc_uctx = container_of(uctx, struct pdsfc_uctx, uctx);

	pdsfc_uctx->uctx_caps = pdsfc->caps;

	return 0;
}

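/* Open only snapshots caps, so there is no per-context state to release */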
static void pdsfc_close_uctx(struct fwctl_uctx *uctx)
{
}

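/*
 * Return device-specific info for the FWCTL_INFO ioctl. The fwctl core
 * copies the returned buffer out to userspace and then frees it.
 */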
static void *pdsfc_info(struct fwctl_uctx *uctx, size_t *length)
{
	struct pdsfc_uctx *pdsfc_uctx = container_of(uctx, struct pdsfc_uctx, uctx);
	struct fwctl_info_pds *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	info->uctx_caps = pdsfc_uctx->uctx_caps;

	/* tell the fwctl core how much info data to copy back */
	*length = sizeof(*info);

	return info;
}

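/**
 * pdsfc_identify() - Query the firmware's fwctl identity information
 * @pdsfc: device instance
 *
 * Issues a PDS_FWCTL_CMD_IDENT adminq command with a DMA buffer for the
 * firmware to fill, then caches the result in pdsfc->ident for later
 * request/response size validation.
 *
 * Return: 0 on success, or a negative errno from the adminq command.
 */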
static int pdsfc_identify(struct pdsfc_dev *pdsfc)
{
	struct device *dev = &pdsfc->fwctl.dev;
	union pds_core_adminq_comp comp = {0};
	union pds_core_adminq_cmd cmd;
	struct pds_fwctl_ident *ident;
	dma_addr_t ident_pa;
	int err;

	ident = dma_alloc_coherent(dev->parent, sizeof(*ident), &ident_pa, GFP_KERNEL);
	if (!ident) {
		dev_err(dev, "Failed to map ident buffer\n");
		return -ENOMEM;
	}

	cmd = (union pds_core_adminq_cmd) {
		.fwctl_ident = {
			.opcode = PDS_FWCTL_CMD_IDENT,
			.version = 0,
			.len = cpu_to_le32(sizeof(*ident)),
			.ident_pa = cpu_to_le64(ident_pa),
		}
	};

	err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
	if (err)
		dev_err(dev, "Failed to send adminq cmd opcode: %u err: %d\n",
			cmd.fwctl_ident.opcode, err);
	else
		pdsfc->ident = *ident;

	dma_free_coherent(dev->parent, sizeof(*ident), ident, ident_pa);

	return err;
}

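/**
 * pdsfc_free_endpoints() - Tear down the cached endpoint list
 * @pdsfc: device instance
 *
 * Destroys the per-endpoint locks and frees both the endpoint_info
 * array and the DMA buffer holding the endpoint list.
 */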
static void pdsfc_free_endpoints(struct pdsfc_dev *pdsfc)
{
	struct device *dev = &pdsfc->fwctl.dev;
	u32 num_endpoints;
	int i;

	if (!pdsfc->endpoints)
		return;

	/* num_entries is little-endian in the DMA'd query data */
	num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
	for (i = 0; pdsfc->endpoint_info && i < num_endpoints; i++)
		mutex_destroy(&pdsfc->endpoint_info[i].lock);
	vfree(pdsfc->endpoint_info);
	pdsfc->endpoint_info = NULL;
	dma_free_coherent(dev->parent, PAGE_SIZE,
			  pdsfc->endpoints, pdsfc->endpoints_pa);
	pdsfc->endpoints = NULL;
	pdsfc->endpoints_pa = DMA_MAPPING_ERROR;
}

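/**
 * pdsfc_free_operations() - Free any cached per-endpoint operations lists
 * @pdsfc: device instance
 *
 * Must be called before pdsfc_free_endpoints(), which releases the
 * endpoint list and endpoint_info array walked here.
 */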
static void pdsfc_free_operations(struct pdsfc_dev *pdsfc)
{
	struct device *dev = &pdsfc->fwctl.dev;
	u32 num_endpoints;
	int i;

	num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
	for (i = 0; i < num_endpoints; i++) {
		struct pdsfc_rpc_endpoint_info *ei = &pdsfc->endpoint_info[i];

		if (ei->operations) {
			dma_free_coherent(dev->parent, PAGE_SIZE,
					  ei->operations, ei->operations_pa);
			ei->operations = NULL;
			ei->operations_pa = DMA_MAPPING_ERROR;
		}
	}
}

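/**
 * pdsfc_get_endpoints() - Query the firmware for its list of RPC endpoints
 * @pdsfc: device instance
 * @pa: on success, set to the DMA address of the returned buffer
 *
 * Return: the DMA'd query data on success, or an ERR_PTR. The caller is
 * responsible for freeing the buffer with dma_free_coherent().
 */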
static struct pds_fwctl_query_data *pdsfc_get_endpoints(struct pdsfc_dev *pdsfc,
							dma_addr_t *pa)
{
	struct device *dev = &pdsfc->fwctl.dev;
	union pds_core_adminq_comp comp = {0};
	struct pds_fwctl_query_data *data;
	union pds_core_adminq_cmd cmd;
	dma_addr_t data_pa;
	int err;

	data = dma_alloc_coherent(dev->parent, PAGE_SIZE, &data_pa, GFP_KERNEL);
	if (!data) {
		dev_err(dev, "Failed to map endpoint list\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = (union pds_core_adminq_cmd) {
		.fwctl_query = {
			.opcode = PDS_FWCTL_CMD_QUERY,
			.entity = PDS_FWCTL_RPC_ROOT,
			.version = 0,
			.query_data_buf_len = cpu_to_le32(PAGE_SIZE),
			.query_data_buf_pa = cpu_to_le64(data_pa),
		}
	};

	err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
	if (err) {
		dev_err(dev, "Failed to send adminq cmd opcode: %u entity: %u err: %d\n",
			cmd.fwctl_query.opcode, cmd.fwctl_query.entity, err);
		dma_free_coherent(dev->parent, PAGE_SIZE, data, data_pa);
		return ERR_PTR(err);
	}

	*pa = data_pa;

	return data;
}

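/**
 * pdsfc_init_endpoints() - Query and set up the per-endpoint state
 * @pdsfc: device instance
 *
 * Fetches the endpoint list from the firmware and allocates a parallel
 * endpoint_info array used for lazy caching of each endpoint's
 * operations list.
 *
 * Return: 0 on success, or a negative errno.
 */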
static int pdsfc_init_endpoints(struct pdsfc_dev *pdsfc)
{
	struct pds_fwctl_query_data_endpoint *ep_entry;
	u32 num_endpoints;
	int i;

	pdsfc->endpoints = pdsfc_get_endpoints(pdsfc, &pdsfc->endpoints_pa);
	if (IS_ERR(pdsfc->endpoints))
		return PTR_ERR(pdsfc->endpoints);

	num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
	pdsfc->endpoint_info = vcalloc(num_endpoints,
				       sizeof(*pdsfc->endpoint_info));
	if (!pdsfc->endpoint_info) {
		pdsfc_free_endpoints(pdsfc);
		return -ENOMEM;
	}

	ep_entry = (struct pds_fwctl_query_data_endpoint *)pdsfc->endpoints->entries;
	for (i = 0; i < num_endpoints; i++) {
		mutex_init(&pdsfc->endpoint_info[i].lock);
		pdsfc->endpoint_info[i].endpoint = le32_to_cpu(ep_entry[i].id);
	}

	return 0;
}

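/**
 * pdsfc_get_operations() - Query the list of operations for one endpoint
 * @pdsfc: device instance
 * @pa: on success, set to the DMA address of the returned buffer
 * @ep: endpoint id to query
 *
 * The firmware's command attributes are translated in place into fwctl
 * scope values so that pdsfc_validate_rpc() can compare them directly
 * against the scope granted to the userspace context.
 *
 * Return: the DMA'd query data on success, or an ERR_PTR.
 */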
static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc,
							 dma_addr_t *pa, u32 ep)
{
	struct pds_fwctl_query_data_operation *entries;
	struct device *dev = &pdsfc->fwctl.dev;
	union pds_core_adminq_comp comp = {0};
	struct pds_fwctl_query_data *data;
	union pds_core_adminq_cmd cmd;
	dma_addr_t data_pa;
	u32 num_entries;
	int err;
	int i;

	/* Query the operations list for the given endpoint */
	data = dma_alloc_coherent(dev->parent, PAGE_SIZE, &data_pa, GFP_KERNEL);
	if (!data) {
		dev_err(dev, "Failed to map operations list\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = (union pds_core_adminq_cmd) {
		.fwctl_query = {
			.opcode = PDS_FWCTL_CMD_QUERY,
			.entity = PDS_FWCTL_RPC_ENDPOINT,
			.version = 0,
			.query_data_buf_len = cpu_to_le32(PAGE_SIZE),
			.query_data_buf_pa = cpu_to_le64(data_pa),
			.ep = cpu_to_le32(ep),
		}
	};

	err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
	if (err) {
		dev_err(dev, "Failed to send adminq cmd opcode: %u entity: %u err: %d\n",
			cmd.fwctl_query.opcode, cmd.fwctl_query.entity, err);
		dma_free_coherent(dev->parent, PAGE_SIZE, data, data_pa);
		return ERR_PTR(err);
	}

	*pa = data_pa;

	entries = (struct pds_fwctl_query_data_operation *)data->entries;
	num_entries = le32_to_cpu(data->num_entries);
	dev_dbg(dev, "num_entries %u\n", num_entries);
	for (i = 0; i < num_entries; i++) {
		/* Translate FW command attribute to fwctl scope */
		switch (entries[i].scope) {
		case PDSFC_FW_CMD_ATTR_READ:
		case PDSFC_FW_CMD_ATTR_WRITE:
		case PDSFC_FW_CMD_ATTR_SYNC:
			entries[i].scope = FWCTL_RPC_CONFIGURATION;
			break;
		case PDSFC_FW_CMD_ATTR_DEBUG_READ:
			entries[i].scope = FWCTL_RPC_DEBUG_READ_ONLY;
			break;
		case PDSFC_FW_CMD_ATTR_DEBUG_WRITE:
			entries[i].scope = FWCTL_RPC_DEBUG_WRITE;
			break;
		default:
			entries[i].scope = FWCTL_RPC_DEBUG_WRITE_FULL;
			break;
		}
		dev_dbg(dev, "endpoint %d operation: id %x scope %d\n",
			ep, le32_to_cpu(entries[i].id), entries[i].scope);
	}

	return data;
}

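/**
 * pdsfc_validate_rpc() - Sanity-check a userspace RPC before sending it
 * @pdsfc: device instance
 * @rpc: the request from userspace
 * @scope: the scope granted to the calling context
 *
 * Checks the request and response lengths against the firmware's ident
 * limits, verifies the endpoint exists, and rejects operations that are
 * unknown or that require more scope than @scope.
 *
 * Return: 0 if the RPC may be sent, or a negative errno.
 */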
static int pdsfc_validate_rpc(struct pdsfc_dev *pdsfc,
			      struct fwctl_rpc_pds *rpc,
			      enum fwctl_rpc_scope scope)
{
	struct pds_fwctl_query_data_operation *op_entry;
	struct pdsfc_rpc_endpoint_info *ep_info = NULL;
	struct device *dev = &pdsfc->fwctl.dev;
	u32 num_entries;
	int i;

	/* validate rpc in_len & out_len based
	 * on ident.max_req_sz & max_resp_sz
	 */
	if (rpc->in.len > le32_to_cpu(pdsfc->ident.max_req_sz)) {
		dev_dbg(dev, "Invalid request size %u, max %u\n",
			rpc->in.len, le32_to_cpu(pdsfc->ident.max_req_sz));
		return -EINVAL;
	}

	if (rpc->out.len > le32_to_cpu(pdsfc->ident.max_resp_sz)) {
		dev_dbg(dev, "Invalid response size %u, max %u\n",
			rpc->out.len, le32_to_cpu(pdsfc->ident.max_resp_sz));
		return -EINVAL;
	}

	num_entries = le32_to_cpu(pdsfc->endpoints->num_entries);
	for (i = 0; i < num_entries; i++) {
		if (pdsfc->endpoint_info[i].endpoint == rpc->in.ep) {
			ep_info = &pdsfc->endpoint_info[i];
			break;
		}
	}
	if (!ep_info) {
		dev_dbg(dev, "Invalid endpoint %d\n", rpc->in.ep);
		return -EINVAL;
	}

	/* query and cache this endpoint's operations */
	mutex_lock(&ep_info->lock);
	if (!ep_info->operations) {
		struct pds_fwctl_query_data *operations;

		operations = pdsfc_get_operations(pdsfc,
						  &ep_info->operations_pa,
						  rpc->in.ep);
		if (IS_ERR(operations)) {
			mutex_unlock(&ep_info->lock);
			return PTR_ERR(operations);
		}
		ep_info->operations = operations;
	}
	mutex_unlock(&ep_info->lock);

	/* reject unsupported and/or out of scope commands */
	op_entry = (struct pds_fwctl_query_data_operation *)ep_info->operations->entries;
	num_entries = le32_to_cpu(ep_info->operations->num_entries);
	for (i = 0; i < num_entries; i++) {
		if (PDS_FWCTL_RPC_OPCODE_CMP(rpc->in.op, op_entry[i].id)) {
			if (scope < op_entry[i].scope)
				return -EPERM;
			return 0;
		}
	}

	dev_dbg(dev, "Invalid operation %d for endpoint %d\n", rpc->in.op, rpc->in.ep);

	return -EINVAL;
}

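/**
 * pdsfc_fw_rpc() - Pass a validated userspace RPC through to the firmware
 * @uctx: userspace context issuing the RPC
 * @scope: scope granted to @uctx
 * @in: the fwctl_rpc_pds request, also reused to carry the result
 * @in_len: length of @in
 * @out_len: set to the length of the returned result
 *
 * Bounce-buffers the request and response payloads through DMA mappings,
 * issues a PDS_FWCTL_CMD_RPC adminq command, and copies the response
 * payload back to the userspace address given in the request.
 *
 * Return: the result buffer (the same memory as @in) or an ERR_PTR.
 */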
static void *pdsfc_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
			  void *in, size_t in_len, size_t *out_len)
{
	struct pdsfc_dev *pdsfc = container_of(uctx->fwctl, struct pdsfc_dev, fwctl);
	struct device *dev = &uctx->fwctl->dev;
	union pds_core_adminq_comp comp = {0};
	dma_addr_t out_payload_dma_addr = 0;
	dma_addr_t in_payload_dma_addr = 0;
	struct fwctl_rpc_pds *rpc = in;
	union pds_core_adminq_cmd cmd;
	void *out_payload = NULL;
	void *in_payload = NULL;
	void *out = NULL;
	int err;

	err = pdsfc_validate_rpc(pdsfc, rpc, scope);
	if (err)
		return ERR_PTR(err);

	if (rpc->in.len > 0) {
		in_payload = kzalloc(rpc->in.len, GFP_KERNEL);
		if (!in_payload) {
			dev_err(dev, "Failed to allocate in_payload\n");
			err = -ENOMEM;
			goto err_out;
		}

		if (copy_from_user(in_payload, u64_to_user_ptr(rpc->in.payload),
				   rpc->in.len)) {
			dev_dbg(dev, "Failed to copy in_payload from user\n");
			err = -EFAULT;
			goto err_in_payload;
		}

		in_payload_dma_addr = dma_map_single(dev->parent, in_payload,
						     rpc->in.len, DMA_TO_DEVICE);
		err = dma_mapping_error(dev->parent, in_payload_dma_addr);
		if (err) {
			dev_dbg(dev, "Failed to map in_payload\n");
			goto err_in_payload;
		}
	}

	if (rpc->out.len > 0) {
		out_payload = kzalloc(rpc->out.len, GFP_KERNEL);
		if (!out_payload) {
			dev_dbg(dev, "Failed to allocate out_payload\n");
			err = -ENOMEM;
			goto err_out_payload;
		}

		out_payload_dma_addr = dma_map_single(dev->parent, out_payload,
						      rpc->out.len, DMA_FROM_DEVICE);
		err = dma_mapping_error(dev->parent, out_payload_dma_addr);
		if (err) {
			dev_dbg(dev, "Failed to map out_payload\n");
			goto err_out_payload;
		}
	}

	cmd = (union pds_core_adminq_cmd) {
		.fwctl_rpc = {
			.opcode = PDS_FWCTL_CMD_RPC,
			.flags = PDS_FWCTL_RPC_IND_REQ | PDS_FWCTL_RPC_IND_RESP,
			.ep = cpu_to_le32(rpc->in.ep),
			.op = cpu_to_le32(rpc->in.op),
			.req_pa = cpu_to_le64(in_payload_dma_addr),
			.req_sz = cpu_to_le32(rpc->in.len),
			.resp_pa = cpu_to_le64(out_payload_dma_addr),
			.resp_sz = cpu_to_le32(rpc->out.len),
		}
	};

	err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
	if (err) {
		dev_dbg(dev, "%s: ep %d op %x req_pa %llx req_sz %d req_sg %d resp_pa %llx resp_sz %d resp_sg %d err %d\n",
			__func__, rpc->in.ep, rpc->in.op,
			le64_to_cpu(cmd.fwctl_rpc.req_pa),
			le32_to_cpu(cmd.fwctl_rpc.req_sz),
			cmd.fwctl_rpc.req_sg_elems,
			le64_to_cpu(cmd.fwctl_rpc.resp_pa),
			le32_to_cpu(cmd.fwctl_rpc.resp_sz),
			cmd.fwctl_rpc.resp_sg_elems,
			err);
		goto done;
	}

	dynamic_hex_dump("out ", DUMP_PREFIX_OFFSET, 16, 1, out_payload, rpc->out.len, true);

	if (copy_to_user(u64_to_user_ptr(rpc->out.payload), out_payload, rpc->out.len)) {
		dev_dbg(dev, "Failed to copy out_payload to user\n");
		out = ERR_PTR(-EFAULT);
		goto done;
	}

	rpc->out.retval = le32_to_cpu(comp.fwctl_rpc.err);
	*out_len = in_len;
	out = in;

done:
	if (out_payload_dma_addr)
		dma_unmap_single(dev->parent, out_payload_dma_addr,
				 rpc->out.len, DMA_FROM_DEVICE);
err_out_payload:
	kfree(out_payload);

	if (in_payload_dma_addr)
		dma_unmap_single(dev->parent, in_payload_dma_addr,
				 rpc->in.len, DMA_TO_DEVICE);
err_in_payload:
	kfree(in_payload);
err_out:
	if (err)
		return ERR_PTR(err);

	return out;
}

static const struct fwctl_ops pdsfc_ops = {
	.device_type = FWCTL_DEVICE_TYPE_PDS,
	.uctx_size = sizeof(struct pdsfc_uctx),
	.open_uctx = pdsfc_open_uctx,
	.close_uctx = pdsfc_close_uctx,
	.info = pdsfc_info,
	.fw_rpc = pdsfc_fw_rpc,
};

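/*
 * Probe: build a fwctl device on the pds_core VF, query the firmware's
 * ident and endpoint list, then register with the fwctl subsystem.
 */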
static int pdsfc_probe(struct auxiliary_device *adev,
		       const struct auxiliary_device_id *id)
{
	struct pds_auxiliary_dev *padev =
			container_of(adev, struct pds_auxiliary_dev, aux_dev);
	struct device *dev = &adev->dev;
	struct pdsfc_dev *pdsfc;
	int err;

	pdsfc = fwctl_alloc_device(&padev->vf_pdev->dev, &pdsfc_ops,
				   struct pdsfc_dev, fwctl);
	if (!pdsfc)
		return dev_err_probe(dev, -ENOMEM, "Failed to allocate fwctl device struct\n");
	pdsfc->padev = padev;

	err = pdsfc_identify(pdsfc);
	if (err) {
		fwctl_put(&pdsfc->fwctl);
		return dev_err_probe(dev, err, "Failed to identify device\n");
	}

	err = pdsfc_init_endpoints(pdsfc);
	if (err) {
		fwctl_put(&pdsfc->fwctl);
		return dev_err_probe(dev, err, "Failed to init endpoints\n");
	}

	pdsfc->caps = PDS_FWCTL_QUERY_CAP | PDS_FWCTL_SEND_CAP;

	err = fwctl_register(&pdsfc->fwctl);
	if (err) {
		pdsfc_free_endpoints(pdsfc);
		fwctl_put(&pdsfc->fwctl);
		return dev_err_probe(dev, err, "Failed to register device\n");
	}

	auxiliary_set_drvdata(adev, pdsfc);

	return 0;
}

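/* Unregister from fwctl, then free the cached operations and endpoint data */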
static void pdsfc_remove(struct auxiliary_device *adev)
{
	struct pdsfc_dev *pdsfc = auxiliary_get_drvdata(adev);

	fwctl_unregister(&pdsfc->fwctl);
	pdsfc_free_operations(pdsfc);
	pdsfc_free_endpoints(pdsfc);

	fwctl_put(&pdsfc->fwctl);
}

static const struct auxiliary_device_id pdsfc_id_table[] = {
	{.name = PDS_CORE_DRV_NAME "." PDS_DEV_TYPE_FWCTL_STR },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, pdsfc_id_table);

static struct auxiliary_driver pdsfc_driver = {
	.name = "pds_fwctl",
	.probe = pdsfc_probe,
	.remove = pdsfc_remove,
	.id_table = pdsfc_id_table,
};

module_auxiliary_driver(pdsfc_driver);

MODULE_IMPORT_NS("FWCTL");
MODULE_DESCRIPTION("pds fwctl driver");
MODULE_AUTHOR("Shannon Nelson <shannon.nelson@amd.com>");
MODULE_AUTHOR("Brett Creeley <brett.creeley@amd.com>");
MODULE_LICENSE("GPL");