// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2024 Intel Corporation.
 */

#include "xe_pxp_submit.h"

#include <linux/delay.h>
#include <uapi/drm/xe_drm.h>

#include "xe_device_types.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_exec_queue.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_pxp.h"
#include "xe_pxp_types.h"
#include "xe_sched_job.h"
#include "xe_vm.h"
#include "abi/gsc_command_header_abi.h"
#include "abi/gsc_pxp_commands_abi.h"
#include "instructions/xe_gsc_commands.h"
#include "instructions/xe_mfx_commands.h"
#include "instructions/xe_mi_commands.h"

/*
 * The VCS is used for kernel-owned GGTT submissions to issue key termination.
 * Terminations are serialized, so we only need a single queue and a single
 * batch.
 */
static int allocate_vcs_execution_resources(struct xe_pxp *pxp)
{
	struct xe_gt *gt = pxp->gt;
	struct xe_device *xe = pxp->xe;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_hw_engine *hwe;
	struct xe_exec_queue *q;
	struct xe_bo *bo;
	int err;

	hwe = xe_gt_hw_engine(gt, XE_ENGINE_CLASS_VIDEO_DECODE, 0, true);
	if (!hwe)
		return -ENODEV;

	q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1, hwe,
				 EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_PERMANENT, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);
	/*
	 * Each termination is 16 DWORDs, so 4K is enough to contain a
	 * termination for each session.
	 */
	bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT);
	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto out_queue;
	}

	pxp->vcs_exec.q = q;
	pxp->vcs_exec.bo = bo;

	return 0;

out_queue:
	xe_exec_queue_put(q);
	return err;
}

static void destroy_vcs_execution_resources(struct xe_pxp *pxp)
{
	if (pxp->vcs_exec.bo)
		xe_bo_unpin_map_no_vm(pxp->vcs_exec.bo);

	if (pxp->vcs_exec.q)
		xe_exec_queue_put(pxp->vcs_exec.q);
}

#define PXP_BB_SIZE		XE_PAGE_SIZE
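/*
 * The GSCCS is used for kernel-owned PPGTT submissions to send PXP messages
 * to the GSC FW. A single BO backs both the batch buffer (at offset 0) and
 * the input/output message buffers.
 */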
static int allocate_gsc_client_resources(struct xe_gt *gt,
					 struct xe_pxp_gsc_client_resources *gsc_res,
					 size_t inout_size)
{
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm;
	struct xe_bo *bo;
	struct xe_exec_queue *q;
	struct dma_fence *fence;
	long timeout;
	int err = 0;

	hwe = xe_gt_hw_engine(gt, XE_ENGINE_CLASS_OTHER, 0, true);

	/* we shouldn't reach here if the GSC engine is not available */
	xe_assert(xe, hwe);

	/* PXP instructions must be issued from PPGTT */
	vm = xe_vm_create(xe, XE_VM_FLAG_GSC);
	if (IS_ERR(vm))
		return PTR_ERR(vm);

	/* We allocate a single object for the batch and the in/out memory */
	xe_vm_lock(vm, false);
	bo = xe_bo_create_pin_map(xe, tile, vm, PXP_BB_SIZE + inout_size * 2,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_NEEDS_UC);
	xe_vm_unlock(vm);
	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto vm_out;
	}

	fence = xe_vm_bind_kernel_bo(vm, bo, NULL, 0, XE_CACHE_WB);
	if (IS_ERR(fence)) {
		err = PTR_ERR(fence);
		goto bo_out;
	}

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	if (timeout <= 0) {
		err = timeout ?: -ETIME;
		goto bo_out;
	}

	q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance), 1, hwe,
				 EXEC_QUEUE_FLAG_KERNEL |
				 EXEC_QUEUE_FLAG_PERMANENT, 0);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto bo_out;
	}

	gsc_res->vm = vm;
	gsc_res->bo = bo;
	gsc_res->inout_size = inout_size;
	gsc_res->batch = IOSYS_MAP_INIT_OFFSET(&bo->vmap, 0);
	gsc_res->msg_in = IOSYS_MAP_INIT_OFFSET(&bo->vmap, PXP_BB_SIZE);
	gsc_res->msg_out = IOSYS_MAP_INIT_OFFSET(&bo->vmap, PXP_BB_SIZE + inout_size);
	gsc_res->q = q;

	/* initialize host-session-handle (for all Xe-to-gsc-firmware PXP cmds) */
	gsc_res->host_session_handle = xe_gsc_create_host_session_id();

	return 0;

bo_out:
	xe_bo_unpin_map_no_vm(bo);
vm_out:
	xe_vm_close_and_put(vm);

	return err;
}

static void destroy_gsc_client_resources(struct xe_pxp_gsc_client_resources *gsc_res)
{
	if (!gsc_res->q)
		return;

	xe_exec_queue_put(gsc_res->q);
	xe_bo_unpin_map_no_vm(gsc_res->bo);
	xe_vm_close_and_put(gsc_res->vm);
}

/**
 * xe_pxp_allocate_execution_resources - Allocate PXP submission objects
 * @pxp: the xe_pxp structure
 *
 * Allocates the exec_queue objects for VCS and GSCCS submission. The GSCCS
 * submissions are done via PPGTT, so this function also allocates a VM for
 * them and maps the object into it.
 *
 * Returns 0 if the allocation and mapping are successful, an errno value
 * otherwise.
 */
int xe_pxp_allocate_execution_resources(struct xe_pxp *pxp)
{
	int err;

	err = allocate_vcs_execution_resources(pxp);
	if (err)
		return err;

	/*
	 * PXP commands can require a lot of BO space (see PXP_MAX_PACKET_SIZE),
	 * but we currently only support a subset of commands that are small
	 * (< 20 dwords), so a single page is enough for now.
	 */
	err = allocate_gsc_client_resources(pxp->gt, &pxp->gsc_res, XE_PAGE_SIZE);
	if (err)
		goto destroy_vcs_context;

	return 0;

destroy_vcs_context:
	destroy_vcs_execution_resources(pxp);
	return err;
}

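/**
 * xe_pxp_destroy_execution_resources - Cleans up PXP submission objects
 * @pxp: the xe_pxp structure
 */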
void xe_pxp_destroy_execution_resources(struct xe_pxp *pxp)
{
	destroy_gsc_client_resources(&pxp->gsc_res);
	destroy_vcs_execution_resources(pxp);
}

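/* Write a single dword of a command at a dword offset into a mapped batch */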
#define emit_cmd(xe_, map_, offset_, val_) \
	xe_map_wr(xe_, map_, (offset_) * sizeof(u32), u32, val_)

/* stall until prior PXP and MFX/HCP/HUC objects are completed */
#define MFX_WAIT_PXP (MFX_WAIT | \
		      MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG | \
		      MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG)

static u32 pxp_emit_wait(struct xe_device *xe, struct iosys_map *batch, u32 offset)
{
	/* wait for cmds to go through */
	emit_cmd(xe, batch, offset++, MFX_WAIT_PXP);
	emit_cmd(xe, batch, offset++, 0);

	return offset;
}

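/*
 * Selecting a PXP session requires protection to be turned off while the
 * session is set via MI_SET_APPID and then turned back on, with MFX_WAIT
 * synchronization around the steps.
 */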
static u32 pxp_emit_session_selection(struct xe_device *xe, struct iosys_map *batch,
				      u32 offset, u32 idx)
{
	offset = pxp_emit_wait(xe, batch, offset);

	/* pxp off */
	emit_cmd(xe, batch, offset++, MI_FLUSH_DW | MI_FLUSH_IMM_DW);
	emit_cmd(xe, batch, offset++, 0);
	emit_cmd(xe, batch, offset++, 0);
	emit_cmd(xe, batch, offset++, 0);

	/* select session */
	emit_cmd(xe, batch, offset++, MI_SET_APPID | MI_SET_APPID_SESSION_ID(idx));
	emit_cmd(xe, batch, offset++, 0);

	offset = pxp_emit_wait(xe, batch, offset);

	/* pxp on */
	emit_cmd(xe, batch, offset++, MI_FLUSH_DW |
				      MI_FLUSH_DW_PROTECTED_MEM_EN |
				      MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX |
				      MI_FLUSH_IMM_DW);
	emit_cmd(xe, batch, offset++, LRC_PPHWSP_PXP_INVAL_SCRATCH_ADDR |
				      MI_FLUSH_DW_USE_GTT);
	emit_cmd(xe, batch, offset++, 0);
	emit_cmd(xe, batch, offset++, 0);

	offset = pxp_emit_wait(xe, batch, offset);

	return offset;
}

static u32 pxp_emit_inline_termination(struct xe_device *xe,
				       struct iosys_map *batch, u32 offset)
{
	/* session inline termination */
	emit_cmd(xe, batch, offset++, CRYPTO_KEY_EXCHANGE);
	emit_cmd(xe, batch, offset++, 0);

	return offset;
}

static u32 pxp_emit_session_termination(struct xe_device *xe, struct iosys_map *batch,
					u32 offset, u32 idx)
{
	offset = pxp_emit_session_selection(xe, batch, offset, idx);
	offset = pxp_emit_inline_termination(xe, batch, offset);

	return offset;
}

/**
 * xe_pxp_submit_session_termination - submits a PXP inline termination
 * @pxp: the xe_pxp structure
 * @id: the session to terminate
 *
 * Emit an inline termination via the VCS engine to terminate a session.
 *
 * Returns 0 if the submission is successful, an errno value otherwise.
 */
int xe_pxp_submit_session_termination(struct xe_pxp *pxp, u32 id)
{
	struct xe_sched_job *job;
	struct dma_fence *fence;
	long timeout;
	u32 offset = 0;
	u64 addr = xe_bo_ggtt_addr(pxp->vcs_exec.bo);

	offset = pxp_emit_session_termination(pxp->xe, &pxp->vcs_exec.bo->vmap, offset, id);
	offset = pxp_emit_wait(pxp->xe, &pxp->vcs_exec.bo->vmap, offset);
	emit_cmd(pxp->xe, &pxp->vcs_exec.bo->vmap, offset, MI_BATCH_BUFFER_END);

	job = xe_sched_job_create(pxp->vcs_exec.q, &addr);
	if (IS_ERR(job))
		return PTR_ERR(job);

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);

	dma_fence_put(fence);

	if (!timeout)
		return -ETIMEDOUT;
	else if (timeout < 0)
		return timeout;

	return 0;
}

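/*
 * These FW error codes indicate a problem with the BIOS/SoC platform
 * configuration rather than a bug in the driver or the FW itself.
 */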
static bool
is_fw_err_platform_config(u32 type)
{
	switch (type) {
	case PXP_STATUS_ERROR_API_VERSION:
	case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF:
	case PXP_STATUS_PLATFCONFIG_KF1_BAD:
	case PXP_STATUS_PLATFCONFIG_FIXED_KF1_NOT_SUPPORTED:
		return true;
	default:
		break;
	}
	return false;
}

static const char *
fw_err_to_string(u32 type)
{
	switch (type) {
	case PXP_STATUS_ERROR_API_VERSION:
		return "ERR_API_VERSION";
	case PXP_STATUS_NOT_READY:
		return "ERR_NOT_READY";
	case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF:
	case PXP_STATUS_PLATFCONFIG_KF1_BAD:
	case PXP_STATUS_PLATFCONFIG_FIXED_KF1_NOT_SUPPORTED:
		return "ERR_PLATFORM_CONFIG";
	default:
		break;
	}
	return NULL;
}

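/* Submit a batch on the GSCCS and wait up to 1 second for it to complete */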
static int pxp_pkt_submit(struct xe_exec_queue *q, u64 batch_addr)
{
	struct xe_gt *gt = q->gt;
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_sched_job *job;
	struct dma_fence *fence;
	long timeout;

	xe_assert(xe, q->hwe->engine_id == XE_HW_ENGINE_GSCCS0);

	job = xe_sched_job_create(q, &batch_addr);
	if (IS_ERR(job))
		return PTR_ERR(job);

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

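/*
 * The batch is a single GSC_HECI_CMD_PKT, which carries the addresses and
 * sizes of the input and output message buffers and is followed by a batch
 * buffer end.
 */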
static void emit_pxp_heci_cmd(struct xe_device *xe, struct iosys_map *batch,
			      u64 addr_in, u32 size_in, u64 addr_out, u32 size_out)
{
	u32 len = 0;

	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, GSC_HECI_CMD_PKT);
	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, lower_32_bits(addr_in));
	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, upper_32_bits(addr_in));
	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, size_in);
	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, lower_32_bits(addr_out));
	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, upper_32_bits(addr_out));
	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, size_out);
	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, 0);
	xe_map_wr(xe, batch, len++ * sizeof(u32), u32, MI_BATCH_BUFFER_END);
}

#define GSC_PENDING_RETRY_MAXCOUNT 40
#define GSC_PENDING_RETRY_PAUSE_MS 50
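/*
 * Send a PXP message to the GSC FW via the GSCCS and copy the reply back to
 * msg_out, retrying the submission if the FW reports the request as pending.
 */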
static int gsccs_send_message(struct xe_pxp_gsc_client_resources *gsc_res,
			      void *msg_in, size_t msg_in_size,
			      void *msg_out, size_t msg_out_size_max)
{
	struct xe_device *xe = gsc_res->vm->xe;
	const size_t max_msg_size = gsc_res->inout_size - sizeof(struct intel_gsc_mtl_header);
	u32 wr_offset;
	u32 rd_offset;
	u32 reply_size;
	u32 min_reply_size = 0;
	int ret;
	int retry = GSC_PENDING_RETRY_MAXCOUNT;

	if (msg_in_size > max_msg_size || msg_out_size_max > max_msg_size)
		return -ENOSPC;

	wr_offset = xe_gsc_emit_header(xe, &gsc_res->msg_in, 0,
				       HECI_MEADDRESS_PXP,
				       gsc_res->host_session_handle,
				       msg_in_size);

	/* NOTE: zero size packets are used for session-cleanups */
	if (msg_in && msg_in_size) {
		xe_map_memcpy_to(xe, &gsc_res->msg_in, wr_offset,
				 msg_in, msg_in_size);
		min_reply_size = sizeof(struct pxp_cmd_header);
	}

	/* Make sure the reply header does not contain stale data */
	xe_gsc_poison_header(xe, &gsc_res->msg_out, 0);

	/*
	 * The BO is mapped at address 0 of the PPGTT, so no need to add its
	 * base offset when calculating the in/out addresses.
	 */
	emit_pxp_heci_cmd(xe, &gsc_res->batch, PXP_BB_SIZE,
			  wr_offset + msg_in_size, PXP_BB_SIZE + gsc_res->inout_size,
			  wr_offset + msg_out_size_max);

	xe_device_wmb(xe);

	/*
	 * If the GSC needs to communicate with CSME to complete our request,
	 * it'll set the "pending" flag in the return header. In this scenario
	 * we're expected to wait 50ms to give some time to the proxy code to
	 * handle the GSC<->CSME communication and then try again. Note that,
	 * although in most cases the 50ms window is enough, the proxy flow is
	 * not actually guaranteed to complete within that time period, so we
	 * might have to try multiple times, up to a worst case of 2 seconds,
	 * after which the request is considered aborted.
	 */
	do {
		ret = pxp_pkt_submit(gsc_res->q, 0);
		if (ret)
			break;

		if (xe_gsc_check_and_update_pending(xe, &gsc_res->msg_in, 0,
						    &gsc_res->msg_out, 0)) {
			ret = -EAGAIN;
			msleep(GSC_PENDING_RETRY_PAUSE_MS);
		}
	} while (--retry && ret == -EAGAIN);

	if (ret) {
		drm_err(&xe->drm, "failed to submit GSC PXP message (%pe)\n", ERR_PTR(ret));
		return ret;
	}

	ret = xe_gsc_read_out_header(xe, &gsc_res->msg_out, 0,
				     min_reply_size, &rd_offset);
	if (ret) {
		drm_err(&xe->drm, "invalid GSC reply for PXP (%pe)\n", ERR_PTR(ret));
		return ret;
	}

	if (msg_out && min_reply_size) {
		reply_size = xe_map_rd_field(xe, &gsc_res->msg_out, rd_offset,
					     struct pxp_cmd_header, buffer_len);
		reply_size += sizeof(struct pxp_cmd_header);

		if (reply_size > msg_out_size_max) {
			drm_warn(&xe->drm, "PXP reply size overflow: %u (%zu)\n",
				 reply_size, msg_out_size_max);
			reply_size = msg_out_size_max;
		}

		xe_map_memcpy_from(xe, msg_out, &gsc_res->msg_out,
				   rd_offset, reply_size);
	}

	xe_gsc_poison_header(xe, &gsc_res->msg_in, 0);

	return ret;
}

/**
 * xe_pxp_submit_session_init - submits a PXP GSC session initialization
 * @gsc_res: the pxp client resources
 * @id: the session to initialize
 *
 * Submit a message to the GSC FW to initialize (i.e. start) a PXP session.
 *
 * Returns 0 if the submission is successful, an errno value otherwise.
 */
int xe_pxp_submit_session_init(struct xe_pxp_gsc_client_resources *gsc_res, u32 id)
{
	struct xe_device *xe = gsc_res->vm->xe;
	struct pxp43_create_arb_in msg_in = {0};
	struct pxp43_create_arb_out msg_out = {0};
	int ret;

	msg_in.header.api_version = PXP_APIVER(4, 3);
	msg_in.header.command_id = PXP43_CMDID_INIT_SESSION;
	msg_in.header.stream_id = (FIELD_PREP(PXP43_INIT_SESSION_APPID, id) |
				   FIELD_PREP(PXP43_INIT_SESSION_VALID, 1) |
				   FIELD_PREP(PXP43_INIT_SESSION_APPTYPE, 0));
	msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);

	if (id == DRM_XE_PXP_HWDRM_DEFAULT_SESSION)
		msg_in.protection_mode = PXP43_INIT_SESSION_PROTECTION_ARB;

	ret = gsccs_send_message(gsc_res, &msg_in, sizeof(msg_in),
				 &msg_out, sizeof(msg_out));
	if (ret) {
		drm_err(&xe->drm, "Failed to init PXP session %u (%pe)\n", id, ERR_PTR(ret));
	} else if (msg_out.header.status != 0) {
		ret = -EIO;

		if (is_fw_err_platform_config(msg_out.header.status))
			drm_info_once(&xe->drm,
				      "Failed to init PXP session %u due to BIOS/SOC, s=0x%x(%s)\n",
				      id, msg_out.header.status,
				      fw_err_to_string(msg_out.header.status));
		else
			drm_dbg(&xe->drm, "Failed to init PXP session %u, s=0x%x\n",
				id, msg_out.header.status);
	}

	return ret;
}

/**
 * xe_pxp_submit_session_invalidation - submits a PXP GSC invalidation
 * @gsc_res: the pxp client resources
 * @id: the session to invalidate
 *
 * Submit a message to the GSC FW to notify it that a session has been
 * terminated and is therefore invalid.
 *
 * Returns 0 if the submission is successful, an errno value otherwise.
 */
int xe_pxp_submit_session_invalidation(struct xe_pxp_gsc_client_resources *gsc_res, u32 id)
{
	struct xe_device *xe = gsc_res->vm->xe;
	struct pxp43_inv_stream_key_in msg_in = {0};
	struct pxp43_inv_stream_key_out msg_out = {0};
	int ret = 0;

	/*
	 * Stream key invalidation reuses the version 4.2 input/output command
	 * format, but the firmware requires the 4.3 API interaction.
	 */
	msg_in.header.api_version = PXP_APIVER(4, 3);
	msg_in.header.command_id = PXP43_CMDID_INVALIDATE_STREAM_KEY;
	msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);

	msg_in.header.stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1);
	msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0);
	msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, id);

	ret = gsccs_send_message(gsc_res, &msg_in, sizeof(msg_in),
				 &msg_out, sizeof(msg_out));
	if (ret) {
		drm_err(&xe->drm, "Failed to invalidate PXP stream-key %u (%pe)\n",
			id, ERR_PTR(ret));
	} else if (msg_out.header.status != 0) {
		ret = -EIO;

		if (is_fw_err_platform_config(msg_out.header.status))
			drm_info_once(&xe->drm,
				      "Failed to invalidate PXP stream-key %u: BIOS/SOC 0x%08x(%s)\n",
				      id, msg_out.header.status,
				      fw_err_to_string(msg_out.header.status));
		else
			drm_dbg(&xe->drm, "Failed to invalidate stream-key %u, s=0x%08x\n",
				id, msg_out.header.status);
	}

	return ret;
}