// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_ct.h"

#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>
#include <linux/fault-inject.h>

#include <kunit/static_stub.h>

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"
#include "xe_bo.h"
#include "xe_devcoredump.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_monitor.h"
#include "xe_gt_sriov_printk.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_log.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_trace_guc.h"

static void receive_g2h(struct xe_guc_ct *ct);
static void g2h_worker_func(struct work_struct *w);
static void safe_mode_worker_func(struct work_struct *w);
static void ct_exit_safe_mode(struct xe_guc_ct *ct);

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
enum {
	/* Internal states, not error conditions */
	CT_DEAD_STATE_REARM,			/* 0x0001 */
	CT_DEAD_STATE_CAPTURE,			/* 0x0002 */

	/* Error conditions */
	CT_DEAD_SETUP,				/* 0x0004 */
	CT_DEAD_H2G_WRITE,			/* 0x0008 */
	CT_DEAD_H2G_HAS_ROOM,			/* 0x0010 */
	CT_DEAD_G2H_READ,			/* 0x0020 */
	CT_DEAD_G2H_RECV,			/* 0x0040 */
	CT_DEAD_G2H_RELEASE,			/* 0x0080 */
	CT_DEAD_DEADLOCK,			/* 0x0100 */
	CT_DEAD_PROCESS_FAILED,			/* 0x0200 */
	CT_DEAD_FAST_G2H,			/* 0x0400 */
	CT_DEAD_PARSE_G2H_RESPONSE,		/* 0x0800 */
	CT_DEAD_PARSE_G2H_UNKNOWN,		/* 0x1000 */
	CT_DEAD_PARSE_G2H_ORIGIN,		/* 0x2000 */
	CT_DEAD_PARSE_G2H_TYPE,			/* 0x4000 */
	CT_DEAD_CRASH,				/* 0x8000 */
};

static void ct_dead_worker_func(struct work_struct *w);
static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);

#define CT_DEAD(ct, ctb, reason_code)		ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code)
#else
#define CT_DEAD(ct, ctb, reason)			\
	do {						\
		struct guc_ctb *_ctb = (ctb);		\
		if (_ctb)				\
			_ctb->info.broken = true;	\
	} while (0)
#endif

/* Used when a CT send wants to block and / or receive data */
struct g2h_fence {
	u32 *response_buffer;
	u32 seqno;
	u32 response_data;
	u16 response_len;
	u16 error;
	u16 hint;
	u16 reason;
	bool cancel;
	bool retry;
	bool fail;
	bool done;
};

#define make_u64(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))

static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
{
	memset(g2h_fence, 0, sizeof(*g2h_fence));
	g2h_fence->response_buffer = response_buffer;
	g2h_fence->seqno = ~0x0;
}

static void g2h_fence_cancel(struct g2h_fence *g2h_fence)
{
	g2h_fence->cancel = true;
	g2h_fence->fail = true;
	g2h_fence->done = true;
}

static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
{
	return g2h_fence->seqno == ~0x0;
}
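
/*
 * Typical blocking usage, as a sketch of what guc_ct_send_recv() further down
 * does (this is not an additional API): a g2h_fence lives on the sender's
 * stack, is registered in ct->fence_lookup under its seqno, and the sender
 * sleeps on ct->g2h_fence_wq until the G2H handler marks it done:
 *
 *	struct g2h_fence g2h_fence;
 *
 *	g2h_fence_init(&g2h_fence, response_buffer);
 *	... send the H2G carrying g2h_fence.seqno ...
 *	wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
 *	... then inspect g2h_fence.fail / .retry / .response_data ...
 */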

static struct xe_guc *
ct_to_guc(struct xe_guc_ct *ct)
{
	return container_of(ct, struct xe_guc, ct);
}

static struct xe_gt *
ct_to_gt(struct xe_guc_ct *ct)
{
	return container_of(ct, struct xe_gt, uc.guc.ct);
}

static struct xe_device *
ct_to_xe(struct xe_guc_ct *ct)
{
	return gt_to_xe(ct_to_gt(ct));
}

/**
 * DOC: GuC CTB Blob
 *
 * We allocate a single blob to hold both CTB descriptors and buffers:
 *
 *      +--------+-----------------------------------------------+------+
 *      | offset | contents                                      | size |
 *      +========+===============================================+======+
 *      | 0x0000 | H2G CTB Descriptor (send)                     |      |
 *      +--------+-----------------------------------------------+  4K  |
 *      | 0x0800 | G2H CTB Descriptor (g2h)                      |      |
 *      +--------+-----------------------------------------------+------+
 *      | 0x1000 | H2G CT Buffer (send)                          | n*4K |
 *      |        |                                               |      |
 *      +--------+-----------------------------------------------+------+
 *      | 0x1000 | G2H CT Buffer (g2h)                           | m*4K |
 *      | + n*4K |                                               |      |
 *      +--------+-----------------------------------------------+------+
 *
 * Size of each ``CT Buffer`` must be a multiple of 4K.
 * We don't expect too many messages in flight at any time, unless we are
 * using GuC submission. In that case each request requires a minimum of
 * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
 * is enough space to avoid backpressure on the driver. We increase the size
 * of the receive buffer (relative to the send) to ensure a G2H response
 * CTB has a landing spot.
 *
 * In addition to submissions, the G2H buffer needs enough space for
 * recoverable page fault notifications. The number of page faults is
 * interrupt driven and can be as much as the number of compute resources
 * available. However, most of the actual work for these is done in a
 * separate page fault worker thread. Therefore we only need to make sure
 * the queue has enough space to handle all of the submissions and
 * responses plus an extra buffer for incoming page faults.
 */

#define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
#define CTB_H2G_BUFFER_SIZE	(SZ_4K)
#define CTB_G2H_BUFFER_SIZE	(SZ_128K)
#define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 2)
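
/*
 * Worked example of the blob layout described in the DOC comment above (a
 * sketch, assuming struct guc_ct_buffer_desc fits within 2K so that
 * CTB_DESC_SIZE == SZ_2K, as the table implies):
 *
 *	0x0000: H2G CTB descriptor
 *	0x0800: G2H CTB descriptor
 *	0x1000: H2G buffer, CTB_H2G_BUFFER_SIZE = 4K
 *	0x2000: G2H buffer, CTB_G2H_BUFFER_SIZE = 128K, half of which is
 *		held back as G2H_ROOM_BUFFER_SIZE credits
 *
 * for a total blob of 2 * 2K + 4K + 128K = 136K from guc_ct_size().
 */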

/**
 * xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full
 * CT command queue
 * @ct: the &xe_guc_ct. Unused at this moment but will be used in the future.
 *
 * Observation is that a 4KiB buffer full of commands takes a little over a
 * second to process. Use that to calculate maximum time to process a full CT
 * command queue.
 *
 * Return: Maximum time to process a full CT queue in jiffies.
 */
long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
{
	BUILD_BUG_ON(!IS_ALIGNED(CTB_H2G_BUFFER_SIZE, SZ_4));
	return (CTB_H2G_BUFFER_SIZE / SZ_4K) * HZ;
}
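
/*
 * With the 4K CTB_H2G_BUFFER_SIZE defined above, the helper currently
 * evaluates to 1 * HZ, i.e. roughly one second, matching the observation in
 * the kernel-doc.
 */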

static size_t guc_ct_size(void)
{
	return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
		CTB_G2H_BUFFER_SIZE;
}

static void guc_ct_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc_ct *ct = arg;

	ct_exit_safe_mode(ct);
	destroy_workqueue(ct->g2h_wq);
	xa_destroy(&ct->fence_lookup);
}

static void primelockdep(struct xe_guc_ct *ct)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&ct->lock);
	fs_reclaim_release(GFP_KERNEL);
}

int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	int err;

	xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));

	ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM);
	if (!ct->g2h_wq)
		return -ENOMEM;

	spin_lock_init(&ct->fast_lock);
	xa_init(&ct->fence_lookup);
	INIT_WORK(&ct->g2h_worker, g2h_worker_func);
	INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
	spin_lock_init(&ct->dead.lock);
	INIT_WORK(&ct->dead.worker, ct_dead_worker_func);
#endif
	init_waitqueue_head(&ct->wq);
	init_waitqueue_head(&ct->g2h_fence_wq);

	err = drmm_mutex_init(&xe->drm, &ct->lock);
	if (err)
		return err;

	primelockdep(ct);

	err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
	if (err)
		return err;

	xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
	ct->state = XE_GUC_CT_STATE_DISABLED;
	return 0;
}
ALLOW_ERROR_INJECTION(xe_guc_ct_init_noalloc, ERRNO); /* See xe_pci_probe() */

int xe_guc_ct_init(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;

	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE |
					  XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ct->bo = bo;
	return 0;
}
ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */

#define desc_read(xe_, guc_ctb__, field_)			\
	xe_map_rd_field(xe_, &guc_ctb__->desc, 0,		\
			struct guc_ct_buffer_desc, field_)

#define desc_write(xe_, guc_ctb__, field_, val_)		\
	xe_map_wr_field(xe_, &guc_ctb__->desc, 0,		\
			struct guc_ct_buffer_desc, field_, val_)

static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
				struct iosys_map *map)
{
	h2g->info.size = CTB_H2G_BUFFER_SIZE / sizeof(u32);
	h2g->info.resv_space = 0;
	h2g->info.tail = 0;
	h2g->info.head = 0;
	h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
				     h2g->info.size) -
			  h2g->info.resv_space;
	h2g->info.broken = false;

	h2g->desc = *map;
	xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));

	h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2);
}
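
/*
 * Note: CIRC_SPACE() always keeps one slot free, so a freshly initialised H2G
 * buffer reports size - 1 dwords of space (1023 dwords for the 4K buffer),
 * while guc_ct_ctb_g2h_init() below additionally subtracts the reserved
 * G2H room.
 */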

static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
				struct iosys_map *map)
{
	g2h->info.size = CTB_G2H_BUFFER_SIZE / sizeof(u32);
	g2h->info.resv_space = G2H_ROOM_BUFFER_SIZE / sizeof(u32);
	g2h->info.head = 0;
	g2h->info.tail = 0;
	g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head,
				     g2h->info.size) -
			  g2h->info.resv_space;
	g2h->info.broken = false;

	g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
	xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));

	g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 +
					  CTB_H2G_BUFFER_SIZE);
}

static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
{
	struct xe_guc *guc = ct_to_guc(ct);
	u32 desc_addr, ctb_addr, size;
	int err;

	desc_addr = xe_bo_ggtt_addr(ct->bo);
	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2;
	size = ct->ctbs.h2g.info.size * sizeof(u32);

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
				desc_addr);
	if (err)
		return err;

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
				ctb_addr);
	if (err)
		return err;

	return xe_guc_self_cfg32(guc,
				 GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY,
				 size);
}

static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
{
	struct xe_guc *guc = ct_to_guc(ct);
	u32 desc_addr, ctb_addr, size;
	int err;

	desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
	ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 +
		CTB_H2G_BUFFER_SIZE;
	size = ct->ctbs.g2h.info.size * sizeof(u32);

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
				desc_addr);
	if (err)
		return err;

	err = xe_guc_self_cfg64(guc,
				GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
				ctb_addr);
	if (err)
		return err;

	return xe_guc_self_cfg32(guc,
				 GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
				 size);
}

static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
{
	u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_HOST2GUC_CONTROL_CTB),
		FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL,
			   enable ? GUC_CTB_CONTROL_ENABLE :
			   GUC_CTB_CONTROL_DISABLE),
	};
	int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static void guc_ct_change_state(struct xe_guc_ct *ct,
				enum xe_guc_ct_state state)
{
	struct xe_gt *gt = ct_to_gt(ct);
	struct g2h_fence *g2h_fence;
	unsigned long idx;

	mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */
	spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */

	xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
		     state == XE_GUC_CT_STATE_STOPPED);

	if (ct->g2h_outstanding)
		xe_pm_runtime_put(ct_to_xe(ct));
	ct->g2h_outstanding = 0;
	ct->state = state;

	xe_gt_dbg(gt, "GuC CT communication channel %s\n",
		  state == XE_GUC_CT_STATE_STOPPED ? "stopped" :
		  str_enabled_disabled(state == XE_GUC_CT_STATE_ENABLED));

	spin_unlock_irq(&ct->fast_lock);

	/* cancel all in-flight send-recv requests */
	xa_for_each(&ct->fence_lookup, idx, g2h_fence)
		g2h_fence_cancel(g2h_fence);

	/* make sure guc_ct_send_recv() will see g2h_fence changes */
	smp_mb();
	wake_up_all(&ct->g2h_fence_wq);

	/*
	 * Lockdep doesn't like this under the fast lock and the destroy only
	 * needs to be serialized with the send path, which the ct lock
	 * provides.
	 */
	xa_destroy(&ct->fence_lookup);

	mutex_unlock(&ct->lock);
}

static bool ct_needs_safe_mode(struct xe_guc_ct *ct)
{
	return !pci_dev_msi_enabled(to_pci_dev(ct_to_xe(ct)->drm.dev));
}

static bool ct_restart_safe_mode_worker(struct xe_guc_ct *ct)
{
	if (!ct_needs_safe_mode(ct))
		return false;

	queue_delayed_work(ct->g2h_wq, &ct->safe_mode_worker, HZ / 10);
	return true;
}

static void safe_mode_worker_func(struct work_struct *w)
{
	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, safe_mode_worker.work);

	receive_g2h(ct);

	if (!ct_restart_safe_mode_worker(ct))
		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode canceled\n");
}

static void ct_enter_safe_mode(struct xe_guc_ct *ct)
{
	if (ct_restart_safe_mode_worker(ct))
		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode enabled\n");
}

static void ct_exit_safe_mode(struct xe_guc_ct *ct)
{
	if (cancel_delayed_work_sync(&ct->safe_mode_worker))
		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n");
}

int xe_guc_ct_enable(struct xe_guc_ct *ct)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	int err;

	xe_gt_assert(gt, !xe_guc_ct_enabled(ct));

	xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
	guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
	guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);

	err = guc_ct_ctb_h2g_register(ct);
	if (err)
		goto err_out;

	err = guc_ct_ctb_g2h_register(ct);
	if (err)
		goto err_out;

	err = guc_ct_control_toggle(ct, true);
	if (err)
		goto err_out;

	guc_ct_change_state(ct, XE_GUC_CT_STATE_ENABLED);

	smp_mb();
	wake_up_all(&ct->wq);

	if (ct_needs_safe_mode(ct))
		ct_enter_safe_mode(ct);

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
	/*
	 * The CT has now been reset so the dumper can be re-armed
	 * after any existing dead state has been dumped.
	 */
	spin_lock_irq(&ct->dead.lock);
	if (ct->dead.reason) {
		ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
		queue_work(system_unbound_wq, &ct->dead.worker);
	}
	spin_unlock_irq(&ct->dead.lock);
#endif

	return 0;

err_out:
	xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
	CT_DEAD(ct, NULL, SETUP);

	return err;
}

static void stop_g2h_handler(struct xe_guc_ct *ct)
{
	cancel_work_sync(&ct->g2h_worker);
}

/**
 * xe_guc_ct_disable - Set GuC to disabled state
 * @ct: the &xe_guc_ct
 *
 * Set GuC CT to disabled state and stop g2h handler. No outstanding g2h expected
 * in this transition.
 */
void xe_guc_ct_disable(struct xe_guc_ct *ct)
{
	guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
	ct_exit_safe_mode(ct);
	stop_g2h_handler(ct);
}

/**
 * xe_guc_ct_stop - Set GuC to stopped state
 * @ct: the &xe_guc_ct
 *
 * Set GuC CT to stopped state, stop g2h handler, and clear any outstanding g2h
 */
void xe_guc_ct_stop(struct xe_guc_ct *ct)
{
	if (!xe_guc_ct_initialized(ct))
		return;

	guc_ct_change_state(ct, XE_GUC_CT_STATE_STOPPED);
	stop_g2h_handler(ct);
}

static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
{
	struct guc_ctb *h2g = &ct->ctbs.h2g;

	lockdep_assert_held(&ct->lock);

	if (cmd_len > h2g->info.space) {
		h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);

		if (h2g->info.head > h2g->info.size) {
			struct xe_device *xe = ct_to_xe(ct);
			u32 desc_status = desc_read(xe, h2g, status);

			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);

			xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u)\n",
				  h2g->info.head, h2g->info.size);
			CT_DEAD(ct, h2g, H2G_HAS_ROOM);
			return false;
		}

		h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
					     h2g->info.size) -
				  h2g->info.resv_space;
		if (cmd_len > h2g->info.space)
			return false;
	}

	return true;
}

static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
{
	if (!g2h_len)
		return true;

	lockdep_assert_held(&ct->fast_lock);

	return ct->ctbs.g2h.info.space > g2h_len;
}

static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
{
	lockdep_assert_held(&ct->lock);

	if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
		return -EBUSY;

	return 0;
}
606d2c5a5a9SJohn Harrison
h2g_reserve_space(struct xe_guc_ct * ct,u32 cmd_len)607d2c5a5a9SJohn Harrison static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
608d2c5a5a9SJohn Harrison {
609d2c5a5a9SJohn Harrison lockdep_assert_held(&ct->lock);
610d2c5a5a9SJohn Harrison ct->ctbs.h2g.info.space -= cmd_len;
611d2c5a5a9SJohn Harrison }
612d2c5a5a9SJohn Harrison
__g2h_reserve_space(struct xe_guc_ct * ct,u32 g2h_len,u32 num_g2h)613d2c5a5a9SJohn Harrison static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
614d2c5a5a9SJohn Harrison {
615dd08ebf6SMatthew Brost xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
616a7ca8157SRodrigo Vivi xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) ||
617d930c19fSMatthew Brost (g2h_len && num_g2h));
618d930c19fSMatthew Brost
619dd08ebf6SMatthew Brost if (g2h_len) {
620dd08ebf6SMatthew Brost lockdep_assert_held(&ct->fast_lock);
621dd08ebf6SMatthew Brost
622dd08ebf6SMatthew Brost if (!ct->g2h_outstanding)
623dd08ebf6SMatthew Brost xe_pm_runtime_get_noresume(ct_to_xe(ct));
624dd08ebf6SMatthew Brost
625dd08ebf6SMatthew Brost ct->ctbs.g2h.info.space -= g2h_len;
626dd08ebf6SMatthew Brost ct->g2h_outstanding += num_g2h;
627dd08ebf6SMatthew Brost }
628c7fac450SAlan Previn }
629c7fac450SAlan Previn
__g2h_release_space(struct xe_guc_ct * ct,u32 g2h_len)630dd08ebf6SMatthew Brost static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
631dd08ebf6SMatthew Brost {
632dd08ebf6SMatthew Brost bool bad = false;
633dd08ebf6SMatthew Brost
6349c1857d5SMichal Wajdeczko lockdep_assert_held(&ct->fast_lock);
635dd08ebf6SMatthew Brost
636c7fac450SAlan Previn bad = ct->ctbs.g2h.info.space + g2h_len >
637a7ca8157SRodrigo Vivi ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space;
638c7fac450SAlan Previn bad |= !ct->g2h_outstanding;
639dd08ebf6SMatthew Brost
640dd08ebf6SMatthew Brost if (bad) {
641d2c5a5a9SJohn Harrison xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n",
642dd08ebf6SMatthew Brost ct->ctbs.g2h.info.space, g2h_len,
643c7fac450SAlan Previn ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space,
644c7fac450SAlan Previn ct->ctbs.g2h.info.space + g2h_len,
645dd08ebf6SMatthew Brost ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space,
6469c1857d5SMichal Wajdeczko ct->g2h_outstanding);
647d2c5a5a9SJohn Harrison CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE);
648d2c5a5a9SJohn Harrison return;
649d2c5a5a9SJohn Harrison }
650d2c5a5a9SJohn Harrison
651d2c5a5a9SJohn Harrison ct->ctbs.g2h.info.space += g2h_len;
652d2c5a5a9SJohn Harrison if (!--ct->g2h_outstanding)
653d2c5a5a9SJohn Harrison xe_pm_runtime_put(ct_to_xe(ct));
654d2c5a5a9SJohn Harrison }
655d2c5a5a9SJohn Harrison
g2h_release_space(struct xe_guc_ct * ct,u32 g2h_len)656d2c5a5a9SJohn Harrison static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
657d2c5a5a9SJohn Harrison {
658d2c5a5a9SJohn Harrison spin_lock_irq(&ct->fast_lock);
659d2c5a5a9SJohn Harrison __g2h_release_space(ct, g2h_len);
660d2c5a5a9SJohn Harrison spin_unlock_irq(&ct->fast_lock);
661d2c5a5a9SJohn Harrison }
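
/*
 * Credit accounting summary: __g2h_reserve_space() runs on the send side
 * under fast_lock and takes a runtime PM reference when the first G2H
 * becomes outstanding; __g2h_release_space() returns credits as G2H messages
 * are processed and drops the reference once the outstanding count hits
 * zero. A release that would exceed the usable size marks the channel dead
 * via CT_DEAD().
 */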

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
{
	unsigned int slot = fence % ARRAY_SIZE(ct->fast_req);
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
	unsigned long entries[SZ_32];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* May be called under spinlock, so avoid sleeping */
	ct->fast_req[slot].stack = stack_depot_save(entries, n, GFP_NOWAIT);
#endif
	ct->fast_req[slot].fence = fence;
	ct->fast_req[slot].action = action;
}
#else
static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
{
}
#endif

/*
 * The CT protocol accepts a 16-bit fence. This field is fully owned by the
 * driver; the GuC will just copy it to the reply message. Since we need to
 * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
 * we use one bit of the seqno as an indicator for that and a rolling counter
 * for the remaining 15 bits.
 */
#define CT_SEQNO_MASK GENMASK(14, 0)
#define CT_SEQNO_UNTRACKED BIT(15)
static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
{
	u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;

	if (!is_g2h_fence)
		seqno |= CT_SEQNO_UNTRACKED;

	return seqno;
}
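
/*
 * Example encoding (illustrative only): if ct->fence_seqno is at 5, a tracked
 * send-recv request gets seqno 0x0005 while the following FAST_REQUEST gets
 * 0x0006 | CT_SEQNO_UNTRACKED = 0x8006. The rolling counter is masked to
 * 15 bits, so the value always fits the 16-bit CTB fence field.
 */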

#define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */

static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
		     u32 ct_fence_value, bool want_response)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	struct guc_ctb *h2g = &ct->ctbs.h2g;
	u32 cmd[H2G_CT_HEADERS];
	u32 tail = h2g->info.tail;
	u32 full_len;
	struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
						     tail * sizeof(u32));
	u32 desc_status;

	full_len = len + GUC_CTB_HDR_LEN;

	lockdep_assert_held(&ct->lock);
	xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);

	desc_status = desc_read(xe, h2g, status);
	if (desc_status) {
		xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status);
		goto corrupted;
	}

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		u32 desc_tail = desc_read(xe, h2g, tail);
		u32 desc_head = desc_read(xe, h2g, head);

		if (tail != desc_tail) {
			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH);
			xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail);
			goto corrupted;
		}

		if (tail > h2g->info.size) {
			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
			xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n",
				  tail, h2g->info.size);
			goto corrupted;
		}

		if (desc_head >= h2g->info.size) {
			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
			xe_gt_err(gt, "CT write: invalid head offset %u >= %u)\n",
				  desc_head, h2g->info.size);
			goto corrupted;
		}
	}

	/* Command will wrap, zero fill (NOPs), return and check credits again */
	if (tail + full_len > h2g->info.size) {
		xe_map_memset(xe, &map, 0, 0,
			      (h2g->info.size - tail) * sizeof(u32));
		h2g_reserve_space(ct, (h2g->info.size - tail));
		h2g->info.tail = 0;
		desc_write(xe, h2g, tail, h2g->info.tail);

		return -EAGAIN;
	}

	/*
	 * dw0: CT header (including fence)
	 * dw1: HXG header (including action code)
	 * dw2+: action data
	 */
	cmd[0] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
		FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
		FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
	if (want_response) {
		cmd[1] =
			FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
			FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
				   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
	} else {
		fast_req_track(ct, ct_fence_value,
			       FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0]));

		cmd[1] =
			FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
			FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
				   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
	}

	/* H2G header in cmd[1] replaces action[0] so: */
	--len;
	++action;

	/* Write H2G ensuring visible before descriptor update */
	xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
	xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
	xe_device_wmb(xe);

	/* Update local copies */
	h2g->info.tail = (tail + full_len) % h2g->info.size;
	h2g_reserve_space(ct, full_len);

	/* Update descriptor */
	desc_write(xe, h2g, tail, h2g->info.tail);

	trace_xe_guc_ctb_h2g(xe, gt->info.id, *(action - 1), full_len,
			     desc_read(xe, h2g, head), h2g->info.tail);

	return 0;

corrupted:
	CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE);
	return -EPIPE;
}

static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 g2h_len, u32 num_g2h,
				struct g2h_fence *g2h_fence)
{
	struct xe_gt *gt __maybe_unused = ct_to_gt(ct);
	u16 seqno;
	int ret;

	xe_gt_assert(gt, xe_guc_ct_initialized(ct));
	xe_gt_assert(gt, !g2h_len || !g2h_fence);
	xe_gt_assert(gt, !num_g2h || !g2h_fence);
	xe_gt_assert(gt, !g2h_len || num_g2h);
	xe_gt_assert(gt, g2h_len || !num_g2h);
	lockdep_assert_held(&ct->lock);

	if (unlikely(ct->ctbs.h2g.info.broken)) {
		ret = -EPIPE;
		goto out;
	}

	if (ct->state == XE_GUC_CT_STATE_DISABLED) {
		ret = -ENODEV;
		goto out;
	}

	if (ct->state == XE_GUC_CT_STATE_STOPPED) {
		ret = -ECANCELED;
		goto out;
	}

	xe_gt_assert(gt, xe_guc_ct_enabled(ct));

	if (g2h_fence) {
		g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
		num_g2h = 1;

		if (g2h_fence_needs_alloc(g2h_fence)) {
			g2h_fence->seqno = next_ct_seqno(ct, true);
			ret = xa_err(xa_store(&ct->fence_lookup,
					      g2h_fence->seqno, g2h_fence,
					      GFP_ATOMIC));
			if (ret)
				goto out;
		}

		seqno = g2h_fence->seqno;
	} else {
		seqno = next_ct_seqno(ct, false);
	}

	if (g2h_len)
		spin_lock_irq(&ct->fast_lock);
retry:
	ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
	if (unlikely(ret))
		goto out_unlock;

	ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
	if (unlikely(ret)) {
		if (ret == -EAGAIN)
			goto retry;
		goto out_unlock;
	}

	__g2h_reserve_space(ct, g2h_len, num_g2h);
	xe_guc_notify(ct_to_guc(ct));
out_unlock:
	if (g2h_len)
		spin_unlock_irq(&ct->fast_lock);
out:
	return ret;
}

static void kick_reset(struct xe_guc_ct *ct)
{
	xe_gt_reset_async(ct_to_gt(ct));
}

static int dequeue_one_g2h(struct xe_guc_ct *ct);

static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			      u32 g2h_len, u32 num_g2h,
			      struct g2h_fence *g2h_fence)
{
	struct xe_device *xe = ct_to_xe(ct);
	struct xe_gt *gt = ct_to_gt(ct);
	unsigned int sleep_period_ms = 1;
	int ret;

	xe_gt_assert(gt, !g2h_len || !g2h_fence);
	lockdep_assert_held(&ct->lock);
	xe_device_assert_mem_access(ct_to_xe(ct));

try_again:
	ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
				   g2h_fence);

	/*
	 * We wait to try to restore credits for about 1 second before bailing.
	 * In the case of H2G credits we have no choice but just to wait for the
	 * GuC to consume H2Gs in the channel so we use a wait / sleep loop. In
	 * the case of G2H we process any G2H in the channel, hopefully freeing
	 * credits as we consume the G2H messages.
	 */
	if (unlikely(ret == -EBUSY &&
		     !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) {
		struct guc_ctb *h2g = &ct->ctbs.h2g;

		if (sleep_period_ms == 1024)
			goto broken;

		trace_xe_guc_ct_h2g_flow_control(xe, h2g->info.head, h2g->info.tail,
						 h2g->info.size,
						 h2g->info.space,
						 len + GUC_CTB_HDR_LEN);
		msleep(sleep_period_ms);
		sleep_period_ms <<= 1;

		goto try_again;
	} else if (unlikely(ret == -EBUSY)) {
		struct xe_device *xe = ct_to_xe(ct);
		struct guc_ctb *g2h = &ct->ctbs.g2h;

		trace_xe_guc_ct_g2h_flow_control(xe, g2h->info.head,
						 desc_read(xe, g2h, tail),
						 g2h->info.size,
						 g2h->info.space,
						 g2h_fence ?
						 GUC_CTB_HXG_MSG_MAX_LEN :
						 g2h_len);

#define g2h_avail(ct)	\
	(desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
		if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
					g2h_avail(ct), HZ))
			goto broken;
#undef g2h_avail

		ret = dequeue_one_g2h(ct);
		if (ret < 0) {
			if (ret != -ECANCELED)
				xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)",
					  ERR_PTR(ret));
			goto broken;
		}

		goto try_again;
	}

	return ret;

broken:
	xe_gt_err(gt, "No forward progress on H2G, reset required\n");
	CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK);

	return -EDEADLK;
}
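
/*
 * Note on the "about 1 second" above: the H2G back-off sleeps
 * 1 + 2 + 4 + ... + 512 ms (roughly one second in total) and gives up once
 * sleep_period_ms reaches 1024, at which point the channel is declared
 * deadlocked and the callers below request a GT reset via kick_reset().
 */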

static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		       u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
{
	int ret;

	xe_gt_assert(ct_to_gt(ct), !g2h_len || !g2h_fence);

	mutex_lock(&ct->lock);
	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
	mutex_unlock(&ct->lock);

	return ret;
}

int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		   u32 g2h_len, u32 num_g2h)
{
	int ret;

	ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			  u32 g2h_len, u32 num_g2h)
{
	int ret;

	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
{
	int ret;

	lockdep_assert_held(&ct->lock);

	ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
	if (ret == -EDEADLK)
		kick_reset(ct);

	return ret;
}

/*
 * Check if a GT reset is in progress or will occur and, if so, whether the
 * reset brought the CT back up. Five seconds is picked somewhat arbitrarily
 * as an upper limit for a GT reset to complete.
 */
static bool retry_failure(struct xe_guc_ct *ct, int ret)
{
	if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
		return false;

#define ct_alive(ct)	\
	(xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
	 !ct->ctbs.g2h.info.broken)
	if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
		return false;
#undef ct_alive

	return true;
}

static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			    u32 *response_buffer, bool no_fail)
{
	struct xe_gt *gt = ct_to_gt(ct);
	struct g2h_fence g2h_fence;
	int ret = 0;

	/*
	 * We use a fence to implement blocking sends / receiving response data.
	 * The seqno of the fence is sent in the H2G, returned in the G2H, and
	 * an xarray is used as storage media with the seqno being the key.
	 * Fields in the fence hold success, failure, retry status and the
	 * response data. It is safe to allocate on the stack as the xarray is
	 * the only reference and it cannot be present after this function
	 * exits.
	 */
1058dd08ebf6SMatthew Brost retry:
1059dd08ebf6SMatthew Brost g2h_fence_init(&g2h_fence, response_buffer);
106052789ce3SMatthew Auld retry_same_fence:
106152789ce3SMatthew Auld ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
106252789ce3SMatthew Auld if (unlikely(ret == -ENOMEM)) {
106352789ce3SMatthew Auld /* Retry allocation with GFP_KERNEL */
106452789ce3SMatthew Auld ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
106552789ce3SMatthew Auld &g2h_fence, GFP_KERNEL));
1066dd08ebf6SMatthew Brost if (ret)
1067dd08ebf6SMatthew Brost return ret;
10686af7ee08SMichal Wajdeczko
10696af7ee08SMichal Wajdeczko goto retry_same_fence;
10706af7ee08SMichal Wajdeczko } else if (unlikely(ret)) {
10716af7ee08SMichal Wajdeczko if (ret == -EDEADLK)
10726af7ee08SMichal Wajdeczko kick_reset(ct);
10736af7ee08SMichal Wajdeczko
10746af7ee08SMichal Wajdeczko if (no_fail && retry_failure(ct, ret))
10756af7ee08SMichal Wajdeczko goto retry_same_fence;
10766af7ee08SMichal Wajdeczko
10776af7ee08SMichal Wajdeczko if (!g2h_fence_needs_alloc(&g2h_fence))
10786af7ee08SMichal Wajdeczko xa_erase(&ct->fence_lookup, g2h_fence.seqno);
10796af7ee08SMichal Wajdeczko
10806af7ee08SMichal Wajdeczko return ret;
10816af7ee08SMichal Wajdeczko }
10826af7ee08SMichal Wajdeczko
10836af7ee08SMichal Wajdeczko ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
10846af7ee08SMichal Wajdeczko if (!ret) {
10856af7ee08SMichal Wajdeczko LNL_FLUSH_WORK(&ct->g2h_worker);
1086dd08ebf6SMatthew Brost if (g2h_fence.done) {
1087dd08ebf6SMatthew Brost xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
1088dd08ebf6SMatthew Brost g2h_fence.seqno, action[0]);
10894469eae6SMichal Wajdeczko ret = 1;
1090dd08ebf6SMatthew Brost }
1091dd08ebf6SMatthew Brost }
1092*104080e3SSatyanarayana K V P
1093dd08ebf6SMatthew Brost /*
1094dd08ebf6SMatthew Brost * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
1095dd08ebf6SMatthew Brost * the stack, since we have no clue if it will fire after the timeout before we can erase
1096dd08ebf6SMatthew Brost * from the xa. Also we have some dependent loads and stores below for which we need the
1097dd08ebf6SMatthew Brost * correct ordering, and we lack the needed barriers.
1098dd08ebf6SMatthew Brost */
1099dd08ebf6SMatthew Brost mutex_lock(&ct->lock);
1100d4978a67SMichal Wajdeczko if (!ret) {
1101d4978a67SMichal Wajdeczko xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s",
1102d4978a67SMichal Wajdeczko g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
1103d4978a67SMichal Wajdeczko xa_erase(&ct->fence_lookup, g2h_fence.seqno);
1104d4978a67SMichal Wajdeczko mutex_unlock(&ct->lock);
1105d4978a67SMichal Wajdeczko return -ETIME;
1106d4978a67SMichal Wajdeczko }
1107d4978a67SMichal Wajdeczko
1108d4978a67SMichal Wajdeczko if (g2h_fence.retry) {
1109d4978a67SMichal Wajdeczko xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
1110dd08ebf6SMatthew Brost action[0], g2h_fence.reason);
1111dd08ebf6SMatthew Brost mutex_unlock(&ct->lock);
1112d4978a67SMichal Wajdeczko goto retry;
1113d4978a67SMichal Wajdeczko }
1114dd08ebf6SMatthew Brost if (g2h_fence.fail) {
1115dd08ebf6SMatthew Brost if (g2h_fence.cancel) {
1116dd08ebf6SMatthew Brost xe_gt_dbg(gt, "H2G request %#x canceled!\n", action[0]);
1117dd08ebf6SMatthew Brost ret = -ECANCELED;
1118dd08ebf6SMatthew Brost goto unlock;
1119dd08ebf6SMatthew Brost }
1120dd08ebf6SMatthew Brost xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n",
1121dd08ebf6SMatthew Brost action[0], g2h_fence.error, g2h_fence.hint);
1122dd08ebf6SMatthew Brost ret = -EIO;
1123dd08ebf6SMatthew Brost }
1124dd08ebf6SMatthew Brost
1125dd08ebf6SMatthew Brost if (ret > 0)
1126dd08ebf6SMatthew Brost ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;
1127dd08ebf6SMatthew Brost
11287d4d1c54SJohn Harrison unlock:
11297d4d1c54SJohn Harrison mutex_unlock(&ct->lock);
11307d4d1c54SJohn Harrison
11317d4d1c54SJohn Harrison return ret;
11327d4d1c54SJohn Harrison }
11337d4d1c54SJohn Harrison
11347d4d1c54SJohn Harrison /**
11357d4d1c54SJohn Harrison * xe_guc_ct_send_recv - Send and receive HXG to the GuC
11367d4d1c54SJohn Harrison * @ct: the &xe_guc_ct
11377d4d1c54SJohn Harrison * @action: the dword array with `HXG Request`_ message (can't be NULL)
11387d4d1c54SJohn Harrison * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
11397d4d1c54SJohn Harrison * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
11407d4d1c54SJohn Harrison *
11417d4d1c54SJohn Harrison * Send a `HXG Request`_ message to the GuC over CT communication channel and
11427d4d1c54SJohn Harrison * blocks until GuC replies with a `HXG Response`_ message.
11437d4d1c54SJohn Harrison *
11447d4d1c54SJohn Harrison * For non-blocking communication with GuC use xe_guc_ct_send().
11457d4d1c54SJohn Harrison *
1146dd08ebf6SMatthew Brost * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
1147dd08ebf6SMatthew Brost *
11480eb16fd2SDaniele Ceraolo Spurio * Return: response length (in dwords) if &response_buffer was not NULL, or
1149d4978a67SMichal Wajdeczko * DATA0 from `HXG Response`_ if &response_buffer was NULL, or
1150d4978a67SMichal Wajdeczko * a negative error code on failure.
1151dd08ebf6SMatthew Brost */
1152d4978a67SMichal Wajdeczko int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
1153dd08ebf6SMatthew Brost u32 *response_buffer)
1154dd08ebf6SMatthew Brost {
1155dd08ebf6SMatthew Brost KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
1156dd08ebf6SMatthew Brost return guc_ct_send_recv(ct, action, len, response_buffer, false);
11570eb16fd2SDaniele Ceraolo Spurio }
11580eb16fd2SDaniele Ceraolo Spurio ALLOW_ERROR_INJECTION(xe_guc_ct_send_recv, ERRNO);
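
/*
 * Illustrative usage only (a sketch, not part of the driver): a blocking
 * request where the caller only needs DATA0 of the `HXG Response`_. The
 * action array below is a hypothetical placeholder; real callers build it
 * from the GuC ABI headers.
 *
 *	u32 action[] = { SOME_GUC_ACTION, param };	// hypothetical action/param
 *	int ret = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), NULL);
 *
 *	if (ret < 0)
 *		return ret;	// CT failure or GuC returned an error
 *	// on success, ret holds DATA0 from the response
 */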
11590eb16fd2SDaniele Ceraolo Spurio
11600eb16fd2SDaniele Ceraolo Spurio int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
11610eb16fd2SDaniele Ceraolo Spurio u32 len, u32 *response_buffer)
11620eb16fd2SDaniele Ceraolo Spurio {
11630eb16fd2SDaniele Ceraolo Spurio return guc_ct_send_recv(ct, action, len, response_buffer, true);
11640eb16fd2SDaniele Ceraolo Spurio }
11650eb16fd2SDaniele Ceraolo Spurio
11660eb16fd2SDaniele Ceraolo Spurio static u32 *msg_to_hxg(u32 *msg)
11670eb16fd2SDaniele Ceraolo Spurio {
11680eb16fd2SDaniele Ceraolo Spurio return msg + GUC_CTB_MSG_MIN_LEN;
1169d4978a67SMichal Wajdeczko }
1170d4978a67SMichal Wajdeczko
11710eb16fd2SDaniele Ceraolo Spurio static u32 msg_len_to_hxg_len(u32 len)
11720eb16fd2SDaniele Ceraolo Spurio {
11730eb16fd2SDaniele Ceraolo Spurio return len - GUC_CTB_MSG_MIN_LEN;
1174d2c5a5a9SJohn Harrison }
11750eb16fd2SDaniele Ceraolo Spurio
11760eb16fd2SDaniele Ceraolo Spurio static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
11770eb16fd2SDaniele Ceraolo Spurio {
11780eb16fd2SDaniele Ceraolo Spurio u32 *hxg = msg_to_hxg(msg);
1179dd08ebf6SMatthew Brost u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1180dd08ebf6SMatthew Brost
1181dd08ebf6SMatthew Brost lockdep_assert_held(&ct->lock);
1182d2c5a5a9SJohn Harrison
11830eb16fd2SDaniele Ceraolo Spurio switch (action) {
1184dd08ebf6SMatthew Brost case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1185dd08ebf6SMatthew Brost case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1186dd08ebf6SMatthew Brost case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
1187dd08ebf6SMatthew Brost case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
11889c1857d5SMichal Wajdeczko g2h_release_space(ct, len);
1189dd08ebf6SMatthew Brost }
1190dd08ebf6SMatthew Brost
1191dd08ebf6SMatthew Brost return 0;
1192d4978a67SMichal Wajdeczko }
1193d4978a67SMichal Wajdeczko
1194dd08ebf6SMatthew Brost static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action)
1195dd08ebf6SMatthew Brost {
1196d4978a67SMichal Wajdeczko struct xe_gt *gt = ct_to_gt(ct);
1197dd08ebf6SMatthew Brost
1198d4978a67SMichal Wajdeczko if (action == XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
1199d4978a67SMichal Wajdeczko xe_gt_err(gt, "GuC Crash dump notification\n");
1200a54e016aSMichal Wajdeczko else if (action == XE_GUC_ACTION_NOTIFY_EXCEPTION)
1201a54e016aSMichal Wajdeczko xe_gt_err(gt, "GuC Exception notification\n");
1202dd08ebf6SMatthew Brost else
1203dd08ebf6SMatthew Brost xe_gt_err(gt, "Unknown GuC crash notification: 0x%04X\n", action);
1204dd08ebf6SMatthew Brost
1205dd08ebf6SMatthew Brost CT_DEAD(ct, NULL, CRASH);
1206dd08ebf6SMatthew Brost
1207dd08ebf6SMatthew Brost kick_reset(ct);
1208dd08ebf6SMatthew Brost
12092988cf02SNiranjana Vishwanathapura return 0;
1210dd08ebf6SMatthew Brost }
1211dd08ebf6SMatthew Brost
1212dd08ebf6SMatthew Brost #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
1213dd08ebf6SMatthew Brost static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
1214dd08ebf6SMatthew Brost {
1215dd08ebf6SMatthew Brost u16 fence_min = U16_MAX, fence_max = 0;
121613c52251SMichal Wajdeczko struct xe_gt *gt = ct_to_gt(ct);
1217d4978a67SMichal Wajdeczko bool found = false;
1218d4978a67SMichal Wajdeczko unsigned int n;
1219dd08ebf6SMatthew Brost #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
1220dd08ebf6SMatthew Brost char *buf;
1221dd08ebf6SMatthew Brost #endif
1222dd08ebf6SMatthew Brost
1223d4978a67SMichal Wajdeczko lockdep_assert_held(&ct->lock);
1224dd08ebf6SMatthew Brost
122513c52251SMichal Wajdeczko for (n = 0; n < ARRAY_SIZE(ct->fast_req); n++) {
1226dd08ebf6SMatthew Brost if (ct->fast_req[n].fence < fence_min)
1227d2c5a5a9SJohn Harrison fence_min = ct->fast_req[n].fence;
1228dd08ebf6SMatthew Brost if (ct->fast_req[n].fence > fence_max)
1229dd08ebf6SMatthew Brost fence_max = ct->fast_req[n].fence;
1230dd08ebf6SMatthew Brost
1231dd08ebf6SMatthew Brost if (ct->fast_req[n].fence != fence)
1232d4978a67SMichal Wajdeczko continue;
1233dd08ebf6SMatthew Brost found = true;
1234dd08ebf6SMatthew Brost
1235dd08ebf6SMatthew Brost #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
1236dd08ebf6SMatthew Brost buf = kmalloc(SZ_4K, GFP_NOWAIT);
1237dd08ebf6SMatthew Brost if (buf && stack_depot_snprint(ct->fast_req[n].stack, buf, SZ_4K, 0))
1238dd08ebf6SMatthew Brost xe_gt_err(gt, "Fence 0x%x was used by action %#04x sent at:\n%s",
1239dd08ebf6SMatthew Brost fence, ct->fast_req[n].action, buf);
1240dd08ebf6SMatthew Brost else
1241dd08ebf6SMatthew Brost xe_gt_err(gt, "Fence 0x%x was used by action %#04x [failed to retrieve stack]\n",
1242dd08ebf6SMatthew Brost fence, ct->fast_req[n].action);
124313c52251SMichal Wajdeczko kfree(buf);
1244dd08ebf6SMatthew Brost #else
1245d2c5a5a9SJohn Harrison xe_gt_err(gt, "Fence 0x%x was used by action %#04x\n",
1246dd08ebf6SMatthew Brost fence, ct->fast_req[n].action);
1247dd08ebf6SMatthew Brost #endif
1248dd08ebf6SMatthew Brost break;
1249dd08ebf6SMatthew Brost }
1250dd08ebf6SMatthew Brost
1251dd08ebf6SMatthew Brost if (!found)
1252dd08ebf6SMatthew Brost xe_gt_warn(gt, "Fence 0x%x not found - tracking buffer wrapped? [range = 0x%x -> 0x%x, next = 0x%X]\n",
1253dd08ebf6SMatthew Brost fence, fence_min, fence_max, ct->fence_seqno);
1254dd08ebf6SMatthew Brost }
1255dd08ebf6SMatthew Brost #else
1256aed2c1d7SMichal Wajdeczko static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
1257d4978a67SMichal Wajdeczko {
1258d4978a67SMichal Wajdeczko }
1259d4978a67SMichal Wajdeczko #endif
1260d4978a67SMichal Wajdeczko
1261dd08ebf6SMatthew Brost static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
1262dd08ebf6SMatthew Brost {
1263d4978a67SMichal Wajdeczko struct xe_gt *gt = ct_to_gt(ct);
1264dd08ebf6SMatthew Brost u32 *hxg = msg_to_hxg(msg);
1265dd08ebf6SMatthew Brost u32 hxg_len = msg_len_to_hxg_len(len);
1266d4978a67SMichal Wajdeczko u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
1267d4978a67SMichal Wajdeczko u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
1268d4978a67SMichal Wajdeczko struct g2h_fence *g2h_fence;
1269d4978a67SMichal Wajdeczko
1270dd08ebf6SMatthew Brost lockdep_assert_held(&ct->lock);
1271dd08ebf6SMatthew Brost
1272dd08ebf6SMatthew Brost /*
1273dd08ebf6SMatthew Brost * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
1274dd08ebf6SMatthew Brost * Those messages should never fail, so if we do get an error back it
1275dd08ebf6SMatthew Brost * means we're likely doing an illegal operation and the GuC is
1276dd08ebf6SMatthew Brost * rejecting it. We have no way to inform the code that submitted the
1277dd08ebf6SMatthew Brost * H2G that the message was rejected, so we need to escalate the
12789b9529ceSFrancois Dugast * failure to trigger a reset.
1279dd08ebf6SMatthew Brost */
1280dd08ebf6SMatthew Brost if (fence & CT_SEQNO_UNTRACKED) {
12819b9529ceSFrancois Dugast if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
1282dd08ebf6SMatthew Brost xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
1283dd08ebf6SMatthew Brost fence,
1284dd08ebf6SMatthew Brost FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
1285dd08ebf6SMatthew Brost FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
1286dd08ebf6SMatthew Brost else
1287dd08ebf6SMatthew Brost xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
12888bfc4963SZhanjun Dong type, fence);
12898bfc4963SZhanjun Dong
1290dd08ebf6SMatthew Brost fast_req_report(ct, fence);
1291dd08ebf6SMatthew Brost
1292dd08ebf6SMatthew Brost CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
1293dd08ebf6SMatthew Brost
12949b9529ceSFrancois Dugast return -EPROTO;
1295dd08ebf6SMatthew Brost }
1296dd08ebf6SMatthew Brost
1297dd08ebf6SMatthew Brost g2h_fence = xa_erase(&ct->fence_lookup, fence);
1298dd08ebf6SMatthew Brost if (unlikely(!g2h_fence)) {
1299dd08ebf6SMatthew Brost /* Don't tear down channel, as send could've timed out */
1300dd08ebf6SMatthew Brost /* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */
1301dd08ebf6SMatthew Brost xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
1302dd08ebf6SMatthew Brost g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
1303dd08ebf6SMatthew Brost return 0;
1304dd08ebf6SMatthew Brost }
1305dd08ebf6SMatthew Brost
1306dd08ebf6SMatthew Brost xe_gt_assert(gt, fence == g2h_fence->seqno);
1307dd08ebf6SMatthew Brost
130826d4481aSMichal Wajdeczko if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
130948c64d49SMichal Wajdeczko g2h_fence->fail = true;
131026d4481aSMichal Wajdeczko g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
131126d4481aSMichal Wajdeczko g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
131248c64d49SMichal Wajdeczko } else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
131326d4481aSMichal Wajdeczko g2h_fence->retry = true;
1314aed2c1d7SMichal Wajdeczko g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
1315aed2c1d7SMichal Wajdeczko } else if (g2h_fence->response_buffer) {
1316aed2c1d7SMichal Wajdeczko g2h_fence->response_len = hxg_len;
1317335d62adSMichal Wajdeczko memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
1318335d62adSMichal Wajdeczko } else {
1319335d62adSMichal Wajdeczko g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
13207d4d1c54SJohn Harrison }
13217d4d1c54SJohn Harrison
13227d4d1c54SJohn Harrison g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
13237d4d1c54SJohn Harrison
1324dd08ebf6SMatthew Brost g2h_fence->done = true;
132513c52251SMichal Wajdeczko smp_mb();
1326dd08ebf6SMatthew Brost
1327dd08ebf6SMatthew Brost wake_up_all(&ct->g2h_fence_wq);
1328d2c5a5a9SJohn Harrison
1329c4ed1bb1SMichal Wajdeczko return 0;
1330c4ed1bb1SMichal Wajdeczko }
1331d2c5a5a9SJohn Harrison
1332d2c5a5a9SJohn Harrison static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
1333dd08ebf6SMatthew Brost {
1334dd08ebf6SMatthew Brost struct xe_gt *gt = ct_to_gt(ct);
1335dd08ebf6SMatthew Brost u32 *hxg = msg_to_hxg(msg);
1336dd08ebf6SMatthew Brost u32 origin, type;
1337dd08ebf6SMatthew Brost int ret;
1338dd08ebf6SMatthew Brost
1339dd08ebf6SMatthew Brost lockdep_assert_held(&ct->lock);
134013c52251SMichal Wajdeczko
1341dd08ebf6SMatthew Brost origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
1342d2c5a5a9SJohn Harrison if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
1343dd08ebf6SMatthew Brost xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
13448656ea9aSBalasubramani Vivekanandan origin);
1345d4978a67SMichal Wajdeczko CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN);
1346dd08ebf6SMatthew Brost
13479c1857d5SMichal Wajdeczko return -EPROTO;
1348dd08ebf6SMatthew Brost }
1349dd08ebf6SMatthew Brost
1350dc75d037SMatthew Brost type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
1351dd08ebf6SMatthew Brost switch (type) {
1352dd08ebf6SMatthew Brost case GUC_HXG_TYPE_EVENT:
1353dc75d037SMatthew Brost ret = parse_g2h_event(ct, msg, len);
1354dc75d037SMatthew Brost break;
1355dc75d037SMatthew Brost case GUC_HXG_TYPE_RESPONSE_SUCCESS:
1356a7ca8157SRodrigo Vivi case GUC_HXG_TYPE_RESPONSE_FAILURE:
1357dd08ebf6SMatthew Brost case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
1358dd08ebf6SMatthew Brost ret = parse_g2h_response(ct, msg, len);
13599c1857d5SMichal Wajdeczko break;
1360dc75d037SMatthew Brost default:
1361d2c5a5a9SJohn Harrison xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
1362d2c5a5a9SJohn Harrison type);
1363d2c5a5a9SJohn Harrison CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE);
1364d2c5a5a9SJohn Harrison
1365d2c5a5a9SJohn Harrison ret = -EOPNOTSUPP;
1366d2c5a5a9SJohn Harrison }
1367d2c5a5a9SJohn Harrison
1368d2c5a5a9SJohn Harrison return ret;
1369d2c5a5a9SJohn Harrison }
1370d2c5a5a9SJohn Harrison
1371d2c5a5a9SJohn Harrison static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
1372d2c5a5a9SJohn Harrison {
1373d2c5a5a9SJohn Harrison struct xe_guc *guc = ct_to_guc(ct);
1374d2c5a5a9SJohn Harrison struct xe_gt *gt = ct_to_gt(ct);
1375d2c5a5a9SJohn Harrison u32 hxg_len = msg_len_to_hxg_len(len);
1376d2c5a5a9SJohn Harrison u32 *hxg = msg_to_hxg(msg);
1377d2c5a5a9SJohn Harrison u32 action, adj_len;
1378d2c5a5a9SJohn Harrison u32 *payload;
1379d2c5a5a9SJohn Harrison int ret = 0;
1380d2c5a5a9SJohn Harrison
1381d2c5a5a9SJohn Harrison if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
1382d2c5a5a9SJohn Harrison return 0;
1383d2c5a5a9SJohn Harrison
1384d2c5a5a9SJohn Harrison action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1385d2c5a5a9SJohn Harrison payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
1386d2c5a5a9SJohn Harrison adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;
1387d2c5a5a9SJohn Harrison
138875fd04f2SNitin Gote switch (action) {
1389d2c5a5a9SJohn Harrison case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1390d2c5a5a9SJohn Harrison ret = xe_guc_sched_done_handler(guc, payload, adj_len);
1391d2c5a5a9SJohn Harrison break;
1392d2c5a5a9SJohn Harrison case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1393d2c5a5a9SJohn Harrison ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
1394d2c5a5a9SJohn Harrison break;
1395d2c5a5a9SJohn Harrison case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
1396d2c5a5a9SJohn Harrison ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
1397d2c5a5a9SJohn Harrison break;
1398d2c5a5a9SJohn Harrison case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
1399d2c5a5a9SJohn Harrison ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
1400d2c5a5a9SJohn Harrison adj_len);
1401d2c5a5a9SJohn Harrison break;
1402d2c5a5a9SJohn Harrison case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
1403d2c5a5a9SJohn Harrison /* Selftest only at the moment */
1404d2c5a5a9SJohn Harrison break;
1405d2c5a5a9SJohn Harrison case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
1406d2c5a5a9SJohn Harrison ret = xe_guc_error_capture_handler(guc, payload, adj_len);
1407d2c5a5a9SJohn Harrison break;
1408d2c5a5a9SJohn Harrison case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
1409d2c5a5a9SJohn Harrison /* FIXME: Handle this */
1410d2c5a5a9SJohn Harrison break;
1411d2c5a5a9SJohn Harrison case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
1412d2c5a5a9SJohn Harrison ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
1413d2c5a5a9SJohn Harrison adj_len);
1414d2c5a5a9SJohn Harrison break;
1415d2c5a5a9SJohn Harrison case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1416d2c5a5a9SJohn Harrison ret = xe_guc_pagefault_handler(guc, payload, adj_len);
1417d2c5a5a9SJohn Harrison break;
1418dd08ebf6SMatthew Brost case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1419dd08ebf6SMatthew Brost ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
1420a7ca8157SRodrigo Vivi adj_len);
1421dd08ebf6SMatthew Brost break;
1422dd08ebf6SMatthew Brost case XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY:
1423dd08ebf6SMatthew Brost ret = xe_guc_access_counter_notify_handler(guc, payload,
1424dd08ebf6SMatthew Brost adj_len);
1425a7ca8157SRodrigo Vivi break;
1426dd08ebf6SMatthew Brost case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
1427dd08ebf6SMatthew Brost ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
1428a7ca8157SRodrigo Vivi break;
1429a7ca8157SRodrigo Vivi case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
1430dd08ebf6SMatthew Brost ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
1431dd08ebf6SMatthew Brost break;
143213c52251SMichal Wajdeczko case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY:
1433dd08ebf6SMatthew Brost ret = xe_gt_sriov_pf_control_process_guc2pf(gt, hxg, hxg_len);
1434d2c5a5a9SJohn Harrison break;
1435dd08ebf6SMatthew Brost case GUC_ACTION_GUC2PF_ADVERSE_EVENT:
1436dd08ebf6SMatthew Brost ret = xe_gt_sriov_pf_monitor_process_guc2pf(gt, hxg, hxg_len);
1437a7ca8157SRodrigo Vivi break;
1438dd08ebf6SMatthew Brost case XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
1439dd08ebf6SMatthew Brost case XE_GUC_ACTION_NOTIFY_EXCEPTION:
1440dd08ebf6SMatthew Brost ret = guc_crash_process_msg(ct, action);
1441a7ca8157SRodrigo Vivi break;
1442a7ca8157SRodrigo Vivi default:
1443dd08ebf6SMatthew Brost xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
1444dd08ebf6SMatthew Brost }
1445dd08ebf6SMatthew Brost
1446dd08ebf6SMatthew Brost if (ret) {
1447dd08ebf6SMatthew Brost xe_gt_err(gt, "G2H action %#04x failed (%pe) len %u msg %*ph\n",
1448dd08ebf6SMatthew Brost action, ERR_PTR(ret), hxg_len, (int)sizeof(u32) * hxg_len, hxg);
1449dd08ebf6SMatthew Brost CT_DEAD(ct, NULL, PROCESS_FAILED);
1450dd08ebf6SMatthew Brost }
1451dd08ebf6SMatthew Brost
1452dd08ebf6SMatthew Brost return 0;
1453dd08ebf6SMatthew Brost }
1454dd08ebf6SMatthew Brost
1455dd08ebf6SMatthew Brost static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
1456d4978a67SMichal Wajdeczko {
1457d4978a67SMichal Wajdeczko struct xe_device *xe = ct_to_xe(ct);
14588656ea9aSBalasubramani Vivekanandan struct xe_gt *gt = ct_to_gt(ct);
1459dd08ebf6SMatthew Brost struct guc_ctb *g2h = &ct->ctbs.g2h;
1460d4978a67SMichal Wajdeczko u32 tail, head, len, desc_status;
1461dd08ebf6SMatthew Brost s32 avail;
1462dd08ebf6SMatthew Brost u32 action;
14638656ea9aSBalasubramani Vivekanandan u32 *hxg;
1464dd08ebf6SMatthew Brost
146535c8a964SMatthew Auld xe_gt_assert(gt, xe_guc_ct_initialized(ct));
1466dd08ebf6SMatthew Brost lockdep_assert_held(&ct->fast_lock);
1467dd08ebf6SMatthew Brost
1468dd08ebf6SMatthew Brost if (ct->state == XE_GUC_CT_STATE_DISABLED)
1469dd08ebf6SMatthew Brost return -ENODEV;
1470dd08ebf6SMatthew Brost
1471dd08ebf6SMatthew Brost if (ct->state == XE_GUC_CT_STATE_STOPPED)
1472dd08ebf6SMatthew Brost return -ECANCELED;
1473a7ca8157SRodrigo Vivi
1474a7ca8157SRodrigo Vivi if (g2h->info.broken)
1475dd08ebf6SMatthew Brost return -EPIPE;
14763cba2f1dSRadhakrishna Sripada
14773cba2f1dSRadhakrishna Sripada xe_gt_assert(gt, xe_guc_ct_enabled(ct));
14788656ea9aSBalasubramani Vivekanandan
1479dd08ebf6SMatthew Brost desc_status = desc_read(xe, g2h, status);
1480d2c5a5a9SJohn Harrison if (desc_status) {
1481d2c5a5a9SJohn Harrison if (desc_status & GUC_CTB_STATUS_DISABLED) {
1482d2c5a5a9SJohn Harrison /*
1483d2c5a5a9SJohn Harrison * Potentially valid if a CLIENT_RESET request resulted in
1484dd08ebf6SMatthew Brost * contexts/engines being reset. But should never happen as
1485dd08ebf6SMatthew Brost * no contexts should be active when CLIENT_RESET is sent.
1486dd08ebf6SMatthew Brost */
1487dd08ebf6SMatthew Brost xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n");
148813c52251SMichal Wajdeczko desc_status &= ~GUC_CTB_STATUS_DISABLED;
1489dd08ebf6SMatthew Brost }
1490d4978a67SMichal Wajdeczko
1491d4978a67SMichal Wajdeczko if (desc_status) {
1492d4978a67SMichal Wajdeczko xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status);
1493d4978a67SMichal Wajdeczko goto corrupted;
1494d4978a67SMichal Wajdeczko }
1495dd08ebf6SMatthew Brost }
1496dd08ebf6SMatthew Brost
1497dd08ebf6SMatthew Brost if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
1498dd08ebf6SMatthew Brost u32 desc_tail = desc_read(xe, g2h, tail);
1499dd08ebf6SMatthew Brost /*
1500dd08ebf6SMatthew Brost u32 desc_head = desc_read(xe, g2h, head);
1501dd08ebf6SMatthew Brost
1502dd08ebf6SMatthew Brost * info.head and desc_head are updated back-to-back at the end of
1503dd08ebf6SMatthew Brost * this function and nowhere else. Hence, they cannot be different
1504dd08ebf6SMatthew Brost * unless two g2h_read calls are running concurrently. Which is not
1505dd08ebf6SMatthew Brost * possible because it is guarded by ct->fast_lock. And yet, some
1506dd08ebf6SMatthew Brost * discrete platforms are regularly hitting this error :(.
150713c52251SMichal Wajdeczko *
1508dd08ebf6SMatthew Brost * desc_head rolling backwards shouldn't cause any noticeable
1509dd08ebf6SMatthew Brost * problems - just a delay in GuC being allowed to proceed past that
1510d2c5a5a9SJohn Harrison * point in the queue. So for now, just disable the error until it
151113c52251SMichal Wajdeczko * can be root caused.
151213c52251SMichal Wajdeczko *
1513d2c5a5a9SJohn Harrison if (g2h->info.head != desc_head) {
1514d2c5a5a9SJohn Harrison desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH);
1515dd08ebf6SMatthew Brost xe_gt_err(gt, "CT read: head was modified %u != %u\n",
1516dd08ebf6SMatthew Brost desc_head, g2h->info.head);
1517dd08ebf6SMatthew Brost goto corrupted;
1518dd08ebf6SMatthew Brost }
1519dd08ebf6SMatthew Brost */
1520dd08ebf6SMatthew Brost
1521dd08ebf6SMatthew Brost if (g2h->info.head > g2h->info.size) {
1522dd08ebf6SMatthew Brost desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
1523dd08ebf6SMatthew Brost xe_gt_err(gt, "CT read: head out of range: %u vs %u\n",
1524dd08ebf6SMatthew Brost g2h->info.head, g2h->info.size);
1525dd08ebf6SMatthew Brost goto corrupted;
1526dd08ebf6SMatthew Brost }
1527dd08ebf6SMatthew Brost
1528a00b8f1aSMatthew Auld if (desc_tail >= g2h->info.size) {
1529dd08ebf6SMatthew Brost desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
1530dd08ebf6SMatthew Brost xe_gt_err(gt, "CT read: invalid tail offset %u >= %u)\n",
153116b57c90SRodrigo Vivi desc_tail, g2h->info.size);
1532a00b8f1aSMatthew Auld goto corrupted;
1533dd08ebf6SMatthew Brost }
1534dd08ebf6SMatthew Brost }
1535dd08ebf6SMatthew Brost
1536dd08ebf6SMatthew Brost /* Calculate DW available to read */
1537dd08ebf6SMatthew Brost tail = desc_read(xe, g2h, tail);
1538dd08ebf6SMatthew Brost avail = tail - g2h->info.head;
1539dd08ebf6SMatthew Brost if (unlikely(avail == 0))
1540dd08ebf6SMatthew Brost return 0;
1541dd08ebf6SMatthew Brost
1542c4bbc32eSMatthew Auld if (avail < 0)
1543a00b8f1aSMatthew Auld avail += g2h->info.size;
154416b57c90SRodrigo Vivi
1545dd08ebf6SMatthew Brost /* Read header */
1546dd08ebf6SMatthew Brost xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->info.head,
1547dd08ebf6SMatthew Brost sizeof(u32));
1548dd08ebf6SMatthew Brost len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
1549dd08ebf6SMatthew Brost if (len > avail) {
1550dd08ebf6SMatthew Brost xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
1551dd08ebf6SMatthew Brost avail, len);
1552dd08ebf6SMatthew Brost goto corrupted;
1553dd08ebf6SMatthew Brost }
1554dd08ebf6SMatthew Brost
1555dd08ebf6SMatthew Brost head = (g2h->info.head + 1) % g2h->info.size;
1556dd08ebf6SMatthew Brost avail = len - 1;
1557dd08ebf6SMatthew Brost
1558dd08ebf6SMatthew Brost /* Read G2H message */
1559dd08ebf6SMatthew Brost if (avail + head > g2h->info.size) {
1560dd08ebf6SMatthew Brost u32 avail_til_wrap = g2h->info.size - head;
1561dd08ebf6SMatthew Brost
1562dd08ebf6SMatthew Brost xe_map_memcpy_from(xe, msg + 1,
1563dd08ebf6SMatthew Brost &g2h->cmds, sizeof(u32) * head,
1564dd08ebf6SMatthew Brost avail_til_wrap * sizeof(u32));
1565dd08ebf6SMatthew Brost xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap,
1566dd08ebf6SMatthew Brost &g2h->cmds, 0,
1567dd08ebf6SMatthew Brost (avail - avail_til_wrap) * sizeof(u32));
1568dd08ebf6SMatthew Brost } else {
1569dd08ebf6SMatthew Brost xe_map_memcpy_from(xe, msg + 1,
1570dd08ebf6SMatthew Brost &g2h->cmds, sizeof(u32) * head,
1571dd08ebf6SMatthew Brost avail * sizeof(u32));
157237e01731SMichal Wajdeczko }
1573dd08ebf6SMatthew Brost
1574a00b8f1aSMatthew Auld hxg = msg_to_hxg(msg);
1575dd08ebf6SMatthew Brost action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1576dd08ebf6SMatthew Brost
1577a00b8f1aSMatthew Auld if (fast_path) {
1578a00b8f1aSMatthew Auld if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
1579a00b8f1aSMatthew Auld return 0;
1580a00b8f1aSMatthew Auld
1581a00b8f1aSMatthew Auld switch (action) {
1582a00b8f1aSMatthew Auld case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1583a00b8f1aSMatthew Auld case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1584a00b8f1aSMatthew Auld break; /* Process these in fast-path */
1585a00b8f1aSMatthew Auld default:
1586a00b8f1aSMatthew Auld return 0;
1587a00b8f1aSMatthew Auld }
1588a00b8f1aSMatthew Auld }
1589a00b8f1aSMatthew Auld
1590a00b8f1aSMatthew Auld /* Update local / descriptor header */
1591a00b8f1aSMatthew Auld g2h->info.head = (head + avail) % g2h->info.size;
1592a00b8f1aSMatthew Auld desc_write(xe, g2h, head, g2h->info.head);
1593a00b8f1aSMatthew Auld
1594a00b8f1aSMatthew Auld trace_xe_guc_ctb_g2h(xe, ct_to_gt(ct)->info.id,
1595a00b8f1aSMatthew Auld action, len, g2h->info.head, tail);
1596a00b8f1aSMatthew Auld
1597a00b8f1aSMatthew Auld return len;
1598a00b8f1aSMatthew Auld
1599a00b8f1aSMatthew Auld corrupted:
160016b57c90SRodrigo Vivi CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ);
1601a00b8f1aSMatthew Auld return -EPROTO;
1602a00b8f1aSMatthew Auld }
1603a00b8f1aSMatthew Auld
1604dd08ebf6SMatthew Brost static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
1605dd08ebf6SMatthew Brost {
1606dd08ebf6SMatthew Brost struct xe_gt *gt = ct_to_gt(ct);
1607dd08ebf6SMatthew Brost struct xe_guc *guc = ct_to_guc(ct);
1608dd08ebf6SMatthew Brost u32 hxg_len = msg_len_to_hxg_len(len);
1609dd08ebf6SMatthew Brost u32 *hxg = msg_to_hxg(msg);
1610d2c5a5a9SJohn Harrison u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1611d2c5a5a9SJohn Harrison u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
1612dd08ebf6SMatthew Brost u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
1613dd08ebf6SMatthew Brost int ret = 0;
1614dd08ebf6SMatthew Brost
1615a00b8f1aSMatthew Auld switch (action) {
1616a00b8f1aSMatthew Auld case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
161716b57c90SRodrigo Vivi ret = xe_guc_pagefault_handler(guc, payload, adj_len);
1618dd08ebf6SMatthew Brost break;
1619dd08ebf6SMatthew Brost case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
162037e01731SMichal Wajdeczko __g2h_release_space(ct, len);
162137e01731SMichal Wajdeczko ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
162237e01731SMichal Wajdeczko adj_len);
162337e01731SMichal Wajdeczko break;
162437e01731SMichal Wajdeczko default:
162537e01731SMichal Wajdeczko xe_gt_warn(gt, "NOT_POSSIBLE");
162637e01731SMichal Wajdeczko }
1627db38fdb7SJohn Harrison
1628db38fdb7SJohn Harrison if (ret) {
1629dd08ebf6SMatthew Brost xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
1630d7c925b2SJohn Harrison action, ERR_PTR(ret));
1631dd08ebf6SMatthew Brost CT_DEAD(ct, NULL, FAST_G2H);
1632d7c925b2SJohn Harrison }
1633d7c925b2SJohn Harrison }
1634d7c925b2SJohn Harrison
1635d7c925b2SJohn Harrison /**
1636db38fdb7SJohn Harrison * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
1637d7c925b2SJohn Harrison * @ct: GuC CT object
1638d7c925b2SJohn Harrison *
1639d7c925b2SJohn Harrison * Anything related to page faults is critical for performance, so process these
1640d7c925b2SJohn Harrison * critical G2H in the IRQ. This is safe as these handlers either just wake up
1641d7c925b2SJohn Harrison * waiters or queue another worker.
1642d7c925b2SJohn Harrison */
1643d7c925b2SJohn Harrison void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
1644d7c925b2SJohn Harrison {
1645d7c925b2SJohn Harrison struct xe_device *xe = ct_to_xe(ct);
1646d7c925b2SJohn Harrison bool ongoing;
1647513260dfSRodrigo Vivi int len;
1648513260dfSRodrigo Vivi
1649513260dfSRodrigo Vivi ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
1650dd08ebf6SMatthew Brost if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
1651dd08ebf6SMatthew Brost return;
1652513260dfSRodrigo Vivi
1653513260dfSRodrigo Vivi spin_lock(&ct->fast_lock);
1654dd08ebf6SMatthew Brost do {
1655513260dfSRodrigo Vivi len = g2h_read(ct, ct->fast_msg, true);
1656429d56a6SMatthew Auld if (len > 0)
1657513260dfSRodrigo Vivi g2h_fast_path(ct, ct->fast_msg, len);
1658513260dfSRodrigo Vivi } while (len > 0);
1659513260dfSRodrigo Vivi spin_unlock(&ct->fast_lock);
1660513260dfSRodrigo Vivi
1661513260dfSRodrigo Vivi if (ongoing)
1662513260dfSRodrigo Vivi xe_pm_runtime_put(xe);
1663513260dfSRodrigo Vivi }
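
/*
 * Call-site sketch (an assumption for illustration, the actual wiring lives
 * outside this file): the G2H interrupt path is expected to run the fast path
 * first and then queue ct->g2h_worker so receive_g2h() handles everything
 * else, roughly:
 *
 *	xe_guc_ct_fast_path(ct);
 *	queue_work(ct->g2h_wq, &ct->g2h_worker);	// assumed workqueue field
 */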
1664513260dfSRodrigo Vivi
1665513260dfSRodrigo Vivi /* Returns less than zero on error, 0 on done, 1 on more available */
1666db38fdb7SJohn Harrison static int dequeue_one_g2h(struct xe_guc_ct *ct)
1667db38fdb7SJohn Harrison {
1668513260dfSRodrigo Vivi int len;
1669513260dfSRodrigo Vivi int ret;
1670513260dfSRodrigo Vivi
1671513260dfSRodrigo Vivi lockdep_assert_held(&ct->lock);
1672db38fdb7SJohn Harrison
1673513260dfSRodrigo Vivi spin_lock_irq(&ct->fast_lock);
1674d2c5a5a9SJohn Harrison len = g2h_read(ct, ct->msg, false);
1675513260dfSRodrigo Vivi spin_unlock_irq(&ct->fast_lock);
1676513260dfSRodrigo Vivi if (len <= 0)
1677513260dfSRodrigo Vivi return len;
16781db3594cSMatthew Brost
1679513260dfSRodrigo Vivi ret = parse_g2h_msg(ct, ct->msg, len);
16800b688f9bSMatthew Auld if (unlikely(ret < 0))
1681d7c925b2SJohn Harrison return ret;
1682d7c925b2SJohn Harrison
1683513260dfSRodrigo Vivi ret = process_g2h_msg(ct, ct->msg, len);
1684513260dfSRodrigo Vivi if (unlikely(ret < 0))
1685d7c925b2SJohn Harrison return ret;
1686d7c925b2SJohn Harrison
1687d7c925b2SJohn Harrison return 1;
1688513260dfSRodrigo Vivi }
1689513260dfSRodrigo Vivi
1690513260dfSRodrigo Vivi static void receive_g2h(struct xe_guc_ct *ct)
1691513260dfSRodrigo Vivi {
1692db38fdb7SJohn Harrison bool ongoing;
1693db38fdb7SJohn Harrison int ret;
1694db38fdb7SJohn Harrison
1695db38fdb7SJohn Harrison /*
1696db38fdb7SJohn Harrison * Normal users must always hold mem_access.ref around CT calls. However
1697db38fdb7SJohn Harrison * during the runtime pm callbacks we rely on CT to talk to the GuC, but
1698db38fdb7SJohn Harrison * at this stage we can't rely on mem_access.ref and even the
1699db38fdb7SJohn Harrison * callback_task will be different than current. For such cases we just
1700db38fdb7SJohn Harrison * need to ensure we always process the responses from any blocking
1701db38fdb7SJohn Harrison * ct_send requests or where we otherwise expect some response when
1702db38fdb7SJohn Harrison * initiated from those callbacks (which will need to wait for the below
1703db38fdb7SJohn Harrison * dequeue_one_g2h()). The dequeue_one_g2h() will gracefully fail if
1704db38fdb7SJohn Harrison * the device has suspended to the point that the CT communication has
1705db38fdb7SJohn Harrison * been disabled.
1706db38fdb7SJohn Harrison *
1707513260dfSRodrigo Vivi * If we are inside the runtime pm callback, we can be the only task
1708513260dfSRodrigo Vivi * still issuing CT requests (since that requires having the
1709513260dfSRodrigo Vivi * mem_access.ref). It seems like it might in theory be possible to
1710513260dfSRodrigo Vivi * receive unsolicited events from the GuC just as we are
1711513260dfSRodrigo Vivi * suspending-resuming, but those will currently anyway be lost when
1712513260dfSRodrigo Vivi * eventually exiting from suspend, hence no need to wake up the device
1713513260dfSRodrigo Vivi * here. If we ever need something stronger than get_if_ongoing() then
1714513260dfSRodrigo Vivi * we need to be careful with blocking the pm callbacks from getting CT
1715513260dfSRodrigo Vivi * responses, if the worker here is blocked on those callbacks
1716513260dfSRodrigo Vivi * completing, creating a deadlock.
1717513260dfSRodrigo Vivi */
1718513260dfSRodrigo Vivi ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
1719513260dfSRodrigo Vivi if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
1720c65908c3SJosé Roberto de Souza return;
1721513260dfSRodrigo Vivi
1722dd08ebf6SMatthew Brost do {
17230114f663SJohn Harrison mutex_lock(&ct->lock);
1724513260dfSRodrigo Vivi ret = dequeue_one_g2h(ct);
1725513260dfSRodrigo Vivi mutex_unlock(&ct->lock);
1726513260dfSRodrigo Vivi
1727d7c925b2SJohn Harrison if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
1728cb1f868cSJosé Roberto de Souza xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d", ret);
17297748289dSLucas De Marchi CT_DEAD(ct, NULL, G2H_RECV);
1730cb1f868cSJosé Roberto de Souza kick_reset(ct);
17312c95bbf5SLucas De Marchi }
1732cb1f868cSJosé Roberto de Souza } while (ret == 1);
1733d7c925b2SJohn Harrison
1734c65908c3SJosé Roberto de Souza if (ongoing)
1735dd08ebf6SMatthew Brost xe_pm_runtime_put(ct_to_xe(ct));
1736dd08ebf6SMatthew Brost }
1737dd08ebf6SMatthew Brost
1738513260dfSRodrigo Vivi static void g2h_worker_func(struct work_struct *w)
1739513260dfSRodrigo Vivi {
1740513260dfSRodrigo Vivi struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
1741513260dfSRodrigo Vivi
1742513260dfSRodrigo Vivi receive_g2h(ct);
1743513260dfSRodrigo Vivi }
1744513260dfSRodrigo Vivi
1745513260dfSRodrigo Vivi static void xe_fixup_u64_in_cmds(struct xe_device *xe, struct iosys_map *cmds,
1746513260dfSRodrigo Vivi u32 size, u32 idx, s64 shift)
1747513260dfSRodrigo Vivi {
1748513260dfSRodrigo Vivi u32 hi, lo;
1749513260dfSRodrigo Vivi u64 offset;
1750d7c925b2SJohn Harrison
1751513260dfSRodrigo Vivi lo = xe_map_rd_ring_u32(xe, cmds, idx, size);
1752513260dfSRodrigo Vivi hi = xe_map_rd_ring_u32(xe, cmds, idx + 1, size);
1753513260dfSRodrigo Vivi offset = make_u64(hi, lo);
1754513260dfSRodrigo Vivi offset += shift;
1755513260dfSRodrigo Vivi lo = lower_32_bits(offset);
1756513260dfSRodrigo Vivi hi = upper_32_bits(offset);
1757513260dfSRodrigo Vivi xe_map_wr_ring_u32(xe, cmds, idx, size, lo);
1758db38fdb7SJohn Harrison xe_map_wr_ring_u32(xe, cmds, idx + 1, size, hi);
1759513260dfSRodrigo Vivi }
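
/*
 * Worked example (illustrative numbers only): if the message stores the
 * 64-bit GGTT address 0x0000000180000000 as lo = 0x80000000 at @idx and
 * hi = 0x1 at @idx + 1, then applying shift = -0x40000000 rewrites the
 * pair to lo = 0x40000000, hi = 0x1, i.e. 0x0000000140000000. The ring
 * helpers above are used so that @idx and @idx + 1 may wrap around the
 * end of the buffer.
 */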
1760db38fdb7SJohn Harrison
1761db38fdb7SJohn Harrison /*
1762513260dfSRodrigo Vivi  * Shift any GGTT addresses within a single message that is still left in the
1763db38fdb7SJohn Harrison  * CTB from before post-migration recovery.
1764513260dfSRodrigo Vivi * @ct: pointer to CT struct of the target GuC
1765513260dfSRodrigo Vivi * @cmds: iomap buffer containing CT messages
1766513260dfSRodrigo Vivi * @head: start of the target message within the buffer
1767db38fdb7SJohn Harrison * @len: length of the target message
1768513260dfSRodrigo Vivi * @size: size of the commands buffer
1769513260dfSRodrigo Vivi * @shift: the address shift to be added to each GGTT reference
1770513260dfSRodrigo Vivi * Return: true if the message was fixed or needed no fixups, false on failure
1771d2c5a5a9SJohn Harrison */
1772d2c5a5a9SJohn Harrison static bool ct_fixup_ggtt_in_message(struct xe_guc_ct *ct,
1773d2c5a5a9SJohn Harrison struct iosys_map *cmds, u32 head,
1774d2c5a5a9SJohn Harrison u32 len, u32 size, s64 shift)
1775d2c5a5a9SJohn Harrison {
1776d2c5a5a9SJohn Harrison struct xe_gt *gt = ct_to_gt(ct);
1777d2c5a5a9SJohn Harrison struct xe_device *xe = ct_to_xe(ct);
1778d2c5a5a9SJohn Harrison u32 msg[GUC_HXG_MSG_MIN_LEN];
1779d2c5a5a9SJohn Harrison u32 action, i, n;
1780d2c5a5a9SJohn Harrison
1781d2c5a5a9SJohn Harrison xe_gt_assert(gt, len >= GUC_HXG_MSG_MIN_LEN);
1782d2c5a5a9SJohn Harrison
1783d2c5a5a9SJohn Harrison msg[0] = xe_map_rd_ring_u32(xe, cmds, head, size);
1784d2c5a5a9SJohn Harrison action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
1785d2c5a5a9SJohn Harrison
1786d2c5a5a9SJohn Harrison xe_gt_sriov_dbg_verbose(gt, "fixing H2G %#x\n", action);
1787d2c5a5a9SJohn Harrison
1788d2c5a5a9SJohn Harrison switch (action) {
1789d2c5a5a9SJohn Harrison case XE_GUC_ACTION_REGISTER_CONTEXT:
1790d2c5a5a9SJohn Harrison if (len != XE_GUC_REGISTER_CONTEXT_MSG_LEN)
1791d2c5a5a9SJohn Harrison goto err_len;
1792d2c5a5a9SJohn Harrison xe_fixup_u64_in_cmds(xe, cmds, size, head +
1793d2c5a5a9SJohn Harrison XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER,
1794d2c5a5a9SJohn Harrison shift);
1795d2c5a5a9SJohn Harrison xe_fixup_u64_in_cmds(xe, cmds, size, head +
1796d2c5a5a9SJohn Harrison XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER,
1797d2c5a5a9SJohn Harrison shift);
1798d2c5a5a9SJohn Harrison xe_fixup_u64_in_cmds(xe, cmds, size, head +
1799d2c5a5a9SJohn Harrison XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR, shift);
1800d2c5a5a9SJohn Harrison break;
1801db38fdb7SJohn Harrison case XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC:
1802d2c5a5a9SJohn Harrison if (len < XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN)
1803d2c5a5a9SJohn Harrison goto err_len;
1804d2c5a5a9SJohn Harrison n = xe_map_rd_ring_u32(xe, cmds, head +
1805d2c5a5a9SJohn Harrison XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS, size);
1806d2c5a5a9SJohn Harrison if (len != XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN + 2 * n)
1807d2c5a5a9SJohn Harrison goto err_len;
1808d2c5a5a9SJohn Harrison xe_fixup_u64_in_cmds(xe, cmds, size, head +
1809d2c5a5a9SJohn Harrison XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER,
1810d2c5a5a9SJohn Harrison shift);
1811d2c5a5a9SJohn Harrison xe_fixup_u64_in_cmds(xe, cmds, size, head +
1812d2c5a5a9SJohn Harrison XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER,
1813d2c5a5a9SJohn Harrison shift);
1814d2c5a5a9SJohn Harrison for (i = 0; i < n; i++)
1815d2c5a5a9SJohn Harrison xe_fixup_u64_in_cmds(xe, cmds, size, head +
1816d2c5a5a9SJohn Harrison XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR
1817d2c5a5a9SJohn Harrison + 2 * i, shift);
1818d2c5a5a9SJohn Harrison break;
1819d2c5a5a9SJohn Harrison default:
1820d2c5a5a9SJohn Harrison break;
1821d2c5a5a9SJohn Harrison }
1822d2c5a5a9SJohn Harrison return true;
1823d2c5a5a9SJohn Harrison
1824d2c5a5a9SJohn Harrison err_len:
1825d2c5a5a9SJohn Harrison xe_gt_err(gt, "Skipped H2G %#x message fixups, unexpected length (%u)\n", action, len);
1826d2c5a5a9SJohn Harrison return false;
1827d2c5a5a9SJohn Harrison }
1828d2c5a5a9SJohn Harrison
1829d2c5a5a9SJohn Harrison /*
1830d2c5a5a9SJohn Harrison * Apply fixups to the next outgoing CT message within given CTB
1831d2c5a5a9SJohn Harrison * @ct: the &xe_guc_ct struct instance representing the target GuC
1832d2c5a5a9SJohn Harrison * @h2g: the &guc_ctb struct instance of the target buffer
1833d2c5a5a9SJohn Harrison * @shift: shift to be added to all GGTT addresses within the CTB
1834d2c5a5a9SJohn Harrison * @mhead: pointer to an integer storing message start position; the
1835d2c5a5a9SJohn Harrison * position is changed to the next message before this function returns
183666fb0dd2SJohn Harrison * @avail: size of the area available for parsing, that is length
1837d2c5a5a9SJohn Harrison * of all remaining messages stored within the CTB
1838d2c5a5a9SJohn Harrison * Return: size of the area available for parsing after one message
1839d2c5a5a9SJohn Harrison * has been parsed, that is length remaining from the updated mhead
1840d2c5a5a9SJohn Harrison */
1841d2c5a5a9SJohn Harrison static int ct_fixup_ggtt_in_buffer(struct xe_guc_ct *ct, struct guc_ctb *h2g,
1842d2c5a5a9SJohn Harrison s64 shift, u32 *mhead, s32 avail)
1843d2c5a5a9SJohn Harrison {
1844d2c5a5a9SJohn Harrison struct xe_gt *gt = ct_to_gt(ct);
1845d2c5a5a9SJohn Harrison struct xe_device *xe = ct_to_xe(ct);
1846d2c5a5a9SJohn Harrison u32 msg[GUC_HXG_MSG_MIN_LEN];
1847d2c5a5a9SJohn Harrison u32 size = h2g->info.size;
1848d2c5a5a9SJohn Harrison u32 head = *mhead;
1849d2c5a5a9SJohn Harrison u32 len;
1850d2c5a5a9SJohn Harrison
1851d2c5a5a9SJohn Harrison xe_gt_assert(gt, avail >= (s32)GUC_CTB_MSG_MIN_LEN);
1852d2c5a5a9SJohn Harrison
1853d2c5a5a9SJohn Harrison /* Read header */
1854d2c5a5a9SJohn Harrison msg[0] = xe_map_rd_ring_u32(xe, &h2g->cmds, head, size);
1855d2c5a5a9SJohn Harrison len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
1856d2c5a5a9SJohn Harrison
1857d2c5a5a9SJohn Harrison if (unlikely(len > (u32)avail)) {
1858d2c5a5a9SJohn Harrison xe_gt_err(gt, "H2G channel broken on read, avail=%d, len=%d, fixups skipped\n",
1859d2c5a5a9SJohn Harrison avail, len);
1860d2c5a5a9SJohn Harrison return 0;
1861d2c5a5a9SJohn Harrison }
1862d2c5a5a9SJohn Harrison
1863d2c5a5a9SJohn Harrison head = (head + GUC_CTB_MSG_MIN_LEN) % size;
1864d2c5a5a9SJohn Harrison if (!ct_fixup_ggtt_in_message(ct, &h2g->cmds, head, msg_len_to_hxg_len(len), size, shift))
1865d2c5a5a9SJohn Harrison return 0;
1866d2c5a5a9SJohn Harrison *mhead = (head + msg_len_to_hxg_len(len)) % size;
1867d2c5a5a9SJohn Harrison
1868d2c5a5a9SJohn Harrison return avail - len;
1869d2c5a5a9SJohn Harrison }
1870d2c5a5a9SJohn Harrison
1871d2c5a5a9SJohn Harrison /**
1872d2c5a5a9SJohn Harrison * xe_guc_ct_fixup_messages_with_ggtt - Fixup any pending H2G CTB messages
1873d2c5a5a9SJohn Harrison * @ct: pointer to CT struct of the target GuC
1874d2c5a5a9SJohn Harrison * @ggtt_shift: shift to be added to all GGTT addresses within the CTB
1875d2c5a5a9SJohn Harrison *
1876 * Messages in GuC to Host CTB are owned by GuC and any fixups in them
1877 * are made by GuC. But content of the Host to GuC CTB is owned by the
1878 * KMD, so fixups to GGTT references in any pending messages need to be
1879 * applied here.
1880 * This function updates GGTT offsets in payloads of pending H2G CTB
1881 * messages (messages which were not consumed by GuC before the VF got
1882 * paused).
1883 */
1884 void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift)
1885 {
1886 struct guc_ctb *h2g = &ct->ctbs.h2g;
1887 struct xe_guc *guc = ct_to_guc(ct);
1888 struct xe_gt *gt = guc_to_gt(guc);
1889 u32 head, tail, size;
1890 s32 avail;
1891
1892 if (unlikely(h2g->info.broken))
1893 return;
1894
1895 h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
1896 head = h2g->info.head;
1897 tail = READ_ONCE(h2g->info.tail);
1898 size = h2g->info.size;
1899
1900 if (unlikely(head > size))
1901 goto corrupted;
1902
1903 if (unlikely(tail >= size))
1904 goto corrupted;
1905
1906 avail = tail - head;
1907
1908 /* beware of buffer wrap case */
1909 if (unlikely(avail < 0))
1910 avail += size;
1911 xe_gt_dbg(gt, "available %d (%u:%u:%u)\n", avail, head, tail, size);
1912 xe_gt_assert(gt, avail >= 0);
1913
1914 while (avail > 0)
1915 avail = ct_fixup_ggtt_in_buffer(ct, h2g, ggtt_shift, &head, avail);
1916
1917 return;
1918
1919 corrupted:
1920 xe_gt_err(gt, "Corrupted H2G descriptor head=%u tail=%u size=%u, fixups not applied\n",
1921 head, tail, size);
1922 h2g->info.broken = true;
1923 }
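
/*
 * Note on @ggtt_shift (an assumption for illustration, not taken from this
 * file): after migration the VF's GGTT block may start at a different base,
 * in which case the shift is simply the signed difference between the new
 * and the old base:
 *
 *	s64 ggtt_shift = new_ggtt_base - old_ggtt_base;	// hypothetical names
 *	xe_guc_ct_fixup_messages_with_ggtt(ct, ggtt_shift);
 */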
1924
1925 static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
1926 bool want_ctb)
1927 {
1928 struct xe_guc_ct_snapshot *snapshot;
1929
1930 snapshot = kzalloc(sizeof(*snapshot), atomic ? GFP_ATOMIC : GFP_KERNEL);
1931 if (!snapshot)
1932 return NULL;
1933
1934 if (ct->bo && want_ctb) {
1935 snapshot->ctb_size = xe_bo_size(ct->bo);
1936 snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
1937 }
1938
1939 return snapshot;
1940 }
1941
1942 static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
1943 struct guc_ctb_snapshot *snapshot)
1944 {
1945 xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
1946 sizeof(struct guc_ct_buffer_desc));
1947 memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
1948 }
1949
1950 static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
1951 struct drm_printer *p)
1952 {
1953 drm_printf(p, "\tsize: %d\n", snapshot->info.size);
1954 drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
1955 drm_printf(p, "\thead: %d\n", snapshot->info.head);
1956 drm_printf(p, "\ttail: %d\n", snapshot->info.tail);
1957 drm_printf(p, "\tspace: %d\n", snapshot->info.space);
1958 drm_printf(p, "\tbroken: %d\n", snapshot->info.broken);
1959 drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
1960 drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
1961 drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);
1962 }
1963
1964 static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic,
1965 bool want_ctb)
1966 {
1967 struct xe_device *xe = ct_to_xe(ct);
1968 struct xe_guc_ct_snapshot *snapshot;
1969
1970 snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb);
1971 if (!snapshot) {
1972 xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n");
1973 return NULL;
1974 }
1975
1976 if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
1977 snapshot->ct_enabled = true;
1978 snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
1979 guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, &snapshot->h2g);
1980 guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h);
1981 }
1982
1983 if (ct->bo && snapshot->ctb)
1984 xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo->vmap, 0, snapshot->ctb_size);
1985
1986 return snapshot;
1987 }
1988
1989 /**
1990 * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
1991 * @ct: GuC CT object.
1992 *
1993 * This can be printed out in a later stage like during dev_coredump
1994 * analysis. This is safe to be called during atomic context.
1995 *
1996 * Returns: a GuC CT snapshot object that must be freed by the caller
1997 * by using `xe_guc_ct_snapshot_free`.
1998 */
1999 struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct)
2000 {
2001 return guc_ct_snapshot_capture(ct, true, true);
2002 }
2003
2004 /**
2005 * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
2006 * @snapshot: GuC CT snapshot object.
2007 * @p: drm_printer where it will be printed out.
2008 *
2009 * This function prints out a given GuC CT snapshot object.
2010 */
2011 void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
2012 struct drm_printer *p)
2013 {
2014 if (!snapshot)
2015 return;
2016
2017 if (snapshot->ct_enabled) {
2018 drm_puts(p, "H2G CTB (all sizes in DW):\n");
2019 guc_ctb_snapshot_print(&snapshot->h2g, p);
2020
2021 drm_puts(p, "G2H CTB (all sizes in DW):\n");
2022 guc_ctb_snapshot_print(&snapshot->g2h, p);
2023 drm_printf(p, "\tg2h outstanding: %d\n",
2024 snapshot->g2h_outstanding);
2025
2026 if (snapshot->ctb) {
2027 drm_printf(p, "[CTB].length: 0x%zx\n", snapshot->ctb_size);
2028 xe_print_blob_ascii85(p, "[CTB].data", '\n',
2029 snapshot->ctb, 0, snapshot->ctb_size);
2030 }
2031 } else {
2032 drm_puts(p, "CT disabled\n");
2033 }
2034 }
2035
2036 /**
2037 * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot.
2038 * @snapshot: GuC CT snapshot object.
2039 *
2040 * This function free all the memory that needed to be allocated at capture
2041 * time.
2042 */
2043 void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
2044 {
2045 if (!snapshot)
2046 return;
2047
2048 kfree(snapshot->ctb);
2049 kfree(snapshot);
2050 }
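
/*
 * Typical capture/print/free cycle (a sketch; xe_guc_ct_print() below is the
 * in-tree example of exactly this pattern):
 *
 *	struct xe_guc_ct_snapshot *snapshot;
 *
 *	snapshot = xe_guc_ct_snapshot_capture(ct);	// safe in atomic context
 *	...						// later, e.g. from devcoredump
 *	xe_guc_ct_snapshot_print(snapshot, p);		// p is a struct drm_printer *
 *	xe_guc_ct_snapshot_free(snapshot);
 */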
2051
2052 /**
2053 * xe_guc_ct_print - GuC CT Print.
2054 * @ct: GuC CT.
2055 * @p: drm_printer where it will be printed out.
2056 * @want_ctb: Should the full CTB content be dumped (vs just the headers)
2057 *
2058 * This function will quickly capture a snapshot of the CT state
2059 * and immediately print it out.
2060 */
2061 void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
2062 {
2063 struct xe_guc_ct_snapshot *snapshot;
2064
2065 snapshot = guc_ct_snapshot_capture(ct, false, want_ctb);
2066 xe_guc_ct_snapshot_print(snapshot, p);
2067 xe_guc_ct_snapshot_free(snapshot);
2068 }
2069
2070 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
2071
2072 #ifdef CONFIG_FUNCTION_ERROR_INJECTION
2073 /*
2074 * This is a helper function which assists the driver in identifying if a fault
2075 * injection test is currently active, allowing it to reduce unnecessary debug
2076 * output. Typically, the function returns zero, but the fault injection
2077 * framework can alter this to return an error. Since faults are injected
2078 * through this function, it's important to ensure the compiler doesn't optimize
2079 * it into an inline function. To avoid such optimization, the 'noinline'
2080 * attribute is applied. Compiler optimizes the static function defined in the
2081 * header file as an inline function.
2082 */
2083 noinline int xe_is_injection_active(void) { return 0; }
2084 ALLOW_ERROR_INJECTION(xe_is_injection_active, ERRNO);
2085 #else
2086 int xe_is_injection_active(void) { return 0; }
2087 #endif
2088
2089 static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
2090 {
2091 struct xe_guc_log_snapshot *snapshot_log;
2092 struct xe_guc_ct_snapshot *snapshot_ct;
2093 struct xe_guc *guc = ct_to_guc(ct);
2094 unsigned long flags;
2095 bool have_capture;
2096
2097 if (ctb)
2098 ctb->info.broken = true;
2099 /*
2100 * A huge dump is generated when injecting errors into the GuC CT/MMIO
2101 * functions, so suppress the dump when a fault is injected.
2102 */
2103 if (xe_is_injection_active())
2104 return;
2105
2106 /* Ignore further errors after the first dump until a reset */
2107 if (ct->dead.reported)
2108 return;
2109
2110 spin_lock_irqsave(&ct->dead.lock, flags);
2111
2112 /* And only capture one dump at a time */
2113 have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE);
2114 ct->dead.reason |= (1 << reason_code) |
2115 (1 << CT_DEAD_STATE_CAPTURE);
2116
2117 spin_unlock_irqrestore(&ct->dead.lock, flags);
2118
2119 if (have_capture)
2120 return;
2121
2122 snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true);
2123 snapshot_ct = xe_guc_ct_snapshot_capture((ct));
2124
2125 spin_lock_irqsave(&ct->dead.lock, flags);
2126
2127 if (ct->dead.snapshot_log || ct->dead.snapshot_ct) {
2128 xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n");
2129 xe_guc_log_snapshot_free(snapshot_log);
2130 xe_guc_ct_snapshot_free(snapshot_ct);
2131 } else {
2132 ct->dead.snapshot_log = snapshot_log;
2133 ct->dead.snapshot_ct = snapshot_ct;
2134 }
2135
2136 spin_unlock_irqrestore(&ct->dead.lock, flags);
2137
2138 queue_work(system_unbound_wq, &(ct)->dead.worker);
2139 }
2140
2141 static void ct_dead_print(struct xe_dead_ct *dead)
2142 {
2143 struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead);
2144 struct xe_device *xe = ct_to_xe(ct);
2145 struct xe_gt *gt = ct_to_gt(ct);
2146 static int g_count;
2147 struct drm_printer ip = xe_gt_info_printer(gt);
2148 struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);
2149
2150 if (!dead->reason) {
2151 xe_gt_err(gt, "CTB is dead for no reason!?\n");
2152 return;
2153 }
2154
2155 /* Can't generate a genuine core dump at this point, so just do the good bits */
2156 drm_puts(&lp, "**** Xe Device Coredump ****\n");
2157 drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason);
2158 xe_device_snapshot_print(xe, &lp);
2159
2160 drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
2161 drm_printf(&lp, "\tTile: %d\n", gt->tile->id);
2162
2163 drm_puts(&lp, "**** GuC Log ****\n");
2164 xe_guc_log_snapshot_print(dead->snapshot_log, &lp);
2165
2166 drm_puts(&lp, "**** GuC CT ****\n");
2167 xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp);
2168
2169 drm_puts(&lp, "Done.\n");
2170 }
2171
2172 static void ct_dead_worker_func(struct work_struct *w)
2173 {
2174 struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker);
2175
2176 if (!ct->dead.reported) {
2177 ct->dead.reported = true;
2178 ct_dead_print(&ct->dead);
2179 }
2180
2181 spin_lock_irq(&ct->dead.lock);
2182
2183 xe_guc_log_snapshot_free(ct->dead.snapshot_log);
2184 ct->dead.snapshot_log = NULL;
2185 xe_guc_ct_snapshot_free(ct->dead.snapshot_ct);
2186 ct->dead.snapshot_ct = NULL;
2187
2188 if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) {
2189 /* A reset has occurred so re-arm the error reporting */
2190 ct->dead.reason = 0;
2191 ct->dead.reported = false;
2192 }
2193
2194 spin_unlock_irq(&ct->dead.lock);
2195 }
2196 #endif
2197