/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include <drm/drm_print.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_ring.h"

#include "gvt.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_wait_util.h"
#include "trace.h"

#define GEN9_MOCS_SIZE		64

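/*
 * Per-engine MMIO registers that must be switched when the engine changes
 * owner.  @mask holds the write-enable bits of a masked register (shifted
 * into the upper 16 bits on writes); @in_context marks registers that live
 * in the logical ring context image and are saved/restored with the
 * context; @value caches the host value while a vGPU owns the engine.
 */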
struct engine_mmio {
	enum intel_engine_id id;
	i915_reg_t reg;
	u32 mask;
	bool in_context;
	u32 value;
};

/* The raw offset is appended to each line for convenience. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
	{RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false}, /* 0x229c */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
	{RCS0, INVALID_MMIO_REG, 0, false} /* Terminated */
};

static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
	{RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false}, /* 0x229c */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
	{RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
	{RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
	{RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
	{RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
	{RCS0, _MMIO(0xb118), 0, false}, /* GEN8_L3SQCREG4 */
	{RCS0, _MMIO(0xb11c), 0, false}, /* GEN9_SCRATCH1 */
	{RCS0, GEN9_SCRATCH_LNCF1, 0, false}, /* 0xb008 */
	{RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
	{RCS0, _MMIO(0xe180), 0xffff, true}, /* HALF_SLICE_CHICKEN2 */
	{RCS0, _MMIO(0xe184), 0xffff, true}, /* GEN8_HALF_SLICE_CHICKEN3 */
	{RCS0, _MMIO(0xe188), 0xffff, true}, /* GEN9_HALF_SLICE_CHICKEN5 */
	{RCS0, _MMIO(0xe194), 0xffff, true}, /* GEN9_HALF_SLICE_CHICKEN7 */
	{RCS0, _MMIO(0xe4f0), 0xffff, true}, /* GEN8_ROW_CHICKEN */
	{RCS0, TRVATTL3PTRDW(0), 0, true}, /* 0x4de0 */
	{RCS0, TRVATTL3PTRDW(1), 0, true}, /* 0x4de4 */
	{RCS0, TRNULLDETCT, 0, true}, /* 0x4de8 */
	{RCS0, TRINVTILEDETCT, 0, true}, /* 0x4dec */
	{RCS0, TRVADR, 0, true}, /* 0x4df0 */
	{RCS0, TRTTE, 0, true}, /* 0x4df4 */
	{RCS0, _MMIO(0x4dfc), 0, true},

	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */

	{VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */

	{VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */

	{RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
	{RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */

	{RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
	{RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
	{RCS0, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */

	{RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
	{RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
	{RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
	{RCS0, INVALID_MMIO_REG, 0, false} /* Terminated */
};

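/*
 * Host snapshot of the GEN9 MOCS control and L3CC tables, captured once on
 * first use and consulted whenever the host side of an engine switch has
 * no vGPU state to draw from.
 */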
static struct {
	bool initialized;
	u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
	u32 l3cc_table[GEN9_MOCS_SIZE / 2];
} gen9_render_mocs;

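/* Per-engine base offsets of the GEN9 MOCS control registers. */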
static u32 gen9_mocs_mmio_offset_list[] = {
	[RCS0]  = 0xc800,
	[VCS0]  = 0xc900,
	[VCS1]  = 0xca00,
	[BCS0]  = 0xcc00,
	[VECS0] = 0xcb00,
};

static void load_render_mocs(const struct intel_engine_cs *engine)
{
	struct intel_gvt *gvt = engine->i915->gvt;
	struct intel_uncore *uncore = engine->uncore;
	u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
	u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
	i915_reg_t offset;
	int ring_id, i;

	/* Platform doesn't have MOCS MMIOs. */
	if (!regs)
		return;

	for (ring_id = 0; ring_id < cnt; ring_id++) {
		if (!HAS_ENGINE(engine->gt, ring_id))
			continue;

		offset.reg = regs[ring_id];
		for (i = 0; i < GEN9_MOCS_SIZE; i++) {
			gen9_render_mocs.control_table[ring_id][i] =
				intel_uncore_read_fw(uncore, offset);
			offset.reg += 4;
		}
	}

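	/* 0xb020 is GEN9_LNCFCMOCS(0), the first L3 cacheability control register. */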
	offset.reg = 0xb020;
	for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
		gen9_render_mocs.l3cc_table[i] =
			intel_uncore_read_fw(uncore, offset);
		offset.reg += 4;
	}
	gen9_render_mocs.initialized = true;
}

static int
restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
				 struct i915_request *req)
{
	u32 *cs;
	int ret;
	struct engine_mmio *mmio;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = req->engine->id;
	int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];

	if (count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

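	/* One LRI header, count (offset, value) pairs, and a trailing NOOP. */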
	cs = intel_ring_begin(req, count * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(count);
	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->id != ring_id || !mmio->in_context)
			continue;

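		/*
		 * Emit the (offset, value) pair; for masked registers the
		 * write-enable mask goes in the upper 16 bits of the value.
		 */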
		*cs++ = i915_mmio_reg_offset(mmio->reg);
		*cs++ = vgpu_vreg_t(vgpu, mmio->reg) | (mmio->mask << 16);
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs - 2), *(cs - 1), vgpu->id, ring_id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

static int
restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
					struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);

	for (index = 0; index < GEN9_MOCS_SIZE; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs - 2), *(cs - 1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int
restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
				     struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);

	for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs - 2), *(cs - 1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

/*
 * Use the LRI command to initialize the MMIO registers that live in the
 * context state image of an inhibit context: the tracked engine MMIOs,
 * the render MOCS control table and the render MOCS l3cc table.
 */
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
				       struct i915_request *req)
{
	int ret;
	u32 *cs;

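	/* Stop command arbitration (preemption) while the restore LRIs run. */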
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = restore_context_mmio_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	/* No MOCS registers live in the context image except on the render engine. */
	if (req->engine->id != RCS0)
		goto out;

	ret = restore_render_mocs_control_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
	if (ret)
		goto out;

out:
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return ret;
}

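/* Per-engine TLB invalidation control register offsets. */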
static u32 gen8_tlb_mmio_offset_list[] = {
	[RCS0]  = 0x4260,
	[VCS0]  = 0x4264,
	[VCS1]  = 0x4268,
	[BCS0]  = 0x426c,
	[VECS0] = 0x4270,
};

static void handle_tlb_pending_event(struct intel_vgpu *vgpu,
				     const struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	struct intel_vgpu_submission *s = &vgpu->submission;
	u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
	u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
	enum forcewake_domains fw;
	i915_reg_t reg;

	if (!regs)
		return;

	if (drm_WARN_ON(&engine->i915->drm, engine->id >= cnt))
		return;

	if (!test_and_clear_bit(engine->id, (void *)s->tlb_handle_pending))
		return;

	reg = _MMIO(regs[engine->id]);

	/*
	 * WaForceWakeRenderDuringMmioTLBInvalidate:skl
	 * We need to hold a render forcewake reference while invalidating
	 * the RCS TLB, otherwise the device can drop into RC6 and interrupt
	 * the invalidation.
	 */
	fw = intel_uncore_forcewake_for_reg(uncore, reg,
					    FW_REG_READ | FW_REG_WRITE);
	if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) >= 9)
		fw |= FORCEWAKE_RENDER;

	intel_uncore_forcewake_get(uncore, fw);

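	/* Writing 1 kicks off the invalidation; hardware clears the bit when done. */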
	intel_uncore_write_fw(uncore, reg, 0x1);

	if (wait_for_atomic(intel_uncore_read_fw(uncore, reg) == 0, 50))
		gvt_vgpu_err("timed out invalidating ring %s TLB\n",
			     engine->name);
	else
		vgpu_vreg_t(vgpu, reg) = 0;

	intel_uncore_forcewake_put(uncore, fw);

	gvt_dbg_core("invalidate TLB for ring %s\n", engine->name);
}

static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
			const struct intel_engine_cs *engine)
{
	u32 regs[] = {
		[RCS0]  = 0xc800,
		[VCS0]  = 0xc900,
		[VCS1]  = 0xca00,
		[BCS0]  = 0xcc00,
		[VECS0] = 0xcb00,
	};
	struct intel_uncore *uncore = engine->uncore;
	i915_reg_t offset, l3_offset;
	u32 old_v, new_v;
	int i;

	if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs)))
		return;

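	/*
	 * On GEN9 the render engine's MOCS registers are part of the context
	 * image and are switched with the context itself, so skip them here.
	 */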
	if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) == 9)
		return;

	if (!pre && !gen9_render_mocs.initialized)
		load_render_mocs(engine);

	offset.reg = regs[engine->id];
	for (i = 0; i < GEN9_MOCS_SIZE; i++) {
		if (pre)
			old_v = vgpu_vreg_t(pre, offset);
		else
			old_v = gen9_render_mocs.control_table[engine->id][i];
		if (next)
			new_v = vgpu_vreg_t(next, offset);
		else
			new_v = gen9_render_mocs.control_table[engine->id][i];

		if (old_v != new_v)
			intel_uncore_write_fw(uncore, offset, new_v);

		offset.reg += 4;
	}

	if (engine->id == RCS0) {
		l3_offset.reg = 0xb020;
		for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
			if (pre)
				old_v = vgpu_vreg_t(pre, l3_offset);
			else
				old_v = gen9_render_mocs.l3cc_table[i];
			if (next)
				new_v = vgpu_vreg_t(next, l3_offset);
			else
				new_v = gen9_render_mocs.l3cc_table[i];

			if (old_v != new_v)
				intel_uncore_write_fw(uncore, l3_offset, new_v);

			l3_offset.reg += 4;
		}
	}
}

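/* Dword index of the CTX_CONTEXT_CONTROL value in the LRC register state. */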
#define CTX_CONTEXT_CONTROL_VAL	0x03

bool is_inhibit_context(struct intel_context *ce)
{
	const u32 *reg_state = ce->lrc_reg_state;
	u32 inhibit_mask =
		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

	return inhibit_mask ==
		(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
}

/* Switch ring MMIO values (context). */
static void switch_mmio(struct intel_vgpu *pre,
			struct intel_vgpu *next,
			const struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	struct intel_vgpu_submission *s;
	struct engine_mmio *mmio;
	u32 old_v, new_v;

	if (GRAPHICS_VER(engine->i915) >= 9)
		switch_mocs(pre, next, engine);

	for (mmio = engine->i915->gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->id != engine->id)
			continue;
		/*
		 * No need to save or restore an MMIO that is part of the
		 * context state image on GEN9; it is initialized by the LRI
		 * command and saved/restored together with the context.
		 */
		if (GRAPHICS_VER(engine->i915) == 9 && mmio->in_context)
			continue;

		/* save */
		if (pre) {
			vgpu_vreg_t(pre, mmio->reg) =
				intel_uncore_read_fw(uncore, mmio->reg);
			if (mmio->mask)
				vgpu_vreg_t(pre, mmio->reg) &=
					~(mmio->mask << 16);
			old_v = vgpu_vreg_t(pre, mmio->reg);
		} else {
			old_v = mmio->value =
				intel_uncore_read_fw(uncore, mmio->reg);
		}

		/* restore */
		if (next) {
			s = &next->submission;
			/*
			 * No need to restore an MMIO that is part of the
			 * context state image unless this is an inhibit
			 * context; a normal context restores it by itself.
			 */
			if (mmio->in_context &&
			    !is_inhibit_context(s->shadow[engine->id]))
				continue;

			if (mmio->mask)
				new_v = vgpu_vreg_t(next, mmio->reg) |
					(mmio->mask << 16);
			else
				new_v = vgpu_vreg_t(next, mmio->reg);
		} else {
			if (mmio->in_context)
				continue;
			if (mmio->mask)
				new_v = mmio->value | (mmio->mask << 16);
			else
				new_v = mmio->value;
		}

		intel_uncore_write_fw(uncore, mmio->reg, new_v);

		trace_render_mmio(pre ? pre->id : 0,
				  next ? next->id : 0,
				  "switch",
				  i915_mmio_reg_offset(mmio->reg),
				  old_v, new_v);
	}

	if (next)
		handle_tlb_pending_event(next, engine);
}

/**
 * intel_gvt_switch_mmio - switch MMIO context of a specific engine
 * @pre: the last vGPU that owned the engine
 * @next: the vGPU to switch to
 * @engine: the engine
 *
 * A NULL @pre indicates that the host owned the engine; a NULL @next
 * indicates that we are switching to a host workload.
 */
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
			   struct intel_vgpu *next,
			   const struct intel_engine_cs *engine)
{
	if (WARN(!pre && !next, "switch ring %s from HOST to HOST\n",
		 engine->name))
		return;

	gvt_dbg_render("switch ring %s from %s to %s\n", engine->name,
		       pre ? "vGPU" : "HOST", next ? "vGPU" : "HOST");

	/*
	 * We use the raw MMIO access wrappers to speed up the batched
	 * MMIO reads and writes, so forcewake must be handled manually.
	 */
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	switch_mmio(pre, next, engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}

/**
 * intel_gvt_init_engine_mmio_context - initialize the engine MMIO list
 * @gvt: GVT device
 */
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
	struct engine_mmio *mmio;

	if (GRAPHICS_VER(gvt->gt->i915) >= 9) {
		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
		gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
		gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
		gvt->engine_mmio_list.mocs_mmio_offset_list = gen9_mocs_mmio_offset_list;
		gvt->engine_mmio_list.mocs_mmio_offset_list_cnt = ARRAY_SIZE(gen9_mocs_mmio_offset_list);
	} else {
		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
		gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
		gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
	}

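	/*
	 * Count the in-context registers per engine and mark each one as
	 * save/restore-in-context in the MMIO tracking table.
	 */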
	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->in_context) {
			gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++;
			intel_gvt_mmio_set_sr_in_ctx(gvt, mmio->reg.reg);
		}
	}
}