/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Yulei Zhang <yulei.zhang@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include <linux/slab.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"
#include "gt/intel_gt_requests.h"
#include "gt/shmem_utils.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

#include "display/i9xx_plane_regs.h"
#include "display/intel_sprite_regs.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#define INVALID_OP	(~0U)

#define OP_LEN_MI	9
#define OP_LEN_2D	10
#define OP_LEN_3D_MEDIA	16
#define OP_LEN_MFX_VC	16
#define OP_LEN_VEBOX	16

#define CMD_TYPE(cmd)	(((cmd) >> 29) & 7)
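
/*
 * Worked example (illustrative only): CMD_TYPE() extracts bits 31:29 of
 * the first command dword to select the per-ring decode table. An MI
 * command such as MI_BATCH_BUFFER_START (first dword 0x18800001) yields
 * type 0, while a PIPE_CONTROL (first dword 0x7a000004) yields type 3,
 * i.e. 3D/Media.
 */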

struct sub_op_bits {
	int hi;
	int low;
};
struct decode_info {
	const char *name;
	int op_len;
	int nr_sub_op;
	const struct sub_op_bits *sub_op;
};

#define MAX_CMD_BUDGET			0x7fffffff
#define MI_WAIT_FOR_PLANE_C_FLIP_PENDING	(1<<15)
#define MI_WAIT_FOR_PLANE_B_FLIP_PENDING	(1<<9)
#define MI_WAIT_FOR_PLANE_A_FLIP_PENDING	(1<<1)

#define MI_WAIT_FOR_SPRITE_C_FLIP_PENDING	(1<<20)
#define MI_WAIT_FOR_SPRITE_B_FLIP_PENDING	(1<<10)
#define MI_WAIT_FOR_SPRITE_A_FLIP_PENDING	(1<<2)

/* Render Command Map */

/* MI_* command Opcode (28:23) */
#define OP_MI_NOOP                          0x0
#define OP_MI_SET_PREDICATE                 0x1  /* HSW+ */
#define OP_MI_USER_INTERRUPT                0x2
#define OP_MI_WAIT_FOR_EVENT                0x3
#define OP_MI_FLUSH                         0x4
#define OP_MI_ARB_CHECK                     0x5
#define OP_MI_RS_CONTROL                    0x6  /* HSW+ */
#define OP_MI_REPORT_HEAD                   0x7
#define OP_MI_ARB_ON_OFF                    0x8
#define OP_MI_URB_ATOMIC_ALLOC              0x9  /* HSW+ */
#define OP_MI_BATCH_BUFFER_END              0xA
#define OP_MI_SUSPEND_FLUSH                 0xB
#define OP_MI_PREDICATE                     0xC  /* IVB+ */
#define OP_MI_TOPOLOGY_FILTER               0xD  /* IVB+ */
#define OP_MI_SET_APPID                     0xE  /* IVB+ */
#define OP_MI_RS_CONTEXT                    0xF  /* HSW+ */
#define OP_MI_LOAD_SCAN_LINES_INCL          0x12 /* HSW+ */
#define OP_MI_DISPLAY_FLIP                  0x14
#define OP_MI_SEMAPHORE_MBOX                0x16
#define OP_MI_SET_CONTEXT                   0x18
#define OP_MI_MATH                          0x1A
#define OP_MI_URB_CLEAR                     0x19
#define OP_MI_SEMAPHORE_SIGNAL              0x1B /* BDW+ */
#define OP_MI_SEMAPHORE_WAIT                0x1C /* BDW+ */

#define OP_MI_STORE_DATA_IMM                0x20
#define OP_MI_STORE_DATA_INDEX              0x21
#define OP_MI_LOAD_REGISTER_IMM             0x22
#define OP_MI_UPDATE_GTT                    0x23
#define OP_MI_STORE_REGISTER_MEM            0x24
#define OP_MI_FLUSH_DW                      0x26
#define OP_MI_CLFLUSH                       0x27
#define OP_MI_REPORT_PERF_COUNT             0x28
#define OP_MI_LOAD_REGISTER_MEM             0x29 /* HSW+ */
#define OP_MI_LOAD_REGISTER_REG             0x2A /* HSW+ */
#define OP_MI_RS_STORE_DATA_IMM             0x2B /* HSW+ */
#define OP_MI_LOAD_URB_MEM                  0x2C /* HSW+ */
#define OP_MI_STORE_URM_MEM                 0x2D /* HSW+ */
#define OP_MI_2E                            0x2E /* BDW+ */
#define OP_MI_2F                            0x2F /* BDW+ */
#define OP_MI_BATCH_BUFFER_START            0x31

/* Bit definition for dword 0 */
#define _CMDBIT_BB_START_IN_PPGTT	(1UL << 8)

#define OP_MI_CONDITIONAL_BATCH_BUFFER_END  0x36

#define BATCH_BUFFER_ADDR_MASK		((1UL << 32) - (1U << 2))
#define BATCH_BUFFER_ADDR_HIGH_MASK	((1UL << 16) - (1U))
#define BATCH_BUFFER_ADR_SPACE_BIT(x)	(((x) >> 8) & 1U)
#define BATCH_BUFFER_2ND_LEVEL_BIT(x)	((x) >> 22 & 1U)
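
/*
 * Illustrative sketch (hypothetical helper, not used by the parser):
 * how the two address masks above combine into one canonical batch
 * buffer start address when the platform carries 8-byte graphics
 * addresses in commands, mirroring what get_gma_bb_from_cmd() does
 * further below.
 */
static inline u64 example_bb_start_gma(u32 lo, u32 hi)
{
	u64 gma = lo & BATCH_BUFFER_ADDR_MASK;	/* bits 31:2 */

	gma |= (u64)(hi & BATCH_BUFFER_ADDR_HIGH_MASK) << 32; /* bits 47:32 */
	return gma;
}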

/* 2D command: Opcode (28:22) */
#define OP_2D(x)    ((2<<7) | x)

#define OP_XY_SETUP_BLT                             OP_2D(0x1)
#define OP_XY_SETUP_CLIP_BLT                        OP_2D(0x3)
#define OP_XY_SETUP_MONO_PATTERN_SL_BLT             OP_2D(0x11)
#define OP_XY_PIXEL_BLT                             OP_2D(0x24)
#define OP_XY_SCANLINES_BLT                         OP_2D(0x25)
#define OP_XY_TEXT_BLT                              OP_2D(0x26)
#define OP_XY_TEXT_IMMEDIATE_BLT                    OP_2D(0x31)
#define OP_XY_COLOR_BLT                             OP_2D(0x50)
#define OP_XY_PAT_BLT                               OP_2D(0x51)
#define OP_XY_MONO_PAT_BLT                          OP_2D(0x52)
#define OP_XY_SRC_COPY_BLT                          OP_2D(0x53)
#define OP_XY_MONO_SRC_COPY_BLT                     OP_2D(0x54)
#define OP_XY_FULL_BLT                              OP_2D(0x55)
#define OP_XY_FULL_MONO_SRC_BLT                     OP_2D(0x56)
#define OP_XY_FULL_MONO_PATTERN_BLT                 OP_2D(0x57)
#define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT        OP_2D(0x58)
#define OP_XY_MONO_PAT_FIXED_BLT                    OP_2D(0x59)
#define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT           OP_2D(0x71)
#define OP_XY_PAT_BLT_IMMEDIATE                     OP_2D(0x72)
#define OP_XY_SRC_COPY_CHROMA_BLT                   OP_2D(0x73)
#define OP_XY_FULL_IMMEDIATE_PATTERN_BLT            OP_2D(0x74)
#define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT   OP_2D(0x75)
#define OP_XY_PAT_CHROMA_BLT                        OP_2D(0x76)
#define OP_XY_PAT_CHROMA_BLT_IMMEDIATE              OP_2D(0x77)

/* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
#define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
	((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
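
/*
 * Worked example: OP_PIPE_CONTROL below is OP_3D_MEDIA(0x3, 0x2, 0x00)
 * = (3 << 13) | (3 << 11) | (2 << 8) | 0 = 0x7a00, which is exactly the
 * 16-bit opcode (OP_LEN_3D_MEDIA) held in the top 16 bits of a
 * PIPE_CONTROL dword such as 0x7a000004.
 */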

#define OP_STATE_PREFETCH                       OP_3D_MEDIA(0x0, 0x0, 0x03)

#define OP_STATE_BASE_ADDRESS                   OP_3D_MEDIA(0x0, 0x1, 0x01)
#define OP_STATE_SIP                            OP_3D_MEDIA(0x0, 0x1, 0x02)
#define OP_3D_MEDIA_0_1_4                       OP_3D_MEDIA(0x0, 0x1, 0x04)
#define OP_SWTESS_BASE_ADDRESS                  OP_3D_MEDIA(0x0, 0x1, 0x03)

#define OP_3DSTATE_VF_STATISTICS_GM45           OP_3D_MEDIA(0x1, 0x0, 0x0B)

#define OP_PIPELINE_SELECT                      OP_3D_MEDIA(0x1, 0x1, 0x04)

#define OP_MEDIA_VFE_STATE                      OP_3D_MEDIA(0x2, 0x0, 0x0)
#define OP_MEDIA_CURBE_LOAD                     OP_3D_MEDIA(0x2, 0x0, 0x1)
#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD      OP_3D_MEDIA(0x2, 0x0, 0x2)
#define OP_MEDIA_GATEWAY_STATE                  OP_3D_MEDIA(0x2, 0x0, 0x3)
#define OP_MEDIA_STATE_FLUSH                    OP_3D_MEDIA(0x2, 0x0, 0x4)
#define OP_MEDIA_POOL_STATE                     OP_3D_MEDIA(0x2, 0x0, 0x5)

#define OP_MEDIA_OBJECT                         OP_3D_MEDIA(0x2, 0x1, 0x0)
#define OP_MEDIA_OBJECT_PRT                     OP_3D_MEDIA(0x2, 0x1, 0x2)
#define OP_MEDIA_OBJECT_WALKER                  OP_3D_MEDIA(0x2, 0x1, 0x3)
#define OP_GPGPU_WALKER                         OP_3D_MEDIA(0x2, 0x1, 0x5)

#define OP_3DSTATE_CLEAR_PARAMS                 OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
#define OP_3DSTATE_DEPTH_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
#define OP_3DSTATE_STENCIL_BUFFER               OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
#define OP_3DSTATE_HIER_DEPTH_BUFFER            OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
#define OP_3DSTATE_VERTEX_BUFFERS               OP_3D_MEDIA(0x3, 0x0, 0x08)
#define OP_3DSTATE_VERTEX_ELEMENTS              OP_3D_MEDIA(0x3, 0x0, 0x09)
#define OP_3DSTATE_INDEX_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x0A)
#define OP_3DSTATE_VF_STATISTICS                OP_3D_MEDIA(0x3, 0x0, 0x0B)
#define OP_3DSTATE_VF                           OP_3D_MEDIA(0x3, 0x0, 0x0C)  /* HSW+ */
#define OP_3DSTATE_CC_STATE_POINTERS            OP_3D_MEDIA(0x3, 0x0, 0x0E)
#define OP_3DSTATE_SCISSOR_STATE_POINTERS       OP_3D_MEDIA(0x3, 0x0, 0x0F)
#define OP_3DSTATE_VS                           OP_3D_MEDIA(0x3, 0x0, 0x10)
#define OP_3DSTATE_GS                           OP_3D_MEDIA(0x3, 0x0, 0x11)
#define OP_3DSTATE_CLIP                         OP_3D_MEDIA(0x3, 0x0, 0x12)
#define OP_3DSTATE_SF                           OP_3D_MEDIA(0x3, 0x0, 0x13)
#define OP_3DSTATE_WM                           OP_3D_MEDIA(0x3, 0x0, 0x14)
#define OP_3DSTATE_CONSTANT_VS                  OP_3D_MEDIA(0x3, 0x0, 0x15)
#define OP_3DSTATE_CONSTANT_GS                  OP_3D_MEDIA(0x3, 0x0, 0x16)
#define OP_3DSTATE_CONSTANT_PS                  OP_3D_MEDIA(0x3, 0x0, 0x17)
#define OP_3DSTATE_SAMPLE_MASK                  OP_3D_MEDIA(0x3, 0x0, 0x18)
#define OP_3DSTATE_CONSTANT_HS                  OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
#define OP_3DSTATE_CONSTANT_DS                  OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
#define OP_3DSTATE_HS                           OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
#define OP_3DSTATE_TE                           OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
#define OP_3DSTATE_DS                           OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
#define OP_3DSTATE_STREAMOUT                    OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
#define OP_3DSTATE_SBE                          OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
#define OP_3DSTATE_PS                           OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC   OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
#define OP_3DSTATE_BLEND_STATE_POINTERS         OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
#define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
#define OP_3DSTATE_URB_VS                       OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
#define OP_3DSTATE_URB_HS                       OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
#define OP_3DSTATE_URB_DS                       OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
#define OP_3DSTATE_URB_GS                       OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
#define OP_3DSTATE_GATHER_CONSTANT_VS           OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_GS           OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_HS           OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_DS           OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_PS           OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_VS             OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_PS             OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_VS             OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_PS             OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_VS             OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_PS             OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_VS           OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_PS           OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS       OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS       OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_VS        OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_GS        OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_HS        OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_DS        OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_PS        OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */

#define OP_3DSTATE_VF_INSTANCING                OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
#define OP_3DSTATE_VF_SGVS                      OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
#define OP_3DSTATE_VF_TOPOLOGY                  OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
#define OP_3DSTATE_WM_CHROMAKEY                 OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
#define OP_3DSTATE_PS_BLEND                     OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
#define OP_3DSTATE_WM_DEPTH_STENCIL             OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
#define OP_3DSTATE_PS_EXTRA                     OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
#define OP_3DSTATE_RASTER                       OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
#define OP_3DSTATE_SBE_SWIZ                     OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
#define OP_3DSTATE_WM_HZ_OP                     OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
#define OP_3DSTATE_COMPONENT_PACKING            OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */

#define OP_3DSTATE_DRAWING_RECTANGLE            OP_3D_MEDIA(0x3, 0x1, 0x00)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD0        OP_3D_MEDIA(0x3, 0x1, 0x02)
#define OP_3DSTATE_CHROMA_KEY                   OP_3D_MEDIA(0x3, 0x1, 0x04)
#define OP_SNB_3DSTATE_DEPTH_BUFFER             OP_3D_MEDIA(0x3, 0x1, 0x05)
#define OP_3DSTATE_POLY_STIPPLE_OFFSET          OP_3D_MEDIA(0x3, 0x1, 0x06)
#define OP_3DSTATE_POLY_STIPPLE_PATTERN         OP_3D_MEDIA(0x3, 0x1, 0x07)
#define OP_3DSTATE_LINE_STIPPLE                 OP_3D_MEDIA(0x3, 0x1, 0x08)
#define OP_3DSTATE_AA_LINE_PARAMS               OP_3D_MEDIA(0x3, 0x1, 0x0A)
#define OP_3DSTATE_GS_SVB_INDEX                 OP_3D_MEDIA(0x3, 0x1, 0x0B)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD1        OP_3D_MEDIA(0x3, 0x1, 0x0C)
#define OP_3DSTATE_MULTISAMPLE_BDW              OP_3D_MEDIA(0x3, 0x0, 0x0D)
#define OP_SNB_3DSTATE_STENCIL_BUFFER           OP_3D_MEDIA(0x3, 0x1, 0x0E)
#define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER        OP_3D_MEDIA(0x3, 0x1, 0x0F)
#define OP_SNB_3DSTATE_CLEAR_PARAMS             OP_3D_MEDIA(0x3, 0x1, 0x10)
#define OP_3DSTATE_MONOFILTER_SIZE              OP_3D_MEDIA(0x3, 0x1, 0x11)
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS       OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS       OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS       OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS       OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS       OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
#define OP_3DSTATE_SO_DECL_LIST                 OP_3D_MEDIA(0x3, 0x1, 0x17)
#define OP_3DSTATE_SO_BUFFER                    OP_3D_MEDIA(0x3, 0x1, 0x18)
#define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC     OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
#define OP_3DSTATE_GATHER_POOL_ALLOC            OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
#define OP_3DSTATE_SAMPLE_PATTERN               OP_3D_MEDIA(0x3, 0x1, 0x1C)
#define OP_PIPE_CONTROL                         OP_3D_MEDIA(0x3, 0x2, 0x00)
#define OP_3DPRIMITIVE                          OP_3D_MEDIA(0x3, 0x3, 0x00)

/* VCCP Command Parser */

/*
 * The MFX and VEB command definitions below are taken from the vaapi
 * intel-driver project (BSD license):
 * git://anongit.freedesktop.org/vaapi/intel-driver
 * src/i965_defines.h
 */

#define OP_MFX(pipeline, op, sub_opa, sub_opb)     \
	(3 << 13 | \
	 (pipeline) << 11 | \
	 (op) << 8 | \
	 (sub_opa) << 5 | \
	 (sub_opb))
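
/*
 * Worked example: OP_MFD_AVC_BSD_OBJECT below is OP_MFX(2, 1, 1, 8)
 * = (3 << 13) | (2 << 11) | (1 << 8) | (1 << 5) | 8 = 0x7128, matching
 * the 16-bit MFX_VC opcode taken from bits 31:16 of the command dword.
 */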

#define OP_MFX_PIPE_MODE_SELECT                    OP_MFX(2, 0, 0, 0)  /* ALL */
#define OP_MFX_SURFACE_STATE                       OP_MFX(2, 0, 0, 1)  /* ALL */
#define OP_MFX_PIPE_BUF_ADDR_STATE                 OP_MFX(2, 0, 0, 2)  /* ALL */
#define OP_MFX_IND_OBJ_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 3)  /* ALL */
#define OP_MFX_BSP_BUF_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 4)  /* ALL */
#define OP_2_0_0_5                                 OP_MFX(2, 0, 0, 5)  /* ALL */
#define OP_MFX_STATE_POINTER                       OP_MFX(2, 0, 0, 6)  /* ALL */
#define OP_MFX_QM_STATE                            OP_MFX(2, 0, 0, 7)  /* IVB+ */
#define OP_MFX_FQM_STATE                           OP_MFX(2, 0, 0, 8)  /* IVB+ */
#define OP_MFX_PAK_INSERT_OBJECT                   OP_MFX(2, 0, 2, 8)  /* IVB+ */
#define OP_MFX_STITCH_OBJECT                       OP_MFX(2, 0, 2, 0xA)  /* IVB+ */

#define OP_MFD_IT_OBJECT                           OP_MFX(2, 0, 1, 9) /* ALL */

#define OP_MFX_WAIT                                OP_MFX(1, 0, 0, 0) /* IVB+ */
#define OP_MFX_AVC_IMG_STATE                       OP_MFX(2, 1, 0, 0) /* ALL */
#define OP_MFX_AVC_QM_STATE                        OP_MFX(2, 1, 0, 1) /* ALL */
#define OP_MFX_AVC_DIRECTMODE_STATE                OP_MFX(2, 1, 0, 2) /* ALL */
#define OP_MFX_AVC_SLICE_STATE                     OP_MFX(2, 1, 0, 3) /* ALL */
#define OP_MFX_AVC_REF_IDX_STATE                   OP_MFX(2, 1, 0, 4) /* ALL */
#define OP_MFX_AVC_WEIGHTOFFSET_STATE              OP_MFX(2, 1, 0, 5) /* ALL */
#define OP_MFD_AVC_PICID_STATE                     OP_MFX(2, 1, 1, 5) /* HSW+ */
#define OP_MFD_AVC_DPB_STATE                       OP_MFX(2, 1, 1, 6) /* IVB+ */
#define OP_MFD_AVC_SLICEADDR                       OP_MFX(2, 1, 1, 7) /* IVB+ */
#define OP_MFD_AVC_BSD_OBJECT                      OP_MFX(2, 1, 1, 8) /* ALL */
#define OP_MFC_AVC_PAK_OBJECT                      OP_MFX(2, 1, 2, 9) /* ALL */

#define OP_MFX_VC1_PRED_PIPE_STATE                 OP_MFX(2, 2, 0, 1) /* ALL */
#define OP_MFX_VC1_DIRECTMODE_STATE                OP_MFX(2, 2, 0, 2) /* ALL */
#define OP_MFD_VC1_SHORT_PIC_STATE                 OP_MFX(2, 2, 1, 0) /* IVB+ */
#define OP_MFD_VC1_LONG_PIC_STATE                  OP_MFX(2, 2, 1, 1) /* IVB+ */
#define OP_MFD_VC1_BSD_OBJECT                      OP_MFX(2, 2, 1, 8) /* ALL */

#define OP_MFX_MPEG2_PIC_STATE                     OP_MFX(2, 3, 0, 0) /* ALL */
#define OP_MFX_MPEG2_QM_STATE                      OP_MFX(2, 3, 0, 1) /* ALL */
#define OP_MFD_MPEG2_BSD_OBJECT                    OP_MFX(2, 3, 1, 8) /* ALL */
#define OP_MFC_MPEG2_SLICEGROUP_STATE              OP_MFX(2, 3, 2, 3) /* ALL */
#define OP_MFC_MPEG2_PAK_OBJECT                    OP_MFX(2, 3, 2, 9) /* ALL */

#define OP_MFX_2_6_0_0                             OP_MFX(2, 6, 0, 0) /* IVB+ */
#define OP_MFX_2_6_0_8                             OP_MFX(2, 6, 0, 8) /* IVB+ */
#define OP_MFX_2_6_0_9                             OP_MFX(2, 6, 0, 9) /* IVB+ */

#define OP_MFX_JPEG_PIC_STATE                      OP_MFX(2, 7, 0, 0)
#define OP_MFX_JPEG_HUFF_TABLE_STATE               OP_MFX(2, 7, 0, 2)
#define OP_MFD_JPEG_BSD_OBJECT                     OP_MFX(2, 7, 1, 8)

#define OP_VEB(pipeline, op, sub_opa, sub_opb) \
	(3 << 13 | \
	 (pipeline) << 11 | \
	 (op) << 8 | \
	 (sub_opa) << 5 | \
	 (sub_opb))

#define OP_VEB_SURFACE_STATE                       OP_VEB(2, 4, 0, 0)
#define OP_VEB_STATE                               OP_VEB(2, 4, 0, 2)
#define OP_VEB_DNDI_IECP_STATE                     OP_VEB(2, 4, 0, 3)

struct parser_exec_state;

typedef int (*parser_cmd_handler)(struct parser_exec_state *s);

#define GVT_CMD_HASH_BITS 7

/* which DWords need address fix */
#define ADDR_FIX_1(x1)			(1 << (x1))
#define ADDR_FIX_2(x1, x2)		(ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
#define ADDR_FIX_3(x1, x2, x3)		(ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
#define ADDR_FIX_4(x1, x2, x3, x4)	(ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
#define ADDR_FIX_5(x1, x2, x3, x4, x5)	(ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
#define DWORD_FIELD(dword, end, start) \
	FIELD_GET(GENMASK(end, start), cmd_val(s, dword))

#define OP_LENGTH_BIAS 2
#define CMD_LEN(value) ((value) + OP_LENGTH_BIAS)
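
/*
 * Worked example: variable-length commands encode "total DWords - 2" in
 * their length field, hence the bias above. A length field of 1 thus
 * means CMD_LEN(1) = 3 DWords in total.
 */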

static int gvt_check_valid_cmd_length(int len, int valid_len)
{
	if (valid_len != len) {
		gvt_err("len is not valid: len=%u valid_len=%u\n",
			len, valid_len);
		return -EFAULT;
	}
	return 0;
}

struct cmd_info {
	const char *name;
	u32 opcode;

#define F_LEN_MASK	3U
#define F_LEN_CONST	1U
#define F_LEN_VAR	0U
/* value is const although LEN may be variable */
#define F_LEN_VAR_FIXED	(1<<1)

/*
 * command has its own ip advance logic
 * e.g. MI_BATCH_BUFFER_START, MI_BATCH_BUFFER_END
 */
#define F_IP_ADVANCE_CUSTOM	(1<<2)
	u32 flag;

#define R_RCS	BIT(RCS0)
#define R_VCS1	BIT(VCS0)
#define R_VCS2	BIT(VCS1)
#define R_VCS	(R_VCS1 | R_VCS2)
#define R_BCS	BIT(BCS0)
#define R_VECS	BIT(VECS0)
#define R_ALL	(R_RCS | R_VCS | R_BCS | R_VECS)
	/* rings that support this cmd: BLT/RCS/VCS/VECS */
	intel_engine_mask_t rings;

	/* devices that support this cmd: SNB/IVB/HSW/... */
	u16 devices;

	/* bitmap of the DWords holding addresses that need fixing up.
	 * A bit value of 0 means the DWord is a 32-bit non-address
	 * operand; a bit value of 1 means it is an address operand,
	 * which may be 32-bit or 64-bit depending on the architecture
	 * (defined by "gmadr_bytes_in_cmd" in intel_gvt). Regardless
	 * of the address width, each address takes only one bit in
	 * the bitmap.
	 */
	u16 addr_bitmap;

	/* flag == F_LEN_CONST : command length
	 * flag == F_LEN_VAR : number of length bits
	 * Note: length is in DWords
	 */
	u32 len;

	parser_cmd_handler handler;

	/* valid length in DWords */
	u32 valid_len;
};
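
/*
 * Illustrative table entry (hypothetical values, for reading the fields
 * above): a variable-length MI command whose low six bits carry the
 * DWord-count bias might be described as
 *
 *	{"MI_EXAMPLE", OP_MI_NOOP, F_LEN_VAR, R_ALL, 0, 0, 6, NULL, 0}
 *
 * where devices = 0 and addr_bitmap = 0 are placeholders, and len = 6
 * is the number of length bits rather than a DWord count.
 */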

struct cmd_entry {
	struct hlist_node hlist;
	const struct cmd_info *info;
};

enum {
	RING_BUFFER_INSTRUCTION,
	BATCH_BUFFER_INSTRUCTION,
	BATCH_BUFFER_2ND_LEVEL,
	RING_BUFFER_CTX,
};

enum {
	GTT_BUFFER,
	PPGTT_BUFFER
};

struct parser_exec_state {
	struct intel_vgpu *vgpu;
	const struct intel_engine_cs *engine;

	int buf_type;

	/* batch buffer address type */
	int buf_addr_type;

	/* graphics memory address of ring buffer start */
	unsigned long ring_start;
	unsigned long ring_size;
	unsigned long ring_head;
	unsigned long ring_tail;

	/* instruction graphics memory address */
	unsigned long ip_gma;

	/* mapped va of ip_gma */
	void *ip_va;
	void *rb_va;

	void *ret_bb_va;
	/* next instruction when return from batch buffer to ring buffer */
	unsigned long ret_ip_gma_ring;

	/* next instruction when return from 2nd batch buffer to batch buffer */
	unsigned long ret_ip_gma_bb;

	/* batch buffer address type (GTT or PPGTT)
	 * used when ret from 2nd level batch buffer
	 */
	int saved_buf_addr_type;
	bool is_ctx_wa;
	bool is_init_ctx;

	const struct cmd_info *info;

	struct intel_vgpu_workload *workload;
};

#define gmadr_dw_number(s)	\
	(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)

static unsigned long bypass_scan_mask = 0;

/* ring ALL, type = 0 */
static const struct sub_op_bits sub_op_mi[] = {
	{31, 29},
	{28, 23},
};

static const struct decode_info decode_info_mi = {
	"MI",
	OP_LEN_MI,
	ARRAY_SIZE(sub_op_mi),
	sub_op_mi,
};

/* ring RCS, command type 2 */
static const struct sub_op_bits sub_op_2d[] = {
	{31, 29},
	{28, 22},
};

static const struct decode_info decode_info_2d = {
	"2D",
	OP_LEN_2D,
	ARRAY_SIZE(sub_op_2d),
	sub_op_2d,
};

/* ring RCS, command type 3 */
static const struct sub_op_bits sub_op_3d_media[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 16},
};

static const struct decode_info decode_info_3d_media = {
	"3D_Media",
	OP_LEN_3D_MEDIA,
	ARRAY_SIZE(sub_op_3d_media),
	sub_op_3d_media,
};

/* ring VCS, command type 3 */
static const struct sub_op_bits sub_op_mfx_vc[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 21},
	{20, 16},
};

static const struct decode_info decode_info_mfx_vc = {
	"MFX_VC",
	OP_LEN_MFX_VC,
	ARRAY_SIZE(sub_op_mfx_vc),
	sub_op_mfx_vc,
};

/* ring VECS, command type 3 */
static const struct sub_op_bits sub_op_vebox[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 21},
	{20, 16},
};

static const struct decode_info decode_info_vebox = {
	"VEBOX",
	OP_LEN_VEBOX,
	ARRAY_SIZE(sub_op_vebox),
	sub_op_vebox,
};

static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
	[RCS0] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_3d_media,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VCS0] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_mfx_vc,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[BCS0] = {
		&decode_info_mi,
		NULL,
		&decode_info_2d,
		NULL,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VECS0] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_vebox,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VCS1] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_mfx_vc,
		NULL,
		NULL,
		NULL,
		NULL,
	},
};

static inline u32 get_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
	const struct decode_info *d_info;

	d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
	if (d_info == NULL)
		return INVALID_OP;

	return cmd >> (32 - d_info->op_len);
}

static inline const struct cmd_info *
find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode,
	       const struct intel_engine_cs *engine)
{
	struct cmd_entry *e;

	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
		if (opcode == e->info->opcode &&
		    e->info->rings & engine->mask)
			return e->info;
	}
	return NULL;
}

static inline const struct cmd_info *
get_cmd_info(struct intel_gvt *gvt, u32 cmd,
	     const struct intel_engine_cs *engine)
{
	u32 opcode;

	opcode = get_opcode(cmd, engine);
	if (opcode == INVALID_OP)
		return NULL;

	return find_cmd_entry(gvt, opcode, engine);
}

static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
{
	return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
}
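
/*
 * Worked example: decoding the PIPE_CONTROL dword 0x7a000004 with the
 * 3D_Media sub-op table above gives sub_op_val(cmd, 31, 29) = 3 (type),
 * sub_op_val(cmd, 28, 27) = 3 (sub type), sub_op_val(cmd, 26, 24) = 2
 * (opcode) and sub_op_val(cmd, 23, 16) = 0 (sub opcode).
 */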

static inline void print_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
	const struct decode_info *d_info;
	int i;

	d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
	if (d_info == NULL)
		return;

	gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
		    cmd >> (32 - d_info->op_len), d_info->name);

	for (i = 0; i < d_info->nr_sub_op; i++)
		pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
					   d_info->sub_op[i].low));

	pr_err("\n");
}

static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
{
	return s->ip_va + (index << 2);
}

static inline u32 cmd_val(struct parser_exec_state *s, int index)
{
	return *cmd_ptr(s, index);
}

static inline bool is_init_ctx(struct parser_exec_state *s)
{
	return (s->buf_type == RING_BUFFER_CTX && s->is_init_ctx);
}

static void parser_exec_state_dump(struct parser_exec_state *s)
{
	int cnt = 0;
	int i;

	gvt_dbg_cmd("  vgpu%d RING%s: ring_start(%08lx) ring_end(%08lx)"
		    " ring_head(%08lx) ring_tail(%08lx)\n",
		    s->vgpu->id, s->engine->name,
		    s->ring_start, s->ring_start + s->ring_size,
		    s->ring_head, s->ring_tail);

	gvt_dbg_cmd("  %s %s ip_gma(%08lx) ",
		    s->buf_type == RING_BUFFER_INSTRUCTION ?
		    "RING_BUFFER" : ((s->buf_type == RING_BUFFER_CTX) ?
				     "CTX_BUFFER" : "BATCH_BUFFER"),
		    s->buf_addr_type == GTT_BUFFER ?
		    "GTT" : "PPGTT", s->ip_gma);

	if (s->ip_va == NULL) {
		gvt_dbg_cmd(" ip_va(NULL)");
		return;
	}

	gvt_dbg_cmd("  ip_va=%p: %08x %08x %08x %08x\n",
		    s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
		    cmd_val(s, 2), cmd_val(s, 3));

	print_opcode(cmd_val(s, 0), s->engine);

	s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);

	while (cnt < 1024) {
		gvt_dbg_cmd("ip_va=%p: ", s->ip_va);
		for (i = 0; i < 8; i++)
			gvt_dbg_cmd("%08x ", cmd_val(s, i));
		gvt_dbg_cmd("\n");

		s->ip_va += 8 * sizeof(u32);
		cnt += 8;
	}
}

static inline void update_ip_va(struct parser_exec_state *s)
{
	unsigned long len = 0;

	if (WARN_ON(s->ring_head == s->ring_tail))
		return;

	if (s->buf_type == RING_BUFFER_INSTRUCTION ||
	    s->buf_type == RING_BUFFER_CTX) {
		unsigned long ring_top = s->ring_start + s->ring_size;

		if (s->ring_head > s->ring_tail) {
			if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
				len = (s->ip_gma - s->ring_head);
			else if (s->ip_gma >= s->ring_start &&
				 s->ip_gma <= s->ring_tail)
				len = (ring_top - s->ring_head) +
					(s->ip_gma - s->ring_start);
		} else
			len = (s->ip_gma - s->ring_head);

		s->ip_va = s->rb_va + len;
	} else {/* shadow batch buffer */
		s->ip_va = s->ret_bb_va;
	}
}
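
/*
 * Worked example for the wrapped-ring case above: with ring_start =
 * 0x1000, ring_size = 0x1000 (so ring_top = 0x2000), ring_head = 0x1c00
 * and ring_tail = 0x1400, an ip_gma of 0x1200 lies in the wrapped part,
 * so len = (0x2000 - 0x1c00) + (0x1200 - 0x1000) = 0x600 bytes into the
 * shadow ring buffer copy.
 */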

static inline int ip_gma_set(struct parser_exec_state *s,
			     unsigned long ip_gma)
{
	WARN_ON(!IS_ALIGNED(ip_gma, 4));

	s->ip_gma = ip_gma;
	update_ip_va(s);
	return 0;
}

static inline int ip_gma_advance(struct parser_exec_state *s,
				 unsigned int dw_len)
{
	s->ip_gma += (dw_len << 2);

	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
		if (s->ip_gma >= s->ring_start + s->ring_size)
			s->ip_gma -= s->ring_size;
		update_ip_va(s);
	} else {
		s->ip_va += (dw_len << 2);
	}

	return 0;
}

static inline int get_cmd_length(const struct cmd_info *info, u32 cmd)
{
	if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
		return info->len;
	else
		return (cmd & ((1U << info->len) - 1)) + 2;
}
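
/*
 * Worked example (assuming a hypothetical cmd_info with F_LEN_VAR and
 * len = 6, i.e. six length bits): a first dword whose low six bits are
 * 1 decodes to (1 & 0x3f) + 2 = 3 DWords, consistent with CMD_LEN(1).
 */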

static inline int cmd_length(struct parser_exec_state *s)
{
	return get_cmd_length(s->info, cmd_val(s, 0));
}

/* do not remove this; some platforms may need a clflush here */
#define patch_value(s, addr, val) do { \
	*addr = val; \
} while (0)

static inline bool is_mocs_mmio(unsigned int offset)
{
	return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
		((offset >= 0xb020) && (offset <= 0xb0a0));
}

static int is_cmd_update_pdps(unsigned int offset,
			      struct parser_exec_state *s)
{
	u32 base = s->workload->engine->mmio_base;

	return i915_mmio_reg_equal(_MMIO(offset), GEN8_RING_PDP_UDW(base, 0));
}

static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s,
		unsigned int offset, unsigned int index)
{
	struct intel_vgpu *vgpu = s->vgpu;
	struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm;
	struct intel_vgpu_mm *mm;
	u64 pdps[GEN8_3LVL_PDPES];

	if (shadow_mm->ppgtt_mm.root_entry_type ==
	    GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		pdps[0] = (u64)cmd_val(s, 2) << 32;
		pdps[0] |= cmd_val(s, 4);

		mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
		if (!mm) {
			gvt_vgpu_err("failed to get the 4-level shadow vm\n");
			return -EINVAL;
		}
		intel_vgpu_mm_get(mm);
		list_add_tail(&mm->ppgtt_mm.link,
			      &s->workload->lri_shadow_mm);
		*cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
		*cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
	} else {
		/* All guests currently use a PML4 (4-level) table; a
		 * guest that uses a 3-level table yet updates the PPGTT
		 * via LRI does not exist, so this path is simply
		 * untestable.
		 */
		GEM_BUG_ON(1);
		gvt_vgpu_err("invalid shared shadow vm type\n");
		return -EINVAL;
	}
	return 0;
}

static int cmd_reg_handler(struct parser_exec_state *s,
	unsigned int offset, unsigned int index, char *cmd)
{
	struct intel_vgpu *vgpu = s->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	u32 ctx_sr_ctl;
	u32 *vreg, vreg_old;

	if (offset + 4 > gvt->device_info.mmio_size) {
		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
				cmd, offset);
		return -EFAULT;
	}

	if (is_init_ctx(s)) {
		struct intel_gvt_mmio_info *mmio_info;

		intel_gvt_mmio_set_cmd_accessible(gvt, offset);
		mmio_info = intel_gvt_find_mmio_info(gvt, offset);
		if (mmio_info && mmio_info->write)
			intel_gvt_mmio_set_cmd_write_patch(gvt, offset);
		return 0;
	}

	if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
		gvt_vgpu_err("%s access to non-render register (%x)\n",
				cmd, offset);
		return -EBADRQC;
	}

	if (!strncmp(cmd, "srm", 3) ||
	    !strncmp(cmd, "lrm", 3)) {
		if (offset == i915_mmio_reg_offset(GEN8_L3SQCREG4) ||
		    offset == 0x21f0 ||
		    (IS_BROADWELL(gvt->gt->i915) &&
		     offset == i915_mmio_reg_offset(INSTPM)))
			return 0;
		else {
			gvt_vgpu_err("%s access to register (%x)\n",
					cmd, offset);
			return -EPERM;
		}
	}

	if (!strncmp(cmd, "lrr-src", 7) ||
	    !strncmp(cmd, "lrr-dst", 7)) {
		if (IS_BROADWELL(gvt->gt->i915) && offset == 0x215c)
			return 0;
		else {
			gvt_vgpu_err("not allowed cmd %s reg (%x)\n", cmd, offset);
			return -EPERM;
		}
	}

	if (!strncmp(cmd, "pipe_ctrl", 9)) {
		/* TODO: add LRI POST logic here */
		return 0;
	}

	if (strncmp(cmd, "lri", 3))
		return -EPERM;

	/* below are all lri handlers */
	vreg = &vgpu_vreg(s->vgpu, offset);

	if (is_cmd_update_pdps(offset, s) &&
	    cmd_pdp_mmio_update_handler(s, offset, index))
		return -EINVAL;

	if (offset == i915_mmio_reg_offset(DERRMR) ||
	    offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
		/* Writes to the HW VGT_PVINFO_PAGE offset will be discarded */
		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
	}

	if (is_mocs_mmio(offset))
		*vreg = cmd_val(s, index + 1);

	vreg_old = *vreg;

	if (intel_gvt_mmio_is_cmd_write_patch(gvt, offset)) {
		u32 cmdval_new, cmdval;
		struct intel_gvt_mmio_info *mmio_info;

		cmdval = cmd_val(s, index + 1);

		mmio_info = intel_gvt_find_mmio_info(gvt, offset);
		if (!mmio_info) {
			cmdval_new = cmdval;
		} else {
			u64 ro_mask = mmio_info->ro_mask;
			int ret;

			if (likely(!ro_mask))
				ret = mmio_info->write(s->vgpu, offset,
						       &cmdval, 4);
			else {
				gvt_vgpu_err("try to write RO reg %x\n",
					     offset);
				ret = -EBADRQC;
			}
			if (ret)
				return ret;
			cmdval_new = *vreg;
		}
		if (cmdval_new != cmdval)
			patch_value(s, cmd_ptr(s, index + 1), cmdval_new);
	}

	/* only patch the cmd; restore the vreg value if the mmio write
	 * handler changed it
	 */
	*vreg = vreg_old;

	/* TODO
	 * In order to let a workload with an inhibit context generate
	 * correct image data in memory, vreg values are loaded to hw
	 * via LRIs in that workload. But as the indirect context is
	 * loaded prior to the LRIs in the workload, we don't want the
	 * reg values specified in the indirect context to be
	 * overwritten by the LRIs in the workload. So, when scanning an
	 * indirect context, we update the reg values in it into vregs,
	 * so that the LRIs in a workload with an inhibit context
	 * restore the correct values.
	 */
	if (GRAPHICS_VER(s->engine->i915) == 9 &&
	    intel_gvt_mmio_is_sr_in_ctx(gvt, offset) &&
	    !strncmp(cmd, "lri", 3)) {
		intel_gvt_read_gpa(s->vgpu,
			s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
		/* check inhibit context */
		if (ctx_sr_ctl & 1) {
			u32 data = cmd_val(s, index + 1);

			if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
				intel_vgpu_mask_mmio_write(vgpu,
							offset, &data, 4);
			else
				vgpu_vreg(vgpu, offset) = data;
		}
	}

	return 0;
}

#define cmd_reg(s, i) \
	(cmd_val(s, i) & GENMASK(22, 2))

#define cmd_reg_inhibit(s, i) \
	(cmd_val(s, i) & GENMASK(22, 18))

#define cmd_gma(s, i) \
	(cmd_val(s, i) & GENMASK(31, 2))

#define cmd_gma_hi(s, i) \
	(cmd_val(s, i) & GENMASK(15, 0))
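
/*
 * Illustrative sketch (hypothetical helper, not part of the parser): an
 * LRI payload is a sequence of (register offset, value) dword pairs
 * after the header, which is why cmd_handler_lri() below steps through
 * the command two dwords at a time and masks offset bits 22:2 with
 * cmd_reg().
 */
static inline u32 example_lri_pair_reg(struct parser_exec_state *s, int pair)
{
	/* pair 0 starts at dword 1; each pair occupies two dwords */
	return cmd_reg(s, 1 + 2 * pair);
}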

static int cmd_handler_lri(struct parser_exec_state *s)
{
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len; i += 2) {
		if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) {
			if (s->engine->id == BCS0 &&
			    cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
				ret |= 0;
			else
				ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0;
		}
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
		if (ret)
			break;
	}
	return ret;
}

static int cmd_handler_lrr(struct parser_exec_state *s)
{
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len; i += 2) {
		if (IS_BROADWELL(s->engine->i915))
			ret |= ((cmd_reg_inhibit(s, i) ||
				 (cmd_reg_inhibit(s, i + 1)))) ?
				-EBADRQC : 0;
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
		if (ret)
			break;
	}
	return ret;
}

static inline int cmd_address_audit(struct parser_exec_state *s,
		unsigned long guest_gma, int op_size, bool index_mode);

static int cmd_handler_lrm(struct parser_exec_state *s)
{
	struct intel_gvt *gvt = s->vgpu->gvt;
	int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len;) {
		if (IS_BROADWELL(s->engine->i915))
			ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
		if (ret)
			break;
		if (cmd_val(s, 0) & (1 << 22)) {
			gma = cmd_gma(s, i + 1);
			if (gmadr_bytes == 8)
				gma |= (cmd_gma_hi(s, i + 2)) << 32;
			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
			if (ret)
				break;
		}
		i += gmadr_dw_number(s) + 1;
	}
	return ret;
}

static int cmd_handler_srm(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len;) {
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
		if (ret)
			break;
		if (cmd_val(s, 0) & (1 << 22)) {
			gma = cmd_gma(s, i + 1);
			if (gmadr_bytes == 8)
				gma |= (cmd_gma_hi(s, i + 2)) << 32;
			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
			if (ret)
				break;
		}
		i += gmadr_dw_number(s) + 1;
	}
	return ret;
}

struct cmd_interrupt_event {
	int pipe_control_notify;
	int mi_flush_dw;
	int mi_user_interrupt;
};

static const struct cmd_interrupt_event cmd_interrupt_events[] = {
	[RCS0] = {
		.pipe_control_notify = RCS_PIPE_CONTROL,
		.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
		.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
	},
	[BCS0] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = BCS_MI_FLUSH_DW,
		.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
	},
	[VCS0] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VCS_MI_FLUSH_DW,
		.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
	},
	[VCS1] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VCS2_MI_FLUSH_DW,
		.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
	},
	[VECS0] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VECS_MI_FLUSH_DW,
		.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
	},
};

static int cmd_handler_pipe_control(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	bool index_mode = false;
	unsigned int post_sync;
	int ret = 0;
	u32 hws_pga, val;

	post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;

	/* LRI post sync */
	if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
		ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
	/* post sync */
	else if (post_sync) {
		if (post_sync == 2)
			ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
		else if (post_sync == 3)
			ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
		else if (post_sync == 1) {
			/* check ggtt */
			if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
				gma = cmd_val(s, 2) & GENMASK(31, 3);
				if (gmadr_bytes == 8)
					gma |= (cmd_gma_hi(s, 3)) << 32;
				/* Store Data Index */
				if (cmd_val(s, 1) & (1 << 21))
					index_mode = true;
				ret |= cmd_address_audit(s, gma, sizeof(u64),
							 index_mode);
				if (ret)
					return ret;
				if (index_mode) {
					hws_pga = s->vgpu->hws_pga[s->engine->id];
					gma = hws_pga + gma;
					patch_value(s, cmd_ptr(s, 2), gma);
					val = cmd_val(s, 1) & (~(1 << 21));
					patch_value(s, cmd_ptr(s, 1), val);
				}
			}
		}
	}

	if (ret)
		return ret;

	if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
		set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify,
			s->workload->pending_events);
	return 0;
}

static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
	set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt,
		s->workload->pending_events);
	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
	return 0;
}

static int cmd_advance_default(struct parser_exec_state *s)
{
	return ip_gma_advance(s, cmd_length(s));
}

static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
{
	int ret;

	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
		s->buf_type = BATCH_BUFFER_INSTRUCTION;
		ret = ip_gma_set(s, s->ret_ip_gma_bb);
		s->buf_addr_type = s->saved_buf_addr_type;
	} else if (s->buf_type == RING_BUFFER_CTX) {
		ret = ip_gma_set(s, s->ring_tail);
	} else {
		s->buf_type = RING_BUFFER_INSTRUCTION;
		s->buf_addr_type = GTT_BUFFER;
		if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
			s->ret_ip_gma_ring -= s->ring_size;
		ret = ip_gma_set(s, s->ret_ip_gma_ring);
	}
	return ret;
}

struct mi_display_flip_command_info {
	int pipe;
	int plane;
	int event;
	i915_reg_t stride_reg;
	i915_reg_t ctrl_reg;
	i915_reg_t surf_reg;
	u64 stride_val;
	u64 tile_val;
	u64 surf_val;
	bool async_flip;
};

struct plane_code_mapping {
	int pipe;
	int plane;
	int event;
};

static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->engine->i915;
	struct intel_display *display = &dev_priv->display;
	struct plane_code_mapping gen8_plane_code[] = {
		[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
		[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
		[2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
		[3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
		[4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
		[5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
	};
	u32 dword0, dword1, dword2;
	u32 v;

	dword0 = cmd_val(s, 0);
	dword1 = cmd_val(s, 1);
	dword2 = cmd_val(s, 2);

	v = (dword0 & GENMASK(21, 19)) >> 19;
	if (drm_WARN_ON(&dev_priv->drm, v >= ARRAY_SIZE(gen8_plane_code)))
		return -EBADRQC;

	info->pipe = gen8_plane_code[v].pipe;
	info->plane = gen8_plane_code[v].plane;
	info->event = gen8_plane_code[v].event;
	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
	info->tile_val = (dword1 & 0x1);
	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);

	if (info->plane == PLANE_A) {
		info->ctrl_reg = DSPCNTR(display, info->pipe);
		info->stride_reg = DSPSTRIDE(display, info->pipe);
		info->surf_reg = DSPSURF(display, info->pipe);
	} else if (info->plane == PLANE_B) {
		info->ctrl_reg = SPRCTL(info->pipe);
		info->stride_reg = SPRSTRIDE(info->pipe);
		info->surf_reg = SPRSURF(info->pipe);
	} else {
		drm_WARN_ON(&dev_priv->drm, 1);
		return -EBADRQC;
	}
	return 0;
}

static int skl_decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->engine->i915;
	struct intel_display *display = &dev_priv->display;
	struct intel_vgpu *vgpu = s->vgpu;
	u32 dword0 = cmd_val(s, 0);
	u32 dword1 = cmd_val(s, 1);
	u32 dword2 = cmd_val(s, 2);
	u32 plane = (dword0 & GENMASK(12, 8)) >> 8;

	info->plane = PRIMARY_PLANE;

	switch (plane) {
	case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
		info->pipe = PIPE_A;
		info->event = PRIMARY_A_FLIP_DONE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
		info->pipe = PIPE_B;
		info->event = PRIMARY_B_FLIP_DONE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
		info->pipe = PIPE_C;
		info->event = PRIMARY_C_FLIP_DONE;
		break;

	case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
		info->pipe = PIPE_A;
		info->event = SPRITE_A_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
		info->pipe = PIPE_B;
		info->event = SPRITE_B_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
		info->pipe = PIPE_C;
		info->event = SPRITE_C_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;

	default:
		gvt_vgpu_err("unknown plane code %d\n", plane);
		return -EBADRQC;
	}

	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
	info->tile_val = (dword1 & GENMASK(2, 0));
	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);

	info->ctrl_reg = DSPCNTR(display, info->pipe);
	info->stride_reg = DSPSTRIDE(display, info->pipe);
	info->surf_reg = DSPSURF(display, info->pipe);

	return 0;
}

static int gen8_check_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	u32 stride, tile;

	if (!info->async_flip)
		return 0;

	if (GRAPHICS_VER(s->engine->i915) >= 9) {
		stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
			GENMASK(12, 10)) >> 10;
	} else {
		stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) &
			  GENMASK(15, 6)) >> 6;
		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
	}

	if (stride != info->stride_val)
		gvt_dbg_cmd("cannot change stride during async flip\n");

	if (tile != info->tile_val)
		gvt_dbg_cmd("cannot change tile during async flip\n");

	return 0;
}

static int gen8_update_plane_mmio_from_mi_display_flip(
		struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->engine->i915;
	struct intel_display *display = &dev_priv->display;
	struct intel_vgpu *vgpu = s->vgpu;

	set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
		      info->surf_val << 12);
	if (GRAPHICS_VER(dev_priv) >= 9) {
		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
			      info->stride_val);
		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
			      info->tile_val << 10);
	} else {
		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(15, 6),
			      info->stride_val << 6);
		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(10, 10),
			      info->tile_val << 10);
	}

	if (info->plane == PLANE_PRIMARY)
		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, info->pipe))++;

	if (info->async_flip)
		intel_vgpu_trigger_virtual_event(vgpu, info->event);
	else
		set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]);

	return 0;
}

static int decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	if (IS_BROADWELL(s->engine->i915))
		return gen8_decode_mi_display_flip(s, info);
	if (GRAPHICS_VER(s->engine->i915) >= 9)
		return skl_decode_mi_display_flip(s, info);

	return -ENODEV;
}

static int check_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	return gen8_check_mi_display_flip(s, info);
}

static int update_plane_mmio_from_mi_display_flip(
		struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	return gen8_update_plane_mmio_from_mi_display_flip(s, info);
}

static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
{
	struct mi_display_flip_command_info info;
	struct intel_vgpu *vgpu = s->vgpu;
	int ret;
	int i;
	int len = cmd_length(s);
	u32 valid_len = CMD_LEN(1);

	/* Flip Type == Stereo 3D Flip */
	if (DWORD_FIELD(2, 1, 0) == 2)
		valid_len++;
	ret = gvt_check_valid_cmd_length(cmd_length(s),
					 valid_len);
	if (ret)
		return ret;

	ret = decode_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("fail to decode MI display flip command\n");
		return ret;
	}

	ret = check_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("invalid MI display flip command\n");
		return ret;
	}

	ret = update_plane_mmio_from_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("fail to update plane mmio\n");
		return ret;
	}

	for (i = 0; i < len; i++)
		patch_value(s, cmd_ptr(s, i), MI_NOOP);
	return 0;
}

static bool is_wait_for_flip_pending(u32 cmd)
{
	return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
		      MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
		      MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
		      MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
		      MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
		      MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
}

static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
{
	u32 cmd = cmd_val(s, 0);

	if (!is_wait_for_flip_pending(cmd))
		return 0;

	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
	return 0;
}

static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
{
	unsigned long addr;
	unsigned long gma_high, gma_low;
	struct intel_vgpu *vgpu = s->vgpu;
	int gmadr_bytes = vgpu->gvt->device_info.gmadr_bytes_in_cmd;

	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) {
		gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes);
		return INTEL_GVT_INVALID_ADDR;
	}

	gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
	if (gmadr_bytes == 4) {
		addr = gma_low;
	} else {
		gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
		addr = (((unsigned long)gma_high) << 32) | gma_low;
	}
	return addr;
}

static inline int cmd_address_audit(struct parser_exec_state *s,
		unsigned long guest_gma, int op_size, bool index_mode)
{
	struct intel_vgpu *vgpu = s->vgpu;
	u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
	int i;
	int ret;

	if (op_size > max_surface_size) {
		gvt_vgpu_err("command address audit fail name %s\n",
			     s->info->name);
		return -EFAULT;
	}

	if (index_mode) {
		if (guest_gma >= I915_GTT_PAGE_SIZE) {
			ret = -EFAULT;
			goto err;
		}
	} else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
		ret = -EFAULT;
		goto err;
	}

	return 0;

err:
	gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
		     s->info->name, guest_gma, op_size);

	pr_err("cmd dump: ");
	for (i = 0; i < cmd_length(s); i++) {
		if (!(i % 4))
			pr_err("\n%08x ", cmd_val(s, i));
		else
			pr_err("%08x ", cmd_val(s, i));
	}
	pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
	       vgpu->id,
	       vgpu_aperture_gmadr_base(vgpu),
	       vgpu_aperture_gmadr_end(vgpu),
	       vgpu_hidden_gmadr_base(vgpu),
	       vgpu_hidden_gmadr_end(vgpu));
	return ret;
}

static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	int op_size = (cmd_length(s) - 3) * sizeof(u32);
	int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
	unsigned long gma, gma_low, gma_high;
	u32 valid_len = CMD_LEN(2);
	int ret = 0;

	/* check ppgtt */
	if (!(cmd_val(s, 0) & (1 << 22)))
		return 0;

	/* check if QWORD */
	if (DWORD_FIELD(0, 21, 21))
		valid_len++;
	ret = gvt_check_valid_cmd_length(cmd_length(s),
					 valid_len);
	if (ret)
		return ret;

	gma = cmd_val(s, 2) & GENMASK(31, 2);

	if (gmadr_bytes == 8) {
		gma_low = cmd_val(s, 1) & GENMASK(31, 2);
		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
		gma = (gma_high << 32) | gma_low;
		core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
	}
	ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
	return ret;
}
1637
unexpected_cmd(struct parser_exec_state * s)1638 static inline int unexpected_cmd(struct parser_exec_state *s)
1639 {
1640 struct intel_vgpu *vgpu = s->vgpu;
1641
1642 gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
1643
1644 return -EBADRQC;
1645 }
1646
cmd_handler_mi_semaphore_wait(struct parser_exec_state * s)1647 static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
1648 {
1649 return unexpected_cmd(s);
1650 }
1651
cmd_handler_mi_report_perf_count(struct parser_exec_state * s)1652 static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
1653 {
1654 return unexpected_cmd(s);
1655 }
1656
cmd_handler_mi_op_2e(struct parser_exec_state * s)1657 static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
1658 {
1659 return unexpected_cmd(s);
1660 }
1661
cmd_handler_mi_op_2f(struct parser_exec_state * s)1662 static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
1663 {
1664 int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1665 int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
1666 sizeof(u32);
1667 unsigned long gma, gma_high;
1668 u32 valid_len = CMD_LEN(1);
1669 int ret = 0;
1670
1671 if (!(cmd_val(s, 0) & (1 << 22)))
1672 return ret;
1673
1674 /* check inline data */
1675 if (cmd_val(s, 0) & BIT(18))
1676 valid_len = CMD_LEN(9);
1677 ret = gvt_check_valid_cmd_length(cmd_length(s),
1678 valid_len);
1679 if (ret)
1680 return ret;
1681
1682 gma = cmd_val(s, 1) & GENMASK(31, 2);
1683 if (gmadr_bytes == 8) {
1684 gma_high = cmd_val(s, 2) & GENMASK(15, 0);
1685 gma = (gma_high << 32) | gma;
1686 }
1687 ret = cmd_address_audit(s, gma, op_size, false);
1688 return ret;
1689 }
1690
1691 static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
1692 {
1693 return unexpected_cmd(s);
1694 }
1695
1696 static int cmd_handler_mi_clflush(struct parser_exec_state *s)
1697 {
1698 return unexpected_cmd(s);
1699 }
1700
1701 static int cmd_handler_mi_conditional_batch_buffer_end(
1702 struct parser_exec_state *s)
1703 {
1704 return unexpected_cmd(s);
1705 }
1706
1707 static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
1708 {
1709 return unexpected_cmd(s);
1710 }
1711
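/*
 * MI_FLUSH_DW handler. When a post-sync write is enabled, the write
 * address in DW1/DW2 is audited. In Store-Data-Index mode (DW0 bit
 * 21) the value is an offset into the hardware status page, so the
 * parser rebases it onto this vGPU's status page (hws_pga), patches
 * the absolute address back into the command and clears the index
 * bit, turning it into a plain GGTT write.
 */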
1712 static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
1713 {
1714 int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1715 unsigned long gma;
1716 bool index_mode = false;
1717 int ret = 0;
1718 u32 hws_pga, val;
1719 u32 valid_len = CMD_LEN(2);
1720
1721 ret = gvt_check_valid_cmd_length(cmd_length(s),
1722 valid_len);
1723 if (ret) {
1724 /* Check again for Qword */
1725 ret = gvt_check_valid_cmd_length(cmd_length(s),
1726 ++valid_len);
1727 return ret;
1728 }
1729
1730 /* Check post-sync and ppgtt bit */
1731 if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
1732 gma = cmd_val(s, 1) & GENMASK(31, 3);
1733 if (gmadr_bytes == 8)
1734 gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
1735 /* Store Data Index */
1736 if (cmd_val(s, 0) & (1 << 21))
1737 index_mode = true;
1738 ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
1739 if (ret)
1740 return ret;
1741 if (index_mode) {
1742 hws_pga = s->vgpu->hws_pga[s->engine->id];
1743 gma = hws_pga + gma;
1744 patch_value(s, cmd_ptr(s, 1), gma);
1745 val = cmd_val(s, 0) & (~(1 << 21));
1746 patch_value(s, cmd_ptr(s, 0), val);
1747 }
1748 }
1749 /* Check notify bit */
1750 if ((cmd_val(s, 0) & (1 << 8)))
1751 set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw,
1752 s->workload->pending_events);
1753 return ret;
1754 }
1755
1756 static void addr_type_update_snb(struct parser_exec_state *s)
1757 {
1758 if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
1759 (BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
1760 s->buf_addr_type = PPGTT_BUFFER;
1761 }
1762 }
1763
1764
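/*
 * Copy [gma, end_gma) from guest graphics memory into the host
 * buffer at va, translating gma -> gpa through @mm one page at a
 * time, since the range may span non-contiguous guest pages.
 * Returns the number of bytes copied, or -EFAULT on a bad address.
 */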
1765 static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
1766 unsigned long gma, unsigned long end_gma, void *va)
1767 {
1768 unsigned long copy_len, offset;
1769 unsigned long len = 0;
1770 unsigned long gpa;
1771
1772 while (gma != end_gma) {
1773 gpa = intel_vgpu_gma_to_gpa(mm, gma);
1774 if (gpa == INTEL_GVT_INVALID_ADDR) {
1775 gvt_vgpu_err("invalid gma address: %lx\n", gma);
1776 return -EFAULT;
1777 }
1778
1779 offset = gma & (I915_GTT_PAGE_SIZE - 1);
1780
1781 copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
1782 I915_GTT_PAGE_SIZE - offset : end_gma - gma;
1783
1784 intel_gvt_read_gpa(vgpu, gpa, va + len, copy_len);
1785
1786 len += copy_len;
1787 gma += copy_len;
1788 }
1789 return len;
1790 }
1791
1792
1793 /*
1794 * Check whether a batch buffer needs to be scanned. Currently
1795 * the only criterion is privilege.
1796 */
1797 static int batch_buffer_needs_scan(struct parser_exec_state *s)
1798 {
1799 /* Decide privilege based on address space */
1800 if (cmd_val(s, 0) & BIT(8) &&
1801 !(s->vgpu->scan_nonprivbb & s->engine->mask))
1802 return 0;
1803
1804 return 1;
1805 }
1806
1807 static const char *repr_addr_type(unsigned int type)
1808 {
1809 return type == PPGTT_BUFFER ? "ppgtt" : "ggtt";
1810 }
1811
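/*
 * Walk the guest batch buffer command by command, summing each
 * command's length until an MI_BATCH_BUFFER_END or a chained
 * (non-second-level) MI_BATCH_BUFFER_START terminates the buffer.
 * The offset of that terminating command is recorded so that it can
 * be re-audited after the buffer has been copied.
 */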
1812 static int find_bb_size(struct parser_exec_state *s,
1813 unsigned long *bb_size,
1814 unsigned long *bb_end_cmd_offset)
1815 {
1816 unsigned long gma = 0;
1817 const struct cmd_info *info;
1818 u32 cmd_len = 0;
1819 bool bb_end = false;
1820 struct intel_vgpu *vgpu = s->vgpu;
1821 u32 cmd;
1822 struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
1823 s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
1824
1825 *bb_size = 0;
1826 *bb_end_cmd_offset = 0;
1827
1828 /* get the start gm address of the batch buffer */
1829 gma = get_gma_bb_from_cmd(s, 1);
1830 if (gma == INTEL_GVT_INVALID_ADDR)
1831 return -EFAULT;
1832
1833 cmd = cmd_val(s, 0);
1834 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
1835 if (info == NULL) {
1836 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
1837 cmd, get_opcode(cmd, s->engine),
1838 repr_addr_type(s->buf_addr_type),
1839 s->engine->name, s->workload);
1840 return -EBADRQC;
1841 }
1842 do {
1843 if (copy_gma_to_hva(s->vgpu, mm,
1844 gma, gma + 4, &cmd) < 0)
1845 return -EFAULT;
1846 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
1847 if (info == NULL) {
1848 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
1849 cmd, get_opcode(cmd, s->engine),
1850 repr_addr_type(s->buf_addr_type),
1851 s->engine->name, s->workload);
1852 return -EBADRQC;
1853 }
1854
1855 if (info->opcode == OP_MI_BATCH_BUFFER_END) {
1856 bb_end = true;
1857 } else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
1858 if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)
1859 /* chained batch buffer */
1860 bb_end = true;
1861 }
1862
1863 if (bb_end)
1864 *bb_end_cmd_offset = *bb_size;
1865
1866 cmd_len = get_cmd_length(info, cmd) << 2;
1867 *bb_size += cmd_len;
1868 gma += cmd_len;
1869 } while (!bb_end);
1870
1871 return 0;
1872 }
1873
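/*
 * Re-check that the command at the offset recorded by find_bb_size()
 * is still a valid terminator in the *copied* buffer; this presumably
 * guards against the guest rewriting the buffer between the sizing
 * pass and the copy (a time-of-check/time-of-use race).
 */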
1874 static int audit_bb_end(struct parser_exec_state *s, void *va)
1875 {
1876 struct intel_vgpu *vgpu = s->vgpu;
1877 u32 cmd = *(u32 *)va;
1878 const struct cmd_info *info;
1879
1880 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
1881 if (info == NULL) {
1882 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
1883 cmd, get_opcode(cmd, s->engine),
1884 repr_addr_type(s->buf_addr_type),
1885 s->engine->name, s->workload);
1886 return -EBADRQC;
1887 }
1888
1889 if ((info->opcode == OP_MI_BATCH_BUFFER_END) ||
1890 ((info->opcode == OP_MI_BATCH_BUFFER_START) &&
1891 (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)))
1892 return 0;
1893
1894 return -EBADRQC;
1895 }
1896
1897 static int perform_bb_shadow(struct parser_exec_state *s)
1898 {
1899 struct intel_vgpu *vgpu = s->vgpu;
1900 struct intel_vgpu_shadow_bb *bb;
1901 unsigned long gma = 0;
1902 unsigned long bb_size;
1903 unsigned long bb_end_cmd_offset;
1904 int ret = 0;
1905 struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
1906 s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
1907 unsigned long start_offset = 0;
1908
1909 /* Get the start gm address of the batch buffer */
1910 gma = get_gma_bb_from_cmd(s, 1);
1911 if (gma == INTEL_GVT_INVALID_ADDR)
1912 return -EFAULT;
1913
1914 ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset);
1915 if (ret)
1916 return ret;
1917
1918 bb = kzalloc(sizeof(*bb), GFP_KERNEL);
1919 if (!bb)
1920 return -ENOMEM;
1921
1922 bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true;
1923
1924 /*
1925 * The start_offset stores the offset of the batch buffer's start
1926 * gma relative to the page boundary. So for a non-privileged batch
1927 * buffer, the shadowed gem object holds exactly the same page
1928 * layout as the original gem object. This is for the convenience
1929 * of replacing the whole non-privileged batch buffer page with
1930 * this shadowed one in the PPGTT at the same gma address. (This
1931 * replacement is not implemented yet, but may be necessary in the
1932 * future.)
1933 * For a privileged batch buffer, we just change the start gma
1934 * address to that of the shadowed page.
1935 */
1936 if (bb->ppgtt)
1937 start_offset = gma & ~I915_GTT_PAGE_MASK;
1938
1939 bb->obj = i915_gem_object_create_shmem(s->engine->i915,
1940 round_up(bb_size + start_offset,
1941 PAGE_SIZE));
1942 if (IS_ERR(bb->obj)) {
1943 ret = PTR_ERR(bb->obj);
1944 goto err_free_bb;
1945 }
1946
1947 bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
1948 if (IS_ERR(bb->va)) {
1949 ret = PTR_ERR(bb->va);
1950 goto err_free_obj;
1951 }
1952
1953 ret = copy_gma_to_hva(s->vgpu, mm,
1954 gma, gma + bb_size,
1955 bb->va + start_offset);
1956 if (ret < 0) {
1957 gvt_vgpu_err("fail to copy guest ring buffer\n");
1958 ret = -EFAULT;
1959 goto err_unmap;
1960 }
1961
1962 ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset);
1963 if (ret)
1964 goto err_unmap;
1965
1966 i915_gem_object_unlock(bb->obj);
1967 INIT_LIST_HEAD(&bb->list);
1968 list_add(&bb->list, &s->workload->shadow_bb);
1969
1970 bb->bb_start_cmd_va = s->ip_va;
1971
1972 if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
1973 bb->bb_offset = s->ip_va - s->rb_va;
1974 else
1975 bb->bb_offset = 0;
1976
1977 /*
1978 * ip_va saves the virtual address of the shadow batch buffer, while
1979 * ip_gma saves the graphics address of the original batch buffer.
1980 * As the shadow batch buffer is just a copy of the original one,
1981 * it is correct to use the shadow batch buffer's va paired with the
1982 * original batch buffer's gma. After all, we don't want to pin the
1983 * shadow buffer here (too early).
1984 */
1985 s->ip_va = bb->va + start_offset;
1986 s->ip_gma = gma;
1987 return 0;
1988 err_unmap:
1989 i915_gem_object_unpin_map(bb->obj);
1990 err_free_obj:
1991 i915_gem_object_put(bb->obj);
1992 err_free_bb:
1993 kfree(bb);
1994 return ret;
1995 }
1996
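/*
 * MI_BATCH_BUFFER_START handler. The legal transitions are ring ->
 * first-level batch -> optional second-level batch; starting a batch
 * from inside a second-level one, or jumping from the ring straight
 * to a second-level batch, is rejected. The return address for the
 * eventual MI_BATCH_BUFFER_END is saved before the jump, and the
 * target buffer is shadowed and scanned when its privilege requires.
 */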
1997 static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
1998 {
1999 bool second_level;
2000 int ret = 0;
2001 struct intel_vgpu *vgpu = s->vgpu;
2002
2003 if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
2004 gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
2005 return -EFAULT;
2006 }
2007
2008 second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
2009 if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
2010 gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
2011 return -EFAULT;
2012 }
2013
2014 s->saved_buf_addr_type = s->buf_addr_type;
2015 addr_type_update_snb(s);
2016 if (s->buf_type == RING_BUFFER_INSTRUCTION) {
2017 s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
2018 s->buf_type = BATCH_BUFFER_INSTRUCTION;
2019 } else if (second_level) {
2020 s->buf_type = BATCH_BUFFER_2ND_LEVEL;
2021 s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
2022 s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
2023 }
2024
2025 if (batch_buffer_needs_scan(s)) {
2026 ret = perform_bb_shadow(s);
2027 if (ret < 0)
2028 gvt_vgpu_err("invalid shadow batch buffer\n");
2029 } else {
2030 /* emulate a batch buffer end so the return is handled correctly */
2031 ret = cmd_handler_mi_batch_buffer_end(s);
2032 if (ret < 0)
2033 return ret;
2034 }
2035 return ret;
2036 }
2037
2038 static int mi_noop_index;
2039
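/*
 * Command description table. Going by the initializers and their
 * users, the positional fields appear to be: name, opcode, flags
 * (F_*), allowed rings (R_*), supported devices (D_*), a bitmap of
 * dwords holding addresses to fix up (ADDR_FIX_*), a length (the
 * command length in dwords for F_LEN_CONST entries, otherwise the
 * width of the DW0 length field), an optional handler, and, for
 * F_LEN_VAR_FIXED entries, the expected length (CMD_LEN).
 */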
2040 static const struct cmd_info cmd_info[] = {
2041 {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
2042
2043 {"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
2044 0, 1, NULL},
2045
2046 {"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
2047 0, 1, cmd_handler_mi_user_interrupt},
2048
2049 {"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
2050 D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
2051
2052 {"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
2053
2054 {"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
2055 NULL},
2056
2057 {"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
2058 NULL},
2059
2060 {"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
2061 NULL},
2062
2063 {"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
2064 NULL},
2065
2066 {"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
2067 D_ALL, 0, 1, NULL},
2068
2069 {"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
2070 F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
2071 cmd_handler_mi_batch_buffer_end},
2072
2073 {"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
2074 0, 1, NULL},
2075
2076 {"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
2077 NULL},
2078
2079 {"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
2080 D_ALL, 0, 1, NULL},
2081
2082 {"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
2083 NULL},
2084
2085 {"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
2086 NULL},
2087
2088 {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR,
2089 R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
2090
2091 {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR | F_LEN_VAR_FIXED,
2092 R_ALL, D_ALL, 0, 8, NULL, CMD_LEN(1)},
2093
2094 {"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
2095
2096 {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS,
2097 D_ALL, 0, 8, NULL, CMD_LEN(0)},
2098
2099 {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL,
2100 F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, 0, 8,
2101 NULL, CMD_LEN(0)},
2102
2103 {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT,
2104 F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, ADDR_FIX_1(2),
2105 8, cmd_handler_mi_semaphore_wait, CMD_LEN(2)},
2106
2107 {"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
2108 ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
2109
2110 {"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
2111 0, 8, cmd_handler_mi_store_data_index},
2112
2113 {"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
2114 D_ALL, 0, 8, cmd_handler_lri},
2115
2116 {"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
2117 cmd_handler_mi_update_gtt},
2118
2119 {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM,
2120 F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
2121 cmd_handler_srm, CMD_LEN(2)},
2122
2123 {"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
2124 cmd_handler_mi_flush_dw},
2125
2126 {"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
2127 10, cmd_handler_mi_clflush},
2128
2129 {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT,
2130 F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(1), 6,
2131 cmd_handler_mi_report_perf_count, CMD_LEN(2)},
2132
2133 {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM,
2134 F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
2135 cmd_handler_lrm, CMD_LEN(2)},
2136
2137 {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG,
2138 F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8,
2139 cmd_handler_lrr, CMD_LEN(1)},
2140
2141 {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM,
2142 F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, 0,
2143 8, NULL, CMD_LEN(2)},
2144
2145 {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR | F_LEN_VAR_FIXED,
2146 R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL, CMD_LEN(2)},
2147
2148 {"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
2149 ADDR_FIX_1(2), 8, NULL},
2150
2151 {"MI_OP_2E", OP_MI_2E, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS,
2152 ADDR_FIX_2(1, 2), 8, cmd_handler_mi_op_2e, CMD_LEN(3)},
2153
2154 {"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
2155 8, cmd_handler_mi_op_2f},
2156
2157 {"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
2158 F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
2159 cmd_handler_mi_batch_buffer_start},
2160
2161 {"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
2162 F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
2163 cmd_handler_mi_conditional_batch_buffer_end, CMD_LEN(2)},
2164
2165 {"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
2166 R_RCS | R_BCS, D_ALL, 0, 2, NULL},
2167
2168 {"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
2169 ADDR_FIX_2(4, 7), 8, NULL},
2170
2171 {"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
2172 0, 8, NULL},
2173
2174 {"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
2175 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
2176
2177 {"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
2178
2179 {"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
2180 0, 8, NULL},
2181
2182 {"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
2183 ADDR_FIX_1(3), 8, NULL},
2184
2185 {"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
2186 D_ALL, 0, 8, NULL},
2187
2188 {"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
2189 ADDR_FIX_1(4), 8, NULL},
2190
2191 {"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
2192 ADDR_FIX_2(4, 5), 8, NULL},
2193
2194 {"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
2195 ADDR_FIX_1(4), 8, NULL},
2196
2197 {"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
2198 ADDR_FIX_2(4, 7), 8, NULL},
2199
2200 {"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
2201 D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
2202
2203 {"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
2204
2205 {"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
2206 D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
2207
2208 {"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
2209 R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
2210
2211 {"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
2212 OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
2213 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
2214
2215 {"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
2216 D_ALL, ADDR_FIX_1(4), 8, NULL},
2217
2218 {"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
2219 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
2220
2221 {"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
2222 D_ALL, ADDR_FIX_1(4), 8, NULL},
2223
2224 {"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
2225 D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
2226
2227 {"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
2228 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
2229
2230 {"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
2231 OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
2232 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
2233
2234 {"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
2235 ADDR_FIX_2(4, 5), 8, NULL},
2236
2237 {"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
2238 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
2239
2240 {"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
2241 OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
2242 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2243
2244 {"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
2245 OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
2246 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2247
2248 {"3DSTATE_BLEND_STATE_POINTERS",
2249 OP_3DSTATE_BLEND_STATE_POINTERS,
2250 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2251
2252 {"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
2253 OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
2254 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2255
2256 {"3DSTATE_BINDING_TABLE_POINTERS_VS",
2257 OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
2258 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2259
2260 {"3DSTATE_BINDING_TABLE_POINTERS_HS",
2261 OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
2262 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2263
2264 {"3DSTATE_BINDING_TABLE_POINTERS_DS",
2265 OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
2266 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2267
2268 {"3DSTATE_BINDING_TABLE_POINTERS_GS",
2269 OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
2270 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2271
2272 {"3DSTATE_BINDING_TABLE_POINTERS_PS",
2273 OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
2274 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2275
2276 {"3DSTATE_SAMPLER_STATE_POINTERS_VS",
2277 OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
2278 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2279
2280 {"3DSTATE_SAMPLER_STATE_POINTERS_HS",
2281 OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
2282 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2283
2284 {"3DSTATE_SAMPLER_STATE_POINTERS_DS",
2285 OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
2286 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2287
2288 {"3DSTATE_SAMPLER_STATE_POINTERS_GS",
2289 OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
2290 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2291
2292 {"3DSTATE_SAMPLER_STATE_POINTERS_PS",
2293 OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
2294 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2295
2296 {"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
2297 0, 8, NULL},
2298
2299 {"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
2300 0, 8, NULL},
2301
2302 {"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
2303 0, 8, NULL},
2304
2305 {"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
2306 0, 8, NULL},
2307
2308 {"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
2309 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2310
2311 {"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
2312 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2313
2314 {"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
2315 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2316
2317 {"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
2318 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2319
2320 {"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
2321 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2322
2323 {"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
2324 F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2325
2326 {"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
2327 F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2328
2329 {"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
2330 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2331
2332 {"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
2333 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2334
2335 {"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
2336 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2337
2338 {"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
2339 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2340
2341 {"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
2342 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2343
2344 {"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
2345 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2346
2347 {"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
2348 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2349
2350 {"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
2351 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2352
2353 {"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
2354 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2355
2356 {"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
2357 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2358
2359 {"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
2360 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2361
2362 {"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
2363 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2364
2365 {"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
2366 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2367
2368 {"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
2369 D_BDW_PLUS, 0, 8, NULL},
2370
2371 {"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2372 NULL},
2373
2374 {"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
2375 D_BDW_PLUS, 0, 8, NULL},
2376
2377 {"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
2378 D_BDW_PLUS, 0, 8, NULL},
2379
2380 {"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2381 8, NULL},
2382
2383 {"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
2384 R_RCS, D_BDW_PLUS, 0, 8, NULL},
2385
2386 {"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2387 8, NULL},
2388
2389 {"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2390 NULL},
2391
2392 {"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2393 NULL},
2394
2395 {"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2396 NULL},
2397
2398 {"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
2399 D_BDW_PLUS, 0, 8, NULL},
2400
2401 {"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
2402 R_RCS, D_ALL, 0, 8, NULL},
2403
2404 {"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
2405 D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
2406
2407 {"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
2408 R_RCS, D_ALL, 0, 1, NULL},
2409
2410 {"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2411
2412 {"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
2413 R_RCS, D_ALL, 0, 8, NULL},
2414
2415 {"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
2416 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2417
2418 {"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2419
2420 {"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2421
2422 {"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2423
2424 {"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
2425 D_BDW_PLUS, 0, 8, NULL},
2426
2427 {"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
2428 D_BDW_PLUS, 0, 8, NULL},
2429
2430 {"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
2431 D_ALL, 0, 8, NULL},
2432
2433 {"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
2434 D_BDW_PLUS, 0, 8, NULL},
2435
2436 {"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
2437 D_BDW_PLUS, 0, 8, NULL},
2438
2439 {"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2440
2441 {"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2442
2443 {"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2444
2445 {"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
2446 D_ALL, 0, 8, NULL},
2447
2448 {"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2449
2450 {"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2451
2452 {"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
2453 R_RCS, D_ALL, 0, 8, NULL},
2454
2455 {"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
2456 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2457
2458 {"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
2459 0, 8, NULL},
2460
2461 {"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
2462 D_ALL, ADDR_FIX_1(2), 8, NULL},
2463
2464 {"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
2465 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2466
2467 {"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
2468 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2469
2470 {"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
2471 D_ALL, 0, 8, NULL},
2472
2473 {"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
2474 D_ALL, 0, 8, NULL},
2475
2476 {"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
2477 D_ALL, 0, 8, NULL},
2478
2479 {"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
2480 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2481
2482 {"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
2483 D_BDW_PLUS, 0, 8, NULL},
2484
2485 {"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
2486 D_ALL, ADDR_FIX_1(2), 8, NULL},
2487
2488 {"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
2489 R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
2490
2491 {"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
2492 R_RCS, D_ALL, 0, 8, NULL},
2493
2494 {"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
2495 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2496
2497 {"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
2498 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2499
2500 {"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
2501 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2502
2503 {"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
2504 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2505
2506 {"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
2507 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2508
2509 {"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
2510 R_RCS, D_ALL, 0, 8, NULL},
2511
2512 {"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
2513 D_ALL, 0, 9, NULL},
2514
2515 {"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2516 ADDR_FIX_2(2, 4), 8, NULL},
2517
2518 {"3DSTATE_BINDING_TABLE_POOL_ALLOC",
2519 OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
2520 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2521
2522 {"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
2523 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2524
2525 {"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
2526 OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
2527 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2528
2529 {"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
2530 D_BDW_PLUS, 0, 8, NULL},
2531
2532 {"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
2533 ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
2534
2535 {"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2536
2537 {"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
2538 1, NULL},
2539
2540 {"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
2541 ADDR_FIX_1(1), 8, NULL},
2542
2543 {"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2544
2545 {"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2546 ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
2547
2548 {"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
2549 ADDR_FIX_1(1), 8, NULL},
2550
2551 {"OP_SWTESS_BASE_ADDRESS", OP_SWTESS_BASE_ADDRESS,
2552 F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_2(1, 2), 3, NULL},
2553
2554 {"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2555
2556 {"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2557
2558 {"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2559 0, 8, NULL},
2560
2561 {"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS,
2562 D_SKL_PLUS, 0, 8, NULL},
2563
2564 {"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
2565 F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2566
2567 {"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL,
2568 0, 16, NULL},
2569
2570 {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
2571 0, 16, NULL},
2572
2573 {"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL,
2574 0, 16, NULL},
2575
2576 {"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2577
2578 {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
2579 0, 16, NULL},
2580
2581 {"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
2582 0, 16, NULL},
2583
2584 {"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2585 0, 16, NULL},
2586
2587 {"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2588 0, 8, NULL},
2589
2590 {"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
2591 NULL},
2592
2593 {"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
2594 F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
2595
2596 {"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
2597 R_VCS, D_ALL, 0, 12, NULL},
2598
2599 {"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
2600 R_VCS, D_ALL, 0, 12, NULL},
2601
2602 {"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
2603 R_VCS, D_BDW_PLUS, 0, 12, NULL},
2604
2605 {"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
2606 F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2607
2608 {"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
2609 F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
2610
2611 {"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2612
2613 {"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
2614 R_VCS, D_ALL, 0, 12, NULL},
2615
2616 {"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
2617 R_VCS, D_ALL, 0, 12, NULL},
2618
2619 {"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
2620 R_VCS, D_ALL, 0, 12, NULL},
2621
2622 {"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
2623 R_VCS, D_ALL, 0, 12, NULL},
2624
2625 {"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
2626 R_VCS, D_ALL, 0, 12, NULL},
2627
2628 {"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
2629 R_VCS, D_ALL, 0, 12, NULL},
2630
2631 {"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
2632 R_VCS, D_ALL, 0, 6, NULL},
2633
2634 {"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
2635 R_VCS, D_ALL, 0, 12, NULL},
2636
2637 {"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
2638 R_VCS, D_ALL, 0, 12, NULL},
2639
2640 {"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
2641 R_VCS, D_ALL, 0, 12, NULL},
2642
2643 {"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
2644 R_VCS, D_ALL, 0, 12, NULL},
2645
2646 {"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
2647 R_VCS, D_ALL, 0, 12, NULL},
2648
2649 {"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
2650 R_VCS, D_ALL, 0, 12, NULL},
2651
2652 {"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
2653 R_VCS, D_ALL, 0, 12, NULL},
2654 {"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
2655 R_VCS, D_ALL, 0, 12, NULL},
2656
2657 {"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
2658 R_VCS, D_ALL, 0, 12, NULL},
2659
2660 {"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
2661 R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
2662
2663 {"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
2664 R_VCS, D_ALL, 0, 12, NULL},
2665
2666 {"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
2667 R_VCS, D_ALL, 0, 12, NULL},
2668
2669 {"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
2670 R_VCS, D_ALL, 0, 12, NULL},
2671
2672 {"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
2673 R_VCS, D_ALL, 0, 12, NULL},
2674
2675 {"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
2676 R_VCS, D_ALL, 0, 12, NULL},
2677
2678 {"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
2679 R_VCS, D_ALL, 0, 12, NULL},
2680
2681 {"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
2682 R_VCS, D_ALL, 0, 12, NULL},
2683
2684 {"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
2685 R_VCS, D_ALL, 0, 12, NULL},
2686
2687 {"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
2688 R_VCS, D_ALL, 0, 12, NULL},
2689
2690 {"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
2691 R_VCS, D_ALL, 0, 12, NULL},
2692
2693 {"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
2694 R_VCS, D_ALL, 0, 12, NULL},
2695
2696 {"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
2697 0, 16, NULL},
2698
2699 {"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2700
2701 {"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2702
2703 {"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
2704 R_VCS, D_ALL, 0, 12, NULL},
2705
2706 {"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
2707 R_VCS, D_ALL, 0, 12, NULL},
2708
2709 {"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
2710 R_VCS, D_ALL, 0, 12, NULL},
2711
2712 {"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
2713
2714 {"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
2715 0, 12, NULL},
2716
2717 {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
2718 0, 12, NULL},
2719 };
2720
2721 static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
2722 {
2723 hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
2724 }
2725
2726 /* call the cmd handler, and advance ip */
2727 static int cmd_parser_exec(struct parser_exec_state *s)
2728 {
2729 struct intel_vgpu *vgpu = s->vgpu;
2730 const struct cmd_info *info;
2731 u32 cmd;
2732 int ret = 0;
2733
2734 cmd = cmd_val(s, 0);
2735
2736 /* fastpath for MI_NOOP */
2737 if (cmd == MI_NOOP)
2738 info = &cmd_info[mi_noop_index];
2739 else
2740 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
2741
2742 if (info == NULL) {
2743 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
2744 cmd, get_opcode(cmd, s->engine),
2745 repr_addr_type(s->buf_addr_type),
2746 s->engine->name, s->workload);
2747 return -EBADRQC;
2748 }
2749
2750 s->info = info;
2751
2752 trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va,
2753 cmd_length(s), s->buf_type, s->buf_addr_type,
2754 s->workload, info->name);
2755
2756 if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
2757 ret = gvt_check_valid_cmd_length(cmd_length(s),
2758 info->valid_len);
2759 if (ret)
2760 return ret;
2761 }
2762
2763 if (info->handler) {
2764 ret = info->handler(s);
2765 if (ret < 0) {
2766 gvt_vgpu_err("%s handler error\n", info->name);
2767 return ret;
2768 }
2769 }
2770
2771 if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
2772 ret = cmd_advance_default(s);
2773 if (ret) {
2774 gvt_vgpu_err("%s IP advance error\n", info->name);
2775 return ret;
2776 }
2777 }
2778 return 0;
2779 }
2780
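/*
 * A command's gma must lie within the live portion of the ring,
 * which may wrap. E.g. with head = 0x100 and tail = 0x800 the valid
 * range is [0x100, 0x800]; with head = 0x800 and tail = 0x100 the
 * range wraps past the ring top, and only (0x100, 0x800) is out of
 * range.
 */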
2781 static inline bool gma_out_of_range(unsigned long gma,
2782 unsigned long gma_head, unsigned int gma_tail)
2783 {
2784 if (gma_tail >= gma_head)
2785 return (gma < gma_head) || (gma > gma_tail);
2786 else
2787 return (gma > gma_tail) && (gma < gma_head);
2788 }
2789
2790 /* Keep the return type consistent, e.g. EBADRQC for an unknown
2791 * cmd, EFAULT for an invalid address, EPERM for a nonpriv access.
2792 * These later serve as input to the VM health status.
2793 */
2794 static int command_scan(struct parser_exec_state *s,
2795 unsigned long rb_head, unsigned long rb_tail,
2796 unsigned long rb_start, unsigned long rb_len)
2797 {
2798
2799 unsigned long gma_head, gma_tail, gma_bottom;
2800 int ret = 0;
2801 struct intel_vgpu *vgpu = s->vgpu;
2802
2803 gma_head = rb_start + rb_head;
2804 gma_tail = rb_start + rb_tail;
2805 gma_bottom = rb_start + rb_len;
2806
2807 while (s->ip_gma != gma_tail) {
2808 if (s->buf_type == RING_BUFFER_INSTRUCTION ||
2809 s->buf_type == RING_BUFFER_CTX) {
2810 if (!(s->ip_gma >= rb_start) ||
2811 !(s->ip_gma < gma_bottom)) {
2812 gvt_vgpu_err("ip_gma %lx out of ring scope."
2813 "(base:0x%lx, bottom: 0x%lx)\n",
2814 s->ip_gma, rb_start,
2815 gma_bottom);
2816 parser_exec_state_dump(s);
2817 return -EFAULT;
2818 }
2819 if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
2820 gvt_vgpu_err("ip_gma %lx out of range."
2821 "base 0x%lx head 0x%lx tail 0x%lx\n",
2822 s->ip_gma, rb_start,
2823 rb_head, rb_tail);
2824 parser_exec_state_dump(s);
2825 break;
2826 }
2827 }
2828 ret = cmd_parser_exec(s);
2829 if (ret) {
2830 gvt_vgpu_err("cmd parser error\n");
2831 parser_exec_state_dump(s);
2832 break;
2833 }
2834 }
2835
2836 return ret;
2837 }
2838
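/*
 * Set up a parser over the workload's shadowed ring buffer and scan
 * [head, tail). Engines in bypass_scan_mask, as well as empty rings,
 * are skipped entirely.
 */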
2839 static int scan_workload(struct intel_vgpu_workload *workload)
2840 {
2841 unsigned long gma_head, gma_tail;
2842 struct parser_exec_state s;
2843 int ret = 0;
2844
2845 /* ring base is page aligned */
2846 if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
2847 return -EINVAL;
2848
2849 gma_head = workload->rb_start + workload->rb_head;
2850 gma_tail = workload->rb_start + workload->rb_tail;
2851
2852 s.buf_type = RING_BUFFER_INSTRUCTION;
2853 s.buf_addr_type = GTT_BUFFER;
2854 s.vgpu = workload->vgpu;
2855 s.engine = workload->engine;
2856 s.ring_start = workload->rb_start;
2857 s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2858 s.ring_head = gma_head;
2859 s.ring_tail = gma_tail;
2860 s.rb_va = workload->shadow_ring_buffer_va;
2861 s.workload = workload;
2862 s.is_ctx_wa = false;
2863
2864 if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail)
2865 return 0;
2866
2867 ret = ip_gma_set(&s, gma_head);
2868 if (ret)
2869 goto out;
2870
2871 ret = command_scan(&s, workload->rb_head, workload->rb_tail,
2872 workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
2873
2874 out:
2875 return ret;
2876 }
2877
2878 static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2879 {
2880
2881 unsigned long gma_head, gma_tail, ring_size, ring_tail;
2882 struct parser_exec_state s;
2883 int ret = 0;
2884 struct intel_vgpu_workload *workload = container_of(wa_ctx,
2885 struct intel_vgpu_workload,
2886 wa_ctx);
2887
2888 /* ring base is page aligned */
2889 if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
2890 I915_GTT_PAGE_SIZE)))
2891 return -EINVAL;
2892
2893 ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32);
2894 ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
2895 PAGE_SIZE);
2896 gma_head = wa_ctx->indirect_ctx.guest_gma;
2897 gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
2898
2899 s.buf_type = RING_BUFFER_INSTRUCTION;
2900 s.buf_addr_type = GTT_BUFFER;
2901 s.vgpu = workload->vgpu;
2902 s.engine = workload->engine;
2903 s.ring_start = wa_ctx->indirect_ctx.guest_gma;
2904 s.ring_size = ring_size;
2905 s.ring_head = gma_head;
2906 s.ring_tail = gma_tail;
2907 s.rb_va = wa_ctx->indirect_ctx.shadow_va;
2908 s.workload = workload;
2909 s.is_ctx_wa = true;
2910
2911 ret = ip_gma_set(&s, gma_head);
2912 if (ret)
2913 goto out;
2914
2915 ret = command_scan(&s, 0, ring_tail,
2916 wa_ctx->indirect_ctx.guest_gma, ring_size);
2917 out:
2918 return ret;
2919 }
2920
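/*
 * Copy the to-be-scanned portion of the guest ring into a host-side
 * scan buffer that is grown on demand. If the live range wraps past
 * the top of the ring (head > tail), it is copied in two pieces:
 * [head, top) first, then [start, tail).
 */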
2921 static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2922 {
2923 struct intel_vgpu *vgpu = workload->vgpu;
2924 struct intel_vgpu_submission *s = &vgpu->submission;
2925 unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
2926 void *shadow_ring_buffer_va;
2927 int ret;
2928
2929 guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2930
2931 /* calculate workload ring buffer size */
2932 workload->rb_len = (workload->rb_tail + guest_rb_size -
2933 workload->rb_head) % guest_rb_size;
2934
2935 gma_head = workload->rb_start + workload->rb_head;
2936 gma_tail = workload->rb_start + workload->rb_tail;
2937 gma_top = workload->rb_start + guest_rb_size;
2938
2939 if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) {
2940 void *p;
2941
2942 /* realloc a larger ring scan buffer if needed */
2943 p = krealloc(s->ring_scan_buffer[workload->engine->id],
2944 workload->rb_len, GFP_KERNEL);
2945 if (!p) {
2946 gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
2947 return -ENOMEM;
2948 }
2949 s->ring_scan_buffer[workload->engine->id] = p;
2950 s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len;
2951 }
2952
2953 shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id];
2954
2955 /* get shadow ring buffer va */
2956 workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
2957
2958 /* head > tail --> copy head <-> top */
2959 if (gma_head > gma_tail) {
2960 ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
2961 gma_head, gma_top, shadow_ring_buffer_va);
2962 if (ret < 0) {
2963 gvt_vgpu_err("fail to copy guest ring buffer\n");
2964 return ret;
2965 }
2966 shadow_ring_buffer_va += ret;
2967 gma_head = workload->rb_start;
2968 }
2969
2970 /* copy head or start <-> tail */
2971 ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail,
2972 shadow_ring_buffer_va);
2973 if (ret < 0) {
2974 gvt_vgpu_err("fail to copy guest ring buffer\n");
2975 return ret;
2976 }
2977 return 0;
2978 }
2979
2980 int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
2981 {
2982 int ret;
2983 struct intel_vgpu *vgpu = workload->vgpu;
2984
2985 ret = shadow_workload_ring_buffer(workload);
2986 if (ret) {
2987 gvt_vgpu_err("fail to shadow workload ring_buffer\n");
2988 return ret;
2989 }
2990
2991 ret = scan_workload(workload);
2992 if (ret) {
2993 gvt_vgpu_err("scan workload error\n");
2994 return ret;
2995 }
2996 return 0;
2997 }
2998
2999 static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
3000 {
3001 int ctx_size = wa_ctx->indirect_ctx.size;
3002 unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
3003 struct intel_vgpu_workload *workload = container_of(wa_ctx,
3004 struct intel_vgpu_workload,
3005 wa_ctx);
3006 struct intel_vgpu *vgpu = workload->vgpu;
3007 struct drm_i915_gem_object *obj;
3008 int ret = 0;
3009 void *map;
3010
3011 obj = i915_gem_object_create_shmem(workload->engine->i915,
3012 roundup(ctx_size + CACHELINE_BYTES,
3013 PAGE_SIZE));
3014 if (IS_ERR(obj))
3015 return PTR_ERR(obj);
3016
3017 /* get the va of the shadow indirect ctx */
3018 map = i915_gem_object_pin_map(obj, I915_MAP_WB);
3019 if (IS_ERR(map)) {
3020 gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
3021 ret = PTR_ERR(map);
3022 goto put_obj;
3023 }
3024
3025 i915_gem_object_lock(obj, NULL);
3026 ret = i915_gem_object_set_to_cpu_domain(obj, false);
3027 i915_gem_object_unlock(obj);
3028 if (ret) {
3029 gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
3030 goto unmap_src;
3031 }
3032
3033 ret = copy_gma_to_hva(workload->vgpu,
3034 workload->vgpu->gtt.ggtt_mm,
3035 guest_gma, guest_gma + ctx_size,
3036 map);
3037 if (ret < 0) {
3038 gvt_vgpu_err("fail to copy guest indirect ctx\n");
3039 goto unmap_src;
3040 }
3041
3042 wa_ctx->indirect_ctx.obj = obj;
3043 wa_ctx->indirect_ctx.shadow_va = map;
3044 return 0;
3045
3046 unmap_src:
3047 i915_gem_object_unpin_map(obj);
3048 put_obj:
3049 i915_gem_object_put(obj);
3050 return ret;
3051 }
3052
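/*
 * If a per-context workaround buffer is valid, append one cacheline
 * to the shadowed indirect context that chains into it: dword 0
 * (0x18800001) looks like an MI_BATCH_BUFFER_START header, and dword
 * 1 holds the guest gma of the per-context buffer.
 */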
3053 static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
3054 {
3055 u32 per_ctx_start[CACHELINE_DWORDS] = {};
3056 unsigned char *bb_start_sva;
3057
3058 if (!wa_ctx->per_ctx.valid)
3059 return 0;
3060
3061 per_ctx_start[0] = 0x18800001;
3062 per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
3063
3064 bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
3065 wa_ctx->indirect_ctx.size;
3066
3067 memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);
3068
3069 return 0;
3070 }
3071
3072 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
3073 {
3074 int ret;
3075 struct intel_vgpu_workload *workload = container_of(wa_ctx,
3076 struct intel_vgpu_workload,
3077 wa_ctx);
3078 struct intel_vgpu *vgpu = workload->vgpu;
3079
3080 if (wa_ctx->indirect_ctx.size == 0)
3081 return 0;
3082
3083 ret = shadow_indirect_ctx(wa_ctx);
3084 if (ret) {
3085 gvt_vgpu_err("fail to shadow indirect ctx\n");
3086 return ret;
3087 }
3088
3089 combine_wa_ctx(wa_ctx);
3090
3091 ret = scan_wa_ctx(wa_ctx);
3092 if (ret) {
3093 gvt_vgpu_err("scan wa ctx error\n");
3094 return ret;
3095 }
3096
3097 return 0;
3098 }
3099
3100 /* Generate dummy contexts by sending empty requests to the HW, and
3101 * let the HW fill the engine contexts. These dummy contexts are used
3102 * for initialization purposes (updating the reg whitelist), so they
3103 * are referred to as init contexts here.
3104 */
3105 void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
3106 {
3107 const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
3108 struct intel_gvt *gvt = vgpu->gvt;
3109 struct intel_engine_cs *engine;
3110 enum intel_engine_id id;
3111
3112 if (gvt->is_reg_whitelist_updated)
3113 return;
3114
3115 /* scan init ctx to update cmd accessible list */
3116 for_each_engine(engine, gvt->gt, id) {
3117 struct parser_exec_state s;
3118 void *vaddr;
3119 int ret;
3120
3121 if (!engine->default_state)
3122 continue;
3123
3124 vaddr = shmem_pin_map(engine->default_state);
3125 if (!vaddr) {
3126 gvt_err("failed to map %s->default state\n",
3127 engine->name);
3128 return;
3129 }
3130
3131 s.buf_type = RING_BUFFER_CTX;
3132 s.buf_addr_type = GTT_BUFFER;
3133 s.vgpu = vgpu;
3134 s.engine = engine;
3135 s.ring_start = 0;
3136 s.ring_size = engine->context_size - start;
3137 s.ring_head = 0;
3138 s.ring_tail = s.ring_size;
3139 s.rb_va = vaddr + start;
3140 s.workload = NULL;
3141 s.is_ctx_wa = false;
3142 s.is_init_ctx = true;
3143
3144 /* skipping the first RING_CTX_SIZE(0x50) dwords */
3145 ret = ip_gma_set(&s, RING_CTX_SIZE);
3146 if (ret == 0) {
3147 ret = command_scan(&s, 0, s.ring_size, 0, s.ring_size);
3148 if (ret)
3149 gvt_err("Scan init ctx error\n");
3150 }
3151
3152 shmem_unpin_map(engine->default_state, vaddr);
3153 if (ret)
3154 return;
3155 }
3156
3157 gvt->is_reg_whitelist_updated = true;
3158 }
3159
3160 int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload)
3161 {
3162 struct intel_vgpu *vgpu = workload->vgpu;
3163 unsigned long gma_head, gma_tail, gma_start, ctx_size;
3164 struct parser_exec_state s;
3165 int ring_id = workload->engine->id;
3166 struct intel_context *ce = vgpu->submission.shadow[ring_id];
3167 int ret;
3168
3169 GEM_BUG_ON(atomic_read(&ce->pin_count) < 0);
3170
3171 ctx_size = workload->engine->context_size - PAGE_SIZE;
3172
3173 /* Only the ring context is loaded to HW for an inhibit context, so
3174 * there is no need to scan the engine context.
3175 */
3176 if (is_inhibit_context(ce))
3177 return 0;
3178
3179 gma_start = i915_ggtt_offset(ce->state) + LRC_STATE_PN*PAGE_SIZE;
3180 gma_head = 0;
3181 gma_tail = ctx_size;
3182
3183 s.buf_type = RING_BUFFER_CTX;
3184 s.buf_addr_type = GTT_BUFFER;
3185 s.vgpu = workload->vgpu;
3186 s.engine = workload->engine;
3187 s.ring_start = gma_start;
3188 s.ring_size = ctx_size;
3189 s.ring_head = gma_start + gma_head;
3190 s.ring_tail = gma_start + gma_tail;
3191 s.rb_va = ce->lrc_reg_state;
3192 s.workload = workload;
3193 s.is_ctx_wa = false;
3194 s.is_init_ctx = false;
3195
3196 /* don't scan the first RING_CTX_SIZE(0x50) dwords, as they are the
3197 * ring context
3198 */
3199 ret = ip_gma_set(&s, gma_start + gma_head + RING_CTX_SIZE);
3200 if (ret)
3201 goto out;
3202
3203 ret = command_scan(&s, gma_head, gma_tail,
3204 gma_start, ctx_size);
3205 out:
3206 if (ret)
3207 gvt_vgpu_err("scan shadow ctx error\n");
3208
3209 return ret;
3210 }
3211
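/*
 * Build the opcode-indexed hash table consulted by get_cmd_info(),
 * skipping entries whose device mask does not match this GPU. The
 * table index of MI_NOOP is also cached for the fastpath in
 * cmd_parser_exec().
 */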
3212 static int init_cmd_table(struct intel_gvt *gvt)
3213 {
3214 unsigned int gen_type = intel_gvt_get_device_type(gvt);
3215 int i;
3216
3217 for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
3218 struct cmd_entry *e;
3219
3220 if (!(cmd_info[i].devices & gen_type))
3221 continue;
3222
3223 e = kzalloc(sizeof(*e), GFP_KERNEL);
3224 if (!e)
3225 return -ENOMEM;
3226
3227 e->info = &cmd_info[i];
3228 if (cmd_info[i].opcode == OP_MI_NOOP)
3229 mi_noop_index = i;
3230
3231 INIT_HLIST_NODE(&e->hlist);
3232 add_cmd_entry(gvt, e);
3233 gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
3234 e->info->name, e->info->opcode, e->info->flag,
3235 e->info->devices, e->info->rings);
3236 }
3237
3238 return 0;
3239 }
3240
3241 static void clean_cmd_table(struct intel_gvt *gvt)
3242 {
3243 struct hlist_node *tmp;
3244 struct cmd_entry *e;
3245 int i;
3246
3247 hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
3248 kfree(e);
3249
3250 hash_init(gvt->cmd_table);
3251 }
3252
3253 void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
3254 {
3255 clean_cmd_table(gvt);
3256 }
3257
3258 int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
3259 {
3260 int ret;
3261
3262 ret = init_cmd_table(gvt);
3263 if (ret) {
3264 intel_gvt_clean_cmd_parser(gvt);
3265 return ret;
3266 }
3267 return 0;
3268 }
3269