/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "kfd_priv.h"
#include "kfd_events.h"
#include "soc15_int.h"
#include "kfd_device_queue_manager.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "kfd_smi_events.h"
#include "kfd_debug.h"

/*
 * GFX11 SQ Interrupts
 *
 * There are 3 encoding types of interrupts sourced from SQ sent as a 44-bit
 * packet to the Interrupt Handler:
 * Auto - Generated by the SQG (various cmd overflows, timestamps etc)
 * Wave - Generated by S_SENDMSG through a shader program
 * Error - HW generated errors (Illegal instructions, Memviols, EDC etc)
 *
 * The 44-bit packet is mapped as {context_id1[7:0],context_id0[31:0]} plus
 * 4-bits for VMID (SOC15_VMID_FROM_IH_ENTRY) as such:
 *
 * - context_id1[7:6]
 * Encoding type (0 = Auto, 1 = Wave, 2 = Error)
 *
 * - context_id0[26]
 * PRIV bit indicates that Wave S_SEND or error occurred within trap
 *
 * - context_id0[24:0]
 * 25-bit data with the following layout per encoding type:
 * Auto - only context_id0[8:0] is used, which reports various interrupts
 * generated by SQG. The rest is 0.
 * Wave - user data sent from m0 via S_SENDMSG (context_id0[23:0])
 * Error - Error Type (context_id0[24:21]), Error Details (context_id0[20:0])
 *
 * The other context_id bits show coordinates (SE/SH/CU/SIMD/WGP) for wave
 * S_SENDMSG and Errors. These are 0 for Auto.
 */
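
/*
 * Illustrative decode (example values only, not taken from hardware traces):
 * a Wave-encoded packet with context_id1 = 0x40 and context_id0 = 0x04000123
 * breaks down, using the field layout above, as:
 *   ENCODING = context_id1[7:6]  = (0x40 >> 6) & 0x3        = 1 (Wave)
 *   PRIV     = context_id0[26]   = (0x04000123 >> 26) & 0x1 = 1 (within trap)
 *   DATA     = context_id0[23:0] = 0x04000123 & 0xffffff    = 0x000123 (m0)
 * The SH_ID/WAVE_ID/SIMD_ID/WGP_ID coordinate fields decode the same way
 * using the masks and shifts defined below.
 */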

enum SQ_INTERRUPT_WORD_ENCODING {
	SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0,
	SQ_INTERRUPT_WORD_ENCODING_INST,
	SQ_INTERRUPT_WORD_ENCODING_ERROR,
};

enum SQ_INTERRUPT_ERROR_TYPE {
	SQ_INTERRUPT_ERROR_TYPE_EDC_FUE = 0x0,
	SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST,
	SQ_INTERRUPT_ERROR_TYPE_MEMVIOL,
	SQ_INTERRUPT_ERROR_TYPE_EDC_FED,
};

/* SQ_INTERRUPT_WORD_AUTO_CTXID */
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE__SHIFT		0
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__WLT__SHIFT			1
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF_FULL__SHIFT	2
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__REG_TIMESTAMP__SHIFT		3
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__CMD_TIMESTAMP__SHIFT		4
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__HOST_CMD_OVERFLOW__SHIFT		5
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__HOST_REG_OVERFLOW__SHIFT		6
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__IMMED_OVERFLOW__SHIFT		7
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_UTC_ERROR__SHIFT	8
#define SQ_INTERRUPT_WORD_AUTO_CTXID1__ENCODING__SHIFT			6

#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_MASK		0x00000001
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__WLT_MASK				0x00000002
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF_FULL_MASK	0x00000004
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__REG_TIMESTAMP_MASK		0x00000008
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__CMD_TIMESTAMP_MASK		0x00000010
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__HOST_CMD_OVERFLOW_MASK		0x00000020
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__HOST_REG_OVERFLOW_MASK		0x00000040
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__IMMED_OVERFLOW_MASK		0x00000080
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_UTC_ERROR_MASK	0x00000100
#define SQ_INTERRUPT_WORD_AUTO_CTXID1__ENCODING_MASK			0x000000c0

/* SQ_INTERRUPT_WORD_WAVE_CTXID */
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__DATA__SHIFT	0
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SH_ID__SHIFT	25
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV__SHIFT	26
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__WAVE_ID__SHIFT	27
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__SIMD_ID__SHIFT	0
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__WGP_ID__SHIFT	2
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING__SHIFT	6

#define SQ_INTERRUPT_WORD_WAVE_CTXID0__DATA_MASK	0x00ffffff	/* [23:0] */
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SH_ID_MASK	0x02000000	/* [25] */
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK	0x04000000	/* [26] */
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__WAVE_ID_MASK	0xf8000000	/* [31:27] */
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__SIMD_ID_MASK	0x00000003	/* [33:32] */
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__WGP_ID_MASK	0x0000003c	/* [37:34] */
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING_MASK	0x000000c0	/* [39:38] */

/* SQ_INTERRUPT_WORD_ERROR_CTXID */
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__DETAIL__SHIFT	0
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__TYPE__SHIFT	21
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__SH_ID__SHIFT	25
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__PRIV__SHIFT	26
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__WAVE_ID__SHIFT	27
#define SQ_INTERRUPT_WORD_ERROR_CTXID1__SIMD_ID__SHIFT	0
#define SQ_INTERRUPT_WORD_ERROR_CTXID1__WGP_ID__SHIFT	2
#define SQ_INTERRUPT_WORD_ERROR_CTXID1__ENCODING__SHIFT	6

#define SQ_INTERRUPT_WORD_ERROR_CTXID0__DETAIL_MASK	0x001fffff	/* [20:0] */
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__TYPE_MASK	0x01e00000	/* [24:21] */
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__SH_ID_MASK	0x02000000	/* [25] */
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__PRIV_MASK	0x04000000	/* [26] */
#define SQ_INTERRUPT_WORD_ERROR_CTXID0__WAVE_ID_MASK	0xf8000000	/* [31:27] */
#define SQ_INTERRUPT_WORD_ERROR_CTXID1__SIMD_ID_MASK	0x00000003	/* [33:32] */
#define SQ_INTERRUPT_WORD_ERROR_CTXID1__WGP_ID_MASK	0x0000003c	/* [37:34] */
#define SQ_INTERRUPT_WORD_ERROR_CTXID1__ENCODING_MASK	0x000000c0	/* [39:38] */

/*
 * The debugger will send user data (m0) with PRIV=1 to indicate it requires
 * notification from the KFD with the following queue id (DOORBELL_ID) and
 * trap code (TRAP_CODE).
 */
#define KFD_CTXID0_TRAP_CODE_SHIFT	10
#define KFD_CTXID0_TRAP_CODE_MASK	0xfffc00
#define KFD_CTXID0_CP_BAD_OP_ECODE_MASK	0x3ffffff
#define KFD_CTXID0_DOORBELL_ID_MASK	0x0003ff

#define KFD_CTXID0_TRAP_CODE(ctxid0)		(((ctxid0) &  \
				KFD_CTXID0_TRAP_CODE_MASK) >> \
				KFD_CTXID0_TRAP_CODE_SHIFT)
#define KFD_CTXID0_CP_BAD_OP_ECODE(ctxid0)	(((ctxid0) &  \
				KFD_CTXID0_CP_BAD_OP_ECODE_MASK) >> \
				KFD_CTXID0_TRAP_CODE_SHIFT)
#define KFD_CTXID0_DOORBELL_ID(ctxid0)		((ctxid0) &   \
				KFD_CTXID0_DOORBELL_ID_MASK)

static void print_sq_intr_info_auto(struct kfd_node *dev, uint32_t context_id0,
				    uint32_t context_id1)
{
	dev_dbg_ratelimited(
		dev->adev->dev,
		"sq_intr: auto, ttrace %d, wlt %d, ttrace_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
			      THREAD_TRACE),
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, WLT),
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
			      THREAD_TRACE_BUF_FULL),
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
			      REG_TIMESTAMP),
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
			      CMD_TIMESTAMP),
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
			      HOST_CMD_OVERFLOW),
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
			      HOST_REG_OVERFLOW),
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
			      IMMED_OVERFLOW),
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
			      THREAD_TRACE_UTC_ERROR));
}

static void print_sq_intr_info_inst(struct kfd_node *dev, uint32_t context_id0,
				    uint32_t context_id1)
{
	dev_dbg_ratelimited(
		dev->adev->dev,
		"sq_intr: inst, data 0x%08x, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, DATA),
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
			      SH_ID),
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV),
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
			      WAVE_ID),
		REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
			      SIMD_ID),
		REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
			      WGP_ID));
}

static void print_sq_intr_info_error(struct kfd_node *dev, uint32_t context_id0,
				     uint32_t context_id1)
{
	dev_warn_ratelimited(
		dev->adev->dev,
		"sq_intr: error, detail 0x%08x, type %d, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
			      DETAIL),
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
			      TYPE),
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
			      SH_ID),
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
			      PRIV),
		REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
			      WAVE_ID),
		REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_ERROR_CTXID1,
			      SIMD_ID),
		REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_ERROR_CTXID1,
			      WGP_ID));
}

static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
				uint16_t pasid, uint16_t source_id)
{
	enum amdgpu_ras_block block = 0;
	int ret = -EINVAL;
	uint32_t reset = 0;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);

	if (!p)
		return;

	/* all queues of a process will be unmapped in one time */
	if (atomic_read(&p->poison)) {
		kfd_unref_process(p);
		return;
	}

	atomic_set(&p->poison, 1);
	kfd_unref_process(p);

	switch (source_id) {
	case SOC15_INTSRC_SQ_INTERRUPT_MSG:
		if (dev->dqm->ops.reset_queues)
			ret = dev->dqm->ops.reset_queues(dev->dqm, pasid);
		block = AMDGPU_RAS_BLOCK__GFX;
		if (ret)
			reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
		break;
	case SOC21_INTSRC_SDMA_ECC:
	default:
		block = AMDGPU_RAS_BLOCK__GFX;
		reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
		break;
	}

	kfd_signal_poison_consumed_event(dev, pasid);

	/* resetting queue passes, do page retirement without gpu reset
	 * resetting queue fails, fallback to gpu reset solution
	 */
	amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset);
}
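
/*
 * Interrupt-time filter: returns true when the IH ring entry is of interest
 * to KFD (KFD VMIDs or fence clients) and should be forwarded to
 * event_interrupt_wq_v11() for deferred processing.
 */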
static bool event_interrupt_isr_v11(struct kfd_node *dev,
					const uint32_t *ih_ring_entry,
					uint32_t *patched_ihre,
					bool *patched_flag)
{
	uint16_t source_id, client_id, pasid, vmid;
	const uint32_t *data = ih_ring_entry;
	uint32_t context_id0;

	source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
	client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
	/* Only handle interrupts from KFD VMIDs */
	vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
	if (!KFD_IRQ_IS_FENCE(client_id, source_id) &&
	    (vmid < dev->vm_info.first_vmid_kfd ||
	     vmid > dev->vm_info.last_vmid_kfd))
		return false;

	pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
	context_id0 = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);

	if ((source_id == SOC15_INTSRC_CP_END_OF_PIPE) &&
	    (context_id0 & AMDGPU_FENCE_MES_QUEUE_FLAG))
		return false;

	dev_dbg(dev->adev->dev,
		"client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
		client_id, source_id, vmid, pasid);
	dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
		data[0], data[1], data[2], data[3], data[4], data[5], data[6],
		data[7]);

	if (pasid == 0)
		return false;

	/* Interrupt types we care about: various signals and faults.
	 * They will be forwarded to a work queue (see below).
	 */
	return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
		source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
		source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
		source_id == SOC21_INTSRC_SDMA_TRAP ||
		KFD_IRQ_IS_FENCE(client_id, source_id) ||
		(((client_id == SOC21_IH_CLIENTID_VMC) ||
		  ((client_id == SOC21_IH_CLIENTID_GFX) &&
		   (source_id == UTCL2_1_0__SRCID__FAULT))) &&
		 !amdgpu_no_queue_eviction_on_vm_fault);
}
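
/*
 * Deferred (work queue) handler: decodes the IH ring entry and, depending on
 * client/source, signals HSA events, raises debugger/SMI events for VM faults
 * and bad packets, or hands poison consumption off to the RAS handler.
 */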
static void event_interrupt_wq_v11(struct kfd_node *dev,
				   const uint32_t *ih_ring_entry)
{
	uint16_t source_id, client_id, ring_id, pasid, vmid;
	uint32_t context_id0, context_id1;
	uint8_t sq_int_enc, sq_int_priv, sq_int_errtype;
	struct kfd_vm_fault_info info = {0};
	struct kfd_hsa_memory_exception_data exception_data;

	source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
	client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
	ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
	pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
	vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
	context_id0 = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
	context_id1 = SOC15_CONTEXT_ID1_FROM_IH_ENTRY(ih_ring_entry);

	/* VMC, UTCL2 */
	if (client_id == SOC21_IH_CLIENTID_VMC ||
	    ((client_id == SOC21_IH_CLIENTID_GFX) &&
	     (source_id == UTCL2_1_0__SRCID__FAULT))) {

		info.vmid = vmid;
		info.mc_id = client_id;
		info.page_addr = ih_ring_entry[4] |
			(uint64_t)(ih_ring_entry[5] & 0xf) << 32;
		info.prot_valid = ring_id & 0x08;
		info.prot_read = ring_id & 0x10;
		info.prot_write = ring_id & 0x20;

		memset(&exception_data, 0, sizeof(exception_data));
		exception_data.gpu_id = dev->id;
		exception_data.va = (info.page_addr) << PAGE_SHIFT;
		exception_data.failure.NotPresent = info.prot_valid ? 1 : 0;
		exception_data.failure.NoExecute = info.prot_exec ? 1 : 0;
		exception_data.failure.ReadOnly = info.prot_write ? 1 : 0;
		exception_data.failure.imprecise = 0;

		kfd_set_dbg_ev_from_interrupt(dev, pasid, -1,
					      KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION),
					      &exception_data, sizeof(exception_data));
		kfd_smi_event_update_vmfault(dev, pasid);

	/* GRBM, SDMA, SE, PMM */
	} else if (client_id == SOC21_IH_CLIENTID_GRBM_CP ||
		   client_id == SOC21_IH_CLIENTID_GFX) {

		/* CP */
		if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
			kfd_signal_event_interrupt(pasid, context_id0, 32);
		else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
			 KFD_DBG_EC_TYPE_IS_PACKET(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0))) {
			u32 doorbell_id = KFD_CTXID0_DOORBELL_ID(context_id0);

			kfd_set_dbg_ev_from_interrupt(dev, pasid, doorbell_id,
				KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)),
				NULL, 0);
			kfd_dqm_suspend_bad_queue_mes(dev, pasid, doorbell_id);
		}

		/* SDMA */
		else if (source_id == SOC21_INTSRC_SDMA_TRAP)
			kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
		else if (source_id == SOC21_INTSRC_SDMA_ECC) {
			event_interrupt_poison_consumption_v11(dev, pasid, source_id);
			return;
		}

		/* SQ */
		else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG) {
			sq_int_enc = REG_GET_FIELD(context_id1,
					SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
			switch (sq_int_enc) {
			case SQ_INTERRUPT_WORD_ENCODING_AUTO:
				print_sq_intr_info_auto(dev, context_id0, context_id1);
				break;
			case SQ_INTERRUPT_WORD_ENCODING_INST:
				print_sq_intr_info_inst(dev, context_id0, context_id1);
				sq_int_priv = REG_GET_FIELD(context_id0,
						SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV);
				if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
						KFD_CTXID0_DOORBELL_ID(context_id0),
						KFD_CTXID0_TRAP_CODE(context_id0),
						NULL, 0)))
					return;
				break;
			case SQ_INTERRUPT_WORD_ENCODING_ERROR:
				print_sq_intr_info_error(dev, context_id0, context_id1);
				sq_int_errtype = REG_GET_FIELD(context_id0,
						SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE);
				if (sq_int_errtype != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
				    sq_int_errtype != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
					event_interrupt_poison_consumption_v11(
							dev, pasid, source_id);
					return;
				}
				break;
			default:
				break;
			}
			kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24);
		}

	} else if (KFD_IRQ_IS_FENCE(client_id, source_id)) {
		kfd_process_close_interrupt_drain(pasid);
	}
}

const struct kfd_event_interrupt_class event_interrupt_class_v11 = {
	.interrupt_isr = event_interrupt_isr_v11,
	.interrupt_wq = event_interrupt_wq_v11,
};