/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"

#include "vcn/vcn_3_0_0_offset.h"
#include "vcn/vcn_3_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET	0x401f

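/*
 * Registers captured for IP-state dumps: this list is registered in
 * jpeg_v3_0_sw_init() via amdgpu_jpeg_reg_dump_init() and read back by
 * amdgpu_jpeg_dump_ip_state()/amdgpu_jpeg_print_ip_state().
 */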
static const struct amdgpu_hwip_reg_entry jpeg_reg_list_3_0[] = {
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_POWER_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_INT_STAT),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_CNTL),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_SIZE),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_ADDR_MODE),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_Y_GFX10_TILING_SURFACE),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_UV_GFX10_TILING_SURFACE),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_PITCH),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_UV_PITCH),
};

static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_powergating_state state);

/**
 * jpeg_v3_0_early_init - set function pointers
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 */
static int jpeg_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	u32 harvest;

	switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
	case IP_VERSION(3, 1, 1):
	case IP_VERSION(3, 1, 2):
		break;
	default:
		harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
		if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
			return -ENOENT;
		break;
	}

	adev->jpeg.num_jpeg_inst = 1;
	adev->jpeg.num_jpeg_rings = 1;

	jpeg_v3_0_set_dec_ring_funcs(adev);
	jpeg_v3_0_set_irq_funcs(adev);

	return 0;
}

/**
 * jpeg_v3_0_sw_init - sw init for JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int jpeg_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int r;

	/* JPEG TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	ring = adev->jpeg.inst->ring_dec;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
	ring->vm_hub = AMDGPU_MMHUB0(0);
	sprintf(ring->name, "jpeg_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	adev->jpeg.internal.jpeg_pitch[0] = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
	adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);

	r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_3_0, ARRAY_SIZE(jpeg_reg_list_3_0));
	if (r)
		return r;

	adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
	r = amdgpu_jpeg_sysfs_reset_mask_init(adev);

	return r;
}

/**
 * jpeg_v3_0_sw_fini - sw fini for JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * JPEG suspend and free up sw allocation
 */
static int jpeg_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	amdgpu_jpeg_sysfs_reset_mask_fini(adev);

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v3_0_hw_init - start and test JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Configure the doorbell and test the JPEG decode ring
 */
static int jpeg_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;

	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
					     (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);

	return amdgpu_ring_test_helper(ring);
}

/**
 * jpeg_v3_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the JPEG block, mark ring as not ready any more
 */
static int jpeg_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	cancel_delayed_work_sync(&adev->jpeg.idle_work);

	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
	    RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
		jpeg_v3_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);

	return 0;
}

/**
 * jpeg_v3_0_suspend - suspend JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = jpeg_v3_0_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(ip_block->adev);

	return r;
}

/**
 * jpeg_v3_0_resume - resume JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v3_0_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = amdgpu_jpeg_resume(ip_block->adev);
	if (r)
		return r;

	r = jpeg_v3_0_hw_init(ip_block);

	return r;
}

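/*
 * Clock gating helpers: jpeg_v3_0_disable_clock_gating() opens the JPEG
 * decoder, encoder, JMCIF and JRBBM clocks (selecting dynamic clock mode
 * when MGCG is supported) so the block can be programmed, while
 * jpeg_v3_0_enable_clock_gating() gates them again when the block is stopped.
 */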
static void jpeg_v3_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JPEG_ENC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);

	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
	data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
		| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
		| JPEG_CGC_CTRL__JMCIF_MODE_MASK
		| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
}

static void jpeg_v3_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JPEG_ENC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
}

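/*
 * Static power gating helpers: power the JPEG tile up or down through
 * UVD_PGFSM_CONFIG, poll UVD_PGFSM_STATUS for the requested state, and
 * update the anti-hang bit in UVD_JPEG_POWER_STATUS accordingly.
 */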
static int jpeg_v3_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		uint32_t data = 0;
		int r = 0;

		data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
		WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);

		r = SOC15_WAIT_ON_RREG(JPEG, 0,
			mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);

		if (r) {
			DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
			return r;
		}
	}

	/* disable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0,
		 ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	/* keep the JPEG in static PG mode */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0,
		 ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);

	return 0;
}

static int jpeg_v3_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	/* enable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS),
		 UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
		 ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		uint32_t data = 0;
		int r = 0;

		data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
		WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);

		r = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS,
			(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);

		if (r) {
			DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
			return r;
		}
	}

	return 0;
}

/**
 * jpeg_v3_0_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
	int r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, true);

	/* disable power gating */
	r = jpeg_v3_0_disable_static_power_gating(adev);
	if (r)
		return r;

	/* JPEG disable CGC */
	jpeg_v3_0_disable_clock_gating(adev);

	/* MJPEG global tiling registers */
	WREG32_SOC15(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(JPEG, 0, mmJPEG_ENC_GFX10_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);

	/* enable JMI channel */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), 0,
		 ~UVD_JMI_CNTL__SOFT_RESET_MASK);

	/* enable System Interrupt for JRBC */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmJPEG_SYS_INT_EN),
		 JPEG_SYS_INT_EN__DJRBC_MASK,
		 ~JPEG_SYS_INT_EN__DJRBC_MASK);

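	/*
	 * Program the JRBC ring buffer: VMID, 64-bit base address, reset the
	 * read/write pointers and set the ring size. UVD_JRBC_RB_CNTL is set
	 * to 0x3 first (presumably RB_NO_FETCH | RB_RPTR_WR_EN) so the engine
	 * does not fetch while the ring is being set up, then relaxed to 0x2
	 * before the size is written.
	 */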
	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
	ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);

	return 0;
}

/**
 * jpeg_v3_0_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block
 */
static int jpeg_v3_0_stop(struct amdgpu_device *adev)
{
	int r;

	/* reset JMI */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL),
		 UVD_JMI_CNTL__SOFT_RESET_MASK,
		 ~UVD_JMI_CNTL__SOFT_RESET_MASK);

	jpeg_v3_0_enable_clock_gating(adev);

	/* enable power gating */
	r = jpeg_v3_0_enable_static_power_gating(adev);
	if (r)
		return r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, false);

	return 0;
}

/**
 * jpeg_v3_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR);
}

/**
 * jpeg_v3_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
}

/**
 * jpeg_v3_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static bool jpeg_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret = 1;

	ret &= (((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
		  UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
		 UVD_JRBC_STATUS__RB_JOB_DONE_MASK));

	return ret;
}

static int jpeg_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
				  UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
				  UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}

static int jpeg_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = state == AMD_CG_STATE_GATE;

	if (enable) {
		if (!jpeg_v3_0_is_idle(ip_block))
			return -EBUSY;
		jpeg_v3_0_enable_clock_gating(adev);
	} else {
		jpeg_v3_0_disable_clock_gating(adev);
	}

	return 0;
}

static int jpeg_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret;

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v3_0_stop(adev);
	else
		ret = jpeg_v3_0_start(adev);

	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

static int jpeg_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 unsigned int type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: JPEG TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(adev->jpeg.inst->ring_dec);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

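/*
 * Per-queue reset handler: stop and restart the whole JPEG block, then
 * re-run the ring test. This backs the AMDGPU_RESET_TYPE_PER_QUEUE
 * capability advertised in jpeg_v3_0_sw_init().
 */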
static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
{
	jpeg_v3_0_stop(ring->adev);
	jpeg_v3_0_start(ring->adev);
	return amdgpu_ring_test_helper(ring);
}

static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
	.name = "jpeg_v3_0",
	.early_init = jpeg_v3_0_early_init,
	.sw_init = jpeg_v3_0_sw_init,
	.sw_fini = jpeg_v3_0_sw_fini,
	.hw_init = jpeg_v3_0_hw_init,
	.hw_fini = jpeg_v3_0_hw_fini,
	.suspend = jpeg_v3_0_suspend,
	.resume = jpeg_v3_0_resume,
	.is_idle = jpeg_v3_0_is_idle,
	.wait_for_idle = jpeg_v3_0_wait_for_idle,
	.set_clockgating_state = jpeg_v3_0_set_clockgating_state,
	.set_powergating_state = jpeg_v3_0_set_powergating_state,
	.dump_ip_state = amdgpu_jpeg_dump_ip_state,
	.print_ip_state = amdgpu_jpeg_print_ip_state,
};

static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
	.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
	.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
	.parse_cs = jpeg_v2_dec_ring_parse_cs,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v3_0_dec_ring_emit_vm_flush */
		18 + 18 + /* jpeg_v3_0_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v3_0_dec_ring_emit_ib */
	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v2_0_dec_ring_nop,
	.insert_start = jpeg_v2_0_dec_ring_insert_start,
	.insert_end = jpeg_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.reset = jpeg_v3_0_ring_reset,
};

static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->ring_dec->funcs = &jpeg_v3_0_dec_ring_vm_funcs;
}

static const struct amdgpu_irq_src_funcs jpeg_v3_0_irq_funcs = {
	.set = jpeg_v3_0_set_interrupt_state,
	.process = jpeg_v3_0_process_interrupt,
};

static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->irq.num_types = 1;
	adev->jpeg.inst->irq.funcs = &jpeg_v3_0_irq_funcs;
}

const struct amdgpu_ip_block_version jpeg_v3_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &jpeg_v3_0_ip_funcs,
};