/linux/drivers/gpu/drm/etnaviv/ |
H A D | etnaviv_gpu.c |
    32  { .name = "etnaviv-gpu,2d" },
    40  int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)  in etnaviv_gpu_get_param() argument
    42  struct etnaviv_drm_private *priv = gpu->drm->dev_private;  in etnaviv_gpu_get_param()
    46  *value = gpu->identity.model;  in etnaviv_gpu_get_param()
    50  *value = gpu->identity.revision;  in etnaviv_gpu_get_param()
    54  *value = gpu->identity.features;  in etnaviv_gpu_get_param()
    58  *value = gpu->identity.minor_features0;  in etnaviv_gpu_get_param()
    62  *value = gpu->identity.minor_features1;  in etnaviv_gpu_get_param()
    66  *value = gpu->identity.minor_features2;  in etnaviv_gpu_get_param()
    70  *value = gpu->identity.minor_features3;  in etnaviv_gpu_get_param()
    [all …]
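The matches above trace the parameter query path: userspace asks for one identity field per call and the driver copies it out of gpu->identity. A minimal sketch of how that dispatch plausibly looks — only the *value assignments are taken from the matches; the case labels and error path are assumptions:

    /* Sketch only: the real function handles many more parameters. */
    int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
    {
        switch (param) {
        case ETNAVIV_PARAM_GPU_MODEL:       /* assumed case label */
            *value = gpu->identity.model;
            break;
        case ETNAVIV_PARAM_GPU_REVISION:    /* assumed case label */
            *value = gpu->identity.revision;
            break;
        default:
            return -EINVAL;                 /* unknown parameter */
        }

        return 0;
    }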
|
H A D | etnaviv_sched.c |
    29  dev_dbg(submit->gpu->dev, "skipping bad job\n");  in etnaviv_sched_run_job()
    38  struct etnaviv_gpu *gpu = submit->gpu;  in etnaviv_sched_timedout_job() local
    43  * If the GPU managed to complete this jobs fence, the timeout has  in etnaviv_sched_timedout_job()
    50  * If the GPU is still making forward progress on the front-end (which  in etnaviv_sched_timedout_job()
    54  dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);  in etnaviv_sched_timedout_job()
    55  change = dma_addr - gpu->hangcheck_dma_addr;  in etnaviv_sched_timedout_job()
    58  mutex_lock(&gpu->lock);  in etnaviv_sched_timedout_job()
    59  gpu_write(gpu, VIVS_MC_PROFILE_CONFIG0,  in etnaviv_sched_timedout_job()
    62  primid = gpu_read(gpu, VIVS_MC_PROFILE_FE_READ);  in etnaviv_sched_timedout_job()
    63  mutex_unlock(&gpu->lock);  in etnaviv_sched_timedout_job()
    [all …]
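The scheduler matches sketch the timeout handler's forward-progress test: read the front-end DMA address, compare it with the value saved at the previous hangcheck, and only treat the job as hung if the FE has stopped moving. A hedged sketch of that idea — the helper name is hypothetical and the surrounding scheduler plumbing is omitted:

    /* Hypothetical helper: returns true if the front-end is still advancing,
     * in which case the timeout handler should extend the job's deadline
     * instead of declaring a hang. */
    static bool etnaviv_fe_making_progress(struct etnaviv_gpu *gpu)
    {
        u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
        u32 change = dma_addr - gpu->hangcheck_dma_addr;

        if (change != 0) {
            gpu->hangcheck_dma_addr = dma_addr;  /* remember the new position */
            return true;
        }

        return false;  /* FE stuck at the same address: likely a real hang */
    }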
|
H A D | etnaviv_buffer.c |
    90  static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,  in etnaviv_cmd_select_pipe() argument
    95  lockdep_assert_held(&gpu->lock);  in etnaviv_cmd_select_pipe()
   103  if (gpu->exec_state == ETNA_PIPE_2D)  in etnaviv_cmd_select_pipe()
   105  else if (gpu->exec_state == ETNA_PIPE_3D)  in etnaviv_cmd_select_pipe()
   116  static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,  in etnaviv_buffer_dump() argument
   122  dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",  in etnaviv_buffer_dump()
   124  &gpu->mmu_context->cmdbuf_mapping) +  in etnaviv_buffer_dump()
   133  * The GPU may be executing this WAIT while we're modifying it, so we have
   134  * to write it in a specific order to avoid the GPU branching to somewhere
   152  static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,  in etnaviv_buffer_reserve() argument
    [all …]
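The etnaviv_cmd_select_pipe() matches hint at the flush that must precede a pipe switch: which caches get flushed depends on whether the command buffer was last executing 2D or 3D state. A rough sketch of that decision — the helper name and the VIVS_GL_FLUSH_CACHE_* flag names are assumptions modelled on the register naming seen in the other etnaviv excerpts:

    /* Hypothetical helper: pick the cache-flush bits needed before
     * switching the front-end to another pipe. */
    static u32 etnaviv_pipe_switch_flush(struct etnaviv_gpu *gpu)
    {
        lockdep_assert_held(&gpu->lock);

        if (gpu->exec_state == ETNA_PIPE_2D)
            return VIVS_GL_FLUSH_CACHE_PE2D;        /* 2D pixel engine */
        else if (gpu->exec_state == ETNA_PIPE_3D)
            return VIVS_GL_FLUSH_CACHE_DEPTH |
                   VIVS_GL_FLUSH_CACHE_COLOR;       /* 3D depth + color caches */

        return 0;
    }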
|
H A D | etnaviv_perfmon.c |
    18  u32 (*sample)(struct etnaviv_gpu *gpu,
    40  static u32 perf_reg_read(struct etnaviv_gpu *gpu,  in perf_reg_read() argument
    44  gpu_write(gpu, domain->profile_config, signal->data);  in perf_reg_read()
    46  return gpu_read(gpu, domain->profile_read);  in perf_reg_read()
    49  static inline void pipe_select(struct etnaviv_gpu *gpu, u32 clock, unsigned pipe)  in pipe_select() argument
    54  gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);  in pipe_select()
    57  static u32 pipe_perf_reg_read(struct etnaviv_gpu *gpu,  in pipe_perf_reg_read() argument
    61  u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);  in pipe_perf_reg_read()
    65  lockdep_assert_held(&gpu->lock);  in pipe_perf_reg_read()
    67  for (i = 0; i < gpu->identity.pixel_pipes; i++) {  in pipe_perf_reg_read()
    [all …]
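The perfmon matches outline a two-step read: write the signal selector into the domain's profile_config register, then read the counter back from profile_read; on multi-pipe GPUs this is repeated once per pixel pipe with pipe_select() in between. A hedged sketch of the per-pipe accumulation — the parameter types are abbreviated in the excerpt, so they are assumed here, and the restore of the original pipe is simplified:

    /* Sketch: sum a performance counter across all pixel pipes.
     * Assumes gpu->lock is held, as the lockdep assertion above suggests. */
    static u32 pipe_perf_reg_read_sketch(struct etnaviv_gpu *gpu,
                                         const struct etnaviv_pm_domain *domain,
                                         const struct etnaviv_pm_signal *signal)
    {
        u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
        u32 value = 0;
        unsigned i;

        lockdep_assert_held(&gpu->lock);

        for (i = 0; i < gpu->identity.pixel_pipes; i++) {
            pipe_select(gpu, clock, i);        /* route the counters to pipe i */
            value += perf_reg_read(gpu, domain, signal);
        }

        pipe_select(gpu, clock, 0);            /* restore the default pipe */
        return value;
    }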
|
H A D | etnaviv_gpu.h |
    90  void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
   170  static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)  in gpu_write() argument
   172  writel(data, gpu->mmio + reg);  in gpu_write()
   175  static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)  in gpu_read() argument
   182  readl(gpu->mmio + reg);  in gpu_read()
   184  return readl(gpu->mmio + reg);  in gpu_read()
   187  static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg)  in gpu_fix_power_address() argument
   190  if (gpu->identity.model == chipModel_GC300 &&  in gpu_fix_power_address()
   191  gpu->identity.revision < 0x2000)  in gpu_fix_power_address()
   197  static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data)  in gpu_write_power() argument
    [all …]
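The header matches show the raw MMIO accessors (writel()/readl() on gpu->mmio + reg) plus gpu_fix_power_address(), a quirk that remaps power-register offsets on early GC300 revisions. The truncated gpu_write_power() plausibly just applies that fixup before the ordinary write; a sketch of the pairing, offered as an inference rather than the verified body:

    /* Sketch: power-register accessors that apply the GC300 offset fixup. */
    static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data)
    {
        gpu_write(gpu, gpu_fix_power_address(gpu, reg), data);
    }

    static inline u32 gpu_read_power(struct etnaviv_gpu *gpu, u32 reg)
    {
        return gpu_read(gpu, gpu_fix_power_address(gpu, reg));
    }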
|
/linux/drivers/gpu/drm/msm/adreno/ |
H A D | adreno_gpu.h |
    37  * so it helps to be able to group the GPU devices by generation and if
    75  int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
   187  * of gpu firmware to linux-firmware, the fw files were
   218  * GPU specific offsets will be exported by GPU specific
   256  static inline uint8_t adreno_patchid(const struct adreno_gpu *gpu)  in adreno_patchid() argument
   262  WARN_ON_ONCE(gpu->info->family >= ADRENO_6XX_GEN1);  in adreno_patchid()
   263  return gpu->chip_id & 0xff;  in adreno_patchid()
   266  static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)  in adreno_is_revn() argument
   268  if (WARN_ON_ONCE(!gpu->info))  in adreno_is_revn()
   270  return gpu->info->revn == revn;  in adreno_is_revn()
    [all …]
|
H A D | a3xx_gpu.c |
    28  static void a3xx_dump(struct msm_gpu *gpu);
    29  static bool a3xx_idle(struct msm_gpu *gpu);
    31  static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)  in a3xx_submit() argument
    69  /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */  in a3xx_submit()
    82  adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);  in a3xx_submit()
    85  static bool a3xx_me_init(struct msm_gpu *gpu)  in a3xx_me_init() argument
    87  struct msm_ringbuffer *ring = gpu->rb[0];  in a3xx_me_init()
   108  adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);  in a3xx_me_init()
   109  return a3xx_idle(gpu);  in a3xx_me_init()
   112  static int a3xx_hw_init(struct msm_gpu *gpu)  in a3xx_hw_init() argument
    [all …]
|
H A D | a6xx_gpu.c |
    19  static inline bool _a6xx_check_idle(struct msm_gpu *gpu)  in _a6xx_check_idle() argument
    21  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in _a6xx_check_idle()
    29  if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &  in _a6xx_check_idle()
    33  return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &  in _a6xx_check_idle()
    37  static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)  in a6xx_idle() argument
    40  if (!adreno_idle(gpu, ring))  in a6xx_idle()
    43  if (spin_until(_a6xx_check_idle(gpu))) {  in a6xx_idle()
    44  DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",  in a6xx_idle()
    45  gpu->name, __builtin_return_address(0),  in a6xx_idle()
    46  gpu_read(gpu, REG_A6XX_RBBM_STATUS),  in a6xx_idle()
    [all …]
|
H A D | a4xx_gpu.c |
    22  static void a4xx_dump(struct msm_gpu *gpu);
    23  static bool a4xx_idle(struct msm_gpu *gpu);
    25  static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)  in a4xx_submit() argument
    63  /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */  in a4xx_submit()
    69  adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);  in a4xx_submit()
    76  static void a4xx_enable_hwcg(struct msm_gpu *gpu)  in a4xx_enable_hwcg() argument
    78  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in a4xx_enable_hwcg()
    81  gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);  in a4xx_enable_hwcg()
    83  gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);  in a4xx_enable_hwcg()
    85  gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);  in a4xx_enable_hwcg()
    [all …]
|
H A D | a5xx_power.c |
   103  static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)  in _get_mvolts() argument
   105  struct drm_device *dev = gpu->dev;  in _get_mvolts()
   122  static void a530_lm_setup(struct msm_gpu *gpu)  in a530_lm_setup() argument
   124  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in a530_lm_setup()
   130  gpu_write(gpu, a5xx_sequence_regs[i].reg,  in a530_lm_setup()
   133  /* Hard code the A530 GPU thermal sensor ID for the GPMU */  in a530_lm_setup()
   134  gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, 0x60007);  in a530_lm_setup()
   135  gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);  in a530_lm_setup()
   136  gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);  in a530_lm_setup()
   139  gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);  in a530_lm_setup()
    [all …]
|
H A D | adreno_device.c |
    16  MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off…
    24  MODULE_PARM_DESC(disable_acd, "Forcefully disable GPU ACD");
    45  /* identify gpu: */  in adreno_info()
    66  struct msm_gpu *gpu = NULL;  in adreno_load_gpu() local
    71  gpu = dev_to_gpu(&pdev->dev);  in adreno_load_gpu()
    73  if (!gpu) {  in adreno_load_gpu()
    74  dev_err_once(dev->dev, "no GPU device was found\n");  in adreno_load_gpu()
    78  adreno_gpu = to_adreno_gpu(gpu);  in adreno_load_gpu()
    90  if (gpu->funcs->ucode_load) {  in adreno_load_gpu()
    91  ret = gpu->funcs->ucode_load(gpu);  in adreno_load_gpu()
    [all …]
|
H A D | a5xx_preempt.c |
    25  static inline void set_preempt_state(struct a5xx_gpu *gpu,  in set_preempt_state() argument
    34  atomic_set(&gpu->preempt_state, new);  in set_preempt_state()
    40  static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)  in update_wptr() argument
    52  gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);  in update_wptr()
    56  static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)  in get_next_ring() argument
    58  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in get_next_ring()
    63  for (i = 0; i < gpu->nr_rings; i++) {  in get_next_ring()
    65  struct msm_ringbuffer *ring = gpu->rb[i];  in get_next_ring()
    68  empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));  in get_next_ring()
    84  struct msm_gpu *gpu = &a5xx_gpu->base.base;  in a5xx_preempt_timer() local
    [all …]
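The preemption matches show how the next ring to execute is chosen: walk gpu->rb[] in priority order and take the first ring whose software write pointer differs from the hardware read pointer, i.e. the first ring with work still queued. A hedged sketch of that loop (per-ring locking, which the excerpt elides, is omitted):

    /* Sketch: return the highest-priority ring that still has commands queued. */
    static struct msm_ringbuffer *get_next_ring_sketch(struct msm_gpu *gpu)
    {
        int i;

        for (i = 0; i < gpu->nr_rings; i++) {
            struct msm_ringbuffer *ring = gpu->rb[i];
            bool empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));

            if (!empty)
                return ring;  /* rb[0] is highest priority, so first hit wins */
        }

        return NULL;  /* nothing pending on any ring */
    }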
|
H A D | a6xx_preempt.c |
    29  static inline void set_preempt_state(struct a6xx_gpu *gpu,  in set_preempt_state() argument
    38  atomic_set(&gpu->preempt_state, new);  in set_preempt_state()
    44  static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)  in update_wptr() argument
    54  gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);  in update_wptr()
    63  static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)  in get_next_ring() argument
    65  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in get_next_ring()
    71  for (i = 0; i < gpu->nr_rings; i++) {  in get_next_ring()
    73  struct msm_ringbuffer *ring = gpu->rb[i];  in get_next_ring()
    76  empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));  in get_next_ring()
    92  struct msm_gpu *gpu = &a6xx_gpu->base.base;  in a6xx_preempt_timer() local
    [all …]
|
H A D | a5xx_debugfs.c |
    14  static void pfp_print(struct msm_gpu *gpu, struct drm_printer *p)  in pfp_print() argument
    21  gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, i);  in pfp_print()
    23  gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA));  in pfp_print()
    27  static void me_print(struct msm_gpu *gpu, struct drm_printer *p)  in me_print() argument
    34  gpu_write(gpu, REG_A5XX_CP_ME_STAT_ADDR, i);  in me_print()
    36  gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA));  in me_print()
    40  static void meq_print(struct msm_gpu *gpu, struct drm_printer *p)  in meq_print() argument
    45  gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);  in meq_print()
    49  gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));  in meq_print()
    53  static void roq_print(struct msm_gpu *gpu, struct drm_printer *p)  in roq_print() argument
    [all …]
|
H A D | a6xx_gpu_state.c |
   131  static int a6xx_crashdumper_init(struct msm_gpu *gpu,  in a6xx_crashdumper_init() argument
   134  dumper->ptr = msm_gem_kernel_new(gpu->dev,  in a6xx_crashdumper_init()
   135  SZ_1M, MSM_BO_WC, gpu->vm,  in a6xx_crashdumper_init()
   144  static int a6xx_crashdumper_run(struct msm_gpu *gpu,  in a6xx_crashdumper_run() argument
   147  struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);  in a6xx_crashdumper_run()
   161  gpu_write64(gpu, REG_A6XX_CP_CRASH_DUMP_SCRIPT_BASE, dumper->iova);  in a6xx_crashdumper_run()
   163  gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);  in a6xx_crashdumper_run()
   165  ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,  in a6xx_crashdumper_run()
   168  gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0);  in a6xx_crashdumper_run()
   174  static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,  in debugbus_read() argument
    [all …]
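The crashdumper matches outline the capture sequence: point the CP at the crash-dump script buffer, start it by writing 1 to the control register, poll the status register until the dump finishes (or times out), then clear the control register again. A hedged sketch of that sequence — the completion bit and the timeout values are assumptions where the excerpt truncates the gpu_poll_timeout() condition:

    /* Sketch of the crashdumper kick / poll / stop sequence. */
    static int a6xx_crashdumper_run_sketch(struct msm_gpu *gpu,
                                           struct a6xx_crashdumper *dumper)
    {
        u32 val;
        int ret;

        /* Tell the CP where the crash-dump script lives. */
        gpu_write64(gpu, REG_A6XX_CP_CRASH_DUMP_SCRIPT_BASE, dumper->iova);

        /* Start the dump. */
        gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);

        /* Wait for completion; the status bit and timeouts are assumed here. */
        ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,
                               val & 0x02, 100, 10000);

        /* Stop the dumper regardless of the outcome. */
        gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0);

        return ret;
    }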
|
/linux/sound/hda/codecs/hdmi/ |
H A D | nvhdmi.c |
   145  HDA_CODEC_ID_MODEL(0x10de0008, "GPU 08 HDMI/DP", MODEL_LEGACY),
   146  HDA_CODEC_ID_MODEL(0x10de0009, "GPU 09 HDMI/DP", MODEL_LEGACY),
   147  HDA_CODEC_ID_MODEL(0x10de000a, "GPU 0a HDMI/DP", MODEL_LEGACY),
   148  HDA_CODEC_ID_MODEL(0x10de000b, "GPU 0b HDMI/DP", MODEL_LEGACY),
   150  HDA_CODEC_ID_MODEL(0x10de000d, "GPU 0d HDMI/DP", MODEL_LEGACY),
   151  HDA_CODEC_ID_MODEL(0x10de0010, "GPU 10 HDMI/DP", MODEL_LEGACY),
   152  HDA_CODEC_ID_MODEL(0x10de0011, "GPU 11 HDMI/DP", MODEL_LEGACY),
   153  HDA_CODEC_ID_MODEL(0x10de0012, "GPU 12 HDMI/DP", MODEL_LEGACY),
   154  HDA_CODEC_ID_MODEL(0x10de0013, "GPU 13 HDMI/DP", MODEL_LEGACY),
   155  HDA_CODEC_ID_MODEL(0x10de0014, "GPU 14 HDMI/DP", MODEL_LEGACY),
    [all …]
|
/linux/drivers/gpu/drm/ |
H A D | Kconfig |
    31  source "drivers/gpu/drm/Kconfig.debug"
   172  source "drivers/gpu/drm/clients/Kconfig"
   186  source "drivers/gpu/drm/display/Kconfig"
   193  GPU memory management subsystem for devices with multiple
   194  GPU memory types. Will be enabled automatically if a device driver
   208  GPU-VM representation providing helpers to manage a GPUs virtual
   217  GPU-SVM representation providing helpers to manage a GPUs shared
   268  source "drivers/gpu/drm/sysfb/Kconfig"
   270  source "drivers/gpu/drm/arm/Kconfig"
   272  source "drivers/gpu/drm/radeon/Kconfig"
    [all …]
|
H A D | drm_gpusvm.c |
    25  * GPU Shared Virtual Memory (GPU SVM) layer for the Direct Rendering Manager (DRM)
    27  * between the CPU and GPU. It enables efficient data exchange and processing
    28  * for GPU-accelerated applications by allowing memory sharing and
    29  * synchronization between the CPU's and GPU's virtual address spaces.
    31  * Key GPU SVM Components:
    34  * Used for tracking memory intervals and notifying the GPU of changes,
    35  * notifiers are sized based on a GPU SVM initialization parameter, with a
    38  * tracked within a GPU SVM Red-BlacK tree and list and are dynamically
    42  * Represent memory ranges mapped in a DRM device and managed by GPU SVM.
    43  * They are sized based on an array of chunk sizes, which is a GPU SVM
    [all …]
|
/linux/drivers/gpu/drm/msm/ |
H A D | msm_gpu_devfreq.c |
    22  struct msm_gpu *gpu = dev_to_gpu(dev);  in msm_devfreq_target() local
    23  struct msm_gpu_devfreq *df = &gpu->devfreq;  in msm_devfreq_target()
    37  * If the GPU is idle, devfreq is not aware, so just stash  in msm_devfreq_target()
    46  if (gpu->funcs->gpu_set_freq) {  in msm_devfreq_target()
    48  gpu->funcs->gpu_set_freq(gpu, opp, df->suspended);  in msm_devfreq_target()
    59  static unsigned long get_freq(struct msm_gpu *gpu)  in get_freq() argument
    61  struct msm_gpu_devfreq *df = &gpu->devfreq;  in get_freq()
    64  * If the GPU is idle, use the shadow/saved freq to avoid  in get_freq()
    71  if (gpu->funcs->gpu_get_freq)  in get_freq()
    72  return gpu->funcs->gpu_get_freq(gpu);  in get_freq()
    [all …]
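The devfreq matches show the idle/suspend handling on both sides: when devfreq asks for a new frequency while the GPU is suspended, the value is only stashed and programmed later; when devfreq asks for the current frequency while the GPU is idle, the stashed value is reported instead of touching powered-down hardware. A hedged sketch of the read side — the idle_freq field name is a stand-in for whatever the stashed value is actually called, since the excerpt trails off:

    /* Sketch: report the GPU clock without waking an idle GPU.
     * df->idle_freq is a stand-in name for the stashed frequency. */
    static unsigned long get_freq_sketch(struct msm_gpu *gpu)
    {
        struct msm_gpu_devfreq *df = &gpu->devfreq;

        /* If the GPU is idle, use the shadow/saved freq rather than
         * reading hardware that may be powered down. */
        if (df->idle_freq)
            return df->idle_freq;

        if (gpu->funcs->gpu_get_freq)
            return gpu->funcs->gpu_get_freq(gpu);

        return clk_get_rate(gpu->core_clk);  /* fallback: ask the clock framework */
    }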
|
/linux/include/drm/ |
H A D | drm_gpusvm.h |
    23  * struct drm_gpusvm_ops - Operations structure for GPU SVM
    25  * This structure defines the operations for GPU Shared Virtual Memory (SVM).
    26  * These operations are provided by the GPU driver to manage SVM ranges and
    31  * @notifier_alloc: Allocate a GPU SVM notifier (optional)
    33  * Allocate a GPU SVM notifier.
    35  * Return: Pointer to the allocated GPU SVM notifier on success, NULL on failure.
    40  * @notifier_free: Free a GPU SVM notifier (optional)
    41  * @notifier: Pointer to the GPU SVM notifier to be freed
    43  * Free a GPU SVM notifier.
    48  * @range_alloc: Allocate a GPU SVM range (optional)
    [all …]
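The kernel-doc excerpt describes drm_gpusvm_ops as a table of optional allocation hooks supplied by the driver, so the driver can embed the notifier and range objects in its own structures. An illustrative sketch of such wiring — the callback signatures are inferred from the kernel-doc above and the my_* names are hypothetical, not taken from any in-tree driver:

    /* Illustrative sketch: a driver embedding GPU SVM notifiers in its own type. */
    struct my_svm_notifier {
        struct drm_gpusvm_notifier base;   /* GPU SVM core object, embedded */
        /* driver-private bookkeeping would follow */
    };

    static struct drm_gpusvm_notifier *my_notifier_alloc(void)
    {
        struct my_svm_notifier *n = kzalloc(sizeof(*n), GFP_KERNEL);

        return n ? &n->base : NULL;        /* NULL on failure, as documented */
    }

    static void my_notifier_free(struct drm_gpusvm_notifier *notifier)
    {
        kfree(container_of(notifier, struct my_svm_notifier, base));
    }

    static const struct drm_gpusvm_ops my_gpusvm_ops = {
        .notifier_alloc = my_notifier_alloc,
        .notifier_free  = my_notifier_free,
        /* a .range_alloc hook would follow the same pattern */
    };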
|
/linux/Documentation/gpu/ |
H A D | i915.rst |
    19  .. kernel-doc:: drivers/gpu/drm/i915/intel_runtime_pm.c
    22  .. kernel-doc:: drivers/gpu/drm/i915/intel_runtime_pm.c
    25  .. kernel-doc:: drivers/gpu/drm/i915/intel_uncore.c
    31  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
    34  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
    37  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
    40  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
    46  .. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
    49  .. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
    55  .. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
    [all …]
|
H A D | drm-kms-helpers.rst |
    53  .. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
    59  .. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
    68  .. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
    74  .. kernel-doc:: drivers/gpu/drm/drm_atomic_state_helper.c
    80  .. kernel-doc:: drivers/gpu/drm/drm_atomic_state_helper.c
    86  .. kernel-doc:: drivers/gpu/drm/drm_gem_atomic_helper.c
    92  .. kernel-doc:: drivers/gpu/drm/drm_gem_atomic_helper.c
    98  .. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
   104  .. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
   110  .. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c
    [all …]
|
/linux/drivers/gpu/drm/panthor/ |
H A D | panthor_gpu.c |
    24  * struct panthor_gpu - GPU block management data.
    27  /** @irq: GPU irq. */
    33  /** @pending_reqs: Pending GPU requests. */
    36  /** @reqs_acked: GPU request wait queue. */
    41  * struct panthor_model - GPU model description
    55  * GPU_MODEL() - Define a GPU model. A GPU product can be uniquely identified
    58  * @_name: Name for the GPU model.
   154  drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",  in panthor_gpu_irq_handler()
   159  drm_warn(&ptdev->base, "GPU Fault in protected mode\n");  in panthor_gpu_irq_handler()
   161  spin_lock(&ptdev->gpu->reqs_lock);  in panthor_gpu_irq_handler()
    [all …]
|
/linux/drivers/gpu/drm/ci/xfails/ |
H A D | msm-sm8350-hdk-skips.txt |
    24  # [ 200.895243] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=PERMISS…
    25  # [ 200.906885] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
    26  # [ 200.917625] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
    27  # [ 200.928353] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
    28  # [ 200.939084] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
    29  # [ 200.949815] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
    31  # [ 200.960467] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
    32  # [ 200.960500] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
    33  # [ 200.995966] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
    34  # [ 201.006702] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
    [all …]
|
/linux/Documentation/gpu/rfc/ |
H A D | gpusvm.rst |
     4  GPU SVM Section
    25  * Eviction is defined as migrating data from the GPU back to the
    26  CPU without a virtual address to free up GPU memory.
    32  * GPU page table invalidation, which requires a GPU virtual address, is
    33  handled via the notifier that has access to the GPU virtual address.
    34  * GPU fault side
    36  and should strive to take mmap_read lock only in GPU SVM layer.
    37  * Big retry loop to handle all races with the mmu notifier under the gpu
    47  migration policy requiring GPU access to occur in GPU memory.
    49  While no current user (Xe) of GPU SVM has such a policy, it is likely
    [all …]
|