Lines matching full:gpu (drivers/gpu/drm/msm/msm_gpu.h)
47 int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
49 int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
51 int (*hw_init)(struct msm_gpu *gpu);
56 int (*ucode_load)(struct msm_gpu *gpu);
58 int (*pm_suspend)(struct msm_gpu *gpu);
59 int (*pm_resume)(struct msm_gpu *gpu);
60 void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
61 void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
63 struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
64 void (*recover)(struct msm_gpu *gpu);
65 void (*destroy)(struct msm_gpu *gpu);
67 /* show GPU status in debugfs: */
68 void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
71 void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
74 u64 (*gpu_busy)(struct msm_gpu *gpu, unsigned long *out_sample_rate);
75 struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
77 unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
79 void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
82 (struct msm_gpu *gpu, struct platform_device *pdev);
84 (struct msm_gpu *gpu);
85 uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
88 * progress: Has the GPU made progress?
90 * Return true if GPU position in cmdstream has advanced (or changed)
94 bool (*progress)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
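
Together these callbacks make up the per-generation backend interface (struct msm_gpu_funcs): a concrete backend fills in a table of them and hands it to the core at init time. A minimal sketch of such a table, with purely illustrative callback names (real backends define their own handlers and also populate whichever optional hooks they support):

static const struct msm_gpu_funcs example_gpu_funcs = {
	/* All names on the right are placeholders, not real symbols. */
	.get_param   = example_get_param,
	.set_param   = example_set_param,
	.hw_init     = example_hw_init,
	.pm_suspend  = example_pm_suspend,
	.pm_resume   = example_pm_resume,
	.submit      = example_submit,
	.flush       = example_flush,
	.active_ring = example_active_ring,
	.recover     = example_recover,
	.destroy     = example_destroy,
	.get_rptr    = example_get_rptr,
};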
127 * Shadow frequency used while the GPU is idle. From the PoV of
129 * adjust frequency while the GPU is idle, but we use this shadow
130 * value as the GPU is actually clamped to minimum frequency while
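
The effect of the shadow value is easiest to see in how the current frequency would be reported back to devfreq; a minimal sketch, assuming the idle_freq member described above lives in the GPU's msm_gpu_devfreq state and that the gpu_get_freq() hook from the funcs table is available:

static unsigned long example_get_freq(struct msm_gpu *gpu)
{
	struct msm_gpu_devfreq *df = &gpu->devfreq;

	/* While idle the HW is clamped to minimum; report the shadow
	 * frequency so devfreq's view stays continuous. */
	if (df->idle_freq)
		return df->idle_freq;

	return gpu->funcs->gpu_get_freq(gpu);
}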
207 * General lock for serializing all the gpu things.
227 /* does gpu need hw_init? */
231 * global_faults: number of GPU hangs not attributed to a particular
259 /* work for handling GPU ioval faults: */
262 /* work for handling GPU recovery: */
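
Hardware init is deferred and may need to be redone after recovery, so paths that are about to touch the hardware check the flag under gpu->lock first; a minimal sketch of that pattern, assuming the needs_hw_init flag the comment above refers to:

static int example_prepare_hw(struct msm_gpu *gpu)
{
	int ret = 0;

	mutex_lock(&gpu->lock);
	if (gpu->needs_hw_init)
		ret = msm_gpu_hw_init(gpu);	/* declared further down */
	mutex_unlock(&gpu->lock);

	return ret;
}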
315 static inline bool msm_gpu_active(struct msm_gpu *gpu)
319 for (i = 0; i < gpu->nr_rings; i++) {
320 struct msm_ringbuffer *ring = gpu->rb[i];
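
Only the lines matching the search term are shown, so the body of msm_gpu_active() appears truncated here. Conceptually it walks every ring and reports whether any submitted work has not yet retired; a plausible reconstruction (the exact fence comparison is an assumption, not taken from this listing):

static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		/* Assumed check: a fence was submitted that has not signalled. */
		if (fence_after(ring->fctx->last_fence, ring->memptrs->fence))
			return true;
	}

	return false;
}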
343 * The number of priority levels provided by drm gpu scheduler. The
356 * @aspace: the per-process GPU address-space
403 * The total (cumulative) elapsed time GPU was busy with rendering
411 * The total (cumulative) GPU cycles elapsed attributed to this
441 * @gpu: the gpu instance
444 * @sched_prio: [out] the gpu scheduler priority level which the userspace
463 static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
475 if (rn >= gpu->nr_rings)
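
The userspace priority is a single integer that folds together a ring number and a scheduler priority within that ring, so the conversion is a divide/remainder against the number of scheduler priority levels, followed by a range check against nr_rings. A sketch of that arithmetic (the constant name and the inversion of the remainder are assumptions based on the doc comment above):

static int example_convert_priority(struct msm_gpu *gpu, int prio,
		unsigned *ring_nr, enum drm_sched_priority *sched_prio)
{
	unsigned rn, sp;

	rn = div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp);

	/* drm_sched treats higher numeric values as higher priority,
	 * userspace the other way around, hence the inversion. */
	sp = NR_SCHED_PRIORITIES - 1 - sp;

	if (rn >= gpu->nr_rings)
		return -EINVAL;

	*ring_nr = rn;
	*sched_prio = sp;
	return 0;
}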
495 * @faults: the number of GPU hangs associated with this submitqueue
562 static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
564 writel(data, gpu->mmio + (reg << 2));
567 static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
569 return readl(gpu->mmio + (reg << 2));
572 static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
574 msm_rmw(gpu->mmio + (reg << 2), mask, or);
577 static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
583 * not quad word aligned and 2) the GPU hardware designers have a bit
585 * spins. The longer a GPU family goes the higher the chance that
595 val = (u64) readl(gpu->mmio + (reg << 2));
596 val |= ((u64) readl(gpu->mmio + ((reg + 1) << 2)) << 32);
601 static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
604 writel(lower_32_bits(val), gpu->mmio + (reg << 2));
605 writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
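
As the comment fragments above hint, there is deliberately no 64-bit MMIO access: gpu_read64()/gpu_write64() are composed from two 32-bit accesses to adjacent registers (reg and reg + 1), so the LO/HI pair does not need to be quad-word aligned, at the cost of the pair not being read atomically. Note that reg is a dword index; the helpers shift it left by two to form the byte offset into gpu->mmio. Usage mirrors the 32-bit helpers, e.g. (register and mask names below are made up):

static u64 example_read_counter(struct msm_gpu *gpu)
{
	/* Enable the block first, leaving the other control bits alone. */
	gpu_rmw(gpu, EXAMPLE_CTRL_REG, EXAMPLE_CTRL_EN_MASK, EXAMPLE_CTRL_EN);

	/* Reads EXAMPLE_COUNTER_LO and EXAMPLE_COUNTER_LO + 1 (the HI half). */
	return gpu_read64(gpu, EXAMPLE_COUNTER_LO);
}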
608 int msm_gpu_pm_suspend(struct msm_gpu *gpu);
609 int msm_gpu_pm_resume(struct msm_gpu *gpu);
611 void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
628 struct msm_gpu *gpu, int sysprof);
643 void msm_devfreq_init(struct msm_gpu *gpu);
644 void msm_devfreq_cleanup(struct msm_gpu *gpu);
645 void msm_devfreq_resume(struct msm_gpu *gpu);
646 void msm_devfreq_suspend(struct msm_gpu *gpu);
647 void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor);
648 void msm_devfreq_active(struct msm_gpu *gpu);
649 void msm_devfreq_idle(struct msm_gpu *gpu);
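
These devfreq entry points track the GPU's lifecycle: init/cleanup at probe and teardown, suspend/resume around power transitions, and active/idle (plus the optional boost) around submission and retirement. A sketch of the conventional call sites, not a verbatim copy of the driver:

static void example_before_submit(struct msm_gpu *gpu)
{
	msm_devfreq_active(gpu);	/* work is about to run; raise clocks */
}

static void example_after_last_retire(struct msm_gpu *gpu)
{
	msm_devfreq_idle(gpu);		/* nothing queued; allow the clamp to min */
}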
651 int msm_gpu_hw_init(struct msm_gpu *gpu);
653 void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
654 void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
655 int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
658 void msm_gpu_retire(struct msm_gpu *gpu);
659 void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
662 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
666 msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);
668 void msm_gpu_cleanup(struct msm_gpu *gpu);
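
The perf-counter trio above is meant to be used as a bracketed sample: start counting, sample active vs. total time (and any raw counters), then stop. A rough sketch, with the sample() parameters beyond what this listing shows treated as assumptions:

static void example_perfcntr_sample(struct msm_gpu *gpu)
{
	uint32_t activetime, totaltime, cntrs[4];
	int ret;

	msm_gpu_perfcntr_start(gpu);
	ret = msm_gpu_perfcntr_sample(gpu, &activetime, &totaltime,
				      ARRAY_SIZE(cntrs), cntrs);
	msm_gpu_perfcntr_stop(gpu);

	if (ret >= 0)
		pr_debug("GPU busy for %u of %u\n", activetime, totaltime);
}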
680 static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
684 mutex_lock(&gpu->lock);
686 if (gpu->crashstate) {
687 kref_get(&gpu->crashstate->ref);
688 state = gpu->crashstate;
691 mutex_unlock(&gpu->lock);
696 static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
698 mutex_lock(&gpu->lock);
700 if (gpu->crashstate) {
701 if (gpu->funcs->gpu_state_put(gpu->crashstate))
702 gpu->crashstate = NULL;
705 mutex_unlock(&gpu->lock);
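
A consumer of the crash state (the devcoredump read path, for example) takes a reference with the get helper and drops it with the put helper when it is done; the helpers take gpu->lock themselves, so the caller does not. Roughly (the show() printer argument is an assumption, since the listing truncates that prototype):

static void example_dump_crashstate(struct msm_gpu *gpu, struct drm_printer *p)
{
	struct msm_gpu_state *state = msm_gpu_crashstate_get(gpu);

	if (!state)
		return;		/* no hang has been captured */

	gpu->funcs->show(gpu, state, p);
	msm_gpu_crashstate_put(gpu);
}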
712 #define check_apriv(gpu, flags) \
713 (((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
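
On parts with a privileged aperture (hw_apriv), kernel-owned buffers get MSM_BO_MAP_PRIV OR'd into their allocation flags so unprivileged command streams cannot touch them; on everything else the macro passes the flags through unchanged. Computing the flags for, say, a ringbuffer allocation might look like this (the base flags shown are illustrative):

/* MSM_BO_MAP_PRIV is added only when gpu->hw_apriv is set. */
uint32_t flags = check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY);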