Lines matching +full:per +full:-context (from drivers/gpu/drm/msm/msm_gpu.h)

/* SPDX-License-Identifier: GPL-2.0-only */
#include <linux/adreno-smmu-priv.h>
 * struct msm_gpu_devfreq - devfreq related state
 * Used to delay clamping to idle freq on active->idle transition.
 * The ctx->seqno value of the last context to submit rendering,
 * that support per-context pgtables). Tracked by seqno rather
 * TODO move to per-ring locking where feasible (ie. submit/retire
/* work for handling active-list retiring: */
 * switch-over happened early enough in mesa a6xx bringup that we
	/* in msm_gpu_active(): */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];
		if (fence_after(ring->fctx->last_fence, ring->memptrs->fence))
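For context, these matched lines are the core of the msm_gpu_active() helper; a minimal sketch of the whole function, paraphrased from the surrounding file rather than quoted verbatim:

static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
	int i;

	/* The GPU counts as active if any ring has a submitted fence
	 * (fctx->last_fence) that the GPU has not yet written back to
	 * its memptrs completion slot:
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (fence_after(ring->fctx->last_fence, ring->memptrs->fence))
			return true;
	}

	return false;
}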
/* Perf-Counters:
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH)
 * struct msm_file_private - per-drm_file context
 * @aspace: the per-process GPU address-space
 * @seqno: unique per process seqno
 * pps-producer (perfetto), and restricted to CAP_SYS_ADMIN.
 * context switches. Setting a value of 2 will in addition
 * from this context in ns.
 * context.
 * Table of per-priority-level sched entities used by submitqueues
 * create at most one &drm_sched_entity per-process per-priority-
 * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
 * sched_prio = NR_SCHED_PRIORITIES -
 *              (userspace_prio % NR_SCHED_PRIORITIES) - 1
	/* in msm_gpu_convert_priority(): */
	/* invert sched priority to map to higher-numeric-is-higher-priority convention */
	sp = NR_SCHED_PRIORITIES - sp - 1;
	if (rn >= gpu->nr_rings)
		return -EINVAL;
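A quick worked example of the documented mapping, assuming NR_SCHED_PRIORITIES == 3 and a GPU with four rings (both values are illustrative):

/*
 *   userspace_prio | ring_nr = prio / 3 | sched_prio = 3 - (prio % 3) - 1
 *   ---------------+--------------------+--------------------------------
 *         0        |         0          |  2  (highest within the ring)
 *         1        |         0          |  1
 *         2        |         0          |  0  (lowest within the ring)
 *         3        |         1          |  2
 *        11        |         3          |  0
 *        12        |         4          |  rejected: ring_nr >= nr_rings,
 *                  |                    |  so the helper returns -EINVAL
 */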
 * struct msm_gpu_submitqueue - Userspace created context.
 * A submitqueue is associated with a gl context or vk queue (or equiv)
 * @ctx: the per-drm_file context associated with the submitqueue (ie.
 * @node: node in the context's list of submitqueues
 * @fence_idr: maps fence-id to dma_fence for userspace visible fence
 * @entity: the submit job-queue
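To show how the documented fields fit together, a condensed sketch of the struct (field list abridged and paraphrased; the full header is authoritative):

struct msm_gpu_submitqueue {
	int id;                          /* userspace-visible queue id */
	u32 flags;
	u32 ring_nr;                     /* ring this queue submits to */
	struct msm_file_private *ctx;    /* owning per-drm_file context */
	struct list_head node;           /* entry in the context's submitqueue list */
	struct idr fence_idr;            /* fence-id -> dma_fence for userspace fences */
	struct kref ref;
	struct drm_sched_entity *entity; /* the submit job-queue */
};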
	/* in gpu_write(): */
	msm_writel(data, gpu->mmio + (reg << 2));
	/* in gpu_read(): */
	return msm_readl(gpu->mmio + (reg << 2));
	/* in gpu_rmw(): */
	msm_rmw(gpu->mmio + (reg << 2), mask, or);
	/* in gpu_read64(): */
	val = (u64) msm_readl(gpu->mmio + (reg << 2));
	val |= ((u64) msm_readl(gpu->mmio + ((reg + 1) << 2)) << 32);
	/* in gpu_write64(): */
	msm_writel(lower_32_bits(val), gpu->mmio + (reg << 2));
	msm_writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
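Two details worth spelling out: register offsets here are dword indices, so (reg << 2) converts them to byte offsets into the MMIO mapping, and 64-bit registers are handled as a LO/HI pair of adjacent 32-bit registers. An illustrative use, with a made-up register name:

/*
 * For a hypothetical 64-bit counter whose low dword sits at index
 * REG_EXAMPLE_COUNTER_LO and high dword at REG_EXAMPLE_COUNTER_LO + 1:
 *
 *   u64 ticks = gpu_read64(gpu, REG_EXAMPLE_COUNTER_LO);
 *
 * is equivalent to:
 *
 *   u64 ticks = (u64)gpu_read(gpu, REG_EXAMPLE_COUNTER_LO) |
 *               ((u64)gpu_read(gpu, REG_EXAMPLE_COUNTER_LO + 1) << 32);
 */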
	/* in msm_file_private_put(): */
	kref_put(&ctx->ref, __msm_file_private_destroy);
	/* in msm_file_private_get(): */
	kref_get(&ctx->ref);
	/* in msm_submitqueue_put(): */
	kref_put(&queue->ref, msm_submitqueue_destroy);
	/* in msm_gpu_crashstate_get(): */
	mutex_lock(&gpu->lock);
	if (gpu->crashstate) {
		kref_get(&gpu->crashstate->ref);
		state = gpu->crashstate;
	}
	mutex_unlock(&gpu->lock);
	/* in msm_gpu_crashstate_put(): */
	mutex_lock(&gpu->lock);
	if (gpu->crashstate) {
		if (gpu->funcs->gpu_state_put(gpu->crashstate))
			gpu->crashstate = NULL;
	}
	mutex_unlock(&gpu->lock);
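The two fragments above show the lock-and-refcount pattern used for the captured crash state; a minimal sketch of the getter with its declaration and return value filled in (paraphrased, not a verbatim quote):

static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = NULL;

	mutex_lock(&gpu->lock);
	if (gpu->crashstate) {
		/* Take a reference so the state cannot be freed while the
		 * caller is still reading it:
		 */
		kref_get(&gpu->crashstate->ref);
		state = gpu->crashstate;
	}
	mutex_unlock(&gpu->lock);

	return state;
}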
 * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
	(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
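The matched line is the body of the check_apriv() convenience macro; a short illustration of what it does for a caller passing MSM_BO_WC (the caller is made up for the example):

/*
 * check_apriv(gpu, MSM_BO_WC) expands to:
 *
 *   ((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (MSM_BO_WC)
 *
 * i.e. targets whose hardware supports privileged (APRIV) buffers get
 * MSM_BO_MAP_PRIV OR'd into the allocation flags, everything else gets
 * the flags unchanged.
 */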