
Searched refs: workload (Results 1 – 25 of 144), sorted by relevance


/linux/drivers/gpu/drm/i915/gvt/
scheduler.c
63 static void update_shadow_pdps(struct intel_vgpu_workload *workload) in update_shadow_pdps() argument
66 struct intel_context *ctx = workload->req->context; in update_shadow_pdps()
68 if (WARN_ON(!workload->shadow_mm)) in update_shadow_pdps()
71 if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount))) in update_shadow_pdps()
76 (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps); in update_shadow_pdps()
84 static void sr_oa_regs(struct intel_vgpu_workload *workload, in sr_oa_regs() argument
87 struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915; in sr_oa_regs()
101 if (workload->engine->id != RCS0) in sr_oa_regs()
105 workload->oactxctrl = reg_state[ctx_oactxctrl + 1]; in sr_oa_regs()
107 for (i = 0; i < ARRAY_SIZE(workload in sr_oa_regs()
127 populate_shadow_context(struct intel_vgpu_workload * workload) populate_shadow_context() argument
294 struct intel_vgpu_workload *workload; shadow_context_status_change() local
346 shadow_context_descriptor_update(struct intel_context * ce,struct intel_vgpu_workload * workload) shadow_context_descriptor_update() argument
361 copy_workload_to_ring_buffer(struct intel_vgpu_workload * workload) copy_workload_to_ring_buffer() argument
434 set_context_ppgtt_from_shadow(struct intel_vgpu_workload * workload,struct intel_context * ce) set_context_ppgtt_from_shadow() argument
459 intel_gvt_workload_req_alloc(struct intel_vgpu_workload * workload) intel_gvt_workload_req_alloc() argument
486 intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload * workload) intel_gvt_scan_and_shadow_workload() argument
522 prepare_shadow_batch_buffer(struct intel_vgpu_workload * workload) prepare_shadow_batch_buffer() argument
593 struct intel_vgpu_workload *workload = update_wa_ctx_2_shadow_ctx() local
651 update_vreg_in_ctx(struct intel_vgpu_workload * workload) update_vreg_in_ctx() argument
657 release_shadow_batch_buffer(struct intel_vgpu_workload * workload) release_shadow_batch_buffer() argument
685 intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload * workload) intel_vgpu_shadow_mm_pin() argument
726 intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload * workload) intel_vgpu_shadow_mm_unpin() argument
738 prepare_workload(struct intel_vgpu_workload * workload) prepare_workload() argument
800 dispatch_workload(struct intel_vgpu_workload * workload) dispatch_workload() argument
852 struct intel_vgpu_workload *workload = NULL; pick_next_workload() local
937 update_guest_context(struct intel_vgpu_workload * workload) update_guest_context() argument
1070 struct intel_vgpu_workload *workload = complete_current_workload() local
1157 struct intel_vgpu_workload *workload = NULL; workload_thread() local
1518 intel_vgpu_destroy_workload(struct intel_vgpu_workload * workload) intel_vgpu_destroy_workload() argument
1546 struct intel_vgpu_workload *workload; alloc_workload() local
1581 prepare_mm(struct intel_vgpu_workload * workload) prepare_mm() argument
1635 struct intel_vgpu_workload *workload = NULL; intel_vgpu_create_workload() local
1781 intel_vgpu_queue_workload(struct intel_vgpu_workload * workload) intel_vgpu_queue_workload() argument
[all...]
execlist.c
370 static int prepare_execlist_workload(struct intel_vgpu_workload *workload) in prepare_execlist_workload() argument
372 struct intel_vgpu *vgpu = workload->vgpu; in prepare_execlist_workload()
377 if (!workload->emulate_schedule_in) in prepare_execlist_workload()
380 ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0); in prepare_execlist_workload()
381 ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1); in prepare_execlist_workload()
383 ret = emulate_execlist_schedule_in(&s->execlist[workload->engine->id], in prepare_execlist_workload()
392 static int complete_execlist_workload(struct intel_vgpu_workload *workload) in complete_execlist_workload() argument
394 struct intel_vgpu *vgpu = workload->vgpu; in complete_execlist_workload()
397 &s->execlist[workload->engine->id]; in complete_execlist_workload()
399 struct list_head *next = workload_q_head(vgpu, workload in complete_execlist_workload()
436 struct intel_vgpu_workload *workload = NULL; submit_context() local
[all...]
cmd_parser.c
518 struct intel_vgpu_workload *workload; member
856 u32 base = s->workload->engine->mmio_base; in is_cmd_update_pdps()
864 struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm; in cmd_pdp_mmio_update_handler()
880 &s->workload->lri_shadow_mm); in cmd_pdp_mmio_update_handler()
1007 * In order to let workload with inhibit context to generate in cmd_reg_handler()
1009 * hw via LRIs in the workload with inhibit context. But as in cmd_reg_handler()
1010 * indirect context is loaded prior to LRIs in workload, we don't in cmd_reg_handler()
1013 * update reg values in it into vregs, so LRIs in workload with in cmd_reg_handler()
1020 s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4); in cmd_reg_handler()
1231 s->workload in cmd_handler_pipe_control()
2841 scan_workload(struct intel_vgpu_workload * workload) scan_workload() argument
2886 struct intel_vgpu_workload *workload = container_of(wa_ctx, scan_wa_ctx() local
2923 shadow_workload_ring_buffer(struct intel_vgpu_workload * workload) shadow_workload_ring_buffer() argument
2982 intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload * workload) intel_gvt_scan_and_shadow_ringbuffer() argument
3005 struct intel_vgpu_workload *workload = container_of(wa_ctx, shadow_indirect_ctx() local
3077 struct intel_vgpu_workload *workload = container_of(wa_ctx, intel_gvt_scan_and_shadow_wa_ctx() local
3162 intel_gvt_scan_engine_context(struct intel_vgpu_workload * workload) intel_gvt_scan_engine_context() argument
[all...]
cmd_parser.h
50 int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
56 int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload);
trace.h
231 void *workload, const char *cmd_name),
234 buf_addr_type, workload, cmd_name),
243 __field(void*, workload)
255 __entry->workload = workload;
261 TP_printk("vgpu%d ring %d: address_type %u, buf_type %u, ip_gma %08x,cmd (name=%s,len=%u,raw cmd=%s), workload=%p\n",
271 __entry->workload)
scheduler.h
89 /* if this workload has been dispatched to i915? */
91 bool shadow; /* if workload has done shadow of guest request */
137 void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
164 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
/linux/Documentation/accel/amdxdna/
amdnpu.rst
34 partition which can be bound to a workload context.
55 XDNA Array partition setup, XDNA Array configuration, workload context
56 management and workload orchestration.
59 called ERT to service each workload context. ERT is also used to execute user
60 provided ``ctrlcode`` associated with the workload context.
74 workload context. The user channel is primarily used for submitting work to
125 may be *exclusively* bound to one workload context while another partition may
126 be *temporarily* bound to more than one workload contexts. The microcontroller
134 of 2D array among various workloads. Every workload describes the number
136 component uses hints passed by the workload an
[all...]
/linux/tools/perf/tests/shell/
stat_bpf_counters.sh
7 workload="perf test -w sqrtloop"
44 base_instructions=$(perf stat --no-big-num -e instructions -- $workload 2>&1 | awk '/instructions/ {print $1}')
45 bpf_instructions=$(perf stat --no-big-num --bpf-counters -e instructions -- $workload 2>&1 | awk '/instructions/ {print $1}')
54 stat_output=$(perf stat --no-big-num -e instructions/name=base_instructions/,instructions/name=bpf_instructions/b -- $workload 2>&1)
trace_summary.sh
17 workload="true"
20 echo "testing: perf trace ${args} -- ${workload}"
21 perf trace ${args} -- ${workload} >${OUTPUT} 2>&1
stat_metrics_values.sh
14 workload="perf bench futex hash -r 2 -s"
22 $PYTHON $pythonvalidator -rule $rulefile -output_dir $tmpdir -wl "${workload}" \
/linux/Documentation/admin-guide/
workload-tracing.rst
4 Discovering Linux kernel subsystems used by a workload
14 * Understanding system resources necessary to build and run a workload
17 in use by a workload. The completeness of the system usage information
18 depends on the completeness of coverage of a workload.
24 * Once we discover and understand the workload needs, we can focus on them
32 the system resources in use by a workload. Once we discover and understand
33 the workload needs, we can focus on them to avoid regressions and use it
37 the workload and doesn't include all the system calls that can be invoked
39 these system calls that are invoked. As an example, if a workload opens a
42 is a workload tha
[all...]
/linux/tools/perf/tests/
perf-record.c
94 * Prepare the workload in argv[] to run, it'll fork it, and then wait in test__PERF_RECORD()
101 pr_debug("Couldn't run the workload!\n"); in test__PERF_RECORD()
114 err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask); in test__PERF_RECORD()
126 if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) { in test__PERF_RECORD()
157 * count just on workload.pid, which will start... in test__PERF_RECORD()
212 if ((pid_t)sample.pid != evlist->workload.pid) { in test__PERF_RECORD()
214 name, evlist->workload.pid, sample.pid); in test__PERF_RECORD()
218 if ((pid_t)sample.tid != evlist->workload.pid) { in test__PERF_RECORD()
220 name, evlist->workload.pid, sample.tid); in test__PERF_RECORD()
229 (pid_t)event->comm.pid != evlist->workload in test__PERF_RECORD()
[all...]
/linux/Documentation/accel/qaic/
aic100.rst
67 with the QSM. Except for workload data via the DMA Bridge, all interaction with
83 (Q6) DSP with HVX and HMX. Each NSP can only run one workload at a time, but
84 multiple NSPs may be assigned to a single workload. Since each NSP can only run
85 one workload, AIC100 is limited to 16 concurrent workloads. Workload
95 workload is assigned a single DMA Bridge channel. The DMA Bridge exposes
118 1. Compile the workload into an ELF targeting the NSP(s)
119 2. Make requests to the QSM to load the workload and related artifacts into the
121 3. Make a request to the QSM to activate the workload onto a set of idle NSPs
122 4. Make requests to the DMA Bridge to send input data to the workload to be
124 workload
[all...]
qaic.rst
18 if the workload is particularly quick, and the host is responsive. If the host
22 workload's ability to process inputs. The lprnet (license plate reader network)
23 workload is known to trigger this condition, and can generate in excess of 100k
32 sleep for a time to see if the workload will generate more activity. The IRQ
39 workload throughput performance (within run to run noise variation).
112 or receive data from a workload. The call will return a GEM handle that
123 get sent where to a workload. This requires a set of DMA transfers for the
166 workload should be allowed to interface with the DBC.
/linux/rust/pin-init/examples/
static_init.rs
93 let workload = 1_000; in main() localVariable
100 for _ in 0..workload { in main()
109 for _ in 0..workload { in main()
122 assert_eq!(*mtx.lock(), workload * thread_count * 2); in main()
pthread_mutex.rs
158 let workload = 1_000_000; in main() localVariable
165 for _ in 0..workload { in main()
170 for _ in 0..workload { in main()
182 assert_eq!(*mtx.lock(), workload * thread_count * 2); in main()
mutex.rs
199 let workload = if cfg!(miri) { 100 } else { 1_000 }; in main() localVariable
206 for _ in 0..workload { in main()
211 for _ in 0..workload { in main()
223 assert_eq!(*mtx.lock(), workload * thread_count * 2); in main()
/linux/Documentation/admin-guide/mm/
idle_page_tracking.rst
9 accessed by a workload and which are idle. This information can be useful for
10 estimating the workload's working set size, which, in turn, can be taken into
11 account when configuring the workload parameters, setting memory cgroup limits,
12 or deciding where to place the workload within a compute cluster.
51 workload one should:
53 1. Mark all the workload's pages as idle by setting corresponding bits in
55 ``/proc/pid/pagemap`` if the workload is represented by a process, or by
56 filtering out alien pages using ``/proc/kpagecgroup`` in case the workload
59 2. Wait until the workload accesses its working set.
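The steps quoted above (mark the workload's pages idle, let the workload run, then re-read the bitmap) are driven entirely from user space through /proc/<pid>/pagemap and /sys/kernel/mm/page_idle/bitmap. The following is a minimal C sketch of that flow for a single page; it is not part of the kernel tree, and the <pid> <vaddr> command line and the fixed 10-second wait are illustrative assumptions.

/*
 * Sketch only: assumes root privileges, CONFIG_IDLE_PAGE_TRACKING=y, and a
 * hypothetical <pid> <vaddr> pair passed on the command line.
 */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define PM_PRESENT	(1ULL << 63)		/* pagemap: page present       */
#define PM_PFN_MASK	((1ULL << 55) - 1)	/* pagemap: bits 0-54 are PFN  */

/* Translate one virtual page of the target process to a page frame number. */
static uint64_t va_to_pfn(int pagemap_fd, uint64_t va)
{
	uint64_t entry;
	off_t off = va / sysconf(_SC_PAGESIZE) * sizeof(entry);

	if (pread(pagemap_fd, &entry, sizeof(entry), off) != sizeof(entry))
		return 0;
	return (entry & PM_PRESENT) ? (entry & PM_PFN_MASK) : 0;
}

/* The idle bitmap is indexed by PFN and must be accessed in 8-byte chunks. */
static uint64_t idle_bit(int bitmap_fd, uint64_t pfn, int set)
{
	uint64_t word = 1ULL << (pfn % 64);
	off_t off = pfn / 64 * sizeof(word);

	if (set) {				/* set the Idle flag for this PFN */
		pwrite(bitmap_fd, &word, sizeof(word), off);
		return 0;
	}
	pread(bitmap_fd, &word, sizeof(word), off);
	return word & (1ULL << (pfn % 64));	/* still set => never accessed */
}

int main(int argc, char **argv)
{
	char path[64];
	uint64_t va, pfn;
	int pagemap_fd, bitmap_fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <pid> <vaddr>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "/proc/%s/pagemap", argv[1]);
	pagemap_fd = open(path, O_RDONLY);
	bitmap_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
	if (pagemap_fd < 0 || bitmap_fd < 0) {
		perror("open");
		return 1;
	}

	va = strtoull(argv[2], NULL, 0);
	pfn = va_to_pfn(pagemap_fd, va);
	if (!pfn) {
		fprintf(stderr, "page not present (or missing CAP_SYS_ADMIN)\n");
		return 1;
	}

	idle_bit(bitmap_fd, pfn, 1);		/* step 1: mark the page idle   */
	sleep(10);				/* step 2: let the workload run */

	printf("page at %#" PRIx64 " was %s\n", va,
	       idle_bit(bitmap_fd, pfn, 0) ? "idle (not accessed)" : "accessed");
	return 0;
}

A real tool would walk every mapping listed in /proc/<pid>/maps and batch the bitmap I/O, since a single 8-byte read or write already covers 64 page frames.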
/linux/tools/perf/tests/shell/lib/
perf_metric_validation.py
22 \tis {1} in workload(s): {2} \n\
31 \tis {1} in workload(s): {2}\n\
39 workload='true', metrics='', cputype='cpu'): argument
49 self.workloads = [x for x in workload.split(",") if x]
51 self.allresults = dict() # metric results of all workload
55 self.results = dict() # metric results of current workload
188 # The first round collect_perf() run these metrics with simple workload
189 # "true". We give metrics a second chance with a longer workload if less
380 def _run_perf(self, metric, workload: str):
383 wl = workload
[all...]
/linux/tools/perf/bench/
find-bit-bench.c
34 static noinline void workload(int val) in workload() function
82 workload(bit); in do_for_each_set_bit()
97 workload(bit); in do_for_each_set_bit()
/linux/tools/perf/Documentation/
perf-sched.txt
18 of an arbitrary workload.
21 and other scheduling properties of the workload.
43 'perf sched script' to see a detailed trace of the workload that
46 'perf sched replay' to simulate the workload that was recorded
48 that mimic the workload based on the events in the trace. These
50 of the workload as it occurred when it was recorded - and can repeat
54 workload captured via perf sched record. Columns stand for
237 repeat the workload n times (0: infinite). Default is 10.
perf-test.txt
57 --workload=::
58 Run a built-in workload, to list them use '--list-workloads', current ones include:
71 List the available workloads to use with -w/--workload.
/linux/Documentation/filesystems/nfs/
knfsd-stats.rst
54 Depending on the NFS workload patterns and various network stack
58 However this is a more accurate and less workload-dependent measure
74 pool for the NFS workload (the workload is thread-limited), in which
76 performance of the NFS workload.
93 threads configured than can be used by the NFS workload. This is
99 slow; the idle timeout is 60 minutes. Unless the NFS workload
/linux/Documentation/gpu/
drm-compute.rst
11 have their memory swapped out from them. Or they simply want their workload
19 waiting for the workload to complete. Effectively this terminates the workload
22 Since this is undesirable, there need to be mitigations to prevent a workload
/linux/drivers/gpu/drm/amd/pm/powerplay/hwmgr/
pp_psm.c
275 long workload[1]; in psm_adjust_power_state_dynamic() local
300 workload[0] = hwmgr->workload_setting[index]; in psm_adjust_power_state_dynamic()
302 if (hwmgr->power_profile_mode != workload[0] && hwmgr->hwmgr_func->set_power_profile_mode) in psm_adjust_power_state_dynamic()
303 hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0); in psm_adjust_power_state_dynamic()
