/linux/arch/arm64/kvm/
pvtime.c
    16   u64 base = vcpu->arch.steal.base;  in kvm_update_stolen_time()
    17   u64 last_steal = vcpu->arch.steal.last_steal;  in kvm_update_stolen_time()
    19   u64 steal = 0;  in kvm_update_stolen_time() local
    26   if (!kvm_get_guest(kvm, base + offset, steal)) {  in kvm_update_stolen_time()
    27           steal = le64_to_cpu(steal);  in kvm_update_stolen_time()
    28           vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);  in kvm_update_stolen_time()
    29           steal += vcpu->arch.steal.last_steal - last_steal;  in kvm_update_stolen_time()
    30           kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));  in kvm_update_stolen_time()
    43   if (vcpu->arch.steal.base != INVALID_GPA)  in kvm_hypercall_pv_features()
    55   u64 base = vcpu->arch.steal.base;  in kvm_init_stolen_time()
    [all …]
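The arm64 hits show the whole update cycle: read the guest's little-endian steal counter, add the growth of the scheduler's run_delay since the last update, and write it back. A minimal host-side sketch of that pattern, with plain variables standing in for guest memory and the le64 conversions (an illustration of the logic above, not the kernel code):

/* Sketch of the stolen-time read-modify-write in kvm_update_stolen_time():
 * the guest-visible counter only ever grows by run_delay deltas. */
#include <stdint.h>
#include <stdio.h>

static uint64_t guest_steal;   /* stand-in for the guest's steal field */
static uint64_t last_steal;    /* host-side snapshot of run_delay */

static void update_stolen_time(uint64_t run_delay)
{
    uint64_t steal = guest_steal;        /* kvm_get_guest() */

    steal += run_delay - last_steal;     /* fold in the new delta */
    last_steal = run_delay;              /* remember the snapshot */
    guest_steal = steal;                 /* kvm_put_guest() */
}

int main(void)
{
    update_stolen_time(1000);   /* 1000 ns spent runnable-but-preempted */
    update_stolen_time(2500);   /* another 1500 ns since the last update */
    printf("guest sees %llu ns stolen\n",
           (unsigned long long)guest_steal);   /* prints 2500 */
    return 0;
}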

/linux/arch/s390/kernel/
hiperdispatch.c
    201  static unsigned long steal;  in hd_steal_avg() local
    203  steal = (steal * (HD_STEAL_AVG_WEIGHT - 1) + new) / HD_STEAL_AVG_WEIGHT;  in hd_steal_avg()
    204  return steal;  in hd_steal_avg()
    209  unsigned long time_delta, steal_delta, steal, percentage;  in hd_calculate_steal_percentage() local
    215  steal = 0;  in hd_calculate_steal_percentage()
    218  steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];  in hd_calculate_steal_percentage()
    229  if (steal > hd_previous_steal && hd_previous_steal != 0) {  in hd_calculate_steal_percentage()
    230          steal_delta = (steal - hd_previous_steal) * 100 / time_delta;  in hd_calculate_steal_percentage()
    233  hd_previous_steal = steal;  in hd_calculate_steal_percentage()
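Line 203 is a textbook exponentially weighted moving average: each sample moves the average by 1/HD_STEAL_AVG_WEIGHT of the difference. A standalone sketch; the weight of 8 is an assumed value, since the hits do not show the real HD_STEAL_AVG_WEIGHT:

/* Sketch of the moving average on line 203: each call folds one new
 * sample into the running average with weight 1/W. */
#include <stdio.h>

#define W 8   /* assumption; HD_STEAL_AVG_WEIGHT's value is not shown above */

static unsigned long steal_avg(unsigned long new)
{
    static unsigned long steal;

    steal = (steal * (W - 1) + new) / W;
    return steal;
}

int main(void)
{
    /* A step input converges toward 800 at roughly 1/W per call. */
    for (int i = 0; i < 5; i++)
        printf("%lu\n", steal_avg(800));   /* 100, 187, 263, ... */
    return 0;
}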

vtime.c
    204  u64 steal, avg_steal;  in vtime_flush() local
    209  steal = lc->steal_timer;  in vtime_flush()
    211  if ((s64) steal > 0) {  in vtime_flush()
    213          account_steal_time(cputime_to_nsecs(steal));  in vtime_flush()
    214          avg_steal += steal;  in vtime_flush()

/linux/drivers/media/pci/ivtv/
ivtv-queue.c
    110  int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal,  in ivtv_queue_move() argument
    126  bytes_steal = (from_free && steal) ? steal->length : 0;  in ivtv_queue_move()
    132  while (steal && bytes_available < needed_bytes) {  in ivtv_queue_move()
    133          struct ivtv_buffer *buf = list_entry(steal->list.prev, struct ivtv_buffer, list);  in ivtv_queue_move()
    141          list_move_tail(steal->list.prev, &from->list);  in ivtv_queue_move()
    143          steal->buffers--;  in ivtv_queue_move()
    144          steal->length -= s->buf_size;  in ivtv_queue_move()
    145          steal->bytesused -= buf->bytesused - buf->readpos;  in ivtv_queue_move()
    150          if (list_empty(&steal->list))  in ivtv_queue_move()
    152          buf = list_entry(steal->list.prev, struct ivtv_buffer, list);  in ivtv_queue_move()
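The loop at lines 132-145 tops up the source queue by pulling buffers off the tail of the victim ("steal") queue until the byte budget is met, keeping both queues' counters consistent. A simplified sketch of that bookkeeping (the queue type here is a stand-in, not the driver's struct):

/* Sketch of the borrowing loop in ivtv_queue_move(): while the source
 * cannot satisfy needed bytes, take one buffer at a time from the
 * victim queue and credit it to the source. */
#include <stdio.h>

struct queue {
    int buffers;
    long length;   /* total bytes of buffer space in the queue */
};

static long borrow_until(struct queue *from, struct queue *steal,
                         long available, long needed, long buf_size)
{
    while (steal->buffers > 0 && available < needed) {
        steal->buffers--;            /* list_move_tail() of one buffer */
        steal->length -= buf_size;
        from->buffers++;
        from->length += buf_size;
        available += buf_size;
    }
    return available;
}

int main(void)
{
    struct queue from = { 2, 2 * 4096 }, steal = { 8, 8 * 4096 };
    long avail = borrow_until(&from, &steal, 2 * 4096, 5 * 4096, 4096);

    printf("available %ld, victim queue now holds %d buffers\n",
           avail, steal.buffers);   /* 20480, 5 */
    return 0;
}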

ivtv-queue.h
    63   int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal,

/linux/arch/riscv/kernel/
paravirt.c
    95   __le64 steal;  in pv_time_steal_clock() local
    104  steal = READ_ONCE(st->steal);  in pv_time_steal_clock()
    109  return le64_to_cpu(steal);  in pv_time_steal_clock()
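The hits elide the retry loop around line 104: the hypervisor updates the shared record concurrently, so the reader spins on a sequence counter whose odd values mark an update in flight. A sketch of that reader, assuming a sequence field next to steal as in the SBI STA layout; the C11 memory orders here approximate, not reproduce, the kernel's virt_rmb() barriers:

/* Sketch of a sequence-counter read: retry while the writer's counter
 * is odd (update in progress) or changed across the data load. */
#include <stdatomic.h>
#include <stdint.h>

struct sta_shmem {
    _Atomic uint32_t sequence;
    _Atomic uint64_t steal;
};

uint64_t read_steal(struct sta_shmem *st)
{
    uint32_t seq;
    uint64_t steal;

    do {
        seq = atomic_load_explicit(&st->sequence, memory_order_acquire);
        steal = atomic_load_explicit(&st->steal, memory_order_acquire);
    } while ((seq & 1) ||
             seq != atomic_load_explicit(&st->sequence, memory_order_relaxed));

    return steal;
}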

/linux/kernel/sched/
cputime.c
    258  u64 steal;  in steal_account_process_time() local
    260  steal = paravirt_steal_clock(smp_processor_id());  in steal_account_process_time()
    261  steal -= this_rq()->prev_steal_time;  in steal_account_process_time()
    262  steal = min(steal, maxtime);  in steal_account_process_time()
    263  account_steal_time(steal);  in steal_account_process_time()
    264  this_rq()->prev_steal_time += steal;  in steal_account_process_time()
    266  return steal;  in steal_account_process_time()
    477  u64 cputime, steal;  in account_process_tick() local
    488  steal = steal_account_process_time(ULONG_MAX);  in account_process_tick()
    490  if (steal >= cputime)  in account_process_tick()
    [all …]
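steal_account_process_time() turns the hypervisor's absolute, monotonic counter into a per-call delta, clamps it to the time currently being accounted, and advances prev_steal_time only by what it consumed, so any excess carries into the next tick. A runnable sketch (steal_clock() is a hypothetical stand-in for the paravirt hook):

/* Sketch of the delta-and-clamp logic in steal_account_process_time(). */
#include <stdint.h>
#include <stdio.h>

static uint64_t prev_steal_time;

static uint64_t steal_clock(void)   /* hypothetical pv hook */
{
    static uint64_t t;
    return t += 3000;               /* pretend 3000 ns stolen per call */
}

static uint64_t account_steal(uint64_t maxtime)
{
    uint64_t steal = steal_clock() - prev_steal_time;

    if (steal > maxtime)
        steal = maxtime;            /* min(steal, maxtime) */
    prev_steal_time += steal;       /* leave the excess for the next tick */
    return steal;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)account_steal(2500)); /* 2500, clamped */
    printf("%llu\n", (unsigned long long)account_steal(9999)); /* 3500, carried over */
    return 0;
}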

/linux/fs/proc/
stat.c
    85   u64 user, nice, system, idle, iowait, irq, softirq, steal;  in show_stat() local
    93   irq = softirq = steal = 0;  in show_stat()
    112  steal += cpustat[CPUTIME_STEAL];  in show_stat()
    134  seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));  in show_stat()
    153  steal = cpustat[CPUTIME_STEAL];  in show_stat()
    164  seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));  in show_stat()
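show_stat() emits steal as the eighth numeric column of each cpu line, converted to clock ticks by nsec_to_clock_t(). A userspace sketch that reads the aggregate value back and scales it to seconds; error handling is minimal:

/* Sketch: read the aggregate steal field (8th column) from /proc/stat
 * and convert ticks to seconds with the clock-tick rate. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    unsigned long long user, nice, system, idle, iowait, irq, softirq, steal;
    FILE *f = fopen("/proc/stat", "r");

    if (!f || fscanf(f, "cpu %llu %llu %llu %llu %llu %llu %llu %llu",
                     &user, &nice, &system, &idle, &iowait,
                     &irq, &softirq, &steal) != 8)
        return 1;
    fclose(f);
    printf("steal: %.2f s\n", (double)steal / sysconf(_SC_CLK_TCK));
    return 0;
}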

/linux/arch/riscv/kvm/
vcpu_sbi_sta.c
    34   u64 steal;  in kvm_riscv_vcpu_record_steal_time() local
    57   offsetof(struct sbi_sta_struct, steal));  in kvm_riscv_vcpu_record_steal_time()
    69   steal = le64_to_cpu(steal_le);  in kvm_riscv_vcpu_record_steal_time()
    71   steal += vcpu->arch.sta.last_steal - last_steal;  in kvm_riscv_vcpu_record_steal_time()
    72   WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));  in kvm_riscv_vcpu_record_steal_time()

/linux/tools/testing/selftests/kvm/
steal_time.c
    53   WRITE_ONCE(guest_stolen_time[cpu], st->steal);  in guest_code()
    60   WRITE_ONCE(guest_stolen_time[cpu], st->steal);  in guest_code()
    90   ksft_print_msg(" steal: %lld\n", st->steal);  in steal_time_dump()
    218  uint64_t steal;  member
    257  WRITE_ONCE(guest_stolen_time[cpu], st->steal);  in guest_code()
    264  WRITE_ONCE(guest_stolen_time[cpu], st->steal);  in guest_code()
    296  pr_info(" steal: %"PRIu64"\n", st->steal);  in steal_time_dump()
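The selftest's guest publishes st->steal into guest_stolen_time[] and the host asserts it grew by at least the run delay it induced. A compilable sketch of that property, with stubs standing in for the harness's run_vcpu() and run-delay helpers (both hypothetical here):

/* Sketch of the invariant the selftest checks: stolen time reported to
 * the guest must cover the run delay the host observed for the vCPU. */
#include <assert.h>
#include <stdint.h>

static uint64_t guest_stolen_time;   /* written by guest_code() */

static void run_vcpu(void)          { guest_stolen_time += 1200; } /* stub */
static uint64_t get_run_delay(void) { return 1000; }               /* stub */

int main(void)
{
    uint64_t before = guest_stolen_time;
    uint64_t delay = get_run_delay();   /* host-side run_delay snapshot */

    run_vcpu();                         /* guest republishes st->steal */
    assert(guest_stolen_time - before >= delay);
    return 0;
}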

/linux/arch/loongarch/kernel/
paravirt.c
    36   u64 steal;  in paravt_steal_clock() local
    44   steal = src->steal;  in paravt_steal_clock()
    49   return steal;  in paravt_steal_clock()

/linux/arch/x86/kernel/cpu/
vmware.c
    229  struct vmware_steal_time *steal = &per_cpu(vmw_steal_time, cpu);  in vmware_steal_clock() local
    233  clock = READ_ONCE(steal->clock);  in vmware_steal_clock()
    238  initial_high = READ_ONCE(steal->clock_high);  in vmware_steal_clock()
    241  low = READ_ONCE(steal->clock_low);  in vmware_steal_clock()
    244  high = READ_ONCE(steal->clock_high);  in vmware_steal_clock()
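On 32-bit configurations the 64-bit steal clock cannot be loaded atomically, so lines 238-244 read high, low, then high again and retry if the two high halves differ: a mismatch means the low word may belong to either epoch. A sketch of that torn-read guard:

/* Sketch of the high/low/high guard for reading a 64-bit counter as
 * two 32-bit halves without tearing. */
#include <stdatomic.h>
#include <stdint.h>

struct split_clock {
    _Atomic uint32_t low;
    _Atomic uint32_t high;
};

uint64_t read_split(struct split_clock *c)
{
    uint32_t initial_high, low, high;

    do {
        initial_high = atomic_load(&c->high);
        low = atomic_load(&c->low);
        high = atomic_load(&c->high);
    } while (initial_high != high);   /* high word moved: retry */

    return ((uint64_t)high << 32) | low;
}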

/linux/kernel/bpf/
bpf_lru_list.c
    434  int steal, first_steal;  in bpf_common_lru_pop_free() local
    465  steal = first_steal;  in bpf_common_lru_pop_free()
    467  steal_loc_l = per_cpu_ptr(clru->local_list, steal);  in bpf_common_lru_pop_free()
    477  steal = cpumask_next_wrap(steal, cpu_possible_mask);  in bpf_common_lru_pop_free()
    478  } while (!node && steal != first_steal);  in bpf_common_lru_pop_free()
    480  loc_l->next_steal = steal;  in bpf_common_lru_pop_free()
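When the local free list is empty, the loop at lines 465-480 scans the other CPUs' lists starting from where the previous steal stopped, wrapping with cpumask_next_wrap(), and saves the cursor so successive steals rotate their victims. A sketch with a plain modulo wrap and a hypothetical try_pop() in place of the list operations:

/* Sketch of the rotating steal scan in bpf_common_lru_pop_free():
 * start at the remembered CPU, wrap once around all CPUs, persist the
 * cursor so the next allocation resumes at the last victim. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static int next_steal;                          /* per-allocator cursor */
static int free_count[NR_CPUS] = { 0, 0, 2, 1 };

static bool try_pop(int cpu)                    /* stand-in for a list pop */
{
    if (!free_count[cpu])
        return false;
    free_count[cpu]--;
    return true;
}

static int steal_one(void)
{
    int first = next_steal, cpu = first;
    bool got = false;

    do {
        got = try_pop(cpu);
        if (got)
            break;
        cpu = (cpu + 1) % NR_CPUS;              /* cpumask_next_wrap() */
    } while (cpu != first);

    next_steal = cpu;                           /* resume here next time */
    return got ? cpu : -1;
}

int main(void)
{
    int a = steal_one(), b = steal_one(), c = steal_one();

    printf("%d %d %d\n", a, b, c);              /* prints: 2 2 3 */
    return 0;
}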

/linux/drivers/gpu/drm/radeon/
radeon_object.c
    540  int steal;  in radeon_bo_get_surface_reg() local
    553  steal = -1;  in radeon_bo_get_surface_reg()
    562  steal = i;  in radeon_bo_get_surface_reg()
    567  if (steal == -1)  in radeon_bo_get_surface_reg()
    570  reg = &rdev->surface_regs[steal];  in radeon_bo_get_surface_reg()
    573  DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);  in radeon_bo_get_surface_reg()
    576  i = steal;  in radeon_bo_get_surface_reg()
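radeon_bo_get_surface_reg() scans the fixed register file for a free slot while remembering a stealable occupant, and only evicts if the scan finds nothing free. A sketch of that scan-then-steal shape (the victim-eligibility test is simplified here; the driver's is stricter):

/* Sketch of the find-free-else-steal scan: a free slot wins outright,
 * otherwise fall back to the remembered victim and evict its owner. */
#include <stddef.h>

#define NR_REGS 8

static void *owner[NR_REGS];   /* NULL means the register is free */

int get_surface_reg(void *bo)
{
    int i, steal = -1;

    for (i = 0; i < NR_REGS; i++) {
        if (!owner[i])
            break;             /* free slot wins */
        steal = i;             /* remember a candidate victim */
    }
    if (i == NR_REGS) {
        if (steal == -1)
            return -1;         /* nothing free, nothing stealable */
        i = steal;             /* evict the old owner */
    }
    owner[i] = bo;
    return i;
}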

/linux/arch/x86/kernel/
kvm.c
    407  u64 steal;  in kvm_steal_clock() local
    415  steal = src->steal;  in kvm_steal_clock()
    419  return steal;  in kvm_steal_clock()

/linux/arch/x86/include/uapi/asm/
kvm_para.h
    63   __u64 steal;  member

/linux/Documentation/translations/zh_CN/admin-guide/
cpu-load.rst
    12   avg-cpu: %user %nice %system %iowait %steal %idle

/linux/Documentation/translations/zh_TW/admin-guide/
cpu-load.rst
    18   avg-cpu: %user %nice %system %iowait %steal %idle

/linux/arch/loongarch/include/asm/
kvm_para.h
    37   __u64 steal;  member

/linux/arch/loongarch/kvm/
vcpu.c
    162  u64 steal;  in kvm_update_stolen_time() local
    191  unsafe_get_user(steal, &st->steal, out);  in kvm_update_stolen_time()
    192  steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;  in kvm_update_stolen_time()
    194  unsafe_put_user(steal, &st->steal, out);  in kvm_update_stolen_time()
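The LoongArch update wraps both user accesses in one user_access_begin() window, so the unsafe_ accessors skip per-access checks and branch to a shared label on fault. A kernel-style sketch of that shape; the function and its arguments are illustrative, only the accessor API itself is the kernel's:

/* Sketch of the faultable section around lines 191-194: one
 * user_access_begin() window covers both accesses, and any fault
 * jumps to the shared out: label. Kernel-style pseudo-context. */
static int update_steal(struct kvm_steal_time __user *st, u64 run_delay,
                        u64 *last_steal)
{
    u64 steal;

    if (!user_access_begin(st, sizeof(*st)))
        return -EFAULT;

    unsafe_get_user(steal, &st->steal, out);
    steal += run_delay - *last_steal;   /* fold in the new delta */
    *last_steal = run_delay;
    unsafe_put_user(steal, &st->steal, out);
    user_access_end();
    return 0;

out:
    user_access_end();
    return -EFAULT;
}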

/linux/fs/btrfs/
space-info.h
    230  bool steal;  member

/linux/Documentation/admin-guide/
cpu-load.rst
    12   avg-cpu: %user %nice %system %iowait %steal %idle

/linux/Documentation/virt/kvm/x86/
cpuid.rst
    58   KVM_FEATURE_STEAL_TIME  5  steal time can be enabled by

msr.rst
    268  __u64 steal;
    295  steal:
    298      reported as steal time.
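For reference, the record these fields live in: a sketch of the 64-byte kvm_steal_time layout as declared in the x86 uapi header that also appears in these results. The hits above show only the steal member; treat the remaining fields and padding sizes as an assumption:

/* Guest-visible steal-time record, 64 bytes; the even/odd version
 * field is what kvm_steal_clock() spins on while the host updates
 * the record. */
struct kvm_steal_time {
    __u64 steal;       /* nanoseconds of involuntary wait (line 268) */
    __u32 version;     /* odd while an update is in progress */
    __u32 flags;
    __u8  preempted;   /* KVM_VCPU_PREEMPTED etc. */
    __u8  u8_pad[3];
    __u32 pad[11];     /* pads the record to 64 bytes */
};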

/linux/lib/raid6/
altivec.uc
    21   * you can just "steal" the vec unit with enable_kernel_altivec() (but