/linux/arch/x86/lib/

  delay.c
     37  static void (*delay_halt_fn)(u64 start, u64 cycles) __ro_after_init;
     63  static void delay_tsc(u64 cycles)   [in delay_tsc() argument]
     73  if ((now - bclock) >= cycles)   [in delay_tsc()]
     91  cycles -= (now - bclock);   [in delay_tsc()]
    105  static void delay_halt_tpause(u64 start, u64 cycles)   [in delay_halt_tpause() argument]
    107  u64 until = start + cycles;   [in delay_halt_tpause()]
    122  * counts with TSC frequency. The input value is the number of TSC cycles
    125  static void delay_halt_mwaitx(u64 unused, u64 cycles)   [in delay_halt_mwaitx() argument]
    129  delay = min_t(u64, MWAITX_MAX_WAIT_CYCLES, cycles);   [in delay_halt_mwaitx()]
    151  u64 start, end, cycles   [in delay_halt() local]
    [all...]
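The snippets above all implement busy-wait delays measured in TSC cycles. As a rough user-space sketch of the same pattern (not the kernel code itself; it assumes a compiler that provides __rdtsc() and omits the CPU-migration handling that delay_tsc() does):

  #include <stdint.h>
  #include <x86intrin.h>

  /* Spin until the elapsed TSC count reaches the requested number of cycles. */
  static void delay_cycles(uint64_t cycles)
  {
          uint64_t start = __rdtsc();

          while (__rdtsc() - start < cycles)
                  __builtin_ia32_pause();   /* PAUSE hint while spinning */
  }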
/linux/tools/perf/dlfilters/

  dlfilter-show-cycles.c
      3  * dlfilter-show-cycles.c: Print the number of cycles at the start of each line
     19  static __u64 cycles[MAX_CPU][MAX_ENTRY];   [variable]
     30  __u64 cycles[MAX_ENTRY];   [member]
     77  e->cycles[pos] += cnt;   [in add_entry()]
     92  cycles[cpu][pos] += sample->cyc_cnt;   [in filter_event_early()]
     98  static void print_vals(__u64 cycles, __u64 delta)   [in print_vals() argument]
    101  printf("%10llu %10llu ", (unsigned long long)cycles, (unsigned long long)delta);   [in print_vals()]
    103  printf("%10llu %10s ", (unsigned long long)cycles, "");   [in print_vals()]
    115  print_vals(cycles[cp   [in filter_event()]
    [all...]
/linux/drivers/memory/

  jz4780-nemc.c
    162  uint32_t smcr, val, cycles;   [in jz4780_nemc_configure_bank() local]
    171  /* 11 - 12 -> 12 cycles */   [in jz4780_nemc_configure_bank()]
    174  /* 13 - 15 -> 15 cycles */   [in jz4780_nemc_configure_bank()]
    177  /* 16 - 20 -> 20 cycles */   [in jz4780_nemc_configure_bank()]
    180  /* 21 - 25 -> 25 cycles */   [in jz4780_nemc_configure_bank()]
    183  /* 26 - 31 -> 31 cycles */   [in jz4780_nemc_configure_bank()]
    208  cycles = jz4780_nemc_ns_to_cycles(nemc, val);   [in jz4780_nemc_configure_bank()]
    209  if (cycles > nemc->soc_info->tas_tah_cycles_max) {   [in jz4780_nemc_configure_bank()]
    210  dev_err(nemc->dev, "tAS %u is too high (%u cycles)\n",   [in jz4780_nemc_configure_bank()]
    211  val, cycles);   [in jz4780_nemc_configure_bank()]
    [all...]
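jz4780_nemc_configure_bank() converts each nanosecond timing parameter into NEMC clock cycles and then rounds or range-checks it. A minimal sketch of that conversion, assuming the controller clock rate is known; the helper name and parameters are illustrative, not the driver's API:

  #include <stdint.h>

  /* cycles = ceil(ns * clk_rate / 1e9); rounding up keeps the programmed
   * timing at least as long as requested.
   */
  static uint32_t ns_to_cycles(uint32_t ns, uint32_t clk_rate_hz)
  {
          return (uint32_t)(((uint64_t)ns * clk_rate_hz + 999999999u) / 1000000000u);
  }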
/linux/drivers/gpu/drm/i915/gt/

  selftest_gt_pm.c
     56  u32 cycles[5];   [in measure_clocks() local]
     61  cycles[i] = -read_timestamp(engine);   [in measure_clocks()]
     66  cycles[i] += read_timestamp(engine);   [in measure_clocks()]
     72  sort(cycles, 5, sizeof(*cycles), cmp_u32, NULL);   [in measure_clocks()]
     73  *out_cycles = (cycles[1] + 2 * cycles[2] + cycles[3]) / 4;   [in measure_clocks()]
     99  u32 cycles;   [in live_gt_clocks() local]
    107  measure_clocks(engine, &cycles,   [in live_gt_clocks()]
    [all...]
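measure_clocks() filters its five timestamp deltas by sorting them, discarding the fastest and slowest, and averaging the middle three with the median weighted twice. A standalone sketch of that filter in plain C, using qsort() instead of the kernel's sort():

  #include <stdint.h>
  #include <stdlib.h>

  static int cmp_u32(const void *a, const void *b)
  {
          uint32_t x = *(const uint32_t *)a, y = *(const uint32_t *)b;

          return (x > y) - (x < y);
  }

  /* Outlier-rejecting average of five samples. */
  static uint32_t filtered_cycles(uint32_t samples[5])
  {
          qsort(samples, 5, sizeof(*samples), cmp_u32);
          return (samples[1] + 2 * samples[2] + samples[3]) / 4;
  }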
/linux/tools/perf/Documentation/

  intel-hybrid.txt
     45  For example, count the 'cycles' event on core cpus.
     47  perf stat -e cpu_core/cycles/
     56  For hardware events, they have pre-defined configs (e.g. 0 for cycles).
     84  perf stat -e cycles -a (use system-wide in this example), two events
    115  The kernel creates 'cycles' (0x400000000) on cpu0-cpu15 (core cpus),
    116  and create 'cycles' (0x800000000) on cpu16-cpu23 (atom cpus).
    122  6,744,979 cpu_core/cycles/
    123  1,965,552 cpu_atom/cycles/
    125  The first 'cycles' is core event, the second 'cycles' i
    [all...]
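The configs 0x400000000 and 0x800000000 quoted above are the generic hardware 'cycles' event (config 0) with the hybrid PMU's type id placed in the upper 32 bits of perf_event_attr.config. A sketch of how such an attribute could be filled in; the type ids 4 (cpu_core) and 8 (cpu_atom) are the values from this example system, not fixed constants:

  #include <linux/perf_event.h>
  #include <stdint.h>
  #include <string.h>

  static void hybrid_cycles_attr(struct perf_event_attr *attr, uint32_t pmu_type)
  {
          memset(attr, 0, sizeof(*attr));
          attr->size = sizeof(*attr);
          attr->type = PERF_TYPE_HARDWARE;
          /* PMU type id in bits 63:32, generic event id in bits 31:0. */
          attr->config = ((uint64_t)pmu_type << 32) | PERF_COUNT_HW_CPU_CYCLES;
  }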
  perf-daemon.txt
     32  916507 916508 ... \_ perf record --control=fifo:control,ack -m 10M -e cycles --overwrite --switch-output -a
    120  [session-cycles]
    121  run = -m 10M -e cycles --overwrite --switch-output -a
    136  [603350:cycles] perf record -m 10M -e cycles --overwrite --switch-output -a
    149  [603350:cycles] perf record -m 10M -e cycles --overwrite --switch-output -a
    150  base: /opt/perfdata/session-cycles
    151  output: /opt/perfdata/session-cycles/output
    152  control: /opt/perfdata/session-cycles/contro
    [all...]
  perf.data-directory-format.txt
     51  Samples for 'cycles' event do not have CPU attribute set. Skipping 'cpu' field.
     55  perf 15316 2060795.480902: 1 cycles: ffffffffa2caa548 native_write_msr+0x8 (vmlinux)
     56  perf 15316 2060795.480906: 1 cycles: ffffffffa2caa548 native_write_msr+0x8 (vmlinux)
     57  perf 15316 2060795.480908: 7 cycles: ffffffffa2caa548 native_write_msr+0x8 (vmlinux)
     58  perf 15316 2060795.480910: 119 cycles: ffffffffa2caa54a native_write_msr+0xa (vmlinux)
     59  perf 15316 2060795.480912: 2109 cycles: ffffffffa2c9b7b0 native_apic_msr_write+0x0 (vmlinux)
     60  perf 15316 2060795.480914: 37606 cycles: ffffffffa2f121fe perf_event_addr_filters_exec+0x2e (vmlinux)
     61  uname 15316 2060795.480924: 588287 cycles: ffffffffa303a56d page_counter_try_charge+0x6d (vmlinux)
     62  uname 15316 2060795.481067: 2261945 cycles: ffffffffa301438f kmem_cache_free+0x4f (vmlinux)
     63  uname 15316 2060795.481643: 2172167 cycles
    [all...]
/linux/Documentation/devicetree/bindings/mtd/

  fsmc-nand.txt
     15  byte 0 TCLR : CLE to RE delay in number of AHB clock cycles, only 4 bits
     17  cycles.
     19  byte 2 THIZ : number of HCLK clock cycles during which the data bus is
     21  Only valid for write transactions. Zero means zero cycles,
     22  255 means 255 cycles.
     23  byte 3 THOLD : number of HCLK clock cycles to hold the address (and data
     25  one cycle, 255 means 256 cycles.
     26  byte 4 TWAIT : number of HCLK clock cycles to assert the command to the
     28  255 means 256 cycles.
     29  byte 5 TSET : number of HCLK clock cycles t
    [all...]
/linux/drivers/net/ethernet/mellanox/mlx4/

  en_clock.c
     44  container_of(tc, struct mlx4_en_dev, cycles);   [in mlx4_en_read_clock()]
    139  mdev->cycles.mult = mult;   [in mlx4_en_phc_adjfine()]
    208  timecounter_init(&mdev->clock, &mdev->cycles, ns);   [in mlx4_en_phc_settime()]
    247  * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
    275  memset(&mdev->cycles, 0, sizeof(mdev->cycles));   [in mlx4_en_init_timestamp()]
    276  mdev->cycles.read = mlx4_en_read_clock;   [in mlx4_en_init_timestamp()]
    277  mdev->cycles.mask = CLOCKSOURCE_MASK(48);   [in mlx4_en_init_timestamp()]
    278  mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);   [in mlx4_en_init_timestamp()]
    279  mdev->cycles   [in mlx4_en_init_timestamp()]
    [all...]
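The fields initialized above (read, mask, shift and mult) describe a cyclecounter: the free-running hardware counter is masked to its width (48 bits here) and scaled to nanoseconds as (cycles * mult) >> shift, with the timecounter layer accumulating the result across counter wrap-arounds. A standalone sketch of just the scaling step; picking mult and shift so the multiplication cannot overflow 64 bits over one wrap-around interval is what freq_to_shift() above is concerned with:

  #include <stdint.h>

  static uint64_t cycles_to_ns(uint64_t cycles, uint64_t mask,
                               uint32_t mult, uint32_t shift)
  {
          return ((cycles & mask) * mult) >> shift;
  }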
/linux/drivers/net/wireless/ath/

  hw.c
    144  u32 cycles, busy, rx, tx;   [in ath_hw_cycle_counters_update() local]
    151  cycles = REG_READ(ah, AR_CCCNT);   [in ath_hw_cycle_counters_update()]
    166  common->cc_ani.cycles += cycles;   [in ath_hw_cycle_counters_update()]
    171  common->cc_survey.cycles += cycles;   [in ath_hw_cycle_counters_update()]
    183  listen_time = (cc->cycles - cc->rx_frame - cc->tx_frame) /   [in ath_hw_get_listen_time()]
/linux/arch/arm64/lib/

  delay.c
     26  void __delay(unsigned long cycles)   [in __delay() argument]
     31  u64 end = start + cycles;   [in __delay()]
     38  while ((get_cycles() - start) < cycles)   [in __delay()]
     44  while ((get_cycles() - start + timer_evt_period) < cycles)   [in __delay()]
     48  while ((get_cycles() - start) < cycles)   [in __delay()]
/linux/drivers/pwm/

  pwm-berlin.c
     81  u64 cycles;   [in berlin_pwm_config() local]
     83  cycles = clk_get_rate(bpc->clk);   [in berlin_pwm_config()]
     84  cycles *= period_ns;   [in berlin_pwm_config()]
     85  do_div(cycles, NSEC_PER_SEC);   [in berlin_pwm_config()]
     87  if (cycles > BERLIN_PWM_MAX_TCNT) {   [in berlin_pwm_config()]
     89  cycles >>= 12; // Prescaled by 4096   [in berlin_pwm_config()]
     91  if (cycles > BERLIN_PWM_MAX_TCNT)   [in berlin_pwm_config()]
     95  period = cycles;   [in berlin_pwm_config()]
     96  cycles *= duty_ns;   [in berlin_pwm_config()]
     97  do_div(cycles, period_n   [in berlin_pwm_config()]
    [all...]
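berlin_pwm_config() turns the requested period into input-clock cycles and falls back to a divide-by-4096 prescaler when the result does not fit the counter. A sketch of that calculation, where PWM_MAX_TCNT stands in for the hardware counter limit and the helper is illustrative rather than the driver's API:

  #include <stdint.h>

  #define NSEC_PER_SEC  1000000000ULL
  #define PWM_MAX_TCNT  0xffffffffULL   /* assumed counter limit */

  static int period_to_tcnt(uint64_t clk_rate_hz, uint64_t period_ns,
                            uint64_t *tcnt, int *prescale_4096)
  {
          uint64_t cycles = clk_rate_hz * period_ns / NSEC_PER_SEC;

          *prescale_4096 = 0;
          if (cycles > PWM_MAX_TCNT) {
                  cycles >>= 12;          /* prescale by 4096 */
                  *prescale_4096 = 1;
                  if (cycles > PWM_MAX_TCNT)
                          return -1;      /* period not achievable */
          }
          *tcnt = cycles;
          return 0;
  }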
  pwm-xilinx.c
     35  u64 cycles)   [in xilinx_timer_tlr_cycles() argument]
     37  WARN_ON(cycles < 2 || cycles - 2 > priv->max);   [in xilinx_timer_tlr_cycles()]
     40  return cycles - 2;   [in xilinx_timer_tlr_cycles()]
     41  return priv->max - cycles + 2;   [in xilinx_timer_tlr_cycles()]
     47  u64 cycles;   [in xilinx_timer_get_period() local]
     50  cycles = tlr + 2;   [in xilinx_timer_get_period()]
     52  cycles = (u64)priv->max - tlr + 2;   [in xilinx_timer_get_period()]
     54  /* cycles has a max of 2^32 + 2, so we can't overflow */   [in xilinx_timer_get_period()]
     55  return DIV64_U64_ROUND_UP(cycles * NSEC_PER_SE   [in xilinx_timer_get_period()]
    [all...]
  pwm-atmel.c
    186  unsigned long long cycles = state->period;   [in atmel_pwm_calculate_cprd_and_pres() local]
    189  /* Calculate the period cycles and prescale value */   [in atmel_pwm_calculate_cprd_and_pres()]
    190  cycles *= clkrate;   [in atmel_pwm_calculate_cprd_and_pres()]
    191  do_div(cycles, NSEC_PER_SEC);   [in atmel_pwm_calculate_cprd_and_pres()]
    195  * So for each bit the number of clock cycles is wider divide the input   [in atmel_pwm_calculate_cprd_and_pres()]
    198  shift = fls(cycles) - atmel_pwm->data->cfg.period_bits;   [in atmel_pwm_calculate_cprd_and_pres()]
    205  cycles >>= *pres;   [in atmel_pwm_calculate_cprd_and_pres()]
    210  *cprd = cycles;   [in atmel_pwm_calculate_cprd_and_pres()]
    219  unsigned long long cycles = state->duty_cycle;   [in atmel_pwm_calculate_cdty() local]
    221  cycles *   [in atmel_pwm_calculate_cdty()]
    [all...]
/linux/Documentation/driver-api/mtd/

  spi-nor.rst
     83  mode cycles 0
     84  dummy cycles 0
     87  mode cycles 0
     88  dummy cycles 8
     91  mode cycles 0
     92  dummy cycles 8
     95  mode cycles 4
     96  dummy cycles 0
     99  mode cycles 0
    100  dummy cycles
    [all...]
/linux/arch/xtensa/include/asm/

  delay.h
     25  /* 2 cycles per loop. */   [in __delay()]
     40  unsigned long cycles = (usecs * (ccount_freq >> 15)) >> 5;   [in __udelay() local]
     43  while (((unsigned long)get_ccount()) - start < cycles)   [in __udelay()]
     61  unsigned long cycles = (nsec * (ccount_freq >> 15)) >> 15;   [in __ndelay() local]
     62  __delay(cycles);   [in __ndelay()]
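__udelay() above avoids a runtime divide by treating 2^20 (= 2^15 * 2^5) as an approximation of 10^6: the cycle count is computed as usecs * (ccount_freq >> 15) >> 5 instead of usecs * ccount_freq / 1000000, which comes out a few percent low but needs only shifts and one multiply. A worked form of that scaling, assuming the frequency is in Hz:

  #include <stdint.h>

  /* Approximates usecs * freq / 1000000 as usecs * (freq / 2^15) / 2^5,
   * i.e. division by 1048576 rather than 1000000.
   */
  static uint32_t usecs_to_cycles(uint32_t usecs, uint32_t ccount_freq_hz)
  {
          return (usecs * (ccount_freq_hz >> 15)) >> 5;
  }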
/linux/tools/testing/selftests/kvm/include/riscv/

  arch_timer.h
     22  #define cycles_to_usec(cycles) \   [argument]
     23  ((uint64_t)(cycles) * 1000000 / (timer_freq))
     58  static inline void __delay(uint64_t cycles)   [in __delay() argument]
     62  while ((timer_get_cycles() - start) < cycles)   [in __delay()]
/linux/tools/perf/tests/shell/

  stat+shadow_stat.sh
     13  perf stat -a -e cycles sleep 1 2>&1 | grep -e cpu_core && exit 2
     17  perf stat -a --no-big-num -e cycles,instructions sleep 1 2>&1 | \
     18  grep -e cycles -e instructions | \
     26  # save cycles count
     27  if [ "$evt" = "cycles" ]; then
     32  # skip if no cycles
     56  perf stat -a -A --no-big-num -e cycles,instructions sleep 1 2>&1 | \
     65  # save cycles count
     66  if [ "$evt" = "cycles" ]; then
     74  # skip if no cycles
    [all...]
/linux/tools/virtio/ringtest/

  main.h
     21  static inline void wait_cycles(unsigned long long cycles)   [in wait_cycles() argument]
     26  while (__rdtsc() - t < cycles) {}   [in wait_cycles()]
     33  static inline void wait_cycles(unsigned long long cycles)   [in wait_cycles() argument]
     35  asm volatile("0: brctg %0,0b" : : "d" (cycles));   [in wait_cycles()]
     43  static inline void wait_cycles(unsigned long long cycles)   [in wait_cycles() argument]
/linux/Documentation/arch/m68k/

  buddha-driver.rst
    147  497ns Select (7 clock cycles), IOR/IOW after 172ns (2 clock cycles)
    152  639ns Select (9 clock cycles), IOR/IOW after 243ns (3 clock cycles)
    155  781ns Select (11 clock cycles), IOR/IOW after 314ns (4 clock cycles)
    158  355ns Select (5 clock cycles), IOR/IOW after 101ns (1 clock cycle)
    161  355ns Select (5 clock cycles), IOR/IOW after 172ns (2 clock cycles)
    164  355ns Select (5 clock cycles), IO
    [all...]
/linux/tools/power/cpupower/bench/

  benchmark.c
     93  total_time *= 2; /* powersave and performance cycles */   [in start_benchmark()]
    112  printf("_round %i: doing %u cycles with %u calculations"   [in start_benchmark()]
    113  " for %lius\n", _round + 1, config->cycles,   [in start_benchmark()]
    124  /* do some sleep/load cycles with the performance governor */   [in start_benchmark()]
    125  for (cycle = 0; cycle < config->cycles; cycle++) {   [in start_benchmark()]
    139  performance_time / config->cycles);   [in start_benchmark()]
    149  /* again, do some sleep/load cycles with the   [in start_benchmark()]
    151  for (cycle = 0; cycle < config->cycles; cycle++) {   [in start_benchmark()]
    167  /* compare the average sleep/load cycles */   [in start_benchmark()]
    169  powersave_time / config->cycles);   [in start_benchmark()]
    [all...]
/linux/drivers/char/hw_random/

  cavium-rng-vf.c
     85  u64 status, cycles;   [in check_rng_health() local]
     99  cycles = status >> 1;   [in check_rng_health()]
    100  if (!cycles)   [in check_rng_health()]
    106  * Number of coprocessor cycles times 2 since the last failure.   [in check_rng_health()]
    109  cycles = cycles / 2;   [in check_rng_health()]
    110  cur_err = (cycles * 1000000000) / rng->clock_rate; /* In nanosec */   [in check_rng_health()]
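check_rng_health() extracts a cycle count from the status word and converts it into an elapsed time. A sketch of only the arithmetic the lines above show: drop the low status bit, halve the count (it is stored as cycles times two since the last failure), then scale by 1e9 / clock_rate to get nanoseconds; the helper name is illustrative, not the driver's:

  #include <stdint.h>

  static uint64_t failure_age_ns(uint64_t status, uint64_t clock_rate_hz)
  {
          uint64_t cycles = (status >> 1) / 2;   /* stored as 2 * cycles */

          return cycles * 1000000000ULL / clock_rate_hz;
  }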
/linux/arch/m68k/coldfire/

  sltimers.c
     98  u32 cycles, scnt;   [in mcfslt_read_clk() local]
    102  cycles = mcfslt_cnt;   [in mcfslt_read_clk()]
    104  cycles += mcfslt_cycles_per_jiffy;   [in mcfslt_read_clk()]
    110  return cycles + ((mcfslt_cycles_per_jiffy - 1) - scnt);   [in mcfslt_read_clk()]
    130  * n cycles, initialize STCNT with n - 1.   [in hw_timer_init()]
/linux/tools/perf/tests/shell/attr/

  README
     52  perf record -e '{cycles,instructions}' kill   (test-record-group1)
     53  perf record -e '{cycles/period=1/,instructions/period=2/}:S' kill   (test-record-group2)
     54  perf record -e '{cycles,cache-misses}:S' kill   (test-record-group-sampling1)
     55  perf record -c 10000 -e '{cycles,cache-misses}:S' kill   (test-record-group-sampling2)
     60  perf record -c 1 --pfm-events=cycles:period=2   (test-record-pfm-period)
     65  perf stat -e cycles kill   (test-stat-basic)
     70  perf stat -e '{cycles,instructions}' kill   (test-stat-group1)
     71  perf stat -i -e cycles kill   (test-stat-no-inherit)
/linux/kernel/locking/

  test-ww_mutex.c
    319  struct test_cycle *cycles;   [in __test_cycle() local]
    323  cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);   [in __test_cycle()]
    324  if (!cycles)   [in __test_cycle()]
    328  struct test_cycle *cycle = &cycles[n];   [in __test_cycle()]
    332  cycle->b_mutex = &cycles[0].a_mutex;   [in __test_cycle()]
    334  cycle->b_mutex = &cycles[n + 1].a_mutex;   [in __test_cycle()]
    337  cycle->a_signal = &cycles[last].b_signal;   [in __test_cycle()]
    339  cycle->a_signal = &cycles[n - 1].b_signal;   [in __test_cycle()]
    347  queue_work(wq, &cycles[   [in __test_cycle()]
    [all...]