Lines matching defs:ptq: definitions and uses of struct intel_pt_queue *ptq in perf's Intel PT trace decoder (intel-pt.c)
354 struct intel_pt_queue *ptq = data;
355 struct intel_pt *pt = ptq->pt;
402 static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
410 int fd = perf_data__fd(ptq->pt->session->data);
417 might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
419 intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
442 static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
445 if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
455 struct intel_pt_queue *ptq = data;
456 struct auxtrace_buffer *buffer = ptq->buffer;
457 struct auxtrace_buffer *old_buffer = ptq->old_buffer;
461 queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
470 err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
475 intel_pt_lookahead_drop_buffer(ptq, old_buffer);
478 intel_pt_lookahead_drop_buffer(ptq, buffer);
488 intel_pt_lookahead_drop_buffer(ptq, buffer);
489 intel_pt_lookahead_drop_buffer(ptq, old_buffer);
500 struct intel_pt_queue *ptq = data;
501 struct auxtrace_buffer *buffer = ptq->buffer;
502 struct auxtrace_buffer *old_buffer = ptq->old_buffer;
506 if (ptq->stop) {
511 queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
521 ptq->buffer = buffer;
523 err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
527 if (ptq->step_through_buffers)
528 ptq->stop = true;
533 ptq->old_buffer = buffer;
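Note: lines 500-533 are the decoder's get-next-buffer callback: advance to the following AUX buffer, optionally stop after each buffer in step-through mode, and remember the previous buffer so overlapping data can be detected. A minimal sketch of that pattern, using hypothetical types rather than perf's struct auxtrace_buffer:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for perf's auxtrace buffer list. */
struct tbuf {
        struct tbuf *next;
        const void *data;
        size_t len;
};

struct tcursor {
        struct tbuf *buffer;            /* buffer currently being decoded */
        struct tbuf *old_buffer;        /* kept for overlap trimming */
        bool step_through_buffers;      /* yield after every buffer */
        bool stop;
};

/* Advance to the next buffer; returns 0 on success, -1 when exhausted. */
static int tcursor_next(struct tcursor *c)
{
        struct tbuf *next = c->buffer ? c->buffer->next : NULL;

        if (!next)
                return -1;
        c->buffer = next;
        if (c->step_through_buffers)
                c->stop = true;         /* caller re-enters decode later */
        c->old_buffer = next;           /* becomes "old" on the next call */
        return 0;
}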
675 static inline u8 intel_pt_nr_cpumode(struct intel_pt_queue *ptq, uint64_t ip, bool nr)
683 return ip >= ptq->pt->kernel_start ?
688 static inline u8 intel_pt_cpumode(struct intel_pt_queue *ptq, uint64_t from_ip, uint64_t to_ip)
692 return intel_pt_nr_cpumode(ptq, from_ip, ptq->state->from_nr);
693 return intel_pt_nr_cpumode(ptq, to_ip, ptq->state->to_nr);
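Note: intel_pt_nr_cpumode() (lines 675-693) classifies an instruction pointer by comparing it with the kernel start address, with the guest variants selected when the decoder's from_nr/to_nr bits indicate VMX non-root mode. A self-contained sketch of that classification, assuming only the PERF_RECORD_MISC_* constants from the perf_event UAPI header:

#include <linux/perf_event.h>   /* PERF_RECORD_MISC_* */
#include <stdbool.h>
#include <stdint.h>

/* Sketch: kernel vs user is decided purely by ip >= kernel_start;
 * the guest variants apply when the IP ran in non-root mode. */
static inline uint8_t ip_cpumode(uint64_t ip, uint64_t kernel_start, bool guest)
{
        if (guest)
                return ip >= kernel_start ? PERF_RECORD_MISC_GUEST_KERNEL
                                          : PERF_RECORD_MISC_GUEST_USER;
        return ip >= kernel_start ? PERF_RECORD_MISC_KERNEL
                                  : PERF_RECORD_MISC_USER;
}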
696 static int intel_pt_get_guest(struct intel_pt_queue *ptq)
698 struct machines *machines = &ptq->pt->session->machines;
700 pid_t pid = ptq->pid <= 0 ? DEFAULT_GUEST_KERNEL_ID : ptq->pid;
702 if (ptq->guest_machine && pid == ptq->guest_machine->pid)
705 ptq->guest_machine = NULL;
706 thread__zput(ptq->unknown_guest_thread);
709 thread__zput(ptq->guest_thread);
710 ptq->guest_thread = machines__findnew_guest_code(machines, pid);
717 ptq->unknown_guest_thread = machine__idle_thread(machine);
718 if (!ptq->unknown_guest_thread)
721 ptq->guest_machine = machine;
753 struct intel_pt_queue *ptq = data;
754 struct machine *machine = ptq->pt->machine;
774 nr = ptq->state->to_nr;
775 cpumode = intel_pt_nr_cpumode(ptq, *ip, nr);
778 if (ptq->pt->have_guest_sideband) {
779 if (!ptq->guest_machine || ptq->guest_machine_pid != ptq->pid) {
785 intel_pt_get_guest(ptq)) {
790 machine = ptq->guest_machine;
791 thread = ptq->guest_thread;
798 thread = ptq->unknown_guest_thread;
801 thread = ptq->thread;
808 thread = ptq->pt->unknown_thread;
987 struct intel_pt_queue *ptq = data;
994 if (ptq->state->to_nr) {
996 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
999 } else if (ip >= ptq->pt->kernel_start) {
1000 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
1005 thread = ptq->thread;
1015 res = intel_pt_match_pgd_ip(ptq->pt, ip, offset, dso__long_name(map__dso(al.map)));
1298 struct intel_pt_queue *ptq;
1300 ptq = zalloc(sizeof(struct intel_pt_queue));
1301 if (!ptq)
1305 ptq->chain = intel_pt_alloc_chain(pt);
1306 if (!ptq->chain)
1313 ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt);
1314 if (!ptq->last_branch)
1318 ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
1319 if (!ptq->event_buf)
1322 ptq->pt = pt;
1323 ptq->queue_nr = queue_nr;
1324 ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
1325 ptq->pid = -1;
1326 ptq->tid = -1;
1327 ptq->cpu = -1;
1328 ptq->next_tid = -1;
1334 params.data = ptq;
1386 ptq->decoder = intel_pt_decoder_new(&params);
1387 if (!ptq->decoder)
1390 return ptq;
1393 zfree(&ptq->event_buf);
1394 zfree(&ptq->last_branch);
1395 zfree(&ptq->chain);
1396 free(ptq);
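Note: intel_pt_alloc_queue() (lines 1298-1396) uses the classic zalloc-plus-goto-unwind idiom: each allocation failure jumps to a label that releases everything allocated so far. A compact sketch of the same idiom, with hypothetical member names and placeholder sizes:

#include <stdlib.h>

struct queue {
        void *chain;            /* callchain scratch space */
        void *last_branch;      /* branch-stack scratch space */
        void *event_buf;        /* buffer for synthesized events */
};

static struct queue *queue_alloc(size_t event_buf_sz)
{
        struct queue *q = calloc(1, sizeof(*q));    /* zalloc() equivalent */

        if (!q)
                return NULL;
        q->chain = malloc(64);                      /* placeholder size */
        if (!q->chain)
                goto out_free;
        q->last_branch = malloc(64);                /* placeholder size */
        if (!q->last_branch)
                goto out_free;
        q->event_buf = malloc(event_buf_sz);
        if (!q->event_buf)
                goto out_free;
        return q;

out_free:
        /* free(NULL) is a no-op, so partial allocation is safe here. */
        free(q->event_buf);
        free(q->last_branch);
        free(q->chain);
        free(q);
        return NULL;
}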
1402 struct intel_pt_queue *ptq = priv;
1404 if (!ptq)
1406 thread__zput(ptq->thread);
1407 thread__zput(ptq->guest_thread);
1408 thread__zput(ptq->unknown_guest_thread);
1409 intel_pt_decoder_free(ptq->decoder);
1410 zfree(&ptq->event_buf);
1411 zfree(&ptq->last_branch);
1412 zfree(&ptq->chain);
1413 free(ptq);
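Note: intel_pt_free_queue() (lines 1402-1413) relies on two idioms: thread__zput(), which drops a reference on a refcounted thread and NULLs the pointer, and zfree(), which frees and NULLs. A sketch of both with a hypothetical refcounted type:

#include <stdlib.h>

/* Hypothetical refcounted object standing in for struct thread. */
struct obj { int refcnt; };

static void obj_put(struct obj *o)
{
        if (o && --o->refcnt == 0)
                free(o);
}

/* Release and NULL the pointer so a stale cached reference can never
 * be used again, the same intent as thread__zput() and zfree(). */
#define obj_zput(p)  do { obj_put(p); (p) = NULL; } while (0)
#define zfree_ptr(p) do { free(*(p)); *(p) = NULL; } while (0)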
1424 struct intel_pt_queue *ptq = queue->priv;
1426 if (ptq && ptq->decoder)
1427 intel_pt_set_first_timestamp(ptq->decoder, timestamp);
1431 static int intel_pt_get_guest_from_sideband(struct intel_pt_queue *ptq)
1433 struct machines *machines = &ptq->pt->session->machines;
1435 pid_t machine_pid = ptq->pid;
1446 if (ptq->guest_machine != machine) {
1447 ptq->guest_machine = NULL;
1448 thread__zput(ptq->guest_thread);
1449 thread__zput(ptq->unknown_guest_thread);
1451 ptq->unknown_guest_thread = machine__find_thread(machine, 0, 0);
1452 if (!ptq->unknown_guest_thread)
1454 ptq->guest_machine = machine;
1457 vcpu = ptq->thread ? thread__guest_cpu(ptq->thread) : -1;
1463 if (ptq->guest_thread && thread__tid(ptq->guest_thread) != tid)
1464 thread__zput(ptq->guest_thread);
1466 if (!ptq->guest_thread) {
1467 ptq->guest_thread = machine__find_thread(machine, -1, tid);
1468 if (!ptq->guest_thread)
1472 ptq->guest_machine_pid = machine_pid;
1473 ptq->guest_pid = thread__pid(ptq->guest_thread);
1474 ptq->guest_tid = tid;
1475 ptq->vcpu = vcpu;
1483 struct intel_pt_queue *ptq = queue->priv;
1486 ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
1487 if (ptq->tid == -1)
1488 ptq->pid = -1;
1489 thread__zput(ptq->thread);
1492 if (!ptq->thread && ptq->tid != -1)
1493 ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);
1495 if (ptq->thread) {
1496 ptq->pid = thread__pid(ptq->thread);
1498 ptq->cpu = thread__cpu(ptq->thread);
1501 if (pt->have_guest_sideband && intel_pt_get_guest_from_sideband(ptq)) {
1502 ptq->guest_machine_pid = 0;
1503 ptq->guest_pid = -1;
1504 ptq->guest_tid = -1;
1505 ptq->vcpu = -1;
1509 static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
1511 struct intel_pt *pt = ptq->pt;
1513 ptq->insn_len = 0;
1514 if (ptq->state->flags & INTEL_PT_ABORT_TX) {
1515 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
1516 } else if (ptq->state->flags & INTEL_PT_ASYNC) {
1517 if (!ptq->state->to_ip)
1518 ptq->flags = PERF_IP_FLAG_BRANCH |
1521 else if (ptq->state->from_nr && !ptq->state->to_nr)
1522 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
1526 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
1530 if (ptq->state->from_ip)
1531 ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
1533 ptq->flags = PERF_IP_FLAG_BRANCH |
1535 if (ptq->state->flags & INTEL_PT_IN_TX)
1536 ptq->flags |= PERF_IP_FLAG_IN_TX;
1537 ptq->insn_len = ptq->state->insn_len;
1538 memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
1541 if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
1542 ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
1543 if (ptq->state->type & INTEL_PT_TRACE_END)
1544 ptq->flags |= PERF_IP_FLAG_TRACE_END;
1547 if (ptq->state->type & INTEL_PT_IFLAG_CHG) {
1548 if (!ptq->state->from_iflag)
1549 ptq->flags |= PERF_IP_FLAG_INTR_DISABLE;
1550 if (ptq->state->from_iflag != ptq->state->to_iflag)
1551 ptq->flags |= PERF_IP_FLAG_INTR_TOGGLE;
1552 } else if (!ptq->state->to_iflag) {
1553 ptq->flags |= PERF_IP_FLAG_INTR_DISABLE;
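Note: intel_pt_sample_flags() (lines 1509-1553) folds decoder state into a flags bitmask: a base branch type first, then transaction, trace-begin/end, and interrupt-flag bits are ORed on top. A reduced sketch of that layering; the flag values below are illustrative, not perf's actual PERF_IP_FLAG_* definitions:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative bits only; perf defines the real PERF_IP_FLAG_* values. */
#define F_BRANCH        (1u << 0)
#define F_IN_TX         (1u << 1)
#define F_TRACE_BEGIN   (1u << 2)
#define F_TRACE_END     (1u << 3)
#define F_INTR_DISABLE  (1u << 4)

struct dstate {
        bool in_tx, trace_begin, trace_end, to_iflag;
};

static uint32_t sample_flags(const struct dstate *s)
{
        uint32_t flags = F_BRANCH;              /* base type */

        if (s->in_tx)
                flags |= F_IN_TX;               /* inside a transaction */
        if (s->trace_begin)
                flags |= F_TRACE_BEGIN;
        if (s->trace_end)
                flags |= F_TRACE_END;
        if (!s->to_iflag)
                flags |= F_INTR_DISABLE;        /* interrupts off at target */
        return flags;
}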
1559 struct intel_pt_queue *ptq)
1564 ptq->sel_timestamp = pt->time_ranges[0].start;
1565 ptq->sel_idx = 0;
1567 if (ptq->sel_timestamp) {
1568 ptq->sel_start = true;
1570 ptq->sel_timestamp = pt->time_ranges[0].end;
1571 ptq->sel_start = false;
1579 struct intel_pt_queue *ptq = queue->priv;
1584 if (!ptq) {
1585 ptq = intel_pt_alloc_queue(pt, queue_nr);
1586 if (!ptq)
1588 queue->priv = ptq;
1591 ptq->cpu = queue->cpu;
1592 ptq->tid = queue->tid;
1594 ptq->cbr_seen = UINT_MAX;
1598 ptq->step_through_buffers = true;
1600 ptq->sync_switch = pt->sync_switch;
1602 intel_pt_setup_time_range(pt, ptq);
1605 if (!ptq->on_heap &&
1606 (!ptq->sync_switch ||
1607 ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
1616 queue_nr, ptq->cpu, ptq->pid, ptq->tid);
1618 if (ptq->sel_start && ptq->sel_timestamp) {
1619 ret = intel_pt_fast_forward(ptq->decoder,
1620 ptq->sel_timestamp);
1626 state = intel_pt_decode(ptq->decoder);
1639 ptq->timestamp = state->timestamp;
1641 queue_nr, ptq->timestamp);
1642 ptq->state = state;
1643 ptq->have_sample = true;
1644 if (ptq->sel_start && ptq->sel_timestamp &&
1645 ptq->timestamp < ptq->sel_timestamp)
1646 ptq->have_sample = false;
1647 intel_pt_sample_flags(ptq);
1648 ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
1651 ptq->on_heap = true;
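Note: lines 1626-1651 decode until the queue yields its first timestamp, then insert the queue into a min-heap keyed by that timestamp so all per-CPU queues can be merged in time order. perf uses a real heap (auxtrace_heap); this linear-scan sketch only shows the ordering idea:

#include <stddef.h>
#include <stdint.h>

struct q {
        uint64_t next_ts;       /* timestamp of the queue's next sample */
        int exhausted;
};

/* Pick the queue whose next sample is earliest; NULL when all done. */
static struct q *pick_next(struct q *qs, size_t n)
{
        struct q *best = NULL;

        for (size_t i = 0; i < n; i++) {
                if (qs[i].exhausted)
                        continue;
                if (!best || qs[i].next_ts < best->next_ts)
                        best = &qs[i];
        }
        return best;
}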
1687 static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
1694 sample->pid = ptq->pid;
1695 sample->tid = ptq->tid;
1697 if (ptq->pt->have_guest_sideband) {
1698 if ((ptq->state->from_ip && ptq->state->from_nr) ||
1699 (ptq->state->to_ip && ptq->state->to_nr)) {
1700 sample->pid = ptq->guest_pid;
1701 sample->tid = ptq->guest_tid;
1702 sample->machine_pid = ptq->guest_machine_pid;
1703 sample->vcpu = ptq->vcpu;
1707 sample->cpu = ptq->cpu;
1708 sample->insn_len = ptq->insn_len;
1709 memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
1713 struct intel_pt_queue *ptq,
1717 intel_pt_prep_a_sample(ptq, event, sample);
1720 sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
1722 sample->ip = ptq->state->from_ip;
1723 sample->addr = ptq->state->to_ip;
1724 sample->cpumode = intel_pt_cpumode(ptq, sample->ip, sample->addr);
1726 sample->flags = ptq->flags;
1765 static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
1767 struct intel_pt *pt = ptq->pt;
1768 union perf_event *event = ptq->event_buf;
1777 if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
1784 intel_pt_prep_b_sample(pt, ptq, event, &sample);
1786 sample.id = ptq->pt->branches_id;
1787 sample.stream_id = ptq->pt->branches_id;
1805 if (ptq->sample_ipc)
1806 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
1808 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
1809 ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
1810 ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
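Note: the branch sampler (lines 1805-1810), like the instruction and cycle samplers below it, reports IPC as deltas of running counters: subtract the totals recorded at the previous sample, then advance them. A sketch of that bookkeeping with hypothetical field names:

#include <stdint.h>

struct ipc_state {
        uint64_t insn_cnt, cyc_cnt;             /* running totals */
        uint64_t last_insn_cnt, last_cyc_cnt;   /* totals at last sample */
};

/* Emit per-sample instruction and cycle counts as deltas. */
static void sample_ipc(struct ipc_state *s, uint64_t *insns, uint64_t *cycles)
{
        *insns  = s->insn_cnt - s->last_insn_cnt;
        *cycles = s->cyc_cnt  - s->last_cyc_cnt;
        s->last_insn_cnt = s->insn_cnt;
        s->last_cyc_cnt  = s->cyc_cnt;
}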
1820 struct intel_pt_queue *ptq,
1824 intel_pt_prep_b_sample(pt, ptq, event, sample);
1827 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
1830 sample->callchain = ptq->chain;
1834 thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
1836 sample->branch_stack = ptq->last_branch;
1840 static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
1842 struct intel_pt *pt = ptq->pt;
1843 union perf_event *event = ptq->event_buf;
1851 intel_pt_prep_sample(pt, ptq, event, &sample);
1853 sample.id = ptq->pt->instructions_id;
1854 sample.stream_id = ptq->pt->instructions_id;
1858 sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
1860 if (ptq->sample_ipc)
1861 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
1863 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
1864 ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
1865 ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
1868 ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
1876 static int intel_pt_synth_cycle_sample(struct intel_pt_queue *ptq)
1878 struct intel_pt *pt = ptq->pt;
1879 union perf_event *event = ptq->event_buf;
1884 if (ptq->sample_ipc)
1885 period = ptq->ipc_cyc_cnt - ptq->last_cy_cyc_cnt;
1891 intel_pt_prep_sample(pt, ptq, event, &sample);
1893 sample.id = ptq->pt->cycles_id;
1894 sample.stream_id = ptq->pt->cycles_id;
1898 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_cy_insn_cnt;
1899 ptq->last_cy_insn_cnt = ptq->ipc_insn_cnt;
1900 ptq->last_cy_cyc_cnt = ptq->ipc_cyc_cnt;
1907 static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
1909 struct intel_pt *pt = ptq->pt;
1910 union perf_event *event = ptq->event_buf;
1918 intel_pt_prep_sample(pt, ptq, event, &sample);
1920 sample.id = ptq->pt->transactions_id;
1921 sample.stream_id = ptq->pt->transactions_id;
1930 struct intel_pt_queue *ptq,
1934 intel_pt_prep_sample(pt, ptq, event, sample);
1944 static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
1946 struct intel_pt *pt = ptq->pt;
1947 union perf_event *event = ptq->event_buf;
1954 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1956 sample.id = ptq->pt->ptwrites_id;
1957 sample.stream_id = ptq->pt->ptwrites_id;
1960 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1961 raw.payload = cpu_to_le64(ptq->state->ptw_payload);
1970 static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
1972 struct intel_pt *pt = ptq->pt;
1973 union perf_event *event = ptq->event_buf;
1982 ptq->cbr_seen = ptq->state->cbr;
1985 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1987 sample.id = ptq->pt->cbr_id;
1988 sample.stream_id = ptq->pt->cbr_id;
1990 flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
2004 static int intel_pt_synth_psb_sample(struct intel_pt_queue *ptq)
2006 struct intel_pt *pt = ptq->pt;
2007 union perf_event *event = ptq->event_buf;
2016 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2018 sample.id = ptq->pt->psb_id;
2019 sample.stream_id = ptq->pt->psb_id;
2023 raw.offset = ptq->state->psb_offset;
2034 static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
2036 struct intel_pt *pt = ptq->pt;
2037 union perf_event *event = ptq->event_buf;
2046 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2048 sample.id = ptq->pt->mwait_id;
2049 sample.stream_id = ptq->pt->mwait_id;
2052 raw.payload = cpu_to_le64(ptq->state->mwait_payload);
2063 static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
2065 struct intel_pt *pt = ptq->pt;
2066 union perf_event *event = ptq->event_buf;
2075 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2077 sample.id = ptq->pt->pwre_id;
2078 sample.stream_id = ptq->pt->pwre_id;
2081 raw.payload = cpu_to_le64(ptq->state->pwre_payload);
2092 static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
2094 struct intel_pt *pt = ptq->pt;
2095 union perf_event *event = ptq->event_buf;
2104 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2106 sample.id = ptq->pt->exstop_id;
2107 sample.stream_id = ptq->pt->exstop_id;
2110 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
2121 static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
2123 struct intel_pt *pt = ptq->pt;
2124 union perf_event *event = ptq->event_buf;
2133 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2135 sample.id = ptq->pt->pwrx_id;
2136 sample.stream_id = ptq->pt->pwrx_id;
2139 raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
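Note: the ptwrite/cbr/psb/mwait/pwre/exstop/pwrx synthesizers (lines 1944-2139) all share one shape: prepare a generic sample, set id and stream_id, fill a small raw record from decoder state, and attach it as the sample's raw data. A simplified sketch with stand-in types (the real code also converts payloads with cpu_to_le64()):

#include <stdint.h>

struct raw_payload {
        uint32_t reserved;
        uint64_t payload;
};

struct psample {
        uint64_t id, stream_id;
        const void *raw_data;
        uint32_t raw_size;
};

/* Attach a decoder payload to a synthesized sample. */
static void synth_payload_sample(struct psample *s, uint64_t id,
                                 struct raw_payload *raw, uint64_t payload)
{
        raw->payload = payload;         /* endian conversion omitted */
        s->id = id;
        s->stream_id = id;
        s->raw_data = raw;
        s->raw_size = sizeof(*raw);
}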
2415 static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel,
2418 const struct intel_pt_blk_items *items = &ptq->state->items;
2420 union perf_event *event = ptq->event_buf;
2421 struct intel_pt *pt = ptq->pt;
2431 intel_pt_prep_a_sample(ptq, event, &sample);
2445 sample.ip = ptq->state->from_ip;
2447 cpumode = intel_pt_cpumode(ptq, sample.ip, 0);
2459 timestamp = ptq->timestamp;
2466 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
2469 sample.callchain = ptq->chain;
2493 intel_pt_add_lbrs(ptq->last_branch, items);
2495 thread_stack__br_sample(ptq->thread, ptq->cpu,
2496 ptq->last_branch,
2499 ptq->last_branch->nr = 0;
2501 sample.branch_stack = ptq->last_branch;
2565 static int intel_pt_synth_single_pebs_sample(struct intel_pt_queue *ptq)
2567 struct intel_pt *pt = ptq->pt;
2572 return intel_pt_do_synth_pebs_sample(ptq, evsel, id, data_src_fmt);
2575 static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
2577 const struct intel_pt_blk_items *items = &ptq->state->items;
2579 struct intel_pt *pt = ptq->pt;
2586 return intel_pt_synth_single_pebs_sample(ptq);
2590 pe = &ptq->pebs[hw_id];
2595 return intel_pt_synth_single_pebs_sample(ptq);
2597 err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id, pe->data_src_fmt);
2605 static int intel_pt_synth_events_sample(struct intel_pt_queue *ptq)
2607 struct intel_pt *pt = ptq->pt;
2608 union perf_event *event = ptq->event_buf;
2620 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2622 sample.id = ptq->pt->evt_id;
2623 sample.stream_id = ptq->pt->evt_id;
2625 raw.cfe.type = ptq->state->cfe_type;
2627 raw.cfe.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
2628 raw.cfe.vector = ptq->state->cfe_vector;
2629 raw.cfe.evd_cnt = ptq->state->evd_cnt;
2631 for (i = 0; i < ptq->state->evd_cnt; i++) {
2633 raw.evd[i].evd_type = ptq->state->evd[i].type;
2634 raw.evd[i].payload = ptq->state->evd[i].payload;
2638 ptq->state->evd_cnt * sizeof(struct perf_synth_intel_evd);
2647 static int intel_pt_synth_iflag_chg_sample(struct intel_pt_queue *ptq)
2649 struct intel_pt *pt = ptq->pt;
2650 union perf_event *event = ptq->event_buf;
2659 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2661 sample.id = ptq->pt->iflag_chg_id;
2662 sample.stream_id = ptq->pt->iflag_chg_id;
2665 raw.iflag = ptq->state->to_iflag;
2667 if (ptq->state->type & INTEL_PT_BRANCH) {
2669 raw.branch_ip = ptq->state->to_ip;
2673 sample.flags = ptq->flags;
2727 static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
2730 struct intel_pt *pt = ptq->pt;
2731 u64 tm = ptq->timestamp;
2733 pid_t pid = ptq->pid;
2734 pid_t tid = ptq->tid;
2740 machine_pid = ptq->guest_machine_pid;
2741 vcpu = ptq->vcpu;
2742 pid = ptq->guest_pid;
2743 tid = ptq->guest_tid;
2746 return intel_pt_synth_error(pt, state->err, ptq->cpu, pid, tid,
2750 static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
2753 pid_t tid = ptq->next_tid;
2759 intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
2761 err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
2763 queue = &pt->queues.queue_array[ptq->queue_nr];
2766 ptq->next_tid = -1;
2771 static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
2773 struct intel_pt *pt = ptq->pt;
2776 (ptq->flags & PERF_IP_FLAG_BRANCH) &&
2777 !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
2784 static int intel_pt_sample(struct intel_pt_queue *ptq)
2786 const struct intel_pt_state *state = ptq->state;
2787 struct intel_pt *pt = ptq->pt;
2790 if (!ptq->have_sample)
2793 ptq->have_sample = false;
2796 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
2797 ptq->ipc_cyc_cnt = ptq->state->cycles;
2798 ptq->sample_ipc = true;
2800 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
2801 ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
2802 ptq->sample_ipc = ptq->state->flags & INTEL_PT_SAMPLE_IPC;
2807 intel_pt_get_guest(ptq);
2814 err = intel_pt_synth_pebs_sample(ptq);
2821 err = intel_pt_synth_events_sample(ptq);
2826 err = intel_pt_synth_iflag_chg_sample(ptq);
2834 err = intel_pt_synth_psb_sample(ptq);
2838 if (ptq->state->cbr != ptq->cbr_seen) {
2839 err = intel_pt_synth_cbr_sample(ptq);
2845 err = intel_pt_synth_mwait_sample(ptq);
2850 err = intel_pt_synth_pwre_sample(ptq);
2855 err = intel_pt_synth_exstop_sample(ptq);
2860 err = intel_pt_synth_pwrx_sample(ptq);
2869 err = intel_pt_synth_instruction_sample(ptq);
2874 err = intel_pt_synth_cycle_sample(ptq);
2881 err = intel_pt_synth_transaction_sample(ptq);
2887 err = intel_pt_synth_ptwrite_sample(ptq);
2896 thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
2897 state->from_ip, state->to_ip, ptq->insn_len,
2902 thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
2917 err = intel_pt_synth_branch_sample(ptq);
2922 err = intel_pt_synth_branch_sample(ptq);
2925 err = intel_pt_synth_branch_sample(ptq);
2931 if (!ptq->sync_switch)
2934 if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
2935 switch (ptq->switch_state) {
2939 err = intel_pt_next_tid(pt, ptq);
2942 ptq->switch_state = INTEL_PT_SS_TRACING;
2945 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
2949 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2950 } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
2951 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2952 } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2954 (ptq->flags & PERF_IP_FLAG_CALL)) {
2955 ptq->switch_state = INTEL_PT_SS_TRACING;
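Note: lines 2931-2955 are the sync_switch state machine: when the decoder reaches the known context-switch IP, the queue either performs the pending thread switch and resumes tracing, or starts waiting for the matching switch event. A hedged sketch of just the switch-IP transitions visible above (the full machine has more states and side effects):

enum switch_state {
        SS_NOT_TRACING,
        SS_UNKNOWN,
        SS_TRACING,
        SS_EXPECTING_SWITCH_EVENT,
        SS_EXPECTING_SWITCH_IP,
};

/* Transition taken when the switch IP is hit; *do_next_tid tells the
 * caller to switch the queue to its pending next_tid. */
static enum switch_state on_switch_ip(enum switch_state ss, int *do_next_tid)
{
        *do_next_tid = 0;
        switch (ss) {
        case SS_NOT_TRACING:
        case SS_UNKNOWN:
        case SS_EXPECTING_SWITCH_IP:
                *do_next_tid = 1;
                return SS_TRACING;
        default:
                return SS_EXPECTING_SWITCH_EVENT;
        }
}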
3024 struct intel_pt_queue *ptq = queue->priv;
3026 if (ptq)
3027 ptq->sync_switch = true;
3039 struct intel_pt_queue *ptq = queue->priv;
3041 if (ptq) {
3042 ptq->sync_switch = false;
3043 intel_pt_next_tid(pt, ptq);
3052 static bool intel_pt_next_time(struct intel_pt_queue *ptq)
3054 struct intel_pt *pt = ptq->pt;
3056 if (ptq->sel_start) {
3058 ptq->sel_start = false;
3059 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
3061 } else if (ptq->sel_idx + 1 < pt->range_cnt) {
3063 ptq->sel_start = true;
3064 ptq->sel_idx += 1;
3065 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
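Note: intel_pt_next_time() (lines 3052-3065) steps through the configured [start, end) time ranges: sel_start says whether the next boundary of interest is a range start or a range end. A direct sketch of that stepping, with hypothetical struct names:

#include <stdbool.h>
#include <stdint.h>

struct range { uint64_t start, end; };

struct tf {
        const struct range *ranges;
        int cnt, idx;
        bool at_start;          /* next boundary is ranges[idx].start */
        uint64_t sel_ts;        /* timestamp of that boundary */
};

/* Advance to the next range boundary; false when no ranges remain. */
static bool next_boundary(struct tf *t)
{
        if (t->at_start) {
                t->at_start = false;            /* now wait for range end */
                t->sel_ts = t->ranges[t->idx].end;
                return true;
        }
        if (t->idx + 1 < t->cnt) {
                t->at_start = true;             /* move to next range */
                t->idx++;
                t->sel_ts = t->ranges[t->idx].start;
                return true;
        }
        return false;
}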
3073 static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
3078 if (ptq->sel_start) {
3079 if (ptq->timestamp >= ptq->sel_timestamp) {
3081 intel_pt_next_time(ptq);
3082 if (!ptq->sel_timestamp) {
3090 ptq->have_sample = false;
3091 if (ptq->sel_timestamp > *ff_timestamp) {
3092 if (ptq->sync_switch) {
3093 intel_pt_next_tid(ptq->pt, ptq);
3094 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
3096 *ff_timestamp = ptq->sel_timestamp;
3097 err = intel_pt_fast_forward(ptq->decoder,
3098 ptq->sel_timestamp);
3103 } else if (ptq->timestamp > ptq->sel_timestamp) {
3105 if (!intel_pt_next_time(ptq)) {
3107 ptq->have_sample = false;
3108 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
3120 static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
3122 const struct intel_pt_state *state = ptq->state;
3123 struct intel_pt *pt = ptq->pt;
3143 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
3145 err = intel_pt_sample(ptq);
3149 state = intel_pt_decode(ptq->decoder);
3153 if (ptq->sync_switch &&
3155 ptq->sync_switch = false;
3156 intel_pt_next_tid(pt, ptq);
3158 ptq->timestamp = state->est_timestamp;
3160 err = intel_ptq_synth_error(ptq, state);
3167 ptq->state = state;
3168 ptq->have_sample = true;
3169 intel_pt_sample_flags(ptq);
3177 ptq->timestamp = state->est_timestamp;
3179 } else if (ptq->sync_switch &&
3180 ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
3181 intel_pt_is_switch_ip(ptq, state->to_ip) &&
3182 ptq->next_tid == -1) {
3185 ptq->timestamp = state->est_timestamp;
3186 } else if (state->timestamp > ptq->timestamp) {
3187 ptq->timestamp = state->timestamp;
3190 if (ptq->sel_timestamp) {
3191 err = intel_pt_time_filter(ptq, &ff_timestamp);
3196 if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
3197 *timestamp = ptq->timestamp;
3221 struct intel_pt_queue *ptq;
3231 ptq = queue->priv;
3249 ret = intel_pt_run_decoder(ptq, &ts);
3261 ptq->on_heap = false;
3277 struct intel_pt_queue *ptq = queue->priv;
3279 if (ptq && (tid == -1 || ptq->tid == tid)) {
3280 ptq->time = time_;
3282 intel_pt_run_decoder(ptq, &ts);
3288 static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
3292 struct machine *m = ptq->pt->machine;
3294 ptq->pid = sample->pid;
3295 ptq->tid = sample->tid;
3296 ptq->cpu = queue->cpu;
3299 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
3301 thread__zput(ptq->thread);
3303 if (ptq->tid == -1)
3306 if (ptq->pid == -1) {
3307 ptq->thread = machine__find_thread(m, -1, ptq->tid);
3308 if (ptq->thread)
3309 ptq->pid = thread__pid(ptq->thread);
3313 ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
3320 struct intel_pt_queue *ptq;
3327 ptq = queue->priv;
3328 if (!ptq)
3331 ptq->stop = false;
3332 ptq->time = sample->time;
3333 intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
3334 intel_pt_run_decoder(ptq, &ts);
3376 struct intel_pt_queue *ptq;
3382 ptq = intel_pt_cpu_to_ptq(pt, cpu);
3383 if (!ptq || !ptq->sync_switch)
3386 switch (ptq->switch_state) {
3391 ptq->next_tid = tid;
3392 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
3395 if (!ptq->on_heap) {
3396 ptq->timestamp = perf_time_to_tsc(timestamp,
3398 err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
3399 ptq->timestamp);
3402 ptq->on_heap = true;
3404 ptq->switch_state = INTEL_PT_SS_TRACING;
3413 ptq->next_tid = -1;
3452 struct intel_pt_queue *ptq;
3454 ptq = intel_pt_cpu_to_ptq(pt, cpu);
3455 if (ptq && ptq->sync_switch) {
3456 ptq->next_tid = -1;
3457 switch (ptq->switch_state) {
3464 ptq->switch_state = INTEL_PT_SS_TRACING;
3613 struct intel_pt_queue *ptq;
3623 ptq = queue->priv;
3625 ptq->pebs[hw_id].evsel = evsel;
3626 ptq->pebs[hw_id].id = sample->id;
3627 ptq->pebs[hw_id].data_src_fmt = intel_pt_data_src_fmt(pt, evsel);
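Note: lines 3613-3627 record, per queue, which evsel a PEBS-via-PT hardware ID belongs to, so that intel_pt_synth_pebs_sample() (lines 2575-2597) can route each PEBS record to the right event. A sketch of that lookup table; the types and the MAX_PEBS bound are hypothetical:

#include <stdint.h>

#define MAX_PEBS 4              /* hypothetical bound on hardware IDs */

struct evsel;                   /* opaque stand-in for perf's struct evsel */

struct pebs_slot {
        struct evsel *evsel;    /* event that produced this hw_id */
        uint64_t id;            /* sample id to emit */
        int data_src_fmt;       /* how to decode the data-source field */
};

struct queue_pebs {
        struct pebs_slot pebs[MAX_PEBS];
};

/* Route a record by hw_id; NULL tells the caller to take the
 * single-event fallback path, as the real code does. */
static const struct pebs_slot *pebs_lookup(const struct queue_pebs *q,
                                           unsigned int hw_id)
{
        if (hw_id >= MAX_PEBS || !q->pebs[hw_id].evsel)
                return NULL;
        return &q->pebs[hw_id];
}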