Mailing List Archive

[RFC PATCH v1 3/3] perf evsel: Add retirement latency event support
When a retirement latency event is parsed, a flag is set on the
evsel. This change makes it so that, when the flag is set, evsel
opening, reading and exiting obtain the event's values from child
'perf record' and 'perf report' processes instead of from a perf
event file descriptor.

Something similar was suggested by Namhyung Kim in:
https://lore.kernel.org/lkml/CAM9d7cgdQQn5GYB7t++xuoMdeqPXiEkkcop69+rD06RAnu9-EQ@mail.gmail.com/

This is trying to add support for retirement latency directly in
events rather than through metric changes, as suggested by Weilin Wang in:
https://lore.kernel.org/lkml/20240402214436.1409476-1-weilin.wang@intel.com/
---
tools/perf/util/evsel.c | 181 +++++++++++++++++++++++++++++++++++++++-
tools/perf/util/evsel.h | 3 +
2 files changed, 181 insertions(+), 3 deletions(-)

diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 2743d40665ff..3f0b4326bac6 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -56,6 +56,7 @@
#include <internal/xyarray.h>
#include <internal/lib.h>
#include <internal/threadmap.h>
+#include <subcmd/run-command.h>

#include <linux/ctype.h>

@@ -491,6 +492,156 @@ struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx)
}
#endif

+/*
+ * Start, for one CPU, the pair of child processes used to measure a
+ * retirement latency event: a "perf record" writing sample data into a
+ * pipe, and a "perf report" reducing those samples to a single weight1
+ * (retirement latency) value. The child_process structs live in
+ * evsel->children at (cpu_map_idx, 0) for record and (cpu_map_idx, 1)
+ * for report.
+ */
+static int evsel__start_retire_latency_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
+					   int cpu_map_idx)
+{
+	char buf[16];
+	int pipefd[2];
+	int err, i, event_len;
+	struct perf_cpu cpu = perf_cpu_map__cpu(cpus, cpu_map_idx);
+	struct child_process *child_record =
+		xyarray__entry(evsel->children, cpu_map_idx, 0);
+	struct child_process *child_report =
+		xyarray__entry(evsel->children, cpu_map_idx, 1);
+	char *event = strdup(evsel__name(evsel));
+	// TODO: the dummy event also won't be used, but there's no option to disable.
+	const char *record_argv[15] = {
+		[0] = "perf",
+		[1] = "record",
+		[2] = "--synth=no",
+		[3] = "-W",
+		[4] = "-o",
+		[5] = "-",
+		[6] = "-e",
+	};
+	const char *report_argv[] = {
+		[0] = "perf",
+		[1] = "report",
+		[2] = "-i",
+		[3] = "-",
+		[4] = "-q",
+		[5] = "-F",
+		[6] = "weight1",
+		NULL,
+	};
+
+	if (!event)
+		return -ENOMEM;
+
+	/*
+	 * Remove the retirement latency 'R' modifier from the event name
+	 * before handing it to the child perf record.
+	 */
+	event_len = strlen(event);
+	if (event_len >= 2 && event[event_len - 1] == 'R' && event[event_len - 2] == ':') {
+		/* Trailing ":R" modifier, drop both characters. */
+		event[event_len - 2] = '\0';
+	} else if (event_len >= 2 && event[event_len - 1] == 'R' && event[event_len - 2] == '/') {
+		/* 'R' term right before the closing '/', drop just the 'R'. */
+		event[event_len - 1] = '\0';
+	} else {
+		/* 'R' amongst other modifiers, e.g. ":Rp"; shift the tail left over it. */
+		for (i = event_len - 1; i > 0; i--) {
+			if (event[i] == 'R') {
+				/* The move length includes the NUL terminator. */
+				memmove(&event[i], &event[i + 1], event_len - i);
+				break;
+			}
+		}
+		if (i == 0) {
+			pr_err("Expected retirement latency 'R' modifier in '%s'\n", event);
+			free(event);
+			return -EINVAL;
+		}
+	}
+
+	/* Slots 0-6 are filled above; the event name goes in slot 7. */
+	i = 7;
+	record_argv[i++] = event;
+	if (verbose)
+		record_argv[i++] = verbose > 1 ? "-vv" : "-v";
+	if (cpu.cpu >= 0) {
+		/* Restrict recording to the CPU being read. */
+		record_argv[i++] = "-C";
+		snprintf(buf, sizeof(buf), "%d", cpu.cpu);
+		record_argv[i++] = buf;
+	} else {
+		record_argv[i++] = "-a";
+	}
+	/* The recording window is bounded by a sleep workload. */
+	record_argv[i++] = "sleep";
+	// TODO: interval and support for different periods.
+	record_argv[i++] = "0.1";
+
+	if (pipe(pipefd) < 0) {
+		err = -errno;
+		free(event);
+		return err;
+	}
+
+	/* perf record child, writing its perf.data to the pipe. */
+	child_record->argv = record_argv;
+	child_record->pid = -1;
+	child_record->no_stdin = 1;
+	if (verbose)
+		child_record->err = fileno(stderr);
+	else
+		child_record->no_stderr = 1;
+	child_record->out = pipefd[1];
+	err = start_command(child_record);
+	free(event);
+	if (err) {
+		/* The report child was never started; reclaim the read end. */
+		close(pipefd[0]);
+		return err;
+	}
+
+	/*
+	 * perf report child, reading from the pipe. out = -1 so that the
+	 * report's stdout can be read back in evsel__read_retire_latency().
+	 */
+	child_report->argv = report_argv;
+	child_report->pid = -1;
+	if (verbose)
+		child_report->err = fileno(stderr);
+	else
+		child_report->no_stderr = 1;
+	child_report->in = pipefd[0];
+	child_report->out = -1;
+	return start_command(child_report);
+}
+
+/*
+ * Reap the record and report children for one CPU. Both children are
+ * always reaped; the first non-zero finish_command() result is returned.
+ */
+static int evsel__finish_retire_latency_cpu(struct evsel *evsel, int cpu_map_idx)
+{
+	struct child_process *child_record =
+		xyarray__entry(evsel->children, cpu_map_idx, 0);
+	struct child_process *child_report =
+		xyarray__entry(evsel->children, cpu_map_idx, 1);
+	int err_record, err_report;
+
+	err_record = finish_command(child_record);
+	err_report = finish_command(child_report);
+	return err_record ? err_record : err_report;
+}
+
+/*
+ * Produce the count for a retirement latency event: parse the weight1
+ * value printed by the report child, reap both children, then restart
+ * them so the next read gets a fresh measurement.
+ */
+static int evsel__read_retire_latency(struct evsel *evsel, int cpu_map_idx, int thread)
+{
+	struct child_process *child_report = xyarray__entry(evsel->children, cpu_map_idx, 1);
+	struct perf_counts_values *count = perf_counts(evsel->counts, cpu_map_idx, thread);
+	char buf[256];
+	ssize_t n;
+	int err;
+
+	/* Leave room for the terminator; read() doesn't NUL-terminate. */
+	n = read(child_report->out, buf, sizeof(buf) - 1);
+	if (n <= 0)
+		return -1;
+	buf[n] = '\0';
+
+	count->val = atoll(buf);
+	/* There are no fd-based enabled/running times for child counts. */
+	count->ena = 1;
+	count->run = 1;
+	count->id = 0;
+	count->lost = 0;
+
+	err = evsel__finish_retire_latency_cpu(evsel, cpu_map_idx);
+	if (err)
+		return err;
+
+	/* Restart the counter. */
+	return evsel__start_retire_latency_cpu(evsel, evsel->core.cpus, cpu_map_idx);
+}
+
+/* Reap the record/report children for every CPU of the evsel. */
+static int evsel__finish_retire_latency(struct evsel *evsel)
+{
+	int nr_cpus = perf_cpu_map__nr(evsel->core.cpus);
+
+	for (int idx = 0; idx < nr_cpus; idx++) {
+		int ret = evsel__finish_retire_latency_cpu(evsel, idx);
+
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
const char *const evsel__hw_names[PERF_COUNT_HW_MAX] = {
"cycles",
"instructions",
@@ -1463,6 +1614,10 @@ static void evsel__free_config_terms(struct evsel *evsel)

void evsel__exit(struct evsel *evsel)
{
+ if (evsel->children) {
+ evsel__finish_retire_latency(evsel);
+ zfree(&evsel->children);
+ }
assert(list_empty(&evsel->core.node));
assert(evsel->evlist == NULL);
bpf_counter__destroy(evsel);
@@ -1602,9 +1757,10 @@ static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread)

int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
{
- u64 read_format = evsel->core.attr.read_format;
+ /* Retirement latency counts come from child perf processes, not an event fd. */
+ if (evsel->retire_lat)
+ return evsel__read_retire_latency(evsel, cpu_map_idx, thread);

- if (read_format & PERF_FORMAT_GROUP)
+ if (evsel->core.attr.read_format & PERF_FORMAT_GROUP)
return evsel__read_group(evsel, cpu_map_idx, thread);

return evsel__read_one(evsel, cpu_map_idx, thread);
@@ -1819,10 +1975,22 @@ static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
threads = empty_thread_map;
}

- if (evsel->core.fd == NULL &&
+ if (!evsel->retire_lat && evsel->core.fd == NULL &&
perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0)
return -ENOMEM;

+ if (evsel->retire_lat && evsel->children == NULL) {
+ /*
+ * Use ylen of 2, [0] is the record and [1] is the report
+ * command. Currently retirement latency doesn't support
+ * per-thread mode.
+ */
+ evsel->children = xyarray__new(perf_cpu_map__nr(cpus), /*ylen=*/2,
+ sizeof(struct child_process));
+ if (!evsel->children)
+ return -ENOMEM;
+ }
+
evsel->open_flags = PERF_FLAG_FD_CLOEXEC;
if (evsel->cgrp)
evsel->open_flags |= PERF_FLAG_PID_CGROUP;
@@ -2033,6 +2201,13 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,

for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) {

+ if (evsel->retire_lat) {
+ err = evsel__start_retire_latency_cpu(evsel, cpus, idx);
+ if (err)
+ return err;
+ continue;
+ }
+
for (thread = 0; thread < nthreads; thread++) {
int fd, group_fd;
retry_open:
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index e6726587e1bc..ab7c10e7f063 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -171,6 +171,9 @@ struct evsel {

/* for missing_features */
struct perf_pmu *pmu;
+
+ /* Used for retire_lat child process. */
+ struct xyarray *children;
};

struct perf_missing_features {
--
2.44.0.769.g3c40516874-goog