summaryrefslogtreecommitdiffstats
path: root/src/vppinfra/perfmon
diff options
context:
space:
mode:
authorDamjan Marion <damarion@cisco.com>2022-04-06 21:16:37 +0200
committerFlorin Coras <florin.coras@gmail.com>2022-04-08 15:53:10 +0000
commitd5045e68a782d484e3f0e54edb4a88dc3dfac291 (patch)
treef0cf5627b672050e4463d1dee4ea952a066c12d6 /src/vppinfra/perfmon
parentdfc43164078b481e39dc0a87e8e358cc6a56d14e (diff)
vppinfra: introduce clib_perfmon
Type: improvement Change-Id: I85a90774eb313020435c9bc2297c1bdf23d52efc Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src/vppinfra/perfmon')
-rw-r--r--src/vppinfra/perfmon/bundle_core_power.c48
-rw-r--r--src/vppinfra/perfmon/bundle_default.c64
-rw-r--r--src/vppinfra/perfmon/perfmon.c230
-rw-r--r--src/vppinfra/perfmon/perfmon.h117
4 files changed, 459 insertions, 0 deletions
diff --git a/src/vppinfra/perfmon/bundle_core_power.c b/src/vppinfra/perfmon/bundle_core_power.c
new file mode 100644
index 00000000000..6a30cdfdde4
--- /dev/null
+++ b/src/vppinfra/perfmon/bundle_core_power.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2022 Cisco Systems, Inc.
+ */
+
+#ifdef __x86_64__
+
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+#include <vppinfra/perfmon/perfmon.h>
+
+/* Cell formatter for the "core-power" bundle: renders column `col` of one
+ * capture as a percentage of event 0 (d[0], the cycle denominator).
+ * Unknown columns return the string unchanged.
+ * NOTE(review): no guard against d[0] == 0 — the f64 division would yield
+ * inf/nan in that case; confirm captures always accumulate cycles. */
+static u8 *
+format_perfmon_bundle_core_power (u8 *s, va_list *args)
+{
+ clib_perfmon_ctx_t __clib_unused *ctx = va_arg (*args, clib_perfmon_ctx_t *);
+ clib_perfmon_capture_t *c = va_arg (*args, clib_perfmon_capture_t *);
+ u32 col = va_arg (*args, int); /* column index supplied by format_perfmon_bundle */
+ u64 *d = c->data; /* captured counter values for this capture */
+
+ switch (col)
+ {
+ case 0: /* "Level 0" column: d[1] as a share of d[0] */
+ return format (s, "%7.1f %%", (f64) 100 * d[1] / d[0]);
+ case 1: /* "Level 1" column */
+ return format (s, "%7.1f %%", (f64) 100 * d[2] / d[0]);
+ case 2: /* "Level 2" column */
+ return format (s, "%7.1f %%", (f64) 100 * d[3] / d[0]);
+ default:
+ return s;
+ }
+}
+
+/* Build a raw Intel PMU event descriptor: bits 0-7 event code, 8-15 umask. */
+#define PERF_INTEL_CODE(event, umask) ((event) | (umask) << 8)
+
+/* Four raw events: config[0] (0x3c/0x00) is the cycle denominator used by
+ * the formatter above; config[1..3] (event 0x28, umasks 0x07/0x18/0x20)
+ * presumably count cycles at each turbo license level (see .desc) —
+ * TODO(review) confirm against the Intel SDM event tables. */
+CLIB_PERFMON_BUNDLE (core_power) = {
+ .name = "core-power",
+ .desc =
+ "Core cycles where the core was running under specific turbo schedule.",
+ .type = PERF_TYPE_RAW,
+ .config[0] = PERF_INTEL_CODE (0x3c, 0x00),
+ .config[1] = PERF_INTEL_CODE (0x28, 0x07),
+ .config[2] = PERF_INTEL_CODE (0x28, 0x18),
+ .config[3] = PERF_INTEL_CODE (0x28, 0x20),
+ .n_events = 4,
+ .format_fn = format_perfmon_bundle_core_power,
+ .column_headers = CLIB_STRING_ARRAY ("Level 0", "Level 1", "Level 2"),
+};
+
+#endif
diff --git a/src/vppinfra/perfmon/bundle_default.c b/src/vppinfra/perfmon/bundle_default.c
new file mode 100644
index 00000000000..b5282c51740
--- /dev/null
+++ b/src/vppinfra/perfmon/bundle_default.c
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2022 Cisco Systems, Inc.
+ */
+
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+#include <vppinfra/perfmon/perfmon.h>
+
+/* Cell formatter for the "default" bundle. Counter layout in c->data
+ * (fixed by the bundle definition below): [0] CPU cycles, [1] reference
+ * CPU cycles, [2] instructions, [3] branch instructions, [4] branch
+ * misses. Per-operation columns fall back to raw counts when n_ops <= 1. */
+static u8 *
+format_perfmon_bundle_default (u8 *s, va_list *args)
+{
+ clib_perfmon_ctx_t *ctx = va_arg (*args, clib_perfmon_ctx_t *);
+ clib_perfmon_capture_t *c = va_arg (*args, clib_perfmon_capture_t *);
+ u32 col = va_arg (*args, int);
+ u64 *d = c->data;
+
+ switch (col)
+ {
+ case 0: /* "Freq": cycles / ref-cycles scaled by ref clock, in GHz */
+ if (ctx->ref_clock > 0)
+ return format (s, "%8.1f", (f64) d[0] / d[1] * (ctx->ref_clock / 1e9));
+ else
+ return s;
+ case 1: /* "IPC": instructions per cycle */
+ return format (s, "%5.2f", (f64) d[2] / d[0]);
+ case 2: /* "Clks/Op" */
+ if (c->n_ops > 1)
+ return format (s, "%8.2f", (f64) d[0] / c->n_ops)
+ else
+ return format (s, "%8u", d[0]);
+ case 3: /* "Inst/Op" */
+ if (c->n_ops > 1)
+ return format (s, "%8.2f", (f64) d[2] / c->n_ops);
+ else
+ return format (s, "%8u", d[2]);
+ case 4: /* "Brnch/Op" */
+ if (c->n_ops > 1)
+ return format (s, "%9.2f", (f64) d[3] / c->n_ops);
+ else
+ return format (s, "%9u", d[3]);
+ case 5: /* "BrMiss/Op" */
+ if (c->n_ops > 1)
+ return format (s, "%10.2f", (f64) d[4] / c->n_ops);
+ else
+ return format (s, "%10u", d[4]);
+ default:
+ return s;
+ }
+}
+
+/* Default bundle: five generic hardware events. c->data layout consumed
+ * by format_perfmon_bundle_default: [0] CPU cycles, [1] reference CPU
+ * cycles, [2] instructions, [3] branch instructions, [4] branch misses. */
+CLIB_PERFMON_BUNDLE (default) = {
+  .name = "default",
+  /* fixed typo in the user-visible description: "Operatiom" -> "Operation" */
+  .desc = "IPC, Clocks/Operation, Instr/Operation, Branch Total & Miss",
+  .type = PERF_TYPE_HARDWARE,
+  .config[0] = PERF_COUNT_HW_CPU_CYCLES,
+  .config[1] = PERF_COUNT_HW_REF_CPU_CYCLES,
+  .config[2] = PERF_COUNT_HW_INSTRUCTIONS,
+  .config[3] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
+  .config[4] = PERF_COUNT_HW_BRANCH_MISSES,
+  .n_events = 5,
+  .format_fn = format_perfmon_bundle_default,
+  .column_headers = CLIB_STRING_ARRAY ("Freq", "IPC", "Clks/Op", "Inst/Op",
+				       "Brnch/Op", "BrMiss/Op"),
+};
diff --git a/src/vppinfra/perfmon/perfmon.c b/src/vppinfra/perfmon/perfmon.c
new file mode 100644
index 00000000000..9ec90b88d67
--- /dev/null
+++ b/src/vppinfra/perfmon/perfmon.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2022 Cisco Systems, Inc.
+ */
+
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+#include <vppinfra/perfmon/perfmon.h>
+#include <vppinfra/format_table.h>
+
+clib_perfmon_main_t clib_perfmon_main;
+
+/* Look up a registered bundle by (formatted) name, open one perf_event fd
+ * per event as a single group bound to the calling thread, and fill *ctx.
+ * Returns 0 on success or a clib error; on error the partially
+ * initialized ctx is freed before returning. */
+__clib_export clib_error_t *
+clib_perfmon_init_by_bundle_name (clib_perfmon_ctx_t *ctx, char *fmt, ...)
+{
+ clib_perfmon_main_t *pm = &clib_perfmon_main;
+ clib_perfmon_bundle_t *b = 0;
+ int group_fd = -1;
+ clib_error_t *err = 0;
+ va_list va;
+ char *bundle_name;
+
+ /* attributes shared by all events; pinned/exclusive are cleared after
+ * the group leader is opened so they apply to the leader only */
+ struct perf_event_attr pe = {
+ .size = sizeof (struct perf_event_attr),
+ .disabled = 1,
+ .exclude_kernel = 1,
+ .exclude_hv = 1,
+ .pinned = 1,
+ .exclusive = 1,
+ /* group read layout: nr, time_enabled, time_running, values[] */
+ .read_format = (PERF_FORMAT_GROUP | PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING),
+ };
+
+ va_start (va, fmt);
+ bundle_name = (char *) va_format (0, fmt, &va);
+ va_end (va);
+ vec_add1 (bundle_name, 0); /* NUL-terminate the vector for strncmp */
+
+ /* NOTE(review): strncmp over strlen(bundle_name) makes this a prefix
+ * match — e.g. "core" would select "core-power"; confirm intended. */
+ for (clib_perfmon_bundle_reg_t *r = pm->bundle_regs; r; r = r->next)
+ {
+ if (strncmp (r->bundle->name, bundle_name, vec_len (bundle_name) - 1))
+ continue;
+ b = r->bundle;
+ break;
+ }
+
+ if (b == 0)
+ {
+ err = clib_error_return (0, "Unknown bundle '%s'", bundle_name);
+ goto done;
+ }
+
+ clib_memset_u8 (ctx, 0, sizeof (clib_perfmon_ctx_t));
+ vec_validate_init_empty (ctx->fds, b->n_events - 1, -1);
+ ctx->bundle = b;
+
+ for (int i = 0; i < b->n_events; i++)
+ {
+ pe.config = b->config[i];
+ pe.type = b->type;
+ /* pid 0 / cpu -1: this thread on any CPU; first fd becomes leader */
+ int fd = syscall (__NR_perf_event_open, &pe, /* pid */ 0, /* cpu */ -1,
+ /* group_fd */ group_fd, /* flags */ 0);
+ if (fd < 0)
+ {
+ err = clib_error_return_unix (0, "perf_event_open[%u]", i);
+ goto done;
+ }
+
+ /* NOTE(review): ctx->debug was zeroed by the memset above, so this
+ * diagnostic can never fire during init — confirm intended. */
+ if (ctx->debug)
+ fformat (stderr, "perf event %u open, fd %d\n", i, fd);
+
+ if (group_fd == -1)
+ {
+ group_fd = fd;
+ pe.pinned = 0;
+ pe.exclusive = 0;
+ }
+
+ ctx->fds[i] = fd;
+ }
+
+ ctx->group_fd = group_fd;
+ /* read buffer: nr + time_enabled + time_running + n_events values */
+ ctx->data = vec_new (u64, 3 + b->n_events);
+ ctx->ref_clock = os_cpu_clock_frequency ();
+ vec_validate (ctx->capture_groups, 0); /* seed with one unnamed group */
+
+done:
+ if (err)
+ clib_perfmon_free (ctx);
+
+ vec_free (bundle_name);
+ return err;
+}
+
+/* Release everything owned by ctx: captures, capture groups, any opened
+ * event fds, and the group read buffer. Safe on a partially initialized
+ * ctx (used on the init error path; fds default to -1). */
+__clib_export void
+clib_perfmon_free (clib_perfmon_ctx_t *ctx)
+{
+ clib_perfmon_clear (ctx);
+ vec_free (ctx->captures);
+ vec_free (ctx->capture_groups);
+
+ for (int i = 0; i < vec_len (ctx->fds); i++)
+ if (ctx->fds[i] > -1)
+ close (ctx->fds[i]);
+ vec_free (ctx->fds);
+ vec_free (ctx->data);
+}
+
+/* Drop all recorded captures and capture groups (freeing their strings)
+ * while keeping the perf fds and read buffer, so ctx can be reused. */
+__clib_export void
+clib_perfmon_clear (clib_perfmon_ctx_t *ctx)
+{
+ for (int i = 0; i < vec_len (ctx->captures); i++)
+ vec_free (ctx->captures[i].desc);
+ vec_reset_length (ctx->captures);
+ for (int i = 0; i < vec_len (ctx->capture_groups); i++)
+ vec_free (ctx->capture_groups[i].name);
+ vec_reset_length (ctx->capture_groups);
+}
+
+/* Read the event group's counters and record them as a new capture with a
+ * formatted description and an operation count (used for per-op columns).
+ * Returns a pointer to the n_events counter values inside ctx->data, or 0
+ * if the kernel read fails.
+ *
+ * Fixes vs. original: the group is read directly into ctx->data — the
+ * original read into a stack buffer yet returned a pointer into the
+ * never-written ctx->data, so callers always saw zeros — and only
+ * n_events values are copied into c->data (the original copied
+ * CLIB_PERFMON_MAX_EVENTS entries, reading past the initialized data). */
+__clib_export u64 *
+clib_perfmon_capture (clib_perfmon_ctx_t *ctx, u32 n_ops, char *fmt, ...)
+{
+  u32 read_size = (ctx->bundle->n_events + 3) * sizeof (u64);
+  clib_perfmon_capture_t *c;
+  u64 *d = ctx->data; /* vec of 3 + n_events u64, allocated at init */
+  va_list va;
+
+  /* layout: d[0] nr, d[1] time_enabled, d[2] time_running, d[3..] values */
+  if ((read (ctx->group_fd, d, read_size) != read_size))
+    {
+      if (ctx->debug)
+	fformat (stderr, "reading of %u bytes failed, %s (%d)\n", read_size,
+		 strerror (errno), errno);
+      return 0;
+    }
+
+  if (ctx->debug)
+    {
+      fformat (stderr, "read events: %lu enabled: %lu running: %lu ", d[0],
+	       d[1], d[2]);
+      fformat (stderr, "data: [%lu", d[3]);
+      for (int i = 1; i < ctx->bundle->n_events; i++)
+	fformat (stderr, ", %lu", d[i + 3]);
+      fformat (stderr, "]\n");
+    }
+
+  vec_add2 (ctx->captures, c, 1);
+
+  va_start (va, fmt);
+  c->desc = va_format (0, fmt, &va);
+  va_end (va);
+
+  c->n_ops = n_ops;
+  c->group = vec_len (ctx->capture_groups) - 1; /* current (last) group */
+  c->time_enabled = d[1];
+  c->time_running = d[2];
+  for (int i = 0; i < ctx->bundle->n_events; i++)
+    c->data[i] = d[i + 3];
+
+  /* first counter value lives at ctx->data + 3 */
+  return ctx->data + vec_len (ctx->data) - ctx->bundle->n_events;
+}
+
+/* Start a new named capture group, or name the initial unnamed one.
+ * Subsequent captures are tagged with this group; the table formatter
+ * emits a fresh header block at each group boundary. */
+__clib_export void
+clib_perfmon_capture_group (clib_perfmon_ctx_t *ctx, char *fmt, ...)
+{
+ clib_perfmon_capture_group_t *cg;
+ va_list va;
+
+ cg = vec_end (ctx->capture_groups) - 1; /* last group; one exists from init */
+
+ if (cg->name != 0) /* last group already named: open a new one */
+ vec_add2 (ctx->capture_groups, cg, 1);
+
+ va_start (va, fmt);
+ cg->name = va_format (0, fmt, &va);
+ va_end (va);
+ ASSERT (cg->name);
+}
+
+/* Busy-spin for roughly ref_clock iterations of a throwaway multiply —
+ * presumably to bring the CPU out of idle/low-frequency states before
+ * measuring; the empty asm with a "memory" clobber keeps the loop from
+ * being optimized away. TODO(review): confirm the intended duration. */
+__clib_export void
+clib_perfmon_warmup (clib_perfmon_ctx_t *ctx)
+{
+ for (u64 i = 0; i < (u64) ctx->ref_clock; i++)
+ asm inline("" : : "r"(i * i) : "memory");
+}
+
+/* Render all recorded captures as a table: one column per capture, one
+ * row per bundle column-header string (plus an "Ops" row and a title
+ * row), with a new label column emitted at each capture-group boundary. */
+__clib_export u8 *
+format_perfmon_bundle (u8 *s, va_list *args)
+{
+ clib_perfmon_ctx_t *ctx = va_arg (*args, clib_perfmon_ctx_t *);
+ clib_perfmon_capture_t *c;
+ clib_perfmon_capture_group_t *cg = 0;
+ char **hdr = ctx->bundle->column_headers;
+ table_t _t = {}, *t = &_t;
+ u32 n_row = 0, col = 0; /* n_row: number of metric rows (header strings) */
+
+ table_add_header_row (t, 0);
+
+ /* column_headers is a NULL-terminated string array */
+ for (char **h = ctx->bundle->column_headers; h[0]; h++)
+ n_row++;
+
+ vec_foreach (c, ctx->captures)
+ {
+ /* entering a new capture group: emit its name and the row labels */
+ if (cg != ctx->capture_groups + c->group)
+ {
+ cg = ctx->capture_groups + c->group;
+ table_format_cell (t, col, -1, "%v", cg->name);
+ table_set_cell_align (t, col, -1, TTAA_LEFT);
+ table_set_cell_fg_color (t, col, -1, TTAC_BRIGHT_RED);
+
+ table_format_cell (t, col, 0, "Ops");
+ table_set_cell_fg_color (t, col, 0, TTAC_BRIGHT_YELLOW);
+
+ for (int i = 0; i < n_row; i++)
+ {
+ table_format_cell (t, col, i + 1, "%s", hdr[i]);
+ table_set_cell_fg_color (t, col, i + 1, TTAC_BRIGHT_YELLOW);
+ }
+ col++;
+ }
+ /* one column per capture: description, op count, then each metric
+ * cell rendered by the bundle's format_fn */
+ table_format_cell (t, col, -1, "%v", c->desc);
+ table_format_cell (t, col, 0, "%7u", c->n_ops);
+ for (int i = 0; i < n_row; i++)
+ table_format_cell (t, col, i + 1, "%U", ctx->bundle->format_fn, ctx, c,
+ i);
+ col++;
+ }
+
+ s = format (s, "%U", format_table, t);
+ table_free (t);
+ return s;
+}
diff --git a/src/vppinfra/perfmon/perfmon.h b/src/vppinfra/perfmon/perfmon.h
new file mode 100644
index 00000000000..0d09dc6fb23
--- /dev/null
+++ b/src/vppinfra/perfmon/perfmon.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2022 Cisco Systems, Inc.
+ */
+
+#ifndef included_perfmon_perfmon_h
+#define included_perfmon_perfmon_h
+
+#include <vppinfra/cpu.h>
+#ifdef __linux__
+#include <sys/ioctl.h>
+#include <linux/perf_event.h>
+#endif
+
+/* upper bound on events per bundle; sizes config[] and capture data[] */
+#define CLIB_PERFMON_MAX_EVENTS 7
+/* Static description of a set of perf events measured together. */
+typedef struct
+{
+ char *name; /* bundle name used for lookup */
+ char *desc; /* human-readable description */
+ u64 config[CLIB_PERFMON_MAX_EVENTS]; /* perf_event_attr.config per event */
+ u32 type; /* perf_event_attr.type (PERF_TYPE_*) */
+ u8 n_events; /* number of valid config[] entries */
+ format_function_t *format_fn; /* per-cell formatter: (ctx, capture, col) */
+ char **column_headers; /* NULL-terminated list of column names */
+} clib_perfmon_bundle_t;
+
+/* One recorded snapshot of the event group. */
+typedef struct
+{
+ u64 time_enabled; /* from PERF_FORMAT_TOTAL_TIME_ENABLED */
+ u64 time_running; /* from PERF_FORMAT_TOTAL_TIME_RUNNING */
+ u64 data[CLIB_PERFMON_MAX_EVENTS]; /* counter values */
+ u8 *desc; /* formatted capture description (vec) */
+ u32 n_ops; /* operations represented, for per-op stats */
+ u32 group; /* index into ctx->capture_groups */
+} clib_perfmon_capture_t;
+
+/* Named group of consecutive captures (one header block in the table). */
+typedef struct
+{
+ u8 *name; /* group name (vec); 0 until named */
+ u32 start; /* NOTE(review): never read or written in perfmon.c */
+} clib_perfmon_capture_group_t;
+
+/* Per-instance measurement context. */
+typedef struct
+{
+ int group_fd; /* fd of the perf event group leader */
+ int *fds; /* vec of per-event fds, leader first */
+ clib_perfmon_bundle_t *bundle; /* bundle being measured */
+ u64 *data; /* vec: group read buffer, 3 header words + n_events values */
+ u8 debug : 1; /* emit diagnostics to stderr */
+ u32 n_captures; /* NOTE(review): unused; code uses vec_len (captures) */
+ clib_perfmon_capture_t *captures; /* vec of recorded captures */
+ clib_perfmon_capture_group_t *capture_groups; /* vec of capture groups */
+ f64 ref_clock; /* CPU clock frequency (Hz), from os_cpu_clock_frequency */
+} clib_perfmon_ctx_t;
+
+/* Intrusive singly-linked registration node, one per bundle; filled in by
+ * the CLIB_PERFMON_BUNDLE constructor. */
+typedef struct clib_perfmon_bundle_reg
+{
+ clib_perfmon_bundle_t *bundle;
+ struct clib_perfmon_bundle_reg *next;
+} clib_perfmon_bundle_reg_t;
+
+/* Global registry of bundles, built by constructors at program load. */
+typedef struct
+{
+ clib_perfmon_bundle_reg_t *bundle_regs;
+} clib_perfmon_main_t;
+
+extern clib_perfmon_main_t clib_perfmon_main;
+
+/* Issue a perf ioctl (req) on the whole event group. On x86_64 the
+ * syscall is emitted directly in inline asm — presumably to minimize the
+ * overhead around start/stop so it perturbs the measurement less; rcx and
+ * r11 are clobbered by the syscall instruction itself. Elsewhere the
+ * plain libc ioctl is used. */
+static_always_inline void
+clib_perfmon_ioctl (int fd, u32 req)
+{
+#ifdef __x86_64__
+ asm inline("syscall"
+ :
+ : "D"(fd), "S"(req), "a"(__NR_ioctl), "d"(PERF_IOC_FLAG_GROUP)
+ : "rcx", "r11" /* registers modified by kernel */);
+#else
+ ioctl (fd, req, PERF_IOC_FLAG_GROUP);
+#endif
+}
+
+clib_error_t *clib_perfmon_init_by_bundle_name (clib_perfmon_ctx_t *ctx,
+ char *fmt, ...);
+void clib_perfmon_free (clib_perfmon_ctx_t *ctx);
+void clib_perfmon_warmup (clib_perfmon_ctx_t *ctx);
+void clib_perfmon_clear (clib_perfmon_ctx_t *ctx);
+u64 *clib_perfmon_capture (clib_perfmon_ctx_t *ctx, u32 n_ops, char *fmt, ...);
+void clib_perfmon_capture_group (clib_perfmon_ctx_t *ctx, char *fmt, ...);
+format_function_t format_perfmon_bundle;
+
+/* Zero all counters in the event group. */
+static_always_inline void
+clib_perfmon_reset (clib_perfmon_ctx_t *ctx)
+{
+ clib_perfmon_ioctl (ctx->group_fd, PERF_EVENT_IOC_RESET);
+}
+/* Start counting on the event group. */
+static_always_inline void
+clib_perfmon_enable (clib_perfmon_ctx_t *ctx)
+{
+ clib_perfmon_ioctl (ctx->group_fd, PERF_EVENT_IOC_ENABLE);
+}
+/* Stop counting on the event group. */
+static_always_inline void
+clib_perfmon_disable (clib_perfmon_ctx_t *ctx)
+{
+ clib_perfmon_ioctl (ctx->group_fd, PERF_EVENT_IOC_DISABLE);
+}
+
+/* Define and register a bundle: a constructor links the bundle into
+ * clib_perfmon_main.bundle_regs at load time; the macro ends with the
+ * start of the variable definition so the user supplies the initializer
+ * (see bundle_default.c / bundle_core_power.c for usage). */
+#define CLIB_PERFMON_BUNDLE(x) \
+ static clib_perfmon_bundle_reg_t clib_perfmon_bundle_reg_##x; \
+ static clib_perfmon_bundle_t clib_perfmon_bundle_##x; \
+ static void __clib_constructor clib_perfmon_bundle_reg_fn_##x (void) \
+ { \
+ clib_perfmon_bundle_reg_##x.bundle = &clib_perfmon_bundle_##x; \
+ clib_perfmon_bundle_reg_##x.next = clib_perfmon_main.bundle_regs; \
+ clib_perfmon_main.bundle_regs = &clib_perfmon_bundle_reg_##x; \
+ } \
+ static clib_perfmon_bundle_t clib_perfmon_bundle_##x
+
+#endif