author: Zachary Leaf <zachary.leaf@arm.com> 2022-05-23 06:22:27 -0500
committer: Damjan Marion <dmarion@0xa5.net> 2022-07-12 15:29:23 +0000
commit: c7d43a5eb19f2acab900274432cfd0e136d6cb44 (patch)
tree: 3ad3bd5191a1199430db40cec0b286ab531993be /src/plugins/perfmon/intel
parent: 20ac58e5c5ef59bc860270037aa7a3b0966a4ec2 (diff)
perfmon: make less arch dependent
In preparation for enabling perfmon on Arm platforms, move some Intel /
arch specific logic into the /intel directory and update the CMake to
split the common code from the arch specific files.

Since the dispatch_wrapper code is very different on Arm/Intel, each
arch provides its own implementation and performs any additional arch
specific config, e.g. on Intel, all the rdpmc indexes from the mmap
pages are cached. The new method intel_config_dispatch_wrapper performs
this config and returns a pointer to the dispatch wrapper to use.

Similarly, is_bundle_supported() looks very different on Arm/Intel, so
each implementation provides its own arch specific checks.

Two new callbacks/function pointers are added in PERFMON_REGISTER_SOURCE
to support this: .bundle_support and .config_dispatch_wrapper.

Type: refactor
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Change-Id: Idd121ddcfd1cc80a57c949cecd64eb2db0ac8be3
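To make the new hooks concrete, here is a minimal sketch of how an arch
specific source would wire in the two new callbacks; the arm_* names are
hypothetical placeholders, not part of this change:

/* hypothetical Arm-side source; only the .bundle_support and
 * .config_dispatch_wrapper hooks come from this change, the
 * arm_* implementations are placeholders */
static u8
arm_bundle_supported (perfmon_bundle_t *b)
{
  /* arch specific checks, e.g. PMU counter availability */
  return 1;
}

static clib_error_t *
arm_config_dispatch_wrapper (perfmon_bundle_t *b,
			     vlib_node_function_t **dispatch_wrapper)
{
  /* arch specific config, then hand back the wrapper to install */
  *dispatch_wrapper = 0; /* e.g. arm_dispatch_wrappers[b->n_events] */
  return 0;
}

PERFMON_REGISTER_SOURCE (arm_example) = {
  .name = "arm-example",
  .description = "hypothetical arch specific events",
  .bundle_support = arm_bundle_supported,
  .config_dispatch_wrapper = arm_config_dispatch_wrapper,
};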
Diffstat (limited to 'src/plugins/perfmon/intel')
-rw-r--r--  src/plugins/perfmon/intel/core.c              |  50
-rw-r--r--  src/plugins/perfmon/intel/core.h              |   2
-rw-r--r--  src/plugins/perfmon/intel/dispatch_wrapper.c  | 160
-rw-r--r--  src/plugins/perfmon/intel/dispatch_wrapper.h  |  18
-rw-r--r--  src/plugins/perfmon/intel/uncore.c            |   1
5 files changed, 231 insertions(+), 0 deletions(-)
diff --git a/src/plugins/perfmon/intel/core.c b/src/plugins/perfmon/intel/core.c
index 08a27b6a73f..5c4c336e2e8 100644
--- a/src/plugins/perfmon/intel/core.c
+++ b/src/plugins/perfmon/intel/core.c
@@ -16,6 +16,7 @@
#include <vnet/vnet.h>
#include <perfmon/perfmon.h>
#include <perfmon/intel/core.h>
+#include <perfmon/intel/dispatch_wrapper.h>
#include <linux/perf_event.h>
static perfmon_event_t events[] = {
@@ -95,6 +96,53 @@ intel_core_get_event_type (u32 event)
return PERFMON_EVENT_TYPE_GENERAL;
}
+static u8
+is_enough_counters (perfmon_bundle_t *b)
+{
+ u8 bl[PERFMON_EVENT_TYPE_MAX];
+ u8 cpu[PERFMON_EVENT_TYPE_MAX];
+
+ clib_memset (&bl, 0, sizeof (bl));
+ clib_memset (&cpu, 0, sizeof (cpu));
+
+ /* how many does this uarch support */
+ if (!clib_get_pmu_counter_count (&cpu[PERFMON_EVENT_TYPE_FIXED],
+ &cpu[PERFMON_EVENT_TYPE_GENERAL]))
+ return 0;
+
+ /* how many does the bundle require */
+ for (u16 i = 0; i < b->n_events; i++)
+ {
+ /* if the source can identify the event type, use it; otherwise assume general */
+ if (b->src->get_event_type)
+ bl[b->src->get_event_type (b->events[i])]++;
+ else
+ bl[PERFMON_EVENT_TYPE_GENERAL]++;
+ }
+
+ /* consciously ignoring pseudo events here */
+ return cpu[PERFMON_EVENT_TYPE_GENERAL] >= bl[PERFMON_EVENT_TYPE_GENERAL] &&
+ cpu[PERFMON_EVENT_TYPE_FIXED] >= bl[PERFMON_EVENT_TYPE_FIXED];
+}
+
+u8
+intel_bundle_supported (perfmon_bundle_t *b)
+{
+ perfmon_cpu_supports_t *supports = b->cpu_supports;
+
+ if (!is_enough_counters (b))
+ return 0;
+
+ if (!b->cpu_supports)
+ return 1;
+
+ for (int i = 0; i < b->n_cpu_supports; ++i)
+ if (supports[i].cpu_supports ())
+ return 1;
+
+ return 0;
+}
+
PERFMON_REGISTER_SOURCE (intel_core) = {
.name = "intel-core",
.description = "intel arch core events",
@@ -103,4 +151,6 @@ PERFMON_REGISTER_SOURCE (intel_core) = {
.init_fn = intel_core_init,
.get_event_type = intel_core_get_event_type,
.format_config = format_intel_core_config,
+ .bundle_support = intel_bundle_supported,
+ .config_dispatch_wrapper = intel_config_dispatch_wrapper,
};
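For reference, intel_bundle_supported above first checks the counter
budget, then falls through to any model specific predicates the bundle
declares. A hedged sketch of such a declaration, assuming the existing
perfmon_cpu_supports_t / PERFMON_REGISTER_BUNDLE conventions (the
example name and predicate choice are illustrative only):

static perfmon_cpu_supports_t example_cpu_supports[] = {
  /* predicate is any clib_cpu_supports_* check the bundle needs */
  { .cpu_supports = clib_cpu_supports_avx512_bitalg },
};

/* ...inside the PERFMON_REGISTER_BUNDLE (example) initializer... */
  .cpu_supports = example_cpu_supports,
  .n_cpu_supports = ARRAY_LEN (example_cpu_supports),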
diff --git a/src/plugins/perfmon/intel/core.h b/src/plugins/perfmon/intel/core.h
index 98ab9e539dd..b2b0434acb3 100644
--- a/src/plugins/perfmon/intel/core.h
+++ b/src/plugins/perfmon/intel/core.h
@@ -16,6 +16,8 @@
#ifndef __perfmon_intel_h
#define __perfmon_intel_h
+u8 intel_bundle_supported (perfmon_bundle_t *b);
+
#define PERF_INTEL_CODE(event, umask, edge, any, inv, cmask) \
((event) | (umask) << 8 | (edge) << 18 | (any) << 21 | (inv) << 23 | \
(cmask) << 24)
diff --git a/src/plugins/perfmon/intel/dispatch_wrapper.c b/src/plugins/perfmon/intel/dispatch_wrapper.c
new file mode 100644
index 00000000000..d424b54b85f
--- /dev/null
+++ b/src/plugins/perfmon/intel/dispatch_wrapper.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "vppinfra/string.h"
+#include <vnet/vnet.h>
+
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vnet/plugin/plugin.h>
+#include <vpp/app/version.h>
+#include <linux/limits.h>
+#include <sys/ioctl.h>
+
+#include <perfmon/perfmon.h>
+
+vlib_node_function_t *perfmon_dispatch_wrappers[PERF_MAX_EVENTS + 1];
+
+static_always_inline void
+perfmon_read_pmcs (u64 *counters, u32 *indexes, u8 n_counters)
+{
+ for (int i = 0; i < n_counters; i++)
+ counters[i] = _rdpmc (indexes[i] - 1);
+}
+
+static_always_inline uword
+perfmon_dispatch_wrapper_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame, u8 n_events)
+{
+ perfmon_main_t *pm = &perfmon_main;
+ perfmon_thread_runtime_t *rt =
+ vec_elt_at_index (pm->thread_runtimes, vm->thread_index);
+ perfmon_node_stats_t *s =
+ vec_elt_at_index (rt->node_stats, node->node_index);
+
+ struct
+ {
+ u64 t[2][PERF_MAX_EVENTS];
+ } samples;
+ uword rv;
+
+ clib_prefetch_load (s);
+
+ perfmon_read_pmcs (&samples.t[0][0], &rt->indexes[0], n_events);
+ rv = node->function (vm, node, frame);
+ perfmon_read_pmcs (&samples.t[1][0], &rt->indexes[0], n_events);
+
+ if (rv == 0)
+ return rv;
+
+ s->n_calls += 1;
+ s->n_packets += rv;
+
+ for (int i = 0; i < n_events; i++)
+ {
+ if (!(rt->preserve_samples & 1 << i))
+ {
+ s->value[i] += samples.t[1][i] - samples.t[0][i];
+ }
+ else
+ {
+ s->t[0].value[i] = samples.t[0][i];
+ s->t[1].value[i] = samples.t[1][i];
+ }
+ }
+
+ return rv;
+}
+
+static_always_inline u32
+perfmon_mmap_read_index (const struct perf_event_mmap_page *mmap_page)
+{
+ u32 idx;
+ u32 seq;
+
+ /* See documentation in /usr/include/linux/perf_event.h for more
+ * details, but the 2 most important things are:
+ * 1) if seq != mmap_page->lock, it means the kernel is currently updating
+ * the user page and we need to read it again
+ * 2) if idx == 0, it means the perf event is currently turned off and we
+ * just need to read the kernel-updated 'offset', otherwise we must also
+ * add the current hw value (hence rdpmc) */
+ do
+ {
+ seq = mmap_page->lock;
+ CLIB_COMPILER_BARRIER ();
+
+ idx = mmap_page->index;
+
+ CLIB_COMPILER_BARRIER ();
+ }
+ while (mmap_page->lock != seq);
+
+ return idx;
+}
+
+static_always_inline clib_error_t *
+read_mmap_indexes (perfmon_bundle_t *b)
+{
+ perfmon_main_t *pm = &perfmon_main;
+ for (int i = 0; i < vec_len (pm->thread_runtimes); i++)
+ {
+ perfmon_thread_runtime_t *tr;
+ tr = vec_elt_at_index (pm->thread_runtimes, i);
+
+ for (int j = 0; j < b->n_events; j++)
+ {
+ tr->indexes[j] = perfmon_mmap_read_index (tr->mmap_pages[j]);
+
+ /* if a zero index is returned, generate an error */
+ if (!tr->indexes[j])
+ {
+ return clib_error_return (0, "invalid rdpmc index");
+ }
+ }
+ }
+ return 0;
+}
+
+clib_error_t *
+intel_config_dispatch_wrapper (perfmon_bundle_t *b,
+ vlib_node_function_t **dispatch_wrapper)
+{
+ clib_error_t *err = 0;
+ if ((err = read_mmap_indexes (b)) != 0)
+ return err;
+
+ (*dispatch_wrapper) = perfmon_dispatch_wrappers[b->n_events];
+ return 0;
+}
+
+#define foreach_n_events \
+ _ (1) _ (2) _ (3) _ (4) _ (5) _ (6) _ (7) _ (8) _ (9) _ (10) _ (11) _ (12)
+
+#define _(x) \
+ static uword perfmon_dispatch_wrapper##x ( \
+ vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame) \
+ { \
+ return perfmon_dispatch_wrapper_inline (vm, node, frame, x); \
+ }
+
+foreach_n_events
+#undef _
+
+ vlib_node_function_t *perfmon_dispatch_wrappers[PERF_MAX_EVENTS + 1] = {
+#define _(x) [x] = &perfmon_dispatch_wrapper##x,
+ foreach_n_events
+#undef _
+ };
diff --git a/src/plugins/perfmon/intel/dispatch_wrapper.h b/src/plugins/perfmon/intel/dispatch_wrapper.h
new file mode 100644
index 00000000000..bcf4885d54d
--- /dev/null
+++ b/src/plugins/perfmon/intel/dispatch_wrapper.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2022 Arm and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+clib_error_t *
+intel_config_dispatch_wrapper (perfmon_bundle_t *b,
+ vlib_node_function_t **dispatch_wrapper);
diff --git a/src/plugins/perfmon/intel/uncore.c b/src/plugins/perfmon/intel/uncore.c
index 01579964bea..ac5580a3e62 100644
--- a/src/plugins/perfmon/intel/uncore.c
+++ b/src/plugins/perfmon/intel/uncore.c
@@ -220,4 +220,5 @@ PERFMON_REGISTER_SOURCE (intel_uncore) = {
.n_events = INTEL_UNCORE_N_EVENTS,
.init_fn = intel_uncore_init,
.format_config = format_intel_core_config,
+ .bundle_support = intel_bundle_supported,
};
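For context, a rough sketch of how the common perfmon code is expected
to consume the two new callbacks when a bundle is enabled; the exact
call sites live outside this diff, so treat the control flow below as
an assumption:

/* assumed consuming side, not part of this diff */
clib_error_t *err = 0;
vlib_node_function_t *dispatch_wrapper = 0;
perfmon_source_t *src = b->src;

if (src->bundle_support && !src->bundle_support (b))
  return clib_error_return (0, "bundle not supported on this CPU");

if (src->config_dispatch_wrapper &&
    (err = src->config_dispatch_wrapper (b, &dispatch_wrapper)))
  return err;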