author     Tom Seidenberg <tseidenb@cisco.com>      2020-07-10 15:49:03 +0000
committer  Dave Barach <openvpp@barachs.net>        2020-07-16 21:44:42 +0000
commit     6c81f5a2493ff65b4dacfef45db8a1ee459a738f (patch)
tree       24343a92e18599be4e4b4aa360ab7b2c13cdb878 /src/plugins
parent     bab02f0b184b63c4159ded030cf34044be10da40 (diff)
misc: add callback hooks and refactor pmc
Callbacks for monitoring and performance measurement:
- Add new callback list type, with context
- Add callbacks for API, CLI, and barrier sync
- Modify node dispatch callback to pass plugin-specific context
- Modify perfmon plugin to keep PMC samples local to the plugin
- Include process nodes in dispatch callback
- Pass dispatch function return value to callback

Type: refactor

Signed-off-by: Tom Seidenberg <tseidenb@cisco.com>
Change-Id: I28b06c58490611e08d76ff5b01b2347ba2109b22
Diffstat (limited to 'src/plugins')
-rw-r--r--  src/plugins/mdata/mdata.c               |  22
-rw-r--r--  src/plugins/perfmon/CMakeLists.txt      |  18
-rw-r--r--  src/plugins/perfmon/perfmon.c           |  33
-rw-r--r--  src/plugins/perfmon/perfmon.h           |  38
-rw-r--r--  src/plugins/perfmon/perfmon_intel.h     |   4
-rw-r--r--  src/plugins/perfmon/perfmon_intel_skl.c |  59
-rw-r--r--  src/plugins/perfmon/perfmon_intel_skx.c |  59
-rw-r--r--  src/plugins/perfmon/perfmon_periodic.c  | 219
-rw-r--r--  src/plugins/perfmon/perfmon_plugin.c    |  38
9 files changed, 326 insertions(+), 164 deletions(-)
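
Before the per-file hunks, a rough, non-authoritative sketch of the dispatch-callback pattern the commit message describes, pieced together from the perfmon and mdata hunks below. The my_plugin_* names are hypothetical; the vlib/vppinfra pieces (clib_callback_data_add, the VLIB_NODE_RUNTIME_PERF_* call types, the data->u[] context slots) are the ones this patch itself uses.

    /* Sketch only: how a plugin might hook node dispatch with the new
     * context-carrying callback data introduced by this commit. */
    #include <vlib/vlib.h>
    #include <vppinfra/callback_data.h>

    static void
    my_plugin_dispatch_cb (vlib_node_runtime_perf_callback_data_t * data,
                           vlib_node_runtime_perf_callback_args_t * args)
    {
      void *my_ctx = data->u[0].v;   /* plugin-specific context, set at add time */

      if (PREDICT_FALSE (args->call_type == VLIB_NODE_RUNTIME_PERF_RESET))
        return;                      /* collection is being (re)armed */

      if (args->call_type == VLIB_NODE_RUNTIME_PERF_BEFORE)
        {
          /* snapshot state before args->node dispatches args->frame */
        }
      else /* VLIB_NODE_RUNTIME_PERF_AFTER */
        {
          /* account the delta; args->packets carries the vector size */
        }
      (void) my_ctx;
    }

    static void
    my_plugin_enable (vlib_main_t * vm, void *my_ctx)
    {
      vlib_node_runtime_perf_callback_data_t cbdata = { 0 };

      cbdata.fp = my_plugin_dispatch_cb;
      cbdata.u[0].v = my_ctx;        /* handed back as data->u[0].v above */
      clib_callback_data_add (&vm->vlib_node_runtime_perf_callbacks, cbdata);
    }
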
diff --git a/src/plugins/mdata/mdata.c b/src/plugins/mdata/mdata.c
index fc5bbfbb571..f74564eb33c 100644
--- a/src/plugins/mdata/mdata.c
+++ b/src/plugins/mdata/mdata.c
@@ -21,6 +21,7 @@
#include <vlibapi/api.h>
#include <vlibmemory/api.h>
+#include <vppinfra/callback_data.h>
#include <vpp/app/version.h>
#include <stdbool.h>
@@ -42,9 +43,8 @@ static mdata_t mdata_none;
before_or_after: 0 => before, 1=> after
*/
static void
-mdata_trace_callback (vlib_main_t * vm, u64 * c0, u64 * c1,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame, int before_or_after)
+mdata_trace_callback (vlib_node_runtime_perf_callback_data_t * data,
+ vlib_node_runtime_perf_callback_args_t * args)
{
int i;
mdata_main_t *mm = &mdata_main;
@@ -53,6 +53,12 @@ mdata_trace_callback (vlib_main_t * vm, u64 * c0, u64 * c1,
u32 n_left_from;
mdata_t *before, *modifies;
u8 *after;
+ vlib_main_t *vm = args->vm;
+ vlib_frame_t *frame = args->frame;
+ vlib_node_runtime_t *node = args->node;
+
+ if (PREDICT_FALSE (args->call_type == VLIB_NODE_RUNTIME_PERF_RESET))
+ return;
/* Input nodes don't have frames, etc. */
if (frame == 0)
@@ -68,7 +74,7 @@ mdata_trace_callback (vlib_main_t * vm, u64 * c0, u64 * c1,
vlib_get_buffers (vm, from, bufs, n_left_from);
b = bufs;
- if (before_or_after == 1 /* after */ )
+ if (args->call_type == VLIB_NODE_RUNTIME_PERF_AFTER)
goto after_pass;
/* Resize the per-thread "before" vector to cover the current frame */
@@ -152,11 +158,9 @@ mdata_enable_disable (mdata_main_t * mmp, int enable_disable)
if (vlib_mains[i] == 0)
continue;
- clib_callback_enable_disable
- (vlib_mains[i]->vlib_node_runtime_perf_counter_cbs,
- vlib_mains[i]->vlib_node_runtime_perf_counter_cb_tmp,
- vlib_mains[i]->worker_thread_main_loop_callback_lock,
- (void *) mdata_trace_callback, enable_disable);
+ clib_callback_data_enable_disable
+ (&vlib_mains[i]->vlib_node_runtime_perf_callbacks,
+ mdata_trace_callback, enable_disable);
}
return rv;
diff --git a/src/plugins/perfmon/CMakeLists.txt b/src/plugins/perfmon/CMakeLists.txt
index a3f045f75f3..69e225b4a3f 100644
--- a/src/plugins/perfmon/CMakeLists.txt
+++ b/src/plugins/perfmon/CMakeLists.txt
@@ -11,7 +11,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-add_vpp_plugin(perfmon
+add_vpp_library (perfcore
SOURCES
perfmon.c
perfmon_periodic.c
@@ -32,6 +32,22 @@ add_vpp_plugin(perfmon
perfmon_intel_wsm_ep_dp.c
perfmon_intel_wsm_ep_sp.c
perfmon_intel_wsm_ex.c
+
+ INSTALL_HEADERS
+ perfmon.h
+
+ LINK_LIBRARIES
+ vppinfra
+ vlib
+ vnet
+)
+
+add_vpp_plugin(perfmon
+ SOURCES
+ perfmon_plugin.c
+
+ LINK_LIBRARIES
+ perfcore
)
option(VPP_BUILD_MAPFILE_TOOL "Build perfmon mapfile utility." OFF)
diff --git a/src/plugins/perfmon/perfmon.c b/src/plugins/perfmon/perfmon.c
index 7e276c30810..525a864b584 100644
--- a/src/plugins/perfmon/perfmon.c
+++ b/src/plugins/perfmon/perfmon.c
@@ -16,7 +16,6 @@
*/
#include <vnet/vnet.h>
-#include <vnet/plugin/plugin.h>
#include <perfmon/perfmon.h>
#include <perfmon/perfmon_intel.h>
@@ -98,6 +97,7 @@ perfmon_init (vlib_main_t * vm)
u32 cpuid;
u8 model, stepping;
perfmon_intel_pmc_event_t *ev;
+ int i;
pm->vlib_main = vm;
pm->vnet_main = vnet_get_main ();
@@ -109,9 +109,17 @@ perfmon_init (vlib_main_t * vm)
/* Default data collection interval */
pm->timeout_interval = 2.0; /* seconds */
- vec_validate (pm->pm_fds, 1);
- vec_validate (pm->perf_event_pages, 1);
- vec_validate (pm->rdpmc_indices, 1);
+
+ vec_validate (pm->threads, vlib_get_thread_main ()->n_vlib_mains - 1);
+ for (i = 0; i < vec_len (pm->threads); i++)
+ {
+ perfmon_thread_t *pt = clib_mem_alloc_aligned
+ (sizeof (perfmon_thread_t), CLIB_CACHE_LINE_BYTES);
+ clib_memset (pt, 0, sizeof (*pt));
+ pm->threads[i] = pt;
+ pt->pm_fds[0] = -1;
+ pt->pm_fds[1] = -1;
+ }
pm->page_size = getpagesize ();
pm->perfmon_table = 0;
@@ -147,18 +155,7 @@ perfmon_init (vlib_main_t * vm)
VLIB_INIT_FUNCTION (perfmon_init);
-/* *INDENT-OFF* */
-VLIB_PLUGIN_REGISTER () =
-{
- .version = VPP_BUILD_VER,
- .description = "Performance Monitor",
-#if !defined(__x86_64__)
- .default_disabled = 1,
-#endif
-};
-/* *INDENT-ON* */
-
-static uword
+uword
unformat_processor_event (unformat_input_t * input, va_list * args)
{
perfmon_main_t *pm = va_arg (*args, perfmon_main_t *);
@@ -185,6 +182,10 @@ unformat_processor_event (unformat_input_t * input, va_list * args)
pe_config |= pm->perfmon_table[idx].event_code[0];
pe_config |= pm->perfmon_table[idx].umask << 8;
+ pe_config |= pm->perfmon_table[idx].edge << 18;
+ pe_config |= pm->perfmon_table[idx].anyt << 21;
+ pe_config |= pm->perfmon_table[idx].inv << 23;
+ pe_config |= pm->perfmon_table[idx].cmask << 24;
ep->name = (char *) hp->key;
ep->pe_type = PERF_TYPE_RAW;
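
The four new table fields (edge, anyt, inv, cmask) added in this hunk are folded into the raw perf_event config word alongside the event code and umask. The shifts match the Intel IA32_PERFEVTSELx layout (event select 7:0, unit mask 15:8, edge 18, any-thread 21, invert 23, counter mask 31:24); the sketch below just spells that out. Here ev is a hypothetical pointer to one perfmon_intel_pmc_event_t table entry.

    u64 pe_config = 0;

    pe_config |= ev->event_code[0];      /* event select, bits 7:0   */
    pe_config |= ev->umask << 8;         /* unit mask,    bits 15:8  */
    pe_config |= ev->edge << 18;         /* edge detect,  bit  18    */
    pe_config |= ev->anyt << 21;         /* any-thread,   bit  21    */
    pe_config |= ev->inv << 23;          /* invert cmask, bit  23    */
    pe_config |= ev->cmask << 24;        /* counter mask, bits 31:24 */

    /* e.g. uops_issued.stall_cycles (event 0x0E, umask 0x01, cmask=1, inv=1)
       yields pe_config = 0x0180010E */
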
diff --git a/src/plugins/perfmon/perfmon.h b/src/plugins/perfmon/perfmon.h
index 000e3c2849c..c8782023597 100644
--- a/src/plugins/perfmon/perfmon.h
+++ b/src/plugins/perfmon/perfmon.h
@@ -78,6 +78,32 @@ typedef struct
typedef struct
{
+ u64 ticks[2];
+ u64 vectors;
+} perfmon_counters_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
+ /* Current counters */
+ u64 c[2];
+
+ /* Current perf_event file descriptors, per thread */
+ int pm_fds[2];
+
+ /* mmap base of mapped struct perf_event_mmap_page */
+ u8 *perf_event_pages[2];
+
+ u32 rdpmc_indices[2];
+
+ /* vector of counters by node index */
+ perfmon_counters_t *counters;
+
+} perfmon_thread_t;
+
+typedef struct
+{
/* API message ID base */
u16 msg_id_base;
@@ -112,17 +138,15 @@ typedef struct
/* Current event (index) being collected */
u32 current_event;
int n_active;
- u32 **rdpmc_indices;
- /* mmap base / size of (mapped) struct perf_event_mmap_page */
- u8 ***perf_event_pages;
+ /* mmap size of (mapped) struct perf_event_mmap_page */
u32 page_size;
- /* Current perf_event file descriptors, per thread */
- int **pm_fds;
-
/* thread bitmap */
uword *thread_bitmap;
+ /* per-thread data */
+ perfmon_thread_t **threads;
+
/* Logging */
vlib_log_class_t log_class;
@@ -137,6 +161,8 @@ extern perfmon_main_t perfmon_main;
extern vlib_node_registration_t perfmon_periodic_node;
uword *perfmon_parse_table (perfmon_main_t * pm, char *path, char *filename);
+uword unformat_processor_event (unformat_input_t * input, va_list * args);
+
/* Periodic function events */
#define PERFMON_START 1
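
With this change the 2-D per-thread vectors that used to live in perfmon_main_t (pm_fds, perf_event_pages, rdpmc_indices) move into one cache-line-aligned perfmon_thread_t per vlib_main. A minimal, illustrative sketch of walking the resulting per-node accumulators; the loop bounds and skip condition mirror the scrape logic later in this patch.

    perfmon_main_t *pm = &perfmon_main;
    int thread, node_index;

    for (thread = 0; thread < vec_len (pm->threads); thread++)
      {
        perfmon_thread_t *pt = pm->threads[thread];

        for (node_index = 0; node_index < vec_len (pt->counters); node_index++)
          {
            perfmon_counters_t *ctr = &pt->counters[node_index];

            if (ctr->ticks[0] == 0 && ctr->ticks[1] == 0)
              continue;       /* node never ran while events were armed */
            /* ctr->ticks[0..1]: PMC deltas, ctr->vectors: packets dispatched */
          }
      }
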
diff --git a/src/plugins/perfmon/perfmon_intel.h b/src/plugins/perfmon/perfmon_intel.h
index 6bb849244d5..475309124ea 100644
--- a/src/plugins/perfmon/perfmon_intel.h
+++ b/src/plugins/perfmon/perfmon_intel.h
@@ -25,6 +25,10 @@ typedef struct
{
u8 event_code[2];
u8 umask;
+ u8 cmask;
+ u8 inv;
+ u8 anyt;
+ u8 edge;
char *event_name;
} perfmon_intel_pmc_event_t;
diff --git a/src/plugins/perfmon/perfmon_intel_skl.c b/src/plugins/perfmon/perfmon_intel_skl.c
index 726dbb4dd8c..b1c03140651 100644
--- a/src/plugins/perfmon/perfmon_intel_skl.c
+++ b/src/plugins/perfmon/perfmon_intel_skl.c
@@ -88,6 +88,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x0D},
.umask = 0x01,
+ .anyt = 1,
.event_name = "int_misc.recovery_cycles_any",
},
{
@@ -103,6 +104,8 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x0E},
.umask = 0x01,
+ .cmask = 1,
+ .inv = 1,
.event_name = "uops_issued.stall_cycles",
},
{
@@ -233,6 +236,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x3C},
.umask = 0x00,
+ .anyt = 1,
.event_name = "cpu_clk_unhalted.thread_p_any",
},
{
@@ -248,6 +252,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x3C},
.umask = 0x01,
+ .anyt = 1,
.event_name = "cpu_clk_thread_unhalted.ref_xclk_any",
},
{
@@ -268,6 +273,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x48},
.umask = 0x01,
+ .cmask = 1,
.event_name = "l1d_pend_miss.pending",
},
{
@@ -308,6 +314,12 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x49},
.umask = 0x10,
+ .cmask = 1,
+ .event_name = "dtlb_store_misses.walk_active",
+ },
+ {
+ .event_code = {0x49},
+ .umask = 0x10,
.event_name = "dtlb_store_misses.walk_pending",
},
{
@@ -403,6 +415,8 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x5E},
.umask = 0x01,
+ .cmask = 1,
+ .inv = 1,
.event_name = "rs_events.empty_end",
},
{
@@ -413,6 +427,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x60},
.umask = 0x01,
+ .cmask = 1,
.event_name = "offcore_requests_outstanding.cycles_with_demand_data_rd",
},
{
@@ -423,6 +438,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x60},
.umask = 0x02,
+ .cmask = 1,
.event_name = "offcore_requests_outstanding.cycles_with_demand_code_rd",
},
{
@@ -433,6 +449,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x60},
.umask = 0x04,
+ .cmask = 1,
.event_name = "offcore_requests_outstanding.cycles_with_demand_rfo",
},
{
@@ -443,6 +460,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x60},
.umask = 0x08,
+ .cmask = 1,
.event_name = "offcore_requests_outstanding.cycles_with_data_rd",
},
{
@@ -458,6 +476,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x79},
.umask = 0x04,
+ .cmask = 1,
.event_name = "idq.mite_cycles",
},
{
@@ -468,6 +487,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x79},
.umask = 0x08,
+ .cmask = 1,
.event_name = "idq.dsb_cycles",
},
{
@@ -478,11 +498,13 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x79},
.umask = 0x18,
+ .cmask = 4,
.event_name = "idq.all_dsb_cycles_4_uops",
},
{
.event_code = {0x79},
.umask = 0x18,
+ .cmask = 1,
.event_name = "idq.all_dsb_cycles_any_uops",
},
{
@@ -503,11 +525,13 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x79},
.umask = 0x30,
+ .cmask = 1,
.event_name = "idq.ms_cycles",
},
{
.event_code = {0x79},
.umask = 0x30,
+ .edge = 1,
.event_name = "idq.ms_switches",
},
{
@@ -588,26 +612,32 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x9C},
.umask = 0x01,
+ .cmask = 4,
.event_name = "idq_uops_not_delivered.cycles_0_uops_deliv.core",
},
{
.event_code = {0x9C},
.umask = 0x01,
+ .cmask = 3,
.event_name = "idq_uops_not_delivered.cycles_le_1_uop_deliv.core",
},
{
.event_code = {0x9C},
.umask = 0x01,
+ .cmask = 4,
.event_name = "idq_uops_not_delivered.cycles_le_2_uop_deliv.core",
},
{
.event_code = {0x9C},
.umask = 0x01,
+ .cmask = 1,
.event_name = "idq_uops_not_delivered.cycles_le_3_uop_deliv.core",
},
{
.event_code = {0x9C},
.umask = 0x01,
+ .cmask = 1,
+ .inv = 1,
.event_name = "idq_uops_not_delivered.cycles_fe_was_ok",
},
{
@@ -663,36 +693,43 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0xA3},
.umask = 0x01,
+ .cmask = 1,
.event_name = "cycle_activity.cycles_l2_miss",
},
{
.event_code = {0xA3},
.umask = 0x04,
+ .cmask = 4,
.event_name = "cycle_activity.stalls_total",
},
{
.event_code = {0xA3},
.umask = 0x05,
+ .cmask = 5,
.event_name = "cycle_activity.stalls_l2_miss",
},
{
.event_code = {0xA3},
.umask = 0x08,
+ .cmask = 8,
.event_name = "cycle_activity.cycles_l1d_miss",
},
{
.event_code = {0xA3},
.umask = 0x0C,
+ .cmask = 12,
.event_name = "cycle_activity.stalls_l1d_miss",
},
{
.event_code = {0xA3},
.umask = 0x10,
+ .cmask = 16,
.event_name = "cycle_activity.cycles_mem_any",
},
{
.event_code = {0xA3},
.umask = 0x14,
+ .cmask = 20,
.event_name = "cycle_activity.stalls_mem_any",
},
{
@@ -733,11 +770,13 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0xA8},
.umask = 0x01,
+ .cmask = 1,
.event_name = "lsd.cycles_active",
},
{
.event_code = {0xA8},
.umask = 0x01,
+ .cmask = 4,
.event_name = "lsd.cycles_4_uops",
},
{
@@ -788,26 +827,32 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0xB1},
.umask = 0x01,
+ .cmask = 1,
+ .inv = 1,
.event_name = "uops_executed.stall_cycles",
},
{
.event_code = {0xB1},
.umask = 0x01,
+ .cmask = 1,
.event_name = "uops_executed.cycles_ge_1_uop_exec",
},
{
.event_code = {0xB1},
.umask = 0x01,
+ .cmask = 2,
.event_name = "uops_executed.cycles_ge_2_uops_exec",
},
{
.event_code = {0xB1},
.umask = 0x01,
+ .cmask = 3,
.event_name = "uops_executed.cycles_ge_3_uops_exec",
},
{
.event_code = {0xB1},
.umask = 0x01,
+ .cmask = 4,
.event_name = "uops_executed.cycles_ge_4_uops_exec",
},
{
@@ -818,26 +863,32 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0xB1},
.umask = 0x02,
+ .cmask = 1,
.event_name = "uops_executed.core_cycles_ge_1",
},
{
.event_code = {0xB1},
.umask = 0x02,
+ .cmask = 2,
.event_name = "uops_executed.core_cycles_ge_2",
},
{
.event_code = {0xB1},
.umask = 0x02,
+ .cmask = 3,
.event_name = "uops_executed.core_cycles_ge_3",
},
{
.event_code = {0xB1},
.umask = 0x02,
+ .cmask = 4,
.event_name = "uops_executed.core_cycles_ge_4",
},
{
.event_code = {0xB1},
.umask = 0x02,
+ .cmask = 1,
+ .inv = 1,
.event_name = "uops_executed.core_cycles_none",
},
{
@@ -873,6 +924,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0xC0},
.umask = 0x01,
+ .cmask = 10,
.event_name = "inst_retired.total_cycles_ps",
},
{
@@ -883,16 +935,22 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0xC2},
.umask = 0x02,
+ .cmask = 1,
+ .inv = 1,
.event_name = "uops_retired.stall_cycles",
},
{
.event_code = {0xC2},
.umask = 0x02,
+ .cmask = 10,
+ .inv = 1,
.event_name = "uops_retired.total_cycles",
},
{
.event_code = {0xC3},
.umask = 0x01,
+ .cmask = 1,
+ .edge = 1,
.event_name = "machine_clears.count",
},
{
@@ -1083,6 +1141,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0xCA},
.umask = 0x1E,
+ .cmask = 1,
.event_name = "fp_assist.any",
},
{
diff --git a/src/plugins/perfmon/perfmon_intel_skx.c b/src/plugins/perfmon/perfmon_intel_skx.c
index 399174477ac..9de202d22a3 100644
--- a/src/plugins/perfmon/perfmon_intel_skx.c
+++ b/src/plugins/perfmon/perfmon_intel_skx.c
@@ -88,6 +88,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x0D},
.umask = 0x01,
+ .anyt = 1,
.event_name = "int_misc.recovery_cycles_any",
},
{
@@ -98,6 +99,8 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x0E},
.umask = 0x01,
+ .cmask = 1,
+ .inv = 1,
.event_name = "uops_issued.stall_cycles",
},
{
@@ -253,6 +256,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x3C},
.umask = 0x00,
+ .anyt = 1,
.event_name = "cpu_clk_unhalted.thread_p_any",
},
{
@@ -268,6 +272,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x3C},
.umask = 0x01,
+ .anyt = 1,
.event_name = "cpu_clk_thread_unhalted.ref_xclk_any",
},
{
@@ -288,6 +293,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x48},
.umask = 0x01,
+ .cmask = 1,
.event_name = "l1d_pend_miss.pending_cycles",
},
{
@@ -328,6 +334,12 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x49},
.umask = 0x10,
+ .cmask = 1,
+ .event_name = "dtlb_store_misses.walk_active",
+ },
+ {
+ .event_code = {0x49},
+ .umask = 0x10,
.event_name = "dtlb_store_misses.walk_pending",
},
{
@@ -418,6 +430,8 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x5E},
.umask = 0x01,
+ .cmask = 1,
+ .inv = 1,
.event_name = "rs_events.empty_end",
},
{
@@ -428,6 +442,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x60},
.umask = 0x01,
+ .cmask = 1,
.event_name = "offcore_requests_outstanding.cycles_with_demand_data_rd",
},
{
@@ -443,6 +458,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x60},
.umask = 0x02,
+ .cmask = 1,
.event_name = "offcore_requests_outstanding.cycles_with_demand_code_rd",
},
{
@@ -453,11 +469,13 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x60},
.umask = 0x04,
+ .cmask = 1,
.event_name = "offcore_requests_outstanding.cycles_with_demand_rfo",
},
{
.event_code = {0x60},
.umask = 0x08,
+ .cmask = 1,
.event_name = "offcore_requests_outstanding.cycles_with_data_rd",
},
{
@@ -473,6 +491,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x79},
.umask = 0x04,
+ .cmask = 1,
.event_name = "idq.mite_cycles",
},
{
@@ -483,6 +502,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x79},
.umask = 0x08,
+ .cmask = 1,
.event_name = "idq.dsb_cycles",
},
{
@@ -498,11 +518,13 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x79},
.umask = 0x18,
+ .cmask = 1,
.event_name = "idq.all_dsb_cycles_any_uops",
},
{
.event_code = {0x79},
.umask = 0x18,
+ .cmask = 4,
.event_name = "idq.all_dsb_cycles_4_uops",
},
{
@@ -523,6 +545,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x79},
.umask = 0x30,
+ .cmask = 1,
.event_name = "idq.ms_cycles",
},
{
@@ -533,6 +556,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x79},
.umask = 0x30,
+ .edge = 1,
.event_name = "idq.ms_switches",
},
{
@@ -603,26 +627,32 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0x9C},
.umask = 0x01,
+ .cmask = 1,
+ .inv = 1,
.event_name = "idq_uops_not_delivered.cycles_fe_was_ok",
},
{
.event_code = {0x9C},
.umask = 0x01,
+ .cmask = 1,
.event_name = "idq_uops_not_delivered.cycles_le_3_uop_deliv.core",
},
{
.event_code = {0x9C},
.umask = 0x01,
+ .cmask = 2,
.event_name = "idq_uops_not_delivered.cycles_le_2_uop_deliv.core",
},
{
.event_code = {0x9C},
.umask = 0x01,
+ .cmask = 3,
.event_name = "idq_uops_not_delivered.cycles_le_1_uop_deliv.core",
},
{
.event_code = {0x9C},
.umask = 0x01,
+ .cmask = 4,
.event_name = "idq_uops_not_delivered.cycles_0_uops_deliv.core",
},
{
@@ -683,36 +713,43 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0xA3},
.umask = 0x01,
+ .cmask = 1,
.event_name = "cycle_activity.cycles_l2_miss",
},
{
.event_code = {0xA3},
.umask = 0x04,
+ .cmask = 4,
.event_name = "cycle_activity.stalls_total",
},
{
.event_code = {0xA3},
.umask = 0x05,
+ .cmask = 5,
.event_name = "cycle_activity.stalls_l2_miss",
},
{
.event_code = {0xA3},
.umask = 0x08,
+ .cmask = 8,
.event_name = "cycle_activity.cycles_l1d_miss",
},
{
.event_code = {0xA3},
.umask = 0x0C,
+ .cmask = 12,
.event_name = "cycle_activity.stalls_l1d_miss",
},
{
.event_code = {0xA3},
.umask = 0x10,
+ .cmask = 16,
.event_name = "cycle_activity.cycles_mem_any",
},
{
.event_code = {0xA3},
.umask = 0x14,
+ .cmask = 20,
.event_name = "cycle_activity.stalls_mem_any",
},
{
@@ -753,11 +790,13 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0xA8},
.umask = 0x01,
+ .cmask = 4,
.event_name = "lsd.cycles_4_uops",
},
{
.event_code = {0xA8},
.umask = 0x01,
+ .cmask = 1,
.event_name = "lsd.cycles_active",
},
{
@@ -803,26 +842,32 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0xB1},
.umask = 0x01,
+ .cmask = 4,
.event_name = "uops_executed.cycles_ge_4_uops_exec",
},
{
.event_code = {0xB1},
.umask = 0x01,
+ .cmask = 3,
.event_name = "uops_executed.cycles_ge_3_uops_exec",
},
{
.event_code = {0xB1},
.umask = 0x01,
+ .cmask = 2,
.event_name = "uops_executed.cycles_ge_2_uops_exec",
},
{
.event_code = {0xB1},
.umask = 0x01,
+ .cmask = 1,
.event_name = "uops_executed.cycles_ge_1_uop_exec",
},
{
.event_code = {0xB1},
.umask = 0x01,
+ .cmask = 1,
+ .inv = 1,
.event_name = "uops_executed.stall_cycles",
},
{
@@ -838,26 +883,32 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0xB1},
.umask = 0x02,
+ .cmask = 1,
+ .inv = 1,
.event_name = "uops_executed.core_cycles_none",
},
{
.event_code = {0xB1},
.umask = 0x02,
+ .cmask = 4,
.event_name = "uops_executed.core_cycles_ge_4",
},
{
.event_code = {0xB1},
.umask = 0x02,
+ .cmask = 3,
.event_name = "uops_executed.core_cycles_ge_3",
},
{
.event_code = {0xB1},
.umask = 0x02,
+ .cmask = 2,
.event_name = "uops_executed.core_cycles_ge_2",
},
{
.event_code = {0xB1},
.umask = 0x02,
+ .cmask = 1,
.event_name = "uops_executed.core_cycles_ge_1",
},
{
@@ -898,16 +949,21 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0xC0},
.umask = 0x01,
+ .cmask = 10,
.event_name = "inst_retired.total_cycles_ps",
},
{
.event_code = {0xC2},
.umask = 0x02,
+ .cmask = 10,
+ .inv = 1,
.event_name = "uops_retired.total_cycles",
},
{
.event_code = {0xC2},
.umask = 0x02,
+ .cmask = 1,
+ .inv = 1,
.event_name = "uops_retired.stall_cycles",
},
{
@@ -918,6 +974,8 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0xC3},
.umask = 0x01,
+ .cmask = 1,
+ .edge = 1,
.event_name = "machine_clears.count",
},
{
@@ -1118,6 +1176,7 @@ static perfmon_intel_pmc_event_t event_table[] = {
{
.event_code = {0xCA},
.umask = 0x1E,
+ .cmask = 1,
.event_name = "fp_assist.any",
},
{
diff --git a/src/plugins/perfmon/perfmon_periodic.c b/src/plugins/perfmon/perfmon_periodic.c
index 37d669b8d13..de31221f6f4 100644
--- a/src/plugins/perfmon/perfmon_periodic.c
+++ b/src/plugins/perfmon/perfmon_periodic.c
@@ -33,52 +33,65 @@ perf_event_open (struct perf_event_attr *hw_event, pid_t pid, int cpu,
}
static void
-read_current_perf_counters (vlib_main_t * vm, u64 * c0, u64 * c1,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame, int before_or_after)
+read_current_perf_counters (vlib_node_runtime_perf_callback_data_t * data,
+ vlib_node_runtime_perf_callback_args_t * args)
{
int i;
- u64 *cc;
perfmon_main_t *pm = &perfmon_main;
- uword my_thread_index = vm->thread_index;
+ perfmon_thread_t *pt = data->u[0].v;
+ u64 c[2] = { 0, 0 };
+ u64 *cc;
- *c0 = *c1 = 0;
+ if (PREDICT_FALSE (args->call_type == VLIB_NODE_RUNTIME_PERF_RESET))
+ return;
+
+ if (args->call_type == VLIB_NODE_RUNTIME_PERF_BEFORE)
+ cc = pt->c;
+ else
+ cc = c;
for (i = 0; i < pm->n_active; i++)
{
- cc = (i == 0) ? c0 : c1;
- if (pm->rdpmc_indices[i][my_thread_index] != ~0)
- *cc = clib_rdpmc ((int) pm->rdpmc_indices[i][my_thread_index]);
+ if (pt->rdpmc_indices[i] != ~0)
+ cc[i] = clib_rdpmc ((int) pt->rdpmc_indices[i]);
else
{
u64 sw_value;
int read_result;
- if ((read_result = read (pm->pm_fds[i][my_thread_index], &sw_value,
- sizeof (sw_value)) != sizeof (sw_value)))
+ if ((read_result = read (pt->pm_fds[i], &sw_value,
+ sizeof (sw_value))) != sizeof (sw_value))
{
clib_unix_warning
("counter read returned %d, expected %d",
read_result, sizeof (sw_value));
- clib_callback_enable_disable
- (vm->vlib_node_runtime_perf_counter_cbs,
- vm->vlib_node_runtime_perf_counter_cb_tmp,
- vm->worker_thread_main_loop_callback_lock,
+ clib_callback_data_enable_disable
+ (&args->vm->vlib_node_runtime_perf_callbacks,
read_current_perf_counters, 0 /* enable */ );
return;
}
- *cc = sw_value;
+ cc[i] = sw_value;
}
}
+
+ if (args->call_type == VLIB_NODE_RUNTIME_PERF_AFTER)
+ {
+ u32 node_index = args->node->node_index;
+ vec_validate (pt->counters, node_index);
+ pt->counters[node_index].ticks[0] += c[0] - pt->c[0];
+ pt->counters[node_index].ticks[1] += c[1] - pt->c[1];
+ pt->counters[node_index].vectors += args->packets;
+ }
}
static void
clear_counters (perfmon_main_t * pm)
{
- int i, j;
+ int j;
vlib_main_t *vm = pm->vlib_main;
vlib_main_t *stat_vm;
- vlib_node_main_t *nm;
- vlib_node_t *n;
+ perfmon_thread_t *pt;
+ u32 len;
+
vlib_worker_thread_barrier_sync (vm);
@@ -88,26 +101,12 @@ clear_counters (perfmon_main_t * pm)
if (stat_vm == 0)
continue;
- nm = &stat_vm->node_main;
-
- /* Clear the node runtime perfmon counters */
- for (i = 0; i < vec_len (nm->nodes); i++)
- {
- n = nm->nodes[i];
- vlib_node_sync_stats (stat_vm, n);
- }
+ pt = pm->threads[j];
+ len = vec_len (pt->counters);
+ if (!len)
+ continue;
- /* And clear the node perfmon counters */
- for (i = 0; i < vec_len (nm->nodes); i++)
- {
- n = nm->nodes[i];
- n->stats_total.perf_counter0_ticks = 0;
- n->stats_total.perf_counter1_ticks = 0;
- n->stats_total.perf_counter_vectors = 0;
- n->stats_last_clear.perf_counter0_ticks = 0;
- n->stats_last_clear.perf_counter1_ticks = 0;
- n->stats_last_clear.perf_counter_vectors = 0;
- }
+ clib_memset (pt->counters, 0, len * sizeof (pt->counters[0]));
}
vlib_worker_thread_barrier_release (vm);
}
@@ -121,19 +120,20 @@ enable_current_events (perfmon_main_t * pm)
perfmon_event_config_t *c;
vlib_main_t *vm = vlib_get_main ();
u32 my_thread_index = vm->thread_index;
+ perfmon_thread_t *pt = pm->threads[my_thread_index];
u32 index;
int i, limit = 1;
int cpu;
+ vlib_node_runtime_perf_callback_data_t cbdata = { 0 };
+ cbdata.fp = read_current_perf_counters;
+ cbdata.u[0].v = pt;
+ cbdata.u[1].v = vm;
if ((pm->current_event + 1) < vec_len (pm->single_events_to_collect))
limit = 2;
for (i = 0; i < limit; i++)
{
- vec_validate (pm->pm_fds[i], vec_len (vlib_mains) - 1);
- vec_validate (pm->perf_event_pages[i], vec_len (vlib_mains) - 1);
- vec_validate (pm->rdpmc_indices[i], vec_len (vlib_mains) - 1);
-
c = vec_elt_at_index (pm->single_events_to_collect,
pm->current_event + i);
@@ -184,8 +184,8 @@ enable_current_events (perfmon_main_t * pm)
if (ioctl (fd, PERF_EVENT_IOC_ENABLE, 0) < 0)
clib_unix_warning ("enable ioctl");
- pm->perf_event_pages[i][my_thread_index] = (void *) p;
- pm->pm_fds[i][my_thread_index] = fd;
+ pt->perf_event_pages[i] = (void *) p;
+ pt->pm_fds[i] = fd;
}
/*
@@ -194,9 +194,7 @@ enable_current_events (perfmon_main_t * pm)
*/
for (i = 0; i < limit; i++)
{
- p =
- (struct perf_event_mmap_page *)
- pm->perf_event_pages[i][my_thread_index];
+ p = (struct perf_event_mmap_page *) pt->perf_event_pages[i];
/*
* Software event counters - and others not capable of being
@@ -208,16 +206,12 @@ enable_current_events (perfmon_main_t * pm)
else
index = p->index - 1;
- pm->rdpmc_indices[i][my_thread_index] = index;
+ pt->rdpmc_indices[i] = index;
}
pm->n_active = i;
/* Enable the main loop counter snapshot mechanism */
- clib_callback_enable_disable
- (vm->vlib_node_runtime_perf_counter_cbs,
- vm->vlib_node_runtime_perf_counter_cb_tmp,
- vm->worker_thread_main_loop_callback_lock,
- read_current_perf_counters, 1 /* enable */ );
+ clib_callback_data_add (&vm->vlib_node_runtime_perf_callbacks, cbdata);
}
static void
@@ -225,35 +219,30 @@ disable_events (perfmon_main_t * pm)
{
vlib_main_t *vm = vlib_get_main ();
u32 my_thread_index = vm->thread_index;
+ perfmon_thread_t *pt = pm->threads[my_thread_index];
int i;
/* Stop main loop collection */
- clib_callback_enable_disable
- (vm->vlib_node_runtime_perf_counter_cbs,
- vm->vlib_node_runtime_perf_counter_cb_tmp,
- vm->worker_thread_main_loop_callback_lock,
- read_current_perf_counters, 0 /* enable */ );
+ clib_callback_data_remove (&vm->vlib_node_runtime_perf_callbacks,
+ read_current_perf_counters);
for (i = 0; i < pm->n_active; i++)
{
- if (pm->pm_fds[i][my_thread_index] == 0)
+ if (pt->pm_fds[i] == 0)
continue;
- if (ioctl (pm->pm_fds[i][my_thread_index], PERF_EVENT_IOC_DISABLE, 0) <
- 0)
+ if (ioctl (pt->pm_fds[i], PERF_EVENT_IOC_DISABLE, 0) < 0)
clib_unix_warning ("disable ioctl");
- if (pm->perf_event_pages[i][my_thread_index])
+ if (pt->perf_event_pages[i])
{
- if (munmap (pm->perf_event_pages[i][my_thread_index],
- pm->page_size) < 0)
+ if (munmap (pt->perf_event_pages[i], pm->page_size) < 0)
clib_unix_warning ("munmap");
- pm->perf_event_pages[i][my_thread_index] = 0;
+ pt->perf_event_pages[i] = 0;
}
- (void) close (pm->pm_fds[i][my_thread_index]);
- pm->pm_fds[i][my_thread_index] = 0;
-
+ (void) close (pt->pm_fds[i]);
+ pt->pm_fds[i] = 0;
}
}
@@ -265,7 +254,7 @@ worker_thread_start_event (vlib_main_t * vm)
clib_callback_enable_disable (vm->worker_thread_main_loop_callbacks,
vm->worker_thread_main_loop_callback_tmp,
vm->worker_thread_main_loop_callback_lock,
- worker_thread_start_event, 0 /* enable */ );
+ worker_thread_start_event, 0 /* disable */ );
enable_current_events (pm);
}
@@ -276,7 +265,7 @@ worker_thread_stop_event (vlib_main_t * vm)
clib_callback_enable_disable (vm->worker_thread_main_loop_callbacks,
vm->worker_thread_main_loop_callback_tmp,
vm->worker_thread_main_loop_callback_lock,
- worker_thread_stop_event, 0 /* enable */ );
+ worker_thread_stop_event, 0 /* disable */ );
disable_events (pm);
}
@@ -329,14 +318,15 @@ scrape_and_clear_counters (perfmon_main_t * pm)
vlib_main_t *vm = pm->vlib_main;
vlib_main_t *stat_vm;
vlib_node_main_t *nm;
- vlib_node_t ***node_dups = 0;
- vlib_node_t **nodes;
- vlib_node_t *n;
+ perfmon_counters_t *ctr;
+ perfmon_counters_t *ctrs;
+ perfmon_counters_t **ctr_dups = 0;
+ perfmon_thread_t *pt;
perfmon_capture_t *c;
perfmon_event_config_t *current_event;
uword *p;
u8 *counter_name;
- u64 vectors_this_counter;
+ u32 len;
/* snapshoot the nodes, including pm counters */
vlib_worker_thread_barrier_sync (vm);
@@ -347,31 +337,16 @@ scrape_and_clear_counters (perfmon_main_t * pm)
if (stat_vm == 0)
continue;
- nm = &stat_vm->node_main;
-
- for (i = 0; i < vec_len (nm->nodes); i++)
- {
- n = nm->nodes[i];
- vlib_node_sync_stats (stat_vm, n);
- }
-
- nodes = 0;
- vec_validate (nodes, vec_len (nm->nodes) - 1);
- vec_add1 (node_dups, nodes);
-
- /* Snapshoot and clear the per-node perfmon counters */
- for (i = 0; i < vec_len (nm->nodes); i++)
+ pt = pm->threads[j];
+ len = vec_len (pt->counters);
+ ctrs = 0;
+ if (len)
{
- n = nm->nodes[i];
- nodes[i] = clib_mem_alloc (sizeof (*n));
- clib_memcpy_fast (nodes[i], n, sizeof (*n));
- n->stats_total.perf_counter0_ticks = 0;
- n->stats_total.perf_counter1_ticks = 0;
- n->stats_total.perf_counter_vectors = 0;
- n->stats_last_clear.perf_counter0_ticks = 0;
- n->stats_last_clear.perf_counter1_ticks = 0;
- n->stats_last_clear.perf_counter_vectors = 0;
+ vec_validate (ctrs, len - 1);
+ clib_memcpy (ctrs, pt->counters, len * sizeof (pt->counters[0]));
+ clib_memset (pt->counters, 0, len * sizeof (pt->counters[0]));
}
+ vec_add1 (ctr_dups, ctrs);
}
vlib_worker_thread_barrier_release (vm);
@@ -382,22 +357,21 @@ scrape_and_clear_counters (perfmon_main_t * pm)
if (stat_vm == 0)
continue;
- nodes = node_dups[j];
+ pt = pm->threads[j];
+ ctrs = ctr_dups[j];
- for (i = 0; i < vec_len (nodes); i++)
+ for (i = 0; i < vec_len (ctrs); i++)
{
u8 *capture_name;
- n = nodes[i];
+ ctr = &ctrs[i];
+ nm = &stat_vm->node_main;
- if (n->stats_total.perf_counter0_ticks == 0 &&
- n->stats_total.perf_counter1_ticks == 0)
- goto skip_this_node;
+ if (ctr->ticks[0] == 0 && ctr->ticks[1] == 0)
+ continue;
for (k = 0; k < 2; k++)
{
- u64 counter_value, counter_last_clear;
-
/*
* We collect 2 counters at once, except for the
* last counter when the user asks for an odd number of
@@ -407,20 +381,7 @@ scrape_and_clear_counters (perfmon_main_t * pm)
>= vec_len (pm->single_events_to_collect))
break;
- if (k == 0)
- {
- counter_value = n->stats_total.perf_counter0_ticks;
- counter_last_clear =
- n->stats_last_clear.perf_counter0_ticks;
- }
- else
- {
- counter_value = n->stats_total.perf_counter1_ticks;
- counter_last_clear =
- n->stats_last_clear.perf_counter1_ticks;
- }
-
- capture_name = format (0, "t%d-%v%c", j, n->name, 0);
+ capture_name = format (0, "t%d-%v%c", j, nm->nodes[i]->name, 0);
p = hash_get_mem (pm->capture_by_thread_and_node_name,
capture_name);
@@ -443,20 +404,15 @@ scrape_and_clear_counters (perfmon_main_t * pm)
current_event = pm->single_events_to_collect
+ pm->current_event + k;
counter_name = (u8 *) current_event->name;
- vectors_this_counter = n->stats_total.perf_counter_vectors -
- n->stats_last_clear.perf_counter_vectors;
vec_add1 (c->counter_names, counter_name);
- vec_add1 (c->counter_values,
- counter_value - counter_last_clear);
- vec_add1 (c->vectors_this_counter, vectors_this_counter);
+ vec_add1 (c->counter_values, ctr->ticks[k]);
+ vec_add1 (c->vectors_this_counter, ctr->vectors);
}
- skip_this_node:
- clib_mem_free (n);
}
- vec_free (nodes);
+ vec_free (ctrs);
}
- vec_free (node_dups);
+ vec_free (ctr_dups);
}
static void
@@ -492,9 +448,8 @@ handle_timeout (vlib_main_t * vm, perfmon_main_t * pm, f64 now)
for (i = 1; i < vec_len (vlib_mains); i++)
{
/* Has the worker actually stopped collecting data? */
- while (clib_callback_is_set
- (vlib_mains[i]->worker_thread_main_loop_callbacks,
- vlib_mains[i]->worker_thread_main_loop_callback_lock,
+ while (clib_callback_data_is_set
+ (&vm->vlib_node_runtime_perf_callbacks,
read_current_perf_counters))
{
if (vlib_time_now (vm) > deadman)
@@ -528,7 +483,7 @@ handle_timeout (vlib_main_t * vm, perfmon_main_t * pm, f64 now)
(vlib_mains[i]->worker_thread_main_loop_callbacks,
vlib_mains[i]->worker_thread_main_loop_callback_tmp,
vlib_mains[i]->worker_thread_main_loop_callback_lock,
- worker_thread_start_event, 1 /* enable */ );
+ worker_thread_start_event, 0 /* disable */ );
}
}
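
Because the per-node samples now stay inside the plugin, a scraped capture ends up holding one tick total and one vector total per event, and the per-packet cost is simply their ratio. A rough sketch, assuming the counter_values / vectors_this_counter vectors that scrape_and_clear_counters() fills in above; the helper name is made up.

    static f64
    capture_ticks_per_packet (perfmon_capture_t * c, int k)
    {
      /* guard against captures with fewer events than k, or no packets */
      if (k >= vec_len (c->counter_values) || c->vectors_this_counter[k] == 0)
        return 0.0;

      return (f64) c->counter_values[k] / (f64) c->vectors_this_counter[k];
    }
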
diff --git a/src/plugins/perfmon/perfmon_plugin.c b/src/plugins/perfmon/perfmon_plugin.c
new file mode 100644
index 00000000000..1d56573abd5
--- /dev/null
+++ b/src/plugins/perfmon/perfmon_plugin.c
@@ -0,0 +1,38 @@
+/*
+ * perfmon_plugin.c - perf monitor plugin
+ *
+ * Copyright (c) <current-year> <your-organization>
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/plugin/plugin.h>
+#include <vpp/app/version.h>
+
+/* *INDENT-OFF* */
+VLIB_PLUGIN_REGISTER () =
+{
+ .version = VPP_BUILD_VER,
+ .description = "Performance Monitor",
+#if !defined(__x86_64__)
+ .default_disabled = 1,
+#endif
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */