Diffstat (limited to 'vppinfra/vppinfra/time.h')
-rw-r--r--  vppinfra/vppinfra/time.h | 144
1 file changed, 89 insertions(+), 55 deletions(-)
diff --git a/vppinfra/vppinfra/time.h b/vppinfra/vppinfra/time.h
index ffafaf70b21..3b89cf789fe 100644
--- a/vppinfra/vppinfra/time.h
+++ b/vppinfra/vppinfra/time.h
@@ -40,7 +40,8 @@
#include <vppinfra/clib.h>
-typedef struct {
+typedef struct
+{
/* Total run time in clock cycles
since clib_time_init call. */
u64 total_cpu_time;
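For reference, the accessors changed in the hunks below read several clib_time_t fields that this hunk elides. A minimal sketch of the struct as those accessors imply it (field names are taken from the code further down; exact types and any additional fields are guesses):

typedef struct
{
  /* Total run time in clock cycles since clib_time_init call. */
  u64 total_cpu_time;

  /* Fields referenced by clib_time_now_internal () below. */
  u64 last_cpu_time;		/* CPU timestamp at the previous call */
  u64 last_verify_cpu_time;	/* timestamp at the last frequency check */
  u64 log2_clocks_per_frequency_verify;	/* re-verify interval, power of 2 */
  f64 seconds_per_clock;	/* reciprocal of the measured clock rate */
} clib_time_t;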
@@ -68,71 +69,74 @@ typedef struct {
/* Return CPU time stamp as 64bit number. */
#if defined(__x86_64__) || defined(i386)
-always_inline u64 clib_cpu_time_now (void)
+always_inline u64
+clib_cpu_time_now (void)
{
u32 a, d;
- asm volatile ("rdtsc"
- : "=a" (a), "=d" (d));
+ asm volatile ("rdtsc":"=a" (a), "=d" (d));
return (u64) a + ((u64) d << (u64) 32);
}
#elif defined (__powerpc64__)
-always_inline u64 clib_cpu_time_now (void)
+always_inline u64
+clib_cpu_time_now (void)
{
u64 t;
- asm volatile ("mftb %0" : "=r" (t));
+ asm volatile ("mftb %0":"=r" (t));
return t;
}
#elif defined (__SPU__)
-always_inline u64 clib_cpu_time_now (void)
+always_inline u64
+clib_cpu_time_now (void)
{
#ifdef _XLC
return spu_rdch (0x8);
#else
- return 0 /* __builtin_si_rdch (0x8) FIXME */;
+ return 0 /* __builtin_si_rdch (0x8) FIXME */ ;
#endif
}
#elif defined (__powerpc__)
-always_inline u64 clib_cpu_time_now (void)
+always_inline u64
+clib_cpu_time_now (void)
{
u32 hi1, hi2, lo;
- asm volatile (
- "1:\n"
- "mftbu %[hi1]\n"
- "mftb %[lo]\n"
- "mftbu %[hi2]\n"
- "cmpw %[hi1],%[hi2]\n"
- "bne 1b\n"
- : [hi1] "=r" (hi1), [hi2] "=r" (hi2), [lo] "=r" (lo));
+ asm volatile ("1:\n"
+ "mftbu %[hi1]\n"
+ "mftb %[lo]\n"
+ "mftbu %[hi2]\n"
+ "cmpw %[hi1],%[hi2]\n"
+ "bne 1b\n":[hi1] "=r" (hi1),[hi2] "=r" (hi2),[lo] "=r" (lo));
return (u64) lo + ((u64) hi2 << (u64) 32);
}
#elif defined (__arm__)
#if defined(__ARM_ARCH_8A__)
-always_inline u64 clib_cpu_time_now (void) /* We may run arm64 in aarch32 mode, to leverage 64bit counter */
+always_inline u64
+clib_cpu_time_now (void) /* We may run arm64 in aarch32 mode, to leverage 64bit counter */
{
u64 tsc;
- asm volatile("mrrc p15, 0, %Q0, %R0, c9" : "=r" (tsc));
+ asm volatile ("mrrc p15, 0, %Q0, %R0, c9":"=r" (tsc));
return tsc;
}
#elif defined(__ARM_ARCH_7A__)
-always_inline u64 clib_cpu_time_now (void)
+always_inline u64
+clib_cpu_time_now (void)
{
u32 tsc;
- asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (tsc));
- return (u64)tsc;
+ asm volatile ("mrc p15, 0, %0, c9, c13, 0":"=r" (tsc));
+ return (u64) tsc;
}
#else
-always_inline u64 clib_cpu_time_now (void)
+always_inline u64
+clib_cpu_time_now (void)
{
u32 lo;
- asm volatile ("mrc p15, 0, %[lo], c15, c12, 1"
- : [lo] "=r" (lo));
+ asm volatile ("mrc p15, 0, %[lo], c15, c12, 1":[lo] "=r" (lo));
return (u64) lo;
}
#endif
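Two recombination patterns in the hunk above deserve a note. On x86, rdtsc leaves the 64-bit timestamp split across EDX:EAX, hence the (u64) a + ((u64) d << 32) reassembly. On 32-bit PowerPC there is no single 64-bit read of the time base, so the code reads upper, lower, then upper again and branches back until both upper reads agree, guarding against the low word overflowing into the high word between the reads. The same idea in portable C, assuming hypothetical read_hi ()/read_lo () accessors for the two halves of a free-running counter:

static inline u64
read_split_counter (void)
{
  u32 hi1, hi2, lo;
  do
    {
      hi1 = read_hi ();		/* upper 32 bits (hypothetical accessor) */
      lo = read_lo ();		/* lower 32 bits (hypothetical accessor) */
      hi2 = read_hi ();		/* upper 32 bits, again */
    }
  while (hi1 != hi2);		/* retry if a carry slipped in between */
  return (u64) lo + ((u64) hi2 << 32);
}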
@@ -140,31 +144,34 @@ always_inline u64 clib_cpu_time_now (void)
#elif defined (__xtensa__)
/* Stub for now. */
-always_inline u64 clib_cpu_time_now (void)
-{ return 0; }
+always_inline u64
+clib_cpu_time_now (void)
+{
+ return 0;
+}
#elif defined (__TMS320C6X__)
-always_inline u64 clib_cpu_time_now (void)
+always_inline u64
+clib_cpu_time_now (void)
{
u32 l, h;
asm volatile (" dint\n"
" mvc .s2 TSCL,%0\n"
- " mvc .s2 TSCH,%1\n"
- " rint\n"
- : "=b" (l), "=b" (h));
+ " mvc .s2 TSCH,%1\n" " rint\n":"=b" (l), "=b" (h));
- return ((u64)h << 32) | l;
+ return ((u64) h << 32) | l;
}
#elif defined (__aarch64__)
-always_inline u64 clib_cpu_time_now (void)
+always_inline u64
+clib_cpu_time_now (void)
{
u64 tsc;
/* Works on Cavium ThunderX. Other platforms: YMMV */
- asm volatile("mrs %0, cntvct_el0" : "=r" (tsc));
+ asm volatile ("mrs %0, cntvct_el0":"=r" (tsc));
return tsc;
}
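On TMS320C6X, the dint/rint pair disables interrupts around the two mvc reads so TSCL and TSCH are sampled as a consistent pair. On aarch64 the code reads cntvct_el0, the virtual counter of the ARM generic timer rather than a true cycle counter; it ticks at a fixed rate published in cntfrq_el0. clib instead measures seconds_per_clock at init, but for illustration, a hedged sketch of converting that counter to seconds directly (arch_counter_seconds is a hypothetical helper, not part of this file):

#if defined (__aarch64__)
always_inline f64
arch_counter_seconds (void)
{
  u64 cnt, freq;
  asm volatile ("mrs %0, cntvct_el0":"=r" (cnt));	/* counter value */
  asm volatile ("mrs %0, cntfrq_el0":"=r" (freq));	/* ticks per second */
  return (f64) cnt / (f64) freq;
}
#endif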
@@ -176,14 +183,17 @@ always_inline u64 clib_cpu_time_now (void)
void clib_time_verify_frequency (clib_time_t * c);
-always_inline f64 clib_time_now_internal (clib_time_t * c, u64 n)
+always_inline f64
+clib_time_now_internal (clib_time_t * c, u64 n)
{
u64 l = c->last_cpu_time;
u64 t = c->total_cpu_time;
t += n - l;
c->total_cpu_time = t;
c->last_cpu_time = n;
- if (PREDICT_FALSE ((c->last_cpu_time - c->last_verify_cpu_time) >> c->log2_clocks_per_frequency_verify))
+ if (PREDICT_FALSE
+ ((c->last_cpu_time -
+ c->last_verify_cpu_time) >> c->log2_clocks_per_frequency_verify))
clib_time_verify_frequency (c);
return t * c->seconds_per_clock;
}
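The PREDICT_FALSE condition above uses a shift as a cheap threshold test: for unsigned x, (x >> k) is nonzero exactly when x >= 2^k, so clib_time_verify_frequency () runs once at least 2^log2_clocks_per_frequency_verify clocks have elapsed since the last verification. Spelled out (an illustrative helper, not part of the file):

always_inline int
frequency_verify_due (clib_time_t * c)
{
  u64 elapsed = c->last_cpu_time - c->last_verify_cpu_time;
  /* Equivalent to elapsed >= ((u64) 1 << c->log2_clocks_per_frequency_verify),
     without materializing the power-of-two constant. */
  return (elapsed >> c->log2_clocks_per_frequency_verify) != 0;
}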
@@ -191,10 +201,11 @@ always_inline f64 clib_time_now_internal (clib_time_t * c, u64 n)
always_inline f64
clib_time_now (clib_time_t * c)
{
- return clib_time_now_internal (c, clib_cpu_time_now());
+ return clib_time_now_internal (c, clib_cpu_time_now ());
}
-always_inline void clib_cpu_time_wait (u64 dt)
+always_inline void
+clib_cpu_time_wait (u64 dt)
{
u64 t_end = clib_cpu_time_now () + dt;
while (clib_cpu_time_now () < t_end)
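clib_cpu_time_wait () above is a pure busy-wait: it spins at full speed until the target timestamp passes, so it only makes sense for very short delays. A usage sketch, converting a wall-clock interval to clocks via the same seconds_per_clock field used earlier (spin_10us is a hypothetical helper):

always_inline void
spin_10us (clib_time_t * c)
{
  /* ~10 microseconds expressed in CPU clocks. */
  clib_cpu_time_wait ((u64) (10e-6 / c->seconds_per_clock));
}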
@@ -212,32 +223,36 @@ void clib_time_init (clib_time_t * c);
#include <sys/syscall.h>
/* Use 64bit floating point to represent time offset from epoch. */
-always_inline f64 unix_time_now (void)
+always_inline f64
+unix_time_now (void)
{
/* clock_gettime without indirect syscall uses GLIBC wrappers which
we don't want. Just the bare metal, please. */
struct timespec ts;
syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
- return ts.tv_sec + 1e-9*ts.tv_nsec;
+ return ts.tv_sec + 1e-9 * ts.tv_nsec;
}
/* As above but integer number of nano-seconds. */
-always_inline u64 unix_time_now_nsec (void)
+always_inline u64
+unix_time_now_nsec (void)
{
struct timespec ts;
syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
- return 1e9*ts.tv_sec + ts.tv_nsec;
+ return 1e9 * ts.tv_sec + ts.tv_nsec;
}
-always_inline f64 unix_usage_now (void)
+always_inline f64
+unix_usage_now (void)
{
struct rusage u;
getrusage (RUSAGE_SELF, &u);
- return u.ru_utime.tv_sec + 1e-6*u.ru_utime.tv_usec
- + u.ru_stime.tv_sec + 1e-6*u.ru_stime.tv_usec;
+ return u.ru_utime.tv_sec + 1e-6 * u.ru_utime.tv_usec
+ + u.ru_stime.tv_sec + 1e-6 * u.ru_stime.tv_usec;
}
-always_inline void unix_sleep (f64 dt)
+always_inline void
+unix_sleep (f64 dt)
{
struct timespec t;
t.tv_sec = dt;
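One caveat on unix_time_now_nsec () above: 1e9 is a double constant, so 1e9 * ts.tv_sec + ts.tv_nsec is evaluated in double precision before the conversion to u64. A double carries 53 significand bits while nanoseconds since the epoch currently need about 61, so low-order nanoseconds can be rounded away. Where exact nanosecond resolution matters, an all-integer form avoids the round-trip (a sketch of an alternative, not what this commit does):

always_inline u64
unix_time_now_nsec_exact (void)
{
  struct timespec ts;
  syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
  /* All-integer arithmetic: no intermediate double, no rounding. */
  return (u64) ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}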
@@ -245,20 +260,39 @@ always_inline void unix_sleep (f64 dt)
nanosleep (&t, 0);
}
-#else /* ! CLIB_UNIX */
+#else /* ! CLIB_UNIX */
-always_inline f64 unix_time_now (void)
-{ return 0; }
+always_inline f64
+unix_time_now (void)
+{
+ return 0;
+}
-always_inline u64 unix_time_now_nsec (void)
-{ return 0; }
+always_inline u64
+unix_time_now_nsec (void)
+{
+ return 0;
+}
-always_inline f64 unix_usage_now (void)
-{ return 0; }
+always_inline f64
+unix_usage_now (void)
+{
+ return 0;
+}
-always_inline void unix_sleep (f64 dt)
-{ }
+always_inline void
+unix_sleep (f64 dt)
+{
+}
#endif
#endif /* included_time_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */