diff options
author | Ondrej Fabry <ofabry@cisco.com> | 2019-09-17 12:41:47 +0200 |
---|---|---|
committer | Ondrej Fabry <ofabry@cisco.com> | 2019-10-03 12:54:22 +0200 |
commit | 809b69ea4a90455445c34bbad7d8e5fea5cf3462 (patch) | |
tree | ba4b94339ac83908e2d9933692dc373929380aa7 /adapter/statsclient/statseg.go | |
parent | 48198748bdfcc7d30c794cdac19de822da53f840 (diff) |
Optimizations for statsclient
- this dramatically improves performance for stats data collection
- memory allocation is now done only when stat dirs change
- updating prepared stat dir does not need to allocate memory
- created integration test for testing stats client
- added NumWorkerThreads and VectorRatePerWorker to SystemStats
- added ReduceSimpleCounterStatIndex, ReduceCombinedCounterStatIndex for
aggregating specific index
Change-Id: I702731a69024ab5dd0832bb5cfe2773a987359e5
Signed-off-by: Ondrej Fabry <ofabry@cisco.com>
Diffstat (limited to 'adapter/statsclient/statseg.go')
-rw-r--r-- | adapter/statsclient/statseg.go | 100 |
1 file changed, 100 insertions, 0 deletions
diff --git a/adapter/statsclient/statseg.go b/adapter/statsclient/statseg.go new file mode 100644 index 0000000..7f1c381 --- /dev/null +++ b/adapter/statsclient/statseg.go @@ -0,0 +1,100 @@ +package statsclient + +import ( + "sync/atomic" + "time" + "unsafe" +) + +var ( + MaxWaitInProgress = time.Millisecond * 100 + CheckDelayInProgress = time.Microsecond * 10 +) + +type sharedHeaderBase struct { + epoch int64 + inProgress int64 + directoryOffset int64 + errorOffset int64 + statsOffset int64 +} + +type statSegSharedHeader struct { + version uint64 + sharedHeaderBase +} + +func (h *statSegSharedHeader) legacyVersion() bool { + // older VPP (<=19.04) did not have version in stat segment header + // we try to provide fallback support by skipping it in header + if h.version > maxVersion && h.inProgress > 1 && h.epoch == 0 { + return true + } + return false +} + +func statSegHeader(b []byte) (header statSegSharedHeader) { + h := (*statSegSharedHeader)(unsafe.Pointer(&b[0])) + header.version = atomic.LoadUint64(&h.version) + header.epoch = atomic.LoadInt64(&h.epoch) + header.inProgress = atomic.LoadInt64(&h.inProgress) + header.directoryOffset = atomic.LoadInt64(&h.directoryOffset) + header.errorOffset = atomic.LoadInt64(&h.errorOffset) + header.statsOffset = atomic.LoadInt64(&h.statsOffset) + return +} + +func statSegHeaderLegacy(b []byte) (header statSegSharedHeader) { + h := (*sharedHeaderBase)(unsafe.Pointer(&b[0])) + header.version = 0 + header.epoch = atomic.LoadInt64(&h.epoch) + header.inProgress = atomic.LoadInt64(&h.inProgress) + header.directoryOffset = atomic.LoadInt64(&h.directoryOffset) + header.errorOffset = atomic.LoadInt64(&h.errorOffset) + header.statsOffset = atomic.LoadInt64(&h.statsOffset) + return +} + +type statSegAccess struct { + epoch int64 +} + +func (c *statSegment) accessStart() statSegAccess { + t := time.Now() + + epoch, inprog := c.getEpoch() + for inprog { + if time.Since(t) > MaxWaitInProgress { + return statSegAccess{} + } else { + 
time.Sleep(CheckDelayInProgress) + } + epoch, inprog = c.getEpoch() + } + return statSegAccess{ + epoch: epoch, + } +} + +func (c *statSegment) accessEnd(acc *statSegAccess) bool { + epoch, inprog := c.getEpoch() + if acc.epoch != epoch || inprog { + return false + } + return true +} + +type vecHeader struct { + length uint64 + vectorData [0]uint8 +} + +func vectorLen(v unsafe.Pointer) uint64 { + vec := *(*vecHeader)(unsafe.Pointer(uintptr(v) - unsafe.Sizeof(uintptr(0)))) + return vec.length +} + +//go:nosplit +func statSegPointer(p unsafe.Pointer, offset uintptr) unsafe.Pointer { + return unsafe.Pointer(uintptr(p) + offset) +} |