vernier 1.4.0 → 1.5.0
- checksums.yaml +4 -4
- data/examples/fiber_stalls.rb +51 -0
- data/exe/vernier +11 -52
- data/ext/vernier/extconf.rb +2 -0
- data/ext/vernier/memory.cc +144 -0
- data/ext/vernier/periodic_thread.hh +141 -0
- data/ext/vernier/signal_safe_semaphore.hh +72 -0
- data/ext/vernier/timestamp.hh +138 -0
- data/ext/vernier/vernier.cc +138 -338
- data/ext/vernier/vernier.hh +4 -0
- data/lib/vernier/autorun.rb +17 -1
- data/lib/vernier/collector.rb +37 -9
- data/lib/vernier/hooks/memory_usage.rb +37 -0
- data/lib/vernier/hooks.rb +1 -0
- data/lib/vernier/marker.rb +2 -0
- data/lib/vernier/output/file_listing.rb +113 -0
- data/lib/vernier/output/filename_filter.rb +30 -0
- data/lib/vernier/output/firefox.rb +29 -20
- data/lib/vernier/output/top.rb +60 -8
- data/lib/vernier/parsed_profile.rb +102 -0
- data/lib/vernier/result.rb +4 -92
- data/lib/vernier/stack_table.rb +3 -42
- data/lib/vernier/stack_table_helpers.rb +129 -0
- data/lib/vernier/version.rb +1 -1
- data/lib/vernier.rb +3 -0
- metadata +12 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2a5b535bebdbcc91aa853b53657893c37531e8acffe5f00df1753ca1c52e7508
+  data.tar.gz: 7aa9509fdfb009956bd2eeab6b74bc8fa832e078d9c8fce92445c82a5699ea27
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 45584b395431f8135928011d03d427f42aa747dda76c4bca3bc063837571e3cc168b3fbc04cd268353e17299f47c7be3c6c486e0885488c6e7b5aba94bcb4190
+  data.tar.gz: d8df8d3a0ac7ce752cfb4c8fc0f57e0af1bf7640a3f228f52a8cd4a32211cbe21686338ca6068257d49df0a2a0206773da89d93aa673a2915c5ea5881d1f4be2
data/examples/fiber_stalls.rb
ADDED
@@ -0,0 +1,51 @@
+require "bundler/inline"
+gemfile do
+  source 'https://rubygems.org'
+
+  gem "async"
+end
+
+require "async"
+require "async/queue"
+
+def measure
+  x = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+  yield
+  Process.clock_gettime(Process::CLOCK_MONOTONIC) - x
+end
+
+def fib(n)
+  if n < 2
+    n
+  else
+    fib(n-2) + fib(n-1)
+  end
+end
+
+# find fib that takes ~50ms
+fib_i = 50.times.find { |i| measure { fib(i) } >= 0.05 }
+sleep_i = measure { fib(fib_i) }
+
+Async {
+  latch = Async::Queue.new
+
+  workers = [
+    Async {
+      latch.pop # block until ready to measure
+
+      100.times {
+        sleep(sleep_i)
+        # stalls happen here. This worker wants to be scheduled so it can
+        # continue the loop, but will be blocked by another worker executing fib
+      }
+    },
+    Async {
+      latch.pop # block until ready to measure
+
+      100.times { fib(fib_i) }
+    },
+  ]
+
+  2.times { latch << nil }
+  workers.each(&:wait)
+}
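The example above only generates the problematic workload; it does not profile itself. To actually capture the stalls it produces, it can be wrapped in Vernier's block API. A minimal sketch (not part of this diff; the output filename and load path are placeholders):

require "vernier"

# Record the example under the profiler and write the result to disk,
# where the Firefox Profiler UI can show the sleeping worker waiting on
# the worker that is busy executing fib.
Vernier.profile(out: "fiber_stalls.vernier.json") do
  load "examples/fiber_stalls.rb"
end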
data/exe/vernier
CHANGED
@@ -1,5 +1,7 @@
 #!/usr/bin/env ruby
 
+$LOAD_PATH.unshift File.expand_path("../../lib", __FILE__)
+
 require "optparse"
 require "vernier/version"
 
@@ -18,6 +20,9 @@ FLAGS:
       o.on('--output [FILENAME]', String, "output filename") do |s|
         options[:output] = s
       end
+      o.on('--output-dir [DIRECTORY]', String, "output directory (default .)") do |s|
+        options[:output_dir] = s
+      end
       o.on('--interval [MICROSECONDS]', Integer, "sampling interval (default 500)") do |i|
         options[:interval] = i
       end
@@ -52,60 +57,14 @@ FLAGS:
 
     def self.inverted_tree(top, file)
       # Print the inverted tree from a Vernier profile
-      require "json"
-
-
-
-      json = if is_gzip
-        require "zlib"
-        Zlib::GzipReader.open(file) { |gz| gz.read }
-      else
-        File.read file
-      end
-
-      info = JSON.load json
-
-      main = info["threads"].find { |thread| thread["isMainThread"] }
-
-      weight_by_frame = Hash.new(0)
-
-      stack_frames = main["stackTable"]["frame"]
-      frame_table = main["frameTable"]["func"]
-      func_table = main["funcTable"]["name"]
-      string_array = main["stringArray"]
+      require "vernier/parsed_profile"
+      require "vernier/output/top"
+      require "vernier/output/file_listing"
 
-
-        top_frame_index = stack_frames[stack]
-        func_index = frame_table[top_frame_index]
-        string_index = func_table[func_index]
-        str = string_array[string_index]
-        weight_by_frame[str] += weight
-      end
-
-      total = weight_by_frame.values.inject :+
-
-      header = ["Samples", "%", ""]
-      widths = header.map(&:bytesize)
-
-      columns = weight_by_frame.sort_by { |k,v| v }.reverse.first(top).map { |k,v|
-        entry = [v.to_s, ((v / total.to_f) * 100).round(1).to_s, k]
-        entry.each_with_index { |str, i| widths[i] = str.bytesize if widths[i] < str.bytesize }
-        entry
-      }
-
-      print_separator widths
-      print_row header, widths
-      print_separator widths
-      columns.each { print_row(_1, widths) }
-      print_separator widths
-    end
-
-    def self.print_row(list, widths)
-      puts("|" + list.map.with_index { |str, i| " " + str.ljust(widths[i] + 1) }.join("|") + "|")
-    end
+      parsed_profile = Vernier::ParsedProfile.read_file(file)
 
-
-      puts
+      puts Vernier::Output::Top.new(parsed_profile).output
+      puts Vernier::Output::FileListing.new(parsed_profile).output
     end
   end
 end
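The old inverted_tree implementation parsed the profile JSON and rendered the table by hand inside the executable; it is replaced by the new ParsedProfile, Output::Top, and Output::FileListing classes, which can also be driven outside the CLI. A sketch of the same call sequence used programmatically (the profile path is a placeholder):

require "vernier/parsed_profile"
require "vernier/output/top"
require "vernier/output/file_listing"

# Load a previously recorded profile and print the same summaries the
# executable now prints.
parsed_profile = Vernier::ParsedProfile.read_file("profile.vernier.json")
puts Vernier::Output::Top.new(parsed_profile).output
puts Vernier::Output::FileListing.new(parsed_profile).output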
data/ext/vernier/extconf.rb
CHANGED
@@ -2,6 +2,7 @@
 
 require "mkmf"
 
+$CXXFLAGS += " -fvisibility=hidden "
 $CXXFLAGS += " -std=c++14 "
 $CXXFLAGS += " -ggdb3 -Og "
 
@@ -11,5 +12,6 @@ have_struct_member("rb_internal_thread_event_data_t", "thread", ["ruby/thread.h"
 have_func("rb_profile_thread_frames", "ruby/debug.h")
 
 have_func("pthread_setname_np")
+have_func("pthread_condattr_setclock")
 
 create_makefile("vernier/vernier")
data/ext/vernier/memory.cc
ADDED
@@ -0,0 +1,144 @@
+#include <mutex>
+#include <stdio.h>
+#include <unistd.h>
+#include <vector>
+
+#include "vernier.hh"
+#include "timestamp.hh"
+#include "periodic_thread.hh"
+
+#if defined(__APPLE__)
+
+// Based loosely on https://github.com/zombocom/get_process_mem
+#include <libproc.h>
+
+uint64_t memory_rss() {
+    pid_t pid = getpid();
+
+    struct proc_taskinfo tinfo;
+    int st = proc_pidinfo(pid, PROC_PIDTASKINFO, 0,
+                          &tinfo, sizeof(tinfo));
+
+    if (st != sizeof(tinfo)) {
+        fprintf(stderr, "VERNIER: warning: proc_pidinfo failed\n");
+        return 0;
+    }
+
+    return tinfo.pti_resident_size;
+}
+
+#elif defined(__linux__)
+
+uint64_t memory_rss() {
+    long rss = 0;
+
+    // I'd heard that you shouldn't read /proc/*/smaps with fopen and family,
+    // but maybe it's fine for statm which is much smaller and will almost
+    // certainly fit in any internal buffer.
+    FILE *file = fopen("/proc/self/statm", "r");
+    if (!file) return 0;
+    if (fscanf(file, "%*s%ld", &rss) != 1) {
+        fclose(file);
+        return 0;
+    }
+    fclose(file);
+    return rss * sysconf(_SC_PAGESIZE);
+}
+
+#else
+
+// Unsupported
+uint64_t memory_rss() {
+    return 0;
+}
+
+#endif
+
+VALUE rb_cMemoryTracker;
+
+static VALUE rb_memory_rss(VALUE self) {
+    return ULL2NUM(memory_rss());
+}
+
+class MemoryTracker : public PeriodicThread {
+  public:
+    struct Record {
+        TimeStamp timestamp;
+        uint64_t memory_rss;
+    };
+    std::vector<Record> results;
+    std::mutex mutex;
+
+    MemoryTracker() : PeriodicThread(TimeStamp::from_milliseconds(10)) {
+    }
+
+    void run_iteration() {
+        record();
+    }
+
+    void record() {
+        const std::lock_guard<std::mutex> lock(mutex);
+        results.push_back(Record{TimeStamp::Now(), memory_rss()});
+    }
+};
+
+static const rb_data_type_t rb_memory_tracker_type = {
+    .wrap_struct_name = "vernier/memory_tracker",
+    .function = {
+        //.dmemsize = memory_tracker_memsize,
+        //.dmark = memory_tracker_mark,
+        //.dfree = memory_tracker_free,
+    },
+};
+
+VALUE memory_tracker_start(VALUE self) {
+    MemoryTracker *memory_tracker;
+    TypedData_Get_Struct(self, MemoryTracker, &rb_memory_tracker_type, memory_tracker);
+    memory_tracker->start();
+    return self;
+}
+
+VALUE memory_tracker_stop(VALUE self) {
+    MemoryTracker *memory_tracker;
+    TypedData_Get_Struct(self, MemoryTracker, &rb_memory_tracker_type, memory_tracker);
+
+    memory_tracker->stop();
+    return self;
+}
+
+VALUE memory_tracker_record(VALUE self) {
+    MemoryTracker *memory_tracker;
+    TypedData_Get_Struct(self, MemoryTracker, &rb_memory_tracker_type, memory_tracker);
+    memory_tracker->record();
+    return self;
+}
+
+VALUE memory_tracker_results(VALUE self) {
+    MemoryTracker *memory_tracker;
+    TypedData_Get_Struct(self, MemoryTracker, &rb_memory_tracker_type, memory_tracker);
+    VALUE timestamps = rb_ary_new();
+    VALUE memory = rb_ary_new();
+    for (const auto& record: memory_tracker->results) {
+        rb_ary_push(timestamps, ULL2NUM(record.timestamp.nanoseconds()));
+        rb_ary_push(memory, ULL2NUM(record.memory_rss));
+    }
+    return rb_ary_new_from_args(2, timestamps, memory);
+}
+
+VALUE memory_tracker_alloc(VALUE self) {
+    auto memory_tracker = new MemoryTracker();
+    VALUE obj = TypedData_Wrap_Struct(self, &rb_memory_tracker_type, memory_tracker);
+    return obj;
+}
+
+void Init_memory() {
+    rb_cMemoryTracker = rb_define_class_under(rb_mVernier, "MemoryTracker", rb_cObject);
+    rb_define_alloc_func(rb_cMemoryTracker, memory_tracker_alloc);
+
+    rb_define_method(rb_cMemoryTracker, "start", memory_tracker_start, 0);
+    rb_define_method(rb_cMemoryTracker, "stop", memory_tracker_stop, 0);
+    rb_define_method(rb_cMemoryTracker, "results", memory_tracker_results, 0);
+    rb_define_method(rb_cMemoryTracker, "record", memory_tracker_record, 0);
+
+    rb_define_singleton_method(rb_mVernier, "memory_rss", rb_memory_rss, 0);
+}
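MemoryTracker subclasses the new PeriodicThread (next file) to sample resident set size roughly every 10 ms on a background thread, and Init_memory exposes it to Ruby along with a Vernier.memory_rss singleton method. In normal use it is driven by the new memory_usage hook (data/lib/vernier/hooks/memory_usage.rb), but the binding can also be exercised directly; a minimal sketch based only on the methods registered above:

require "vernier"

# Current resident set size in bytes (0 on unsupported platforms).
puts Vernier.memory_rss

# Sample RSS on a background thread for a second, then fetch two parallel
# arrays: monotonic timestamps in nanoseconds and RSS values in bytes.
tracker = Vernier::MemoryTracker.new
tracker.start
sleep 1
tracker.stop
timestamps, rss_bytes = tracker.results
puts "#{timestamps.size} samples, last RSS: #{rss_bytes.last} bytes"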
data/ext/vernier/periodic_thread.hh
ADDED
@@ -0,0 +1,141 @@
+#include "ruby.h"
+
+#include <atomic>
+#include "timestamp.hh"
+
+#ifdef __APPLE__
+
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <pthread.h>
+
+// https://developer.apple.com/library/archive/technotes/tn2169/_index.html
+inline void upgrade_thread_priority(pthread_t pthread) {
+    mach_timebase_info_data_t timebase_info;
+    mach_timebase_info(&timebase_info);
+
+    const uint64_t NANOS_PER_MSEC = 1000000ULL;
+    double clock2abs = ((double)timebase_info.denom / (double)timebase_info.numer) * NANOS_PER_MSEC;
+
+    thread_time_constraint_policy_data_t policy;
+    policy.period = 0;
+
+    // FIXME: I really don't know what these value should be
+    policy.computation = (uint32_t)(5 * clock2abs); // 5 ms of work
+    policy.constraint = (uint32_t)(10 * clock2abs);
+    policy.preemptible = FALSE;
+
+    int kr = thread_policy_set(pthread_mach_thread_np(pthread_self()),
+                               THREAD_TIME_CONSTRAINT_POLICY,
+                               (thread_policy_t)&policy,
+                               THREAD_TIME_CONSTRAINT_POLICY_COUNT);
+
+    if (kr != KERN_SUCCESS) {
+        mach_error("thread_policy_set:", kr);
+        exit(1);
+    }
+}
+#else
+inline void upgrade_thread_priority(pthread_t pthread) {
+}
+#endif
+
+class PeriodicThread {
+    pthread_t pthread;
+    TimeStamp interval;
+
+    pthread_mutex_t running_mutex = PTHREAD_MUTEX_INITIALIZER;
+    pthread_cond_t running_cv;
+    std::atomic_bool running;
+
+  public:
+    PeriodicThread(TimeStamp interval) : interval(interval), running(false) {
+        pthread_condattr_t attr;
+        pthread_condattr_init(&attr);
+#if HAVE_PTHREAD_CONDATTR_SETCLOCK
+        pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
+#endif
+        pthread_cond_init(&running_cv, &attr);
+    }
+
+    void set_interval(TimeStamp timestamp) {
+        interval = timestamp;
+    }
+
+    static void *thread_entrypoint(void *arg) {
+        upgrade_thread_priority(pthread_self());
+
+        static_cast<PeriodicThread *>(arg)->run();
+        return NULL;
+    }
+
+    void run() {
+#if HAVE_PTHREAD_SETNAME_NP
+#ifdef __APPLE__
+        pthread_setname_np("Vernier profiler");
+#else
+        pthread_setname_np(pthread_self(), "Vernier profiler");
+#endif
+#endif
+
+        TimeStamp next_sample_schedule = TimeStamp::Now();
+        bool done = false;
+        while (!done) {
+            TimeStamp sample_complete = TimeStamp::Now();
+
+            run_iteration();
+
+            next_sample_schedule += interval;
+
+            if (next_sample_schedule < sample_complete) {
+                next_sample_schedule = sample_complete + interval;
+            }
+
+            pthread_mutex_lock(&running_mutex);
+            if (running) {
+#if HAVE_PTHREAD_CONDATTR_SETCLOCK
+                struct timespec next_sample_ts = next_sample_schedule.timespec();
+#else
+                auto offset = TimeStamp::NowRealtime() - TimeStamp::Now();
+                struct timespec next_sample_ts = (next_sample_schedule + offset).timespec();
+#endif
+                int ret;
+                do {
+                    ret = pthread_cond_timedwait(&running_cv, &running_mutex, &next_sample_ts);
+                } while(running && ret == EINTR);
+            }
+            done = !running;
+            pthread_mutex_unlock(&running_mutex);
+        }
+    }
+
+    virtual void run_iteration() = 0;
+
+    void start() {
+        pthread_mutex_lock(&running_mutex);
+        if (!running) {
+            running = true;
+
+            int ret = pthread_create(&pthread, NULL, &thread_entrypoint, this);
+            if (ret != 0) {
+                perror("pthread_create");
+                rb_bug("VERNIER: pthread_create failed");
+            }
+        }
+        pthread_mutex_unlock(&running_mutex);
+    }
+
+    void stop() {
+        pthread_mutex_lock(&running_mutex);
+        bool was_running = running;
+        if (running) {
+            running = false;
+            pthread_cond_broadcast(&running_cv);
+        }
+        pthread_mutex_unlock(&running_mutex);
+        if (was_running)
+            pthread_join(pthread, NULL);
+        pthread = 0;
+    }
+};
+
data/ext/vernier/signal_safe_semaphore.hh
ADDED
@@ -0,0 +1,72 @@
+#ifndef SIGNAL_SAFE_SEMAPHORE_HH
+#define SIGNAL_SAFE_SEMAPHORE_HH
+
+#if defined(__APPLE__)
+/* macOS */
+#include <dispatch/dispatch.h>
+#elif defined(__FreeBSD__)
+/* FreeBSD */
+#include <pthread_np.h>
+#include <semaphore.h>
+#else
+/* Linux */
+#include <semaphore.h>
+#include <sys/syscall.h> /* for SYS_gettid */
+#endif
+
+// A basic semaphore built on sem_wait/sem_post
+// post() is guaranteed to be async-signal-safe
+class SignalSafeSemaphore {
+#ifdef __APPLE__
+    dispatch_semaphore_t sem;
+#else
+    sem_t sem;
+#endif
+
+  public:
+
+    SignalSafeSemaphore(unsigned int value = 0) {
+#ifdef __APPLE__
+        sem = dispatch_semaphore_create(value);
+#else
+        sem_init(&sem, 0, value);
+#endif
+    };
+
+    ~SignalSafeSemaphore() {
+#ifdef __APPLE__
+        dispatch_release(sem);
+#else
+        sem_destroy(&sem);
+#endif
+    };
+
+    void wait() {
+#ifdef __APPLE__
+        dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER);
+#else
+        // Use sem_timedwait so that we get a crash instead of a deadlock for
+        // easier debugging
+        struct timespec ts = (TimeStamp::NowRealtime() + TimeStamp::from_seconds(5)).timespec();
+
+        int ret;
+        do {
+            ret = sem_timedwait(&sem, &ts);
+        } while (ret && errno == EINTR);
+        if (ret != 0) {
+            rb_bug("VERNIER: sem_timedwait waited over 5 seconds");
+        }
+        assert(ret == 0);
+#endif
+    }
+
+    void post() {
+#ifdef __APPLE__
+        dispatch_semaphore_signal(sem);
+#else
+        sem_post(&sem);
+#endif
+    }
+};
+
+#endif
data/ext/vernier/timestamp.hh
ADDED
@@ -0,0 +1,138 @@
+#ifndef TIMESTAMP_HH
+#define TIMESTAMP_HH
+
+#include <iostream>
+#include <stdint.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+class TimeStamp {
+    static const uint64_t nanoseconds_per_second = 1000000000;
+    uint64_t value_ns;
+
+    TimeStamp(uint64_t value_ns) : value_ns(value_ns) {}
+
+  public:
+    TimeStamp() : value_ns(0) {}
+
+    static TimeStamp Now() {
+        struct timespec ts;
+        clock_gettime(CLOCK_MONOTONIC, &ts);
+        return TimeStamp(ts.tv_sec * nanoseconds_per_second + ts.tv_nsec);
+    }
+
+    static TimeStamp NowRealtime() {
+        struct timespec ts;
+        clock_gettime(CLOCK_REALTIME, &ts);
+        return TimeStamp(ts.tv_sec * nanoseconds_per_second + ts.tv_nsec);
+    }
+
+    static TimeStamp Zero() {
+        return TimeStamp(0);
+    }
+
+    // SleepUntil a specified timestamp
+    // Highly accurate manual sleep time
+    static void SleepUntil(const TimeStamp &target_time) {
+        if (target_time.zero()) return;
+        struct timespec ts = target_time.timespec();
+
+        int res;
+        do {
+            // do nothing until it's time :)
+            sleep(0);
+        } while (target_time > TimeStamp::Now());
+    }
+
+    static TimeStamp from_seconds(uint64_t s) {
+        return TimeStamp::from_milliseconds(s * 1000);
+    }
+
+    static TimeStamp from_milliseconds(uint64_t ms) {
+        return TimeStamp::from_microseconds(ms * 1000);
+    }
+
+    static TimeStamp from_microseconds(uint64_t us) {
+        return TimeStamp::from_nanoseconds(us * 1000);
+    }
+
+    static TimeStamp from_nanoseconds(uint64_t ns) {
+        return TimeStamp(ns);
+    }
+
+    TimeStamp operator-(const TimeStamp &other) const {
+        TimeStamp result = *this;
+        return result -= other;
+    }
+
+    TimeStamp &operator-=(const TimeStamp &other) {
+        if (value_ns > other.value_ns) {
+            value_ns = value_ns - other.value_ns;
+        } else {
+            // underflow
+            value_ns = 0;
+        }
+        return *this;
+    }
+
+    TimeStamp operator+(const TimeStamp &other) const {
+        TimeStamp result = *this;
+        return result += other;
+    }
+
+    TimeStamp &operator+=(const TimeStamp &other) {
+        uint64_t new_value = value_ns + other.value_ns;
+        value_ns = new_value;
+        return *this;
+    }
+
+    bool operator<(const TimeStamp &other) const {
+        return value_ns < other.value_ns;
+    }
+
+    bool operator<=(const TimeStamp &other) const {
+        return value_ns <= other.value_ns;
+    }
+
+    bool operator>(const TimeStamp &other) const {
+        return value_ns > other.value_ns;
+    }
+
+    bool operator>=(const TimeStamp &other) const {
+        return value_ns >= other.value_ns;
+    }
+
+    bool operator==(const TimeStamp &other) const {
+        return value_ns == other.value_ns;
+    }
+
+    bool operator!=(const TimeStamp &other) const {
+        return value_ns != other.value_ns;
+    }
+
+    uint64_t nanoseconds() const {
+        return value_ns;
+    }
+
+    uint64_t microseconds() const {
+        return value_ns / 1000;
+    }
+
+    bool zero() const {
+        return value_ns == 0;
+    }
+
+    struct timespec timespec() const {
+        struct timespec ts;
+        ts.tv_sec = nanoseconds() / nanoseconds_per_second;
+        ts.tv_nsec = (nanoseconds() % nanoseconds_per_second);
+        return ts;
+    }
+};
+
+inline std::ostream& operator<<(std::ostream& os, const TimeStamp& info) {
+    os << info.nanoseconds() << "ns";
+    return os;
+}
+
+#endif
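Together, memory.cc, periodic_thread.hh, and timestamp.hh form the sampling backend for the memory_usage hook listed in the file summary. A hedged sketch of enabling it through the existing hooks: option; the :memory_usage name is inferred from the new file data/lib/vernier/hooks/memory_usage.rb and is an assumption, not shown in this diff:

require "vernier"

# Profile a block and attach periodic RSS samples so memory growth can be
# inspected alongside the time profile (hook name assumed, see lead-in).
Vernier.profile(out: "profile_with_memory.json", hooks: [:memory_usage]) do
  strings = []
  10.times { strings << ("x" * 10_000_000) }
end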