sqlite_web_vfs 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE-3RD-PARTY.md +32 -0
- data/README.md +118 -0
- data/examples/sqlite3_example.rb +21 -0
- data/examples/sqlite3_ffi_example.rb +25 -0
- data/ext/sqlite_web_vfs/Makefile +267 -0
- data/ext/sqlite_web_vfs/_wrap_web_vfs.o +0 -0
- data/ext/sqlite_web_vfs/extconf.rb +177 -0
- data/ext/sqlite_web_vfs/readerwriterqueue.h +5 -0
- data/ext/sqlite_web_vfs/shim.c +16 -0
- data/ext/sqlite_web_vfs/shim.o +0 -0
- data/ext/sqlite_web_vfs/upstream/HTTP.h +435 -0
- data/ext/sqlite_web_vfs/upstream/SQLiteVFS.h +530 -0
- data/ext/sqlite_web_vfs/upstream/ThreadPool.h +209 -0
- data/ext/sqlite_web_vfs/upstream/atomicops.h +772 -0
- data/ext/sqlite_web_vfs/upstream/dbi.h +203 -0
- data/ext/sqlite_web_vfs/upstream/readerwriterqueue.h +979 -0
- data/ext/sqlite_web_vfs/upstream/web_vfs.cc +21 -0
- data/ext/sqlite_web_vfs/upstream/web_vfs.h +823 -0
- data/lib/sqlite_web_vfs/loader.rb +69 -0
- data/lib/sqlite_web_vfs.rb +12 -0
- data/spec/integration/sq_lite_web_vfs_integration_spec.rb +45 -0
- data/spec/integration/sqlite_web_vfs_integration_spec.rb +45 -0
- data/spec/spec_helper.rb +7 -0
- metadata +67 -0
|
@@ -0,0 +1,209 @@
|
|
|
1
|
+
// Worker thread pool:
|
|
2
|
+
// 1. Each job has one stage that can run in parallel, followed by a serial stage to be run in the
|
|
3
|
+
// enqueued order, exclusively of other jobs' serial stages. Put another way, the data flow
|
|
4
|
+
// through a parallel scatter, then a serial gather.
|
|
5
|
+
// 2. The jobs have no return values, relying instead on side-effects. (Exception: for each job,
|
|
6
|
+
// the parallel stage passes a void* to its serialized stage.)
|
|
7
|
+
// 3. The Enqueue operation blocks if the queue is "full"
|
|
8
|
+
|
|
9
|
+
#pragma once

#include <atomic>
#include <cassert>
#include <climits>
#include <condition_variable>
#include <functional>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

#include <readerwriterqueue.h>
|
|
18
|
+
|
|
19
|
+
class ThreadPool {
    struct Job {
        // sequence number used to ensure serial stages run in the correct order
        unsigned long long seqno = ULLONG_MAX;
        void *x = nullptr;                 // job argument; par's return value replaces it
        std::function<void *(void *)> par; // parallel stage (may be empty)
        std::function<void(void *)> ser;   // serial stage (may be empty)
    };
    // orders ser_queue_ as a min-heap on seqno (lowest seqno at top())
    std::function<bool(const Job &, const Job &)> job_greater_ =
        [](const Job &lhs, const Job &rhs) { return lhs.seqno > rhs.seqno; };

    // state touched only by the enqueueing thread (and the destructor)
    size_t max_threads_, max_jobs_;
    std::vector<std::thread> threads_;
    unsigned long long seqno_next_ = 0; // jobs enqueued so far / next seqno to be enqueued

    // shared state, guarded by mutex_
    std::mutex mutex_;
    std::condition_variable cv_enqueue_, cv_done_;
    std::queue<Job> par_queue_; // jobs awaiting their parallel stage
    std::priority_queue<Job, std::vector<Job>, decltype(job_greater_)> ser_queue_;
    unsigned long long seqno_done_ = 0; // seqnos [0, seqno_done_) have completed their serial stage
    bool shutdown_ = false;

    // worker thread: run parallel stages concurrently, then serial stages in strict seqno order
    void Worker() {
        std::unique_lock<std::mutex> lock(mutex_);
        while (true) {
            // wait for / dequeue a job
            while (!shutdown_ && par_queue_.empty()) {
                cv_enqueue_.wait(lock);
            }
            if (shutdown_) {
                break;
            }
            Job job = par_queue_.front();
            par_queue_.pop();
            // run parallel stage (lock released: parallel stages of different jobs may overlap)
            if (job.par) {
                lock.unlock();
                job.x = job.par(job.x);
                lock.lock();
            }
            // enqueue serial stage
            ser_queue_.push(job);
            // run the enqueued serial stage(s) so long as the next one is in seqno order.
            // this may or may not include the one just enqueued.
            auto seqno0 = seqno_done_;
            for (; !ser_queue_.empty() && ser_queue_.top().seqno == seqno_done_; ++seqno_done_) {
                job = ser_queue_.top();
                ser_queue_.pop();
                if (job.ser) {
                    // run the next serial stage; we can release lock for this because the seqno
                    // check ensures only one can be dequeued at a time.
                    lock.unlock();
                    if (seqno_done_ > seqno0) {
                        // surface earlier iterations' progress before a possibly-slow job.ser
                        cv_done_.notify_all();
                    }
                    job.ser(job.x);
                    lock.lock();
                }
            }
            if (seqno_done_ > seqno0) {
                cv_done_.notify_all();
            }
        }
    }

  public:
    // max_threads: cap on worker threads (started lazily as demand appears)
    // max_jobs: cap on in-flight jobs; Enqueue() blocks while at the cap
    ThreadPool(size_t max_threads, size_t max_jobs)
        : max_threads_(max_threads), max_jobs_(max_jobs), ser_queue_(job_greater_) {}
    virtual ~ThreadPool() {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            shutdown_ = true;
            cv_enqueue_.notify_all();
        }
        for (auto &thread : threads_) {
            thread.join();
        }
    }

    size_t MaxThreads() const noexcept { return max_threads_; }

    size_t MaxJobs() const noexcept { return max_jobs_; }

    // Enqueue ser(par(x)) for background processing as described. The functions must not throw.
    // Blocks while max_jobs_ jobs are already in flight. Call from one thread only.
    void Enqueue(void *x, std::function<void *(void *)> par,
                 std::function<void(void *)> ser) {
        if (seqno_next_ == ULLONG_MAX) { // pedantic: seqno wraparound
            Barrier();
            // all workers are now idle; reset BOTH counters so that the worker's
            // top().seqno == seqno_done_ check and the in-flight count
            // (seqno_next_ - seqno_done_) stay coherent after the wrap.
            // (previously only seqno_next_ was reset, which would leave
            // seqno_done_ == ULLONG_MAX and hang the next Barrier())
            std::lock_guard<std::mutex> lock(mutex_);
            seqno_next_ = 0;
            seqno_done_ = 0;
        }

        Job job;
        job.x = x;
        job.par = par;
        job.ser = ser;

        std::unique_lock<std::mutex> lock(mutex_);
        while (seqno_next_ - seqno_done_ >= max_jobs_) {
            cv_done_.wait(lock);
        }
        job.seqno = seqno_next_++;
        par_queue_.push(job);
        // decide whether to start a new worker while still holding the lock:
        // par_queue_ is shared with the workers, so reading its size() after
        // unlock (as the previous code did) would be a data race.
        bool spawn_worker =
            threads_.size() < max_threads_ && threads_.size() < par_queue_.size();
        lock.unlock();
        cv_enqueue_.notify_one();
        if (spawn_worker) {
            threads_.emplace_back([this]() { this->Worker(); });
        }
    }

    // Await completion of all previously enqueued jobs
    virtual void Barrier() {
        if (seqno_next_) {
            std::unique_lock<std::mutex> lock(mutex_);
            while (seqno_done_ < seqno_next_) {
                cv_done_.wait(lock);
            }
        }
    }
};
|
|
141
|
+
|
|
142
|
+
// Adds lock-free EnqueueFast() for use on critical paths.
// - EnqueueFast() never blocks (!)
// - Only one thread should ever use it
class ThreadPoolWithEnqueueFast : public ThreadPool {
    // concept: foreground thread adds job onto a lock-free queue, which a single background thread
    // consumes to Enqueue()

    struct EnqueueFastJob {
        bool shutdown = false; // sentinel: true tells EnqueueFastWorker() to exit
        void *x = nullptr;
        std::function<void *(void *)> par;
        std::function<void(void *)> ser;
    };

    // single-producer/single-consumer queue: producer is the (sole) EnqueueFast()
    // caller, consumer is worker_thread_
    moodycamel::BlockingReaderWriterQueue<EnqueueFastJob> fast_queue_;
    // jobs handed to fast_queue_ but not yet forwarded to ThreadPool::Enqueue();
    // Barrier() spins on this before delegating to the base-class barrier
    std::atomic<long long> fast_queue_size_;
    std::unique_ptr<std::thread> worker_thread_; // started lazily by first EnqueueFast()

    // background thread: drains fast_queue_, forwarding each job to the (blocking) Enqueue()
    void EnqueueFastWorker() {
        EnqueueFastJob job;
        while (true) {
            fast_queue_.wait_dequeue(job);
            if (job.shutdown) {
                break;
            }
            this->Enqueue(job.x, job.par, job.ser);
            // decrement only AFTER Enqueue(): once Barrier() sees the counter at zero,
            // every fast job is visible to ThreadPool::Barrier(), so no job can be
            // missed in the spin-then-base-barrier sequence below
            fast_queue_size_--;
        }
    }

  public:
    ThreadPoolWithEnqueueFast(size_t max_threads, size_t max_jobs)
        : ThreadPool(max_threads, max_jobs), fast_queue_(max_jobs), fast_queue_size_(0) {}

    ~ThreadPoolWithEnqueueFast() {
        if (worker_thread_) {
            // push the shutdown sentinel; queue ordering ensures all previously
            // enqueued jobs are forwarded to Enqueue() before the worker exits
            EnqueueFastJob job;
            job.shutdown = true;
            fast_queue_.enqueue(job);
            worker_thread_->join();
        }
    }

    // Hand off ser(par(x)) without blocking; the background worker forwards it to
    // Enqueue(). Must only ever be called from one thread (single producer).
    void EnqueueFast(void *x, std::function<void *(void *)> par,
                     std::function<void(void *)> ser) {
        EnqueueFastJob job;
        job.x = x;
        job.par = par;
        job.ser = ser;
        if (!worker_thread_) {
            // start the forwarding thread on first use
            worker_thread_.reset(new std::thread([this]() { this->EnqueueFastWorker(); }));
        }
        // increment BEFORE enqueueing so Barrier() can never observe a queued
        // job while the counter reads zero
        fast_queue_size_++;
        fast_queue_.enqueue(job);
    }

    // Await completion of all previously enqueued jobs (fast and regular)
    void Barrier() override {
        // first spin until the forwarding thread has drained fast_queue_ ...
        while (fast_queue_size_.load(std::memory_order_relaxed)) {
            assert(worker_thread_);
#ifdef __x86_64__
            __builtin_ia32_pause(); // spin-wait hint to the CPU
#else
            std::this_thread::yield();
#endif
        }
        // ... then wait for the jobs now resident in the base pool
        ThreadPool::Barrier();
    }
};
|