itsi-server 0.1.1 → 0.1.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of itsi-server might be problematic.
- checksums.yaml +4 -4
- data/exe/itsi +88 -28
- data/ext/itsi_error/Cargo.toml +2 -0
- data/ext/itsi_error/src/from.rs +71 -0
- data/ext/itsi_error/src/lib.rs +12 -37
- data/ext/itsi_instrument_entry/Cargo.toml +15 -0
- data/ext/itsi_instrument_entry/src/lib.rs +31 -0
- data/ext/itsi_rb_helpers/Cargo.toml +2 -0
- data/ext/itsi_rb_helpers/src/heap_value.rs +121 -0
- data/ext/itsi_rb_helpers/src/lib.rs +90 -10
- data/ext/itsi_scheduler/Cargo.toml +24 -0
- data/ext/itsi_scheduler/extconf.rb +6 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/io_helpers.rs +56 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/io_waiter.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler/timer.rs +44 -0
- data/ext/itsi_scheduler/src/itsi_scheduler.rs +308 -0
- data/ext/itsi_scheduler/src/lib.rs +38 -0
- data/ext/itsi_server/Cargo.toml +20 -3
- data/ext/itsi_server/extconf.rb +1 -1
- data/ext/itsi_server/src/body_proxy/big_bytes.rs +104 -0
- data/ext/itsi_server/src/body_proxy/itsi_body_proxy.rs +122 -0
- data/ext/itsi_server/src/body_proxy/mod.rs +2 -0
- data/ext/itsi_server/src/lib.rs +61 -7
- data/ext/itsi_server/src/request/itsi_request.rs +238 -104
- data/ext/itsi_server/src/response/itsi_response.rs +347 -0
- data/ext/itsi_server/src/response/mod.rs +1 -0
- data/ext/itsi_server/src/server/bind.rs +54 -23
- data/ext/itsi_server/src/server/bind_protocol.rs +37 -0
- data/ext/itsi_server/src/server/io_stream.rs +104 -0
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.crt +11 -30
- data/ext/itsi_server/src/server/itsi_ca/itsi_ca.key +3 -50
- data/ext/itsi_server/src/server/itsi_server.rs +196 -134
- data/ext/itsi_server/src/server/lifecycle_event.rs +9 -0
- data/ext/itsi_server/src/server/listener.rs +241 -132
- data/ext/itsi_server/src/server/mod.rs +7 -1
- data/ext/itsi_server/src/server/process_worker.rs +196 -0
- data/ext/itsi_server/src/server/serve_strategy/cluster_mode.rs +254 -0
- data/ext/itsi_server/src/server/serve_strategy/mod.rs +27 -0
- data/ext/itsi_server/src/server/serve_strategy/single_mode.rs +247 -0
- data/ext/itsi_server/src/server/signal.rs +70 -0
- data/ext/itsi_server/src/server/thread_worker.rs +368 -0
- data/ext/itsi_server/src/server/tls/locked_dir_cache.rs +132 -0
- data/ext/itsi_server/src/server/tls.rs +137 -52
- data/ext/itsi_tracing/Cargo.toml +4 -0
- data/ext/itsi_tracing/src/lib.rs +36 -6
- data/lib/itsi/request.rb +30 -14
- data/lib/itsi/server/rack/handler/itsi.rb +25 -0
- data/lib/itsi/server/scheduler_mode.rb +6 -0
- data/lib/itsi/server/version.rb +1 -1
- data/lib/itsi/server.rb +82 -2
- data/lib/itsi/signals.rb +23 -0
- data/lib/itsi/stream_io.rb +38 -0
- metadata +39 -25
- data/ext/itsi_server/src/server/transfer_protocol.rs +0 -23
- data/ext/itsi_server/src/stream_writer/mod.rs +0 -21
data/ext/itsi_server/src/server/thread_worker.rs (new file)
@@ -0,0 +1,368 @@
use super::itsi_server::RequestJob;
use crate::{request::itsi_request::ItsiRequest, ITSI_SERVER};
use itsi_rb_helpers::{
    call_with_gvl, call_without_gvl, create_ruby_thread, kill_threads, HeapValue,
};
use itsi_tracing::{debug, error, info, warn};
use magnus::{
    error::Result,
    value::{InnerValue, Lazy, LazyId, Opaque, ReprValue},
    Module, RClass, Ruby, Thread, Value,
};
use nix::unistd::Pid;
use parking_lot::{Mutex, RwLock};
use std::{
    num::NonZeroU8,
    ops::Deref,
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    },
    thread,
    time::{Duration, Instant},
};
use tokio::{runtime::Builder as RuntimeBuilder, sync::watch};
use tracing::instrument;
pub struct ThreadWorker {
    pub id: String,
    pub app: Opaque<Value>,
    pub receiver: Arc<async_channel::Receiver<RequestJob>>,
    pub sender: async_channel::Sender<RequestJob>,
    pub thread: RwLock<Option<HeapValue<Thread>>>,
    pub terminated: Arc<AtomicBool>,
    pub scheduler_class: Option<Opaque<Value>>,
}

static ID_CALL: LazyId = LazyId::new("call");
static ID_ALIVE: LazyId = LazyId::new("alive?");
static ID_SCHEDULER: LazyId = LazyId::new("scheduler");
static ID_SCHEDULE: LazyId = LazyId::new("schedule");
static ID_BLOCK: LazyId = LazyId::new("block");
static ID_YIELD: LazyId = LazyId::new("yield");
static ID_CONST_GET: LazyId = LazyId::new("const_get");
static CLASS_FIBER: Lazy<RClass> = Lazy::new(|ruby| {
    ruby.module_kernel()
        .const_get::<_, RClass>("Fiber")
        .unwrap()
});

pub struct TerminateWakerSignal(bool);

#[instrument(name = "Boot", parent=None, skip(threads, app, pid, scheduler_class))]
pub fn build_thread_workers(
    pid: Pid,
    threads: NonZeroU8,
    app: Opaque<Value>,
    scheduler_class: Option<String>,
) -> Result<(Arc<Vec<ThreadWorker>>, async_channel::Sender<RequestJob>)> {
    let (sender, receiver) = async_channel::bounded(20);
    let receiver_ref = Arc::new(receiver);
    let sender_ref = sender;
    let (app, scheduler_class) = load_app(app, scheduler_class)?;
    Ok((
        Arc::new(
            (1..=u8::from(threads))
                .map(|id| {
                    info!(pid = pid.as_raw(), id, "Thread");
                    ThreadWorker::new(
                        format!("{:?}#{:?}", pid, id),
                        app,
                        receiver_ref.clone(),
                        sender_ref.clone(),
                        scheduler_class,
                    )
                })
                .collect::<Result<Vec<_>>>()?,
        ),
        sender_ref,
    ))
}

pub fn load_app(
    app: Opaque<Value>,
    scheduler_class: Option<String>,
) -> Result<(Opaque<Value>, Option<Opaque<Value>>)> {
    call_with_gvl(|ruby| {
        let app = app.get_inner_with(&ruby);
        let app = Opaque::from(
            app.funcall::<_, _, Value>(*ID_CALL, ())
                .expect("Couldn't load app"),
        );
        let scheduler_class = if let Some(scheduler_class) = scheduler_class {
            Some(Opaque::from(
                ruby.module_kernel()
                    .funcall::<_, _, Value>(*ID_CONST_GET, (scheduler_class,))?,
            ))
        } else {
            None
        };
        Ok((app, scheduler_class))
    })
}
impl ThreadWorker {
    pub fn new(
        id: String,
        app: Opaque<Value>,
        receiver: Arc<async_channel::Receiver<RequestJob>>,
        sender: async_channel::Sender<RequestJob>,
        scheduler_class: Option<Opaque<Value>>,
    ) -> Result<Self> {
        let mut worker = Self {
            id,
            app,
            receiver,
            sender,
            thread: RwLock::new(None),
            terminated: Arc::new(AtomicBool::new(false)),
            scheduler_class,
        };
        worker.run()?;
        Ok(worker)
    }

    #[instrument(skip(self), fields(id = self.id))]
    pub async fn request_shutdown(&self) {
        match self.sender.send(RequestJob::Shutdown).await {
            Ok(_) => {}
            Err(err) => error!("Failed to send shutdown request: {}", err),
        };
        info!("Requesting shutdown");
    }

    #[instrument(skip(self, deadline), fields(id = self.id))]
    pub fn poll_shutdown(&self, deadline: Instant) -> bool {
        call_with_gvl(|_ruby| {
            if let Some(thread) = self.thread.read().deref() {
                if Instant::now() > deadline {
                    warn!("Worker shutdown timed out. Killing thread");
                    self.terminated.store(true, Ordering::SeqCst);
                    kill_threads(vec![thread.as_value()]);
                }
                if thread.funcall::<_, _, bool>(*ID_ALIVE, ()).unwrap_or(false) {
                    return true;
                }
                info!("Thread has shut down");
            }
            self.thread.write().take();

            false
        })
    }

    pub fn run(&mut self) -> Result<()> {
        let id = self.id.clone();
        let app = self.app;
        let receiver = self.receiver.clone();
        let terminated = self.terminated.clone();
        let scheduler_class = self.scheduler_class;
        call_with_gvl(|_| {
            *self.thread.write() = Some(
                create_ruby_thread(move || {
                    if let Some(scheduler_class) = scheduler_class {
                        if let Err(err) =
                            Self::fiber_accept_loop(id, app, receiver, scheduler_class, terminated)
                        {
                            error!("Error in fiber_accept_loop: {:?}", err);
                        }
                    } else {
                        Self::accept_loop(id, app, receiver, terminated);
                    }
                })
                .into(),
            );
            Ok::<(), magnus::Error>(())
        })?;
        Ok(())
    }

    pub fn build_scheduler_proc(
        app: Opaque<Value>,
        leader: &Arc<Mutex<Option<RequestJob>>>,
        receiver: &Arc<async_channel::Receiver<RequestJob>>,
        terminated: &Arc<AtomicBool>,
        waker_sender: &watch::Sender<TerminateWakerSignal>,
    ) -> magnus::block::Proc {
        let leader = leader.clone();
        let receiver = receiver.clone();
        let terminated = terminated.clone();
        let waker_sender = waker_sender.clone();
        Ruby::get().unwrap().proc_from_fn(move |ruby, _args, _blk| {
            let scheduler = ruby
                .get_inner(&CLASS_FIBER)
                .funcall::<_, _, Value>(*ID_SCHEDULER, ())
                .unwrap();
            let server = ruby.get_inner(&ITSI_SERVER);
            let thread_current = ruby.thread_current();
            let leader_clone = leader.clone();
            let receiver = receiver.clone();
            let terminated = terminated.clone();
            let waker_sender = waker_sender.clone();
            let mut batch = Vec::with_capacity(MAX_BATCH_SIZE as usize);

            static MAX_BATCH_SIZE: i32 = 25;
            call_without_gvl(move || loop {
                let mut idle_counter = 0;
                if let Some(v) = leader_clone.lock().take() {
                    match v {
                        RequestJob::ProcessRequest(itsi_request) => {
                            batch.push(RequestJob::ProcessRequest(itsi_request))
                        }
                        RequestJob::Shutdown => {
                            waker_sender.send(TerminateWakerSignal(true)).unwrap();
                            break;
                        }
                    }
                }
                for _ in 0..MAX_BATCH_SIZE {
                    if let Ok(req) = receiver.try_recv() {
                        batch.push(req);
                    } else {
                        break;
                    }
                }

                let shutdown_requested = call_with_gvl(|_| {
                    for req in batch.drain(..) {
                        match req {
                            RequestJob::ProcessRequest(request) => {
                                let response = request.response.clone();
                                if let Err(err) =
                                    server.funcall::<_, _, Value>(*ID_SCHEDULE, (app, request))
                                {
                                    ItsiRequest::internal_error(ruby, response, err)
                                }
                            }
                            RequestJob::Shutdown => return true,
                        }
                    }
                    false
                });

                if shutdown_requested || terminated.load(Ordering::Relaxed) {
                    waker_sender.send(TerminateWakerSignal(true)).unwrap();
                    break;
                }

                let yield_result = if receiver.is_empty() {
                    waker_sender.send(TerminateWakerSignal(false)).unwrap();
                    idle_counter = (idle_counter + 1) % 100;
                    call_with_gvl(|ruby| {
                        if idle_counter == 0 {
                            ruby.gc_start();
                        }
                        scheduler.funcall::<_, _, Value>(*ID_BLOCK, (thread_current, None::<u8>))
                    })
                } else {
                    call_with_gvl(|_| scheduler.funcall::<_, _, Value>(*ID_YIELD, ()))
                };

                if yield_result.is_err() {
                    break;
                }
            })
        })
    }

    #[instrument(skip_all, fields(thread_worker=id))]
    pub fn fiber_accept_loop(
        id: String,
        app: Opaque<Value>,
        receiver: Arc<async_channel::Receiver<RequestJob>>,
        scheduler_class: Opaque<Value>,
        terminated: Arc<AtomicBool>,
    ) -> Result<()> {
        let ruby = Ruby::get().unwrap();
        let (waker_sender, waker_receiver) = watch::channel(TerminateWakerSignal(false));
        let leader: Arc<Mutex<Option<RequestJob>>> = Arc::new(Mutex::new(None));
        let server = ruby.get_inner(&ITSI_SERVER);
        let scheduler_proc =
            Self::build_scheduler_proc(app, &leader, &receiver, &terminated, &waker_sender);
        let (scheduler, scheduler_fiber) = server.funcall::<_, _, (Value, Value)>(
            "start_scheduler_loop",
            (scheduler_class, scheduler_proc),
        )?;
        Self::start_waker_thread(
            scheduler.into(),
            scheduler_fiber.into(),
            leader,
            receiver,
            waker_receiver,
        );
        Ok(())
    }

    #[allow(clippy::await_holding_lock)]
    pub fn start_waker_thread(
        scheduler: Opaque<Value>,
        scheduler_fiber: Opaque<Value>,
        leader: Arc<Mutex<Option<RequestJob>>>,
        receiver: Arc<async_channel::Receiver<RequestJob>>,
        mut waker_receiver: watch::Receiver<TerminateWakerSignal>,
    ) {
        create_ruby_thread(move || {
            let scheduler = scheduler.get_inner_with(&Ruby::get().unwrap());
            let leader = leader.clone();
            call_without_gvl(|| {
                RuntimeBuilder::new_current_thread()
                    .build()
                    .expect("Failed to build Tokio runtime")
                    .block_on(async {
                        loop {
                            waker_receiver.changed().await.ok();
                            if waker_receiver.borrow().0 {
                                break;
                            }
                            tokio::select! {
                                _ = waker_receiver.changed() => {
                                    if waker_receiver.borrow().0 {
                                        break;
                                    }
                                },
                                next_msg = receiver.recv() => {
                                    *leader.lock() = next_msg.ok();
                                    call_with_gvl(|_| {
                                        scheduler
                                            .funcall::<_, _, Value>(
                                                "unblock",
                                                (None::<u8>, scheduler_fiber),
                                            )
                                            .ok();
                                    });
                                }
                            }
                        }
                    })
            });
        });
    }

    #[instrument(skip_all, fields(thread_worker=id))]
    pub fn accept_loop(
        id: String,
        app: Opaque<Value>,
        receiver: Arc<async_channel::Receiver<RequestJob>>,
        terminated: Arc<AtomicBool>,
    ) {
        let ruby = Ruby::get().unwrap();
        let server = ruby.get_inner(&ITSI_SERVER);
        call_without_gvl(|| loop {
            match receiver.recv_blocking() {
                Ok(RequestJob::ProcessRequest(request)) => {
                    if terminated.load(Ordering::Relaxed) {
                        break;
                    }
                    call_with_gvl(|_ruby| {
                        request.process(&ruby, server, app).ok();
                    })
                }
                Ok(RequestJob::Shutdown) => {
                    debug!("Shutting down thread worker");
                    break;
                }
                Err(_) => {
                    thread::sleep(Duration::from_micros(1));
                }
            }
        });
    }
}
data/ext/itsi_server/src/server/tls/locked_dir_cache.rs (new file)
@@ -0,0 +1,132 @@
use async_trait::async_trait;
use fs2::FileExt;
use parking_lot::Mutex;
use std::env;
use std::fs::{self, OpenOptions};
use std::io::Error as IoError;
use std::path::{Path, PathBuf};
use tokio_rustls_acme::caches::DirCache;
use tokio_rustls_acme::{AccountCache, CertCache};

/// A wrapper around DirCache that locks a file before writing cert/account data.
pub struct LockedDirCache<P: AsRef<Path> + Send + Sync> {
    inner: DirCache<P>,
    lock_path: PathBuf,
    current_lock: Mutex<Option<std::fs::File>>,
}

impl<P: AsRef<Path> + Send + Sync> LockedDirCache<P> {
    pub fn new(dir: P) -> Self {
        let dir_path = dir.as_ref().to_path_buf();
        std::fs::create_dir_all(&dir_path).unwrap();
        let lock_path =
            dir_path.join(env::var("ITSI_ACME_LOCK_FILE_NAME").unwrap_or(".acme.lock".to_string()));
        Self::touch_file(&lock_path).expect("Failed to create lock file");

        Self {
            inner: DirCache::new(dir),
            lock_path,
            current_lock: Mutex::new(None),
        }
    }

    fn touch_file(path: &PathBuf) -> std::io::Result<()> {
        if let Some(parent) = path.parent() {
            fs::create_dir_all(parent)?;
        }
        fs::OpenOptions::new()
            .create(true)
            .write(true)
            .truncate(true)
            .open(path)?;
        Ok(())
    }

    fn lock_exclusive(&self) -> Result<(), IoError> {
        if self.current_lock.lock().is_some() {
            return Ok(());
        }

        if let Some(parent) = self.lock_path.parent() {
            std::fs::create_dir_all(parent)?;
        }
        let lockfile = OpenOptions::new()
            .create(true)
            .write(true)
            .truncate(true)
            .open(&self.lock_path)?;
        lockfile.lock_exclusive()?;
        *self.current_lock.lock() = Some(lockfile);
        Ok(())
    }

    fn unlock(&self) -> Result<(), IoError> {
        self.current_lock.lock().take();
        Ok(())
    }
}

#[async_trait]
impl<P: AsRef<Path> + Send + Sync> CertCache for LockedDirCache<P> {
    type EC = IoError;

    async fn load_cert(
        &self,
        domains: &[String],
        directory_url: &str,
    ) -> Result<Option<Vec<u8>>, Self::EC> {
        self.lock_exclusive()?;
        let result = self.inner.load_cert(domains, directory_url).await;

        if let Ok(Some(_)) = result {
            self.unlock()?;
        }

        result
    }

    async fn store_cert(
        &self,
        domains: &[String],
        directory_url: &str,
        cert: &[u8],
    ) -> Result<(), Self::EC> {
        // Acquire the lock before storing
        self.lock_exclusive()?;

        // Perform the store operation
        let result = self.inner.store_cert(domains, directory_url, cert).await;

        if let Ok(()) = result {
            self.unlock()?;
        }
        result
    }
}

#[async_trait]
impl<P: AsRef<Path> + Send + Sync> AccountCache for LockedDirCache<P> {
    type EA = IoError;

    async fn load_account(
        &self,
        contact: &[String],
        directory_url: &str,
    ) -> Result<Option<Vec<u8>>, Self::EA> {
        self.lock_exclusive()?;
        self.inner.load_account(contact, directory_url).await
    }

    async fn store_account(
        &self,
        contact: &[String],
        directory_url: &str,
        account: &[u8],
    ) -> Result<(), Self::EA> {
        self.lock_exclusive()?;

        self.inner
            .store_account(contact, directory_url, account)
            .await
    }
}
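
LockedDirCache serializes ACME certificate and account writes across workers by holding an exclusive fs2 lock on a shared lock file. The following is a minimal standalone sketch of that advisory-lock technique under the same assumption (only the fs2 crate); with_acme_lock is a hypothetical helper for illustration, not part of the gem.

// Standalone sketch of the fs2 advisory-lock pattern LockedDirCache relies on:
// processes open the same lock file and serialize a critical section with an
// exclusive lock.
use fs2::FileExt;
use std::fs::OpenOptions;
use std::io;

fn with_acme_lock<T>(lock_path: &str, critical: impl FnOnce() -> T) -> io::Result<T> {
    let lockfile = OpenOptions::new()
        .create(true)
        .write(true)
        .truncate(true)
        .open(lock_path)?;
    // Blocks until no other process holds the exclusive lock.
    lockfile.lock_exclusive()?;
    let out = critical();
    // Releasing the lock; LockedDirCache achieves the same by dropping the File
    // it keeps in `current_lock`.
    lockfile.unlock()?;
    Ok(out)
}

fn main() -> io::Result<()> {
    with_acme_lock(".acme.lock", || {
        // e.g. write certificate/account material here, as store_cert does.
        println!("holding exclusive lock on .acme.lock");
    })
}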