libertem-qd-mpx 0.2.13__tar.gz → 0.2.14__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this version of libertem-qd-mpx has been flagged as a potentially problematic release.
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/Cargo.lock +7 -5
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/PKG-INFO +1 -1
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/common/Cargo.toml +4 -1
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/common/src/frame_stack.rs +21 -12
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/common/src/generic_cam_client.rs +8 -2
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/common/src/generic_connection.rs +15 -7
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/common/src/py_connection.rs +6 -2
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/ipc_test/Cargo.toml +3 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/ipc_test/examples/consumer/main.rs +2 -2
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/ipc_test/examples/producer/main.rs +1 -1
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/ipc_test/src/backend_shm.rs +1 -1
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/ipc_test/src/slab.rs +42 -24
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/libertem_qd_mpx/Cargo.toml +4 -1
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/libertem_qd_mpx/src/background_thread.rs +26 -31
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/stats/Cargo.toml +3 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/Cargo.toml +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/README.md +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/common/README.md +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/common/benches/casting.rs +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/common/src/background_thread.rs +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/common/src/decoder.rs +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/common/src/lib.rs +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/common/src/py_cam_client.rs +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/common/src/tcp.rs +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/common/src/tracing.rs +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/common/src/utils.rs +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/ipc_test/README.md +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/ipc_test/run-consumer.sh +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/ipc_test/run-producer.sh +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/ipc_test/rust-toolchain.toml +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/ipc_test/src/backend_memfd.rs +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/ipc_test/src/common.rs +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/ipc_test/src/freestack.rs +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/ipc_test/src/lib.rs +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/libertem_qd_mpx/.github/workflows/CI.yml +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/libertem_qd_mpx/.gitignore +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/libertem_qd_mpx/README.md +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/libertem_qd_mpx/benches/decoders.rs +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/libertem_qd_mpx/examples/simple.py +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/libertem_qd_mpx/src/base_types.rs +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/libertem_qd_mpx/src/decoder.rs +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/libertem_qd_mpx/src/lib.rs +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/libertem_qd_mpx/src/main_py.rs +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/pyproject.toml +0 -0
- {libertem_qd_mpx-0.2.13 → libertem_qd_mpx-0.2.14}/stats/src/lib.rs +0 -0
Cargo.lock (+7 -5)

```diff
@@ -991,7 +991,7 @@ dependencies = [
 
 [[package]]
 name = "common"
-version = "0.2.13"
+version = "0.2.14"
 dependencies = [
  "bincode",
  "criterion",
@@ -2304,7 +2304,7 @@ checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5"
 
 [[package]]
 name = "libertem-asi-mpx3"
-version = "0.2.13"
+version = "0.2.14"
 dependencies = [
  "bincode",
  "common",
@@ -2325,7 +2325,7 @@ dependencies = [
 
 [[package]]
 name = "libertem-asi-tpx3"
-version = "0.2.13"
+version = "0.2.14"
 dependencies = [
  "bincode",
  "common",
@@ -2341,13 +2341,14 @@ dependencies = [
  "serde",
  "stats",
  "tempfile",
+ "thiserror",
  "uuid",
  "zerocopy",
 ]
 
 [[package]]
 name = "libertem-dectris"
-version = "0.2.13"
+version = "0.2.14"
 dependencies = [
  "bincode",
  "bs_sys",
@@ -2377,7 +2378,7 @@ dependencies = [
 
 [[package]]
 name = "libertem_qd_mpx"
-version = "0.2.13"
+version = "0.2.14"
 dependencies = [
  "bincode",
  "common",
@@ -3900,6 +3901,7 @@ dependencies = [
  "reqwest",
  "serde",
  "serde_json",
+ "thiserror",
  "url",
 ]
 
```
PKG-INFO (+1 -1)

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: libertem_qd_mpx
-Version: 0.2.13
+Version: 0.2.14
 Classifier: Programming Language :: Rust
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
```
common/Cargo.toml (+4 -1)

```diff
@@ -2,7 +2,7 @@
 name = "common"
 authors = ["Alexander Clausen <a.clausen@fz-juelich.de>"]
 license = "MIT"
-version = "0.2.13"
+version = "0.2.14"
 edition = "2021"
 rust-version = "1.71"
 readme = "README.md"
@@ -35,3 +35,6 @@ criterion = "0.5.1"
 [[bench]]
 name = "casting"
 harness = false
+
+[lints.rust]
+unused_must_use = "deny"
```
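Both this `[lints.rust]` table and the identical one added to libertem_qd_mpx/Cargo.toml below turn the `unused_must_use` lint into a hard error, which is what forces the rest of this release to propagate or explicitly discard every `Result` coming out of the SHM layer. A minimal sketch of the effect (the functions here are illustrative, not from the crate):

```rust
// With `unused_must_use = "deny"` under [lints.rust], silently dropping
// a Result becomes a compile error rather than a warning.
fn fallible() -> Result<(), String> {
    Err("shm mutex poisoned".to_owned())
}

fn caller() -> Result<(), String> {
    // fallible();       // would not compile: unused `Result` that must be used
    fallible()?;         // fine: propagate the error
    let _ = fallible();  // fine: explicitly discarded
    Ok(())
}

fn main() {
    let _ = caller();
}
```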
common/src/frame_stack.rs (+21 -12)

```diff
@@ -1,6 +1,6 @@
 use std::fmt::Debug;
 
-use ipc_test::{SharedSlabAllocator, SlotForWriting};
+use ipc_test::{slab::ShmError, SharedSlabAllocator, SlotForWriting};
 use log::{error, warn};
 use pyo3::{
     exceptions::{PyRuntimeError, PyValueError},
@@ -45,6 +45,9 @@ pub enum FrameStackWriteError {
 
     #[error("too small")]
     TooSmall,
+
+    #[error("SHM access error: {0}")]
+    ShmAccessError(#[from] ShmError),
 }
 
 impl From<FrameStackWriteError> for PyErr {
@@ -56,6 +59,7 @@ impl From<FrameStackWriteError> for PyErr {
             FrameStackWriteError::TooSmall => {
                 PyValueError::new_err("frame stack too small to handle single frame")
             }
+            FrameStackWriteError::ShmAccessError(e) => PyValueError::new_err(e.to_string()),
         }
     }
 }
@@ -64,6 +68,9 @@ impl From<FrameStackWriteError> for PyErr {
 pub enum SplitError<M: FrameMeta> {
     #[error("shm full")]
     ShmFull(FrameStackHandle<M>),
+
+    #[error("shm access error: {0}")]
+    AccessError(#[from] ShmError),
 }
 
 pub struct FrameStackForWriting<M>
@@ -160,7 +167,7 @@ where
     ) -> Result<FrameStackHandle<M>, FrameStackWriteError> {
         if self.is_empty() {
             let slot_info = shm.writing_done(self.slot);
-            shm.free_idx(slot_info.slot_idx);
+            shm.free_idx(slot_info.slot_idx)?;
             return Err(FrameStackWriteError::Empty);
         }
 
@@ -180,7 +187,7 @@ where
     ) -> Result<(), FrameStackWriteError> {
         if self.is_empty() {
             let slot_info = shm.writing_done(self.slot);
-            shm.free_idx(slot_info.slot_idx);
+            shm.free_idx(slot_info.slot_idx)?;
             Ok(())
         } else {
             Err(FrameStackWriteError::NonEmpty)
@@ -296,15 +303,17 @@ mod inner {
             let mut slot_left = match shm.try_get_mut() {
                 Ok(s) => s,
                 Err(ShmError::NoSlotAvailable) => return Err(SplitError::ShmFull(self)),
+                Err(e @ ShmError::MutexError(_)) => return Err(e.into()),
             };
             let mut slot_right = match shm.try_get_mut() {
                 Ok(s) => s,
                 Err(ShmError::NoSlotAvailable) => {
                     // don't leak the left slot!
                     let l = shm.writing_done(slot_left);
-                    shm.free_idx(l.slot_idx);
+                    shm.free_idx(l.slot_idx)?;
                     return Err(SplitError::ShmFull(self));
                 }
+                Err(e @ ShmError::MutexError(_)) => return Err(e.into()),
             };
 
             let slice_left = slot_left.as_slice_mut();
@@ -316,7 +325,7 @@ mod inner {
                 let left = shm.writing_done(slot_left);
                 let right = shm.writing_done(slot_right);
 
-                shm.free_idx(self.slot.slot_idx);
+                shm.free_idx(self.slot.slot_idx)?;
 
                 (left, right)
             };
@@ -351,8 +360,8 @@ mod inner {
             f(&slot_r)
         }
 
-        pub fn free_slot(self, shm: &mut SharedSlabAllocator) {
-            shm.free_idx(self.slot.slot_idx)
+        pub fn free_slot(self, shm: &mut SharedSlabAllocator) -> Result<(), ShmError> {
+            shm.free_idx(self.slot.slot_idx)
         }
     }
 
@@ -493,7 +502,7 @@ impl<'b, M: FrameMeta> Drop for WriteGuard<'b, M> {
         match frame_stack.writing_done(self.shm) {
             Ok(frame_stack) => {
                 warn!("discarding non-empty frame stack as result of previous errors");
-                frame_stack.free_slot(self.shm);
+                let _ = frame_stack.free_slot(self.shm);
             }
             Err(e) => error!("WriteGuard::drop failed: {e:?}"),
         }
@@ -616,12 +625,12 @@ mod tests {
        assert_eq!(a.offsets.len() + b.offsets.len(), 2);
 
        // when the split is done, there should be one free shm slot:
-        assert_eq!(shm.num_slots_free(), 1);
+        assert_eq!(shm.num_slots_free().unwrap(), 1);
 
        // and we can free them again:
-        shm.free_idx(a.slot.slot_idx);
-        shm.free_idx(b.slot.slot_idx);
+        shm.free_idx(a.slot.slot_idx).unwrap();
+        shm.free_idx(b.slot.slot_idx).unwrap();
 
-        assert_eq!(shm.num_slots_free(), 3);
+        assert_eq!(shm.num_slots_free().unwrap(), 3);
     }
 }
```
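The net effect of the frame_stack.rs changes is that `FrameStackHandle::free_slot` now returns `Result<(), ShmError>`: call sites either propagate the error with `?` or, as in the `WriteGuard::drop` hunk above, discard it explicitly with `let _ =`. A hedged caller sketch; the `release_stack` helper and the `common::frame_stack` import path are assumptions for illustration, not crate API:

```rust
use common::frame_stack::{FrameMeta, FrameStackHandle}; // assumed re-export path
use ipc_test::{slab::ShmError, SharedSlabAllocator};

// Illustrative consumer-side helper: return the slot backing a processed
// frame stack to the shared slab, surfacing SHM errors to the caller.
fn release_stack<M: FrameMeta>(
    handle: FrameStackHandle<M>,
    shm: &mut SharedSlabAllocator,
) -> Result<(), ShmError> {
    // ... decode or copy out the frames here ...
    handle.free_slot(shm)?; // Result<(), ShmError> as of 0.2.14
    Ok(())
}
```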
common/src/generic_cam_client.rs (+8 -2)

```diff
@@ -1,6 +1,9 @@
 use std::fmt::Debug;
 
-use ipc_test::{
+use ipc_test::{
+    slab::{ShmError, SlabInitError},
+    SharedSlabAllocator,
+};
 use multiversion::multiversion;
 use ndarray::ArrayViewMut3;
 use num::cast::AsPrimitive;
@@ -18,6 +21,9 @@ pub enum CamClientError {
         error: SlabInitError,
     },
 
+    #[error("failed to access SHM: {0}")]
+    ShmError(#[from] ShmError),
+
     #[error("operation on closed client")]
     Closed,
 
@@ -166,7 +172,7 @@ where
         M: FrameMeta,
     {
         let shm = self.get_shm_mut()?;
-        handle.free_slot(shm);
+        handle.free_slot(shm)?;
         Ok(())
     }
 
```
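The new `ShmError` variant on `CamClientError` uses thiserror's `#[from]`, which is what lets `handle.free_slot(shm)?` above convert the low-level error automatically. A small, self-contained sketch of that pattern (the type names here are stand-ins, not the crate's own):

```rust
use thiserror::Error;

#[derive(Debug, Error)]
enum LowLevelError {
    #[error("mutex error: {0}")]
    Mutex(String),
}

#[derive(Debug, Error)]
enum ClientError {
    // #[from] generates `impl From<LowLevelError> for ClientError`,
    // so the `?` operator can do the conversion implicitly.
    #[error("failed to access SHM: {0}")]
    Shm(#[from] LowLevelError),
}

fn low_level() -> Result<(), LowLevelError> {
    Err(LowLevelError::Mutex("poisoned".to_owned()))
}

fn client_op() -> Result<(), ClientError> {
    low_level()?; // LowLevelError -> ClientError via the generated From impl
    Ok(())
}

fn main() {
    println!("{:?}", client_op());
}
```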
common/src/generic_connection.rs (+15 -7)

```diff
@@ -6,7 +6,10 @@ use std::{
     time::{Duration, Instant},
 };
 
-use ipc_test::{
+use ipc_test::{
+    slab::{ShmError, SlabInitError},
+    SharedSlabAllocator,
+};
 use log::{debug, info, trace, warn};
 use stats::Stats;
 
@@ -53,6 +56,9 @@ pub enum ConnectionError {
     #[error("could not connect to SHM area: {0}")]
     ShmConnectError(#[from] SlabInitError),
 
+    #[error("could not access SHM")]
+    ShmAccessError(#[from] ShmError),
+
     #[error("background thread is dead")]
     Disconnected,
 
@@ -117,6 +123,7 @@ where
                     frame_stack = old_frame_stack;
                     continue;
                 }
+                Err(SplitError::AccessError(e)) => return Err(e.into()),
             };
         }
     }
@@ -314,13 +321,13 @@ where
                     return Err(ConnectionError::FatalError(error))
                 }
                 ReceiverMsg::FrameStack { frame_stack } => {
-                    frame_stack.free_slot(&mut self.shm);
+                    frame_stack.free_slot(&mut self.shm)?;
                     return Err(ConnectionError::UnexpectedMessage(
                         "ReceiverMsg::FrameStack in wait_for_arm".to_owned(),
                     ));
                 }
                 ReceiverMsg::Finished { frame_stack } => {
-                    frame_stack.free_slot(&mut self.shm);
+                    frame_stack.free_slot(&mut self.shm)?;
                     return Err(ConnectionError::UnexpectedMessage(
                         "ReceiverMsg::Finished in wait_for_arm".to_owned(),
                     ));
@@ -406,11 +413,11 @@ where
             match res {
                 ReceiverMsg::FrameStack { frame_stack } => {
                     trace!("wait_for_status: ignoring received FrameStackHandle");
-                    frame_stack.free_slot(&mut self.shm)
+                    frame_stack.free_slot(&mut self.shm)?;
                 }
                 ReceiverMsg::Finished { frame_stack } => {
                     warn!("wait_for_status: ignoring FrameStackHandle received in ReceiverMsg::Finished message");
-                    frame_stack.free_slot(&mut self.shm)
+                    frame_stack.free_slot(&mut self.shm)?;
                 }
                 ReceiverMsg::FatalError { error } => {
                     return Err(ConnectionError::FatalError(error));
@@ -568,12 +575,13 @@ where
         self.wait_for_status(ConnectionStatus::Idle, *timeout, periodic_callback)
     }
 
-    pub fn log_shm_stats(&self) {
+    pub fn log_shm_stats(&self) -> Result<(), ConnectionError> {
         let shm = &self.shm;
-        let free = shm.num_slots_free();
+        let free = shm.num_slots_free()?;
         let total = shm.num_slots_total();
         self.stats.log_stats();
         info!("shm stats free/total: {}/{}", free, total);
+        Ok(())
     }
 
     pub fn reset_stats(&mut self) {
```
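Because `num_slots_free` is now fallible, `log_shm_stats` returns `Result<(), ConnectionError>`, and its callers (including the Python-facing macro in the next file) have to propagate or map the error. A minimal, self-contained mirror of the new calling convention; `Connection` and its fields are stand-ins, not the crate's types:

```rust
// Stand-in types: only the shape of the new signature matters here,
// i.e. `log_shm_stats(&self) -> Result<(), ConnectionError>`.
#[derive(Debug)]
struct ConnectionError(String);

struct Connection {
    free: usize,
    total: usize,
}

impl Connection {
    fn log_shm_stats(&self) -> Result<(), ConnectionError> {
        // In the real crate `free` comes from `shm.num_slots_free()?`,
        // which is what makes this method fallible in 0.2.14.
        println!("shm stats free/total: {}/{}", self.free, self.total);
        Ok(())
    }

    fn close(&self) -> Result<(), ConnectionError> {
        self.log_shm_stats()?; // callers now have to handle or propagate this
        Ok(())
    }
}

fn main() {
    let conn = Connection { free: 3, total: 4 };
    conn.close().expect("close should succeed");
}
```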
common/src/py_connection.rs (+6 -2)

```diff
@@ -191,7 +191,9 @@ macro_rules! impl_py_connection {
             let _trace_guard = span_from_py(py, &format!("{}::close", stringify!($name)))?;
 
             if let Some(mut conn_impl) = self.conn_impl.take() {
-                conn_impl.log_shm_stats();
+                conn_impl
+                    .log_shm_stats()
+                    .map_err(|e| PyConnectionError::new_err(e.to_string()))?;
                 conn_impl.reset_stats();
                 conn_impl.close();
                 Ok(())
@@ -261,7 +263,9 @@ macro_rules! impl_py_connection {
 
             pub fn log_shm_stats(&self) -> PyResult<()> {
                 let conn_impl = self.get_conn()?;
-                conn_impl.log_shm_stats();
+                conn_impl
+                    .log_shm_stats()
+                    .map_err(|e| PyConnectionError::new_err(e.to_string()))?;
                 Ok(())
             }
         }
```
ipc_test/examples/consumer/main.rs (+2 -2)

```diff
@@ -75,13 +75,13 @@ fn main() {
         // some additional "work":
         //std::thread::sleep(Duration::from_micros(1));
 
-        ssa.free_idx(slot_info.slot_idx);
+        ssa.free_idx(slot_info.slot_idx).unwrap();
 
         sum += sum_part.0 as f64;
         bytes_processed += SLOT_SIZE_BYTES;
 
         if t0.elapsed() > Duration::from_secs(1) {
-            let slots_free = ssa.num_slots_free();
+            let slots_free = ssa.num_slots_free().unwrap();
             println!(
                 "idx: {idx:5}, sum: {sum_part}, throughput: {:7.2} MiB/s, slots free: {slots_free}",
                 bytes_processed as f32 / 1024.0 / 1024.0
```
ipc_test/examples/producer/main.rs (+1 -1)

```diff
@@ -70,7 +70,7 @@ fn handle_connection(
     }
 
     println!("done sending {} items", send_num_items);
-    while ssa.num_slots_free() < ssa.num_slots_total() {
+    while ssa.num_slots_free().unwrap() < ssa.num_slots_total() {
         thread::sleep(Duration::from_millis(100));
     }
     println!("done!")
```
ipc_test/src/slab.rs (+42 -24)

```diff
@@ -69,10 +69,13 @@ pub struct SHMHandle {
 
 impl SHMHandle {}
 
-#[derive(Debug, thiserror::Error)]
+#[derive(Debug, Clone, thiserror::Error)]
 pub enum ShmError {
     #[error("no slot available")]
     NoSlotAvailable,
+
+    #[error("mutex error: {0}")]
+    MutexError(String),
 }
 
 /// Additional information needed to re-crate a `SharedSlabAllocator` in a
@@ -103,6 +106,9 @@ pub struct SharedSlabAllocator {
 pub enum SlabInitError {
     #[error("connection failed: {0}")]
     ConnectError(#[from] ShmConnectError),
+
+    #[error("mutex error: {0}")]
+    MutexError(String),
 }
 
 ///
@@ -185,7 +191,8 @@ impl SharedSlabAllocator {
         let free_list_ptr = unsafe { ptr.offset(Self::MUTEX_SIZE.try_into().unwrap()) };
 
         let (_lock, bg_thread) = if init_structures {
-            let (lock, used_size) = unsafe { Mutex::new(ptr, free_list_ptr)
+            let (lock, used_size) = unsafe { Mutex::new(ptr, free_list_ptr) }
+                .map_err(|e| SlabInitError::MutexError(e.to_string()))?;
 
             if used_size > Self::MUTEX_SIZE {
                 panic!("Mutex size larger than expected!");
@@ -231,7 +238,8 @@ impl SharedSlabAllocator {
 
             (lock, Some((j, cleanup_chan_s)))
         } else {
-            let (lock, used_size) = unsafe { Mutex::from_existing(ptr, free_list_ptr)
+            let (lock, used_size) = unsafe { Mutex::from_existing(ptr, free_list_ptr) }
+                .map_err(|e| SlabInitError::MutexError(e.to_string()))?;
 
             if used_size > Self::MUTEX_SIZE {
                 panic!("Mutex size larger than expected!");
@@ -259,11 +267,12 @@ impl SharedSlabAllocator {
         Self::connect(&handle.os_handle)
     }
 
-    fn get_mutex(&self) -> Box<dyn LockImpl> {
+    fn get_mutex(&self) -> Result<Box<dyn LockImpl>, ShmError> {
         let ptr = self.shm.as_mut_ptr();
         let free_list_ptr = unsafe { ptr.offset(Self::MUTEX_SIZE.try_into().unwrap()) };
-        let (lock, _) = unsafe { Mutex::from_existing(ptr, free_list_ptr)
-
+        let (lock, _) = unsafe { Mutex::from_existing(ptr, free_list_ptr) }
+            .map_err(|e| ShmError::MutexError(e.to_string()))?;
+        Ok(lock)
     }
 
     pub fn get_slab_info(&self) -> SlabInfo {
@@ -290,19 +299,27 @@ impl SharedSlabAllocator {
     /// `SlotInfo` struct using `writing_done`, which can then be sent to
     /// a consumer.
     pub fn get_mut(&mut self) -> Option<SlotForWriting> {
-
+        match self.try_get_mut() {
+            Ok(slot) => Some(slot),
+            Err(_) => None,
+        }
+    }
 
-
+    pub fn try_get_mut(&mut self) -> Result<SlotForWriting, ShmError> {
+        let slot_idx: usize = match self.pop_free_slot_idx()? {
+            Some(idx) => idx,
+            None => {
+                return Err(ShmError::NoSlotAvailable);
+            }
+        };
+
+        Ok(SlotForWriting {
             ptr: self.get_mut_ptr_for_slot(slot_idx),
             slot_idx,
             size: self.slot_size,
         })
     }
 
-    pub fn try_get_mut(&mut self) -> Result<SlotForWriting, ShmError> {
-        self.get_mut().ok_or(ShmError::NoSlotAvailable)
-    }
-
     /// Exchange the `SlotForWriting` token into
     /// a `SlotInfo` that can be sent to readers
     /// which can not be used to write anymore
@@ -314,11 +331,11 @@ impl SharedSlabAllocator {
         }
     }
 
-    pub fn num_slots_free(&self) -> usize {
-        let mutex = self.get_mutex();
+    pub fn num_slots_free(&self) -> Result<usize, ShmError> {
+        let mutex = self.get_mutex()?;
         let guard = mutex.lock().unwrap();
         let stack = Self::get_free_list(*guard, self.num_slots);
-        stack.get_stack_idx()
+        Ok(stack.get_stack_idx())
     }
 
     pub fn num_slots_total(&self) -> usize {
@@ -338,11 +355,12 @@ impl SharedSlabAllocator {
         }
     }
 
-    pub fn free_idx(&mut self, slot_idx: usize) {
-        let mutex = self.get_mutex();
+    pub fn free_idx(&mut self, slot_idx: usize) -> Result<(), ShmError> {
+        let mutex = self.get_mutex()?;
         let guard = mutex.lock().unwrap();
         let mut stack = Self::get_free_list(*guard, self.num_slots);
         stack.push(slot_idx);
+        Ok(())
     }
 
     ///
@@ -416,11 +434,11 @@ impl SharedSlabAllocator {
         FreeStack::new(stack_ptr, free_list_size)
     }
 
-    fn pop_free_slot_idx(&mut self) -> Option<usize> {
-        let mutex = self.get_mutex();
+    fn pop_free_slot_idx(&mut self) -> Result<Option<usize>, ShmError> {
+        let mutex = self.get_mutex()?;
         let guard = mutex.lock().unwrap();
         let mut stack = Self::get_free_list(*guard, self.num_slots);
-        stack.pop()
+        Ok(stack.pop())
     }
 }
 
@@ -477,7 +495,7 @@ mod test {
        for i in 0..255u8 {
            assert_eq!(slotr.as_slice()[i as usize], i);
        }
-        ssa.free_idx(slotw.slot_idx);
+        ssa.free_idx(slotw.slot_idx).unwrap();
    }
 
    #[test]
@@ -505,7 +523,7 @@ mod test {
        for i in 0..255u8 {
            assert_eq!(slotr.as_slice()[i as usize], i);
        }
-        ssa2.free_idx(slotw.slot_idx);
+        ssa2.free_idx(slotw.slot_idx).unwrap();
    }
 
    #[test]
@@ -534,7 +552,7 @@ mod test {
        for i in 0..255u8 {
            assert_eq!(slotr.as_slice()[i as usize], i);
        }
-        ssa2.free_idx(slotw.slot_idx);
+        ssa2.free_idx(slotw.slot_idx).unwrap();
    }
 
    #[test]
@@ -575,7 +593,7 @@ mod test {
 
        // We are done with the data in the slot, make it available
        // to the producer:
-        ssa2.free_idx(idx);
+        ssa2.free_idx(idx).unwrap();
 
        // for keeping the test robust: signal main thread we are done
        done_s.send(()).unwrap();
```
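Taken together, the slab.rs changes make the allocator's public surface fallible: mutex problems now surface as `ShmError::MutexError(String)` instead of panicking, and `try_get_mut`, `num_slots_free` and `free_idx` all return `Result`. A hedged sketch of a caller of the 0.2.14 API (the helper function itself is illustrative, not part of the crate):

```rust
use ipc_test::{slab::ShmError, SharedSlabAllocator};

// Illustrative helper: grab a slot, hand it over to readers, and return it
// to the free list, propagating the now-fallible calls with `?`.
fn roundtrip_one_slot(ssa: &mut SharedSlabAllocator) -> Result<(), ShmError> {
    let before = ssa.num_slots_free()?; // Result<usize, ShmError> since 0.2.14

    let slot = ssa.try_get_mut()?; // Err(ShmError::NoSlotAvailable) when full
    let slot_info = ssa.writing_done(slot); // exchange the writing token
    ssa.free_idx(slot_info.slot_idx)?; // Result<(), ShmError> since 0.2.14

    assert_eq!(ssa.num_slots_free()?, before);
    Ok(())
}
```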
libertem_qd_mpx/Cargo.toml (+4 -1)

```diff
@@ -2,7 +2,7 @@
 name = "libertem_qd_mpx"
 authors = ["Alexander Clausen <a.clausen@fz-juelich.de>"]
 license = "MIT"
-version = "0.2.13"
+version = "0.2.14"
 edition = "2021"
 readme = "README.md"
 rust-version = "1.71"
@@ -41,3 +41,6 @@ tokio = { version = "1", features = ["rt", "net", "time", "sync", "io-util", "rt
 [[bench]]
 name = "decoders"
 harness = false
+
+[lints.rust]
+unused_must_use = "deny"
```
libertem_qd_mpx/src/background_thread.rs (+26 -31)

```diff
@@ -49,8 +49,8 @@ pub enum AcquisitionError {
     #[error("configuration error: {msg}")]
     ConfigurationError { msg: String },
 
-    #[error("shm
-
+    #[error("error accessing shm: {0}")]
+    ShmAccessError(#[from] ShmError),
 
     #[error("error writing to shm: {0}")]
     WriteError(#[from] FrameStackWriteError),
@@ -95,14 +95,6 @@ impl<T> From<SendError<T>> for AcquisitionError {
     }
 }
 
-impl From<ShmError> for AcquisitionError {
-    fn from(value: ShmError) -> Self {
-        match value {
-            ShmError::NoSlotAvailable => AcquisitionError::NoSlotAvailable,
-        }
-    }
-}
-
 impl From<ReadExactError<AcquisitionError>> for AcquisitionError {
     fn from(value: ReadExactError<AcquisitionError>) -> Self {
         match value {
@@ -370,7 +362,7 @@ fn make_frame_stack<'a>(
 ) -> Result<WriteGuard<'a, QdFrameMeta>, AcquisitionError> {
     loop {
         // keep some slots free for splitting frame stacks
-        if shm.num_slots_free() < 3 && shm.num_slots_total() >= 3 {
+        if shm.num_slots_free()? < 3 && shm.num_slots_total() >= 3 {
             trace!("shm is almost full; waiting and creating backpressure...");
             check_for_control(to_thread_r)?;
             std::thread::sleep(Duration::from_millis(1));
@@ -394,6 +386,9 @@ fn make_frame_stack<'a>(
                 std::thread::sleep(Duration::from_millis(1));
                 continue;
             }
+            Err(e @ ShmError::MutexError(_)) => {
+                return Err(e.into());
+            }
         }
     }
 }
@@ -624,7 +619,7 @@ fn passive_acquisition(
         shm,
     )?;
 
-    let free = shm.num_slots_free();
+    let free = shm.num_slots_free()?;
     let total = shm.num_slots_total();
     info!("passive acquisition done; free slots: {}/{}", free, total);
 
@@ -974,7 +969,7 @@ End
        conn.close();
        server_thread.join().unwrap();
 
-        assert_eq!(shm.num_slots_total(), shm.num_slots_free());
+        assert_eq!(shm.num_slots_total(), shm.num_slots_free().unwrap());
    });
 }
 
@@ -1051,7 +1046,7 @@ End
        conn.close();
        server_thread.join().unwrap();
 
-        assert_eq!(shm.num_slots_total(), shm.num_slots_free());
+        assert_eq!(shm.num_slots_total(), shm.num_slots_free().unwrap());
    });
 }
 
@@ -1156,7 +1151,7 @@ End
            stack.with_slot(&shm, |s| {
                assert_eq!(s.as_slice(), vec![idx as u8; 256 * 256],);
            });
-            stack.free_slot(&mut shm);
+            stack.free_slot(&mut shm).unwrap();
            idx += 1;
        } else {
            eprintln!("done!");
@@ -1173,7 +1168,7 @@ End
        conn.close();
        server_thread.join().unwrap();
 
-        assert_eq!(shm.num_slots_total(), shm.num_slots_free());
+        assert_eq!(shm.num_slots_total(), shm.num_slots_free().unwrap());
    });
 }
 
@@ -1257,7 +1252,7 @@ End
            stack.with_slot(&shm, |s| {
                assert_eq!(s.as_slice(), vec![idx as u8; 256 * 256],);
            });
-            stack.free_slot(&mut shm);
+            stack.free_slot(&mut shm).unwrap();
        } else {
            eprintln!("done!");
            break;
@@ -1272,7 +1267,7 @@ End
        conn.close();
        server_thread.join().unwrap();
 
-        assert_eq!(shm.num_slots_total(), shm.num_slots_free());
+        assert_eq!(shm.num_slots_total(), shm.num_slots_free().unwrap());
    });
 }
 
@@ -1365,7 +1360,7 @@ End
            stack.with_slot(&shm, |s| {
                assert_eq!(s.as_slice(), vec![idx as u8; 256 * 256],);
            });
-            stack.free_slot(&mut shm);
+            stack.free_slot(&mut shm).unwrap();
        } else {
            eprintln!("done!");
            break;
@@ -1380,7 +1375,7 @@ End
        conn.close();
        server_thread.join().unwrap();
 
-        assert_eq!(shm.num_slots_total(), shm.num_slots_free());
+        assert_eq!(shm.num_slots_total(), shm.num_slots_free().unwrap());
    });
 }
 
@@ -1478,7 +1473,7 @@ End
            stack.with_slot(&shm, |s| {
                assert_eq!(s.as_slice(), vec![idx as u8; 256 * 256],);
            });
-            stack.free_slot(&mut shm);
+            stack.free_slot(&mut shm).unwrap();
        } else {
            eprintln!("done!");
            break;
@@ -1493,7 +1488,7 @@ End
        conn.close();
        server_thread.join().unwrap();
 
-        assert_eq!(shm.num_slots_total(), shm.num_slots_free());
+        assert_eq!(shm.num_slots_total(), shm.num_slots_free().unwrap());
    });
 }
 
@@ -1586,7 +1581,7 @@ End
            stack.with_slot(&shm, |s| {
                assert_eq!(s.as_slice(), vec![idx as u8; 256 * 256],);
            });
-            stack.free_slot(&mut shm);
+            stack.free_slot(&mut shm).unwrap();
        }
 
        let next = conn.get_next_stack(1, || Ok::<_, std::io::Error>(()));
@@ -1601,7 +1596,7 @@ End
        server_thread.join().unwrap();
 
        // we don't leak any slots, yay!
-        assert_eq!(shm.num_slots_total(), shm.num_slots_free());
+        assert_eq!(shm.num_slots_total(), shm.num_slots_free().unwrap());
    });
 }
 
@@ -1690,7 +1685,7 @@ End
            stack.with_slot(&shm, |s| {
                assert_eq!(s.as_slice(), vec![idx as u8; 256 * 256],);
            });
-            stack.free_slot(&mut shm);
+            stack.free_slot(&mut shm).unwrap();
            idx += 1;
 
            if Instant::now() > deadline {
@@ -1820,7 +1815,7 @@ End
        first.with_slot(&shm, |s| {
            assert_eq!(s.as_slice(), vec![0u8; 256 * 256],);
        });
-        first.free_slot(&mut shm);
+        first.free_slot(&mut shm).unwrap();
 
        info!("got the first one");
 
@@ -1848,7 +1843,7 @@ End
            stack.with_slot(&shm, |s| {
                assert_eq!(s.as_slice(), vec![idx as u8; 256 * 256],);
            });
-            stack.free_slot(&mut shm);
+            stack.free_slot(&mut shm).unwrap();
        }
 
        // allow the server thread to end:
@@ -1859,7 +1854,7 @@ End
        server_thread.join().unwrap();
 
        // we don't leak any slots, yay!
-        assert_eq!(shm.num_slots_total(), shm.num_slots_free());
+        assert_eq!(shm.num_slots_total(), shm.num_slots_free().unwrap());
    });
 }
 
@@ -1970,7 +1965,7 @@ End
            stack.with_slot(&shm, |s| {
                assert_eq!(s.as_slice(), vec![idx as u8; 256 * 256],);
            });
-            stack.free_slot(&mut shm);
+            stack.free_slot(&mut shm).unwrap();
        }
 
        // next stack is an error:
@@ -2001,7 +1996,7 @@ End
            stack.with_slot(&shm, |s| {
                assert_eq!(s.as_slice(), vec![idx as u8; 256 * 256],);
            });
-            stack.free_slot(&mut shm);
+            stack.free_slot(&mut shm).unwrap();
        }
 
        // that was the last stack:
@@ -2022,7 +2017,7 @@ End
            .expect("server should be able to send everything");
 
        // we don't leak any slots, yay!
-        assert_eq!(shm.num_slots_total(), shm.num_slots_free());
+        assert_eq!(shm.num_slots_total(), shm.num_slots_free().unwrap());
    });
 }
 }
```
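With the blanket `From<ShmError>` impl gone, background_thread.rs now distinguishes the two `ShmError` cases explicitly: `NoSlotAvailable` keeps the existing back-off/retry behaviour, while `MutexError` is fatal and is converted into `AcquisitionError::ShmAccessError` through the new `#[from]` variant. A reduced sketch of that control flow (the retry loop is simplified and the helper name is illustrative):

```rust
use std::time::Duration;

use ipc_test::{slab::ShmError, SharedSlabAllocator, SlotForWriting};

// Simplified slot-acquisition loop: retry while the slab is merely full,
// bail out as soon as the shared mutex itself is unusable.
fn acquire_slot_or_retry(shm: &mut SharedSlabAllocator) -> Result<SlotForWriting, ShmError> {
    loop {
        match shm.try_get_mut() {
            Ok(slot) => return Ok(slot),
            Err(ShmError::NoSlotAvailable) => {
                // transient: wait for a consumer to free a slot
                std::thread::sleep(Duration::from_millis(1));
                continue;
            }
            Err(e @ ShmError::MutexError(_)) => {
                // fatal: propagate; the caller maps it to AcquisitionError
                return Err(e);
            }
        }
    }
}
```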