ratomic 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,231 @@
1
+ use crate::{GcGuard, sem::Semaphore};
2
+ use std::{
3
+ cell::Cell,
4
+ ffi::c_ulong,
5
+ sync::atomic::{AtomicUsize, Ordering},
6
+ };
7
+
8
/// One slot of the ring buffer.
///
/// `sequence` is the slot's availability stamp (Vyukov-style bounded MPMC
/// protocol — see `try_push`/`try_pop`): producers and consumers compare it
/// against their position counter to decide whether the slot is theirs.
struct QueueElement {
    sequence: AtomicUsize,
    // Cell gives single-owner interior mutability; cross-thread exclusivity is
    // provided by the sequence handshake, not by this type (see the unsafe
    // Send/Sync impls below).
    data: Cell<c_ulong>,
}
12
+
13
// SAFETY: `Cell<c_ulong>` is not Sync by itself. The queue's sequence-stamp
// protocol guarantees that at most one thread accesses `data` between the
// Acquire load and the Release store of `sequence` (see try_push/try_pop),
// so sharing slots across threads is sound in that context — NOTE(review):
// this soundness argument depends entirely on the queue code; these impls
// must not be reused for QueueElement outside it.
unsafe impl Send for QueueElement {}
unsafe impl Sync for QueueElement {}
15
+
16
/// A bounded multi-producer multi-consumer queue of `c_ulong` values
/// (sequence-stamped ring buffer), augmented with blocking semaphores and a
/// GC guard for use from the Ruby extension.
pub struct MpmcQueue {
    // Ring storage; length is always a power of two after `init`.
    buffer: Vec<QueueElement>,
    // buffer.len() - 1; lets `pos & buffer_mask` replace `pos % len`.
    buffer_mask: usize,
    // Monotonically increasing claim tickets for producers/consumers.
    enqueue_pos: AtomicUsize,
    dequeue_pos: AtomicUsize,

    // Coordinates with the garbage collector (see pop / acquire_as_gc).
    gc_guard: GcGuard,
    // Counts items available to consumers (posted on push).
    read_sem: Semaphore,
    // Counts free slots available to producers (posted on pop).
    write_sem: Semaphore,
}
26
+
27
impl MpmcQueue {
    /// Creates a zero-capacity placeholder; `init` must run before use.
    /// This two-phase alloc/init split mirrors the C FFI protocol
    /// (`mpmc_queue_alloc` / `mpmc_queue_init`).
    fn alloc() -> Self {
        Self {
            buffer: vec![],
            buffer_mask: 0,
            enqueue_pos: AtomicUsize::new(0),
            dequeue_pos: AtomicUsize::new(0),

            gc_guard: GcGuard::alloc(),
            read_sem: Semaphore::alloc(),
            write_sem: Semaphore::alloc(),
        }
    }

    /// Allocates `buffer_size` slots, each pre-filled with `default`, and
    /// resets all counters/semaphores.
    ///
    /// # Panics
    /// Panics unless `buffer_size` is a power of two and >= 2 — the
    /// power-of-two requirement is what makes `pos & buffer_mask` a valid
    /// substitute for modulo.
    fn init(&mut self, buffer_size: usize, default: c_ulong) {
        assert!(buffer_size >= 2);
        assert_eq!(buffer_size & (buffer_size - 1), 0);

        let mut buffer = Vec::with_capacity(buffer_size);
        for i in 0..buffer_size {
            buffer.push(QueueElement {
                // Slot i starts stamped `i`, i.e. "free for the producer
                // whose ticket is i".
                sequence: AtomicUsize::new(i),
                data: Cell::new(default),
            });
        }

        self.buffer_mask = buffer_size - 1;
        self.buffer = buffer;
        self.enqueue_pos.store(0, Ordering::Relaxed);
        self.dequeue_pos.store(0, Ordering::Relaxed);

        self.gc_guard.init();
        // No items yet; every slot is writable.
        self.read_sem.init(0);
        self.write_sem.init(buffer_size as u32);
    }

    /// Convenience constructor: `alloc` + `init`. See `init` for panics.
    pub fn new(buffer_size: usize, default: c_ulong) -> Self {
        let mut q = Self::alloc();
        q.init(buffer_size, default);
        q
    }

    /// Lock-free attempt to enqueue. Returns false when the queue is full.
    ///
    /// Classic bounded-MPMC slot protocol: a slot whose sequence equals the
    /// producer's ticket `pos` is free; claiming the ticket via CAS grants
    /// exclusive write access until the Release store publishes the item.
    fn try_push(&self, data: c_ulong) -> bool {
        let mut cell;
        let mut pos = self.enqueue_pos.load(Ordering::Relaxed);
        loop {
            cell = &self.buffer[pos & self.buffer_mask];
            // Acquire pairs with the consumer's Release store, making the
            // slot's prior contents visible before we overwrite them.
            let seq = cell.sequence.load(Ordering::Acquire);
            let diff = seq as isize - pos as isize;
            if diff == 0 {
                // Slot is free for ticket `pos`; try to claim the ticket.
                // `_weak` may fail spuriously, in which case we simply loop.
                if self
                    .enqueue_pos
                    .compare_exchange_weak(pos, pos + 1, Ordering::Relaxed, Ordering::Relaxed)
                    .is_ok()
                {
                    break;
                }
            } else if diff < 0 {
                // Slot still holds an unconsumed item a full lap behind us:
                // the queue is full.
                return false;
            } else {
                // Another producer already took this ticket; refetch ours.
                pos = self.enqueue_pos.load(Ordering::Relaxed);
            }
        }
        cell.data.set(data);
        // Release publishes `data` and hands the slot to the consumer whose
        // dequeue ticket is `pos` (it waits for sequence == pos + 1).
        cell.sequence.store(pos + 1, Ordering::Release);
        // Wake one blocked consumer, if any.
        self.read_sem.post();
        true
    }

    /// Lock-free attempt to dequeue. Returns None when the queue is empty.
    /// Mirror image of `try_push`: a slot stamped `pos + 1` holds an item
    /// for dequeue ticket `pos`.
    fn try_pop(&self) -> Option<c_ulong> {
        let mut cell;
        let mut pos = self.dequeue_pos.load(Ordering::Relaxed);
        loop {
            cell = &self.buffer[pos & self.buffer_mask];
            // Acquire pairs with the producer's Release store of the data.
            let seq = cell.sequence.load(Ordering::Acquire);
            let diff = seq as isize - (pos + 1) as isize;
            if diff == 0 {
                if self
                    .dequeue_pos
                    .compare_exchange_weak(pos, pos + 1, Ordering::Relaxed, Ordering::Relaxed)
                    .is_ok()
                {
                    break;
                }
            } else if diff < 0 {
                // Producer for this ticket hasn't published yet: empty.
                return None;
            } else {
                // Another consumer already took this ticket; refetch ours.
                pos = self.dequeue_pos.load(Ordering::Relaxed);
            }
        }

        let data = cell.data.get();
        // Stamp the slot free for the producer one full lap ahead
        // (pos + capacity), and publish with Release.
        cell.sequence
            .store(pos + self.buffer_mask + 1, Ordering::Release);
        // Wake one blocked producer, if any.
        self.write_sem.post();

        // Artificial delay used only by the "simulation" feature build,
        // presumably to widen race windows during testing.
        #[cfg(feature = "simulation")]
        std::thread::sleep(std::time::Duration::from_millis(100));

        Some(data)
    }

    /// Blocking enqueue: spins on `try_push`, sleeping on `write_sem` while
    /// the queue is full. NOTE(review): `write_sem` accumulates one post per
    /// pop even when no producer is waiting, so its count can exceed the
    /// number of free slots; that only causes extra retry loops, not
    /// incorrect results — confirm intended.
    pub fn push(&self, data: c_ulong) {
        loop {
            if self.try_push(data) {
                return;
            }
            self.write_sem.wait();
        }
    }

    /// Blocking dequeue. Each attempt runs inside
    /// `gc_guard.acquire_as_consumer`, presumably so the GC cannot run (via
    /// `acquire_as_gc`) in the middle of a pop — see the gc_guard module.
    pub fn pop(&self) -> c_ulong {
        loop {
            if let Some(data) = self.gc_guard.acquire_as_consumer(|| self.try_pop()) {
                return data;
            }
            self.read_sem.wait();
            // self.read_sem
            //     .wait_for(std::time::Duration::from_millis(100));
        }
    }

    /// Runs `f` while holding the GC side of the guard, excluding consumers
    /// (see `GcGuard::acquire_as_gc`).
    pub fn acquire_as_gc<F, T>(&self, f: F) -> T
    where
        F: FnOnce() -> T,
    {
        self.gc_guard.acquire_as_gc(f)
    }

    /// Applies `f` to every slot's current value — including slots that are
    /// not logically in the queue right now. For GC marking this is a
    /// deliberate over-approximation (marking a stale value is safe;
    /// missing a live one is not).
    fn foreach<F>(&self, f: F)
    where
        F: Fn(c_ulong),
    {
        for item in self.buffer.iter() {
            let value = item.data.get();
            f(value);
        }
    }

    /// GC entry point: marks every slot value under GC-side exclusion.
    /// `mark` is an extern "C" callback, presumably Ruby's object-marking
    /// function — verify against ext/ratomic.
    fn mark(&self, mark: extern "C" fn(c_ulong)) {
        self.acquire_as_gc(|| {
            self.foreach(|item| {
                mark(item);
            });
        });
    }
}
174
+
175
/// FFI: writes a placeholder (un-`init`-ed) queue into `q`.
///
/// # Safety
/// `q` must be non-null, properly aligned, and point to at least
/// `MPMC_QUEUE_OBJECT_SIZE` writable bytes. The previous contents are
/// overwritten without being dropped.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn mpmc_queue_alloc(q: *mut MpmcQueue) {
    unsafe { q.write(MpmcQueue::alloc()) }
}
179
+
180
/// FFI: initializes a queue previously created by `mpmc_queue_alloc`.
///
/// # Safety
/// `q` must point to a valid `MpmcQueue`. Panics (via `unwrap`) if `q` is
/// null, and (via `init`) unless `capacity` is a power of two >= 2.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn mpmc_queue_init(q: *mut MpmcQueue, capacity: usize, default: c_ulong) {
    let q = unsafe { q.as_mut().unwrap() };
    q.init(capacity, default);
}
185
+
186
/// FFI: runs the queue's destructor in place (does not free `q` itself —
/// the caller owns that storage).
///
/// # Safety
/// `q` must be non-null and point to a valid, not-yet-dropped `MpmcQueue`.
/// Calling this twice on the same object is undefined behavior.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn mpmc_queue_drop(q: *mut MpmcQueue) {
    unsafe { std::ptr::drop_in_place(q) };
}
190
+
191
/// FFI: invokes `f` on every slot value for GC marking (see `MpmcQueue::mark`).
///
/// # Safety
/// `q` must point to a valid, initialized `MpmcQueue`; panics if null.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn mpmc_queue_mark(q: *const MpmcQueue, f: extern "C" fn(c_ulong)) {
    let q = unsafe { q.as_ref().unwrap() };
    q.mark(f);
}
196
+
197
/// C-compatible argument bundle for `mpmc_queue_push`, which must funnel
/// both the queue pointer and the item through a single `void *` parameter.
/// Layout must match the C side — hence `#[repr(C)]`.
#[repr(C)]
pub struct MpmcQueuePushPayload {
    queue: *mut MpmcQueue,
    item: c_ulong,
}
202
+
203
+ #[unsafe(no_mangle)]
204
+ pub unsafe extern "C" fn mpmc_queue_push(
205
+ push_paylod: *mut std::ffi::c_void,
206
+ ) -> *mut std::ffi::c_void {
207
+ let push_payload = unsafe { push_paylod.cast::<MpmcQueuePushPayload>().as_ref().unwrap() };
208
+ let q = unsafe { push_payload.queue.as_ref().unwrap() };
209
+ q.push(push_payload.item);
210
+ return std::ptr::null_mut();
211
+ }
212
+
213
+ #[unsafe(no_mangle)]
214
+ pub unsafe extern "C" fn mpmc_queue_pop(q: *mut std::ffi::c_void) -> *mut std::ffi::c_void {
215
+ let q = unsafe { q.cast::<MpmcQueue>().as_ref().unwrap() };
216
+ let item = q.pop();
217
+ unsafe { std::mem::transmute(item) }
218
+ }
219
+
220
// Byte size the embedding (C) side reserves for an MpmcQueue — presumably
// consumed via cbindgen by ext/ratomic; verified against
// size_of::<MpmcQueue>() by test_mpmc_queue_size below.
pub const MPMC_QUEUE_OBJECT_SIZE: usize = 80;
221
+
222
/// Guards the FFI contract: the exported size constant must track the real
/// struct size, and the queue must remain shareable across threads.
#[test]
fn test_mpmc_queue_size() {
    assert_eq!(
        MPMC_QUEUE_OBJECT_SIZE,
        std::mem::size_of::<MpmcQueue>(),
        "size mismatch"
    );

    // Compile-time-ish check that MpmcQueue: Send + Sync (helper in lib.rs).
    assert!(crate::is_sync_and_send::<MpmcQueue>());
}
data/rs/src/sem.rs ADDED
@@ -0,0 +1,75 @@
1
+ use std::time::Duration;
2
+
3
+ use libc::{
4
+ CLOCK_REALTIME, clock_gettime, sem_destroy, sem_init, sem_post, sem_t, sem_wait,
5
+ };
6
+
7
/// Thin wrapper over a heap-allocated POSIX `sem_t`.
///
/// Two-phase lifecycle mirroring the queue's: `alloc()` produces a null
/// `inner`; `init()` allocates and initializes the semaphore.
pub(crate) struct Semaphore {
    // Null until init(); afterwards a Box-allocated, sem_init-ed sem_t.
    inner: *mut sem_t,
}
10
+
11
impl Semaphore {
    /// Placeholder constructor: `inner` is null until `init` runs.
    /// `mem::zeroed` is sound here because the only field is a raw pointer,
    /// for which all-zero bits (null) is a valid value.
    pub(crate) fn alloc() -> Self {
        unsafe { std::mem::zeroed() }
    }

    /// Allocates a `sem_t` on the heap and initializes it with `initial`
    /// permits (process-private: `pshared == 0`).
    ///
    /// # Panics
    /// Panics if `sem_init` fails. NOTE(review): calling `init` twice leaks
    /// the previous semaphore — acceptable under the alloc-then-init-once
    /// FFI protocol, but worth confirming.
    pub(crate) fn init(&mut self, initial: u32) {
        // sem_t has no invalid bit patterns we rely on before sem_init
        // fills it in, so a zeroed placeholder is fine here.
        let ptr = Box::into_raw(Box::new(unsafe { std::mem::zeroed() }));

        let res = unsafe { sem_init(ptr, 0, initial) };
        if res != 0 {
            panic!(
                "failed to create semaphore: {:?}",
                std::io::Error::last_os_error()
            )
        }

        self.inner = ptr;
    }

    /// Increments the semaphore, waking one waiter if any.
    ///
    /// # Panics
    /// Panics if `sem_post` fails (e.g. counter overflow / invalid sem).
    pub(crate) fn post(&self) {
        let res = unsafe { sem_post(self.inner) };
        if res != 0 {
            panic!(
                "failed to post to semaphore: {:?}",
                std::io::Error::last_os_error()
            )
        }
    }

    /// Blocks until a permit is available, then consumes it.
    ///
    /// # Panics
    /// Panics if `sem_wait` fails. NOTE(review): sem_wait can return EINTR
    /// on signal delivery, which this treats as fatal — confirm that is
    /// acceptable in the embedding process.
    pub(crate) fn wait(&self) {
        let res = unsafe { sem_wait(self.inner) };
        if res != 0 {
            panic!(
                "failed to wait for semaphore: {:?}",
                std::io::Error::last_os_error()
            )
        }
    }

    // Timed wait, currently disabled along with its call site in
    // MpmcQueue::pop. Kept for reference.
    // pub(crate) fn wait_for(&self, duration: Duration) -> bool {
    //     let mut abstime = unsafe { std::mem::zeroed() };
    //     let res = unsafe { clock_gettime(CLOCK_REALTIME, &mut abstime) };
    //     if res != 0 {
    //         panic!(
    //             "failed to call clock_gettime: {:?}",
    //             std::io::Error::last_os_error()
    //         );
    //     }
    //     abstime.tv_nsec += duration.as_nanos() as i64;
    //     let res = unsafe { sem_timedwait(self.inner, &abstime) };
    //     res != -1
    // }
}
64
+
65
+ impl Drop for Semaphore {
66
+ fn drop(&mut self) {
67
+ unsafe {
68
+ sem_destroy(self.inner);
69
+ drop(Box::from_raw(self.inner));
70
+ }
71
+ }
72
+ }
73
+
74
// SAFETY: raw pointers are !Send/!Sync by default, but `inner` points to a
// process-private POSIX semaphore, and sem_post/sem_wait are specified as
// safe to call concurrently from multiple threads; the pointer itself is
// only mutated in `init` (which takes &mut self).
unsafe impl Send for Semaphore {}
unsafe impl Sync for Semaphore {}
metadata ADDED
@@ -0,0 +1,66 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: ratomic
3
+ version: !ruby/object:Gem::Version
4
+ version: 0.1.0
5
+ platform: ruby
6
+ authors:
7
+ - Mike Perham
8
+ bindir: bin
9
+ cert_chain: []
10
+ date: 2025-03-22 00:00:00.000000000 Z
11
+ dependencies: []
12
+ description: Mutable data structures for Ractors
13
+ email:
14
+ - mike@perham.net
15
+ executables: []
16
+ extensions:
17
+ - ext/ratomic/extconf.rb
18
+ extra_rdoc_files: []
19
+ files:
20
+ - ext/ratomic/counter.h
21
+ - ext/ratomic/extconf.rb
22
+ - ext/ratomic/fixed-size-object-pool.h
23
+ - ext/ratomic/hashmap.h
24
+ - ext/ratomic/mpmc-queue.h
25
+ - ext/ratomic/ratomic.c
26
+ - lib/ratomic.rb
27
+ - lib/ratomic/ratomic.bundle
28
+ - lib/ratomic/version.rb
29
+ - ratomic.gemspec
30
+ - rs/Cargo.lock
31
+ - rs/Cargo.toml
32
+ - rs/cbindgen.toml
33
+ - rs/rust-atomics.h
34
+ - rs/src/bin/mpmc_queue.rs
35
+ - rs/src/counter.rs
36
+ - rs/src/fixed_size_object_pool.rs
37
+ - rs/src/gc_guard.rs
38
+ - rs/src/hashmap.rs
39
+ - rs/src/lib.rs
40
+ - rs/src/mpmc_queue.rs
41
+ - rs/src/sem.rs
42
+ homepage: https://github.com/mperham/ratomic
43
+ licenses:
44
+ - MIT
45
+ metadata:
46
+ homepage_uri: https://github.com/mperham/ratomic
47
+ source_code_uri: https://github.com/mperham/ratomic
48
+ changelog_uri: https://github.com/mperham/ratomic
49
+ rdoc_options: []
50
+ require_paths:
51
+ - lib
52
+ required_ruby_version: !ruby/object:Gem::Requirement
53
+ requirements:
54
+ - - ">="
55
+ - !ruby/object:Gem::Version
56
+ version: 3.4.0
57
+ required_rubygems_version: !ruby/object:Gem::Requirement
58
+ requirements:
59
+ - - ">="
60
+ - !ruby/object:Gem::Version
61
+ version: '0'
62
+ requirements: []
63
+ rubygems_version: 3.6.2
64
+ specification_version: 4
65
+ summary: Mutable data structures for Ractors
66
+ test_files: []