ratomic 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/rs/rust-atomics.h ADDED
@@ -0,0 +1,89 @@
+ #ifndef RUST_ATOMICS_H
+ #define RUST_ATOMICS_H
+
+ #include <stdarg.h>
+ #include <stdbool.h>
+ #include <stdint.h>
+ #include <stdlib.h>
+
+ #define ATOMIC_COUNTER_SIZE 8
+
+ #define CONCURRENT_HASH_MAP_SIZE 40
+
+ #define FIXED_SIZE_OBJECT_POOL_SIZE 72
+
+ #define MPMC_QUEUE_OBJECT_SIZE 80
+
+ typedef struct atomic_counter_t atomic_counter_t;
+
+ typedef struct concurrent_hash_map_t concurrent_hash_map_t;
+
+ typedef struct fixed_size_object_pool_t fixed_size_object_pool_t;
+
+ typedef struct mpmc_queue_t mpmc_queue_t;
+
+ typedef struct {
+   uintptr_t idx;
+   unsigned long rbobj;
+ } PooledItem;
+
+ void atomic_counter_init(atomic_counter_t *counter, uint64_t n);
+
+ void atomic_counter_increment(const atomic_counter_t *counter, uint64_t amt);
+
+ void atomic_counter_decrement(const atomic_counter_t *counter, uint64_t amt);
+
+ uint64_t atomic_counter_read(const atomic_counter_t *counter);
+
+ extern unsigned long rb_hash(unsigned long obj);
+
+ extern int rb_eql(unsigned long lhs, unsigned long rhs);
+
+ void concurrent_hash_map_init(concurrent_hash_map_t *hashmap);
+
+ void concurrent_hash_map_drop(concurrent_hash_map_t *hashmap);
+
+ void concurrent_hash_map_clear(const concurrent_hash_map_t *hashmap);
+
+ unsigned long concurrent_hash_map_get(const concurrent_hash_map_t *hashmap,
+                                       unsigned long key,
+                                       unsigned long fallback);
+
+ void concurrent_hash_map_set(const concurrent_hash_map_t *hashmap,
+                              unsigned long key,
+                              unsigned long value);
+
+ void concurrent_hash_map_mark(const concurrent_hash_map_t *hashmap, void (*f)(unsigned long));
+
+ void concurrent_hash_map_fetch_and_modify(const concurrent_hash_map_t *hashmap,
+                                           unsigned long key,
+                                           unsigned long (*f)(unsigned long));
+
+ void fixed_size_object_pool_alloc(fixed_size_object_pool_t *pool);
+
+ void fixed_size_object_pool_init(fixed_size_object_pool_t *pool,
+                                  uintptr_t max_size,
+                                  uint64_t timeout_in_ms,
+                                  unsigned long (*rb_make_obj)(unsigned long));
+
+ void fixed_size_object_pool_drop(fixed_size_object_pool_t *pool);
+
+ void fixed_size_object_pool_mark(const fixed_size_object_pool_t *pool, void (*f)(unsigned long));
+
+ PooledItem fixed_size_object_pool_checkout(fixed_size_object_pool_t *pool);
+
+ void fixed_size_object_pool_checkin(fixed_size_object_pool_t *pool, uintptr_t idx);
+
+ void mpmc_queue_alloc(mpmc_queue_t *q);
+
+ void mpmc_queue_init(mpmc_queue_t *q, uintptr_t capacity, unsigned long default_);
+
+ void mpmc_queue_drop(mpmc_queue_t *q);
+
+ void mpmc_queue_mark(const mpmc_queue_t *q, void (*f)(unsigned long));
+
+ void *mpmc_queue_push(void *push_paylod);
+
+ void *mpmc_queue_pop(void *q);
+
+ #endif /* RUST_ATOMICS_H */
@@ -0,0 +1,89 @@
+ use std::{
+     sync::Arc,
+     time::{Duration, Instant},
+ };
+
+ use libc::c_ulong;
+ use rust_atomics::MpmcQueue;
+
+ const RUN_GC_EVERY: Duration = Duration::from_millis(1000);
+ const PUSH_ITERATIONS: u64 = 5;
+ const THREADS_COUNT: u8 = 10;
+
+ fn main() {
+     let q = make_q(16);
+
+     let mut consumers = vec![];
+     for _ in 0..THREADS_COUNT {
+         consumers.push(start_consumer(Arc::clone(&q)));
+     }
+
+     let last_pushed_value = start_producer(Arc::clone(&q));
+
+     let mut consumed = vec![];
+     for consumer in consumers {
+         let mut data = consumer.join().unwrap();
+         consumed.append(&mut data);
+     }
+
+     consumed.sort_unstable();
+
+     for (prev, next) in consumed.iter().zip(consumed.iter().skip(1)) {
+         assert_eq!(*prev + 1, *next);
+     }
+
+     assert_eq!(*consumed.last().unwrap(), last_pushed_value);
+ }
+
+ fn make_q(buffer_size: usize) -> Arc<MpmcQueue> {
+     Arc::new(MpmcQueue::new(buffer_size, 0))
+ }
+
+ const END: c_ulong = c_ulong::MAX;
+ fn push_end(q: &MpmcQueue) {
+     q.push(END);
+ }
+ fn pop(q: &MpmcQueue) -> Option<c_ulong> {
+     match q.pop() {
+         END => None,
+         other => Some(other),
+     }
+ }
+
+ fn start_consumer(q: Arc<MpmcQueue>) -> std::thread::JoinHandle<Vec<c_ulong>> {
+     std::thread::spawn(move || {
+         let mut popped = vec![];
+
+         while let Some(value) = pop(&q) {
+             eprintln!("[{:?}] popped {value}", std::thread::current().id());
+             popped.push(value);
+         }
+
+         popped
+     })
+ }
+
+ fn start_producer(q: Arc<MpmcQueue>) -> c_ulong {
+     let mut value = 1;
+
+     for _ in 0..PUSH_ITERATIONS {
+         // push for `RUN_GC_EVERY`
+         let start = Instant::now();
+         while Instant::now() - start < RUN_GC_EVERY {
+             q.push(value);
+             value += 1;
+         }
+
+         q.acquire_as_gc(|| {
+             eprintln!("===== GC START ======");
+             std::thread::sleep(Duration::from_millis(1000));
+             eprintln!("===== GC END ========");
+         });
+     }
+
+     for _ in 0..THREADS_COUNT {
+         push_end(&q);
+     }
+
+     value - 1
+ }
data/rs/src/counter.rs ADDED
@@ -0,0 +1,57 @@
+ use std::sync::atomic::{AtomicU64, Ordering};
+
+ #[derive(Debug)]
+ pub struct AtomicCounter {
+     value: AtomicU64,
+ }
+
+ impl AtomicCounter {
+     pub fn new(n: u64) -> Self {
+         Self {
+             value: AtomicU64::new(n),
+         }
+     }
+
+     pub fn inc(&self, amt: u64) {
+         self.value.fetch_add(amt, Ordering::Relaxed);
+     }
+
+     pub fn dec(&self, amt: u64) {
+         self.value.fetch_sub(amt, Ordering::Relaxed);
+     }
+
+     pub fn read(&self) -> u64 {
+         self.value.load(Ordering::Relaxed)
+     }
+ }
+
+ #[unsafe(no_mangle)]
+ pub unsafe extern "C" fn atomic_counter_init(counter: *mut AtomicCounter, n: u64) {
+     unsafe { counter.write(AtomicCounter::new(n)) }
+ }
+
+ #[unsafe(no_mangle)]
+ pub unsafe extern "C" fn atomic_counter_increment(counter: *const AtomicCounter, amt: u64) {
+     let counter = unsafe { counter.as_ref().unwrap() };
+     counter.inc(amt);
+ }
+
+ #[unsafe(no_mangle)]
+ pub unsafe extern "C" fn atomic_counter_decrement(counter: *const AtomicCounter, amt: u64) {
+     let counter = unsafe { counter.as_ref().unwrap() };
+     counter.dec(amt);
+ }
+
+ #[unsafe(no_mangle)]
+ pub unsafe extern "C" fn atomic_counter_read(counter: *const AtomicCounter) -> u64 {
+     let counter = unsafe { counter.as_ref().unwrap() };
+     counter.read()
+ }
+
+ pub const ATOMIC_COUNTER_SIZE: usize = 8;
+
+ #[test]
+ fn test_atomic_counter() {
+     assert_eq!(ATOMIC_COUNTER_SIZE, std::mem::size_of::<AtomicCounter>());
+     assert!(crate::is_sync_and_send::<AtomicCounter>());
+ }
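
AtomicCounter above is a plain AtomicU64 wrapper, so besides the C entry points it can be driven directly from Rust. A minimal sketch, not part of the package, assuming rust_atomics is available as a dependency (as it is for the bundled MpmcQueue simulation):

use std::sync::Arc;

use rust_atomics::AtomicCounter;

fn main() {
    // One counter shared by several incrementing threads.
    let counter = Arc::new(AtomicCounter::new(0));

    let handles: Vec<_> = (0..4)
        .map(|_| {
            let counter = Arc::clone(&counter);
            std::thread::spawn(move || {
                for _ in 0..1_000 {
                    counter.inc(1);
                }
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }

    // All increments are visible once the threads have joined.
    assert_eq!(counter.read(), 4_000);
}
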
data/rs/src/fixed_size_object_pool.rs ADDED
@@ -0,0 +1,120 @@
+ use crossbeam_channel::{Receiver, Sender};
+ use std::{ffi::c_ulong, time::Duration};
+
+ pub struct FixedSizeObjectPool {
+     pool: Vec<c_ulong>,
+     tx: Sender<usize>,
+     rx: Receiver<usize>,
+     timeout: Duration,
+ }
+
+ #[repr(C)]
+ pub struct PooledItem {
+     pub idx: usize,
+     pub rbobj: c_ulong,
+ }
+
+ impl FixedSizeObjectPool {
+     fn new() -> Self {
+         let (tx, rx) = crossbeam_channel::unbounded();
+
+         Self {
+             pool: vec![],
+             tx,
+             rx,
+             timeout: Duration::MAX,
+         }
+     }
+
+     fn init(
+         &mut self,
+         size: usize,
+         timeout_in_ms: u64,
+         rb_make_obj: extern "C" fn(c_ulong) -> c_ulong,
+     ) {
+         self.timeout = Duration::from_millis(timeout_in_ms);
+
+         self.pool = Vec::with_capacity(size);
+         for idx in 0..size {
+             self.pool.push((rb_make_obj)(0));
+             self.tx.send(idx).unwrap();
+         }
+     }
+
+     fn mark(&self, f: extern "C" fn(c_ulong)) {
+         for item in self.pool.iter() {
+             f(*item);
+         }
+     }
+
+     fn checkout(&mut self) -> Option<PooledItem> {
+         let idx = self.rx.recv_timeout(self.timeout).ok()?;
+         Some(PooledItem {
+             idx,
+             rbobj: self.pool[idx],
+         })
+     }
+
+     fn checkin(&mut self, idx: usize) {
+         self.tx.send(idx).unwrap();
+     }
+ }
+
+ #[unsafe(no_mangle)]
+ pub unsafe extern "C" fn fixed_size_object_pool_alloc(pool: *mut FixedSizeObjectPool) {
+     unsafe { pool.write(FixedSizeObjectPool::new()) }
+ }
+
+ #[unsafe(no_mangle)]
+ pub unsafe extern "C" fn fixed_size_object_pool_init(
+     pool: *mut FixedSizeObjectPool,
+     max_size: usize,
+     timeout_in_ms: u64,
+     rb_make_obj: extern "C" fn(c_ulong) -> c_ulong,
+ ) {
+     let pool = unsafe { pool.as_mut().unwrap() };
+     pool.init(max_size, timeout_in_ms, rb_make_obj);
+ }
+
+ #[unsafe(no_mangle)]
+ pub unsafe extern "C" fn fixed_size_object_pool_drop(pool: *mut FixedSizeObjectPool) {
+     unsafe { std::ptr::drop_in_place(pool) };
+ }
+
+ #[unsafe(no_mangle)]
+ pub unsafe extern "C" fn fixed_size_object_pool_mark(
+     pool: *const FixedSizeObjectPool,
+     f: extern "C" fn(c_ulong),
+ ) {
+     let pool = unsafe { pool.as_ref().unwrap() };
+     pool.mark(f);
+ }
+
+ #[unsafe(no_mangle)]
+ pub unsafe extern "C" fn fixed_size_object_pool_checkout(
+     pool: *mut FixedSizeObjectPool,
+ ) -> PooledItem {
+     let pool = unsafe { pool.as_mut().unwrap() };
+     pool.checkout().unwrap_or(PooledItem { idx: 0, rbobj: 0 })
+ }
+
+ #[unsafe(no_mangle)]
+ pub unsafe extern "C" fn fixed_size_object_pool_checkin(
+     pool: *mut FixedSizeObjectPool,
+     idx: usize,
+ ) {
+     let pool = unsafe { pool.as_mut().unwrap() };
+     pool.checkin(idx);
+ }
+
+ pub const FIXED_SIZE_OBJECT_POOL_SIZE: usize = 72;
+
+ #[test]
+ fn test_concurrent_hash_map() {
+     assert_eq!(
+         FIXED_SIZE_OBJECT_POOL_SIZE,
+         std::mem::size_of::<FixedSizeObjectPool>(),
+         "size mismatch"
+     );
+     assert!(crate::is_sync_and_send::<FixedSizeObjectPool>());
+ }
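
The exported pool entry points above can also be exercised from Rust. The sketch below is not part of the package and assumes rust_atomics as a dependency; it walks the alloc/init/checkout/checkin lifecycle with caller-owned storage, mirroring how the embedding side reserves FIXED_SIZE_OBJECT_POOL_SIZE bytes and passes a pointer:

use std::ffi::c_ulong;
use std::mem::MaybeUninit;

use rust_atomics::{
    fixed_size_object_pool_alloc, fixed_size_object_pool_checkin,
    fixed_size_object_pool_checkout, fixed_size_object_pool_drop,
    fixed_size_object_pool_init, FixedSizeObjectPool,
};

// Stand-in for the Ruby callback that would build one object per slot.
extern "C" fn make_obj(_: c_ulong) -> c_ulong {
    42
}

fn main() {
    // Caller-owned, initially uninitialised storage for the pool.
    let mut slot = MaybeUninit::<FixedSizeObjectPool>::uninit();
    let pool = slot.as_mut_ptr();

    unsafe {
        fixed_size_object_pool_alloc(pool);
        fixed_size_object_pool_init(pool, 2, 50, make_obj);

        // Borrow a slot, use it, and hand it back.
        let item = fixed_size_object_pool_checkout(pool);
        assert_eq!(item.rbobj, 42);
        fixed_size_object_pool_checkin(pool, item.idx);

        fixed_size_object_pool_drop(pool);
    }
}
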
data/rs/src/gc_guard.rs ADDED
@@ -0,0 +1,88 @@
+ use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+
+ pub(crate) struct GcGuard {
+     locked: AtomicBool,
+     count: AtomicUsize,
+ }
+
+ impl GcGuard {
+     pub(crate) fn alloc() -> Self {
+         GcGuard {
+             locked: AtomicBool::new(false),
+             count: AtomicUsize::new(0),
+         }
+     }
+
+     pub(crate) fn init(&mut self) {
+         self.locked.store(false, Ordering::Relaxed);
+         self.count.store(0, Ordering::Relaxed);
+     }
+
+     fn add_consumer(&self) {
+         self.count.fetch_add(1, Ordering::SeqCst);
+     }
+     fn remove_consumer(&self) {
+         self.count.fetch_sub(1, Ordering::SeqCst);
+     }
+     fn wait_for_no_consumers(&self) {
+         loop {
+             let count = self.count.load(Ordering::SeqCst);
+             if count == 0 {
+                 #[cfg(feature = "simulation")]
+                 eprintln!("[producer] 0 running consumers");
+                 break;
+             } else {
+                 // spin until they are done
+                 #[cfg(feature = "simulation")]
+                 eprintln!("[producer] waiting for {count} consumers to finish");
+             }
+         }
+     }
+
+     fn lock(&self) {
+         self.locked.store(true, Ordering::SeqCst);
+     }
+     fn unlock(&self) {
+         self.locked.store(false, Ordering::SeqCst)
+     }
+     fn is_locked(&self) -> bool {
+         self.locked.load(Ordering::SeqCst)
+     }
+     fn wait_until_unlocked(&self) {
+         while self.is_locked() {
+             // spin
+         }
+     }
+
+     pub(crate) fn acquire_as_gc<F, T>(&self, f: F) -> T
+     where
+         F: FnOnce() -> T,
+     {
+         #[cfg(feature = "simulation")]
+         eprintln!("Locking consumers");
+         self.lock();
+         #[cfg(feature = "simulation")]
+         eprintln!("Waiting for consumers to finish");
+         self.wait_for_no_consumers();
+         #[cfg(feature = "simulation")]
+         eprintln!("All consumers have finished");
+         let out = f();
+         #[cfg(feature = "simulation")]
+         eprintln!("Unlocking consumers");
+         self.unlock();
+         out
+     }
+
+     pub(crate) fn acquire_as_consumer<F, T>(&self, f: F) -> T
+     where
+         F: FnOnce() -> T,
+     {
+         if self.is_locked() {
+             self.wait_until_unlocked();
+         }
+         self.add_consumer();
+         let out = f();
+         self.remove_consumer();
+         out
+     }
+ }
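
GcGuard is crate-private, but the coordination pattern it implements is small: a flag that stops new consumers plus a counter the exclusive side spins on until in-flight consumers drain. The following standalone sketch illustrates that pattern under the same SeqCst orderings; it is not part of the crate's API:

use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;

// Same idea as GcGuard: `locked` gates new consumers, `count` tracks
// consumers currently inside a critical section.
struct Guard {
    locked: AtomicBool,
    count: AtomicUsize,
}

impl Guard {
    fn with_consumer<T>(&self, f: impl FnOnce() -> T) -> T {
        // Wait out an in-progress exclusive section, then register as a consumer.
        while self.locked.load(Ordering::SeqCst) {}
        self.count.fetch_add(1, Ordering::SeqCst);
        let out = f();
        self.count.fetch_sub(1, Ordering::SeqCst);
        out
    }

    fn with_gc<T>(&self, f: impl FnOnce() -> T) -> T {
        // Stop new consumers, spin until running ones drain, then run f exclusively.
        self.locked.store(true, Ordering::SeqCst);
        while self.count.load(Ordering::SeqCst) != 0 {}
        let out = f();
        self.locked.store(false, Ordering::SeqCst);
        out
    }
}

fn main() {
    let guard = Arc::new(Guard {
        locked: AtomicBool::new(false),
        count: AtomicUsize::new(0),
    });

    let g = Arc::clone(&guard);
    let consumer = std::thread::spawn(move || {
        for _ in 0..1_000 {
            g.with_consumer(|| { /* touch shared state here */ });
        }
    });

    guard.with_gc(|| eprintln!("exclusive section: no consumers running"));
    consumer.join().unwrap();
}
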
data/rs/src/hashmap.rs ADDED
@@ -0,0 +1,129 @@
+ use std::ffi::{c_int, c_ulong};
+
+ #[derive(Debug)]
+ struct RubyHashEql(c_ulong);
+
+ impl PartialEq for RubyHashEql {
+     fn eq(&self, other: &Self) -> bool {
+         unsafe { rb_eql(self.0, other.0) != 0 }
+     }
+ }
+ impl Eq for RubyHashEql {}
+
+ impl std::hash::Hash for RubyHashEql {
+     fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+         let ruby_hash = unsafe { rb_hash(self.0) };
+         ruby_hash.hash(state);
+     }
+ }
+
+ pub struct ConcurrentHashMap {
+     map: dashmap::DashMap<RubyHashEql, c_ulong>,
+ }
+
+ unsafe extern "C" {
+     fn rb_hash(obj: c_ulong) -> c_ulong;
+     fn rb_eql(lhs: c_ulong, rhs: c_ulong) -> c_int;
+ }
+
+ impl ConcurrentHashMap {
+     fn new() -> Self {
+         Self {
+             map: dashmap::DashMap::new(),
+         }
+     }
+
+     fn get(&self, key: c_ulong) -> Option<c_ulong> {
+         let key = RubyHashEql(key);
+         self.map.get(&key).map(|v| *v)
+     }
+
+     fn set(&self, key: c_ulong, value: c_ulong) {
+         let key = RubyHashEql(key);
+         self.map.insert(key, value);
+     }
+
+     fn clear(&self) {
+         self.map.clear()
+     }
+
+     fn fetch_and_modify(&self, key: c_ulong, f: extern "C" fn(c_ulong) -> c_ulong) {
+         let key = RubyHashEql(key);
+         self.map.alter(&key, |_, v| f(v));
+     }
+
+     fn mark(&self, f: extern "C" fn(c_ulong)) {
+         for pair in self.map.iter() {
+             f(pair.key().0);
+             f(*pair.value());
+         }
+     }
+ }
+
+ #[unsafe(no_mangle)]
+ pub unsafe extern "C" fn concurrent_hash_map_init(hashmap: *mut ConcurrentHashMap) {
+     unsafe { hashmap.write(ConcurrentHashMap::new()) }
+ }
+
+ #[unsafe(no_mangle)]
+ pub unsafe extern "C" fn concurrent_hash_map_drop(hashmap: *mut ConcurrentHashMap) {
+     unsafe { std::ptr::drop_in_place(hashmap) };
+ }
+
+ #[unsafe(no_mangle)]
+ pub unsafe extern "C" fn concurrent_hash_map_clear(hashmap: *const ConcurrentHashMap) {
+     let hashmap = unsafe { hashmap.as_ref().unwrap() };
+     hashmap.clear();
+ }
+
+ #[unsafe(no_mangle)]
+ pub unsafe extern "C" fn concurrent_hash_map_get(
+     hashmap: *const ConcurrentHashMap,
+     key: c_ulong,
+     fallback: c_ulong,
+ ) -> c_ulong {
+     let hashmap = unsafe { hashmap.as_ref().unwrap() };
+     hashmap.get(key).unwrap_or(fallback)
+ }
+
+ #[unsafe(no_mangle)]
+ pub unsafe extern "C" fn concurrent_hash_map_set(
+     hashmap: *const ConcurrentHashMap,
+     key: c_ulong,
+     value: c_ulong,
+ ) {
+     let hashmap = unsafe { hashmap.as_ref().unwrap() };
+     hashmap.set(key, value);
+ }
+
+ #[unsafe(no_mangle)]
+ pub unsafe extern "C" fn concurrent_hash_map_mark(
+     hashmap: *const ConcurrentHashMap,
+     f: extern "C" fn(c_ulong),
+ ) {
+     let hashmap = unsafe { hashmap.as_ref().unwrap() };
+     hashmap.mark(f);
+ }
+
+ #[unsafe(no_mangle)]
+ pub unsafe extern "C" fn concurrent_hash_map_fetch_and_modify(
+     hashmap: *const ConcurrentHashMap,
+     key: c_ulong,
+     f: extern "C" fn(c_ulong) -> c_ulong,
+ ) {
+     let hashmap = unsafe { hashmap.as_ref().unwrap() };
+     hashmap.fetch_and_modify(key, f);
+ }
+
+ pub const CONCURRENT_HASH_MAP_SIZE: usize = 40;
+
+ #[test]
+ fn test_concurrent_hash_map() {
+     assert_eq!(
+         CONCURRENT_HASH_MAP_SIZE,
+         std::mem::size_of::<ConcurrentHashMap>(),
+         "size mismatch"
+     );
+
+     assert!(crate::is_sync_and_send::<ConcurrentHashMap>());
+ }
data/rs/src/lib.rs ADDED
@@ -0,0 +1,23 @@
+ #![expect(clippy::missing_safety_doc)]
+
+ mod counter;
+ pub use counter::*;
+
+ mod hashmap;
+ pub use hashmap::*;
+
+ mod fixed_size_object_pool;
+ pub use fixed_size_object_pool::*;
+
+ mod mpmc_queue;
+ pub use mpmc_queue::*;
+
+ mod gc_guard;
+ pub(crate) use gc_guard::GcGuard;
+
+ mod sem;
+
+ #[cfg(test)]
+ pub(crate) fn is_sync_and_send<T: Sync + Send>() -> bool {
+     true
+ }