solid_mcp 0.2.2 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/ext/solid_mcp_native/Cargo.toml +12 -0
- data/ext/solid_mcp_native/core/Cargo.toml +32 -0
- data/ext/solid_mcp_native/core/src/config.rs +133 -0
- data/ext/solid_mcp_native/core/src/db/mod.rs +154 -0
- data/ext/solid_mcp_native/core/src/db/postgres.rs +242 -0
- data/ext/solid_mcp_native/core/src/db/sqlite.rs +276 -0
- data/ext/solid_mcp_native/core/src/error.rs +38 -0
- data/ext/solid_mcp_native/core/src/lib.rs +25 -0
- data/ext/solid_mcp_native/core/src/message.rs +191 -0
- data/ext/solid_mcp_native/core/src/pubsub.rs +309 -0
- data/ext/solid_mcp_native/core/src/subscriber.rs +298 -0
- data/ext/solid_mcp_native/core/src/writer.rs +252 -0
- data/ext/solid_mcp_native/extconf.rb +3 -0
- data/ext/solid_mcp_native/ffi/Cargo.toml +20 -0
- data/ext/solid_mcp_native/ffi/extconf.rb +67 -0
- data/ext/solid_mcp_native/ffi/src/lib.rs +224 -0
- data/lib/solid_mcp/configuration.rb +5 -2
- data/lib/solid_mcp/message_writer.rb +80 -45
- data/lib/solid_mcp/native_speedup.rb +140 -0
- data/lib/solid_mcp/pub_sub.rb +10 -8
- data/lib/solid_mcp/subscriber.rb +18 -7
- data/lib/solid_mcp/version.rb +1 -1
- data/lib/solid_mcp.rb +3 -0
- metadata +58 -20
- data/.release-please-manifest.json +0 -1
- data/CHANGELOG.md +0 -27
- data/Gemfile +0 -11
- data/Gemfile.lock +0 -140
- data/Rakefile +0 -8
- data/app/models/solid_mcp/message.rb +0 -25
- data/app/models/solid_mcp/record.rb +0 -10
- data/bin/console +0 -11
- data/bin/rails +0 -15
- data/bin/setup +0 -8
- data/bin/test +0 -8
- data/db/migrate/20250624000001_create_solid_mcp_messages.rb +0 -28
- data/release-please-config.json +0 -8
- data/solid_mcp.gemspec +0 -39
data/ext/solid_mcp_native/core/src/writer.rs
@@ -0,0 +1,252 @@
+//! Async message writer with batching
+//!
+//! Uses Tokio channels for non-blocking enqueue and background batch writes.
+
+use crate::db::{Database, DbPool};
+use crate::{Config, Error, Message, Result};
+use std::sync::Arc;
+use tokio::sync::mpsc;
+use tokio::task::JoinHandle;
+use tracing::{debug, error, info, warn};
+
+/// Message writer that batches writes to the database
+pub struct MessageWriter {
+    tx: mpsc::Sender<WriterCommand>,
+    handle: JoinHandle<()>,
+}
+
+enum WriterCommand {
+    Message(Message),
+    Flush(tokio::sync::oneshot::Sender<()>),
+    Shutdown,
+}
+
+impl MessageWriter {
+    /// Create a new message writer
+    pub async fn new(db: Arc<DbPool>, config: &Config) -> Result<Self> {
+        let (tx, rx) = mpsc::channel(config.max_queue_size);
+        let batch_size = config.batch_size;
+        let _shutdown_timeout = config.shutdown_timeout; // TODO: Use for timeout handling
+
+        let handle = tokio::spawn(async move {
+            writer_loop(rx, db, batch_size).await;
+            debug!("MessageWriter worker shutdown complete");
+        });
+
+        info!(
+            "MessageWriter started with batch_size={}, queue_size={}",
+            batch_size, config.max_queue_size
+        );
+
+        Ok(Self { tx, handle })
+    }
+
+    /// Enqueue a message for writing (non-blocking)
+    ///
+    /// Returns `Ok(true)` if enqueued, `Ok(false)` if queue is full.
+    pub fn enqueue(&self, message: Message) -> Result<bool> {
+        match self.tx.try_send(WriterCommand::Message(message)) {
+            Ok(()) => Ok(true),
+            Err(mpsc::error::TrySendError::Full(_)) => {
+                warn!("MessageWriter queue full, dropping message");
+                Ok(false)
+            }
+            Err(mpsc::error::TrySendError::Closed(_)) => Err(Error::Shutdown),
+        }
+    }
+
+    /// Enqueue a message for writing (async, waits if queue is full)
+    pub async fn enqueue_async(&self, message: Message) -> Result<()> {
+        self.tx
+            .send(WriterCommand::Message(message))
+            .await
+            .map_err(|_| Error::Shutdown)
+    }
+
+    /// Flush all pending messages to the database
+    pub async fn flush(&self) -> Result<()> {
+        let (tx, rx) = tokio::sync::oneshot::channel();
+        self.tx
+            .send(WriterCommand::Flush(tx))
+            .await
+            .map_err(|_| Error::Shutdown)?;
+        rx.await.map_err(|_| Error::Shutdown)
+    }
+
+    /// Shutdown the writer gracefully
+    pub async fn shutdown(self) -> Result<()> {
+        info!("MessageWriter shutting down...");
+
+        // Send shutdown command
+        let _ = self.tx.send(WriterCommand::Shutdown).await;
+
+        // Wait for worker to finish
+        self.handle
+            .await
+            .map_err(|e| Error::Config(format!("Worker panicked: {}", e)))?;
+
+        info!("MessageWriter shutdown complete");
+        Ok(())
+    }
+}
+
+async fn writer_loop(mut rx: mpsc::Receiver<WriterCommand>, db: Arc<DbPool>, batch_size: usize) {
+    let mut batch = Vec::with_capacity(batch_size);
+    let mut flush_waiters: Vec<tokio::sync::oneshot::Sender<()>> = Vec::new();
+
+    loop {
+        // Wait for first message or command
+        let cmd = match rx.recv().await {
+            Some(cmd) => cmd,
+            None => {
+                debug!("Channel closed, exiting writer loop");
+                break;
+            }
+        };
+
+        match cmd {
+            WriterCommand::Message(msg) => {
+                batch.push(msg);
+            }
+            WriterCommand::Flush(waiter) => {
+                flush_waiters.push(waiter);
+            }
+            WriterCommand::Shutdown => {
+                debug!("Shutdown command received");
+                // Drain remaining messages
+                drain_remaining(&mut rx, &mut batch, &mut flush_waiters);
+                // Write final batch
+                if !batch.is_empty() {
+                    write_batch(&db, &mut batch).await;
+                }
+                // Signal all flush waiters
+                signal_flush_waiters(&mut flush_waiters);
+                break;
+            }
+        }
+
+        // Try to fill batch (non-blocking)
+        while batch.len() < batch_size {
+            match rx.try_recv() {
+                Ok(WriterCommand::Message(msg)) => {
+                    batch.push(msg);
+                }
+                Ok(WriterCommand::Flush(waiter)) => {
+                    flush_waiters.push(waiter);
+                    break; // Stop filling, write now
+                }
+                Ok(WriterCommand::Shutdown) => {
+                    drain_remaining(&mut rx, &mut batch, &mut flush_waiters);
+                    if !batch.is_empty() {
+                        write_batch(&db, &mut batch).await;
+                    }
+                    signal_flush_waiters(&mut flush_waiters);
+                    return;
+                }
+                Err(_) => break, // No more messages available
+            }
+        }
+
+        // Write batch if non-empty
+        if !batch.is_empty() {
+            write_batch(&db, &mut batch).await;
+        }
+
+        // Signal flush waiters
+        signal_flush_waiters(&mut flush_waiters);
+    }
+}
+
+fn drain_remaining(
+    rx: &mut mpsc::Receiver<WriterCommand>,
+    batch: &mut Vec<Message>,
+    flush_waiters: &mut Vec<tokio::sync::oneshot::Sender<()>>,
+) {
+    while let Ok(cmd) = rx.try_recv() {
+        match cmd {
+            WriterCommand::Message(msg) => batch.push(msg),
+            WriterCommand::Flush(waiter) => flush_waiters.push(waiter),
+            WriterCommand::Shutdown => {}
+        }
+    }
+}
+
+async fn write_batch(db: &DbPool, batch: &mut Vec<Message>) {
+    let count = batch.len();
+    debug!("Writing batch of {} messages", count);
+
+    match db.insert_batch(batch).await {
+        Ok(()) => {
+            debug!("Successfully wrote {} messages", count);
+        }
+        Err(e) => {
+            error!("Failed to write batch: {}", e);
+            // TODO: Implement retry logic or dead letter queue
+        }
+    }
+
+    batch.clear();
+}
+
+fn signal_flush_waiters(waiters: &mut Vec<tokio::sync::oneshot::Sender<()>>) {
+    for waiter in waiters.drain(..) {
+        let _ = waiter.send(());
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::db::sqlite::SqlitePool;
+
+    async fn create_test_db() -> Arc<DbPool> {
+        let sqlite = SqlitePool::new("sqlite::memory:").await.unwrap();
+        sqlite.setup_test_schema().await.unwrap();
+        Arc::new(DbPool::Sqlite(sqlite))
+    }
+
+    #[tokio::test]
+    async fn test_writer_basic() {
+        let db = create_test_db().await;
+        let config = Config::new("sqlite::memory:").batch_size(10);
+
+        let writer = MessageWriter::new(db.clone(), &config).await.unwrap();
+
+        // Enqueue some messages
+        for i in 0..5 {
+            let msg = Message::new("session-1", "message", format!(r#"{{"i":{}}}"#, i));
+            assert!(writer.enqueue(msg).unwrap());
+        }
+
+        // Flush to ensure writes complete
+        writer.flush().await.unwrap();
+
+        // Verify messages in database
+        let messages = db.fetch_after("session-1", 0, 100).await.unwrap();
+        assert_eq!(messages.len(), 5);
+
+        // Shutdown
+        writer.shutdown().await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn test_writer_batching() {
+        let db = create_test_db().await;
+        let config = Config::new("sqlite::memory:").batch_size(3);
+
+        let writer = MessageWriter::new(db.clone(), &config).await.unwrap();
+
+        // Enqueue more messages than batch size
+        for i in 0..10 {
+            let msg = Message::new("session-1", "message", format!(r#"{{"i":{}}}"#, i));
+            writer.enqueue_async(msg).await.unwrap();
+        }
+
+        writer.flush().await.unwrap();
+
+        let messages = db.fetch_after("session-1", 0, 100).await.unwrap();
+        assert_eq!(messages.len(), 10);
+
+        writer.shutdown().await.unwrap();
+    }
+}
data/ext/solid_mcp_native/ffi/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "solid_mcp_native"
+version = "0.1.0"
+edition = "2024"
+rust-version = "1.85"
+authors = ["Abdelkader Boudih <terminale@gmail.com>"]
+license = "MIT"
+
+[lib]
+crate-type = ["cdylib"]
+
+[dependencies]
+solid-mcp-core = { workspace = true }
+magnus = { version = "0.8", features = ["embed"] }
+tokio = { workspace = true }
+tracing = { workspace = true }
+tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+
+[build-dependencies]
+rb-sys = "0.9"
data/ext/solid_mcp_native/ffi/extconf.rb
@@ -0,0 +1,67 @@
+# frozen_string_literal: true
+
+def create_noop_makefile(message)
+  warn message
+  warn 'SolidMCP will fall back to pure Ruby backend.'
+  File.write('Makefile', <<~MAKE)
+    all:
+      @echo '#{message}'
+    install:
+      @echo '#{message}'
+  MAKE
+  exit 0
+end
+
+# Skip native extension compilation on JRuby
+if RUBY_ENGINE == 'jruby'
+  create_noop_makefile('Skipping native extension on JRuby')
+end
+
+# TruffleRuby 24.0.0+ has native C extension support
+if RUBY_ENGINE == 'truffleruby'
+  warn '⚠️ TruffleRuby detected - C extension support is experimental'
+  warn '   Attempting compilation... (may fail, will fall back to pure Ruby)'
+end
+
+# Check if Cargo is available
+def cargo_available?
+  system('cargo --version > /dev/null 2>&1')
+end
+
+unless cargo_available?
+  create_noop_makefile('Skipping native extension (Cargo not found)')
+end
+
+# Use rb_sys to compile the Rust extension
+require 'mkmf'
+
+# Wrap entire compilation process in error handling to ensure gem install never fails
+begin
+  require 'rb_sys/mkmf'
+  require 'pathname'
+
+  create_rust_makefile('solid_mcp_native/solid_mcp_native') do |r|
+    ffi_dir = Pathname(__dir__)
+    r.ext_dir = begin
+      ffi_dir.relative_path_from(Pathname(Dir.pwd)).to_s
+    rescue ArgumentError
+      ffi_dir.expand_path.to_s
+    end
+    # Profile configuration
+    r.profile = ENV.fetch('RB_SYS_CARGO_PROFILE', :release).to_sym
+  end
+
+  makefile_path = File.join(Dir.pwd, 'Makefile')
+  if File.exist?(makefile_path)
+    manifest_path = File.expand_path(__dir__)
+    contents = File.read(makefile_path)
+    contents.gsub!(/^RB_SYS_CARGO_MANIFEST_DIR \?=.*$/, "RB_SYS_CARGO_MANIFEST_DIR ?= #{manifest_path}")
+    File.write(makefile_path, contents)
+  end
+rescue LoadError => e
+  # rb_sys not available
+  create_noop_makefile("Skipping native extension (rb_sys gem not available: #{e.message})")
+rescue StandardError => e
+  # Any other compilation setup failure (Rust compilation errors, Makefile generation, etc.)
+  create_noop_makefile("Skipping native extension (compilation setup failed: #{e.message})")
+end
data/ext/solid_mcp_native/ffi/src/lib.rs
@@ -0,0 +1,224 @@
+//! Ruby FFI bridge for solid-mcp-core
+//!
+//! Exposes the Rust pub/sub engine to Ruby via Magnus.
+
+use magnus::{Error, Ruby, function};
+use solid_mcp_core::{Config, PubSub};
+use std::cell::RefCell;
+use std::sync::{Arc, OnceLock};
+use std::time::Duration;
+use tokio::runtime::Runtime;
+use tracing::Level;
+use tracing_subscriber::FmtSubscriber;
+
+// Global Tokio runtime
+static RUNTIME: OnceLock<Runtime> = OnceLock::new();
+
+// Thread-local PubSub instance (each Ruby thread gets its own)
+thread_local! {
+    static PUBSUB: RefCell<Option<Arc<PubSub>>> = const { RefCell::new(None) };
+}
+
+fn get_runtime() -> &'static Runtime {
+    RUNTIME.get_or_init(|| {
+        tokio::runtime::Builder::new_multi_thread()
+            .enable_all()
+            .worker_threads(4)
+            .thread_name("solid-mcp-worker")
+            .build()
+            .expect("Failed to create Tokio runtime")
+    })
+}
+
+/// Helper to create a runtime error
+fn runtime_error(msg: impl Into<String>) -> Error {
+    Error::new(Ruby::get().unwrap().exception_runtime_error(), msg.into())
+}
+
+/// Initialize the pub/sub engine with a database URL
+fn init_engine(database_url: String) -> Result<bool, Error> {
+    // Initialize tracing if DEBUG env var is set
+    if std::env::var("DEBUG_SOLID_MCP").is_ok() {
+        let subscriber = FmtSubscriber::builder()
+            .with_max_level(Level::DEBUG)
+            .finish();
+        let _ = tracing::subscriber::set_global_default(subscriber);
+    }
+
+    let rt = get_runtime();
+
+    let config = Config::new(&database_url);
+
+    let pubsub = rt
+        .block_on(async { PubSub::new(config).await })
+        .map_err(|e| runtime_error(e.to_string()))?;
+
+    PUBSUB.with(|ps| {
+        *ps.borrow_mut() = Some(Arc::new(pubsub));
+    });
+
+    Ok(true)
+}
+
+/// Initialize with custom configuration
+fn init_engine_with_config(
+    database_url: String,
+    batch_size: usize,
+    polling_interval_ms: u64,
+    max_queue_size: usize,
+) -> Result<bool, Error> {
+    let rt = get_runtime();
+
+    let config = Config::new(&database_url)
+        .batch_size(batch_size)
+        .polling_interval(Duration::from_millis(polling_interval_ms))
+        .max_queue_size(max_queue_size);
+
+    let pubsub = rt
+        .block_on(async { PubSub::new(config).await })
+        .map_err(|e| runtime_error(e.to_string()))?;
+
+    PUBSUB.with(|ps| {
+        *ps.borrow_mut() = Some(Arc::new(pubsub));
+    });
+
+    Ok(true)
+}
+
+/// Broadcast a message to a session (non-blocking)
+fn broadcast(session_id: String, event_type: String, data: String) -> Result<bool, Error> {
+    PUBSUB.with(|ps| {
+        let ps = ps.borrow();
+        let pubsub = ps.as_ref().ok_or_else(|| {
+            runtime_error("Engine not initialized")
+        })?;
+
+        pubsub
+            .broadcast(&session_id, &event_type, &data)
+            .map_err(|e| runtime_error(e.to_string()))
+    })
+}
+
+/// Flush all pending messages to the database
+fn flush() -> Result<bool, Error> {
+    let rt = get_runtime();
+
+    PUBSUB.with(|ps| {
+        let ps = ps.borrow();
+        let pubsub = ps.as_ref().ok_or_else(|| {
+            runtime_error("Engine not initialized")
+        })?;
+
+        rt.block_on(async { pubsub.flush().await })
+            .map_err(|e| runtime_error(e.to_string()))?;
+
+        Ok(true)
+    })
+}
+
+/// Mark messages as delivered
+fn mark_delivered(ids: Vec<i64>) -> Result<bool, Error> {
+    let rt = get_runtime();
+
+    PUBSUB.with(|ps| {
+        let ps = ps.borrow();
+        let pubsub = ps.as_ref().ok_or_else(|| {
+            runtime_error("Engine not initialized")
+        })?;
+
+        rt.block_on(async { pubsub.mark_delivered(&ids).await })
+            .map_err(|e| runtime_error(e.to_string()))?;
+
+        Ok(true)
+    })
+}
+
+/// Cleanup old messages
+/// Returns [delivered_count, undelivered_count]
+fn cleanup() -> Result<Vec<u64>, Error> {
+    let rt = get_runtime();
+
+    PUBSUB.with(|ps| {
+        let ps = ps.borrow();
+        let pubsub = ps.as_ref().ok_or_else(|| {
+            runtime_error("Engine not initialized")
+        })?;
+
+        let (delivered, undelivered) = rt
+            .block_on(async { pubsub.cleanup().await })
+            .map_err(|e| runtime_error(e.to_string()))?;
+
+        Ok(vec![delivered, undelivered])
+    })
+}
+
+/// Shutdown the pub/sub engine
+fn shutdown() -> Result<bool, Error> {
+    let rt = get_runtime();
+
+    PUBSUB.with(|ps| {
+        let mut ps = ps.borrow_mut();
+        if let Some(pubsub) = ps.take() {
+            // Try to get exclusive access
+            match Arc::try_unwrap(pubsub) {
+                Ok(pubsub) => {
+                    let _ = rt.block_on(async { pubsub.shutdown().await });
+                }
+                Err(_) => {
+                    // Other references exist, can't fully shutdown
+                    tracing::warn!("Cannot fully shutdown: other references exist");
+                }
+            }
+        }
+        Ok(true)
+    })
+}
+
+/// Get the library version
+fn version() -> &'static str {
+    env!("CARGO_PKG_VERSION")
+}
+
+/// Check if the engine is initialized
+fn initialized() -> bool {
+    PUBSUB.with(|ps| ps.borrow().is_some())
+}
+
+/// Get subscription count
+fn subscription_count() -> Result<usize, Error> {
+    let rt = get_runtime();
+
+    PUBSUB.with(|ps| {
+        let ps = ps.borrow();
+        let pubsub = ps.as_ref().ok_or_else(|| {
+            runtime_error("Engine not initialized")
+        })?;
+
+        Ok(rt.block_on(async { pubsub.subscription_count().await }))
+    })
+}
+
+#[magnus::init]
+fn init(ruby: &Ruby) -> Result<(), Error> {
+    let module = ruby.define_module("SolidMCPNative")?;
+
+    // Core functions
+    module.define_module_function("version", function!(version, 0))?;
+    module.define_module_function("initialized?", function!(initialized, 0))?;
+
+    // Lifecycle
+    module.define_module_function("init", function!(init_engine, 1))?;
+    module.define_module_function("init_with_config", function!(init_engine_with_config, 4))?;
+    module.define_module_function("shutdown", function!(shutdown, 0))?;
+
+    // Messaging
+    module.define_module_function("broadcast", function!(broadcast, 3))?;
+    module.define_module_function("flush", function!(flush, 0))?;
+    module.define_module_function("mark_delivered", function!(mark_delivered, 1))?;
+    module.define_module_function("cleanup", function!(cleanup, 0))?;
+
+    // Status
+    module.define_module_function("subscription_count", function!(subscription_count, 0))?;
+
+    Ok(())
+}
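
For orientation (not part of the released diff): a minimal Ruby sketch of how the SolidMCPNative module functions exported above could be driven. The require path, database URL, and message IDs are illustrative assumptions; in practice callers are expected to go through SolidMCP's Ruby wrappers (such as native_speedup.rb) rather than this module directly.

# Hypothetical usage sketch of the Magnus-exported module functions.
require 'solid_mcp_native'                      # assumed load path for the compiled extension

SolidMCPNative.init('sqlite::memory:')          # or init_with_config(url, batch_size, polling_ms, max_queue_size)
SolidMCPNative.initialized?                     # => true

# Non-blocking enqueue; the Rust writer batches the actual database inserts.
SolidMCPNative.broadcast('session-1', 'message', '{"hello":"world"}')
SolidMCPNative.flush                            # block until pending messages are persisted
SolidMCPNative.mark_delivered([1, 2, 3])        # IDs shown here are illustrative
SolidMCPNative.cleanup                          # => [delivered_count, undelivered_count]
SolidMCPNative.shutdown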
@@ -2,8 +2,9 @@
|
|
|
2
2
|
|
|
3
3
|
module SolidMCP
|
|
4
4
|
class Configuration
|
|
5
|
-
attr_accessor :batch_size, :flush_interval, :delivered_retention,
|
|
6
|
-
:undelivered_retention, :polling_interval, :max_wait_time, :logger
|
|
5
|
+
attr_accessor :batch_size, :flush_interval, :delivered_retention,
|
|
6
|
+
:undelivered_retention, :polling_interval, :max_wait_time, :logger,
|
|
7
|
+
:max_queue_size, :shutdown_timeout
|
|
7
8
|
|
|
8
9
|
def initialize
|
|
9
10
|
@batch_size = 200
|
|
@@ -12,6 +13,8 @@ module SolidMCP
|
|
|
12
13
|
@max_wait_time = 30 # 30 seconds
|
|
13
14
|
@delivered_retention = 3600 # 1 hour in seconds
|
|
14
15
|
@undelivered_retention = 86400 # 24 hours in seconds
|
|
16
|
+
@max_queue_size = 10_000 # Maximum messages in memory queue
|
|
17
|
+
@shutdown_timeout = 30 # Maximum seconds to wait for graceful shutdown
|
|
15
18
|
@logger = default_logger
|
|
16
19
|
end
|
|
17
20
|
|