solid_mcp 0.2.3 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. checksums.yaml +4 -4
  2. data/ext/solid_mcp_native/Cargo.toml +12 -0
  3. data/ext/solid_mcp_native/core/Cargo.toml +32 -0
  4. data/ext/solid_mcp_native/core/src/config.rs +133 -0
  5. data/ext/solid_mcp_native/core/src/db/mod.rs +154 -0
  6. data/ext/solid_mcp_native/core/src/db/postgres.rs +242 -0
  7. data/ext/solid_mcp_native/core/src/db/sqlite.rs +276 -0
  8. data/ext/solid_mcp_native/core/src/error.rs +38 -0
  9. data/ext/solid_mcp_native/core/src/lib.rs +25 -0
  10. data/ext/solid_mcp_native/core/src/message.rs +191 -0
  11. data/ext/solid_mcp_native/core/src/pubsub.rs +309 -0
  12. data/ext/solid_mcp_native/core/src/subscriber.rs +298 -0
  13. data/ext/solid_mcp_native/core/src/writer.rs +252 -0
  14. data/ext/solid_mcp_native/extconf.rb +3 -0
  15. data/ext/solid_mcp_native/ffi/Cargo.toml +20 -0
  16. data/ext/solid_mcp_native/ffi/extconf.rb +67 -0
  17. data/ext/solid_mcp_native/ffi/src/lib.rs +224 -0
  18. data/lib/solid_mcp/configuration.rb +5 -2
  19. data/lib/solid_mcp/message_writer.rb +80 -45
  20. data/lib/solid_mcp/native_speedup.rb +140 -0
  21. data/lib/solid_mcp/pub_sub.rb +10 -8
  22. data/lib/solid_mcp/subscriber.rb +18 -7
  23. data/lib/solid_mcp/version.rb +1 -1
  24. data/lib/solid_mcp.rb +3 -0
  25. metadata +57 -19
  26. data/.release-please-manifest.json +0 -1
  27. data/CHANGELOG.md +0 -34
  28. data/Gemfile +0 -11
  29. data/Gemfile.lock +0 -140
  30. data/Rakefile +0 -8
  31. data/app/models/solid_mcp/message.rb +0 -25
  32. data/app/models/solid_mcp/record.rb +0 -10
  33. data/bin/console +0 -11
  34. data/bin/rails +0 -15
  35. data/bin/setup +0 -8
  36. data/bin/test +0 -8
  37. data/db/migrate/20250624000001_create_solid_mcp_messages.rb +0 -28
  38. data/release-please-config.json +0 -8
  39. data/solid_mcp.gemspec +0 -39
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 91045bbe4e21e1bd0dd89dbf5477cb36dde677b646645d90df1231dfe16300ff
-   data.tar.gz: fd9e010eefd04ac65c5b63bbb977b5b80c7cc087732298790e85c51d8d1225e5
+   metadata.gz: 443e55616dcd4ddf092dd435572ef3767cfc00a57216b3c4c00187e34a0bb03c
+   data.tar.gz: 3459b9ed5c10ce120eb73db02e61608c25abffc45c081833588b5a281d06eea0
  SHA512:
-   metadata.gz: 285dbe81972f936507644965d2185f6d11821486ab8ed740a7d31639d5fae4e4ac9c9d8a86bbe233058102043310675902c65b85d4a3bf913bb1045c0daf8cc7
-   data.tar.gz: d80ad9922735b39d210c6e942fcc7569ce7ca2c40b3e6989590cc85e18d20b40605f160180416ed8a0bbf108912125f49aec29d279cc986c1c7b61cfee104129
+   metadata.gz: 9850fa1d32aa02e8e7d95429db6f97dc9b9429d41a2b35806f66389e8132cadc740e42ed45cd7f3e3f30158b315d79fbdc56f74955fc594f5b240b263b485d0d
+   data.tar.gz: 475d22bb1fb2ef245b1f9fa8bd64ae153dec52a01f135263cdd23e8fc96cb9f21b846b9303f00b36e54a46799becf2c9d32284bd158c572d151dbf48cb0e6870
data/ext/solid_mcp_native/Cargo.toml ADDED
@@ -0,0 +1,12 @@
+ [workspace]
+ members = ["core", "ffi"]
+ resolver = "2"
+
+ [workspace.dependencies]
+ solid-mcp-core = { path = "./core" }
+ tokio = { version = "1", features = ["full"] }
+ serde = { version = "1", features = ["derive"] }
+ serde_json = "1"
+ thiserror = "2"
+ tracing = "0.1"
+ sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite", "postgres", "chrono"] }
data/ext/solid_mcp_native/core/Cargo.toml ADDED
@@ -0,0 +1,32 @@
+ [package]
+ name = "solid-mcp-core"
+ version = "0.1.0"
+ edition = "2024"
+ rust-version = "1.85"
+ authors = ["Abdelkader Boudih <terminale@gmail.com>"]
+ license = "MIT"
+ repository = "https://github.com/seuros/solid_mcp"
+ description = "High-performance async pub/sub engine for MCP (Model Context Protocol)"
+ keywords = ["mcp", "pub-sub", "async", "tokio"]
+ categories = ["asynchronous", "database"]
+
+ [lib]
+ crate-type = ["lib"]
+
+ [features]
+ default = ["sqlite", "postgres"]
+ sqlite = ["sqlx/sqlite"]
+ postgres = ["sqlx/postgres"]
+
+ [dependencies]
+ tokio = { workspace = true }
+ serde = { workspace = true }
+ serde_json = { workspace = true }
+ thiserror = { workspace = true }
+ tracing = { workspace = true }
+ sqlx = { workspace = true }
+ chrono = { version = "0.4", features = ["serde"] }
+ async-trait = "0.1"
+
+ [dev-dependencies]
+ tokio-test = "0.4"
data/ext/solid_mcp_native/core/src/config.rs ADDED
@@ -0,0 +1,133 @@
+ //! Configuration for solid-mcp-core
+
+ use std::time::Duration;
+
+ /// Configuration for the pub/sub engine
+ #[derive(Debug, Clone)]
+ pub struct Config {
+     /// Maximum messages per batch write (default: 200)
+     pub batch_size: usize,
+
+     /// Polling interval for SQLite subscribers (default: 100ms)
+     pub polling_interval: Duration,
+
+     /// Maximum wait time for SSE connections (default: 30s)
+     pub max_wait_time: Duration,
+
+     /// How long to keep delivered messages (default: 1 hour)
+     pub delivered_retention: Duration,
+
+     /// How long to keep undelivered messages (default: 24 hours)
+     pub undelivered_retention: Duration,
+
+     /// Maximum messages in memory queue (default: 10,000)
+     pub max_queue_size: usize,
+
+     /// Maximum time to wait for graceful shutdown (default: 30s)
+     pub shutdown_timeout: Duration,
+
+     /// Database URL (required)
+     pub database_url: String,
+ }
+
+ impl Default for Config {
+     fn default() -> Self {
+         Self {
+             batch_size: 200,
+             polling_interval: Duration::from_millis(100),
+             max_wait_time: Duration::from_secs(30),
+             delivered_retention: Duration::from_secs(3600),
+             undelivered_retention: Duration::from_secs(86400),
+             max_queue_size: 10_000,
+             shutdown_timeout: Duration::from_secs(30),
+             database_url: String::new(),
+         }
+     }
+ }
+
+ impl Config {
+     /// Create a new config with the given database URL
+     pub fn new(database_url: impl Into<String>) -> Self {
+         Self {
+             database_url: database_url.into(),
+             ..Default::default()
+         }
+     }
+
+     /// Builder pattern: set batch size
+     pub fn batch_size(mut self, size: usize) -> Self {
+         self.batch_size = size;
+         self
+     }
+
+     /// Builder pattern: set polling interval
+     pub fn polling_interval(mut self, interval: Duration) -> Self {
+         self.polling_interval = interval;
+         self
+     }
+
+     /// Builder pattern: set max queue size
+     pub fn max_queue_size(mut self, size: usize) -> Self {
+         self.max_queue_size = size;
+         self
+     }
+
+     /// Builder pattern: set shutdown timeout
+     pub fn shutdown_timeout(mut self, timeout: Duration) -> Self {
+         self.shutdown_timeout = timeout;
+         self
+     }
+
+     /// Check if this is a PostgreSQL connection
+     pub fn is_postgres(&self) -> bool {
+         self.database_url.starts_with("postgres://")
+             || self.database_url.starts_with("postgresql://")
+     }
+
+     /// Check if this is a SQLite connection
+     pub fn is_sqlite(&self) -> bool {
+         self.database_url.starts_with("sqlite://")
+             || self.database_url.starts_with("sqlite:")
+             || self.database_url.ends_with(".db")
+             || self.database_url.ends_with(".sqlite")
+             || self.database_url.ends_with(".sqlite3")
+     }
+ }
+
+ #[cfg(test)]
+ mod tests {
+     use super::*;
+
+     #[test]
+     fn test_default_config() {
+         let config = Config::default();
+         assert_eq!(config.batch_size, 200);
+         assert_eq!(config.polling_interval, Duration::from_millis(100));
+         assert_eq!(config.max_queue_size, 10_000);
+     }
+
+     #[test]
+     fn test_builder_pattern() {
+         let config = Config::new("sqlite::memory:")
+             .batch_size(100)
+             .polling_interval(Duration::from_millis(50))
+             .max_queue_size(5000);
+
+         assert_eq!(config.batch_size, 100);
+         assert_eq!(config.polling_interval, Duration::from_millis(50));
+         assert_eq!(config.max_queue_size, 5000);
+         assert_eq!(config.database_url, "sqlite::memory:");
+     }
+
+     #[test]
+     fn test_database_type_detection() {
+         assert!(Config::new("postgres://localhost/test").is_postgres());
+         assert!(Config::new("postgresql://localhost/test").is_postgres());
+         assert!(!Config::new("postgres://localhost/test").is_sqlite());
+
+         assert!(Config::new("sqlite::memory:").is_sqlite());
+         assert!(Config::new("sqlite://./test.db").is_sqlite());
+         assert!(Config::new("./test.sqlite3").is_sqlite());
+         assert!(!Config::new("sqlite::memory:").is_postgres());
+     }
+ }
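
Note (not part of the diff): config.rs exposes a consuming builder over Config, so setters chain. A minimal usage sketch, assuming Config is re-exported at the crate root (as the `use crate::Config` in db/mod.rs suggests); the database URL and tuning values are placeholders.

    use std::time::Duration;
    use solid_mcp_core::Config;

    // Sketch only: each setter takes self by value and returns it, so calls chain.
    fn tuned_config() -> Config {
        Config::new("sqlite://./solid_mcp.db")           // placeholder database URL
            .batch_size(500)                              // override the 200-message default
            .polling_interval(Duration::from_millis(50))  // poll faster than the 100ms default
            .max_queue_size(20_000)
            .shutdown_timeout(Duration::from_secs(10))
    }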
data/ext/solid_mcp_native/core/src/db/mod.rs ADDED
@@ -0,0 +1,154 @@
+ //! Database abstraction layer for solid-mcp-core
+ //!
+ //! Supports both SQLite and PostgreSQL backends.
+
+ #[cfg(feature = "postgres")]
+ pub mod postgres;
+ #[cfg(feature = "sqlite")]
+ pub mod sqlite;
+
+ use crate::{Config, Message, Result};
+ use async_trait::async_trait;
+ use std::time::Duration;
+
+ /// Database backend trait
+ #[async_trait]
+ pub trait Database: Send + Sync + 'static {
+     /// Insert a batch of messages
+     async fn insert_batch(&self, messages: &[Message]) -> Result<()>;
+
+     /// Fetch undelivered messages for a session after the given ID
+     async fn fetch_after(
+         &self,
+         session_id: &str,
+         after_id: i64,
+         limit: i64,
+     ) -> Result<Vec<Message>>;
+
+     /// Mark messages as delivered
+     async fn mark_delivered(&self, ids: &[i64]) -> Result<()>;
+
+     /// Delete old delivered messages
+     async fn cleanup_delivered(&self, older_than: Duration) -> Result<u64>;
+
+     /// Delete old undelivered messages
+     async fn cleanup_undelivered(&self, older_than: Duration) -> Result<u64>;
+
+     /// Get the maximum message ID (for initialization)
+     async fn max_id(&self) -> Result<i64>;
+ }
+
+ /// Database pool type (enum dispatch for runtime selection)
+ pub enum DbPool {
+     #[cfg(feature = "sqlite")]
+     Sqlite(sqlite::SqlitePool),
+     #[cfg(feature = "postgres")]
+     Postgres(postgres::PostgresPool),
+ }
+
+ impl DbPool {
+     /// Create a new database pool from config
+     ///
+     /// The database and tables must already exist (created by Ruby migrations).
+     pub async fn new(config: &Config) -> Result<Self> {
+         #[cfg(feature = "postgres")]
+         if config.is_postgres() {
+             return Ok(Self::Postgres(
+                 postgres::PostgresPool::new(&config.database_url).await?,
+             ));
+         }
+
+         #[cfg(feature = "sqlite")]
+         if config.is_sqlite() {
+             return Ok(Self::Sqlite(
+                 sqlite::SqlitePool::new(&config.database_url).await?,
+             ));
+         }
+
+         Err(crate::Error::Config(format!(
+             "Unsupported database URL: {}",
+             config.database_url
+         )))
+     }
+
+     /// Check if this is a PostgreSQL pool (supports LISTEN/NOTIFY)
+     pub fn is_postgres(&self) -> bool {
+         matches!(self, Self::Postgres(_))
+     }
+
+     /// Create tables for testing purposes only
+     #[cfg(test)]
+     pub(crate) async fn setup_test_schema(&self) -> Result<()> {
+         match self {
+             #[cfg(feature = "sqlite")]
+             Self::Sqlite(pool) => pool.setup_test_schema().await,
+             #[cfg(feature = "postgres")]
+             Self::Postgres(_) => {
+                 // PostgreSQL tests require pre-existing schema
+                 Ok(())
+             }
+         }
+     }
+ }
+
+ #[async_trait]
+ impl Database for DbPool {
+     async fn insert_batch(&self, messages: &[Message]) -> Result<()> {
+         match self {
+             #[cfg(feature = "sqlite")]
+             Self::Sqlite(pool) => pool.insert_batch(messages).await,
+             #[cfg(feature = "postgres")]
+             Self::Postgres(pool) => pool.insert_batch(messages).await,
+         }
+     }
+
+     async fn fetch_after(
+         &self,
+         session_id: &str,
+         after_id: i64,
+         limit: i64,
+     ) -> Result<Vec<Message>> {
+         match self {
+             #[cfg(feature = "sqlite")]
+             Self::Sqlite(pool) => pool.fetch_after(session_id, after_id, limit).await,
+             #[cfg(feature = "postgres")]
+             Self::Postgres(pool) => pool.fetch_after(session_id, after_id, limit).await,
+         }
+     }
+
+     async fn mark_delivered(&self, ids: &[i64]) -> Result<()> {
+         match self {
+             #[cfg(feature = "sqlite")]
+             Self::Sqlite(pool) => pool.mark_delivered(ids).await,
+             #[cfg(feature = "postgres")]
+             Self::Postgres(pool) => pool.mark_delivered(ids).await,
+         }
+     }
+
+     async fn cleanup_delivered(&self, older_than: Duration) -> Result<u64> {
+         match self {
+             #[cfg(feature = "sqlite")]
+             Self::Sqlite(pool) => pool.cleanup_delivered(older_than).await,
+             #[cfg(feature = "postgres")]
+             Self::Postgres(pool) => pool.cleanup_delivered(older_than).await,
+         }
+     }
+
+     async fn cleanup_undelivered(&self, older_than: Duration) -> Result<u64> {
+         match self {
+             #[cfg(feature = "sqlite")]
+             Self::Sqlite(pool) => pool.cleanup_undelivered(older_than).await,
+             #[cfg(feature = "postgres")]
+             Self::Postgres(pool) => pool.cleanup_undelivered(older_than).await,
+         }
+     }
+
+     async fn max_id(&self) -> Result<i64> {
+         match self {
+             #[cfg(feature = "sqlite")]
+             Self::Sqlite(pool) => pool.max_id().await,
+             #[cfg(feature = "postgres")]
+             Self::Postgres(pool) => pool.max_id().await,
+         }
+     }
+ }
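
Note (not part of the diff): DbPool::new picks a backend at runtime from Config::is_postgres / Config::is_sqlite, and every Database method then dispatches on the enum variant. A sketch of one round trip through that trait, assuming the db module and the Message fields are publicly reachable from outside the crate (lib.rs is not shown in this section); the URL is a placeholder.

    use solid_mcp_core::{Config, Result};
    use solid_mcp_core::db::{Database, DbPool};

    // Sketch only: fetch undelivered messages for a session and mark them delivered.
    async fn drain_session(session_id: &str) -> Result<()> {
        let config = Config::new("sqlite://./solid_mcp.db");  // placeholder URL
        let pool = DbPool::new(&config).await?;               // enum dispatch: Sqlite or Postgres
        let pending = pool.fetch_after(session_id, 0, 100).await?;
        let ids: Vec<i64> = pending.iter().map(|m| m.id).collect();
        pool.mark_delivered(&ids).await?;
        Ok(())
    }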
data/ext/solid_mcp_native/core/src/db/postgres.rs ADDED
@@ -0,0 +1,242 @@
+ //! PostgreSQL database backend for solid-mcp-core
+ //!
+ //! Supports LISTEN/NOTIFY for real-time message delivery without polling.
+
+ use crate::{Message, Result};
+ use async_trait::async_trait;
+ use sqlx::postgres::{PgConnectOptions, PgListener, PgPoolOptions};
+ use sqlx::{Pool, Postgres};
+ use std::str::FromStr;
+ use std::time::Duration;
+
+ /// PostgreSQL connection pool
+ #[derive(Clone)]
+ pub struct PostgresPool {
+     pool: Pool<Postgres>,
+     database_url: String,
+ }
+
+ impl PostgresPool {
+     /// Create a new PostgreSQL pool from a database URL
+     ///
+     /// The database and tables must already exist (created by Ruby migrations).
+     pub async fn new(database_url: &str) -> Result<Self> {
+         let options = PgConnectOptions::from_str(database_url)?;
+
+         let pool = PgPoolOptions::new()
+             .max_connections(10)
+             .acquire_timeout(Duration::from_secs(30))
+             .connect_with(options)
+             .await?;
+
+         Ok(Self {
+             pool,
+             database_url: database_url.to_string(),
+         })
+     }
+
+     /// Create a LISTEN connection for a session
+     ///
+     /// This is used for real-time message delivery without polling.
+     pub async fn listen(&self, session_id: &str) -> Result<PgListener> {
+         let mut listener = PgListener::connect(&self.database_url).await?;
+         let channel = format!("solid_mcp_{}", session_id);
+         listener.listen(&channel).await?;
+         Ok(listener)
+     }
+
+     /// Send a NOTIFY for a session (called after insert for immediate delivery)
+     pub async fn notify(&self, session_id: &str, message_id: i64) -> Result<()> {
+         let channel = format!("solid_mcp_{}", session_id);
+         sqlx::query("SELECT pg_notify($1, $2)")
+             .bind(&channel)
+             .bind(message_id.to_string())
+             .execute(&self.pool)
+             .await?;
+         Ok(())
+     }
+ }
+
+ #[async_trait]
+ impl super::Database for PostgresPool {
+     async fn insert_batch(&self, messages: &[Message]) -> Result<()> {
+         if messages.is_empty() {
+             return Ok(());
+         }
+
+         // Use COPY for maximum performance on large batches
+         // Fall back to multi-row INSERT for smaller batches
+         if messages.len() >= 100 {
+             self.insert_batch_copy(messages).await
+         } else {
+             self.insert_batch_values(messages).await
+         }
+     }
+
+     async fn fetch_after(
+         &self,
+         session_id: &str,
+         after_id: i64,
+         limit: i64,
+     ) -> Result<Vec<Message>> {
+         let rows = sqlx::query_as::<
+             _,
+             (
+                 i64,
+                 String,
+                 String,
+                 String,
+                 chrono::DateTime<chrono::Utc>,
+                 Option<chrono::DateTime<chrono::Utc>>,
+             ),
+         >(
+             r#"
+             SELECT id, session_id, event_type, data, created_at, delivered_at
+             FROM solid_mcp_messages
+             WHERE session_id = $1 AND delivered_at IS NULL AND id > $2
+             ORDER BY id
+             LIMIT $3
+             "#,
+         )
+         .bind(session_id)
+         .bind(after_id)
+         .bind(limit)
+         .fetch_all(&self.pool)
+         .await?;
+
+         let messages = rows
+             .into_iter()
+             .map(
+                 |(id, session_id, event_type, data, created_at, delivered_at)| Message {
+                     id,
+                     session_id,
+                     event_type,
+                     data,
+                     created_at,
+                     delivered_at,
+                 },
+             )
+             .collect();
+
+         Ok(messages)
+     }
+
+     async fn mark_delivered(&self, ids: &[i64]) -> Result<()> {
+         if ids.is_empty() {
+             return Ok(());
+         }
+
+         sqlx::query(
+             r#"
+             UPDATE solid_mcp_messages
+             SET delivered_at = NOW()
+             WHERE id = ANY($1)
+             "#,
+         )
+         .bind(ids)
+         .execute(&self.pool)
+         .await?;
+
+         Ok(())
+     }
+
+     async fn cleanup_delivered(&self, older_than: Duration) -> Result<u64> {
+         let cutoff = chrono::Utc::now() - chrono::Duration::from_std(older_than).unwrap();
+
+         let result = sqlx::query(
+             r#"
+             DELETE FROM solid_mcp_messages
+             WHERE delivered_at IS NOT NULL AND delivered_at < $1
+             "#,
+         )
+         .bind(cutoff)
+         .execute(&self.pool)
+         .await?;
+
+         Ok(result.rows_affected())
+     }
+
+     async fn cleanup_undelivered(&self, older_than: Duration) -> Result<u64> {
+         let cutoff = chrono::Utc::now() - chrono::Duration::from_std(older_than).unwrap();
+
+         let result = sqlx::query(
+             r#"
+             DELETE FROM solid_mcp_messages
+             WHERE delivered_at IS NULL AND created_at < $1
+             "#,
+         )
+         .bind(cutoff)
+         .execute(&self.pool)
+         .await?;
+
+         Ok(result.rows_affected())
+     }
+
+     async fn max_id(&self) -> Result<i64> {
+         let row: (Option<i64>,) = sqlx::query_as("SELECT MAX(id) FROM solid_mcp_messages")
+             .fetch_one(&self.pool)
+             .await?;
+
+         Ok(row.0.unwrap_or(0))
+     }
+ }
+
+ impl PostgresPool {
+     /// Insert using multi-row VALUES (good for small batches)
+     async fn insert_batch_values(&self, messages: &[Message]) -> Result<()> {
+         let mut query = String::from(
+             "INSERT INTO solid_mcp_messages (session_id, event_type, data, created_at) VALUES ",
+         );
+
+         for (i, _) in messages.iter().enumerate() {
+             if i > 0 {
+                 query.push_str(", ");
+             }
+             let base = i * 4 + 1;
+             query.push_str(&format!(
+                 "(${}, ${}, ${}, ${})",
+                 base,
+                 base + 1,
+                 base + 2,
+                 base + 3
+             ));
+         }
+
+         let mut q = sqlx::query(&query);
+         for msg in messages {
+             q = q
+                 .bind(&msg.session_id)
+                 .bind(&msg.event_type)
+                 .bind(&msg.data)
+                 .bind(msg.created_at);
+         }
+         q.execute(&self.pool).await?;
+
+         Ok(())
+     }
+
+     /// Insert using COPY (efficient for large batches)
+     async fn insert_batch_copy(&self, messages: &[Message]) -> Result<()> {
+         // For now, fall back to VALUES insert
+         // TODO: Implement proper COPY protocol for maximum throughput
+         self.insert_batch_values(messages).await
+     }
+ }
+
+ #[cfg(test)]
+ mod tests {
+     // PostgreSQL tests require a running database
+     // Run with: DATABASE_URL=postgres://localhost/test_solid_mcp cargo test
+
+     use super::*;
+     use crate::db::Database;
+
+     #[tokio::test]
+     #[ignore] // Requires PostgreSQL
+     async fn test_postgres_pool_creation() {
+         let url = std::env::var("DATABASE_URL")
+             .unwrap_or_else(|_| "postgres://localhost/test_solid_mcp".to_string());
+         let pool = PostgresPool::new(&url).await.unwrap();
+         let _ = pool.max_id().await.unwrap();
+     }
+ }
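
Note (not part of the diff): the listen/notify pair above is what lets PostgreSQL subscribers skip the polling loop used for SQLite. A sketch of the round trip, using only the calls shown in this file plus sqlx's PgListener::recv, and assuming the db::postgres path is publicly reachable; URL, session id, and message id are placeholders.

    use solid_mcp_core::Result;
    use solid_mcp_core::db::postgres::PostgresPool;

    // Sketch only: LISTEN on the per-session channel, then wake up on a NOTIFY.
    async fn wait_for_next(session_id: &str) -> Result<()> {
        let pool = PostgresPool::new("postgres://localhost/solid_mcp").await?; // placeholder URL
        let mut listener = pool.listen(session_id).await?;  // LISTEN solid_mcp_<session_id>
        pool.notify(session_id, 42).await?;                  // writer side: NOTIFY with the new row id
        let note = listener.recv().await?;                   // subscriber wakes without polling
        println!("new message id: {}", note.payload());
        Ok(())
    }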