spikard 0.4.0-arm64-darwin-23
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE +1 -0
- data/README.md +659 -0
- data/ext/spikard_rb/Cargo.toml +17 -0
- data/ext/spikard_rb/extconf.rb +10 -0
- data/ext/spikard_rb/src/lib.rs +6 -0
- data/lib/spikard/app.rb +405 -0
- data/lib/spikard/background.rb +27 -0
- data/lib/spikard/config.rb +396 -0
- data/lib/spikard/converters.rb +13 -0
- data/lib/spikard/handler_wrapper.rb +113 -0
- data/lib/spikard/provide.rb +214 -0
- data/lib/spikard/response.rb +173 -0
- data/lib/spikard/schema.rb +243 -0
- data/lib/spikard/sse.rb +111 -0
- data/lib/spikard/streaming_response.rb +44 -0
- data/lib/spikard/testing.rb +221 -0
- data/lib/spikard/upload_file.rb +131 -0
- data/lib/spikard/version.rb +5 -0
- data/lib/spikard/websocket.rb +59 -0
- data/lib/spikard.rb +43 -0
- data/sig/spikard.rbs +366 -0
- data/vendor/bundle/ruby/3.4.0/gems/diff-lcs-1.6.2/mise.toml +5 -0
- data/vendor/bundle/ruby/3.4.0/gems/rake-compiler-dock-1.10.0/build/buildkitd.toml +2 -0
- data/vendor/crates/spikard-bindings-shared/Cargo.toml +63 -0
- data/vendor/crates/spikard-bindings-shared/examples/config_extraction.rs +139 -0
- data/vendor/crates/spikard-bindings-shared/src/config_extractor.rs +561 -0
- data/vendor/crates/spikard-bindings-shared/src/conversion_traits.rs +194 -0
- data/vendor/crates/spikard-bindings-shared/src/di_traits.rs +246 -0
- data/vendor/crates/spikard-bindings-shared/src/error_response.rs +403 -0
- data/vendor/crates/spikard-bindings-shared/src/handler_base.rs +274 -0
- data/vendor/crates/spikard-bindings-shared/src/lib.rs +25 -0
- data/vendor/crates/spikard-bindings-shared/src/lifecycle_base.rs +298 -0
- data/vendor/crates/spikard-bindings-shared/src/lifecycle_executor.rs +637 -0
- data/vendor/crates/spikard-bindings-shared/src/response_builder.rs +309 -0
- data/vendor/crates/spikard-bindings-shared/src/test_client_base.rs +248 -0
- data/vendor/crates/spikard-bindings-shared/src/validation_helpers.rs +355 -0
- data/vendor/crates/spikard-bindings-shared/tests/comprehensive_coverage.rs +502 -0
- data/vendor/crates/spikard-bindings-shared/tests/error_response_edge_cases.rs +389 -0
- data/vendor/crates/spikard-bindings-shared/tests/handler_base_integration.rs +413 -0
- data/vendor/crates/spikard-core/Cargo.toml +40 -0
- data/vendor/crates/spikard-core/src/bindings/mod.rs +3 -0
- data/vendor/crates/spikard-core/src/bindings/response.rs +133 -0
- data/vendor/crates/spikard-core/src/debug.rs +63 -0
- data/vendor/crates/spikard-core/src/di/container.rs +726 -0
- data/vendor/crates/spikard-core/src/di/dependency.rs +273 -0
- data/vendor/crates/spikard-core/src/di/error.rs +118 -0
- data/vendor/crates/spikard-core/src/di/factory.rs +538 -0
- data/vendor/crates/spikard-core/src/di/graph.rs +545 -0
- data/vendor/crates/spikard-core/src/di/mod.rs +192 -0
- data/vendor/crates/spikard-core/src/di/resolved.rs +411 -0
- data/vendor/crates/spikard-core/src/di/value.rs +283 -0
- data/vendor/crates/spikard-core/src/errors.rs +39 -0
- data/vendor/crates/spikard-core/src/http.rs +153 -0
- data/vendor/crates/spikard-core/src/lib.rs +29 -0
- data/vendor/crates/spikard-core/src/lifecycle.rs +422 -0
- data/vendor/crates/spikard-core/src/metadata.rs +397 -0
- data/vendor/crates/spikard-core/src/parameters.rs +723 -0
- data/vendor/crates/spikard-core/src/problem.rs +310 -0
- data/vendor/crates/spikard-core/src/request_data.rs +189 -0
- data/vendor/crates/spikard-core/src/router.rs +249 -0
- data/vendor/crates/spikard-core/src/schema_registry.rs +183 -0
- data/vendor/crates/spikard-core/src/type_hints.rs +304 -0
- data/vendor/crates/spikard-core/src/validation/error_mapper.rs +689 -0
- data/vendor/crates/spikard-core/src/validation/mod.rs +459 -0
- data/vendor/crates/spikard-http/Cargo.toml +58 -0
- data/vendor/crates/spikard-http/examples/sse-notifications.rs +147 -0
- data/vendor/crates/spikard-http/examples/websocket-chat.rs +91 -0
- data/vendor/crates/spikard-http/src/auth.rs +247 -0
- data/vendor/crates/spikard-http/src/background.rs +1562 -0
- data/vendor/crates/spikard-http/src/bindings/mod.rs +3 -0
- data/vendor/crates/spikard-http/src/bindings/response.rs +1 -0
- data/vendor/crates/spikard-http/src/body_metadata.rs +8 -0
- data/vendor/crates/spikard-http/src/cors.rs +490 -0
- data/vendor/crates/spikard-http/src/debug.rs +63 -0
- data/vendor/crates/spikard-http/src/di_handler.rs +1878 -0
- data/vendor/crates/spikard-http/src/handler_response.rs +532 -0
- data/vendor/crates/spikard-http/src/handler_trait.rs +861 -0
- data/vendor/crates/spikard-http/src/handler_trait_tests.rs +284 -0
- data/vendor/crates/spikard-http/src/lib.rs +524 -0
- data/vendor/crates/spikard-http/src/lifecycle/adapter.rs +149 -0
- data/vendor/crates/spikard-http/src/lifecycle.rs +428 -0
- data/vendor/crates/spikard-http/src/middleware/mod.rs +285 -0
- data/vendor/crates/spikard-http/src/middleware/multipart.rs +930 -0
- data/vendor/crates/spikard-http/src/middleware/urlencoded.rs +541 -0
- data/vendor/crates/spikard-http/src/middleware/validation.rs +287 -0
- data/vendor/crates/spikard-http/src/openapi/mod.rs +309 -0
- data/vendor/crates/spikard-http/src/openapi/parameter_extraction.rs +535 -0
- data/vendor/crates/spikard-http/src/openapi/schema_conversion.rs +867 -0
- data/vendor/crates/spikard-http/src/openapi/spec_generation.rs +678 -0
- data/vendor/crates/spikard-http/src/query_parser.rs +369 -0
- data/vendor/crates/spikard-http/src/response.rs +399 -0
- data/vendor/crates/spikard-http/src/server/handler.rs +1557 -0
- data/vendor/crates/spikard-http/src/server/lifecycle_execution.rs +98 -0
- data/vendor/crates/spikard-http/src/server/mod.rs +806 -0
- data/vendor/crates/spikard-http/src/server/request_extraction.rs +630 -0
- data/vendor/crates/spikard-http/src/server/routing_factory.rs +497 -0
- data/vendor/crates/spikard-http/src/sse.rs +961 -0
- data/vendor/crates/spikard-http/src/testing/form.rs +14 -0
- data/vendor/crates/spikard-http/src/testing/multipart.rs +60 -0
- data/vendor/crates/spikard-http/src/testing/test_client.rs +285 -0
- data/vendor/crates/spikard-http/src/testing.rs +377 -0
- data/vendor/crates/spikard-http/src/websocket.rs +831 -0
- data/vendor/crates/spikard-http/tests/background_behavior.rs +918 -0
- data/vendor/crates/spikard-http/tests/common/handlers.rs +308 -0
- data/vendor/crates/spikard-http/tests/common/mod.rs +21 -0
- data/vendor/crates/spikard-http/tests/di_integration.rs +202 -0
- data/vendor/crates/spikard-http/tests/doc_snippets.rs +4 -0
- data/vendor/crates/spikard-http/tests/lifecycle_execution.rs +1135 -0
- data/vendor/crates/spikard-http/tests/multipart_behavior.rs +688 -0
- data/vendor/crates/spikard-http/tests/server_config_builder.rs +324 -0
- data/vendor/crates/spikard-http/tests/sse_behavior.rs +728 -0
- data/vendor/crates/spikard-http/tests/websocket_behavior.rs +724 -0
- data/vendor/crates/spikard-rb/Cargo.toml +43 -0
- data/vendor/crates/spikard-rb/build.rs +199 -0
- data/vendor/crates/spikard-rb/src/background.rs +63 -0
- data/vendor/crates/spikard-rb/src/config/mod.rs +5 -0
- data/vendor/crates/spikard-rb/src/config/server_config.rs +283 -0
- data/vendor/crates/spikard-rb/src/conversion.rs +459 -0
- data/vendor/crates/spikard-rb/src/di/builder.rs +105 -0
- data/vendor/crates/spikard-rb/src/di/mod.rs +413 -0
- data/vendor/crates/spikard-rb/src/handler.rs +612 -0
- data/vendor/crates/spikard-rb/src/integration/mod.rs +3 -0
- data/vendor/crates/spikard-rb/src/lib.rs +1857 -0
- data/vendor/crates/spikard-rb/src/lifecycle.rs +275 -0
- data/vendor/crates/spikard-rb/src/metadata/mod.rs +5 -0
- data/vendor/crates/spikard-rb/src/metadata/route_extraction.rs +427 -0
- data/vendor/crates/spikard-rb/src/runtime/mod.rs +5 -0
- data/vendor/crates/spikard-rb/src/runtime/server_runner.rs +326 -0
- data/vendor/crates/spikard-rb/src/server.rs +283 -0
- data/vendor/crates/spikard-rb/src/sse.rs +231 -0
- data/vendor/crates/spikard-rb/src/testing/client.rs +404 -0
- data/vendor/crates/spikard-rb/src/testing/mod.rs +7 -0
- data/vendor/crates/spikard-rb/src/testing/sse.rs +143 -0
- data/vendor/crates/spikard-rb/src/testing/websocket.rs +221 -0
- data/vendor/crates/spikard-rb/src/websocket.rs +233 -0
- data/vendor/crates/spikard-rb/tests/magnus_ffi_tests.rs +14 -0
- metadata +213 -0
data/vendor/crates/spikard-http/tests/sse_behavior.rs

@@ -0,0 +1,728 @@
+//! Behavioral tests for Server-Sent Events (SSE) functionality
+//!
+//! These tests verify end-to-end SSE behavior including:
+//! - Connection establishment and event streaming
+//! - Client reconnection with Last-Event-ID header
+//! - Event ordering preservation
+//! - Connection cleanup on disconnect
+//! - Keep-alive behavior
+//! - Backpressure handling for slow clients
+//! - Graceful shutdown with active streams
+
+mod common;
+
+use spikard_http::sse::{SseEvent, SseEventProducer};
+use std::sync::Arc;
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::time::Duration;
+use tokio::time::sleep;
+
+// ============================================================================
+// Test Producers for Behavioral Testing
+// ============================================================================
+
+/// Producer that simulates a stream of numbered events (for ordering tests)
+struct SequentialEventProducer {
+    total_events: usize,
+    current_count: Arc<AtomicUsize>,
+    connect_count: Arc<AtomicUsize>,
+    disconnect_count: Arc<AtomicUsize>,
+}
+
+impl SequentialEventProducer {
+    fn new(total_events: usize) -> Self {
+        Self {
+            total_events,
+            current_count: Arc::new(AtomicUsize::new(0)),
+            connect_count: Arc::new(AtomicUsize::new(0)),
+            disconnect_count: Arc::new(AtomicUsize::new(0)),
+        }
+    }
+
+    fn get_connect_count(&self) -> usize {
+        self.connect_count.load(Ordering::Relaxed)
+    }
+
+    fn get_disconnect_count(&self) -> usize {
+        self.disconnect_count.load(Ordering::Relaxed)
+    }
+}
+
+impl SseEventProducer for SequentialEventProducer {
+    async fn next_event(&self) -> Option<SseEvent> {
+        let idx = self.current_count.fetch_add(1, Ordering::Relaxed);
+        if idx < self.total_events {
+            Some(
+                SseEvent::with_type(
+                    "data",
+                    serde_json::json!({
+                        "sequence": idx,
+                        "message": format!("Event {}", idx)
+                    }),
+                )
+                .with_id(format!("event-{}", idx)),
+            )
+        } else {
+            None
+        }
+    }
+
+    async fn on_connect(&self) {
+        self.connect_count.fetch_add(1, Ordering::Relaxed);
+    }
+
+    async fn on_disconnect(&self) {
+        self.disconnect_count.fetch_add(1, Ordering::Relaxed);
+    }
+}
+
+/// Producer that supports reconnection with Last-Event-ID tracking
+struct ReconnectableEventProducer {
+    events: Vec<(String, serde_json::Value)>,
+    current_idx: Arc<AtomicUsize>,
+    connect_count: Arc<AtomicUsize>,
+}
+
+impl ReconnectableEventProducer {
+    fn new(events: Vec<(String, serde_json::Value)>) -> Self {
+        Self {
+            events,
+            current_idx: Arc::new(AtomicUsize::new(0)),
+            connect_count: Arc::new(AtomicUsize::new(0)),
+        }
+    }
+
+    fn get_connect_count(&self) -> usize {
+        self.connect_count.load(Ordering::Relaxed)
+    }
+}
+
+impl SseEventProducer for ReconnectableEventProducer {
+    async fn next_event(&self) -> Option<SseEvent> {
+        let idx = self.current_idx.fetch_add(1, Ordering::Relaxed);
+        if idx < self.events.len() {
+            let (id, data) = self.events[idx].clone();
+            Some(SseEvent::with_type("update", data).with_id(id.clone()))
+        } else {
+            None
+        }
+    }
+
+    async fn on_connect(&self) {
+        self.connect_count.fetch_add(1, Ordering::Relaxed);
+    }
+}
+
+/// Producer that sends events with configurable delays for backpressure testing
+struct SlowClientProducer {
+    event_count: usize,
+    delay_ms: u64,
+    current_idx: Arc<AtomicUsize>,
+    events_sent: Arc<AtomicUsize>,
+}
+
+impl SlowClientProducer {
+    fn new(event_count: usize, delay_ms: u64) -> Self {
+        Self {
+            event_count,
+            delay_ms,
+            current_idx: Arc::new(AtomicUsize::new(0)),
+            events_sent: Arc::new(AtomicUsize::new(0)),
+        }
+    }
+
+    fn get_events_sent(&self) -> usize {
+        self.events_sent.load(Ordering::Relaxed)
+    }
+}
+
+impl SseEventProducer for SlowClientProducer {
+    async fn next_event(&self) -> Option<SseEvent> {
+        let idx = self.current_idx.fetch_add(1, Ordering::Relaxed);
+        if idx < self.event_count {
+            // Simulate event generation delay
+            sleep(Duration::from_millis(self.delay_ms)).await;
+            self.events_sent.fetch_add(1, Ordering::Relaxed);
+            Some(SseEvent::new(serde_json::json!({
+                "event_number": idx,
+                "timestamp": chrono::Utc::now().to_rfc3339()
+            })))
+        } else {
+            None
+        }
+    }
+}
+
+/// Producer that maintains consistent ordering even with rapid fire events
+struct RapidFireOrderedProducer {
+    event_count: usize,
+    current_idx: Arc<AtomicUsize>,
+    events_generated: Arc<AtomicUsize>,
+}
+
+impl RapidFireOrderedProducer {
+    fn new(event_count: usize) -> Self {
+        Self {
+            event_count,
+            current_idx: Arc::new(AtomicUsize::new(0)),
+            events_generated: Arc::new(AtomicUsize::new(0)),
+        }
+    }
+
+    fn get_generated_count(&self) -> usize {
+        self.events_generated.load(Ordering::Relaxed)
+    }
+}
+
+impl SseEventProducer for RapidFireOrderedProducer {
+    async fn next_event(&self) -> Option<SseEvent> {
+        let idx = self.current_idx.fetch_add(1, Ordering::Relaxed);
+        if idx < self.event_count {
+            self.events_generated.fetch_add(1, Ordering::Relaxed);
+            Some(
+                SseEvent::with_type(
+                    "rapid",
+                    serde_json::json!({
+                        "index": idx,
+                        "nanotime": std::time::SystemTime::now().duration_since(
+                            std::time::UNIX_EPOCH
+                        ).unwrap().as_nanos()
+                    }),
+                )
+                .with_id(format!("{}", idx)),
+            )
+        } else {
+            None
+        }
+    }
+}
+
+/// Producer that simulates keep-alive with periodic heartbeats
+struct KeepAliveProducer {
+    total_events: usize,
+    current_idx: Arc<AtomicUsize>,
+}
+
+impl KeepAliveProducer {
+    fn new(total_events: usize) -> Self {
+        Self {
+            total_events,
+            current_idx: Arc::new(AtomicUsize::new(0)),
+        }
+    }
+}
+
+impl SseEventProducer for KeepAliveProducer {
+    async fn next_event(&self) -> Option<SseEvent> {
+        let idx = self.current_idx.fetch_add(1, Ordering::Relaxed);
+        if idx < self.total_events {
+            Some(SseEvent::new(serde_json::json!({
+                "heartbeat": idx,
+                "alive": true
+            })))
+        } else {
+            None
+        }
+    }
+}
+
+/// Producer for graceful shutdown testing that tracks disconnections
+struct GracefulShutdownProducer {
+    total_events: usize,
+    current_idx: Arc<AtomicUsize>,
+    disconnect_called: Arc<AtomicBool>,
+}
+
+impl GracefulShutdownProducer {
+    fn new(total_events: usize) -> Self {
+        Self {
+            total_events,
+            current_idx: Arc::new(AtomicUsize::new(0)),
+            disconnect_called: Arc::new(AtomicBool::new(false)),
+        }
+    }
+
+    fn was_disconnect_called(&self) -> bool {
+        self.disconnect_called.load(Ordering::Relaxed)
+    }
+}
+
+impl SseEventProducer for GracefulShutdownProducer {
+    async fn next_event(&self) -> Option<SseEvent> {
+        let idx = self.current_idx.fetch_add(1, Ordering::Relaxed);
+        if idx < self.total_events {
+            Some(SseEvent::new(serde_json::json!({"index": idx})))
+        } else {
+            None
+        }
+    }
+
+    async fn on_disconnect(&self) {
+        self.disconnect_called.store(true, Ordering::Relaxed);
+    }
+}
+
+// ============================================================================
+// Test 1: SSE Connection Establishment and Event Streaming
+// ============================================================================
+
+#[tokio::test]
+async fn test_sse_connection_establishment_and_streaming() {
+    // Given: A sequential event producer with 5 events
+    let producer = SequentialEventProducer::new(5);
+
+    // When: Client connects and generates events
+    producer.on_connect().await;
+
+    let mut events_received = Vec::new();
+    for i in 0..5 {
+        if let Some(event) = producer.next_event().await {
+            // Verify event has the expected structure
+            assert_eq!(
+                event.data.get("sequence").and_then(|v| v.as_u64()),
+                Some(i as u64),
+                "Event {} has correct sequence number",
+                i
+            );
+            assert!(event.id.is_some(), "Event {} has ID for tracking", i);
+            events_received.push(event);
+        }
+    }
+
+    // Then: Verify all 5 events were received in order
+    assert_eq!(events_received.len(), 5, "All 5 events should be received");
+    for (idx, event) in events_received.iter().enumerate() {
+        assert_eq!(
+            event.data.get("sequence").and_then(|v| v.as_u64()),
+            Some(idx as u64),
+            "Event {} has correct sequence",
+            idx
+        );
+    }
+
+    // Verify stream ends cleanly after last event
+    assert!(
+        producer.next_event().await.is_none(),
+        "Stream should end after all events"
+    );
+}
+
+// ============================================================================
+// Test 2: Client Reconnection with Last-Event-ID Header
+// ============================================================================
+
+#[tokio::test]
+async fn test_client_reconnection_with_last_event_id() {
+    // Given: A set of events with IDs for reconnection tracking
+    let events = vec![
+        ("id-1".to_string(), serde_json::json!({"data": "event1"})),
+        ("id-2".to_string(), serde_json::json!({"data": "event2"})),
+        ("id-3".to_string(), serde_json::json!({"data": "event3"})),
+        ("id-4".to_string(), serde_json::json!({"data": "event4"})),
+    ];
+
+    let producer = ReconnectableEventProducer::new(events);
+
+    // When: Client connects first time
+    producer.on_connect().await;
+    assert_eq!(producer.get_connect_count(), 1);
+
+    // Consume first 2 events
+    let event1 = producer.next_event().await.unwrap();
+    let event1_id = event1.id.clone();
+    assert_eq!(event1_id, Some("id-1".to_string()));
+
+    let event2 = producer.next_event().await.unwrap();
+    let event2_id = event2.id.clone();
+    assert_eq!(event2_id, Some("id-2".to_string()));
+
+    // Simulate connection loss after event 2
+    // In real scenario, client would reconnect with Last-Event-ID: id-2
+
+    // When: Simulate new connection
+    producer.on_connect().await;
+    assert_eq!(producer.get_connect_count(), 2);
+
+    // Then: Client should be able to resume from where it left off
+    // The producer will continue from the current index
+    let event3 = producer.next_event().await.unwrap();
+    assert_eq!(event3.id, Some("id-3".to_string()));
+
+    // Verify reconnection didn't cause event loss
+    assert_eq!(producer.get_connect_count(), 2, "Client reconnected successfully");
+}
+
+// ============================================================================
+// Test 3: Event Ordering Preservation
+// ============================================================================
+
+#[tokio::test]
+async fn test_event_ordering_preservation() {
+    // Given: A rapid-fire producer generating many events
+    let producer = RapidFireOrderedProducer::new(100);
+
+    // When: We collect all events
+    let mut events_collected = Vec::new();
+    loop {
+        match producer.next_event().await {
+            Some(event) => events_collected.push(event),
+            None => break,
+        }
+    }
+
+    // Then: Verify events are in strict sequential order
+    assert_eq!(events_collected.len(), 100, "All 100 events should be collected");
+
+    let mut last_sequence = -1i32;
+    for (idx, event) in events_collected.iter().enumerate() {
+        let sequence = event.data.get("index").and_then(|v| v.as_i64()).unwrap() as i32;
+        assert_eq!(
+            sequence, idx as i32,
+            "Event at position {} has correct sequence number {}",
+            idx, sequence
+        );
+        assert!(sequence > last_sequence, "Events are in increasing order");
+        last_sequence = sequence;
+    }
+
+    // Verify we generated exactly 100 events
+    assert_eq!(
+        producer.get_generated_count(),
+        100,
+        "Exactly 100 events should be generated"
+    );
+}
+
+// ============================================================================
+// Test 4: Connection Cleanup on Client Disconnect
+// ============================================================================
+
+#[tokio::test]
+async fn test_connection_cleanup_on_disconnect() {
+    // Given: A producer that tracks connection lifecycle
+    let producer = SequentialEventProducer::new(3);
+
+    // When: Client connects
+    producer.on_connect().await;
+    assert_eq!(producer.get_connect_count(), 1, "Client should be marked as connected");
+
+    // Consume one event
+    let _event1 = producer.next_event().await;
+
+    // Simulate client disconnect
+    producer.on_disconnect().await;
+    assert_eq!(
+        producer.get_disconnect_count(),
+        1,
+        "Client should be marked as disconnected"
+    );
+
+    // Verify cleanup occurred (in real scenario, this would close resources)
+    assert!(producer.get_disconnect_count() > 0, "Disconnect hook was invoked");
+}
+
+// ============================================================================
+// Test 5: Keep-Alive Behavior
+// ============================================================================
+
+#[tokio::test]
+async fn test_keep_alive_behavior() {
+    // Given: A producer that sends periodic keep-alive events
+    let producer = KeepAliveProducer::new(5);
+
+    // When: We consume all events
+    let mut events = Vec::new();
+    loop {
+        match producer.next_event().await {
+            Some(event) => {
+                // Verify each event has a heartbeat field
+                assert!(
+                    event.data.get("heartbeat").is_some(),
+                    "Each event should contain heartbeat data"
+                );
+                assert!(
+                    event.data.get("alive").and_then(|v| v.as_bool()) == Some(true),
+                    "All events should indicate server is alive"
+                );
+                events.push(event);
+            }
+            None => break,
+        }
+    }
+
+    // Then: Verify keep-alive events were sent
+    assert_eq!(events.len(), 5, "All keep-alive events should be received");
+
+    // Verify stream eventually closes (not infinite keep-alive)
+    assert!(
+        producer.next_event().await.is_none(),
+        "Stream should terminate normally"
+    );
+}
+
+// ============================================================================
+// Test 6: Backpressure When Client is Slow
+// ============================================================================
+
+#[tokio::test]
+async fn test_backpressure_slow_client() {
+    // Given: A slow client producer with delayed event generation
+    let producer = SlowClientProducer::new(5, 10); // 10ms delay per event
+
+    // When: We generate events with intentional delays
+    let start = std::time::Instant::now();
+    let mut events_count = 0;
+
+    loop {
+        match producer.next_event().await {
+            Some(_event) => {
+                events_count += 1;
+            }
+            None => break,
+        }
+    }
+
+    let elapsed = start.elapsed();
+
+    // Then: Verify events were generated with expected delay
+    assert_eq!(events_count, 5, "All 5 events should be generated despite backpressure");
+
+    // Verify timing: should take at least 5 * 10ms = 50ms
+    assert!(
+        elapsed.as_millis() >= 50,
+        "Event generation should have delays, took {:?}ms",
+        elapsed.as_millis()
+    );
+
+    // Verify events were actually sent
+    assert_eq!(producer.get_events_sent(), 5, "All events should be marked as sent");
+}
+
+// ============================================================================
+// Test 7: Graceful Shutdown with Active Streams
+// ============================================================================
+
+#[tokio::test]
+async fn test_graceful_shutdown_with_active_streams() {
+    // Given: A producer with pending events
+    let producer = GracefulShutdownProducer::new(3);
+
+    // When: Connection is active and generating events
+    for _ in 0..2 {
+        let _ = producer.next_event().await;
+    }
+
+    // Simulate graceful shutdown
+    producer.on_disconnect().await;
+
+    // Then: Verify disconnect was called during shutdown
+    assert!(
+        producer.was_disconnect_called(),
+        "Disconnect should be called during graceful shutdown"
+    );
+
+    // Verify no panic occurred and stream can still be accessed
+    let remaining = producer.next_event().await;
+    assert!(remaining.is_some(), "Stream should continue until complete");
+}
+
+// ============================================================================
+// Test 8: Event IDs Preserved Through Stream
+// ============================================================================
+
+#[tokio::test]
+async fn test_event_ids_preserved_through_stream() {
+    // Given: A producer that assigns event IDs
+    let producer = SequentialEventProducer::new(10);
+
+    // When: We collect all events and their IDs
+    let mut event_ids = Vec::new();
+    loop {
+        match producer.next_event().await {
+            Some(event) => {
+                if let Some(id) = event.id.clone() {
+                    event_ids.push(id);
+                }
+            }
+            None => break,
+        }
+    }
+
+    // Then: Verify all events have unique IDs in expected format
+    assert_eq!(event_ids.len(), 10, "All 10 events should have IDs");
+
+    for (idx, id) in event_ids.iter().enumerate() {
+        assert_eq!(id, &format!("event-{}", idx), "Event ID should match expected format");
+    }
+
+    // Verify no duplicate IDs
+    let unique_ids: std::collections::HashSet<_> = event_ids.iter().cloned().collect();
+    assert_eq!(unique_ids.len(), event_ids.len(), "All event IDs should be unique");
+}
+
+// ============================================================================
+// Test 9: Multiple Concurrent Connections
+// ============================================================================
+
+#[tokio::test]
+async fn test_multiple_concurrent_connections() {
+    // Given: Multiple producers for simulating concurrent connections
+    let producer1 = Arc::new(SequentialEventProducer::new(5));
+    let producer2 = Arc::new(SequentialEventProducer::new(5));
+
+    // When: Both connections are active simultaneously
+    producer1.on_connect().await;
+    producer2.on_connect().await;
+
+    // Create concurrent tasks to simulate multiple clients
+    let handle1 = {
+        let producer = Arc::clone(&producer1);
+        tokio::spawn(async move {
+            let mut count = 0;
+            loop {
+                match producer.next_event().await {
+                    Some(_) => count += 1,
+                    None => break,
+                }
+            }
+            count
+        })
+    };
+
+    let handle2 = {
+        let producer = Arc::clone(&producer2);
+        tokio::spawn(async move {
+            let mut count = 0;
+            loop {
+                match producer.next_event().await {
+                    Some(_) => count += 1,
+                    None => break,
+                }
+            }
+            count
+        })
+    };
+
+    // Then: Both connections should complete independently
+    let count1 = handle1.await.unwrap();
+    let count2 = handle2.await.unwrap();
+
+    assert_eq!(count1, 5, "First connection should receive 5 events");
+    assert_eq!(count2, 5, "Second connection should receive 5 events");
+}
+
+// ============================================================================
+// Test 10: Event Type Preservation
+// ============================================================================
+
+#[tokio::test]
+async fn test_event_type_preservation() {
+    // Given: A producer that sends events with different types
+    let producer = SequentialEventProducer::new(5);
+
+    // When: We collect all events
+    let mut events = Vec::new();
+    loop {
+        match producer.next_event().await {
+            Some(event) => {
+                events.push(event);
+            }
+            None => break,
+        }
+    }
+
+    // Then: All events should have the correct type
+    assert_eq!(events.len(), 5);
+    for event in events {
+        assert_eq!(
+            event.event_type,
+            Some("data".to_string()),
+            "Event type should be preserved as 'data'"
+        );
+    }
+}
+
+// ============================================================================
+// Test 11: Empty Event Stream (No Events)
+// ============================================================================
+
+#[tokio::test]
+async fn test_empty_event_stream() {
+    // Given: A producer with zero events
+    let producer = SequentialEventProducer::new(0);
+
+    // When: We try to consume events
+    let event = producer.next_event().await;
+
+    // Then: Stream should be empty immediately
+    assert!(event.is_none(), "Empty stream should produce no events");
+}
+
+// ============================================================================
+// Test 12: Event Data Integrity Through Stream
+// ============================================================================
+
+#[tokio::test]
+async fn test_event_data_integrity_through_stream() {
+    // Given: Events with complex JSON data
+    let events = vec![
+        (
+            "id-1".to_string(),
+            serde_json::json!({
+                "name": "Alice",
+                "age": 30,
+                "active": true,
+                "tags": ["rust", "async"],
+                "metadata": {
+                    "created": "2024-01-01",
+                    "updated": "2024-01-02"
+                }
+            }),
+        ),
+        (
+            "id-2".to_string(),
+            serde_json::json!({
+                "name": "Bob",
+                "age": 25,
+                "active": false,
+                "tags": ["python"],
+                "metadata": {
+                    "created": "2024-01-03"
+                }
+            }),
+        ),
+    ];
+
+    let producer = ReconnectableEventProducer::new(events.clone());
+
+    // When: We consume events and verify data integrity
+    let event1 = producer.next_event().await.unwrap();
+    assert_eq!(event1.data.get("name").and_then(|v| v.as_str()), Some("Alice"));
+    assert_eq!(event1.data.get("age").and_then(|v| v.as_i64()), Some(30));
+    assert_eq!(
+        event1.data.get("tags").and_then(|v| v.as_array()).map(|a| a.len()),
+        Some(2)
+    );
+
+    let event2 = producer.next_event().await.unwrap();
+    assert_eq!(event2.data.get("name").and_then(|v| v.as_str()), Some("Bob"));
+    assert_eq!(event2.data.get("age").and_then(|v| v.as_i64()), Some(25));
+
+    // Then: All data should be preserved exactly
+    assert_eq!(
+        event1
+            .data
+            .get("metadata")
+            .and_then(|v| v.get("created"))
+            .and_then(|v| v.as_str()),
+        Some("2024-01-01")
+    );
+    assert_eq!(
+        event2
+            .data
+            .get("metadata")
+            .and_then(|v| v.get("created"))
+            .and_then(|v| v.as_str()),
+        Some("2024-01-03")
+    );
+}
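The spikard_http::sse surface that this test suite exercises can be inferred from the call sites above. The sketch below is reconstructed from usage alone, not copied from the vendored crate's source, so the exact field layout, parameter types, and default method bodies are assumptions:

use serde_json::Value;

// Event shape implied by the tests: a JSON payload in `data`, plus
// optional `id` and `event_type` fields (see `event.id`,
// `event.event_type`, `SseEvent::new`, `SseEvent::with_type`, and
// `.with_id` in the diff above).
pub struct SseEvent {
    pub data: Value,
    pub id: Option<String>,
    pub event_type: Option<String>,
}

impl SseEvent {
    // Untyped event: `SseEvent::new(json!(...))`.
    pub fn new(data: Value) -> Self {
        Self { data, id: None, event_type: None }
    }

    // Typed event: `SseEvent::with_type("data", json!(...))` would map to
    // the SSE `event:` field.
    pub fn with_type(event_type: impl Into<String>, data: Value) -> Self {
        Self { data, id: None, event_type: Some(event_type.into()) }
    }

    // Builder-style ID: `.with_id("event-0")` would map to the SSE `id:`
    // field that clients echo back via Last-Event-ID on reconnect.
    pub fn with_id(mut self, id: impl Into<String>) -> Self {
        self.id = Some(id.into());
        self
    }
}

// Trait shape implied by the producers: `next_event` drives the stream
// (returning `None` ends it), while `on_connect` / `on_disconnect` are
// lifecycle hooks that must default to no-ops, since several producers
// above override only one of them or neither.
pub trait SseEventProducer {
    async fn next_event(&self) -> Option<SseEvent>;

    async fn on_connect(&self) {}

    async fn on_disconnect(&self) {}
}

Given that layout, the suite would presumably run from the vendored workspace with something like `cargo test -p spikard-http --test sse_behavior`.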