spikard 0.6.2 → 0.7.1
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +90 -508
- data/ext/spikard_rb/Cargo.lock +3287 -0
- data/ext/spikard_rb/Cargo.toml +1 -1
- data/ext/spikard_rb/extconf.rb +3 -3
- data/lib/spikard/app.rb +72 -49
- data/lib/spikard/background.rb +38 -7
- data/lib/spikard/testing.rb +42 -4
- data/lib/spikard/version.rb +1 -1
- data/sig/spikard.rbs +4 -0
- data/vendor/crates/spikard-bindings-shared/Cargo.toml +1 -1
- data/vendor/crates/spikard-bindings-shared/tests/config_extractor_behavior.rs +191 -0
- data/vendor/crates/spikard-core/Cargo.toml +1 -1
- data/vendor/crates/spikard-core/src/http.rs +1 -0
- data/vendor/crates/spikard-core/src/lifecycle.rs +63 -0
- data/vendor/crates/spikard-core/tests/bindings_response_tests.rs +136 -0
- data/vendor/crates/spikard-core/tests/di_dependency_defaults.rs +37 -0
- data/vendor/crates/spikard-core/tests/error_mapper.rs +761 -0
- data/vendor/crates/spikard-core/tests/parameters_edge_cases.rs +106 -0
- data/vendor/crates/spikard-core/tests/parameters_full.rs +701 -0
- data/vendor/crates/spikard-core/tests/parameters_schema_and_formats.rs +301 -0
- data/vendor/crates/spikard-core/tests/request_data_roundtrip.rs +67 -0
- data/vendor/crates/spikard-core/tests/validation_coverage.rs +250 -0
- data/vendor/crates/spikard-core/tests/validation_error_paths.rs +45 -0
- data/vendor/crates/spikard-http/Cargo.toml +1 -1
- data/vendor/crates/spikard-http/src/jsonrpc/http_handler.rs +502 -0
- data/vendor/crates/spikard-http/src/jsonrpc/method_registry.rs +648 -0
- data/vendor/crates/spikard-http/src/jsonrpc/mod.rs +58 -0
- data/vendor/crates/spikard-http/src/jsonrpc/protocol.rs +1207 -0
- data/vendor/crates/spikard-http/src/jsonrpc/router.rs +2262 -0
- data/vendor/crates/spikard-http/src/testing/test_client.rs +155 -2
- data/vendor/crates/spikard-http/src/testing.rs +171 -0
- data/vendor/crates/spikard-http/src/websocket.rs +79 -6
- data/vendor/crates/spikard-http/tests/auth_integration.rs +647 -0
- data/vendor/crates/spikard-http/tests/common/test_builders.rs +633 -0
- data/vendor/crates/spikard-http/tests/di_handler_error_responses.rs +162 -0
- data/vendor/crates/spikard-http/tests/middleware_stack_integration.rs +389 -0
- data/vendor/crates/spikard-http/tests/request_extraction_full.rs +513 -0
- data/vendor/crates/spikard-http/tests/server_auth_middleware_behavior.rs +244 -0
- data/vendor/crates/spikard-http/tests/server_configured_router_behavior.rs +200 -0
- data/vendor/crates/spikard-http/tests/server_cors_preflight.rs +82 -0
- data/vendor/crates/spikard-http/tests/server_handler_wrappers.rs +464 -0
- data/vendor/crates/spikard-http/tests/server_method_router_additional_behavior.rs +286 -0
- data/vendor/crates/spikard-http/tests/server_method_router_coverage.rs +118 -0
- data/vendor/crates/spikard-http/tests/server_middleware_behavior.rs +99 -0
- data/vendor/crates/spikard-http/tests/server_middleware_branches.rs +206 -0
- data/vendor/crates/spikard-http/tests/server_openapi_jsonrpc_static.rs +281 -0
- data/vendor/crates/spikard-http/tests/server_router_behavior.rs +121 -0
- data/vendor/crates/spikard-http/tests/sse_full_behavior.rs +584 -0
- data/vendor/crates/spikard-http/tests/sse_handler_behavior.rs +130 -0
- data/vendor/crates/spikard-http/tests/test_client_requests.rs +167 -0
- data/vendor/crates/spikard-http/tests/testing_helpers.rs +87 -0
- data/vendor/crates/spikard-http/tests/testing_module_coverage.rs +156 -0
- data/vendor/crates/spikard-http/tests/urlencoded_content_type.rs +82 -0
- data/vendor/crates/spikard-http/tests/websocket_full_behavior.rs +440 -0
- data/vendor/crates/spikard-http/tests/websocket_integration.rs +152 -0
- data/vendor/crates/spikard-rb/Cargo.toml +1 -1
- data/vendor/crates/spikard-rb/src/gvl.rs +80 -0
- data/vendor/crates/spikard-rb/src/handler.rs +12 -9
- data/vendor/crates/spikard-rb/src/lib.rs +137 -124
- data/vendor/crates/spikard-rb/src/request.rs +342 -0
- data/vendor/crates/spikard-rb/src/runtime/server_runner.rs +1 -8
- data/vendor/crates/spikard-rb/src/server.rs +1 -8
- data/vendor/crates/spikard-rb/src/testing/client.rs +168 -9
- data/vendor/crates/spikard-rb/src/websocket.rs +119 -30
- data/vendor/crates/spikard-rb-macros/Cargo.toml +14 -0
- data/vendor/crates/spikard-rb-macros/src/lib.rs +52 -0
- metadata +44 -1
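
Most of the new Rust code in this release is test coverage, notably the two SSE suites reproduced below. They revolve around the `SseEventProducer` trait from `spikard_http::sse`: a producer returns `Some(SseEvent)` until the stream is finished, then `None`, and may optionally hook `on_connect`/`on_disconnect`. As a quick orientation, a minimal producer in the same shape the tests use might look like the sketch below; this is inferred from the test code only (the trait's actual definition is not part of this diff), and the type and field names here are illustrative.

```rust
use spikard_http::sse::{SseEvent, SseEventProducer};
use std::sync::atomic::{AtomicBool, Ordering};

/// One-shot producer: emits a single JSON event, then ends the stream.
/// (Sketch based on how the new tests implement the trait.)
struct HelloProducer {
    sent: AtomicBool,
}

impl SseEventProducer for HelloProducer {
    async fn next_event(&self) -> Option<SseEvent> {
        if self.sent.swap(true, Ordering::Relaxed) {
            // Returning None ends the event stream.
            None
        } else {
            Some(SseEvent::new(serde_json::json!({"message": "hello"})).with_id("hello-1".to_string()))
        }
    }
}
```

The `sse_handler_behavior.rs` suite then shows the rest of the wiring: wrap such a producer in `SseState::new` (or `SseState::with_schema` for validated payloads) and serve it through axum via `get(sse_handler::<P>)`.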
data/vendor/crates/spikard-http/tests/sse_full_behavior.rs (new file)

@@ -0,0 +1,584 @@

```rust
#![allow(clippy::pedantic, clippy::nursery, clippy::all)]
//! Comprehensive integration tests for Server-Sent Events (SSE) functionality
//!
//! These tests verify full end-to-end SSE behavior including:
//! - Event streaming with multiple events
//! - Event IDs and Last-Event-ID tracking
//! - Client reconnection with resume capability
//! - Event retry timeout handling
//! - Comment events and keep-alive
//! - Connection cleanup on disconnect
//! - Multi-line data field formatting
//! - Custom event types
//! - Large event payload handling
//! - Producer error handling

mod common;

use spikard_http::sse::{SseEvent, SseEventProducer};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

/// Producer that yields multiple numbered events in sequence
#[derive(Debug, Clone)]
struct MultiEventProducer {
    event_count: usize,
    current_idx: Arc<AtomicUsize>,
}

impl MultiEventProducer {
    fn new(event_count: usize) -> Self {
        Self {
            event_count,
            current_idx: Arc::new(AtomicUsize::new(0)),
        }
    }
}

impl SseEventProducer for MultiEventProducer {
    async fn next_event(&self) -> Option<SseEvent> {
        let idx = self.current_idx.fetch_add(1, Ordering::SeqCst);
        if idx < self.event_count {
            Some(
                SseEvent::new(serde_json::json!({
                    "event_number": idx,
                    "message": format!("Event {}", idx),
                    "timestamp": chrono::Utc::now().to_rfc3339()
                }))
                .with_id(format!("event-{}", idx)),
            )
        } else {
            None
        }
    }
}

/// Producer with event ID tracking for reconnection tests
#[derive(Debug, Clone)]
struct IdTrackedEventProducer {
    events: Vec<(String, serde_json::Value)>,
    current_idx: Arc<AtomicUsize>,
}

impl IdTrackedEventProducer {
    fn new(events: Vec<(String, serde_json::Value)>) -> Self {
        Self {
            events,
            current_idx: Arc::new(AtomicUsize::new(0)),
        }
    }

    fn get_current_idx(&self) -> usize {
        self.current_idx.load(Ordering::SeqCst)
    }
}

impl SseEventProducer for IdTrackedEventProducer {
    async fn next_event(&self) -> Option<SseEvent> {
        let idx = self.current_idx.fetch_add(1, Ordering::SeqCst);
        if idx < self.events.len() {
            let (id, data) = self.events[idx].clone();
            Some(SseEvent::new(data).with_id(id))
        } else {
            None
        }
    }
}

/// Producer that simulates retry timeout scenarios
#[derive(Debug, Clone)]
struct RetryTimeoutProducer {
    event_count: usize,
    current_idx: Arc<AtomicUsize>,
}

impl RetryTimeoutProducer {
    fn new(event_count: usize) -> Self {
        Self {
            event_count,
            current_idx: Arc::new(AtomicUsize::new(0)),
        }
    }
}

impl SseEventProducer for RetryTimeoutProducer {
    async fn next_event(&self) -> Option<SseEvent> {
        let idx = self.current_idx.fetch_add(1, Ordering::SeqCst);
        if idx < self.event_count {
            if idx % 2 == 0 {
                Some(
                    SseEvent::new(serde_json::json!({"index": idx, "with_retry": true}))
                        .with_id(format!("event-{}", idx))
                        .with_retry(3000),
                )
            } else {
                Some(
                    SseEvent::new(serde_json::json!({"index": idx, "with_retry": false}))
                        .with_id(format!("event-{}", idx)),
                )
            }
        } else {
            None
        }
    }
}

/// Producer that sends comment-like events for keep-alive
#[derive(Debug, Clone)]
struct KeepAliveProducer {
    event_count: usize,
    current_idx: Arc<AtomicUsize>,
}

impl KeepAliveProducer {
    fn new(event_count: usize) -> Self {
        Self {
            event_count,
            current_idx: Arc::new(AtomicUsize::new(0)),
        }
    }
}

impl SseEventProducer for KeepAliveProducer {
    async fn next_event(&self) -> Option<SseEvent> {
        let idx = self.current_idx.fetch_add(1, Ordering::SeqCst);
        if idx < self.event_count {
            if idx % 3 == 0 {
                Some(
                    SseEvent::with_type("data", serde_json::json!({"index": idx, "type": "real_event"}))
                        .with_id(format!("event-{}", idx)),
                )
            } else {
                Some(SseEvent::new(serde_json::json!({"index": idx, "type": "keep_alive"})))
            }
        } else {
            None
        }
    }
}

/// Producer that tracks disconnection lifecycle
#[derive(Debug, Clone)]
struct DisconnectTrackingProducer {
    event_count: usize,
    current_idx: Arc<AtomicUsize>,
    connect_count: Arc<AtomicUsize>,
    disconnect_count: Arc<AtomicUsize>,
}

impl DisconnectTrackingProducer {
    fn new(event_count: usize) -> Self {
        Self {
            event_count,
            current_idx: Arc::new(AtomicUsize::new(0)),
            connect_count: Arc::new(AtomicUsize::new(0)),
            disconnect_count: Arc::new(AtomicUsize::new(0)),
        }
    }

    fn get_connect_count(&self) -> usize {
        self.connect_count.load(Ordering::SeqCst)
    }

    fn get_disconnect_count(&self) -> usize {
        self.disconnect_count.load(Ordering::SeqCst)
    }
}

impl SseEventProducer for DisconnectTrackingProducer {
    async fn next_event(&self) -> Option<SseEvent> {
        let idx = self.current_idx.fetch_add(1, Ordering::SeqCst);
        if idx < self.event_count {
            Some(
                SseEvent::new(serde_json::json!({
                    "index": idx,
                    "message": format!("Event {}", idx)
                }))
                .with_id(format!("event-{}", idx)),
            )
        } else {
            None
        }
    }

    async fn on_connect(&self) {
        self.connect_count.fetch_add(1, Ordering::SeqCst);
    }

    async fn on_disconnect(&self) {
        self.disconnect_count.fetch_add(1, Ordering::SeqCst);
    }
}

/// Producer with custom event types
#[derive(Debug, Clone)]
struct CustomEventTypeProducer {
    event_count: usize,
    current_idx: Arc<AtomicUsize>,
}

impl CustomEventTypeProducer {
    fn new(event_count: usize) -> Self {
        Self {
            event_count,
            current_idx: Arc::new(AtomicUsize::new(0)),
        }
    }
}

impl SseEventProducer for CustomEventTypeProducer {
    async fn next_event(&self) -> Option<SseEvent> {
        let idx = self.current_idx.fetch_add(1, Ordering::SeqCst);
        if idx < self.event_count {
            let event_type = match idx % 3 {
                0 => "user_update",
                1 => "status_change",
                _ => "notification",
            };

            Some(
                SseEvent::with_type(
                    event_type,
                    serde_json::json!({
                        "index": idx,
                        "event_type": event_type
                    }),
                )
                .with_id(format!("event-{}", idx)),
            )
        } else {
            None
        }
    }
}

/// Producer with large event payloads
#[derive(Debug, Clone)]
struct LargePayloadProducer {
    event_count: usize,
    current_idx: Arc<AtomicUsize>,
}

impl LargePayloadProducer {
    fn new(event_count: usize) -> Self {
        Self {
            event_count,
            current_idx: Arc::new(AtomicUsize::new(0)),
        }
    }
}

impl SseEventProducer for LargePayloadProducer {
    async fn next_event(&self) -> Option<SseEvent> {
        let idx = self.current_idx.fetch_add(1, Ordering::SeqCst);
        if idx < self.event_count {
            let large_data: Vec<i32> = (0..25000).collect();
            Some(
                SseEvent::new(serde_json::json!({
                    "index": idx,
                    "large_array": large_data,
                    "metadata": {
                        "size": "large",
                        "description": "Large payload event"
                    }
                }))
                .with_id(format!("event-{}", idx)),
            )
        } else {
            None
        }
    }
}

/// Producer that simulates errors
#[derive(Debug, Clone)]
struct ErrorProducer {
    should_error: Arc<AtomicBool>,
    event_count: Arc<AtomicUsize>,
}

impl ErrorProducer {
    fn new() -> Self {
        Self {
            should_error: Arc::new(AtomicBool::new(false)),
            event_count: Arc::new(AtomicUsize::new(0)),
        }
    }

    fn enable_error(&self) {
        self.should_error.store(true, Ordering::SeqCst);
    }
}

impl SseEventProducer for ErrorProducer {
    async fn next_event(&self) -> Option<SseEvent> {
        if self.should_error.load(Ordering::SeqCst) {
            None
        } else {
            let idx = self.event_count.fetch_add(1, Ordering::SeqCst);
            Some(SseEvent::new(serde_json::json!({"index": idx})))
        }
    }
}

#[tokio::test]
async fn test_sse_event_streaming_multiple_events() {
    let producer = MultiEventProducer::new(5);

    let mut events_received = Vec::new();
    loop {
        match producer.next_event().await {
            Some(event) => events_received.push(event),
            None => break,
        }
    }

    assert_eq!(events_received.len(), 5);

    for (idx, event) in events_received.iter().enumerate() {
        assert_eq!(event.data.get("event_number").unwrap(), idx);
        assert_eq!(event.id, Some(format!("event-{}", idx)));
    }

    assert!(producer.next_event().await.is_none());
}

#[tokio::test]
async fn test_sse_event_with_id() {
    let producer = IdTrackedEventProducer::new(vec![
        ("id-1".to_string(), serde_json::json!({"data": "event1"})),
        ("id-2".to_string(), serde_json::json!({"data": "event2"})),
        ("id-3".to_string(), serde_json::json!({"data": "event3"})),
    ]);

    let mut events = Vec::new();
    loop {
        match producer.next_event().await {
            Some(event) => events.push(event),
            None => break,
        }
    }

    assert_eq!(events[0].id, Some("id-1".to_string()));
    assert_eq!(events[1].id, Some("id-2".to_string()));
    assert_eq!(events[2].id, Some("id-3".to_string()));
}

#[tokio::test]
async fn test_sse_client_reconnection_with_last_event_id() {
    let events = vec![
        ("id-1".to_string(), serde_json::json!({"seq": 1})),
        ("id-2".to_string(), serde_json::json!({"seq": 2})),
        ("id-3".to_string(), serde_json::json!({"seq": 3})),
        ("id-4".to_string(), serde_json::json!({"seq": 4})),
    ];

    let producer = IdTrackedEventProducer::new(events);

    let event1 = producer.next_event().await.unwrap();
    let event2 = producer.next_event().await.unwrap();

    assert_eq!(event1.id, Some("id-1".to_string()));
    assert_eq!(event2.id, Some("id-2".to_string()));
    assert_eq!(producer.get_current_idx(), 2);

    let event3 = producer.next_event().await.unwrap();
    let event4 = producer.next_event().await.unwrap();

    assert_eq!(event3.id, Some("id-3".to_string()));
    assert_eq!(event4.id, Some("id-4".to_string()));

    assert!(producer.next_event().await.is_none());
}

#[tokio::test]
async fn test_sse_event_retry_timeout() {
    let producer = RetryTimeoutProducer::new(6);

    let mut events = Vec::new();
    loop {
        match producer.next_event().await {
            Some(event) => events.push(event),
            None => break,
        }
    }

    assert_eq!(events[0].retry, Some(3000));
    assert_eq!(events[1].retry, None);
    assert_eq!(events[2].retry, Some(3000));
    assert_eq!(events[3].retry, None);
    assert_eq!(events[4].retry, Some(3000));
    assert_eq!(events[5].retry, None);
}

#[tokio::test]
async fn test_sse_comment_events() {
    let producer = KeepAliveProducer::new(9);

    let mut real_events = 0;
    let mut keep_alive_events = 0;

    loop {
        match producer.next_event().await {
            Some(event) => {
                if event.event_type == Some("data".to_string()) {
                    real_events += 1;
                } else if event.data.get("type").and_then(|v| v.as_str()) == Some("keep_alive") {
                    keep_alive_events += 1;
                }
            }
            None => break,
        }
    }

    assert_eq!(real_events, 3);
    assert_eq!(keep_alive_events, 6);
}

#[tokio::test]
async fn test_sse_connection_cleanup() {
    let producer = DisconnectTrackingProducer::new(3);

    producer.on_connect().await;
    assert_eq!(producer.get_connect_count(), 1);

    let _ = producer.next_event().await;
    let _ = producer.next_event().await;

    producer.on_disconnect().await;
    assert_eq!(producer.get_disconnect_count(), 1);

    assert_eq!(producer.get_connect_count(), 1);
    assert_eq!(producer.get_disconnect_count(), 1);
}

#[tokio::test]
async fn test_sse_event_with_multiple_data_lines() {
    let producer = IdTrackedEventProducer::new(vec![(
        "id-1".to_string(),
        serde_json::json!({
            "line1": "data line 1",
            "line2": "data line 2",
            "line3": "data line 3",
            "multiline": "this spans\nmultiple\nlines"
        }),
    )]);

    let event = producer.next_event().await.unwrap();

    assert!(event.data.get("line1").is_some());
    assert!(event.data.get("line2").is_some());
    assert!(event.data.get("line3").is_some());
    assert!(event.data.get("multiline").is_some());
    assert_eq!(event.id, Some("id-1".to_string()));
}

#[tokio::test]
async fn test_sse_event_custom_event_type() {
    let producer = CustomEventTypeProducer::new(9);

    let mut event_types = Vec::new();
    loop {
        match producer.next_event().await {
            Some(event) => {
                if let Some(evt_type) = event.event_type {
                    event_types.push(evt_type);
                }
            }
            None => break,
        }
    }

    assert_eq!(event_types[0], "user_update");
    assert_eq!(event_types[1], "status_change");
    assert_eq!(event_types[2], "notification");
    assert_eq!(event_types[3], "user_update");
    assert_eq!(event_types.len(), 9);
}

#[tokio::test]
async fn test_sse_large_event_data() {
    let producer = LargePayloadProducer::new(1);

    let event = producer.next_event().await.unwrap();

    assert!(event.data.get("large_array").is_some());
    let array = event.data.get("large_array").unwrap();

    if let Some(arr) = array.as_array() {
        assert_eq!(arr.len(), 25000);
    } else {
        panic!("Expected array");
    }

    let serialized = event.data.to_string();
    assert!(serialized.len() > 100000);
}

#[tokio::test]
async fn test_sse_producer_error() {
    let producer = ErrorProducer::new();

    let event1 = producer.next_event().await;
    assert!(event1.is_some());

    let event2 = producer.next_event().await;
    assert!(event2.is_some());

    producer.enable_error();

    let event3 = producer.next_event().await;
    assert!(event3.is_none());

    let event4 = producer.next_event().await;
    assert!(event4.is_none());
}

#[tokio::test]
async fn test_sse_rapid_event_generation() {
    let producer = MultiEventProducer::new(100);

    let mut count = 0;
    let start = std::time::Instant::now();

    loop {
        match producer.next_event().await {
            Some(_) => count += 1,
            None => break,
        }
    }

    let duration = start.elapsed();

    assert_eq!(count, 100);
    assert!(duration.as_secs() < 1, "Should generate 100 events in < 1 second");
}

#[tokio::test]
async fn test_sse_event_data_integrity() {
    let events = vec![
        (
            "id-1".to_string(),
            serde_json::json!({"unicode": "🚀💡🔥", "text": "hello"}),
        ),
        ("id-2".to_string(), serde_json::json!({"null_value": null, "empty": {}})),
        (
            "id-3".to_string(),
            serde_json::json!({"nested": {"deep": {"data": [1, 2, 3]}}}),
        ),
    ];

    let producer = IdTrackedEventProducer::new(events.clone());

    let mut received = Vec::new();
    loop {
        match producer.next_event().await {
            Some(event) => received.push(event),
            None => break,
        }
    }

    assert_eq!(received[0].data, events[0].1);
    assert_eq!(received[1].data, events[1].1);
    assert_eq!(received[2].data, events[2].1);
}
```
data/vendor/crates/spikard-http/tests/sse_handler_behavior.rs (new file)

@@ -0,0 +1,130 @@

```rust
use axum::{Router, routing::get};
use http_body_util::BodyExt;
use serde_json::json;
use spikard_http::sse::{SseEvent, SseEventProducer, SseState, sse_handler};
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use tokio::time::timeout;
use tower::ServiceExt;

struct CountingProducer {
    connect: Arc<AtomicUsize>,
    disconnect: Arc<AtomicUsize>,
    remaining: AtomicUsize,
}

impl CountingProducer {
    const fn new(connect: Arc<AtomicUsize>, disconnect: Arc<AtomicUsize>, events: usize) -> Self {
        Self {
            connect,
            disconnect,
            remaining: AtomicUsize::new(events),
        }
    }
}

impl SseEventProducer for CountingProducer {
    async fn next_event(&self) -> Option<SseEvent> {
        let prev = self.remaining.fetch_sub(1, Ordering::Relaxed);
        (prev > 0).then(|| SseEvent::new(json!({"message": "ok"})))
    }

    async fn on_connect(&self) {
        self.connect.fetch_add(1, Ordering::Relaxed);
    }

    async fn on_disconnect(&self) {
        self.disconnect.fetch_add(1, Ordering::Relaxed);
    }
}

#[tokio::test]
async fn sse_handler_invokes_connect_and_disconnect_per_request() {
    let connect = Arc::new(AtomicUsize::new(0));
    let disconnect = Arc::new(AtomicUsize::new(0));

    let state = SseState::new(CountingProducer::new(Arc::clone(&connect), Arc::clone(&disconnect), 1));
    let app = Router::new()
        .route("/events", get(sse_handler::<CountingProducer>))
        .with_state(state);

    let response = app
        .oneshot(
            axum::http::Request::builder()
                .uri("/events")
                .body(axum::body::Body::empty())
                .unwrap(),
        )
        .await
        .unwrap();

    let bytes = timeout(std::time::Duration::from_secs(5), response.into_body().collect())
        .await
        .expect("response body collection timed out")
        .unwrap()
        .to_bytes();
    let body = String::from_utf8_lossy(&bytes);

    assert!(body.contains("data:"), "expected SSE data frame, got: {body}");
    assert_eq!(connect.load(Ordering::Relaxed), 1);
    assert_eq!(disconnect.load(Ordering::Relaxed), 1);
}

#[tokio::test]
async fn sse_handler_emits_validation_error_when_schema_rejects_event() {
    use std::sync::atomic::AtomicBool;

    struct InvalidEventProducer {
        sent: AtomicBool,
    }

    impl SseEventProducer for InvalidEventProducer {
        async fn next_event(&self) -> Option<SseEvent> {
            if self.sent.swap(true, Ordering::Relaxed) {
                None
            } else {
                Some(SseEvent::new(json!({"count": "not-an-integer"})))
            }
        }
    }

    let schema = json!({
        "type": "object",
        "properties": {
            "count": {"type": "integer"}
        },
        "required": ["count"]
    });
    let state = SseState::with_schema(
        InvalidEventProducer {
            sent: AtomicBool::new(false),
        },
        Some(schema),
    )
    .expect("valid schema");
    let app = Router::new()
        .route("/events", get(sse_handler::<InvalidEventProducer>))
        .with_state(state);

    let response = app
        .oneshot(
            axum::http::Request::builder()
                .uri("/events")
                .body(axum::body::Body::empty())
                .unwrap(),
        )
        .await
        .unwrap();

    let bytes = timeout(std::time::Duration::from_secs(5), response.into_body().collect())
        .await
        .expect("response body collection timed out")
        .unwrap()
        .to_bytes();
    let body = String::from_utf8_lossy(&bytes);

    assert!(
        body.contains("validation_error"),
        "expected validation_error frame, got: {body}"
    );
}
```