spikard 0.5.0 → 0.6.1

This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
Files changed (135)
  1. checksums.yaml +4 -4
  2. data/LICENSE +1 -1
  3. data/README.md +674 -674
  4. data/ext/spikard_rb/Cargo.toml +17 -17
  5. data/ext/spikard_rb/extconf.rb +13 -10
  6. data/ext/spikard_rb/src/lib.rs +6 -6
  7. data/lib/spikard/app.rb +405 -405
  8. data/lib/spikard/background.rb +27 -27
  9. data/lib/spikard/config.rb +396 -396
  10. data/lib/spikard/converters.rb +13 -13
  11. data/lib/spikard/handler_wrapper.rb +113 -113
  12. data/lib/spikard/provide.rb +214 -214
  13. data/lib/spikard/response.rb +173 -173
  14. data/lib/spikard/schema.rb +243 -243
  15. data/lib/spikard/sse.rb +111 -111
  16. data/lib/spikard/streaming_response.rb +44 -44
  17. data/lib/spikard/testing.rb +256 -256
  18. data/lib/spikard/upload_file.rb +131 -131
  19. data/lib/spikard/version.rb +5 -5
  20. data/lib/spikard/websocket.rb +59 -59
  21. data/lib/spikard.rb +43 -43
  22. data/sig/spikard.rbs +366 -366
  23. data/vendor/crates/spikard-bindings-shared/Cargo.toml +63 -63
  24. data/vendor/crates/spikard-bindings-shared/examples/config_extraction.rs +132 -132
  25. data/vendor/crates/spikard-bindings-shared/src/config_extractor.rs +752 -752
  26. data/vendor/crates/spikard-bindings-shared/src/conversion_traits.rs +194 -194
  27. data/vendor/crates/spikard-bindings-shared/src/di_traits.rs +246 -246
  28. data/vendor/crates/spikard-bindings-shared/src/error_response.rs +401 -401
  29. data/vendor/crates/spikard-bindings-shared/src/handler_base.rs +238 -238
  30. data/vendor/crates/spikard-bindings-shared/src/lib.rs +24 -24
  31. data/vendor/crates/spikard-bindings-shared/src/lifecycle_base.rs +292 -292
  32. data/vendor/crates/spikard-bindings-shared/src/lifecycle_executor.rs +616 -616
  33. data/vendor/crates/spikard-bindings-shared/src/response_builder.rs +305 -305
  34. data/vendor/crates/spikard-bindings-shared/src/test_client_base.rs +248 -248
  35. data/vendor/crates/spikard-bindings-shared/src/validation_helpers.rs +351 -351
  36. data/vendor/crates/spikard-bindings-shared/tests/comprehensive_coverage.rs +454 -454
  37. data/vendor/crates/spikard-bindings-shared/tests/error_response_edge_cases.rs +383 -383
  38. data/vendor/crates/spikard-bindings-shared/tests/handler_base_integration.rs +280 -280
  39. data/vendor/crates/spikard-core/Cargo.toml +40 -40
  40. data/vendor/crates/spikard-core/src/bindings/mod.rs +3 -3
  41. data/vendor/crates/spikard-core/src/bindings/response.rs +133 -133
  42. data/vendor/crates/spikard-core/src/debug.rs +127 -127
  43. data/vendor/crates/spikard-core/src/di/container.rs +702 -702
  44. data/vendor/crates/spikard-core/src/di/dependency.rs +273 -273
  45. data/vendor/crates/spikard-core/src/di/error.rs +118 -118
  46. data/vendor/crates/spikard-core/src/di/factory.rs +534 -534
  47. data/vendor/crates/spikard-core/src/di/graph.rs +506 -506
  48. data/vendor/crates/spikard-core/src/di/mod.rs +192 -192
  49. data/vendor/crates/spikard-core/src/di/resolved.rs +405 -405
  50. data/vendor/crates/spikard-core/src/di/value.rs +281 -281
  51. data/vendor/crates/spikard-core/src/errors.rs +69 -69
  52. data/vendor/crates/spikard-core/src/http.rs +415 -415
  53. data/vendor/crates/spikard-core/src/lib.rs +29 -29
  54. data/vendor/crates/spikard-core/src/lifecycle.rs +1186 -1186
  55. data/vendor/crates/spikard-core/src/metadata.rs +389 -389
  56. data/vendor/crates/spikard-core/src/parameters.rs +2525 -2525
  57. data/vendor/crates/spikard-core/src/problem.rs +344 -344
  58. data/vendor/crates/spikard-core/src/request_data.rs +1154 -1154
  59. data/vendor/crates/spikard-core/src/router.rs +510 -510
  60. data/vendor/crates/spikard-core/src/schema_registry.rs +183 -183
  61. data/vendor/crates/spikard-core/src/type_hints.rs +304 -304
  62. data/vendor/crates/spikard-core/src/validation/error_mapper.rs +696 -688
  63. data/vendor/crates/spikard-core/src/validation/mod.rs +457 -457
  64. data/vendor/crates/spikard-http/Cargo.toml +62 -64
  65. data/vendor/crates/spikard-http/examples/sse-notifications.rs +148 -148
  66. data/vendor/crates/spikard-http/examples/websocket-chat.rs +92 -92
  67. data/vendor/crates/spikard-http/src/auth.rs +296 -296
  68. data/vendor/crates/spikard-http/src/background.rs +1860 -1860
  69. data/vendor/crates/spikard-http/src/bindings/mod.rs +3 -3
  70. data/vendor/crates/spikard-http/src/bindings/response.rs +1 -1
  71. data/vendor/crates/spikard-http/src/body_metadata.rs +8 -8
  72. data/vendor/crates/spikard-http/src/cors.rs +1005 -1005
  73. data/vendor/crates/spikard-http/src/debug.rs +128 -128
  74. data/vendor/crates/spikard-http/src/di_handler.rs +1668 -1668
  75. data/vendor/crates/spikard-http/src/handler_response.rs +901 -901
  76. data/vendor/crates/spikard-http/src/handler_trait.rs +838 -830
  77. data/vendor/crates/spikard-http/src/handler_trait_tests.rs +290 -290
  78. data/vendor/crates/spikard-http/src/lib.rs +534 -534
  79. data/vendor/crates/spikard-http/src/lifecycle/adapter.rs +230 -230
  80. data/vendor/crates/spikard-http/src/lifecycle.rs +1193 -1193
  81. data/vendor/crates/spikard-http/src/middleware/mod.rs +560 -540
  82. data/vendor/crates/spikard-http/src/middleware/multipart.rs +912 -912
  83. data/vendor/crates/spikard-http/src/middleware/urlencoded.rs +513 -513
  84. data/vendor/crates/spikard-http/src/middleware/validation.rs +768 -735
  85. data/vendor/crates/spikard-http/src/openapi/mod.rs +309 -309
  86. data/vendor/crates/spikard-http/src/openapi/parameter_extraction.rs +535 -535
  87. data/vendor/crates/spikard-http/src/openapi/schema_conversion.rs +1363 -1363
  88. data/vendor/crates/spikard-http/src/openapi/spec_generation.rs +665 -665
  89. data/vendor/crates/spikard-http/src/query_parser.rs +793 -793
  90. data/vendor/crates/spikard-http/src/response.rs +720 -720
  91. data/vendor/crates/spikard-http/src/server/handler.rs +1650 -1650
  92. data/vendor/crates/spikard-http/src/server/lifecycle_execution.rs +234 -234
  93. data/vendor/crates/spikard-http/src/server/mod.rs +1593 -1502
  94. data/vendor/crates/spikard-http/src/server/request_extraction.rs +789 -770
  95. data/vendor/crates/spikard-http/src/server/routing_factory.rs +629 -599
  96. data/vendor/crates/spikard-http/src/sse.rs +1409 -1409
  97. data/vendor/crates/spikard-http/src/testing/form.rs +52 -52
  98. data/vendor/crates/spikard-http/src/testing/multipart.rs +64 -60
  99. data/vendor/crates/spikard-http/src/testing/test_client.rs +311 -283
  100. data/vendor/crates/spikard-http/src/testing.rs +406 -377
  101. data/vendor/crates/spikard-http/src/websocket.rs +1404 -1375
  102. data/vendor/crates/spikard-http/tests/background_behavior.rs +832 -832
  103. data/vendor/crates/spikard-http/tests/common/handlers.rs +309 -309
  104. data/vendor/crates/spikard-http/tests/common/mod.rs +26 -26
  105. data/vendor/crates/spikard-http/tests/di_integration.rs +192 -192
  106. data/vendor/crates/spikard-http/tests/doc_snippets.rs +5 -5
  107. data/vendor/crates/spikard-http/tests/lifecycle_execution.rs +1093 -1093
  108. data/vendor/crates/spikard-http/tests/multipart_behavior.rs +656 -656
  109. data/vendor/crates/spikard-http/tests/server_config_builder.rs +314 -314
  110. data/vendor/crates/spikard-http/tests/sse_behavior.rs +620 -620
  111. data/vendor/crates/spikard-http/tests/websocket_behavior.rs +663 -663
  112. data/vendor/crates/spikard-rb/Cargo.toml +48 -48
  113. data/vendor/crates/spikard-rb/build.rs +199 -199
  114. data/vendor/crates/spikard-rb/src/background.rs +63 -63
  115. data/vendor/crates/spikard-rb/src/config/mod.rs +5 -5
  116. data/vendor/crates/spikard-rb/src/config/server_config.rs +285 -285
  117. data/vendor/crates/spikard-rb/src/conversion.rs +554 -554
  118. data/vendor/crates/spikard-rb/src/di/builder.rs +100 -100
  119. data/vendor/crates/spikard-rb/src/di/mod.rs +375 -375
  120. data/vendor/crates/spikard-rb/src/handler.rs +618 -618
  121. data/vendor/crates/spikard-rb/src/integration/mod.rs +3 -3
  122. data/vendor/crates/spikard-rb/src/lib.rs +1806 -1810
  123. data/vendor/crates/spikard-rb/src/lifecycle.rs +275 -275
  124. data/vendor/crates/spikard-rb/src/metadata/mod.rs +5 -5
  125. data/vendor/crates/spikard-rb/src/metadata/route_extraction.rs +442 -447
  126. data/vendor/crates/spikard-rb/src/runtime/mod.rs +5 -5
  127. data/vendor/crates/spikard-rb/src/runtime/server_runner.rs +324 -324
  128. data/vendor/crates/spikard-rb/src/server.rs +305 -308
  129. data/vendor/crates/spikard-rb/src/sse.rs +231 -231
  130. data/vendor/crates/spikard-rb/src/testing/client.rs +538 -551
  131. data/vendor/crates/spikard-rb/src/testing/mod.rs +7 -7
  132. data/vendor/crates/spikard-rb/src/testing/sse.rs +143 -143
  133. data/vendor/crates/spikard-rb/src/testing/websocket.rs +608 -635
  134. data/vendor/crates/spikard-rb/src/websocket.rs +377 -374
  135. metadata +15 -1
@@ -1,620 +1,620 @@
1
- #![allow(clippy::pedantic, clippy::nursery, clippy::all)]
2
- //! Behavioral tests for Server-Sent Events (SSE) functionality
3
- //!
4
- //! These tests verify end-to-end SSE behavior including:
5
- //! - Connection establishment and event streaming
6
- //! - Client reconnection with Last-Event-ID header
7
- //! - Event ordering preservation
8
- //! - Connection cleanup on disconnect
9
- //! - Keep-alive behavior
10
- //! - Backpressure handling for slow clients
11
- //! - Graceful shutdown with active streams
12
-
13
- mod common;
14
-
15
- use spikard_http::sse::{SseEvent, SseEventProducer};
16
- use std::sync::Arc;
17
- use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
18
- use std::time::Duration;
19
- use tokio::time::sleep;
20
-
21
- /// Producer that simulates a stream of numbered events (for ordering tests)
22
- struct SequentialEventProducer {
23
- total_events: usize,
24
- current_count: Arc<AtomicUsize>,
25
- connect_count: Arc<AtomicUsize>,
26
- disconnect_count: Arc<AtomicUsize>,
27
- }
28
-
29
- impl SequentialEventProducer {
30
- fn new(total_events: usize) -> Self {
31
- Self {
32
- total_events,
33
- current_count: Arc::new(AtomicUsize::new(0)),
34
- connect_count: Arc::new(AtomicUsize::new(0)),
35
- disconnect_count: Arc::new(AtomicUsize::new(0)),
36
- }
37
- }
38
-
39
- fn get_connect_count(&self) -> usize {
40
- self.connect_count.load(Ordering::Relaxed)
41
- }
42
-
43
- fn get_disconnect_count(&self) -> usize {
44
- self.disconnect_count.load(Ordering::Relaxed)
45
- }
46
- }
47
-
48
- impl SseEventProducer for SequentialEventProducer {
49
- async fn next_event(&self) -> Option<SseEvent> {
50
- let idx = self.current_count.fetch_add(1, Ordering::Relaxed);
51
- if idx < self.total_events {
52
- Some(
53
- SseEvent::with_type(
54
- "data",
55
- serde_json::json!({
56
- "sequence": idx,
57
- "message": format!("Event {}", idx)
58
- }),
59
- )
60
- .with_id(format!("event-{}", idx)),
61
- )
62
- } else {
63
- None
64
- }
65
- }
66
-
67
- async fn on_connect(&self) {
68
- self.connect_count.fetch_add(1, Ordering::Relaxed);
69
- }
70
-
71
- async fn on_disconnect(&self) {
72
- self.disconnect_count.fetch_add(1, Ordering::Relaxed);
73
- }
74
- }
75
-
76
- /// Producer that supports reconnection with Last-Event-ID tracking
77
- struct ReconnectableEventProducer {
78
- events: Vec<(String, serde_json::Value)>,
79
- current_idx: Arc<AtomicUsize>,
80
- connect_count: Arc<AtomicUsize>,
81
- }
82
-
83
- impl ReconnectableEventProducer {
84
- fn new(events: Vec<(String, serde_json::Value)>) -> Self {
85
- Self {
86
- events,
87
- current_idx: Arc::new(AtomicUsize::new(0)),
88
- connect_count: Arc::new(AtomicUsize::new(0)),
89
- }
90
- }
91
-
92
- fn get_connect_count(&self) -> usize {
93
- self.connect_count.load(Ordering::Relaxed)
94
- }
95
- }
96
-
97
- impl SseEventProducer for ReconnectableEventProducer {
98
- async fn next_event(&self) -> Option<SseEvent> {
99
- let idx = self.current_idx.fetch_add(1, Ordering::Relaxed);
100
- if idx < self.events.len() {
101
- let (id, data) = self.events[idx].clone();
102
- Some(SseEvent::with_type("update", data).with_id(id.clone()))
103
- } else {
104
- None
105
- }
106
- }
107
-
108
- async fn on_connect(&self) {
109
- self.connect_count.fetch_add(1, Ordering::Relaxed);
110
- }
111
- }
112
-
113
- /// Producer that sends events with configurable delays for backpressure testing
114
- struct SlowClientProducer {
115
- event_count: usize,
116
- delay_ms: u64,
117
- current_idx: Arc<AtomicUsize>,
118
- events_sent: Arc<AtomicUsize>,
119
- }
120
-
121
- impl SlowClientProducer {
122
- fn new(event_count: usize, delay_ms: u64) -> Self {
123
- Self {
124
- event_count,
125
- delay_ms,
126
- current_idx: Arc::new(AtomicUsize::new(0)),
127
- events_sent: Arc::new(AtomicUsize::new(0)),
128
- }
129
- }
130
-
131
- fn get_events_sent(&self) -> usize {
132
- self.events_sent.load(Ordering::Relaxed)
133
- }
134
- }
135
-
136
- impl SseEventProducer for SlowClientProducer {
137
- async fn next_event(&self) -> Option<SseEvent> {
138
- let idx = self.current_idx.fetch_add(1, Ordering::Relaxed);
139
- if idx < self.event_count {
140
- sleep(Duration::from_millis(self.delay_ms)).await;
141
- self.events_sent.fetch_add(1, Ordering::Relaxed);
142
- Some(SseEvent::new(serde_json::json!({
143
- "event_number": idx,
144
- "timestamp": chrono::Utc::now().to_rfc3339()
145
- })))
146
- } else {
147
- None
148
- }
149
- }
150
- }
151
-
152
- /// Producer that maintains consistent ordering even with rapid fire events
153
- struct RapidFireOrderedProducer {
154
- event_count: usize,
155
- current_idx: Arc<AtomicUsize>,
156
- events_generated: Arc<AtomicUsize>,
157
- }
158
-
159
- impl RapidFireOrderedProducer {
160
- fn new(event_count: usize) -> Self {
161
- Self {
162
- event_count,
163
- current_idx: Arc::new(AtomicUsize::new(0)),
164
- events_generated: Arc::new(AtomicUsize::new(0)),
165
- }
166
- }
167
-
168
- fn get_generated_count(&self) -> usize {
169
- self.events_generated.load(Ordering::Relaxed)
170
- }
171
- }
172
-
173
- impl SseEventProducer for RapidFireOrderedProducer {
174
- async fn next_event(&self) -> Option<SseEvent> {
175
- let idx = self.current_idx.fetch_add(1, Ordering::Relaxed);
176
- if idx < self.event_count {
177
- self.events_generated.fetch_add(1, Ordering::Relaxed);
178
- Some(
179
- SseEvent::with_type(
180
- "rapid",
181
- serde_json::json!({
182
- "index": idx,
183
- "nanotime": std::time::SystemTime::now().duration_since(
184
- std::time::UNIX_EPOCH
185
- ).unwrap().as_nanos()
186
- }),
187
- )
188
- .with_id(format!("{}", idx)),
189
- )
190
- } else {
191
- None
192
- }
193
- }
194
- }
195
-
196
- /// Producer that simulates keep-alive with periodic heartbeats
197
- struct KeepAliveProducer {
198
- total_events: usize,
199
- current_idx: Arc<AtomicUsize>,
200
- }
201
-
202
- impl KeepAliveProducer {
203
- fn new(total_events: usize) -> Self {
204
- Self {
205
- total_events,
206
- current_idx: Arc::new(AtomicUsize::new(0)),
207
- }
208
- }
209
- }
210
-
211
- impl SseEventProducer for KeepAliveProducer {
212
- async fn next_event(&self) -> Option<SseEvent> {
213
- let idx = self.current_idx.fetch_add(1, Ordering::Relaxed);
214
- if idx < self.total_events {
215
- Some(SseEvent::new(serde_json::json!({
216
- "heartbeat": idx,
217
- "alive": true
218
- })))
219
- } else {
220
- None
221
- }
222
- }
223
- }
224
-
225
- /// Producer for graceful shutdown testing that tracks disconnections
226
- struct GracefulShutdownProducer {
227
- total_events: usize,
228
- current_idx: Arc<AtomicUsize>,
229
- disconnect_called: Arc<AtomicBool>,
230
- }
231
-
232
- impl GracefulShutdownProducer {
233
- fn new(total_events: usize) -> Self {
234
- Self {
235
- total_events,
236
- current_idx: Arc::new(AtomicUsize::new(0)),
237
- disconnect_called: Arc::new(AtomicBool::new(false)),
238
- }
239
- }
240
-
241
- fn was_disconnect_called(&self) -> bool {
242
- self.disconnect_called.load(Ordering::Relaxed)
243
- }
244
- }
245
-
246
- impl SseEventProducer for GracefulShutdownProducer {
247
- async fn next_event(&self) -> Option<SseEvent> {
248
- let idx = self.current_idx.fetch_add(1, Ordering::Relaxed);
249
- if idx < self.total_events {
250
- Some(SseEvent::new(serde_json::json!({"index": idx})))
251
- } else {
252
- None
253
- }
254
- }
255
-
256
- async fn on_disconnect(&self) {
257
- self.disconnect_called.store(true, Ordering::Relaxed);
258
- }
259
- }
260
-
261
- #[tokio::test]
262
- async fn test_sse_connection_establishment_and_streaming() {
263
- let producer = SequentialEventProducer::new(5);
264
-
265
- producer.on_connect().await;
266
-
267
- let mut events_received = Vec::new();
268
- for i in 0..5 {
269
- if let Some(event) = producer.next_event().await {
270
- assert_eq!(
271
- event.data.get("sequence").and_then(|v| v.as_u64()),
272
- Some(i as u64),
273
- "Event {} has correct sequence number",
274
- i
275
- );
276
- assert!(event.id.is_some(), "Event {} has ID for tracking", i);
277
- events_received.push(event);
278
- }
279
- }
280
-
281
- assert_eq!(events_received.len(), 5, "All 5 events should be received");
282
- for (idx, event) in events_received.iter().enumerate() {
283
- assert_eq!(
284
- event.data.get("sequence").and_then(|v| v.as_u64()),
285
- Some(idx as u64),
286
- "Event {} has correct sequence",
287
- idx
288
- );
289
- }
290
-
291
- assert!(
292
- producer.next_event().await.is_none(),
293
- "Stream should end after all events"
294
- );
295
- }
296
-
297
- #[tokio::test]
298
- async fn test_client_reconnection_with_last_event_id() {
299
- let events = vec![
300
- ("id-1".to_string(), serde_json::json!({"data": "event1"})),
301
- ("id-2".to_string(), serde_json::json!({"data": "event2"})),
302
- ("id-3".to_string(), serde_json::json!({"data": "event3"})),
303
- ("id-4".to_string(), serde_json::json!({"data": "event4"})),
304
- ];
305
-
306
- let producer = ReconnectableEventProducer::new(events);
307
-
308
- producer.on_connect().await;
309
- assert_eq!(producer.get_connect_count(), 1);
310
-
311
- let event1 = producer.next_event().await.unwrap();
312
- let event1_id = event1.id.clone();
313
- assert_eq!(event1_id, Some("id-1".to_string()));
314
-
315
- let event2 = producer.next_event().await.unwrap();
316
- let event2_id = event2.id.clone();
317
- assert_eq!(event2_id, Some("id-2".to_string()));
318
-
319
- producer.on_connect().await;
320
- assert_eq!(producer.get_connect_count(), 2);
321
-
322
- let event3 = producer.next_event().await.unwrap();
323
- assert_eq!(event3.id, Some("id-3".to_string()));
324
-
325
- assert_eq!(producer.get_connect_count(), 2, "Client reconnected successfully");
326
- }
327
-
328
- #[tokio::test]
329
- async fn test_event_ordering_preservation() {
330
- let producer = RapidFireOrderedProducer::new(100);
331
-
332
- let mut events_collected = Vec::new();
333
- loop {
334
- match producer.next_event().await {
335
- Some(event) => events_collected.push(event),
336
- None => break,
337
- }
338
- }
339
-
340
- assert_eq!(events_collected.len(), 100, "All 100 events should be collected");
341
-
342
- let mut last_sequence = -1i32;
343
- for (idx, event) in events_collected.iter().enumerate() {
344
- let sequence = event.data.get("index").and_then(|v| v.as_i64()).unwrap() as i32;
345
- assert_eq!(
346
- sequence, idx as i32,
347
- "Event at position {} has correct sequence number {}",
348
- idx, sequence
349
- );
350
- assert!(sequence > last_sequence, "Events are in increasing order");
351
- last_sequence = sequence;
352
- }
353
-
354
- assert_eq!(
355
- producer.get_generated_count(),
356
- 100,
357
- "Exactly 100 events should be generated"
358
- );
359
- }
360
-
361
- #[tokio::test]
362
- async fn test_connection_cleanup_on_disconnect() {
363
- let producer = SequentialEventProducer::new(3);
364
-
365
- producer.on_connect().await;
366
- assert_eq!(producer.get_connect_count(), 1, "Client should be marked as connected");
367
-
368
- let _event1 = producer.next_event().await;
369
-
370
- producer.on_disconnect().await;
371
- assert_eq!(
372
- producer.get_disconnect_count(),
373
- 1,
374
- "Client should be marked as disconnected"
375
- );
376
-
377
- assert!(producer.get_disconnect_count() > 0, "Disconnect hook was invoked");
378
- }
379
-
380
- #[tokio::test]
381
- async fn test_keep_alive_behavior() {
382
- let producer = KeepAliveProducer::new(5);
383
-
384
- let mut events = Vec::new();
385
- loop {
386
- match producer.next_event().await {
387
- Some(event) => {
388
- assert!(
389
- event.data.get("heartbeat").is_some(),
390
- "Each event should contain heartbeat data"
391
- );
392
- assert!(
393
- event.data.get("alive").and_then(|v| v.as_bool()) == Some(true),
394
- "All events should indicate server is alive"
395
- );
396
- events.push(event);
397
- }
398
- None => break,
399
- }
400
- }
401
-
402
- assert_eq!(events.len(), 5, "All keep-alive events should be received");
403
-
404
- assert!(
405
- producer.next_event().await.is_none(),
406
- "Stream should terminate normally"
407
- );
408
- }
409
-
410
- #[tokio::test]
411
- async fn test_backpressure_slow_client() {
412
- let producer = SlowClientProducer::new(5, 10);
413
-
414
- let start = std::time::Instant::now();
415
- let mut events_count = 0;
416
-
417
- loop {
418
- match producer.next_event().await {
419
- Some(_event) => {
420
- events_count += 1;
421
- }
422
- None => break,
423
- }
424
- }
425
-
426
- let elapsed = start.elapsed();
427
-
428
- assert_eq!(events_count, 5, "All 5 events should be generated despite backpressure");
429
-
430
- assert!(
431
- elapsed.as_millis() >= 50,
432
- "Event generation should have delays, took {:?}ms",
433
- elapsed.as_millis()
434
- );
435
-
436
- assert_eq!(producer.get_events_sent(), 5, "All events should be marked as sent");
437
- }
438
-
439
- #[tokio::test]
440
- async fn test_graceful_shutdown_with_active_streams() {
441
- let producer = GracefulShutdownProducer::new(3);
442
-
443
- for _ in 0..2 {
444
- let _ = producer.next_event().await;
445
- }
446
-
447
- producer.on_disconnect().await;
448
-
449
- assert!(
450
- producer.was_disconnect_called(),
451
- "Disconnect should be called during graceful shutdown"
452
- );
453
-
454
- let remaining = producer.next_event().await;
455
- assert!(remaining.is_some(), "Stream should continue until complete");
456
- }
457
-
458
- #[tokio::test]
459
- async fn test_event_ids_preserved_through_stream() {
460
- let producer = SequentialEventProducer::new(10);
461
-
462
- let mut event_ids = Vec::new();
463
- loop {
464
- match producer.next_event().await {
465
- Some(event) => {
466
- if let Some(id) = event.id.clone() {
467
- event_ids.push(id);
468
- }
469
- }
470
- None => break,
471
- }
472
- }
473
-
474
- assert_eq!(event_ids.len(), 10, "All 10 events should have IDs");
475
-
476
- for (idx, id) in event_ids.iter().enumerate() {
477
- assert_eq!(id, &format!("event-{}", idx), "Event ID should match expected format");
478
- }
479
-
480
- let unique_ids: std::collections::HashSet<_> = event_ids.iter().cloned().collect();
481
- assert_eq!(unique_ids.len(), event_ids.len(), "All event IDs should be unique");
482
- }
483
-
484
- #[tokio::test]
485
- async fn test_multiple_concurrent_connections() {
486
- let producer1 = Arc::new(SequentialEventProducer::new(5));
487
- let producer2 = Arc::new(SequentialEventProducer::new(5));
488
-
489
- producer1.on_connect().await;
490
- producer2.on_connect().await;
491
-
492
- let handle1 = {
493
- let producer = Arc::clone(&producer1);
494
- tokio::spawn(async move {
495
- let mut count = 0;
496
- loop {
497
- match producer.next_event().await {
498
- Some(_) => count += 1,
499
- None => break,
500
- }
501
- }
502
- count
503
- })
504
- };
505
-
506
- let handle2 = {
507
- let producer = Arc::clone(&producer2);
508
- tokio::spawn(async move {
509
- let mut count = 0;
510
- loop {
511
- match producer.next_event().await {
512
- Some(_) => count += 1,
513
- None => break,
514
- }
515
- }
516
- count
517
- })
518
- };
519
-
520
- let count1 = handle1.await.unwrap();
521
- let count2 = handle2.await.unwrap();
522
-
523
- assert_eq!(count1, 5, "First connection should receive 5 events");
524
- assert_eq!(count2, 5, "Second connection should receive 5 events");
525
- }
526
-
527
- #[tokio::test]
528
- async fn test_event_type_preservation() {
529
- let producer = SequentialEventProducer::new(5);
530
-
531
- let mut events = Vec::new();
532
- loop {
533
- match producer.next_event().await {
534
- Some(event) => {
535
- events.push(event);
536
- }
537
- None => break,
538
- }
539
- }
540
-
541
- assert_eq!(events.len(), 5);
542
- for event in events {
543
- assert_eq!(
544
- event.event_type,
545
- Some("data".to_string()),
546
- "Event type should be preserved as 'data'"
547
- );
548
- }
549
- }
550
-
551
- #[tokio::test]
552
- async fn test_empty_event_stream() {
553
- let producer = SequentialEventProducer::new(0);
554
-
555
- let event = producer.next_event().await;
556
-
557
- assert!(event.is_none(), "Empty stream should produce no events");
558
- }
559
-
560
- #[tokio::test]
561
- async fn test_event_data_integrity_through_stream() {
562
- let events = vec![
563
- (
564
- "id-1".to_string(),
565
- serde_json::json!({
566
- "name": "Alice",
567
- "age": 30,
568
- "active": true,
569
- "tags": ["rust", "async"],
570
- "metadata": {
571
- "created": "2024-01-01",
572
- "updated": "2024-01-02"
573
- }
574
- }),
575
- ),
576
- (
577
- "id-2".to_string(),
578
- serde_json::json!({
579
- "name": "Bob",
580
- "age": 25,
581
- "active": false,
582
- "tags": ["python"],
583
- "metadata": {
584
- "created": "2024-01-03"
585
- }
586
- }),
587
- ),
588
- ];
589
-
590
- let producer = ReconnectableEventProducer::new(events.clone());
591
-
592
- let event1 = producer.next_event().await.unwrap();
593
- assert_eq!(event1.data.get("name").and_then(|v| v.as_str()), Some("Alice"));
594
- assert_eq!(event1.data.get("age").and_then(|v| v.as_i64()), Some(30));
595
- assert_eq!(
596
- event1.data.get("tags").and_then(|v| v.as_array()).map(|a| a.len()),
597
- Some(2)
598
- );
599
-
600
- let event2 = producer.next_event().await.unwrap();
601
- assert_eq!(event2.data.get("name").and_then(|v| v.as_str()), Some("Bob"));
602
- assert_eq!(event2.data.get("age").and_then(|v| v.as_i64()), Some(25));
603
-
604
- assert_eq!(
605
- event1
606
- .data
607
- .get("metadata")
608
- .and_then(|v| v.get("created"))
609
- .and_then(|v| v.as_str()),
610
- Some("2024-01-01")
611
- );
612
- assert_eq!(
613
- event2
614
- .data
615
- .get("metadata")
616
- .and_then(|v| v.get("created"))
617
- .and_then(|v| v.as_str()),
618
- Some("2024-01-03")
619
- );
620
- }
1
+ #![allow(clippy::pedantic, clippy::nursery, clippy::all)]
2
+ //! Behavioral tests for Server-Sent Events (SSE) functionality
3
+ //!
4
+ //! These tests verify end-to-end SSE behavior including:
5
+ //! - Connection establishment and event streaming
6
+ //! - Client reconnection with Last-Event-ID header
7
+ //! - Event ordering preservation
8
+ //! - Connection cleanup on disconnect
9
+ //! - Keep-alive behavior
10
+ //! - Backpressure handling for slow clients
11
+ //! - Graceful shutdown with active streams
12
+
13
+ mod common;
14
+
15
+ use spikard_http::sse::{SseEvent, SseEventProducer};
16
+ use std::sync::Arc;
17
+ use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
18
+ use std::time::Duration;
19
+ use tokio::time::sleep;
20
+
21
+ /// Producer that simulates a stream of numbered events (for ordering tests)
22
+ struct SequentialEventProducer {
23
+ total_events: usize,
24
+ current_count: Arc<AtomicUsize>,
25
+ connect_count: Arc<AtomicUsize>,
26
+ disconnect_count: Arc<AtomicUsize>,
27
+ }
28
+
29
+ impl SequentialEventProducer {
30
+ fn new(total_events: usize) -> Self {
31
+ Self {
32
+ total_events,
33
+ current_count: Arc::new(AtomicUsize::new(0)),
34
+ connect_count: Arc::new(AtomicUsize::new(0)),
35
+ disconnect_count: Arc::new(AtomicUsize::new(0)),
36
+ }
37
+ }
38
+
39
+ fn get_connect_count(&self) -> usize {
40
+ self.connect_count.load(Ordering::Relaxed)
41
+ }
42
+
43
+ fn get_disconnect_count(&self) -> usize {
44
+ self.disconnect_count.load(Ordering::Relaxed)
45
+ }
46
+ }
47
+
48
+ impl SseEventProducer for SequentialEventProducer {
49
+ async fn next_event(&self) -> Option<SseEvent> {
50
+ let idx = self.current_count.fetch_add(1, Ordering::Relaxed);
51
+ if idx < self.total_events {
52
+ Some(
53
+ SseEvent::with_type(
54
+ "data",
55
+ serde_json::json!({
56
+ "sequence": idx,
57
+ "message": format!("Event {}", idx)
58
+ }),
59
+ )
60
+ .with_id(format!("event-{}", idx)),
61
+ )
62
+ } else {
63
+ None
64
+ }
65
+ }
66
+
67
+ async fn on_connect(&self) {
68
+ self.connect_count.fetch_add(1, Ordering::Relaxed);
69
+ }
70
+
71
+ async fn on_disconnect(&self) {
72
+ self.disconnect_count.fetch_add(1, Ordering::Relaxed);
73
+ }
74
+ }
75
+
76
+ /// Producer that supports reconnection with Last-Event-ID tracking
77
+ struct ReconnectableEventProducer {
78
+ events: Vec<(String, serde_json::Value)>,
79
+ current_idx: Arc<AtomicUsize>,
80
+ connect_count: Arc<AtomicUsize>,
81
+ }
82
+
83
+ impl ReconnectableEventProducer {
84
+ fn new(events: Vec<(String, serde_json::Value)>) -> Self {
85
+ Self {
86
+ events,
87
+ current_idx: Arc::new(AtomicUsize::new(0)),
88
+ connect_count: Arc::new(AtomicUsize::new(0)),
89
+ }
90
+ }
91
+
92
+ fn get_connect_count(&self) -> usize {
93
+ self.connect_count.load(Ordering::Relaxed)
94
+ }
95
+ }
96
+
97
+ impl SseEventProducer for ReconnectableEventProducer {
98
+ async fn next_event(&self) -> Option<SseEvent> {
99
+ let idx = self.current_idx.fetch_add(1, Ordering::Relaxed);
100
+ if idx < self.events.len() {
101
+ let (id, data) = self.events[idx].clone();
102
+ Some(SseEvent::with_type("update", data).with_id(id.clone()))
103
+ } else {
104
+ None
105
+ }
106
+ }
107
+
108
+ async fn on_connect(&self) {
109
+ self.connect_count.fetch_add(1, Ordering::Relaxed);
110
+ }
111
+ }
112
+
113
+ /// Producer that sends events with configurable delays for backpressure testing
114
+ struct SlowClientProducer {
115
+ event_count: usize,
116
+ delay_ms: u64,
117
+ current_idx: Arc<AtomicUsize>,
118
+ events_sent: Arc<AtomicUsize>,
119
+ }
120
+
121
+ impl SlowClientProducer {
122
+ fn new(event_count: usize, delay_ms: u64) -> Self {
123
+ Self {
124
+ event_count,
125
+ delay_ms,
126
+ current_idx: Arc::new(AtomicUsize::new(0)),
127
+ events_sent: Arc::new(AtomicUsize::new(0)),
128
+ }
129
+ }
130
+
131
+ fn get_events_sent(&self) -> usize {
132
+ self.events_sent.load(Ordering::Relaxed)
133
+ }
134
+ }
135
+
136
+ impl SseEventProducer for SlowClientProducer {
137
+ async fn next_event(&self) -> Option<SseEvent> {
138
+ let idx = self.current_idx.fetch_add(1, Ordering::Relaxed);
139
+ if idx < self.event_count {
140
+ sleep(Duration::from_millis(self.delay_ms)).await;
141
+ self.events_sent.fetch_add(1, Ordering::Relaxed);
142
+ Some(SseEvent::new(serde_json::json!({
143
+ "event_number": idx,
144
+ "timestamp": chrono::Utc::now().to_rfc3339()
145
+ })))
146
+ } else {
147
+ None
148
+ }
149
+ }
150
+ }
151
+
152
+ /// Producer that maintains consistent ordering even with rapid fire events
153
+ struct RapidFireOrderedProducer {
154
+ event_count: usize,
155
+ current_idx: Arc<AtomicUsize>,
156
+ events_generated: Arc<AtomicUsize>,
157
+ }
158
+
159
+ impl RapidFireOrderedProducer {
160
+ fn new(event_count: usize) -> Self {
161
+ Self {
162
+ event_count,
163
+ current_idx: Arc::new(AtomicUsize::new(0)),
164
+ events_generated: Arc::new(AtomicUsize::new(0)),
165
+ }
166
+ }
167
+
168
+ fn get_generated_count(&self) -> usize {
169
+ self.events_generated.load(Ordering::Relaxed)
170
+ }
171
+ }
172
+
173
+ impl SseEventProducer for RapidFireOrderedProducer {
174
+ async fn next_event(&self) -> Option<SseEvent> {
175
+ let idx = self.current_idx.fetch_add(1, Ordering::Relaxed);
176
+ if idx < self.event_count {
177
+ self.events_generated.fetch_add(1, Ordering::Relaxed);
178
+ Some(
179
+ SseEvent::with_type(
180
+ "rapid",
181
+ serde_json::json!({
182
+ "index": idx,
183
+ "nanotime": std::time::SystemTime::now().duration_since(
184
+ std::time::UNIX_EPOCH
185
+ ).unwrap().as_nanos()
186
+ }),
187
+ )
188
+ .with_id(format!("{}", idx)),
189
+ )
190
+ } else {
191
+ None
192
+ }
193
+ }
194
+ }
195
+
196
+ /// Producer that simulates keep-alive with periodic heartbeats
197
+ struct KeepAliveProducer {
198
+ total_events: usize,
199
+ current_idx: Arc<AtomicUsize>,
200
+ }
201
+
202
+ impl KeepAliveProducer {
203
+ fn new(total_events: usize) -> Self {
204
+ Self {
205
+ total_events,
206
+ current_idx: Arc::new(AtomicUsize::new(0)),
207
+ }
208
+ }
209
+ }
210
+
211
+ impl SseEventProducer for KeepAliveProducer {
212
+ async fn next_event(&self) -> Option<SseEvent> {
213
+ let idx = self.current_idx.fetch_add(1, Ordering::Relaxed);
214
+ if idx < self.total_events {
215
+ Some(SseEvent::new(serde_json::json!({
216
+ "heartbeat": idx,
217
+ "alive": true
218
+ })))
219
+ } else {
220
+ None
221
+ }
222
+ }
223
+ }
224
+
225
+ /// Producer for graceful shutdown testing that tracks disconnections
226
+ struct GracefulShutdownProducer {
227
+ total_events: usize,
228
+ current_idx: Arc<AtomicUsize>,
229
+ disconnect_called: Arc<AtomicBool>,
230
+ }
231
+
232
+ impl GracefulShutdownProducer {
233
+ fn new(total_events: usize) -> Self {
234
+ Self {
235
+ total_events,
236
+ current_idx: Arc::new(AtomicUsize::new(0)),
237
+ disconnect_called: Arc::new(AtomicBool::new(false)),
238
+ }
239
+ }
240
+
241
+ fn was_disconnect_called(&self) -> bool {
242
+ self.disconnect_called.load(Ordering::Relaxed)
243
+ }
244
+ }
245
+
246
+ impl SseEventProducer for GracefulShutdownProducer {
247
+ async fn next_event(&self) -> Option<SseEvent> {
248
+ let idx = self.current_idx.fetch_add(1, Ordering::Relaxed);
249
+ if idx < self.total_events {
250
+ Some(SseEvent::new(serde_json::json!({"index": idx})))
251
+ } else {
252
+ None
253
+ }
254
+ }
255
+
256
+ async fn on_disconnect(&self) {
257
+ self.disconnect_called.store(true, Ordering::Relaxed);
258
+ }
259
+ }
260
+
261
+ #[tokio::test]
262
+ async fn test_sse_connection_establishment_and_streaming() {
263
+ let producer = SequentialEventProducer::new(5);
264
+
265
+ producer.on_connect().await;
266
+
267
+ let mut events_received = Vec::new();
268
+ for i in 0..5 {
269
+ if let Some(event) = producer.next_event().await {
270
+ assert_eq!(
271
+ event.data.get("sequence").and_then(|v| v.as_u64()),
272
+ Some(i as u64),
273
+ "Event {} has correct sequence number",
274
+ i
275
+ );
276
+ assert!(event.id.is_some(), "Event {} has ID for tracking", i);
277
+ events_received.push(event);
278
+ }
279
+ }
280
+
281
+ assert_eq!(events_received.len(), 5, "All 5 events should be received");
282
+ for (idx, event) in events_received.iter().enumerate() {
283
+ assert_eq!(
284
+ event.data.get("sequence").and_then(|v| v.as_u64()),
285
+ Some(idx as u64),
286
+ "Event {} has correct sequence",
287
+ idx
288
+ );
289
+ }
290
+
291
+ assert!(
292
+ producer.next_event().await.is_none(),
293
+ "Stream should end after all events"
294
+ );
295
+ }
296
+
297
+ #[tokio::test]
298
+ async fn test_client_reconnection_with_last_event_id() {
299
+ let events = vec![
300
+ ("id-1".to_string(), serde_json::json!({"data": "event1"})),
301
+ ("id-2".to_string(), serde_json::json!({"data": "event2"})),
302
+ ("id-3".to_string(), serde_json::json!({"data": "event3"})),
303
+ ("id-4".to_string(), serde_json::json!({"data": "event4"})),
304
+ ];
305
+
306
+ let producer = ReconnectableEventProducer::new(events);
307
+
308
+ producer.on_connect().await;
309
+ assert_eq!(producer.get_connect_count(), 1);
310
+
311
+ let event1 = producer.next_event().await.unwrap();
312
+ let event1_id = event1.id.clone();
313
+ assert_eq!(event1_id, Some("id-1".to_string()));
314
+
315
+ let event2 = producer.next_event().await.unwrap();
316
+ let event2_id = event2.id.clone();
317
+ assert_eq!(event2_id, Some("id-2".to_string()));
318
+
319
+ producer.on_connect().await;
320
+ assert_eq!(producer.get_connect_count(), 2);
321
+
322
+ let event3 = producer.next_event().await.unwrap();
323
+ assert_eq!(event3.id, Some("id-3".to_string()));
324
+
325
+ assert_eq!(producer.get_connect_count(), 2, "Client reconnected successfully");
326
+ }
327
+
328
+ #[tokio::test]
329
+ async fn test_event_ordering_preservation() {
330
+ let producer = RapidFireOrderedProducer::new(100);
331
+
332
+ let mut events_collected = Vec::new();
333
+ loop {
334
+ match producer.next_event().await {
335
+ Some(event) => events_collected.push(event),
336
+ None => break,
337
+ }
338
+ }
339
+
340
+ assert_eq!(events_collected.len(), 100, "All 100 events should be collected");
341
+
342
+ let mut last_sequence = -1i32;
343
+ for (idx, event) in events_collected.iter().enumerate() {
344
+ let sequence = event.data.get("index").and_then(|v| v.as_i64()).unwrap() as i32;
345
+ assert_eq!(
346
+ sequence, idx as i32,
347
+ "Event at position {} has correct sequence number {}",
348
+ idx, sequence
349
+ );
350
+ assert!(sequence > last_sequence, "Events are in increasing order");
351
+ last_sequence = sequence;
352
+ }
353
+
354
+ assert_eq!(
355
+ producer.get_generated_count(),
356
+ 100,
357
+ "Exactly 100 events should be generated"
358
+ );
359
+ }
360
+
361
+ #[tokio::test]
362
+ async fn test_connection_cleanup_on_disconnect() {
363
+ let producer = SequentialEventProducer::new(3);
364
+
365
+ producer.on_connect().await;
366
+ assert_eq!(producer.get_connect_count(), 1, "Client should be marked as connected");
367
+
368
+ let _event1 = producer.next_event().await;
369
+
370
+ producer.on_disconnect().await;
371
+ assert_eq!(
372
+ producer.get_disconnect_count(),
373
+ 1,
374
+ "Client should be marked as disconnected"
375
+ );
376
+
377
+ assert!(producer.get_disconnect_count() > 0, "Disconnect hook was invoked");
378
+ }
379
+
380
+ #[tokio::test]
381
+ async fn test_keep_alive_behavior() {
382
+ let producer = KeepAliveProducer::new(5);
383
+
384
+ let mut events = Vec::new();
385
+ loop {
386
+ match producer.next_event().await {
387
+ Some(event) => {
388
+ assert!(
389
+ event.data.get("heartbeat").is_some(),
390
+ "Each event should contain heartbeat data"
391
+ );
392
+ assert!(
393
+ event.data.get("alive").and_then(|v| v.as_bool()) == Some(true),
394
+ "All events should indicate server is alive"
395
+ );
396
+ events.push(event);
397
+ }
398
+ None => break,
399
+ }
400
+ }
401
+
402
+ assert_eq!(events.len(), 5, "All keep-alive events should be received");
403
+
404
+ assert!(
405
+ producer.next_event().await.is_none(),
406
+ "Stream should terminate normally"
407
+ );
408
+ }
409
+
410
+ #[tokio::test]
411
+ async fn test_backpressure_slow_client() {
412
+ let producer = SlowClientProducer::new(5, 10);
413
+
414
+ let start = std::time::Instant::now();
415
+ let mut events_count = 0;
416
+
417
+ loop {
418
+ match producer.next_event().await {
419
+ Some(_event) => {
420
+ events_count += 1;
421
+ }
422
+ None => break,
423
+ }
424
+ }
425
+
426
+ let elapsed = start.elapsed();
427
+
428
+ assert_eq!(events_count, 5, "All 5 events should be generated despite backpressure");
429
+
430
+ assert!(
431
+ elapsed.as_millis() >= 50,
432
+ "Event generation should have delays, took {:?}ms",
433
+ elapsed.as_millis()
434
+ );
435
+
436
+ assert_eq!(producer.get_events_sent(), 5, "All events should be marked as sent");
437
+ }
438
+
439
+ #[tokio::test]
440
+ async fn test_graceful_shutdown_with_active_streams() {
441
+ let producer = GracefulShutdownProducer::new(3);
442
+
443
+ for _ in 0..2 {
444
+ let _ = producer.next_event().await;
445
+ }
446
+
447
+ producer.on_disconnect().await;
448
+
449
+ assert!(
450
+ producer.was_disconnect_called(),
451
+ "Disconnect should be called during graceful shutdown"
452
+ );
453
+
454
+ let remaining = producer.next_event().await;
455
+ assert!(remaining.is_some(), "Stream should continue until complete");
456
+ }
457
+
458
+ #[tokio::test]
459
+ async fn test_event_ids_preserved_through_stream() {
460
+ let producer = SequentialEventProducer::new(10);
461
+
462
+ let mut event_ids = Vec::new();
463
+ loop {
464
+ match producer.next_event().await {
465
+ Some(event) => {
466
+ if let Some(id) = event.id.clone() {
467
+ event_ids.push(id);
468
+ }
469
+ }
470
+ None => break,
471
+ }
472
+ }
473
+
474
+ assert_eq!(event_ids.len(), 10, "All 10 events should have IDs");
475
+
476
+ for (idx, id) in event_ids.iter().enumerate() {
477
+ assert_eq!(id, &format!("event-{}", idx), "Event ID should match expected format");
478
+ }
479
+
480
+ let unique_ids: std::collections::HashSet<_> = event_ids.iter().cloned().collect();
481
+ assert_eq!(unique_ids.len(), event_ids.len(), "All event IDs should be unique");
482
+ }
483
+
484
+ #[tokio::test]
485
+ async fn test_multiple_concurrent_connections() {
486
+ let producer1 = Arc::new(SequentialEventProducer::new(5));
487
+ let producer2 = Arc::new(SequentialEventProducer::new(5));
488
+
489
+ producer1.on_connect().await;
490
+ producer2.on_connect().await;
491
+
492
+ let handle1 = {
493
+ let producer = Arc::clone(&producer1);
494
+ tokio::spawn(async move {
495
+ let mut count = 0;
496
+ loop {
497
+ match producer.next_event().await {
498
+ Some(_) => count += 1,
499
+ None => break,
500
+ }
501
+ }
502
+ count
503
+ })
504
+ };
505
+
506
+ let handle2 = {
507
+ let producer = Arc::clone(&producer2);
508
+ tokio::spawn(async move {
509
+ let mut count = 0;
510
+ loop {
511
+ match producer.next_event().await {
512
+ Some(_) => count += 1,
513
+ None => break,
514
+ }
515
+ }
516
+ count
517
+ })
518
+ };
519
+
520
+ let count1 = handle1.await.unwrap();
521
+ let count2 = handle2.await.unwrap();
522
+
523
+ assert_eq!(count1, 5, "First connection should receive 5 events");
524
+ assert_eq!(count2, 5, "Second connection should receive 5 events");
525
+ }
526
+
527
+ #[tokio::test]
528
+ async fn test_event_type_preservation() {
529
+ let producer = SequentialEventProducer::new(5);
530
+
531
+ let mut events = Vec::new();
532
+ loop {
533
+ match producer.next_event().await {
534
+ Some(event) => {
535
+ events.push(event);
536
+ }
537
+ None => break,
538
+ }
539
+ }
540
+
541
+ assert_eq!(events.len(), 5);
542
+ for event in events {
543
+ assert_eq!(
544
+ event.event_type,
545
+ Some("data".to_string()),
546
+ "Event type should be preserved as 'data'"
547
+ );
548
+ }
549
+ }
550
+
551
+ #[tokio::test]
552
+ async fn test_empty_event_stream() {
553
+ let producer = SequentialEventProducer::new(0);
554
+
555
+ let event = producer.next_event().await;
556
+
557
+ assert!(event.is_none(), "Empty stream should produce no events");
558
+ }
559
+
560
+ #[tokio::test]
561
+ async fn test_event_data_integrity_through_stream() {
562
+ let events = vec![
563
+ (
564
+ "id-1".to_string(),
565
+ serde_json::json!({
566
+ "name": "Alice",
567
+ "age": 30,
568
+ "active": true,
569
+ "tags": ["rust", "async"],
570
+ "metadata": {
571
+ "created": "2024-01-01",
572
+ "updated": "2024-01-02"
573
+ }
574
+ }),
575
+ ),
576
+ (
577
+ "id-2".to_string(),
578
+ serde_json::json!({
579
+ "name": "Bob",
580
+ "age": 25,
581
+ "active": false,
582
+ "tags": ["python"],
583
+ "metadata": {
584
+ "created": "2024-01-03"
585
+ }
586
+ }),
587
+ ),
588
+ ];
589
+
590
+ let producer = ReconnectableEventProducer::new(events.clone());
591
+
592
+ let event1 = producer.next_event().await.unwrap();
593
+ assert_eq!(event1.data.get("name").and_then(|v| v.as_str()), Some("Alice"));
594
+ assert_eq!(event1.data.get("age").and_then(|v| v.as_i64()), Some(30));
595
+ assert_eq!(
596
+ event1.data.get("tags").and_then(|v| v.as_array()).map(|a| a.len()),
597
+ Some(2)
598
+ );
599
+
600
+ let event2 = producer.next_event().await.unwrap();
601
+ assert_eq!(event2.data.get("name").and_then(|v| v.as_str()), Some("Bob"));
602
+ assert_eq!(event2.data.get("age").and_then(|v| v.as_i64()), Some(25));
603
+
604
+ assert_eq!(
605
+ event1
606
+ .data
607
+ .get("metadata")
608
+ .and_then(|v| v.get("created"))
609
+ .and_then(|v| v.as_str()),
610
+ Some("2024-01-01")
611
+ );
612
+ assert_eq!(
613
+ event2
614
+ .data
615
+ .get("metadata")
616
+ .and_then(|v| v.get("created"))
617
+ .and_then(|v| v.as_str()),
618
+ Some("2024-01-03")
619
+ );
620
+ }