spikard 0.3.6 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. checksums.yaml +4 -4
  2. data/README.md +21 -6
  3. data/ext/spikard_rb/Cargo.toml +2 -2
  4. data/lib/spikard/app.rb +33 -14
  5. data/lib/spikard/testing.rb +47 -12
  6. data/lib/spikard/version.rb +1 -1
  7. data/vendor/crates/spikard-bindings-shared/Cargo.toml +63 -0
  8. data/vendor/crates/spikard-bindings-shared/examples/config_extraction.rs +132 -0
  9. data/vendor/crates/spikard-bindings-shared/src/config_extractor.rs +752 -0
  10. data/vendor/crates/spikard-bindings-shared/src/conversion_traits.rs +194 -0
  11. data/vendor/crates/spikard-bindings-shared/src/di_traits.rs +246 -0
  12. data/vendor/crates/spikard-bindings-shared/src/error_response.rs +401 -0
  13. data/vendor/crates/spikard-bindings-shared/src/handler_base.rs +238 -0
  14. data/vendor/crates/spikard-bindings-shared/src/lib.rs +24 -0
  15. data/vendor/crates/spikard-bindings-shared/src/lifecycle_base.rs +292 -0
  16. data/vendor/crates/spikard-bindings-shared/src/lifecycle_executor.rs +616 -0
  17. data/vendor/crates/spikard-bindings-shared/src/response_builder.rs +305 -0
  18. data/vendor/crates/spikard-bindings-shared/src/test_client_base.rs +248 -0
  19. data/vendor/crates/spikard-bindings-shared/src/validation_helpers.rs +351 -0
  20. data/vendor/crates/spikard-bindings-shared/tests/comprehensive_coverage.rs +454 -0
  21. data/vendor/crates/spikard-bindings-shared/tests/error_response_edge_cases.rs +383 -0
  22. data/vendor/crates/spikard-bindings-shared/tests/handler_base_integration.rs +280 -0
  23. data/vendor/crates/spikard-core/Cargo.toml +4 -4
  24. data/vendor/crates/spikard-core/src/debug.rs +64 -0
  25. data/vendor/crates/spikard-core/src/di/container.rs +3 -27
  26. data/vendor/crates/spikard-core/src/di/factory.rs +1 -5
  27. data/vendor/crates/spikard-core/src/di/graph.rs +8 -47
  28. data/vendor/crates/spikard-core/src/di/mod.rs +1 -1
  29. data/vendor/crates/spikard-core/src/di/resolved.rs +1 -7
  30. data/vendor/crates/spikard-core/src/di/value.rs +2 -4
  31. data/vendor/crates/spikard-core/src/errors.rs +30 -0
  32. data/vendor/crates/spikard-core/src/http.rs +262 -0
  33. data/vendor/crates/spikard-core/src/lib.rs +1 -1
  34. data/vendor/crates/spikard-core/src/lifecycle.rs +764 -0
  35. data/vendor/crates/spikard-core/src/metadata.rs +389 -0
  36. data/vendor/crates/spikard-core/src/parameters.rs +1962 -159
  37. data/vendor/crates/spikard-core/src/problem.rs +34 -0
  38. data/vendor/crates/spikard-core/src/request_data.rs +966 -1
  39. data/vendor/crates/spikard-core/src/router.rs +263 -2
  40. data/vendor/crates/spikard-core/src/validation/error_mapper.rs +688 -0
  41. data/vendor/crates/spikard-core/src/{validation.rs → validation/mod.rs} +26 -268
  42. data/vendor/crates/spikard-http/Cargo.toml +12 -16
  43. data/vendor/crates/spikard-http/examples/sse-notifications.rs +148 -0
  44. data/vendor/crates/spikard-http/examples/websocket-chat.rs +92 -0
  45. data/vendor/crates/spikard-http/src/auth.rs +65 -16
  46. data/vendor/crates/spikard-http/src/background.rs +1614 -3
  47. data/vendor/crates/spikard-http/src/cors.rs +515 -0
  48. data/vendor/crates/spikard-http/src/debug.rs +65 -0
  49. data/vendor/crates/spikard-http/src/di_handler.rs +1322 -77
  50. data/vendor/crates/spikard-http/src/handler_response.rs +711 -0
  51. data/vendor/crates/spikard-http/src/handler_trait.rs +607 -5
  52. data/vendor/crates/spikard-http/src/handler_trait_tests.rs +6 -0
  53. data/vendor/crates/spikard-http/src/lib.rs +33 -28
  54. data/vendor/crates/spikard-http/src/lifecycle/adapter.rs +81 -0
  55. data/vendor/crates/spikard-http/src/lifecycle.rs +765 -0
  56. data/vendor/crates/spikard-http/src/middleware/mod.rs +372 -117
  57. data/vendor/crates/spikard-http/src/middleware/multipart.rs +836 -10
  58. data/vendor/crates/spikard-http/src/middleware/urlencoded.rs +409 -43
  59. data/vendor/crates/spikard-http/src/middleware/validation.rs +513 -65
  60. data/vendor/crates/spikard-http/src/openapi/parameter_extraction.rs +345 -0
  61. data/vendor/crates/spikard-http/src/openapi/schema_conversion.rs +1055 -0
  62. data/vendor/crates/spikard-http/src/openapi/spec_generation.rs +473 -3
  63. data/vendor/crates/spikard-http/src/query_parser.rs +455 -31
  64. data/vendor/crates/spikard-http/src/response.rs +321 -0
  65. data/vendor/crates/spikard-http/src/server/handler.rs +1572 -9
  66. data/vendor/crates/spikard-http/src/server/lifecycle_execution.rs +136 -0
  67. data/vendor/crates/spikard-http/src/server/mod.rs +875 -178
  68. data/vendor/crates/spikard-http/src/server/request_extraction.rs +674 -23
  69. data/vendor/crates/spikard-http/src/server/routing_factory.rs +599 -0
  70. data/vendor/crates/spikard-http/src/sse.rs +983 -21
  71. data/vendor/crates/spikard-http/src/testing/form.rs +38 -0
  72. data/vendor/crates/spikard-http/src/testing/test_client.rs +0 -2
  73. data/vendor/crates/spikard-http/src/testing.rs +7 -7
  74. data/vendor/crates/spikard-http/src/websocket.rs +1055 -4
  75. data/vendor/crates/spikard-http/tests/background_behavior.rs +832 -0
  76. data/vendor/crates/spikard-http/tests/common/handlers.rs +309 -0
  77. data/vendor/crates/spikard-http/tests/common/mod.rs +26 -0
  78. data/vendor/crates/spikard-http/tests/di_integration.rs +192 -0
  79. data/vendor/crates/spikard-http/tests/doc_snippets.rs +5 -0
  80. data/vendor/crates/spikard-http/tests/lifecycle_execution.rs +1093 -0
  81. data/vendor/crates/spikard-http/tests/multipart_behavior.rs +656 -0
  82. data/vendor/crates/spikard-http/tests/server_config_builder.rs +314 -0
  83. data/vendor/crates/spikard-http/tests/sse_behavior.rs +620 -0
  84. data/vendor/crates/spikard-http/tests/websocket_behavior.rs +663 -0
  85. data/vendor/crates/spikard-rb/Cargo.toml +10 -4
  86. data/vendor/crates/spikard-rb/build.rs +196 -5
  87. data/vendor/crates/spikard-rb/src/config/mod.rs +5 -0
  88. data/vendor/crates/spikard-rb/src/{config.rs → config/server_config.rs} +100 -109
  89. data/vendor/crates/spikard-rb/src/conversion.rs +121 -20
  90. data/vendor/crates/spikard-rb/src/di/builder.rs +100 -0
  91. data/vendor/crates/spikard-rb/src/{di.rs → di/mod.rs} +12 -46
  92. data/vendor/crates/spikard-rb/src/handler.rs +100 -107
  93. data/vendor/crates/spikard-rb/src/integration/mod.rs +3 -0
  94. data/vendor/crates/spikard-rb/src/lib.rs +467 -1428
  95. data/vendor/crates/spikard-rb/src/lifecycle.rs +1 -0
  96. data/vendor/crates/spikard-rb/src/metadata/mod.rs +5 -0
  97. data/vendor/crates/spikard-rb/src/metadata/route_extraction.rs +447 -0
  98. data/vendor/crates/spikard-rb/src/runtime/mod.rs +5 -0
  99. data/vendor/crates/spikard-rb/src/runtime/server_runner.rs +324 -0
  100. data/vendor/crates/spikard-rb/src/server.rs +47 -22
  101. data/vendor/crates/spikard-rb/src/{test_client.rs → testing/client.rs} +187 -40
  102. data/vendor/crates/spikard-rb/src/testing/mod.rs +7 -0
  103. data/vendor/crates/spikard-rb/src/testing/websocket.rs +635 -0
  104. data/vendor/crates/spikard-rb/src/websocket.rs +178 -37
  105. metadata +46 -13
  106. data/vendor/crates/spikard-http/src/parameters.rs +0 -1
  107. data/vendor/crates/spikard-http/src/problem.rs +0 -1
  108. data/vendor/crates/spikard-http/src/router.rs +0 -1
  109. data/vendor/crates/spikard-http/src/schema_registry.rs +0 -1
  110. data/vendor/crates/spikard-http/src/type_hints.rs +0 -1
  111. data/vendor/crates/spikard-http/src/validation.rs +0 -1
  112. data/vendor/crates/spikard-rb/src/test_websocket.rs +0 -221
  113. /data/vendor/crates/spikard-rb/src/{test_sse.rs → testing/sse.rs} +0 -0
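The hunk reproduced below is entry 75, the new behavioral test suite for the background task runtime added to spikard-http in this release. As orientation, the sketch that follows shows the minimal spawn-and-drain pattern those tests exercise; the type and method names (BackgroundTaskConfig, BackgroundRuntime::start, handle.spawn, runtime.shutdown) come straight from the test code, while the surrounding main function and the exact signatures are assumptions rather than documented API.

    use spikard_http::background::{BackgroundRuntime, BackgroundTaskConfig};
    use std::time::Duration;

    #[tokio::main]
    async fn main() {
        // Bound the queue depth, the number of concurrently running tasks, and how long
        // shutdown will wait for queued and in-flight tasks to drain.
        let config = BackgroundTaskConfig {
            max_queue_size: 100,
            max_concurrent_tasks: 4,
            drain_timeout_secs: 10,
        };

        // Start the runtime and take a cloneable handle used to enqueue work.
        let runtime = BackgroundRuntime::start(config).await;
        let handle = runtime.handle();

        // A task is a closure producing a future; it resolves to Ok(()) on success or a
        // BackgroundJobError on failure (failures are logged, they do not crash the runtime).
        handle
            .spawn(|| async {
                tokio::time::sleep(Duration::from_millis(10)).await;
                Ok(())
            })
            .expect("spawn failed");

        // Graceful shutdown drains queued and in-flight tasks, or returns Err once
        // drain_timeout_secs elapses with tasks still outstanding.
        runtime.shutdown().await.expect("tasks did not drain in time");
    }

Everything beyond this surface (ordering, concurrency limits, cancellation on timeout) is what the tests below pin down as observable behavior.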
data/vendor/crates/spikard-http/tests/background_behavior.rs (new file)
@@ -0,0 +1,832 @@
+ #![allow(clippy::pedantic, clippy::nursery, clippy::all)]
+ //! Behavioral tests for background task execution in spikard-http.
+ //!
+ //! These tests focus on observable behavior: task completion, timing, resource cleanup,
+ //! and graceful shutdown patterns. They validate end-to-end behavior rather than
+ //! implementation details.
+ //!
+ //! Test categories:
+ //! 1. Graceful Shutdown & Draining
+ //! 2. Shutdown Timeout Behavior
+ //! 3. Task Success/Failure Observable Outcomes
+ //! 4. High-Volume Task Queue
+ //! 5. Task Execution Order Guarantees
+ //! 6. Concurrent Task Execution
+ //! 7. Task Cancellation Propagation
+
+ use spikard_http::background::{
+     BackgroundJobError, BackgroundJobMetadata, BackgroundRuntime, BackgroundSpawnError, BackgroundTaskConfig,
+ };
+ use std::borrow::Cow;
+ use std::sync::Arc;
+ use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
+ use std::time::{Duration, Instant};
+
+ /// Verifies that shutdown waits for all in-flight tasks to complete gracefully.
+ /// Observable behavior: all spawned tasks complete before shutdown returns Ok.
+ #[tokio::test]
+ async fn test_graceful_shutdown_drains_all_spawned_tasks() {
+     let config = BackgroundTaskConfig {
+         max_queue_size: 50,
+         max_concurrent_tasks: 5,
+         drain_timeout_secs: 10,
+     };
+
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     let completion_count = Arc::new(AtomicU64::new(0));
+     let task_count = 20;
+
+     for _ in 0..task_count {
+         let count = completion_count.clone();
+         handle
+             .spawn(move || {
+                 let c = count.clone();
+                 async move {
+                     tokio::time::sleep(Duration::from_millis(10)).await;
+                     c.fetch_add(1, Ordering::SeqCst);
+                     Ok(())
+                 }
+             })
+             .expect("spawn failed");
+     }
+
+     tokio::time::sleep(Duration::from_millis(50)).await;
+
+     let shutdown_result = runtime.shutdown().await;
+     assert!(
+         shutdown_result.is_ok(),
+         "shutdown should succeed when draining in-flight tasks"
+     );
+
+     assert_eq!(
+         completion_count.load(Ordering::SeqCst),
+         task_count,
+         "all spawned tasks must complete during graceful shutdown"
+     );
+ }
+
+ /// Verifies that shutdown processes queued tasks before returning.
+ /// Observable behavior: both in-flight and queued tasks complete.
+ #[tokio::test]
+ async fn test_graceful_shutdown_processes_both_inflight_and_queued_tasks() {
+     let config = BackgroundTaskConfig {
+         max_queue_size: 100,
+         max_concurrent_tasks: 2,
+         drain_timeout_secs: 10,
+     };
+
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     let completion_count = Arc::new(AtomicU64::new(0));
+     let task_count = 15;
+
+     for _ in 0..task_count {
+         let count = completion_count.clone();
+         handle
+             .spawn(move || {
+                 let c = count.clone();
+                 async move {
+                     tokio::time::sleep(Duration::from_millis(5)).await;
+                     c.fetch_add(1, Ordering::SeqCst);
+                     Ok(())
+                 }
+             })
+             .expect("spawn failed");
+     }
+
+     let shutdown_result = runtime.shutdown().await;
+     assert!(
+         shutdown_result.is_ok(),
+         "shutdown should drain both in-flight and queued tasks"
+     );
+
+     assert_eq!(
+         completion_count.load(Ordering::SeqCst),
+         task_count,
+         "all tasks including queued ones must complete"
+     );
+ }
+
+ /// Verifies that shutdown times out when tasks exceed drain timeout.
+ /// Observable behavior: shutdown returns error, incomplete tasks remain unfinished.
+ #[tokio::test]
+ async fn test_shutdown_timeout_with_long_running_task() {
+     let config = BackgroundTaskConfig {
+         max_queue_size: 10,
+         max_concurrent_tasks: 2,
+         drain_timeout_secs: 1,
+     };
+
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     let task_completed = Arc::new(AtomicBool::new(false));
+     let completed_clone = task_completed.clone();
+
+     handle
+         .spawn(move || {
+             let c = completed_clone.clone();
+             async move {
+                 tokio::time::sleep(Duration::from_secs(10)).await;
+                 c.store(true, Ordering::SeqCst);
+                 Ok(())
+             }
+         })
+         .expect("spawn failed");
+
+     tokio::time::sleep(Duration::from_millis(50)).await;
+
+     let shutdown_result = runtime.shutdown().await;
+     assert!(
+         shutdown_result.is_err(),
+         "shutdown should timeout with incomplete long-running task"
+     );
+
+     assert!(
+         !task_completed.load(Ordering::SeqCst),
+         "incomplete task should not complete after shutdown timeout"
+     );
+ }
+
+ /// Verifies shutdown respects drain timeout duration.
+ /// Observable behavior: shutdown duration is approximately the drain_timeout_secs.
+ #[tokio::test]
+ async fn test_shutdown_timeout_duration_respected() {
+     let drain_timeout_secs = 2;
+     let config = BackgroundTaskConfig {
+         max_queue_size: 10,
+         max_concurrent_tasks: 1,
+         drain_timeout_secs,
+     };
+
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     handle
+         .spawn(|| async {
+             tokio::time::sleep(Duration::from_secs(30)).await;
+             Ok(())
+         })
+         .expect("spawn failed");
+
+     tokio::time::sleep(Duration::from_millis(100)).await;
+
+     let shutdown_start = Instant::now();
+     let _ = runtime.shutdown().await;
+     let shutdown_elapsed = shutdown_start.elapsed();
+
+     assert!(
+         shutdown_elapsed >= Duration::from_secs(drain_timeout_secs - 1),
+         "shutdown should wait at least drain_timeout"
+     );
+     assert!(
+         shutdown_elapsed < Duration::from_secs(drain_timeout_secs + 2),
+         "shutdown should not wait much longer than drain_timeout"
+     );
+ }
+
+ /// Verifies that successful tasks complete without affecting other tasks.
+ /// Observable behavior: task runs to completion, no side effects on runtime.
+ #[tokio::test]
+ async fn test_task_success_completes_cleanly() {
+     let config = BackgroundTaskConfig::default();
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     let success_flag = Arc::new(AtomicBool::new(false));
+     let flag_clone = success_flag.clone();
+
+     handle
+         .spawn(move || {
+             let f = flag_clone.clone();
+             async move {
+                 tokio::time::sleep(Duration::from_millis(10)).await;
+                 f.store(true, Ordering::SeqCst);
+                 Ok(())
+             }
+         })
+         .expect("spawn failed");
+
+     tokio::time::sleep(Duration::from_millis(50)).await;
+
+     assert!(
+         success_flag.load(Ordering::SeqCst),
+         "successful task should execute and set flag"
+     );
+
+     let shutdown_result = runtime.shutdown().await;
+     assert!(shutdown_result.is_ok(), "shutdown should succeed after successful task");
+ }
+
+ /// Verifies that failed tasks log failure but don't crash the runtime.
+ /// Observable behavior: failed task executes, runtime continues accepting new tasks.
+ #[tokio::test]
+ async fn test_task_failure_doesnt_crash_runtime() {
+     let config = BackgroundTaskConfig::default();
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     let failure_count = Arc::new(AtomicU64::new(0));
+     let success_count = Arc::new(AtomicU64::new(0));
+
+     {
+         let f = failure_count.clone();
+         handle
+             .spawn(move || {
+                 let fail = f.clone();
+                 async move {
+                     fail.fetch_add(1, Ordering::SeqCst);
+                     Err(BackgroundJobError::from("task error"))
+                 }
+             })
+             .expect("spawn failed");
+     }
+
+     tokio::time::sleep(Duration::from_millis(50)).await;
+
+     {
+         let s = success_count.clone();
+         handle
+             .spawn(move || {
+                 let succ = s.clone();
+                 async move {
+                     succ.fetch_add(1, Ordering::SeqCst);
+                     Ok(())
+                 }
+             })
+             .expect("spawn failed");
+     }
+
+     tokio::time::sleep(Duration::from_millis(100)).await;
+
+     assert_eq!(failure_count.load(Ordering::SeqCst), 1, "failed task should execute");
+     assert_eq!(
+         success_count.load(Ordering::SeqCst),
+         1,
+         "task after failure should also execute"
+     );
+
+     let shutdown_result = runtime.shutdown().await;
+     assert!(
+         shutdown_result.is_ok(),
+         "runtime should shutdown cleanly after failed tasks"
+     );
+ }
+
+ /// Verifies that mixed success/failure tasks all execute during shutdown.
+ /// Observable behavior: shutdown drains both successful and failed tasks.
+ #[tokio::test]
+ async fn test_shutdown_drains_mixed_success_and_failure_tasks() {
+     let config = BackgroundTaskConfig {
+         max_queue_size: 100,
+         max_concurrent_tasks: 5,
+         drain_timeout_secs: 10,
+     };
+
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     let success_count = Arc::new(AtomicU64::new(0));
+     let failure_count = Arc::new(AtomicU64::new(0));
+     let task_count = 20;
+
+     for i in 0..task_count {
+         if i % 2 == 0 {
+             let s = success_count.clone();
+             handle
+                 .spawn(move || {
+                     let succ = s.clone();
+                     async move {
+                         succ.fetch_add(1, Ordering::SeqCst);
+                         Ok(())
+                     }
+                 })
+                 .expect("spawn failed");
+         } else {
+             let f = failure_count.clone();
+             handle
+                 .spawn(move || {
+                     let fail = f.clone();
+                     async move {
+                         fail.fetch_add(1, Ordering::SeqCst);
+                         Err(BackgroundJobError::from("intentional failure"))
+                     }
+                 })
+                 .expect("spawn failed");
+         }
+     }
+
+     let shutdown_result = runtime.shutdown().await;
+     assert!(shutdown_result.is_ok(), "shutdown should drain all tasks");
+
+     assert_eq!(
+         success_count.load(Ordering::SeqCst),
+         10,
+         "all successful tasks should execute"
+     );
+     assert_eq!(
+         failure_count.load(Ordering::SeqCst),
+         10,
+         "all failing tasks should execute"
+     );
+ }
+
+ /// Verifies that high-volume queues are processed without resource exhaustion.
+ /// Observable behavior: 10K tasks all complete within drain timeout.
+ #[tokio::test]
+ async fn test_high_volume_queue_10k_tasks() {
+     let task_count = 10_000;
+     let config = BackgroundTaskConfig {
+         max_queue_size: 15_000,
+         max_concurrent_tasks: 50,
+         drain_timeout_secs: 60,
+     };
+
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     let completion_count = Arc::new(AtomicU64::new(0));
+
+     for _ in 0..task_count {
+         let count = completion_count.clone();
+         let result = handle.spawn(move || {
+             let c = count.clone();
+             async move {
+                 c.fetch_add(1, Ordering::SeqCst);
+                 Ok(())
+             }
+         });
+         assert!(result.is_ok(), "spawn should succeed for high-volume queue");
+     }
+
+     let shutdown_result = runtime.shutdown().await;
+     assert!(shutdown_result.is_ok(), "shutdown should complete high-volume queue");
+
+     assert_eq!(
+         completion_count.load(Ordering::SeqCst),
+         task_count as u64,
+         "all 10K tasks must execute"
+     );
+ }
+
+ /// Verifies queue full behavior under high spawn rate.
+ /// Observable behavior: QueueFull errors when queue capacity exceeded.
+ #[tokio::test]
+ async fn test_high_volume_queue_overflow_behavior() {
+     let config = BackgroundTaskConfig {
+         max_queue_size: 10,
+         max_concurrent_tasks: 50,
+         drain_timeout_secs: 10,
+     };
+
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     let blocking_counter = Arc::new(AtomicU64::new(0));
+     let spawned_count = Arc::new(AtomicU64::new(0));
+
+     let mut overflow_error_count = 0;
+     for _ in 0..50 {
+         let counter = blocking_counter.clone();
+         let spawned = spawned_count.clone();
+         let result = handle.spawn(move || {
+             let c = counter.clone();
+             let s = spawned.clone();
+             async move {
+                 s.fetch_add(1, Ordering::SeqCst);
+                 tokio::time::sleep(Duration::from_millis(100)).await;
+                 c.fetch_add(1, Ordering::SeqCst);
+                 Ok(())
+             }
+         });
+
+         if let Err(BackgroundSpawnError::QueueFull) = result {
+             overflow_error_count += 1;
+         }
+     }
+
+     assert!(
+         overflow_error_count > 0,
+         "should see queue full errors when exceeding capacity"
+     );
+
+     runtime.shutdown().await.expect("shutdown should succeed");
+ }
+
+ /// Verifies that multiple tasks execute to completion (order not guaranteed, but all complete).
+ /// Observable behavior: all tasks execute despite concurrent nature.
+ #[tokio::test]
+ async fn test_task_execution_order_all_complete() {
+     let config = BackgroundTaskConfig::default();
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     let execution_log = Arc::new(tokio::sync::Mutex::new(Vec::new()));
+     let task_count = 100;
+
+     for i in 0..task_count {
+         let log = execution_log.clone();
+         handle
+             .spawn(move || {
+                 let l = log.clone();
+                 async move {
+                     l.lock().await.push(i);
+                     Ok(())
+                 }
+             })
+             .expect("spawn failed");
+     }
+
+     tokio::time::sleep(Duration::from_millis(200)).await;
+
+     let log = execution_log.lock().await;
+
+     assert_eq!(log.len(), task_count, "all spawned tasks should execute");
+
+     for i in 0..task_count {
+         let count = log.iter().filter(|&&x| x == i).count();
+         assert_eq!(count, 1, "task {} should execute exactly once", i);
+     }
+
+     runtime.shutdown().await.expect("shutdown should succeed");
+ }
+
+ /// Verifies FIFO-like behavior when concurrency is limited.
+ /// Observable behavior: with 1 concurrent task, tasks execute sequentially.
+ #[tokio::test]
+ async fn test_sequential_execution_with_single_concurrency() {
+     let config = BackgroundTaskConfig {
+         max_queue_size: 100,
+         max_concurrent_tasks: 1,
+         drain_timeout_secs: 30,
+     };
+
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     let execution_order = Arc::new(tokio::sync::Mutex::new(Vec::new()));
+     let task_count = 10;
+
+     for i in 0..task_count {
+         let order = execution_order.clone();
+         handle
+             .spawn(move || {
+                 let o = order.clone();
+                 async move {
+                     o.lock().await.push(i);
+                     tokio::time::sleep(Duration::from_millis(5)).await;
+                     Ok(())
+                 }
+             })
+             .expect("spawn failed");
+     }
+
+     let shutdown_result = runtime.shutdown().await;
+     assert!(shutdown_result.is_ok(), "shutdown should succeed");
+
+     let order = execution_order.lock().await;
+
+     assert_eq!(order.len(), task_count, "all tasks should execute");
+ }
+
+ /// Verifies that concurrent limit is respected during execution.
+ /// Observable behavior: peak concurrent tasks <= configured limit.
+ #[tokio::test]
+ async fn test_concurrent_execution_respects_limit() {
+     let config = BackgroundTaskConfig {
+         max_queue_size: 100,
+         max_concurrent_tasks: 5,
+         drain_timeout_secs: 10,
+     };
+
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     let active_count = Arc::new(AtomicU64::new(0));
+     let peak_count = Arc::new(AtomicU64::new(0));
+     let task_count = 20;
+
+     for _ in 0..task_count {
+         let active = active_count.clone();
+         let peak = peak_count.clone();
+
+         handle
+             .spawn(move || {
+                 let a = active.clone();
+                 let p = peak.clone();
+
+                 async move {
+                     let current = a.fetch_add(1, Ordering::SeqCst) + 1;
+
+                     let mut peak_val = p.load(Ordering::SeqCst);
+                     while current > peak_val {
+                         if p.compare_exchange(peak_val, current, Ordering::SeqCst, Ordering::SeqCst)
+                             .is_ok()
+                         {
+                             break;
+                         }
+                         peak_val = p.load(Ordering::SeqCst);
+                     }
+
+                     tokio::time::sleep(Duration::from_millis(100)).await;
+                     a.fetch_sub(1, Ordering::SeqCst);
+                     Ok(())
+                 }
+             })
+             .expect("spawn failed");
+     }
+
+     tokio::time::sleep(Duration::from_millis(300)).await;
+
+     let peak = peak_count.load(Ordering::SeqCst);
+
+     assert!(
+         peak <= 5,
+         "peak concurrent tasks ({}) should not exceed limit of 5",
+         peak
+     );
+
+     runtime.shutdown().await.expect("shutdown should succeed");
+ }
+
+ /// Verifies tasks can run concurrently and interact safely.
+ /// Observable behavior: multiple tasks run simultaneously without data races.
+ #[tokio::test]
+ async fn test_concurrent_tasks_safe_interaction() {
+     let config = BackgroundTaskConfig {
+         max_queue_size: 100,
+         max_concurrent_tasks: 10,
+         drain_timeout_secs: 10,
+     };
+
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     let shared_value = Arc::new(AtomicU64::new(0));
+     let task_count = 50;
+
+     for _ in 0..task_count {
+         let val = shared_value.clone();
+         handle
+             .spawn(move || {
+                 let v = val.clone();
+                 async move {
+                     v.fetch_add(1, Ordering::SeqCst);
+                     Ok(())
+                 }
+             })
+             .expect("spawn failed");
+     }
+
+     tokio::time::sleep(Duration::from_millis(200)).await;
+
+     assert_eq!(
+         shared_value.load(Ordering::SeqCst),
+         task_count as u64,
+         "concurrent increments should all complete"
+     );
+
+     runtime.shutdown().await.expect("shutdown should succeed");
+ }
+
+ /// Verifies that shutdown immediately stops accepting new tasks.
+ /// Observable behavior: spawn after shutdown signal returns error.
+ #[tokio::test]
+ async fn test_spawn_fails_after_shutdown_initiated() {
+     let config = BackgroundTaskConfig::default();
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     let handle_clone = handle.clone();
+
+     runtime.shutdown().await.expect("shutdown should succeed");
+
+     tokio::time::sleep(Duration::from_millis(50)).await;
+
+     let result = handle_clone.spawn(|| async { Ok(()) });
+     assert!(result.is_err(), "spawn after shutdown should fail");
+ }
+
+ /// Verifies that incomplete tasks are cancelled when shutdown times out.
+ /// Observable behavior: incomplete task never completes after timeout.
+ #[tokio::test]
+ async fn test_incomplete_task_cancelled_on_timeout() {
+     let config = BackgroundTaskConfig {
+         max_queue_size: 10,
+         max_concurrent_tasks: 1,
+         drain_timeout_secs: 1,
+     };
+
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     let task_started = Arc::new(AtomicBool::new(false));
+     let task_completed = Arc::new(AtomicBool::new(false));
+     let started = task_started.clone();
+     let completed = task_completed.clone();
+
+     handle
+         .spawn(move || {
+             let s = started.clone();
+             let c = completed.clone();
+             async move {
+                 s.store(true, Ordering::SeqCst);
+                 tokio::time::sleep(Duration::from_secs(10)).await;
+                 c.store(true, Ordering::SeqCst);
+                 Ok(())
+             }
+         })
+         .expect("spawn failed");
+
+     tokio::time::sleep(Duration::from_millis(100)).await;
+
+     assert!(task_started.load(Ordering::SeqCst), "task should have started");
+
+     let shutdown_result = runtime.shutdown().await;
+
+     assert!(shutdown_result.is_err(), "shutdown should timeout with incomplete task");
+
+     assert!(
+         !task_completed.load(Ordering::SeqCst),
+         "incomplete task should not complete after shutdown timeout"
+     );
+ }
+
+ /// Verifies task cancellation doesn't affect other tasks.
+ /// Observable behavior: other tasks complete normally even if one is cancelled.
+ #[tokio::test]
+ async fn test_task_cancellation_doesnt_affect_others() {
+     let config = BackgroundTaskConfig {
+         max_queue_size: 100,
+         max_concurrent_tasks: 5,
+         drain_timeout_secs: 1,
+     };
+
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     let short_task_completed = Arc::new(AtomicBool::new(false));
+     let long_task_started = Arc::new(AtomicBool::new(false));
+
+     {
+         let c = short_task_completed.clone();
+         handle
+             .spawn(move || {
+                 let completed = c.clone();
+                 async move {
+                     tokio::time::sleep(Duration::from_millis(50)).await;
+                     completed.store(true, Ordering::SeqCst);
+                     Ok(())
+                 }
+             })
+             .expect("spawn failed");
+     }
+
+     {
+         let s = long_task_started.clone();
+         handle
+             .spawn(move || {
+                 let started = s.clone();
+                 async move {
+                     started.store(true, Ordering::SeqCst);
+                     tokio::time::sleep(Duration::from_secs(30)).await;
+                     Ok(())
+                 }
+             })
+             .expect("spawn failed");
+     }
+
+     tokio::time::sleep(Duration::from_millis(100)).await;
+
+     let shutdown_result = runtime.shutdown().await;
+     assert!(shutdown_result.is_err(), "shutdown should timeout due to long task");
+
+     assert!(
+         short_task_completed.load(Ordering::SeqCst),
+         "short task should have completed before timeout"
+     );
+     assert!(
+         long_task_started.load(Ordering::SeqCst),
+         "long task should have started before timeout"
+     );
+ }
+
+ /// Verifies immediate shutdown with no tasks.
+ /// Observable behavior: shutdown succeeds quickly with empty queue.
+ #[tokio::test]
+ async fn test_shutdown_with_no_tasks() {
+     let config = BackgroundTaskConfig::default();
+     let runtime = BackgroundRuntime::start(config).await;
+
+     let start = Instant::now();
+     let result = runtime.shutdown().await;
+     let elapsed = start.elapsed();
+
+     assert!(result.is_ok(), "shutdown should succeed with no tasks");
+     assert!(
+         elapsed < Duration::from_secs(1),
+         "shutdown with no tasks should be fast"
+     );
+ }
+
+ /// Verifies task metadata is preserved (metadata doesn't affect execution).
+ /// Observable behavior: tasks with metadata execute successfully.
+ #[tokio::test]
+ async fn test_task_metadata_preserved_execution() {
+     let config = BackgroundTaskConfig::default();
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     let executed = Arc::new(AtomicBool::new(false));
+     let executed_clone = executed.clone();
+
+     let metadata = BackgroundJobMetadata {
+         name: Cow::Owned("test_task".to_string()),
+         request_id: Some("req-123".to_string()),
+     };
+
+     let future = async move {
+         executed_clone.store(true, Ordering::SeqCst);
+         Ok(())
+     };
+
+     handle.spawn_with_metadata(future, metadata).expect("spawn failed");
+
+     tokio::time::sleep(Duration::from_millis(50)).await;
+
+     assert!(executed.load(Ordering::SeqCst), "task with metadata should execute");
+
+     runtime.shutdown().await.expect("shutdown should succeed");
+ }
+
+ /// Verifies that multiple handles to the same runtime work correctly.
+ /// Observable behavior: multiple handle clones spawn tasks independently.
+ #[tokio::test]
+ async fn test_multiple_handle_clones_spawn_independently() {
+     let config = BackgroundTaskConfig::default();
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle1 = runtime.handle();
+     let handle2 = runtime.handle();
+
+     let count = Arc::new(AtomicU64::new(0));
+
+     {
+         let c = count.clone();
+         handle1
+             .spawn(move || {
+                 let counter = c.clone();
+                 async move {
+                     counter.fetch_add(1, Ordering::SeqCst);
+                     Ok(())
+                 }
+             })
+             .expect("spawn failed");
+     }
+
+     {
+         let c = count.clone();
+         handle2
+             .spawn(move || {
+                 let counter = c.clone();
+                 async move {
+                     counter.fetch_add(1, Ordering::SeqCst);
+                     Ok(())
+                 }
+             })
+             .expect("spawn failed");
+     }
+
+     tokio::time::sleep(Duration::from_millis(100)).await;
+
+     assert_eq!(
+         count.load(Ordering::SeqCst),
+         2,
+         "tasks from multiple handles should all execute"
+     );
+
+     runtime.shutdown().await.expect("shutdown should succeed");
+ }
+
+ /// Verifies that resource cleanup occurs after shutdown.
+ /// Observable behavior: runtime can be dropped safely after shutdown.
+ #[tokio::test]
+ async fn test_resource_cleanup_after_shutdown() {
+     let config = BackgroundTaskConfig::default();
+     let runtime = BackgroundRuntime::start(config).await;
+     let handle = runtime.handle();
+
+     handle
+         .spawn(|| async {
+             tokio::time::sleep(Duration::from_millis(10)).await;
+             Ok(())
+         })
+         .expect("spawn failed");
+
+     let shutdown_result = runtime.shutdown().await;
+     assert!(shutdown_result.is_ok(), "shutdown should complete successfully");
+
+     drop(handle);
+ }
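Two patterns from the suite above are worth a second look: backpressure and job metadata. The sketch below shows how a caller might react to a full queue and attach metadata to a job. It reuses only names that appear in the tests (BackgroundSpawnError::QueueFull, BackgroundJobMetadata, spawn_with_metadata); the catch-all error arm and the print statements are illustrative assumptions, not documented behavior.

    use spikard_http::background::{
        BackgroundJobMetadata, BackgroundRuntime, BackgroundSpawnError, BackgroundTaskConfig,
    };
    use std::borrow::Cow;

    #[tokio::main]
    async fn main() {
        let runtime = BackgroundRuntime::start(BackgroundTaskConfig::default()).await;
        let handle = runtime.handle();

        // A full queue is reported as BackgroundSpawnError::QueueFull instead of blocking,
        // so callers can shed load, retry later, or surface an error to the client.
        match handle.spawn(|| async { Ok(()) }) {
            Ok(_) => {}
            Err(BackgroundSpawnError::QueueFull) => eprintln!("background queue full, task dropped"),
            Err(_) => eprintln!("spawn rejected, runtime likely shutting down"),
        }

        // Jobs can carry a name and an optional originating request id; as the tests note,
        // metadata does not change execution, it only travels with the job.
        let metadata = BackgroundJobMetadata {
            name: Cow::Owned("cleanup".to_string()),
            request_id: Some("req-123".to_string()),
        };
        handle
            .spawn_with_metadata(async { Ok(()) }, metadata)
            .expect("spawn failed");

        runtime.shutdown().await.expect("shutdown failed");
    }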