spikard 0.4.0-x86_64-linux
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE +1 -0
- data/README.md +659 -0
- data/ext/spikard_rb/Cargo.toml +17 -0
- data/ext/spikard_rb/extconf.rb +10 -0
- data/ext/spikard_rb/src/lib.rs +6 -0
- data/lib/spikard/app.rb +405 -0
- data/lib/spikard/background.rb +27 -0
- data/lib/spikard/config.rb +396 -0
- data/lib/spikard/converters.rb +13 -0
- data/lib/spikard/handler_wrapper.rb +113 -0
- data/lib/spikard/provide.rb +214 -0
- data/lib/spikard/response.rb +173 -0
- data/lib/spikard/schema.rb +243 -0
- data/lib/spikard/sse.rb +111 -0
- data/lib/spikard/streaming_response.rb +44 -0
- data/lib/spikard/testing.rb +221 -0
- data/lib/spikard/upload_file.rb +131 -0
- data/lib/spikard/version.rb +5 -0
- data/lib/spikard/websocket.rb +59 -0
- data/lib/spikard.rb +43 -0
- data/sig/spikard.rbs +366 -0
- data/vendor/bundle/ruby/3.4.0/gems/diff-lcs-1.6.2/mise.toml +5 -0
- data/vendor/bundle/ruby/3.4.0/gems/rake-compiler-dock-1.10.0/build/buildkitd.toml +2 -0
- data/vendor/crates/spikard-bindings-shared/Cargo.toml +63 -0
- data/vendor/crates/spikard-bindings-shared/examples/config_extraction.rs +139 -0
- data/vendor/crates/spikard-bindings-shared/src/config_extractor.rs +561 -0
- data/vendor/crates/spikard-bindings-shared/src/conversion_traits.rs +194 -0
- data/vendor/crates/spikard-bindings-shared/src/di_traits.rs +246 -0
- data/vendor/crates/spikard-bindings-shared/src/error_response.rs +403 -0
- data/vendor/crates/spikard-bindings-shared/src/handler_base.rs +274 -0
- data/vendor/crates/spikard-bindings-shared/src/lib.rs +25 -0
- data/vendor/crates/spikard-bindings-shared/src/lifecycle_base.rs +298 -0
- data/vendor/crates/spikard-bindings-shared/src/lifecycle_executor.rs +637 -0
- data/vendor/crates/spikard-bindings-shared/src/response_builder.rs +309 -0
- data/vendor/crates/spikard-bindings-shared/src/test_client_base.rs +248 -0
- data/vendor/crates/spikard-bindings-shared/src/validation_helpers.rs +355 -0
- data/vendor/crates/spikard-bindings-shared/tests/comprehensive_coverage.rs +502 -0
- data/vendor/crates/spikard-bindings-shared/tests/error_response_edge_cases.rs +389 -0
- data/vendor/crates/spikard-bindings-shared/tests/handler_base_integration.rs +413 -0
- data/vendor/crates/spikard-core/Cargo.toml +40 -0
- data/vendor/crates/spikard-core/src/bindings/mod.rs +3 -0
- data/vendor/crates/spikard-core/src/bindings/response.rs +133 -0
- data/vendor/crates/spikard-core/src/debug.rs +63 -0
- data/vendor/crates/spikard-core/src/di/container.rs +726 -0
- data/vendor/crates/spikard-core/src/di/dependency.rs +273 -0
- data/vendor/crates/spikard-core/src/di/error.rs +118 -0
- data/vendor/crates/spikard-core/src/di/factory.rs +538 -0
- data/vendor/crates/spikard-core/src/di/graph.rs +545 -0
- data/vendor/crates/spikard-core/src/di/mod.rs +192 -0
- data/vendor/crates/spikard-core/src/di/resolved.rs +411 -0
- data/vendor/crates/spikard-core/src/di/value.rs +283 -0
- data/vendor/crates/spikard-core/src/errors.rs +39 -0
- data/vendor/crates/spikard-core/src/http.rs +153 -0
- data/vendor/crates/spikard-core/src/lib.rs +29 -0
- data/vendor/crates/spikard-core/src/lifecycle.rs +422 -0
- data/vendor/crates/spikard-core/src/metadata.rs +397 -0
- data/vendor/crates/spikard-core/src/parameters.rs +723 -0
- data/vendor/crates/spikard-core/src/problem.rs +310 -0
- data/vendor/crates/spikard-core/src/request_data.rs +189 -0
- data/vendor/crates/spikard-core/src/router.rs +249 -0
- data/vendor/crates/spikard-core/src/schema_registry.rs +183 -0
- data/vendor/crates/spikard-core/src/type_hints.rs +304 -0
- data/vendor/crates/spikard-core/src/validation/error_mapper.rs +689 -0
- data/vendor/crates/spikard-core/src/validation/mod.rs +459 -0
- data/vendor/crates/spikard-http/Cargo.toml +58 -0
- data/vendor/crates/spikard-http/examples/sse-notifications.rs +147 -0
- data/vendor/crates/spikard-http/examples/websocket-chat.rs +91 -0
- data/vendor/crates/spikard-http/src/auth.rs +247 -0
- data/vendor/crates/spikard-http/src/background.rs +1562 -0
- data/vendor/crates/spikard-http/src/bindings/mod.rs +3 -0
- data/vendor/crates/spikard-http/src/bindings/response.rs +1 -0
- data/vendor/crates/spikard-http/src/body_metadata.rs +8 -0
- data/vendor/crates/spikard-http/src/cors.rs +490 -0
- data/vendor/crates/spikard-http/src/debug.rs +63 -0
- data/vendor/crates/spikard-http/src/di_handler.rs +1878 -0
- data/vendor/crates/spikard-http/src/handler_response.rs +532 -0
- data/vendor/crates/spikard-http/src/handler_trait.rs +861 -0
- data/vendor/crates/spikard-http/src/handler_trait_tests.rs +284 -0
- data/vendor/crates/spikard-http/src/lib.rs +524 -0
- data/vendor/crates/spikard-http/src/lifecycle/adapter.rs +149 -0
- data/vendor/crates/spikard-http/src/lifecycle.rs +428 -0
- data/vendor/crates/spikard-http/src/middleware/mod.rs +285 -0
- data/vendor/crates/spikard-http/src/middleware/multipart.rs +930 -0
- data/vendor/crates/spikard-http/src/middleware/urlencoded.rs +541 -0
- data/vendor/crates/spikard-http/src/middleware/validation.rs +287 -0
- data/vendor/crates/spikard-http/src/openapi/mod.rs +309 -0
- data/vendor/crates/spikard-http/src/openapi/parameter_extraction.rs +535 -0
- data/vendor/crates/spikard-http/src/openapi/schema_conversion.rs +867 -0
- data/vendor/crates/spikard-http/src/openapi/spec_generation.rs +678 -0
- data/vendor/crates/spikard-http/src/query_parser.rs +369 -0
- data/vendor/crates/spikard-http/src/response.rs +399 -0
- data/vendor/crates/spikard-http/src/server/handler.rs +1557 -0
- data/vendor/crates/spikard-http/src/server/lifecycle_execution.rs +98 -0
- data/vendor/crates/spikard-http/src/server/mod.rs +806 -0
- data/vendor/crates/spikard-http/src/server/request_extraction.rs +630 -0
- data/vendor/crates/spikard-http/src/server/routing_factory.rs +497 -0
- data/vendor/crates/spikard-http/src/sse.rs +961 -0
- data/vendor/crates/spikard-http/src/testing/form.rs +14 -0
- data/vendor/crates/spikard-http/src/testing/multipart.rs +60 -0
- data/vendor/crates/spikard-http/src/testing/test_client.rs +285 -0
- data/vendor/crates/spikard-http/src/testing.rs +377 -0
- data/vendor/crates/spikard-http/src/websocket.rs +831 -0
- data/vendor/crates/spikard-http/tests/background_behavior.rs +918 -0
- data/vendor/crates/spikard-http/tests/common/handlers.rs +308 -0
- data/vendor/crates/spikard-http/tests/common/mod.rs +21 -0
- data/vendor/crates/spikard-http/tests/di_integration.rs +202 -0
- data/vendor/crates/spikard-http/tests/doc_snippets.rs +4 -0
- data/vendor/crates/spikard-http/tests/lifecycle_execution.rs +1135 -0
- data/vendor/crates/spikard-http/tests/multipart_behavior.rs +688 -0
- data/vendor/crates/spikard-http/tests/server_config_builder.rs +324 -0
- data/vendor/crates/spikard-http/tests/sse_behavior.rs +728 -0
- data/vendor/crates/spikard-http/tests/websocket_behavior.rs +724 -0
- data/vendor/crates/spikard-rb/Cargo.toml +43 -0
- data/vendor/crates/spikard-rb/build.rs +199 -0
- data/vendor/crates/spikard-rb/src/background.rs +63 -0
- data/vendor/crates/spikard-rb/src/config/mod.rs +5 -0
- data/vendor/crates/spikard-rb/src/config/server_config.rs +283 -0
- data/vendor/crates/spikard-rb/src/conversion.rs +459 -0
- data/vendor/crates/spikard-rb/src/di/builder.rs +105 -0
- data/vendor/crates/spikard-rb/src/di/mod.rs +413 -0
- data/vendor/crates/spikard-rb/src/handler.rs +612 -0
- data/vendor/crates/spikard-rb/src/integration/mod.rs +3 -0
- data/vendor/crates/spikard-rb/src/lib.rs +1857 -0
- data/vendor/crates/spikard-rb/src/lifecycle.rs +275 -0
- data/vendor/crates/spikard-rb/src/metadata/mod.rs +5 -0
- data/vendor/crates/spikard-rb/src/metadata/route_extraction.rs +427 -0
- data/vendor/crates/spikard-rb/src/runtime/mod.rs +5 -0
- data/vendor/crates/spikard-rb/src/runtime/server_runner.rs +326 -0
- data/vendor/crates/spikard-rb/src/server.rs +283 -0
- data/vendor/crates/spikard-rb/src/sse.rs +231 -0
- data/vendor/crates/spikard-rb/src/testing/client.rs +404 -0
- data/vendor/crates/spikard-rb/src/testing/mod.rs +7 -0
- data/vendor/crates/spikard-rb/src/testing/sse.rs +143 -0
- data/vendor/crates/spikard-rb/src/testing/websocket.rs +221 -0
- data/vendor/crates/spikard-rb/src/websocket.rs +233 -0
- data/vendor/crates/spikard-rb/tests/magnus_ffi_tests.rs +14 -0
- metadata +213 -0
data/vendor/crates/spikard-http/tests/background_behavior.rs
@@ -0,0 +1,918 @@
+//! Behavioral tests for background task execution in spikard-http.
+//!
+//! These tests focus on observable behavior: task completion, timing, resource cleanup,
+//! and graceful shutdown patterns. They validate end-to-end behavior rather than
+//! implementation details.
+//!
+//! Test categories:
+//! 1. Graceful Shutdown & Draining
+//! 2. Shutdown Timeout Behavior
+//! 3. Task Success/Failure Observable Outcomes
+//! 4. High-Volume Task Queue
+//! 5. Task Execution Order Guarantees
+//! 6. Concurrent Task Execution
+//! 7. Task Cancellation Propagation
+
+use spikard_http::background::{
+    BackgroundJobError, BackgroundJobMetadata, BackgroundRuntime, BackgroundSpawnError, BackgroundTaskConfig,
+};
+use std::borrow::Cow;
+use std::sync::Arc;
+use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
+use std::time::{Duration, Instant};
+
+// ============================================================================
+// Test 1: Graceful Shutdown Drains In-Flight Tasks
+// ============================================================================
+
+/// Verifies that shutdown waits for all in-flight tasks to complete gracefully.
+/// Observable behavior: all spawned tasks complete before shutdown returns Ok.
+#[tokio::test]
+async fn test_graceful_shutdown_drains_all_spawned_tasks() {
+    let config = BackgroundTaskConfig {
+        max_queue_size: 50,
+        max_concurrent_tasks: 5,
+        drain_timeout_secs: 10,
+    };
+
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    let completion_count = Arc::new(AtomicU64::new(0));
+    let task_count = 20;
+
+    // Spawn multiple tasks with realistic delays
+    for _ in 0..task_count {
+        let count = completion_count.clone();
+        handle
+            .spawn(move || {
+                let c = count.clone();
+                async move {
+                    tokio::time::sleep(Duration::from_millis(10)).await;
+                    c.fetch_add(1, Ordering::SeqCst);
+                    Ok(())
+                }
+            })
+            .expect("spawn failed");
+    }
+
+    // Wait briefly for tasks to begin
+    tokio::time::sleep(Duration::from_millis(50)).await;
+
+    // Graceful shutdown should wait for all tasks
+    let shutdown_result = runtime.shutdown().await;
+    assert!(
+        shutdown_result.is_ok(),
+        "shutdown should succeed when draining in-flight tasks"
+    );
+
+    // Observable outcome: all tasks completed
+    assert_eq!(
+        completion_count.load(Ordering::SeqCst),
+        task_count,
+        "all spawned tasks must complete during graceful shutdown"
+    );
+}
+
+/// Verifies that shutdown processes queued tasks before returning.
+/// Observable behavior: both in-flight and queued tasks complete.
+#[tokio::test]
+async fn test_graceful_shutdown_processes_both_inflight_and_queued_tasks() {
+    let config = BackgroundTaskConfig {
+        max_queue_size: 100,
+        max_concurrent_tasks: 2, // Low concurrency to force queueing
+        drain_timeout_secs: 10,
+    };
+
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    let completion_count = Arc::new(AtomicU64::new(0));
+    let task_count = 15;
+
+    // Spawn tasks that will queue due to concurrency limit
+    for _ in 0..task_count {
+        let count = completion_count.clone();
+        handle
+            .spawn(move || {
+                let c = count.clone();
+                async move {
+                    tokio::time::sleep(Duration::from_millis(5)).await;
+                    c.fetch_add(1, Ordering::SeqCst);
+                    Ok(())
+                }
+            })
+            .expect("spawn failed");
+    }
+
+    // Immediate shutdown should drain all queued tasks
+    let shutdown_result = runtime.shutdown().await;
+    assert!(
+        shutdown_result.is_ok(),
+        "shutdown should drain both in-flight and queued tasks"
+    );
+
+    assert_eq!(
+        completion_count.load(Ordering::SeqCst),
+        task_count,
+        "all tasks including queued ones must complete"
+    );
+}
+
+// ============================================================================
+// Test 2: Shutdown Timeout Leaves Incomplete Tasks
+// ============================================================================
+
+/// Verifies that shutdown times out when tasks exceed drain timeout.
+/// Observable behavior: shutdown returns error, incomplete tasks remain unfinished.
+#[tokio::test]
+async fn test_shutdown_timeout_with_long_running_task() {
+    let config = BackgroundTaskConfig {
+        max_queue_size: 10,
+        max_concurrent_tasks: 2,
+        drain_timeout_secs: 1, // Very short timeout
+    };
+
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    let task_completed = Arc::new(AtomicBool::new(false));
+    let completed_clone = task_completed.clone();
+
+    // Spawn a task that takes longer than drain timeout
+    handle
+        .spawn(move || {
+            let c = completed_clone.clone();
+            async move {
+                tokio::time::sleep(Duration::from_secs(10)).await;
+                c.store(true, Ordering::SeqCst);
+                Ok(())
+            }
+        })
+        .expect("spawn failed");
+
+    tokio::time::sleep(Duration::from_millis(50)).await;
+
+    // Observable behavior: shutdown times out
+    let shutdown_result = runtime.shutdown().await;
+    assert!(
+        shutdown_result.is_err(),
+        "shutdown should timeout with incomplete long-running task"
+    );
+
+    // Observable behavior: task was not completed before timeout
+    assert!(
+        !task_completed.load(Ordering::SeqCst),
+        "incomplete task should not complete after shutdown timeout"
+    );
+}
+
+/// Verifies shutdown respects drain timeout duration.
+/// Observable behavior: shutdown duration is approximately the drain_timeout_secs.
+#[tokio::test]
+async fn test_shutdown_timeout_duration_respected() {
+    let drain_timeout_secs = 2;
+    let config = BackgroundTaskConfig {
+        max_queue_size: 10,
+        max_concurrent_tasks: 1,
+        drain_timeout_secs,
+    };
+
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    // Spawn a very long-running task
+    handle
+        .spawn(|| async {
+            tokio::time::sleep(Duration::from_secs(30)).await;
+            Ok(())
+        })
+        .expect("spawn failed");
+
+    tokio::time::sleep(Duration::from_millis(100)).await;
+
+    // Measure shutdown time
+    let shutdown_start = Instant::now();
+    let _ = runtime.shutdown().await;
+    let shutdown_elapsed = shutdown_start.elapsed();
+
+    // Observable behavior: shutdown duration is ~drain_timeout (±1 second tolerance)
+    assert!(
+        shutdown_elapsed >= Duration::from_secs(drain_timeout_secs - 1),
+        "shutdown should wait at least drain_timeout"
+    );
+    assert!(
+        shutdown_elapsed < Duration::from_secs(drain_timeout_secs + 2),
+        "shutdown should not wait much longer than drain_timeout"
+    );
+}
+
+// ============================================================================
+// Test 3: Task Success/Failure Observable Outcomes
+// ============================================================================
+
+/// Verifies that successful tasks complete without affecting other tasks.
+/// Observable behavior: task runs to completion, no side effects on runtime.
+#[tokio::test]
+async fn test_task_success_completes_cleanly() {
+    let config = BackgroundTaskConfig::default();
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    let success_flag = Arc::new(AtomicBool::new(false));
+    let flag_clone = success_flag.clone();
+
+    handle
+        .spawn(move || {
+            let f = flag_clone.clone();
+            async move {
+                tokio::time::sleep(Duration::from_millis(10)).await;
+                f.store(true, Ordering::SeqCst);
+                Ok(())
+            }
+        })
+        .expect("spawn failed");
+
+    tokio::time::sleep(Duration::from_millis(50)).await;
+
+    // Observable behavior: task succeeded and flag set
+    assert!(
+        success_flag.load(Ordering::SeqCst),
+        "successful task should execute and set flag"
+    );
+
+    // Shutdown should complete normally
+    let shutdown_result = runtime.shutdown().await;
+    assert!(shutdown_result.is_ok(), "shutdown should succeed after successful task");
+}
+
+/// Verifies that failed tasks log failure but don't crash the runtime.
+/// Observable behavior: failed task executes, runtime continues accepting new tasks.
+#[tokio::test]
+async fn test_task_failure_doesnt_crash_runtime() {
+    let config = BackgroundTaskConfig::default();
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    let failure_count = Arc::new(AtomicU64::new(0));
+    let success_count = Arc::new(AtomicU64::new(0));
+
+    // Spawn a failing task
+    {
+        let f = failure_count.clone();
+        handle
+            .spawn(move || {
+                let fail = f.clone();
+                async move {
+                    fail.fetch_add(1, Ordering::SeqCst);
+                    Err(BackgroundJobError::from("task error"))
+                }
+            })
+            .expect("spawn failed");
+    }
+
+    tokio::time::sleep(Duration::from_millis(50)).await;
+
+    // Spawn another task after failure
+    {
+        let s = success_count.clone();
+        handle
+            .spawn(move || {
+                let succ = s.clone();
+                async move {
+                    succ.fetch_add(1, Ordering::SeqCst);
+                    Ok(())
+                }
+            })
+            .expect("spawn failed");
+    }
+
+    tokio::time::sleep(Duration::from_millis(100)).await;
+
+    // Observable behavior: both tasks ran despite first one failing
+    assert_eq!(failure_count.load(Ordering::SeqCst), 1, "failed task should execute");
+    assert_eq!(
+        success_count.load(Ordering::SeqCst),
+        1,
+        "task after failure should also execute"
+    );
+
+    let shutdown_result = runtime.shutdown().await;
+    assert!(
+        shutdown_result.is_ok(),
+        "runtime should shutdown cleanly after failed tasks"
+    );
+}
+
+/// Verifies that mixed success/failure tasks all execute during shutdown.
+/// Observable behavior: shutdown drains both successful and failed tasks.
+#[tokio::test]
+async fn test_shutdown_drains_mixed_success_and_failure_tasks() {
+    let config = BackgroundTaskConfig {
+        max_queue_size: 100,
+        max_concurrent_tasks: 5,
+        drain_timeout_secs: 10,
+    };
+
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    let success_count = Arc::new(AtomicU64::new(0));
+    let failure_count = Arc::new(AtomicU64::new(0));
+    let task_count = 20;
+
+    for i in 0..task_count {
+        if i % 2 == 0 {
+            let s = success_count.clone();
+            handle
+                .spawn(move || {
+                    let succ = s.clone();
+                    async move {
+                        succ.fetch_add(1, Ordering::SeqCst);
+                        Ok(())
+                    }
+                })
+                .expect("spawn failed");
+        } else {
+            let f = failure_count.clone();
+            handle
+                .spawn(move || {
+                    let fail = f.clone();
+                    async move {
+                        fail.fetch_add(1, Ordering::SeqCst);
+                        Err(BackgroundJobError::from("intentional failure"))
+                    }
+                })
+                .expect("spawn failed");
+        }
+    }
+
+    let shutdown_result = runtime.shutdown().await;
+    assert!(shutdown_result.is_ok(), "shutdown should drain all tasks");
+
+    // Observable behavior: all tasks executed
+    assert_eq!(
+        success_count.load(Ordering::SeqCst),
+        10,
+        "all successful tasks should execute"
+    );
+    assert_eq!(
+        failure_count.load(Ordering::SeqCst),
+        10,
+        "all failing tasks should execute"
+    );
+}
+
+// ============================================================================
+// Test 4: High-Volume Task Queue (10K+ tasks)
+// ============================================================================
+
+/// Verifies that high-volume queues are processed without resource exhaustion.
+/// Observable behavior: 10K tasks all complete within drain timeout.
+#[tokio::test]
+async fn test_high_volume_queue_10k_tasks() {
+    let task_count = 10_000;
+    let config = BackgroundTaskConfig {
+        max_queue_size: 15_000,
+        max_concurrent_tasks: 50,
+        drain_timeout_secs: 60,
+    };
+
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    let completion_count = Arc::new(AtomicU64::new(0));
+
+    // Spawn 10K tasks
+    for _ in 0..task_count {
+        let count = completion_count.clone();
+        let result = handle.spawn(move || {
+            let c = count.clone();
+            async move {
+                c.fetch_add(1, Ordering::SeqCst);
+                Ok(())
+            }
+        });
+        assert!(result.is_ok(), "spawn should succeed for high-volume queue");
+    }
+
+    // Shutdown should drain all 10K tasks
+    let shutdown_result = runtime.shutdown().await;
+    assert!(shutdown_result.is_ok(), "shutdown should complete high-volume queue");
+
+    // Observable behavior: all 10K tasks completed
+    assert_eq!(
+        completion_count.load(Ordering::SeqCst),
+        task_count as u64,
+        "all 10K tasks must execute"
+    );
+}
+
+/// Verifies queue full behavior under high spawn rate.
+/// Observable behavior: QueueFull errors when queue capacity exceeded.
+#[tokio::test]
+async fn test_high_volume_queue_overflow_behavior() {
+    let config = BackgroundTaskConfig {
+        max_queue_size: 10,
+        max_concurrent_tasks: 50,
+        drain_timeout_secs: 10,
+    };
+
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    let blocking_counter = Arc::new(AtomicU64::new(0));
+    let spawned_count = Arc::new(AtomicU64::new(0));
+
+    // Try to fill queue with more tasks than capacity
+    let mut overflow_error_count = 0;
+    for _ in 0..50 {
+        let counter = blocking_counter.clone();
+        let spawned = spawned_count.clone();
+        let result = handle.spawn(move || {
+            let c = counter.clone();
+            let s = spawned.clone();
+            async move {
+                s.fetch_add(1, Ordering::SeqCst);
+                // Sleep briefly to keep tasks in flight
+                tokio::time::sleep(Duration::from_millis(100)).await;
+                c.fetch_add(1, Ordering::SeqCst);
+                Ok(())
+            }
+        });
+
+        if let Err(BackgroundSpawnError::QueueFull) = result {
+            overflow_error_count += 1;
+        }
+    }
+
+    // Observable behavior: queue full errors when capacity exceeded
+    assert!(
+        overflow_error_count > 0,
+        "should see queue full errors when exceeding capacity"
+    );
+
+    // Shutdown should drain remaining tasks
+    runtime.shutdown().await.expect("shutdown should succeed");
+}
+
+// ============================================================================
+// Test 5: Task Execution Order Guarantees
+// ============================================================================
+
+/// Verifies that multiple tasks execute to completion (order not guaranteed, but all complete).
+/// Observable behavior: all tasks execute despite concurrent nature.
+#[tokio::test]
+async fn test_task_execution_order_all_complete() {
+    let config = BackgroundTaskConfig::default();
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    let execution_log = Arc::new(tokio::sync::Mutex::new(Vec::new()));
+    let task_count = 100;
+
+    for i in 0..task_count {
+        let log = execution_log.clone();
+        handle
+            .spawn(move || {
+                let l = log.clone();
+                async move {
+                    l.lock().await.push(i);
+                    Ok(())
+                }
+            })
+            .expect("spawn failed");
+    }
+
+    tokio::time::sleep(Duration::from_millis(200)).await;
+
+    let log = execution_log.lock().await;
+
+    // Observable behavior: all tasks executed
+    assert_eq!(log.len(), task_count, "all spawned tasks should execute");
+
+    // Observable behavior: each task ran exactly once
+    for i in 0..task_count {
+        let count = log.iter().filter(|&&x| x == i).count();
+        assert_eq!(count, 1, "task {} should execute exactly once", i);
+    }
+
+    runtime.shutdown().await.expect("shutdown should succeed");
+}
+
+/// Verifies FIFO-like behavior when concurrency is limited.
+/// Observable behavior: with 1 concurrent task, tasks execute sequentially.
+#[tokio::test]
+async fn test_sequential_execution_with_single_concurrency() {
+    let config = BackgroundTaskConfig {
+        max_queue_size: 100,
+        max_concurrent_tasks: 1, // Force sequential execution
+        drain_timeout_secs: 30,
+    };
+
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    let execution_order = Arc::new(tokio::sync::Mutex::new(Vec::new()));
+    let task_count = 10;
+
+    for i in 0..task_count {
+        let order = execution_order.clone();
+        handle
+            .spawn(move || {
+                let o = order.clone();
+                async move {
+                    o.lock().await.push(i);
+                    tokio::time::sleep(Duration::from_millis(5)).await;
+                    Ok(())
+                }
+            })
+            .expect("spawn failed");
+    }
+
+    let shutdown_result = runtime.shutdown().await;
+    assert!(shutdown_result.is_ok(), "shutdown should succeed");
+
+    let order = execution_order.lock().await;
+
+    // Observable behavior: all tasks executed
+    assert_eq!(order.len(), task_count, "all tasks should execute");
+
+    // With single concurrency, tasks may not strictly execute in order due to async
+    // scheduling, but they should all complete
+}
+
+// ============================================================================
+// Test 6: Concurrent Task Execution
+// ============================================================================
+
+/// Verifies that concurrent limit is respected during execution.
+/// Observable behavior: peak concurrent tasks <= configured limit.
+#[tokio::test]
+async fn test_concurrent_execution_respects_limit() {
+    let config = BackgroundTaskConfig {
+        max_queue_size: 100,
+        max_concurrent_tasks: 5,
+        drain_timeout_secs: 10,
+    };
+
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    let active_count = Arc::new(AtomicU64::new(0));
+    let peak_count = Arc::new(AtomicU64::new(0));
+    let task_count = 20;
+
+    for _ in 0..task_count {
+        let active = active_count.clone();
+        let peak = peak_count.clone();
+
+        handle
+            .spawn(move || {
+                let a = active.clone();
+                let p = peak.clone();
+
+                async move {
+                    // Increment active count
+                    let current = a.fetch_add(1, Ordering::SeqCst) + 1;
+
+                    // Update peak
+                    let mut peak_val = p.load(Ordering::SeqCst);
+                    while current > peak_val {
+                        if p.compare_exchange(peak_val, current, Ordering::SeqCst, Ordering::SeqCst)
+                            .is_ok()
+                        {
+                            break;
+                        }
+                        peak_val = p.load(Ordering::SeqCst);
+                    }
+
+                    // Hold task for a bit to measure peak
+                    tokio::time::sleep(Duration::from_millis(100)).await;
+                    a.fetch_sub(1, Ordering::SeqCst);
+                    Ok(())
+                }
+            })
+            .expect("spawn failed");
+    }
+
+    // Wait for tasks to execute and measure peak
+    tokio::time::sleep(Duration::from_millis(300)).await;
+
+    let peak = peak_count.load(Ordering::SeqCst);
+
+    // Observable behavior: peak concurrent never exceeds limit
+    assert!(
+        peak <= 5,
+        "peak concurrent tasks ({}) should not exceed limit of 5",
+        peak
+    );
+
+    runtime.shutdown().await.expect("shutdown should succeed");
+}
+
+/// Verifies tasks can run concurrently and interact safely.
+/// Observable behavior: multiple tasks run simultaneously without data races.
+#[tokio::test]
+async fn test_concurrent_tasks_safe_interaction() {
+    let config = BackgroundTaskConfig {
+        max_queue_size: 100,
+        max_concurrent_tasks: 10,
+        drain_timeout_secs: 10,
+    };
+
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    let shared_value = Arc::new(AtomicU64::new(0));
+    let task_count = 50;
+
+    for _ in 0..task_count {
+        let val = shared_value.clone();
+        handle
+            .spawn(move || {
+                let v = val.clone();
+                async move {
+                    // Safe concurrent increment via atomic
+                    v.fetch_add(1, Ordering::SeqCst);
+                    Ok(())
+                }
+            })
+            .expect("spawn failed");
+    }
+
+    tokio::time::sleep(Duration::from_millis(200)).await;
+
+    // Observable behavior: all concurrent increments succeeded
+    assert_eq!(
+        shared_value.load(Ordering::SeqCst),
+        task_count as u64,
+        "concurrent increments should all complete"
+    );
+
+    runtime.shutdown().await.expect("shutdown should succeed");
+}
+
+// ============================================================================
+// Test 7: Task Cancellation Propagation
+// ============================================================================
+
+/// Verifies that shutdown immediately stops accepting new tasks.
+/// Observable behavior: spawn after shutdown signal returns error.
+#[tokio::test]
+async fn test_spawn_fails_after_shutdown_initiated() {
+    let config = BackgroundTaskConfig::default();
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    // Clone handle before shutdown
+    let handle_clone = handle.clone();
+
+    // Shutdown the runtime
+    runtime.shutdown().await.expect("shutdown should succeed");
+
+    // Brief delay to ensure shutdown is complete
+    tokio::time::sleep(Duration::from_millis(50)).await;
+
+    // Observable behavior: spawn after shutdown returns error
+    let result = handle_clone.spawn(|| async { Ok(()) });
+    assert!(result.is_err(), "spawn after shutdown should fail");
+}
+
+/// Verifies that incomplete tasks are cancelled when shutdown times out.
+/// Observable behavior: incomplete task never completes after timeout.
+#[tokio::test]
+async fn test_incomplete_task_cancelled_on_timeout() {
+    let config = BackgroundTaskConfig {
+        max_queue_size: 10,
+        max_concurrent_tasks: 1,
+        drain_timeout_secs: 1,
+    };
+
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    let task_started = Arc::new(AtomicBool::new(false));
+    let task_completed = Arc::new(AtomicBool::new(false));
+    let started = task_started.clone();
+    let completed = task_completed.clone();
+
+    handle
+        .spawn(move || {
+            let s = started.clone();
+            let c = completed.clone();
+            async move {
+                s.store(true, Ordering::SeqCst);
+                tokio::time::sleep(Duration::from_secs(10)).await;
+                c.store(true, Ordering::SeqCst);
+                Ok(())
+            }
+        })
+        .expect("spawn failed");
+
+    tokio::time::sleep(Duration::from_millis(100)).await;
+
+    // Task started before shutdown
+    assert!(task_started.load(Ordering::SeqCst), "task should have started");
+
+    let shutdown_result = runtime.shutdown().await;
+
+    // Observable behavior: shutdown timed out
+    assert!(shutdown_result.is_err(), "shutdown should timeout with incomplete task");
+
+    // Observable behavior: task was not allowed to complete
+    assert!(
+        !task_completed.load(Ordering::SeqCst),
+        "incomplete task should not complete after shutdown timeout"
+    );
+}
+
+/// Verifies task cancellation doesn't affect other tasks.
+/// Observable behavior: other tasks complete normally even if one is cancelled.
+#[tokio::test]
+async fn test_task_cancellation_doesnt_affect_others() {
+    let config = BackgroundTaskConfig {
+        max_queue_size: 100,
+        max_concurrent_tasks: 5,
+        drain_timeout_secs: 1, // Short timeout
+    };
+
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    let short_task_completed = Arc::new(AtomicBool::new(false));
+    let long_task_started = Arc::new(AtomicBool::new(false));
+
+    // Spawn a short task that will complete
+    {
+        let c = short_task_completed.clone();
+        handle
+            .spawn(move || {
+                let completed = c.clone();
+                async move {
+                    tokio::time::sleep(Duration::from_millis(50)).await;
+                    completed.store(true, Ordering::SeqCst);
+                    Ok(())
+                }
+            })
+            .expect("spawn failed");
+    }
+
+    // Spawn a long task that will be cancelled
+    {
+        let s = long_task_started.clone();
+        handle
+            .spawn(move || {
+                let started = s.clone();
+                async move {
+                    started.store(true, Ordering::SeqCst);
+                    tokio::time::sleep(Duration::from_secs(30)).await;
+                    Ok(())
+                }
+            })
+            .expect("spawn failed");
+    }
+
+    tokio::time::sleep(Duration::from_millis(100)).await;
+
+    // Shutdown with timeout
+    let shutdown_result = runtime.shutdown().await;
+    assert!(shutdown_result.is_err(), "shutdown should timeout due to long task");
+
+    // Observable behavior: short task completed, long task started but not completed
+    assert!(
+        short_task_completed.load(Ordering::SeqCst),
+        "short task should have completed before timeout"
+    );
+    assert!(
+        long_task_started.load(Ordering::SeqCst),
+        "long task should have started before timeout"
+    );
+}
+
+// ============================================================================
+// Additional Coverage: Resource Cleanup & Edge Cases
+// ============================================================================
+
+/// Verifies immediate shutdown with no tasks.
+/// Observable behavior: shutdown succeeds quickly with empty queue.
+#[tokio::test]
+async fn test_shutdown_with_no_tasks() {
+    let config = BackgroundTaskConfig::default();
+    let runtime = BackgroundRuntime::start(config).await;
+
+    let start = Instant::now();
+    let result = runtime.shutdown().await;
+    let elapsed = start.elapsed();
+
+    // Observable behavior: shutdown succeeds and completes quickly
+    assert!(result.is_ok(), "shutdown should succeed with no tasks");
+    assert!(
+        elapsed < Duration::from_secs(1),
+        "shutdown with no tasks should be fast"
+    );
+}
+
+/// Verifies task metadata is preserved (metadata doesn't affect execution).
+/// Observable behavior: tasks with metadata execute successfully.
+#[tokio::test]
+async fn test_task_metadata_preserved_execution() {
+    let config = BackgroundTaskConfig::default();
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    let executed = Arc::new(AtomicBool::new(false));
+    let executed_clone = executed.clone();
+
+    let metadata = BackgroundJobMetadata {
+        name: Cow::Owned("test_task".to_string()),
+        request_id: Some("req-123".to_string()),
+    };
+
+    let future = async move {
+        executed_clone.store(true, Ordering::SeqCst);
+        Ok(())
+    };
+
+    handle.spawn_with_metadata(future, metadata).expect("spawn failed");
+
+    tokio::time::sleep(Duration::from_millis(50)).await;
+
+    // Observable behavior: task with metadata executed
+    assert!(executed.load(Ordering::SeqCst), "task with metadata should execute");
+
+    runtime.shutdown().await.expect("shutdown should succeed");
+}
+
+/// Verifies that multiple handles to the same runtime work correctly.
+/// Observable behavior: multiple handle clones spawn tasks independently.
+#[tokio::test]
+async fn test_multiple_handle_clones_spawn_independently() {
+    let config = BackgroundTaskConfig::default();
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle1 = runtime.handle();
+    let handle2 = runtime.handle();
+
+    let count = Arc::new(AtomicU64::new(0));
+
+    // Spawn via first handle
+    {
+        let c = count.clone();
+        handle1
+            .spawn(move || {
+                let counter = c.clone();
+                async move {
+                    counter.fetch_add(1, Ordering::SeqCst);
+                    Ok(())
+                }
+            })
+            .expect("spawn failed");
+    }
+
+    // Spawn via second handle
+    {
+        let c = count.clone();
+        handle2
+            .spawn(move || {
+                let counter = c.clone();
+                async move {
+                    counter.fetch_add(1, Ordering::SeqCst);
+                    Ok(())
+                }
+            })
+            .expect("spawn failed");
+    }
+
+    tokio::time::sleep(Duration::from_millis(100)).await;
+
+    // Observable behavior: tasks from both handles executed
+    assert_eq!(
+        count.load(Ordering::SeqCst),
+        2,
+        "tasks from multiple handles should all execute"
+    );
+
+    runtime.shutdown().await.expect("shutdown should succeed");
+}
+
+/// Verifies that resource cleanup occurs after shutdown.
+/// Observable behavior: runtime can be dropped safely after shutdown.
+#[tokio::test]
+async fn test_resource_cleanup_after_shutdown() {
+    let config = BackgroundTaskConfig::default();
+    let runtime = BackgroundRuntime::start(config).await;
+    let handle = runtime.handle();
+
+    handle
+        .spawn(|| async {
+            tokio::time::sleep(Duration::from_millis(10)).await;
+            Ok(())
+        })
+        .expect("spawn failed");
+
+    let shutdown_result = runtime.shutdown().await;
+    assert!(shutdown_result.is_ok(), "shutdown should complete successfully");
+
+    // Observable behavior: dropping runtime after shutdown is safe (no double-free, panics)
+    drop(handle);
+}
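
For orientation, the API surface these tests exercise boils down to a start/spawn/shutdown cycle. The following is a minimal usage sketch inferred from the test code above, not an example shipped with the crate; the config values are arbitrary, and the expect messages are illustrative.

    use spikard_http::background::{BackgroundRuntime, BackgroundTaskConfig};
    use std::time::Duration;

    #[tokio::main]
    async fn main() {
        // Arbitrary limits; the tests above use values ranging from 10 to 15_000.
        let config = BackgroundTaskConfig {
            max_queue_size: 100,
            max_concurrent_tasks: 4,
            drain_timeout_secs: 5,
        };

        let runtime = BackgroundRuntime::start(config).await;
        let handle = runtime.handle();

        // spawn takes a closure producing a future that resolves to Ok(()) or
        // Err(BackgroundJobError); per the overflow test, it fails fast with
        // BackgroundSpawnError::QueueFull when the queue is at capacity.
        handle
            .spawn(|| async {
                tokio::time::sleep(Duration::from_millis(10)).await;
                Ok(())
            })
            .expect("queue full or runtime shut down");

        // shutdown drains queued and in-flight tasks, returning Err if they
        // outlive drain_timeout_secs (see the timeout tests above).
        runtime.shutdown().await.expect("tasks did not drain in time");
    }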