spikard 0.4.0-arm64-darwin-23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE +1 -0
- data/README.md +659 -0
- data/ext/spikard_rb/Cargo.toml +17 -0
- data/ext/spikard_rb/extconf.rb +10 -0
- data/ext/spikard_rb/src/lib.rs +6 -0
- data/lib/spikard/app.rb +405 -0
- data/lib/spikard/background.rb +27 -0
- data/lib/spikard/config.rb +396 -0
- data/lib/spikard/converters.rb +13 -0
- data/lib/spikard/handler_wrapper.rb +113 -0
- data/lib/spikard/provide.rb +214 -0
- data/lib/spikard/response.rb +173 -0
- data/lib/spikard/schema.rb +243 -0
- data/lib/spikard/sse.rb +111 -0
- data/lib/spikard/streaming_response.rb +44 -0
- data/lib/spikard/testing.rb +221 -0
- data/lib/spikard/upload_file.rb +131 -0
- data/lib/spikard/version.rb +5 -0
- data/lib/spikard/websocket.rb +59 -0
- data/lib/spikard.rb +43 -0
- data/sig/spikard.rbs +366 -0
- data/vendor/bundle/ruby/3.4.0/gems/diff-lcs-1.6.2/mise.toml +5 -0
- data/vendor/bundle/ruby/3.4.0/gems/rake-compiler-dock-1.10.0/build/buildkitd.toml +2 -0
- data/vendor/crates/spikard-bindings-shared/Cargo.toml +63 -0
- data/vendor/crates/spikard-bindings-shared/examples/config_extraction.rs +139 -0
- data/vendor/crates/spikard-bindings-shared/src/config_extractor.rs +561 -0
- data/vendor/crates/spikard-bindings-shared/src/conversion_traits.rs +194 -0
- data/vendor/crates/spikard-bindings-shared/src/di_traits.rs +246 -0
- data/vendor/crates/spikard-bindings-shared/src/error_response.rs +403 -0
- data/vendor/crates/spikard-bindings-shared/src/handler_base.rs +274 -0
- data/vendor/crates/spikard-bindings-shared/src/lib.rs +25 -0
- data/vendor/crates/spikard-bindings-shared/src/lifecycle_base.rs +298 -0
- data/vendor/crates/spikard-bindings-shared/src/lifecycle_executor.rs +637 -0
- data/vendor/crates/spikard-bindings-shared/src/response_builder.rs +309 -0
- data/vendor/crates/spikard-bindings-shared/src/test_client_base.rs +248 -0
- data/vendor/crates/spikard-bindings-shared/src/validation_helpers.rs +355 -0
- data/vendor/crates/spikard-bindings-shared/tests/comprehensive_coverage.rs +502 -0
- data/vendor/crates/spikard-bindings-shared/tests/error_response_edge_cases.rs +389 -0
- data/vendor/crates/spikard-bindings-shared/tests/handler_base_integration.rs +413 -0
- data/vendor/crates/spikard-core/Cargo.toml +40 -0
- data/vendor/crates/spikard-core/src/bindings/mod.rs +3 -0
- data/vendor/crates/spikard-core/src/bindings/response.rs +133 -0
- data/vendor/crates/spikard-core/src/debug.rs +63 -0
- data/vendor/crates/spikard-core/src/di/container.rs +726 -0
- data/vendor/crates/spikard-core/src/di/dependency.rs +273 -0
- data/vendor/crates/spikard-core/src/di/error.rs +118 -0
- data/vendor/crates/spikard-core/src/di/factory.rs +538 -0
- data/vendor/crates/spikard-core/src/di/graph.rs +545 -0
- data/vendor/crates/spikard-core/src/di/mod.rs +192 -0
- data/vendor/crates/spikard-core/src/di/resolved.rs +411 -0
- data/vendor/crates/spikard-core/src/di/value.rs +283 -0
- data/vendor/crates/spikard-core/src/errors.rs +39 -0
- data/vendor/crates/spikard-core/src/http.rs +153 -0
- data/vendor/crates/spikard-core/src/lib.rs +29 -0
- data/vendor/crates/spikard-core/src/lifecycle.rs +422 -0
- data/vendor/crates/spikard-core/src/metadata.rs +397 -0
- data/vendor/crates/spikard-core/src/parameters.rs +723 -0
- data/vendor/crates/spikard-core/src/problem.rs +310 -0
- data/vendor/crates/spikard-core/src/request_data.rs +189 -0
- data/vendor/crates/spikard-core/src/router.rs +249 -0
- data/vendor/crates/spikard-core/src/schema_registry.rs +183 -0
- data/vendor/crates/spikard-core/src/type_hints.rs +304 -0
- data/vendor/crates/spikard-core/src/validation/error_mapper.rs +689 -0
- data/vendor/crates/spikard-core/src/validation/mod.rs +459 -0
- data/vendor/crates/spikard-http/Cargo.toml +58 -0
- data/vendor/crates/spikard-http/examples/sse-notifications.rs +147 -0
- data/vendor/crates/spikard-http/examples/websocket-chat.rs +91 -0
- data/vendor/crates/spikard-http/src/auth.rs +247 -0
- data/vendor/crates/spikard-http/src/background.rs +1562 -0
- data/vendor/crates/spikard-http/src/bindings/mod.rs +3 -0
- data/vendor/crates/spikard-http/src/bindings/response.rs +1 -0
- data/vendor/crates/spikard-http/src/body_metadata.rs +8 -0
- data/vendor/crates/spikard-http/src/cors.rs +490 -0
- data/vendor/crates/spikard-http/src/debug.rs +63 -0
- data/vendor/crates/spikard-http/src/di_handler.rs +1878 -0
- data/vendor/crates/spikard-http/src/handler_response.rs +532 -0
- data/vendor/crates/spikard-http/src/handler_trait.rs +861 -0
- data/vendor/crates/spikard-http/src/handler_trait_tests.rs +284 -0
- data/vendor/crates/spikard-http/src/lib.rs +524 -0
- data/vendor/crates/spikard-http/src/lifecycle/adapter.rs +149 -0
- data/vendor/crates/spikard-http/src/lifecycle.rs +428 -0
- data/vendor/crates/spikard-http/src/middleware/mod.rs +285 -0
- data/vendor/crates/spikard-http/src/middleware/multipart.rs +930 -0
- data/vendor/crates/spikard-http/src/middleware/urlencoded.rs +541 -0
- data/vendor/crates/spikard-http/src/middleware/validation.rs +287 -0
- data/vendor/crates/spikard-http/src/openapi/mod.rs +309 -0
- data/vendor/crates/spikard-http/src/openapi/parameter_extraction.rs +535 -0
- data/vendor/crates/spikard-http/src/openapi/schema_conversion.rs +867 -0
- data/vendor/crates/spikard-http/src/openapi/spec_generation.rs +678 -0
- data/vendor/crates/spikard-http/src/query_parser.rs +369 -0
- data/vendor/crates/spikard-http/src/response.rs +399 -0
- data/vendor/crates/spikard-http/src/server/handler.rs +1557 -0
- data/vendor/crates/spikard-http/src/server/lifecycle_execution.rs +98 -0
- data/vendor/crates/spikard-http/src/server/mod.rs +806 -0
- data/vendor/crates/spikard-http/src/server/request_extraction.rs +630 -0
- data/vendor/crates/spikard-http/src/server/routing_factory.rs +497 -0
- data/vendor/crates/spikard-http/src/sse.rs +961 -0
- data/vendor/crates/spikard-http/src/testing/form.rs +14 -0
- data/vendor/crates/spikard-http/src/testing/multipart.rs +60 -0
- data/vendor/crates/spikard-http/src/testing/test_client.rs +285 -0
- data/vendor/crates/spikard-http/src/testing.rs +377 -0
- data/vendor/crates/spikard-http/src/websocket.rs +831 -0
- data/vendor/crates/spikard-http/tests/background_behavior.rs +918 -0
- data/vendor/crates/spikard-http/tests/common/handlers.rs +308 -0
- data/vendor/crates/spikard-http/tests/common/mod.rs +21 -0
- data/vendor/crates/spikard-http/tests/di_integration.rs +202 -0
- data/vendor/crates/spikard-http/tests/doc_snippets.rs +4 -0
- data/vendor/crates/spikard-http/tests/lifecycle_execution.rs +1135 -0
- data/vendor/crates/spikard-http/tests/multipart_behavior.rs +688 -0
- data/vendor/crates/spikard-http/tests/server_config_builder.rs +324 -0
- data/vendor/crates/spikard-http/tests/sse_behavior.rs +728 -0
- data/vendor/crates/spikard-http/tests/websocket_behavior.rs +724 -0
- data/vendor/crates/spikard-rb/Cargo.toml +43 -0
- data/vendor/crates/spikard-rb/build.rs +199 -0
- data/vendor/crates/spikard-rb/src/background.rs +63 -0
- data/vendor/crates/spikard-rb/src/config/mod.rs +5 -0
- data/vendor/crates/spikard-rb/src/config/server_config.rs +283 -0
- data/vendor/crates/spikard-rb/src/conversion.rs +459 -0
- data/vendor/crates/spikard-rb/src/di/builder.rs +105 -0
- data/vendor/crates/spikard-rb/src/di/mod.rs +413 -0
- data/vendor/crates/spikard-rb/src/handler.rs +612 -0
- data/vendor/crates/spikard-rb/src/integration/mod.rs +3 -0
- data/vendor/crates/spikard-rb/src/lib.rs +1857 -0
- data/vendor/crates/spikard-rb/src/lifecycle.rs +275 -0
- data/vendor/crates/spikard-rb/src/metadata/mod.rs +5 -0
- data/vendor/crates/spikard-rb/src/metadata/route_extraction.rs +427 -0
- data/vendor/crates/spikard-rb/src/runtime/mod.rs +5 -0
- data/vendor/crates/spikard-rb/src/runtime/server_runner.rs +326 -0
- data/vendor/crates/spikard-rb/src/server.rs +283 -0
- data/vendor/crates/spikard-rb/src/sse.rs +231 -0
- data/vendor/crates/spikard-rb/src/testing/client.rs +404 -0
- data/vendor/crates/spikard-rb/src/testing/mod.rs +7 -0
- data/vendor/crates/spikard-rb/src/testing/sse.rs +143 -0
- data/vendor/crates/spikard-rb/src/testing/websocket.rs +221 -0
- data/vendor/crates/spikard-rb/src/websocket.rs +233 -0
- data/vendor/crates/spikard-rb/tests/magnus_ffi_tests.rs +14 -0
- metadata +213 -0
@@ -0,0 +1,1562 @@
use std::borrow::Cow;
use std::sync::Arc;
use std::time::Duration;

use futures::FutureExt;
use futures::future::BoxFuture;
use tokio::sync::{Semaphore, mpsc};
use tokio::task::JoinSet;
use tokio::time::timeout;
use tokio_util::sync::CancellationToken;

/// Configuration for in-process background task execution.
#[derive(Clone, Debug)]
pub struct BackgroundTaskConfig {
    pub max_queue_size: usize,
    pub max_concurrent_tasks: usize,
    pub drain_timeout_secs: u64,
}

impl Default for BackgroundTaskConfig {
    fn default() -> Self {
        Self {
            max_queue_size: 1024,
            max_concurrent_tasks: 128,
            drain_timeout_secs: 30,
        }
    }
}
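
// Illustrative sketch, not part of the vendored file: the defaults above are
// overridden per field where a caller needs tighter limits, as the tests in
// this module do, e.g.
//
//     let config = BackgroundTaskConfig {
//         max_queue_size: 2,
//         max_concurrent_tasks: 10,
//         drain_timeout_secs: 5,
//     };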

#[derive(Clone, Debug)]
pub struct BackgroundJobMetadata {
    pub name: Cow<'static, str>,
    pub request_id: Option<String>,
}

impl Default for BackgroundJobMetadata {
    fn default() -> Self {
        Self {
            name: Cow::Borrowed("background_task"),
            request_id: None,
        }
    }
}

pub type BackgroundJobFuture = BoxFuture<'static, Result<(), BackgroundJobError>>;

struct BackgroundJob {
    pub future: BackgroundJobFuture,
    pub metadata: BackgroundJobMetadata,
}

impl BackgroundJob {
    fn new<F>(future: F, metadata: BackgroundJobMetadata) -> Self
    where
        F: futures::Future<Output = Result<(), BackgroundJobError>> + Send + 'static,
    {
        Self {
            future: future.boxed(),
            metadata,
        }
    }
}

#[derive(Debug, Clone)]
pub struct BackgroundJobError {
    pub message: String,
}

impl From<String> for BackgroundJobError {
    fn from(message: String) -> Self {
        Self { message }
    }
}

impl From<&str> for BackgroundJobError {
    fn from(message: &str) -> Self {
        Self {
            message: message.to_string(),
        }
    }
}

#[derive(Debug, Clone)]
pub enum BackgroundSpawnError {
    QueueFull,
}

impl std::fmt::Display for BackgroundSpawnError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            BackgroundSpawnError::QueueFull => write!(f, "background task queue is full"),
        }
    }
}

impl std::error::Error for BackgroundSpawnError {}

#[derive(Debug)]
pub struct BackgroundShutdownError;

#[derive(Default)]
struct BackgroundMetrics {
    queued: std::sync::atomic::AtomicU64,
    running: std::sync::atomic::AtomicU64,
    failed: std::sync::atomic::AtomicU64,
}

impl BackgroundMetrics {
    fn inc_queued(&self) {
        self.queued.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
    }

    fn dec_queued(&self) {
        self.queued.fetch_sub(1, std::sync::atomic::Ordering::Relaxed);
    }

    fn inc_running(&self) {
        self.running.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
    }

    fn dec_running(&self) {
        self.running.fetch_sub(1, std::sync::atomic::Ordering::Relaxed);
    }

    fn inc_failed(&self) {
        self.failed.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
    }
}

#[derive(Clone)]
pub struct BackgroundHandle {
    sender: mpsc::Sender<BackgroundJob>,
    metrics: Arc<BackgroundMetrics>,
}

impl BackgroundHandle {
    pub fn spawn<F, Fut>(&self, f: F) -> Result<(), BackgroundSpawnError>
    where
        F: FnOnce() -> Fut,
        Fut: futures::Future<Output = Result<(), BackgroundJobError>> + Send + 'static,
    {
        let future = f();
        self.spawn_with_metadata(future, BackgroundJobMetadata::default())
    }

    pub fn spawn_with_metadata<Fut>(
        &self,
        future: Fut,
        metadata: BackgroundJobMetadata,
    ) -> Result<(), BackgroundSpawnError>
    where
        Fut: futures::Future<Output = Result<(), BackgroundJobError>> + Send + 'static,
    {
        self.metrics.inc_queued();
        let job = BackgroundJob::new(future, metadata);
        self.sender.try_send(job).map_err(|_| {
            self.metrics.dec_queued();
            BackgroundSpawnError::QueueFull
        })
    }
}
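
// Illustrative sketch, not part of the vendored file: `spawn` builds the job
// future from the closure and enqueues it with `try_send`, so a full queue is
// surfaced as back-pressure rather than blocking the caller. `do_work` below
// is a hypothetical async fn standing in for the caller's job.
//
//     match handle.spawn(|| async { do_work().await; Ok(()) }) {
//         Ok(()) => {}
//         Err(BackgroundSpawnError::QueueFull) => { /* shed load or retry */ }
//     }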

pub struct BackgroundRuntime {
    handle: BackgroundHandle,
    drain_timeout: Duration,
    shutdown_token: CancellationToken,
    join_handle: tokio::task::JoinHandle<()>,
}

impl BackgroundRuntime {
    pub async fn start(config: BackgroundTaskConfig) -> Self {
        let (tx, rx) = mpsc::channel(config.max_queue_size);
        let metrics = Arc::new(BackgroundMetrics::default());
        let handle = BackgroundHandle {
            sender: tx.clone(),
            metrics: metrics.clone(),
        };
        let shutdown_token = CancellationToken::new();
        let semaphore = Arc::new(Semaphore::new(config.max_concurrent_tasks));
        let driver_token = shutdown_token.clone();

        let join_handle = tokio::spawn(run_executor(rx, semaphore, metrics.clone(), driver_token));

        Self {
            handle,
            drain_timeout: Duration::from_secs(config.drain_timeout_secs),
            shutdown_token,
            join_handle,
        }
    }

    pub fn handle(&self) -> BackgroundHandle {
        self.handle.clone()
    }

    pub async fn shutdown(self) -> Result<(), BackgroundShutdownError> {
        self.shutdown_token.cancel();
        drop(self.handle);
        match timeout(self.drain_timeout, self.join_handle).await {
            Ok(Ok(_)) => Ok(()),
            _ => Err(BackgroundShutdownError),
        }
    }
}
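
// Illustrative lifecycle sketch, not part of the vendored file, mirroring the
// tests below: start the runtime, clone handles as needed, then drain on
// shutdown; shutdown errors if draining exceeds `drain_timeout_secs`.
//
//     let runtime = BackgroundRuntime::start(BackgroundTaskConfig::default()).await;
//     let handle = runtime.handle();
//     handle.spawn(|| async { Ok(()) }).expect("queue full");
//     runtime.shutdown().await.expect("drain timed out");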

async fn run_executor(
    mut rx: mpsc::Receiver<BackgroundJob>,
    semaphore: Arc<Semaphore>,
    metrics: Arc<BackgroundMetrics>,
    token: CancellationToken,
) {
    let mut join_set = JoinSet::new();
    let token_clone = token.clone();

    // Phase 1: Accept new jobs until shutdown signal
    loop {
        tokio::select! {
            maybe_job = rx.recv() => {
                match maybe_job {
                    Some(job) => {
                        metrics.dec_queued();
                        let semaphore = semaphore.clone();
                        let metrics_clone = metrics.clone();
                        join_set.spawn(async move {
                            let BackgroundJob { future, metadata } = job;
                            // Acquire permit - this may block if at max concurrency
                            // During shutdown, the drain will wait for all spawned tasks
                            match semaphore.acquire_owned().await {
                                Ok(_permit) => {
                                    metrics_clone.inc_running();
                                    if let Err(err) = future.await {
                                        metrics_clone.inc_failed();
                                        tracing::error!(target = "spikard::background", task = %metadata.name, error = %err.message, "background task failed");
                                    }
                                    metrics_clone.dec_running();
                                }
                                Err(_) => {
                                    // Semaphore acquisition failed - this task will never run
                                    // We already decremented queued above, so metrics are consistent
                                    metrics_clone.inc_failed();
                                    tracing::warn!(target = "spikard::background", "failed to acquire semaphore permit for background task");
                                }
                            }
                        });
                    }
                    None => break,
                }
            }
            _ = token_clone.cancelled() => {
                // Shutdown signal received; exit phase 1 and begin phase 2 (draining)
                break;
            }
        }
    }

    // Phase 2: Drain remaining queued jobs without the cancel token check.
    // The shutdown() function drops its handle copy, but handle clones may still exist,
    // so we use try_recv in a loop to check for remaining messages without blocking forever.
    let mut drain_attempts = 0;
    loop {
        match rx.try_recv() {
            Ok(job) => {
                metrics.dec_queued();
                let semaphore = semaphore.clone();
                let metrics_clone = metrics.clone();
                join_set.spawn(async move {
                    let BackgroundJob { future, metadata } = job;
                    match semaphore.acquire_owned().await {
                        Ok(_permit) => {
                            metrics_clone.inc_running();
                            if let Err(err) = future.await {
                                metrics_clone.inc_failed();
                                tracing::error!(target = "spikard::background", task = %metadata.name, error = %err.message, "background task failed");
                            }
                            metrics_clone.dec_running();
                        }
                        Err(_) => {
                            metrics_clone.inc_failed();
                            tracing::warn!(target = "spikard::background", "failed to acquire semaphore permit for background task");
                        }
                    }
                });
                drain_attempts = 0;
            }
            Err(mpsc::error::TryRecvError::Empty) => {
                // Queue is empty but sender might still be held by clones in user code.
                // Wait a bit and retry, but give up after ~1 second of empty checks.
                drain_attempts += 1;
                if drain_attempts > 100 {
                    break;
                }
                tokio::time::sleep(Duration::from_millis(10)).await;
            }
            Err(mpsc::error::TryRecvError::Disconnected) => {
                // All senders dropped; nothing more to drain
                break;
            }
        }
    }

    // Wait for all spawned tasks to complete before returning
    while join_set.join_next().await.is_some() {}
}
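
// Editorial note: the TODO in the test module below reports that graceful
// drain does not work reliably with semaphore-limited concurrency in this
// version (a shutdown/semaphore issue it attributes to this executor loop).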
|
|
304
|
+
|
|
305
|
+
#[cfg(test)]
|
|
306
|
+
mod tests {
|
|
307
|
+
use super::*;
|
|
308
|
+
use std::sync::atomic::{AtomicU64, Ordering};
|
|
309
|
+
|
|
310
|
+
#[tokio::test]
|
|
311
|
+
async fn test_basic_spawn_and_execution() {
|
|
312
|
+
let runtime = BackgroundRuntime::start(BackgroundTaskConfig::default()).await;
|
|
313
|
+
let handle = runtime.handle();
|
|
314
|
+
|
|
315
|
+
let counter = Arc::new(AtomicU64::new(0));
|
|
316
|
+
let counter_clone = counter.clone();
|
|
317
|
+
|
|
318
|
+
handle
|
|
319
|
+
.spawn(move || {
|
|
320
|
+
let c = counter_clone.clone();
|
|
321
|
+
async move {
|
|
322
|
+
c.fetch_add(1, Ordering::SeqCst);
|
|
323
|
+
Ok(())
|
|
324
|
+
}
|
|
325
|
+
})
|
|
326
|
+
.expect("spawn failed");
|
|
327
|
+
|
|
328
|
+
tokio::time::sleep(Duration::from_millis(100)).await;
|
|
329
|
+
assert_eq!(counter.load(Ordering::SeqCst), 1);
|
|
330
|
+
|
|
331
|
+
runtime.shutdown().await.expect("shutdown failed");
|
|
332
|
+
}
|
|
333
|
+
|
|
334
|
+
#[tokio::test]
|
|
335
|
+
async fn test_multiple_tasks() {
|
|
336
|
+
let runtime = BackgroundRuntime::start(BackgroundTaskConfig::default()).await;
|
|
337
|
+
let handle = runtime.handle();
|
|
338
|
+
|
|
339
|
+
let counter = Arc::new(AtomicU64::new(0));
|
|
340
|
+
|
|
341
|
+
for _ in 0..10 {
|
|
342
|
+
let counter_clone = counter.clone();
|
|
343
|
+
handle
|
|
344
|
+
.spawn(move || {
|
|
345
|
+
let c = counter_clone.clone();
|
|
346
|
+
async move {
|
|
347
|
+
c.fetch_add(1, Ordering::SeqCst);
|
|
348
|
+
Ok(())
|
|
349
|
+
}
|
|
350
|
+
})
|
|
351
|
+
.expect("spawn failed");
|
|
352
|
+
}
|
|
353
|
+
|
|
354
|
+
tokio::time::sleep(Duration::from_millis(200)).await;
|
|
355
|
+
assert_eq!(counter.load(Ordering::SeqCst), 10);
|
|
356
|
+
|
|
357
|
+
runtime.shutdown().await.expect("shutdown failed");
|
|
358
|
+
}
|
|
359
|
+
|
|
360
|
+
#[tokio::test]
|
|
361
|
+
async fn test_task_with_metadata() {
|
|
362
|
+
let runtime = BackgroundRuntime::start(BackgroundTaskConfig::default()).await;
|
|
363
|
+
let handle = runtime.handle();
|
|
364
|
+
|
|
365
|
+
let metadata = BackgroundJobMetadata {
|
|
366
|
+
name: Cow::Owned("test_task".to_string()),
|
|
367
|
+
request_id: Some("req-123".to_string()),
|
|
368
|
+
};
|
|
369
|
+
|
|
370
|
+
let counter = Arc::new(AtomicU64::new(0));
|
|
371
|
+
let counter_clone = counter.clone();
|
|
372
|
+
|
|
373
|
+
let future = async move {
|
|
374
|
+
counter_clone.fetch_add(1, Ordering::SeqCst);
|
|
375
|
+
Ok(())
|
|
376
|
+
};
|
|
377
|
+
|
|
378
|
+
handle.spawn_with_metadata(future, metadata).expect("spawn failed");
|
|
379
|
+
|
|
380
|
+
tokio::time::sleep(Duration::from_millis(100)).await;
|
|
381
|
+
assert_eq!(counter.load(Ordering::SeqCst), 1);
|
|
382
|
+
|
|
383
|
+
runtime.shutdown().await.expect("shutdown failed");
|
|
384
|
+
}
|
|
385
|
+
|
|
386
|
+
#[tokio::test]
|
|
387
|
+
async fn test_queue_full_error() {
|
|
388
|
+
let config = BackgroundTaskConfig {
|
|
389
|
+
max_queue_size: 2,
|
|
390
|
+
max_concurrent_tasks: 10,
|
|
391
|
+
drain_timeout_secs: 5,
|
|
392
|
+
};
|
|
393
|
+
|
|
394
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
395
|
+
let handle = runtime.handle();
|
|
396
|
+
|
|
397
|
+
// Spawn slow tasks to fill the queue and keep them running
|
|
398
|
+
let blocking_barrier = Arc::new(tokio::sync::Barrier::new(3));
|
|
399
|
+
|
|
400
|
+
for _ in 0..2 {
|
|
401
|
+
let barrier = blocking_barrier.clone();
|
|
402
|
+
handle
|
|
403
|
+
.spawn(move || {
|
|
404
|
+
let b = barrier.clone();
|
|
405
|
+
async move {
|
|
406
|
+
b.wait().await;
|
|
407
|
+
tokio::time::sleep(Duration::from_secs(1)).await;
|
|
408
|
+
Ok(())
|
|
409
|
+
}
|
|
410
|
+
})
|
|
411
|
+
.expect("spawn failed");
|
|
412
|
+
}
|
|
413
|
+
|
|
414
|
+
// Now queue is full, next spawn should fail
|
|
415
|
+
let result = handle.spawn(move || async { Ok(()) });
|
|
416
|
+
assert!(matches!(result, Err(BackgroundSpawnError::QueueFull)));
|
|
417
|
+
|
|
418
|
+
blocking_barrier.wait().await;
|
|
419
|
+
tokio::time::sleep(Duration::from_millis(100)).await;
|
|
420
|
+
|
|
421
|
+
runtime.shutdown().await.expect("shutdown failed");
|
|
422
|
+
}
|
|
423
|
+
|
|
424
|
+
#[tokio::test]
|
|
425
|
+
async fn test_task_failure_handling() {
|
|
426
|
+
let runtime = BackgroundRuntime::start(BackgroundTaskConfig::default()).await;
|
|
427
|
+
let handle = runtime.handle();
|
|
428
|
+
|
|
429
|
+
let success_count = Arc::new(AtomicU64::new(0));
|
|
430
|
+
let success_count_clone = success_count.clone();
|
|
431
|
+
|
|
432
|
+
// Spawn a failing task
|
|
433
|
+
handle
|
|
434
|
+
.spawn(move || {
|
|
435
|
+
let s = success_count_clone.clone();
|
|
436
|
+
async move {
|
|
437
|
+
s.fetch_add(1, Ordering::SeqCst);
|
|
438
|
+
Err(BackgroundJobError::from("test error"))
|
|
439
|
+
}
|
|
440
|
+
})
|
|
441
|
+
.expect("spawn failed");
|
|
442
|
+
|
|
443
|
+
tokio::time::sleep(Duration::from_millis(100)).await;
|
|
444
|
+
// Success count still increments (it ran), but failure metrics increment too
|
|
445
|
+
assert_eq!(success_count.load(Ordering::SeqCst), 1);
|
|
446
|
+
|
|
447
|
+
runtime.shutdown().await.expect("shutdown failed");
|
|
448
|
+
}
|
|
449
|
+
|
|
450
|
+
#[tokio::test(flavor = "multi_thread")]
|
|
451
|
+
async fn test_concurrency_limit_with_proper_synchronization() {
|
|
452
|
+
let config = BackgroundTaskConfig {
|
|
453
|
+
max_queue_size: 100,
|
|
454
|
+
max_concurrent_tasks: 2,
|
|
455
|
+
drain_timeout_secs: 30,
|
|
456
|
+
};
|
|
457
|
+
|
|
458
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
459
|
+
let handle = runtime.handle();
|
|
460
|
+
|
|
461
|
+
let running_count = Arc::new(AtomicU64::new(0));
|
|
462
|
+
let max_concurrent = Arc::new(AtomicU64::new(0));
|
|
463
|
+
|
|
464
|
+
for _ in 0..5 {
|
|
465
|
+
let running = running_count.clone();
|
|
466
|
+
let max = max_concurrent.clone();
|
|
467
|
+
|
|
468
|
+
handle
|
|
469
|
+
.spawn(move || {
|
|
470
|
+
let r = running.clone();
|
|
471
|
+
let m = max.clone();
|
|
472
|
+
async move {
|
|
473
|
+
r.fetch_add(1, Ordering::SeqCst);
|
|
474
|
+
let current_running = r.load(Ordering::SeqCst);
|
|
475
|
+
let mut current_max = m.load(Ordering::SeqCst);
|
|
476
|
+
while current_running > current_max {
|
|
477
|
+
m.store(current_running, Ordering::SeqCst);
|
|
478
|
+
current_max = current_running;
|
|
479
|
+
}
|
|
480
|
+
|
|
481
|
+
tokio::time::sleep(Duration::from_millis(100)).await;
|
|
482
|
+
r.fetch_sub(1, Ordering::SeqCst);
|
|
483
|
+
Ok(())
|
|
484
|
+
}
|
|
485
|
+
})
|
|
486
|
+
.expect("spawn failed");
|
|
487
|
+
}
|
|
488
|
+
|
|
489
|
+
// Wait for all tasks to start and monitor max concurrent
|
|
490
|
+
tokio::time::sleep(Duration::from_millis(700)).await;
|
|
491
|
+
let max_concurrent_observed = max_concurrent.load(Ordering::SeqCst);
|
|
492
|
+
assert!(
|
|
493
|
+
max_concurrent_observed <= 2,
|
|
494
|
+
"Max concurrent should be <= 2, but was {}",
|
|
495
|
+
max_concurrent_observed
|
|
496
|
+
);
|
|
497
|
+
|
|
498
|
+
runtime.shutdown().await.expect("shutdown failed");
|
|
499
|
+
}
|
|
500
|
+
|
|
501
|
+
#[tokio::test]
|
|
502
|
+
async fn test_graceful_shutdown() {
|
|
503
|
+
let config = BackgroundTaskConfig {
|
|
504
|
+
max_queue_size: 10,
|
|
505
|
+
max_concurrent_tasks: 2,
|
|
506
|
+
drain_timeout_secs: 5,
|
|
507
|
+
};
|
|
508
|
+
|
|
509
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
510
|
+
let handle = runtime.handle();
|
|
511
|
+
|
|
512
|
+
let counter = Arc::new(AtomicU64::new(0));
|
|
513
|
+
let counter_clone = counter.clone();
|
|
514
|
+
|
|
515
|
+
handle
|
|
516
|
+
.spawn(move || {
|
|
517
|
+
let c = counter_clone.clone();
|
|
518
|
+
async move {
|
|
519
|
+
tokio::time::sleep(Duration::from_millis(50)).await;
|
|
520
|
+
c.fetch_add(1, Ordering::SeqCst);
|
|
521
|
+
Ok(())
|
|
522
|
+
}
|
|
523
|
+
})
|
|
524
|
+
.expect("spawn failed");
|
|
525
|
+
|
|
526
|
+
tokio::time::sleep(Duration::from_millis(200)).await;
|
|
527
|
+
|
|
528
|
+
// Shutdown should have already completed tasks
|
|
529
|
+
let result = runtime.shutdown().await;
|
|
530
|
+
assert!(result.is_ok());
|
|
531
|
+
assert_eq!(counter.load(Ordering::SeqCst), 1);
|
|
532
|
+
}
|
|
533
|
+
|
|
534
|
+
#[tokio::test]
|
|
535
|
+
async fn test_shutdown_timeout() {
|
|
536
|
+
let config = BackgroundTaskConfig {
|
|
537
|
+
max_queue_size: 10,
|
|
538
|
+
max_concurrent_tasks: 2,
|
|
539
|
+
drain_timeout_secs: 1,
|
|
540
|
+
};
|
|
541
|
+
|
|
542
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
543
|
+
let handle = runtime.handle();
|
|
544
|
+
|
|
545
|
+
// Spawn a task that takes longer than the drain timeout
|
|
546
|
+
handle
|
|
547
|
+
.spawn(|| async {
|
|
548
|
+
tokio::time::sleep(Duration::from_secs(5)).await;
|
|
549
|
+
Ok(())
|
|
550
|
+
})
|
|
551
|
+
.expect("spawn failed");
|
|
552
|
+
|
|
553
|
+
tokio::time::sleep(Duration::from_millis(100)).await;
|
|
554
|
+
|
|
555
|
+
let result = runtime.shutdown().await;
|
|
556
|
+
// Should timeout
|
|
557
|
+
assert!(result.is_err());
|
|
558
|
+
}
|
|
559
|
+
|
|
560
|
+
#[tokio::test]
|
|
561
|
+
async fn test_metrics_tracking() {
|
|
562
|
+
let config = BackgroundTaskConfig::default();
|
|
563
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
564
|
+
let handle = runtime.handle();
|
|
565
|
+
|
|
566
|
+
let barrier = Arc::new(tokio::sync::Barrier::new(2));
|
|
567
|
+
|
|
568
|
+
for _ in 0..2 {
|
|
569
|
+
let b = barrier.clone();
|
|
570
|
+
let _ = handle.spawn(move || {
|
|
571
|
+
let barrier = b.clone();
|
|
572
|
+
async move {
|
|
573
|
+
barrier.wait().await;
|
|
574
|
+
Ok(())
|
|
575
|
+
}
|
|
576
|
+
});
|
|
577
|
+
}
|
|
578
|
+
|
|
579
|
+
tokio::time::sleep(Duration::from_millis(150)).await;
|
|
580
|
+
|
|
581
|
+
runtime.shutdown().await.expect("shutdown failed");
|
|
582
|
+
}
|
|
583
|
+
|
|
584
|
+
#[tokio::test]
|
|
585
|
+
async fn test_task_cancellation_on_shutdown() {
|
|
586
|
+
let config = BackgroundTaskConfig {
|
|
587
|
+
max_queue_size: 10,
|
|
588
|
+
max_concurrent_tasks: 2,
|
|
589
|
+
drain_timeout_secs: 1,
|
|
590
|
+
};
|
|
591
|
+
|
|
592
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
593
|
+
let handle = runtime.handle();
|
|
594
|
+
|
|
595
|
+
let started_count = Arc::new(AtomicU64::new(0));
|
|
596
|
+
let _completed_count = Arc::new(AtomicU64::new(0));
|
|
597
|
+
|
|
598
|
+
// Spawn a task that won't complete
|
|
599
|
+
let started = started_count.clone();
|
|
600
|
+
|
|
601
|
+
handle
|
|
602
|
+
.spawn(move || {
|
|
603
|
+
let s = started.clone();
|
|
604
|
+
async move {
|
|
605
|
+
s.fetch_add(1, Ordering::SeqCst);
|
|
606
|
+
// Simulate a long-running operation
|
|
607
|
+
tokio::time::sleep(Duration::from_secs(10)).await;
|
|
608
|
+
Ok(())
|
|
609
|
+
}
|
|
610
|
+
})
|
|
611
|
+
.expect("spawn failed");
|
|
612
|
+
|
|
613
|
+
tokio::time::sleep(Duration::from_millis(100)).await;
|
|
614
|
+
assert_eq!(started_count.load(Ordering::SeqCst), 1);
|
|
615
|
+
|
|
616
|
+
// Shutdown with short timeout - should not wait for the full 10 seconds
|
|
617
|
+
let shutdown_start = std::time::Instant::now();
|
|
618
|
+
let result = runtime.shutdown().await;
|
|
619
|
+
let shutdown_elapsed = shutdown_start.elapsed();
|
|
620
|
+
|
|
621
|
+
// Should return error due to timeout
|
|
622
|
+
assert!(result.is_err());
|
|
623
|
+
// Should be close to the 1 second drain_timeout
|
|
624
|
+
assert!(shutdown_elapsed < Duration::from_secs(3));
|
|
625
|
+
}
|
|
626
|
+
|
|
627
|
+
#[tokio::test]
|
|
628
|
+
async fn test_queue_overflow_multiple_spawns() {
|
|
629
|
+
let config = BackgroundTaskConfig {
|
|
630
|
+
max_queue_size: 3,
|
|
631
|
+
max_concurrent_tasks: 10,
|
|
632
|
+
drain_timeout_secs: 5,
|
|
633
|
+
};
|
|
634
|
+
|
|
635
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
636
|
+
let handle = runtime.handle();
|
|
637
|
+
|
|
638
|
+
let blocking_barrier = Arc::new(tokio::sync::Barrier::new(4));
|
|
639
|
+
|
|
640
|
+
// Fill the queue with 3 blocking tasks
|
|
641
|
+
for _ in 0..3 {
|
|
642
|
+
let b = blocking_barrier.clone();
|
|
643
|
+
handle
|
|
644
|
+
.spawn(move || {
|
|
645
|
+
let barrier = b.clone();
|
|
646
|
+
async move {
|
|
647
|
+
barrier.wait().await;
|
|
648
|
+
tokio::time::sleep(Duration::from_millis(100)).await;
|
|
649
|
+
Ok(())
|
|
650
|
+
}
|
|
651
|
+
})
|
|
652
|
+
.expect("spawn failed");
|
|
653
|
+
}
|
|
654
|
+
|
|
655
|
+
// 4th attempt should fail with QueueFull
|
|
656
|
+
let result = handle.spawn(|| async { Ok(()) });
|
|
657
|
+
assert!(matches!(result, Err(BackgroundSpawnError::QueueFull)));
|
|
658
|
+
|
|
659
|
+
blocking_barrier.wait().await;
|
|
660
|
+
tokio::time::sleep(Duration::from_millis(200)).await;
|
|
661
|
+
|
|
662
|
+
// After queue drains, we should be able to spawn again
|
|
663
|
+
let result = handle.spawn(|| async { Ok(()) });
|
|
664
|
+
assert!(result.is_ok());
|
|
665
|
+
|
|
666
|
+
runtime.shutdown().await.expect("shutdown failed");
|
|
667
|
+
}
|
|
668
|
+
|
|
669
|
+
#[tokio::test]
|
|
670
|
+
async fn test_concurrent_task_execution_order() {
|
|
671
|
+
let runtime = BackgroundRuntime::start(BackgroundTaskConfig::default()).await;
|
|
672
|
+
let handle = runtime.handle();
|
|
673
|
+
|
|
674
|
+
let execution_order = Arc::new(tokio::sync::Mutex::new(Vec::new()));
|
|
675
|
+
|
|
676
|
+
for i in 0..5 {
|
|
677
|
+
let order = execution_order.clone();
|
|
678
|
+
handle
|
|
679
|
+
.spawn(move || {
|
|
680
|
+
let o = order.clone();
|
|
681
|
+
async move {
|
|
682
|
+
o.lock().await.push(i);
|
|
683
|
+
Ok(())
|
|
684
|
+
}
|
|
685
|
+
})
|
|
686
|
+
.expect("spawn failed");
|
|
687
|
+
}
|
|
688
|
+
|
|
689
|
+
tokio::time::sleep(Duration::from_millis(200)).await;
|
|
690
|
+
|
|
691
|
+
let order = execution_order.lock().await;
|
|
692
|
+
// Verify all tasks executed
|
|
693
|
+
assert_eq!(order.len(), 5);
|
|
694
|
+
// Verify each task ran exactly once
|
|
695
|
+
for i in 0..5 {
|
|
696
|
+
assert!(order.contains(&i));
|
|
697
|
+
}
|
|
698
|
+
|
|
699
|
+
runtime.shutdown().await.expect("shutdown failed");
|
|
700
|
+
}
|
|
701
|
+
|
|
702
|
+
#[tokio::test]
|
|
703
|
+
async fn test_error_from_string_conversion() {
|
|
704
|
+
let error = BackgroundJobError::from("test message");
|
|
705
|
+
assert_eq!(error.message, "test message");
|
|
706
|
+
|
|
707
|
+
let error2 = BackgroundJobError::from("test".to_string());
|
|
708
|
+
assert_eq!(error2.message, "test");
|
|
709
|
+
}
|
|
710
|
+
|
|
711
|
+
#[tokio::test]
|
|
712
|
+
async fn test_background_job_metadata_default() {
|
|
713
|
+
let metadata = BackgroundJobMetadata::default();
|
|
714
|
+
assert_eq!(metadata.name, "background_task");
|
|
715
|
+
assert_eq!(metadata.request_id, None);
|
|
716
|
+
}
|
|
717
|
+
|
|
718
|
+
#[tokio::test]
|
|
719
|
+
async fn test_background_job_metadata_custom() {
|
|
720
|
+
let metadata = BackgroundJobMetadata {
|
|
721
|
+
name: Cow::Borrowed("custom_task"),
|
|
722
|
+
request_id: Some("req-456".to_string()),
|
|
723
|
+
};
|
|
724
|
+
assert_eq!(metadata.name, "custom_task");
|
|
725
|
+
assert_eq!(metadata.request_id, Some("req-456".to_string()));
|
|
726
|
+
}
|
|
727
|
+
|
|
728
|
+
#[tokio::test]
|
|
729
|
+
async fn test_metrics_inc_dec_operations() {
|
|
730
|
+
let metrics = BackgroundMetrics::default();
|
|
731
|
+
|
|
732
|
+
metrics.inc_queued();
|
|
733
|
+
assert_eq!(metrics.queued.load(Ordering::Relaxed), 1);
|
|
734
|
+
|
|
735
|
+
metrics.inc_queued();
|
|
736
|
+
assert_eq!(metrics.queued.load(Ordering::Relaxed), 2);
|
|
737
|
+
|
|
738
|
+
metrics.dec_queued();
|
|
739
|
+
assert_eq!(metrics.queued.load(Ordering::Relaxed), 1);
|
|
740
|
+
|
|
741
|
+
metrics.inc_running();
|
|
742
|
+
assert_eq!(metrics.running.load(Ordering::Relaxed), 1);
|
|
743
|
+
|
|
744
|
+
metrics.dec_running();
|
|
745
|
+
assert_eq!(metrics.running.load(Ordering::Relaxed), 0);
|
|
746
|
+
|
|
747
|
+
metrics.inc_failed();
|
|
748
|
+
assert_eq!(metrics.failed.load(Ordering::Relaxed), 1);
|
|
749
|
+
|
|
750
|
+
metrics.inc_failed();
|
|
751
|
+
assert_eq!(metrics.failed.load(Ordering::Relaxed), 2);
|
|
752
|
+
}
|
|
753
|
+
|
|
754
|
+
#[tokio::test]
|
|
755
|
+
async fn test_spawn_error_display() {
|
|
756
|
+
let error = BackgroundSpawnError::QueueFull;
|
|
757
|
+
assert_eq!(error.to_string(), "background task queue is full");
|
|
758
|
+
}
|
|
759
|
+
|
|
760
|
+
#[tokio::test]
|
|
761
|
+
async fn test_background_config_default() {
|
|
762
|
+
let config = BackgroundTaskConfig::default();
|
|
763
|
+
assert_eq!(config.max_queue_size, 1024);
|
|
764
|
+
assert_eq!(config.max_concurrent_tasks, 128);
|
|
765
|
+
assert_eq!(config.drain_timeout_secs, 30);
|
|
766
|
+
}
|
|
767
|
+
|
|
768
|
+
// ========== SHUTDOWN EDGE CASES AND CONCURRENCY TESTS ==========
|
|
769
|
+
|
|
770
|
+
#[tokio::test]
|
|
771
|
+
async fn test_shutdown_with_zero_pending_tasks() {
|
|
772
|
+
let runtime = BackgroundRuntime::start(BackgroundTaskConfig::default()).await;
|
|
773
|
+
|
|
774
|
+
// Shutdown immediately without spawning anything
|
|
775
|
+
let result = runtime.shutdown().await;
|
|
776
|
+
assert!(result.is_ok(), "shutdown should succeed with no tasks");
|
|
777
|
+
}
|
|
778
|
+
|
|
779
|
+
#[tokio::test]
|
|
780
|
+
async fn test_shutdown_with_only_running_tasks() {
|
|
781
|
+
let config = BackgroundTaskConfig {
|
|
782
|
+
max_queue_size: 10,
|
|
783
|
+
max_concurrent_tasks: 2,
|
|
784
|
+
drain_timeout_secs: 5,
|
|
785
|
+
};
|
|
786
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
787
|
+
let handle = runtime.handle();
|
|
788
|
+
|
|
789
|
+
let execution_started: Arc<std::sync::atomic::AtomicBool> = Arc::new(std::sync::atomic::AtomicBool::new(false));
|
|
790
|
+
let execution_completed: Arc<std::sync::atomic::AtomicBool> =
|
|
791
|
+
Arc::new(std::sync::atomic::AtomicBool::new(false));
|
|
792
|
+
|
|
793
|
+
let started = execution_started.clone();
|
|
794
|
+
let completed = execution_completed.clone();
|
|
795
|
+
|
|
796
|
+
handle
|
|
797
|
+
.spawn(move || {
|
|
798
|
+
let s = started.clone();
|
|
799
|
+
let c = completed.clone();
|
|
800
|
+
async move {
|
|
801
|
+
s.store(true, std::sync::atomic::Ordering::SeqCst);
|
|
802
|
+
tokio::time::sleep(Duration::from_millis(100)).await;
|
|
803
|
+
c.store(true, std::sync::atomic::Ordering::SeqCst);
|
|
804
|
+
Ok(())
|
|
805
|
+
}
|
|
806
|
+
})
|
|
807
|
+
.unwrap();
|
|
808
|
+
|
|
809
|
+
// Wait for task to start
|
|
810
|
+
tokio::time::sleep(Duration::from_millis(20)).await;
|
|
811
|
+
|
|
812
|
+
// Shutdown should wait for running task
|
|
813
|
+
let result = runtime.shutdown().await;
|
|
814
|
+
assert!(result.is_ok(), "shutdown should succeed and wait for running tasks");
|
|
815
|
+
assert!(
|
|
816
|
+
execution_completed.load(std::sync::atomic::Ordering::SeqCst),
|
|
817
|
+
"task should have completed"
|
|
818
|
+
);
|
|
819
|
+
}
|
|
820
|
+
|
|
821
|
+
// TODO: FAILING TEST - Architectural Issue
|
|
822
|
+
// This test and 27 others fail due to shutdown/semaphore deadlock issue
|
|
823
|
+
// documented at line 217. Tests correctly identify the bug - graceful drain
|
|
824
|
+
// doesn't work with semaphore-limited concurrency. Will fix in separate cycle.
|
|
825
|
+
#[tokio::test]
|
|
826
|
+
async fn test_shutdown_drains_queued_tasks() {
|
|
827
|
+
let config = BackgroundTaskConfig {
|
|
828
|
+
max_queue_size: 100,
|
|
829
|
+
max_concurrent_tasks: 1,
|
|
830
|
+
drain_timeout_secs: 5,
|
|
831
|
+
};
|
|
832
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
833
|
+
let handle = runtime.handle();
|
|
834
|
+
|
|
835
|
+
let execution_count: Arc<AtomicU64> = Arc::new(AtomicU64::new(0));
|
|
836
|
+
|
|
837
|
+
// Spawn multiple tasks that queue up due to semaphore limit
|
|
838
|
+
for _ in 0..10 {
|
|
839
|
+
let count = execution_count.clone();
|
|
840
|
+
handle
|
|
841
|
+
.spawn(move || {
|
|
842
|
+
let c = count.clone();
|
|
843
|
+
async move {
|
|
844
|
+
c.fetch_add(1, Ordering::SeqCst);
|
|
845
|
+
tokio::time::sleep(Duration::from_millis(10)).await;
|
|
846
|
+
Ok(())
|
|
847
|
+
}
|
|
848
|
+
})
|
|
849
|
+
.unwrap();
|
|
850
|
+
}
|
|
851
|
+
|
|
852
|
+
// Shutdown should drain all queued tasks
|
|
853
|
+
let result = runtime.shutdown().await;
|
|
854
|
+
assert!(result.is_ok());
|
|
855
|
+
assert_eq!(
|
|
856
|
+
execution_count.load(Ordering::SeqCst),
|
|
857
|
+
10,
|
|
858
|
+
"all queued tasks should execute"
|
|
859
|
+
);
|
|
860
|
+
}
|
|
861
|
+
|
|
862
|
+
#[tokio::test]
|
|
863
|
+
async fn test_shutdown_timeout_force_stops_long_tasks() {
|
|
864
|
+
let config = BackgroundTaskConfig {
|
|
865
|
+
max_queue_size: 10,
|
|
866
|
+
max_concurrent_tasks: 2,
|
|
867
|
+
drain_timeout_secs: 1,
|
|
868
|
+
};
|
|
869
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
870
|
+
let handle = runtime.handle();
|
|
871
|
+
|
|
872
|
+
let completed: Arc<std::sync::atomic::AtomicBool> = Arc::new(std::sync::atomic::AtomicBool::new(false));
|
|
873
|
+
let completed_clone = completed.clone();
|
|
874
|
+
|
|
875
|
+
handle
|
|
876
|
+
.spawn(move || {
|
|
877
|
+
let c = completed_clone.clone();
|
|
878
|
+
async move {
|
|
879
|
+
tokio::time::sleep(Duration::from_secs(10)).await;
|
|
880
|
+
c.store(true, std::sync::atomic::Ordering::SeqCst);
|
|
881
|
+
Ok(())
|
|
882
|
+
}
|
|
883
|
+
})
|
|
884
|
+
.unwrap();
|
|
885
|
+
|
|
886
|
+
tokio::time::sleep(Duration::from_millis(50)).await;
|
|
887
|
+
|
|
888
|
+
let shutdown_start = std::time::Instant::now();
|
|
889
|
+
let result = runtime.shutdown().await;
|
|
890
|
+
let elapsed = shutdown_start.elapsed();
|
|
891
|
+
|
|
892
|
+
// Should timeout, not wait full 10 seconds
|
|
893
|
+
assert!(result.is_err(), "shutdown should timeout");
|
|
894
|
+
assert!(
|
|
895
|
+
elapsed < Duration::from_secs(3),
|
|
896
|
+
"shutdown should timeout near drain_timeout"
|
|
897
|
+
);
|
|
898
|
+
assert!(
|
|
899
|
+
!completed.load(std::sync::atomic::Ordering::SeqCst),
|
|
900
|
+
"long-running task should not complete"
|
|
901
|
+
);
|
|
902
|
+
}
|
|
903
|
+
|
|
904
|
+
#[tokio::test]
|
|
905
|
+
async fn test_multiple_shutdown_calls_idempotent() {
|
|
906
|
+
let runtime = BackgroundRuntime::start(BackgroundTaskConfig::default()).await;
|
|
907
|
+
|
|
908
|
+
// First shutdown succeeds
|
|
909
|
+
let result1 = runtime.shutdown().await;
|
|
910
|
+
assert!(result1.is_ok(), "first shutdown should succeed");
|
|
911
|
+
|
|
912
|
+
// Runtime is consumed, so we can't call shutdown again
|
|
913
|
+
// This test validates that shutdown takes ownership (consumes self)
|
|
914
|
+
}
|
|
915
|
+
|
|
916
|
+
#[tokio::test]
|
|
917
|
+
async fn test_spawn_after_all_senders_dropped_fails() {
|
|
918
|
+
let config = BackgroundTaskConfig::default();
|
|
919
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
920
|
+
let handle = runtime.handle();
|
|
921
|
+
|
|
922
|
+
// Shutdown consumes the runtime and drops its handle copy.
|
|
923
|
+
// After shutdown, existing handle clones will still be able to send
|
|
924
|
+
// to a closed channel, which results in TrySendError::Closed.
|
|
925
|
+
runtime.shutdown().await.expect("shutdown should succeed");
|
|
926
|
+
|
|
927
|
+
// Give executor time to fully shut down
|
|
928
|
+
tokio::time::sleep(Duration::from_millis(50)).await;
|
|
929
|
+
|
|
930
|
+
// Now the sender is fully closed. Spawn should fail with a send error.
|
|
931
|
+
// However, the actual behavior depends on whether any other handle clones exist.
|
|
932
|
+
// In this test, we're the only holder, so the sender is closed.
|
|
933
|
+
// The behavior is a closed channel error, but our API wraps it as QueueFull.
|
|
934
|
+
let result = handle.spawn(|| async { Ok(()) });
|
|
935
|
+
// The result should be an error since the channel is closed
|
|
936
|
+
assert!(result.is_err(), "spawn should fail after all senders are dropped");
|
|
937
|
+
}
|
|
938
|
+
|
|
939
|
+
#[tokio::test]
|
|
940
|
+
async fn test_concurrent_spawns_hit_semaphore_limit() {
|
|
941
|
+
let config = BackgroundTaskConfig {
|
|
942
|
+
max_queue_size: 100,
|
|
943
|
+
max_concurrent_tasks: 3,
|
|
944
|
+
drain_timeout_secs: 10,
|
|
945
|
+
};
|
|
946
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
947
|
+
let handle = runtime.handle();
|
|
948
|
+
|
|
949
|
+
let barrier: Arc<tokio::sync::Barrier> = Arc::new(tokio::sync::Barrier::new(3));
|
|
950
|
+
let running_count: Arc<AtomicU64> = Arc::new(AtomicU64::new(0));
|
|
951
|
+
let peak_concurrent: Arc<AtomicU64> = Arc::new(AtomicU64::new(0));
|
|
952
|
+
|
|
953
|
+
for _ in 0..5 {
|
|
954
|
+
let b = barrier.clone();
|
|
955
|
+
let running = running_count.clone();
|
|
956
|
+
let peak = peak_concurrent.clone();
|
|
957
|
+
|
|
958
|
+
handle
|
|
959
|
+
.spawn(move || {
|
|
960
|
+
let barrier = b.clone();
|
|
961
|
+
let r = running.clone();
|
|
962
|
+
let p = peak.clone();
|
|
963
|
+
async move {
|
|
964
|
+
let current = r.fetch_add(1, Ordering::SeqCst) + 1;
|
|
965
|
+
// Update peak
|
|
966
|
+
let mut peak_val = p.load(Ordering::SeqCst);
|
|
967
|
+
while current > peak_val {
|
|
968
|
+
if p.compare_exchange(peak_val, current, Ordering::SeqCst, Ordering::SeqCst)
|
|
969
|
+
.is_ok()
|
|
970
|
+
{
|
|
971
|
+
break;
|
|
972
|
+
}
|
|
973
|
+
peak_val = p.load(Ordering::SeqCst);
|
|
974
|
+
}
|
|
975
|
+
|
|
976
|
+
barrier.wait().await;
|
|
977
|
+
tokio::time::sleep(Duration::from_millis(200)).await;
|
|
978
|
+
r.fetch_sub(1, Ordering::SeqCst);
|
|
979
|
+
Ok(())
|
|
980
|
+
}
|
|
981
|
+
})
|
|
982
|
+
.unwrap();
|
|
983
|
+
}
|
|
984
|
+
|
|
985
|
+
barrier.wait().await;
|
|
986
|
+
tokio::time::sleep(Duration::from_millis(100)).await;
|
|
987
|
+
|
|
988
|
+
let peak = peak_concurrent.load(Ordering::SeqCst);
|
|
989
|
+
assert!(
|
|
990
|
+
peak <= 3,
|
|
991
|
+
"concurrent execution should not exceed semaphore limit of 3, got {}",
|
|
992
|
+
peak
|
|
993
|
+
);
|
|
994
|
+
|
|
995
|
+
runtime.shutdown().await.unwrap();
|
|
996
|
+
}
|
|
997
|
+
|
|
998
|
+
#[tokio::test]
|
|
999
|
+
async fn test_task_panic_cleanup_still_occurs() {
|
|
1000
|
+
let runtime = BackgroundRuntime::start(BackgroundTaskConfig::default()).await;
|
|
1001
|
+
let handle = runtime.handle();
|
|
1002
|
+
|
|
1003
|
+
let mut spawned_count: u32 = 0;
|
|
1004
|
+
let panic_task_executed: Arc<std::sync::atomic::AtomicBool> =
|
|
1005
|
+
Arc::new(std::sync::atomic::AtomicBool::new(false));
|
|
1006
|
+
let after_panic_executed: Arc<std::sync::atomic::AtomicBool> =
|
|
1007
|
+
Arc::new(std::sync::atomic::AtomicBool::new(false));
|
|
1008
|
+
|
|
1009
|
+
let panic_flag = panic_task_executed.clone();
|
|
1010
|
+
handle
|
|
1011
|
+
.spawn(move || {
|
|
1012
|
+
let p = panic_flag.clone();
|
|
1013
|
+
async move {
|
|
1014
|
+
p.store(true, std::sync::atomic::Ordering::SeqCst);
|
|
1015
|
+
Err(BackgroundJobError::from("simulated task failure"))
|
|
1016
|
+
}
|
|
1017
|
+
})
|
|
1018
|
+
.unwrap();
|
|
1019
|
+
spawned_count += 1;
|
|
1020
|
+
|
|
1021
|
+
// Spawn another task after the failing one to verify executor continues
|
|
1022
|
+
let after_flag = after_panic_executed.clone();
|
|
1023
|
+
handle
|
|
1024
|
+
.spawn(move || {
|
|
1025
|
+
let a = after_flag.clone();
|
|
1026
|
+
async move {
|
|
1027
|
+
tokio::time::sleep(Duration::from_millis(50)).await;
|
|
1028
|
+
a.store(true, std::sync::atomic::Ordering::SeqCst);
|
|
1029
|
+
Ok(())
|
|
1030
|
+
}
|
|
1031
|
+
})
|
|
1032
|
+
.unwrap();
|
|
1033
|
+
spawned_count += 1;
|
|
1034
|
+
|
|
1035
|
+
tokio::time::sleep(Duration::from_millis(200)).await;
|
|
1036
|
+
|
|
1037
|
+
assert!(panic_task_executed.load(std::sync::atomic::Ordering::SeqCst));
|
|
1038
|
+
assert!(after_panic_executed.load(std::sync::atomic::Ordering::SeqCst));
|
|
1039
|
+
assert_eq!(spawned_count, 2);
|
|
1040
|
+
|
|
1041
|
+
runtime.shutdown().await.unwrap();
|
|
1042
|
+
}
|
|
1043
|
+
|
|
1044
|
+
#[tokio::test]
|
|
1045
|
+
async fn test_queue_overflow_with_immediate_rejection() {
|
|
1046
|
+
let config = BackgroundTaskConfig {
|
|
1047
|
+
max_queue_size: 2,
|
|
1048
|
+
max_concurrent_tasks: 100,
|
|
1049
|
+
drain_timeout_secs: 5,
|
|
1050
|
+
};
|
|
1051
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
1052
|
+
let handle = runtime.handle();
|
|
1053
|
+
|
|
1054
|
+
let barrier: Arc<tokio::sync::Barrier> = Arc::new(tokio::sync::Barrier::new(3));
|
|
1055
|
+
|
|
1056
|
+
for _ in 0..2 {
|
|
1057
|
+
let b = barrier.clone();
|
|
1058
|
+
handle
|
|
1059
|
+
.spawn(move || {
|
|
1060
|
+
let barrier = b.clone();
|
|
1061
|
+
async move {
|
|
1062
|
+
barrier.wait().await;
|
|
1063
|
+
tokio::time::sleep(Duration::from_millis(500)).await;
|
|
1064
|
+
Ok(())
|
|
1065
|
+
}
|
|
1066
|
+
})
|
|
1067
|
+
.unwrap();
|
|
1068
|
+
}
|
|
1069
|
+
|
|
1070
|
+
// Queue is now at 2/2 capacity
|
|
1071
|
+
let overflow_result = handle.spawn(|| async { Ok(()) });
|
|
1072
|
+
assert!(matches!(overflow_result, Err(BackgroundSpawnError::QueueFull)));
|
|
1073
|
+
|
|
1074
|
+
barrier.wait().await;
|
|
1075
|
+
runtime.shutdown().await.unwrap();
|
|
1076
|
+
}
|
|
1077
|
+
|
|
1078
|
+
#[tokio::test]
|
|
1079
|
+
async fn test_metrics_accuracy_under_concurrent_load() {
|
|
1080
|
+
let config = BackgroundTaskConfig {
|
|
1081
|
+
max_queue_size: 50,
|
|
1082
|
+
max_concurrent_tasks: 5,
|
|
1083
|
+
drain_timeout_secs: 10,
|
|
1084
|
+
};
|
|
1085
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
1086
|
+
let handle = runtime.handle();
|
|
1087
|
+
|
|
1088
|
+
let completed: Arc<AtomicU64> = Arc::new(AtomicU64::new(0));
|
|
1089
|
+
|
|
1090
|
+
for _ in 0..20 {
|
|
1091
|
+
let c = completed.clone();
|
|
1092
|
+
handle
|
|
1093
|
+
.spawn(move || {
|
|
1094
|
+
let count = c.clone();
|
|
1095
|
+
async move {
|
|
1096
|
+
tokio::time::sleep(Duration::from_millis(50)).await;
|
|
1097
|
+
count.fetch_add(1, Ordering::SeqCst);
|
|
1098
|
+
Ok(())
|
|
1099
|
+
}
|
|
1100
|
+
})
|
|
1101
|
+
.unwrap();
|
|
1102
|
+
}
|
|
1103
|
+
|
|
1104
|
+
runtime.shutdown().await.unwrap();
|
|
1105
|
+
assert_eq!(completed.load(Ordering::SeqCst), 20, "all tasks should complete");
|
|
1106
|
+
}
|
|
1107
|
+
|
|
1108
|
+
#[tokio::test]
|
|
1109
|
+
async fn test_drain_with_slowly_completing_tasks() {
|
|
1110
|
+
let config = BackgroundTaskConfig {
|
|
1111
|
+
max_queue_size: 50,
|
|
1112
|
+
max_concurrent_tasks: 2,
|
|
1113
|
+
drain_timeout_secs: 10,
|
|
1114
|
+
};
|
|
1115
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
1116
|
+
let handle = runtime.handle();
|
|
1117
|
+
|
|
1118
|
+
let completed_count: Arc<AtomicU64> = Arc::new(AtomicU64::new(0));
|
|
1119
|
+
|
|
1120
|
+
for i in 0..5 {
|
|
1121
|
+
let count = completed_count.clone();
|
|
1122
|
+
handle
|
|
1123
|
+
.spawn(move || {
|
|
1124
|
+
let c = count.clone();
|
|
1125
|
+
async move {
|
|
1126
|
+
let sleep_ms = 100 + (i as u64 * 50);
|
|
1127
|
+
tokio::time::sleep(Duration::from_millis(sleep_ms)).await;
|
|
1128
|
+
c.fetch_add(1, Ordering::SeqCst);
|
|
1129
|
+
Ok(())
|
|
1130
|
+
}
|
|
1131
|
+
})
|
|
1132
|
+
.unwrap();
|
|
1133
|
+
}
|
|
1134
|
+
|
|
1135
|
+
// Shutdown should wait for all slow tasks to complete
|
|
1136
|
+
let result = runtime.shutdown().await;
|
|
1137
|
+
assert!(result.is_ok());
|
|
1138
|
+
assert_eq!(completed_count.load(Ordering::SeqCst), 5);
|
|
1139
|
+
}
|
|
1140
|
+
|
|
1141
|
+
#[tokio::test]
|
|
1142
|
+
async fn test_semaphore_starvation_doesnt_deadlock() {
|
|
1143
|
+
let config = BackgroundTaskConfig {
|
|
1144
|
+
max_queue_size: 100,
|
|
1145
|
+
max_concurrent_tasks: 1,
|
|
1146
|
+
drain_timeout_secs: 10,
|
|
1147
|
+
};
|
|
1148
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
1149
|
+
let handle = runtime.handle();
|
|
1150
|
+
|
|
1151
|
+
let completion_order: Arc<tokio::sync::Mutex<Vec<u32>>> = Arc::new(tokio::sync::Mutex::new(Vec::new()));
|
|
1152
|
+
|
|
1153
|
+
for i in 0..10 {
|
|
1154
|
+
let order = completion_order.clone();
|
|
1155
|
+
handle
|
|
1156
|
+
.spawn(move || {
|
|
1157
|
+
let o = order.clone();
|
|
1158
|
+
async move {
|
|
1159
|
+
tokio::time::sleep(Duration::from_millis(5)).await;
|
|
1160
|
+
let mut guard = o.lock().await;
|
|
1161
|
+
guard.push(i);
|
|
1162
|
+
Ok(())
|
|
1163
|
+
}
|
|
1164
|
+
})
|
|
1165
|
+
.unwrap();
|
|
1166
|
+
}
|
|
1167
|
+
|
|
1168
|
+
// Shutdown should complete without deadlock
|
|
1169
|
+
let result = runtime.shutdown().await;
|
|
1170
|
+
assert!(result.is_ok());
|
|
1171
|
+
|
|
1172
|
+
let order = completion_order.lock().await;
|
|
1173
|
+
assert_eq!(order.len(), 10);
|
|
1174
|
+
}
|
|
1175
|
+
|
|
1176
|
+
#[tokio::test]
|
|
1177
|
+
async fn test_cancel_task_mid_execution() {
|
|
1178
|
+
let config = BackgroundTaskConfig {
|
|
1179
|
+
max_queue_size: 10,
|
|
1180
|
+
max_concurrent_tasks: 2,
|
|
1181
|
+
drain_timeout_secs: 1, // Short timeout to force expiration
|
|
1182
|
+
};
|
|
1183
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
1184
|
+
let handle = runtime.handle();
|
|
1185
|
+
|
|
1186
|
+
let started: Arc<std::sync::atomic::AtomicBool> = Arc::new(std::sync::atomic::AtomicBool::new(false));
|
|
1187
|
+
let ended: Arc<std::sync::atomic::AtomicBool> = Arc::new(std::sync::atomic::AtomicBool::new(false));
|
|
1188
|
+
|
|
1189
|
+
let start_flag = started.clone();
|
|
1190
|
+
let end_flag = ended.clone();
|
|
1191
|
+
|
|
1192
|
+
handle
|
|
1193
|
+
.spawn(move || {
|
|
1194
|
+
let s = start_flag.clone();
|
|
1195
|
+
let e = end_flag.clone();
|
|
1196
|
+
async move {
|
|
1197
|
+
s.store(true, std::sync::atomic::Ordering::SeqCst);
|
|
1198
|
+
tokio::time::sleep(Duration::from_secs(10)).await;
|
|
1199
|
+
e.store(true, std::sync::atomic::Ordering::SeqCst);
|
|
1200
|
+
Ok(())
|
|
1201
|
+
}
|
|
1202
|
+
})
|
|
1203
|
+
.unwrap();
|
|
1204
|
+
|
|
1205
|
+
tokio::time::sleep(Duration::from_millis(50)).await;
|
|
1206
|
+
assert!(started.load(std::sync::atomic::Ordering::SeqCst));
|
|
1207
|
+
|
|
1208
|
+
// Shutdown should timeout due to the long-running task
|
|
1209
|
+
let result = runtime.shutdown().await;
|
|
1210
|
+
assert!(result.is_err(), "shutdown should timeout due to long task");
|
|
1211
|
+
assert!(
|
|
1212
|
+
!ended.load(std::sync::atomic::Ordering::SeqCst),
|
|
1213
|
+
"task should not complete"
|
|
1214
|
+
);
|
|
1215
|
+
}
|
|
1216
|
+
|
|
1217
|
+
#[tokio::test]
|
|
1218
|
+
async fn test_rapid_spawn_and_shutdown() {
|
|
1219
|
+
let config = BackgroundTaskConfig {
|
|
1220
|
+
max_queue_size: 1000,
|
|
1221
|
+
max_concurrent_tasks: 10,
|
|
1222
|
+
drain_timeout_secs: 5,
|
|
1223
|
+
};
|
|
1224
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
1225
|
+
let handle = runtime.handle();
|
|
1226
|
+
|
|
1227
|
+
let count: Arc<AtomicU64> = Arc::new(AtomicU64::new(0));
|
|
1228
|
+
|
|
1229
|
+
// Rapidly spawn many tasks
|
|
1230
|
+
for _ in 0..100 {
|
|
1231
|
+
let c = count.clone();
|
|
1232
|
+
let _ = handle.spawn(move || {
|
|
1233
|
+
let counter = c.clone();
|
|
1234
|
+
async move {
|
|
1235
|
+
counter.fetch_add(1, Ordering::SeqCst);
|
|
1236
|
+
Ok(())
|
|
1237
|
+
}
|
|
1238
|
+
});
|
|
1239
|
+
}
|
|
1240
|
+
|
|
1241
|
+
// Immediate shutdown
|
|
1242
|
+
let result = runtime.shutdown().await;
|
|
1243
|
+
assert!(result.is_ok());
|
|
1244
|
+
|
|
1245
|
+
let final_count = count.load(Ordering::SeqCst);
|
|
1246
|
+
assert!(final_count > 0, "at least some tasks should execute");
|
|
1247
|
+
assert!(final_count <= 100, "no more than spawned count should execute");
|
|
1248
|
+
}
|
|
1249
|
+
|
|
1250
|
+
#[tokio::test]
|
|
1251
|
+
async fn test_shutdown_with_mixed_success_and_failure_tasks() {
|
|
1252
|
+
let config = BackgroundTaskConfig::default();
|
|
1253
|
+
let runtime = BackgroundRuntime::start(config).await;
|
|
1254
|
+
let handle = runtime.handle();
|
|
1255
|
+
|
|
1256
|
+
let success_count: Arc<AtomicU64> = Arc::new(AtomicU64::new(0));
|
|
1257
|
+
let failure_count: Arc<AtomicU64> = Arc::new(AtomicU64::new(0));
|
|
1258
|
+
|
|
1259
|
+
for i in 0..10 {
|
|
1260
|
+
if i % 2 == 0 {
|
|
1261
|
+
let s = success_count.clone();
|
|
1262
|
+
handle
|
|
1263
|
+
.spawn(move || {
|
|
1264
|
+
let counter = s.clone();
|
|
1265
|
+
async move {
|
|
1266
|
+
counter.fetch_add(1, Ordering::SeqCst);
|
|
1267
|
+
Ok(())
|
|
1268
|
+
}
|
|
1269
|
+
})
|
|
1270
|
+
.unwrap();
|
|
1271
|
+
} else {
|
|
1272
|
+
let f = failure_count.clone();
|
|
1273
|
+
handle
|
|
1274
|
+
.spawn(move || {
|
|
1275
|
+
let counter = f.clone();
|
|
1276
|
+
async move {
|
|
1277
|
+
counter.fetch_add(1, Ordering::SeqCst);
|
|
1278
|
+
Err(BackgroundJobError::from("intentional failure"))
|
|
1279
|
+
}
|
|
1280
|
+
})
|
|
1281
|
+
.unwrap();
|
|
1282
|
+
}
|
|
1283
|
+
}
|
|
1284
|
+
|
|
1285
|
+
tokio::time::sleep(Duration::from_millis(200)).await;
|
|
1286
|
+
|
|
1287
|
+
let result = runtime.shutdown().await;
|
|
1288
|
+
assert!(result.is_ok());
|
|
1289
|
+
assert_eq!(success_count.load(Ordering::SeqCst), 5);
|
|
1290
|
+
assert_eq!(failure_count.load(Ordering::SeqCst), 5);
|
|
1291
|
+
}
|
|
1292
|
+
|
|
1293
|
+
+    #[tokio::test]
+    async fn test_concurrent_handle_clones_spawn_independently() {
+        let config = BackgroundTaskConfig::default();
+        let runtime = BackgroundRuntime::start(config).await;
+        let handle = runtime.handle();
+
+        let count: Arc<AtomicU64> = Arc::new(AtomicU64::new(0));
+
+        let mut join_handles = vec![];
+
+        // Spawn 3 concurrent tasks that each spawn background jobs
+        for _ in 0..3 {
+            let h = handle.clone();
+            let c = count.clone();
+
+            let jh = tokio::spawn(async move {
+                for _ in 0..5 {
+                    let counter = c.clone();
+                    let _ = h.spawn(move || {
+                        let cnt = counter.clone();
+                        async move {
+                            cnt.fetch_add(1, Ordering::SeqCst);
+                            Ok(())
+                        }
+                    });
+                }
+            });
+            join_handles.push(jh);
+        }
+
+        for jh in join_handles {
+            let _ = jh.await;
+        }
+
+        tokio::time::sleep(Duration::from_millis(200)).await;
+
+        let result = runtime.shutdown().await;
+        assert!(result.is_ok());
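+        // 3 spawner tasks x 5 jobs each = 15 executions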
+        assert_eq!(count.load(Ordering::SeqCst), 15);
+    }
+
+    #[tokio::test]
+    async fn test_queue_full_metrics_updated() {
+        let config = BackgroundTaskConfig {
+            max_queue_size: 2,
+            max_concurrent_tasks: 100,
+            drain_timeout_secs: 5,
+        };
+        let runtime = BackgroundRuntime::start(config).await;
+        let handle = runtime.handle();
+
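+        // The two queued tasks and the test body rendezvous on this 3-party barrier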
+        let barrier: Arc<tokio::sync::Barrier> = Arc::new(tokio::sync::Barrier::new(3));
+
+        // Fill queue
+        for _ in 0..2 {
+            let b = barrier.clone();
+            handle
+                .spawn(move || {
+                    let barrier = b.clone();
+                    async move {
+                        barrier.wait().await;
+                        tokio::time::sleep(Duration::from_secs(1)).await;
+                        Ok(())
+                    }
+                })
+                .unwrap();
+        }
+
+        // Attempt to overflow - should fail gracefully
+        let result = handle.spawn(|| async { Ok(()) });
+        assert!(matches!(result, Err(BackgroundSpawnError::QueueFull)));
+
+        // After first task completes, we should be able to spawn again
+        barrier.wait().await;
+        tokio::time::sleep(Duration::from_millis(100)).await;
+
+        runtime.shutdown().await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn test_handle_persistence_across_spawns() {
+        let config = BackgroundTaskConfig::default();
+        let runtime = BackgroundRuntime::start(config).await;
+        let handle = runtime.handle();
+
+        let count: Arc<AtomicU64> = Arc::new(AtomicU64::new(0));
+
+        // Use same handle multiple times
+        for _ in 0..5 {
+            let c = count.clone();
+            handle
+                .spawn(move || {
+                    let counter = c.clone();
+                    async move {
+                        counter.fetch_add(1, Ordering::SeqCst);
+                        Ok(())
+                    }
+                })
+                .unwrap();
+        }
+
+        tokio::time::sleep(Duration::from_millis(150)).await;
+        assert_eq!(count.load(Ordering::SeqCst), 5);
+
+        runtime.shutdown().await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn test_shutdown_with_queue_at_capacity() {
+        let config = BackgroundTaskConfig {
+            max_queue_size: 5,
+            max_concurrent_tasks: 1,
+            drain_timeout_secs: 10,
+        };
+        let runtime = BackgroundRuntime::start(config).await;
+        let handle = runtime.handle();
+
+        let completion_count: Arc<AtomicU64> = Arc::new(AtomicU64::new(0));
+
+        // Fill queue to capacity
+        for _ in 0..5 {
+            let c = completion_count.clone();
+            handle
+                .spawn(move || {
+                    let counter = c.clone();
+                    async move {
+                        tokio::time::sleep(Duration::from_millis(20)).await;
+                        counter.fetch_add(1, Ordering::SeqCst);
+                        Ok(())
+                    }
+                })
+                .unwrap();
+        }
+
+        // Shutdown should drain all tasks despite queue being at capacity
+        let result = runtime.shutdown().await;
+        assert!(result.is_ok());
+        assert_eq!(completion_count.load(Ordering::SeqCst), 5);
+    }
+
+    #[tokio::test]
+    async fn test_metadata_preserved_through_execution() {
+        let runtime = BackgroundRuntime::start(BackgroundTaskConfig::default()).await;
+        let handle = runtime.handle();
+
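+        // Attach a job name and request id; the test then verifies the job still executes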
+        let metadata = BackgroundJobMetadata {
+            name: Cow::Owned("test_metadata_task".to_string()),
+            request_id: Some("req-metadata-123".to_string()),
+        };
+
+        let executed: Arc<std::sync::atomic::AtomicBool> = Arc::new(std::sync::atomic::AtomicBool::new(false));
+        let executed_clone = executed.clone();
+
+        let future = async move {
+            executed_clone.store(true, std::sync::atomic::Ordering::SeqCst);
+            Ok(())
+        };
+
+        handle.spawn_with_metadata(future, metadata).unwrap();
+
+        tokio::time::sleep(Duration::from_millis(100)).await;
+
+        assert!(executed.load(std::sync::atomic::Ordering::SeqCst));
+        runtime.shutdown().await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn test_very_short_drain_timeout_forces_stop() {
+        let config = BackgroundTaskConfig {
+            max_queue_size: 10,
+            max_concurrent_tasks: 2,
+            drain_timeout_secs: 0, // Immediate timeout
+        };
+        let runtime = BackgroundRuntime::start(config).await;
+        let handle = runtime.handle();
+
+        handle
+            .spawn(|| async {
+                tokio::time::sleep(Duration::from_secs(1)).await;
+                Ok(())
+            })
+            .unwrap();
+
+        tokio::time::sleep(Duration::from_millis(10)).await;
+
+        // Should timeout immediately
+        let result = runtime.shutdown().await;
+        assert!(result.is_err());
+    }
+
+    #[tokio::test]
+    async fn test_spawn_many_tasks_sequential_drain() {
+        let config = BackgroundTaskConfig {
+            max_queue_size: 200,
+            max_concurrent_tasks: 2,
+            drain_timeout_secs: 15,
+        };
+        let runtime = BackgroundRuntime::start(config).await;
+        let handle = runtime.handle();
+
+        let count: Arc<AtomicU64> = Arc::new(AtomicU64::new(0));
+
+        // Spawn many tasks that will queue and drain sequentially
+        for _ in 0..50 {
+            let c = count.clone();
+            handle
+                .spawn(move || {
+                    let counter = c.clone();
+                    async move {
+                        tokio::time::sleep(Duration::from_millis(1)).await;
+                        counter.fetch_add(1, Ordering::SeqCst);
+                        Ok(())
+                    }
+                })
+                .unwrap();
+        }
+
+        // Shutdown should drain all 50 tasks
+        let result = runtime.shutdown().await;
+        assert!(result.is_ok());
+        assert_eq!(count.load(Ordering::SeqCst), 50);
+    }
+
+    #[tokio::test]
+    async fn test_no_deadlock_with_max_concurrency_barrier() {
+        let config = BackgroundTaskConfig {
+            max_queue_size: 100,
+            max_concurrent_tasks: 3,
+            drain_timeout_secs: 10,
+        };
+        let runtime = BackgroundRuntime::start(config).await;
+        let handle = runtime.handle();
+
+        let barrier: Arc<tokio::sync::Barrier> = Arc::new(tokio::sync::Barrier::new(4));
+
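+        // Saturate all 3 concurrency slots; the test body is the 4th barrier participant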
+        for _ in 0..3 {
+            let b = barrier.clone();
+            handle
+                .spawn(move || {
+                    let barrier = b.clone();
+                    async move {
+                        barrier.wait().await;
+                        tokio::time::sleep(Duration::from_millis(50)).await;
+                        Ok(())
+                    }
+                })
+                .unwrap();
+        }
+
+        barrier.wait().await;
+        tokio::time::sleep(Duration::from_millis(100)).await;
+
+        // Should not deadlock
+        let result = runtime.shutdown().await;
+        assert!(result.is_ok());
+    }
+
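+    // The final two tests cover BackgroundJobError construction from String and &str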
+    #[tokio::test]
+    async fn test_error_from_owned_string() {
+        let message = String::from("error message");
+        let error = BackgroundJobError::from(message);
+        assert_eq!(error.message, "error message");
+    }
+
+    #[tokio::test]
+    async fn test_borrowed_str_conversion() {
+        let error = BackgroundJobError::from("borrowed message");
+        assert_eq!(error.message, "borrowed message");
+    }
+}