tasker-rb 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/DEVELOPMENT.md +548 -0
- data/README.md +87 -0
- data/ext/tasker_core/Cargo.lock +4720 -0
- data/ext/tasker_core/Cargo.toml +76 -0
- data/ext/tasker_core/extconf.rb +38 -0
- data/ext/tasker_core/src/CLAUDE.md +7 -0
- data/ext/tasker_core/src/bootstrap.rs +320 -0
- data/ext/tasker_core/src/bridge.rs +400 -0
- data/ext/tasker_core/src/client_ffi.rs +173 -0
- data/ext/tasker_core/src/conversions.rs +131 -0
- data/ext/tasker_core/src/diagnostics.rs +57 -0
- data/ext/tasker_core/src/event_handler.rs +179 -0
- data/ext/tasker_core/src/event_publisher_ffi.rs +239 -0
- data/ext/tasker_core/src/ffi_logging.rs +245 -0
- data/ext/tasker_core/src/global_event_system.rs +16 -0
- data/ext/tasker_core/src/in_process_event_ffi.rs +319 -0
- data/ext/tasker_core/src/lib.rs +41 -0
- data/ext/tasker_core/src/observability_ffi.rs +339 -0
- data/lib/tasker_core/batch_processing/batch_aggregation_scenario.rb +85 -0
- data/lib/tasker_core/batch_processing/batch_worker_context.rb +238 -0
- data/lib/tasker_core/bootstrap.rb +394 -0
- data/lib/tasker_core/domain_events/base_publisher.rb +220 -0
- data/lib/tasker_core/domain_events/base_subscriber.rb +178 -0
- data/lib/tasker_core/domain_events/publisher_registry.rb +253 -0
- data/lib/tasker_core/domain_events/subscriber_registry.rb +152 -0
- data/lib/tasker_core/domain_events.rb +43 -0
- data/lib/tasker_core/errors/CLAUDE.md +7 -0
- data/lib/tasker_core/errors/common.rb +305 -0
- data/lib/tasker_core/errors/error_classifier.rb +61 -0
- data/lib/tasker_core/errors.rb +4 -0
- data/lib/tasker_core/event_bridge.rb +330 -0
- data/lib/tasker_core/handlers.rb +159 -0
- data/lib/tasker_core/internal.rb +31 -0
- data/lib/tasker_core/logger.rb +234 -0
- data/lib/tasker_core/models.rb +337 -0
- data/lib/tasker_core/observability/types.rb +158 -0
- data/lib/tasker_core/observability.rb +292 -0
- data/lib/tasker_core/registry/handler_registry.rb +453 -0
- data/lib/tasker_core/registry/resolver_chain.rb +258 -0
- data/lib/tasker_core/registry/resolvers/base_resolver.rb +90 -0
- data/lib/tasker_core/registry/resolvers/class_constant_resolver.rb +156 -0
- data/lib/tasker_core/registry/resolvers/explicit_mapping_resolver.rb +146 -0
- data/lib/tasker_core/registry/resolvers/method_dispatch_wrapper.rb +144 -0
- data/lib/tasker_core/registry/resolvers/registry_resolver.rb +229 -0
- data/lib/tasker_core/registry/resolvers.rb +42 -0
- data/lib/tasker_core/registry.rb +12 -0
- data/lib/tasker_core/step_handler/api.rb +48 -0
- data/lib/tasker_core/step_handler/base.rb +354 -0
- data/lib/tasker_core/step_handler/batchable.rb +50 -0
- data/lib/tasker_core/step_handler/decision.rb +53 -0
- data/lib/tasker_core/step_handler/mixins/api.rb +452 -0
- data/lib/tasker_core/step_handler/mixins/batchable.rb +465 -0
- data/lib/tasker_core/step_handler/mixins/decision.rb +252 -0
- data/lib/tasker_core/step_handler/mixins.rb +66 -0
- data/lib/tasker_core/subscriber.rb +212 -0
- data/lib/tasker_core/task_handler/base.rb +254 -0
- data/lib/tasker_core/tasker_rb.so +0 -0
- data/lib/tasker_core/template_discovery.rb +181 -0
- data/lib/tasker_core/tracing.rb +166 -0
- data/lib/tasker_core/types/batch_processing_outcome.rb +301 -0
- data/lib/tasker_core/types/client_types.rb +145 -0
- data/lib/tasker_core/types/decision_point_outcome.rb +177 -0
- data/lib/tasker_core/types/error_types.rb +72 -0
- data/lib/tasker_core/types/simple_message.rb +151 -0
- data/lib/tasker_core/types/step_context.rb +328 -0
- data/lib/tasker_core/types/step_handler_call_result.rb +307 -0
- data/lib/tasker_core/types/step_message.rb +112 -0
- data/lib/tasker_core/types/step_types.rb +207 -0
- data/lib/tasker_core/types/task_template.rb +240 -0
- data/lib/tasker_core/types/task_types.rb +148 -0
- data/lib/tasker_core/types.rb +132 -0
- data/lib/tasker_core/version.rb +13 -0
- data/lib/tasker_core/worker/CLAUDE.md +7 -0
- data/lib/tasker_core/worker/event_poller.rb +224 -0
- data/lib/tasker_core/worker/in_process_domain_event_poller.rb +271 -0
- data/lib/tasker_core.rb +160 -0
- metadata +322 -0
|
@@ -0,0 +1,245 @@
|
|
|
1
|
+
//! FFI-specific logging module using unified logging patterns
|
|
2
|
+
//!
|
|
3
|
+
//! This module provides structured logging for FFI boundary debugging
|
|
4
|
+
//! using the unified logging macros that match Ruby patterns.
|
|
5
|
+
//!
|
|
6
|
+
//! ## TAS-29 Phase 6: Ruby FFI Logging Bridge
|
|
7
|
+
//!
|
|
8
|
+
//! This module exposes Rust's tracing infrastructure to Ruby via FFI, enabling
|
|
9
|
+
//! unified structured logging across both Ruby and Rust components.
|
|
10
|
+
//!
|
|
11
|
+
//! ### Architecture
|
|
12
|
+
//!
|
|
13
|
+
//! ```text
|
|
14
|
+
//! Ruby Handler
|
|
15
|
+
//! ↓
|
|
16
|
+
//! TaskerCore::Tracing.info("message", fields: {...})
|
|
17
|
+
//! ↓
|
|
18
|
+
//! FFI Bridge (this module)
|
|
19
|
+
//! ↓
|
|
20
|
+
//! tasker_shared::log_ffi! macro
|
|
21
|
+
//! ↓
|
|
22
|
+
//! tracing crate → OpenTelemetry (if enabled)
|
|
23
|
+
//! ```
|
|
24
|
+
//!
|
|
25
|
+
//! ### Log Levels
|
|
26
|
+
//!
|
|
27
|
+
//! - ERROR: Unrecoverable failures requiring intervention
|
|
28
|
+
//! - WARN: Degraded operation, retryable failures
|
|
29
|
+
//! - INFO: Lifecycle events, state transitions
|
|
30
|
+
//! - DEBUG: Detailed diagnostic information
|
|
31
|
+
//! - TRACE: Very verbose, hot-path entry/exit
|
|
32
|
+
|
|
33
|
+
use magnus::{value::ReprValue, Error, RHash, Value};
|
|
34
|
+
use std::collections::HashMap;
|
|
35
|
+
use tracing::{debug, error, info, trace, warn};
|
|
36
|
+
|
|
37
|
+
/// Initialize FFI logging using two-phase pattern for telemetry support
|
|
38
|
+
///
|
|
39
|
+
/// # Two-Phase Initialization Pattern (TAS-65)
|
|
40
|
+
///
|
|
41
|
+
/// This function implements phase 1 of the FFI telemetry initialization pattern:
|
|
42
|
+
///
|
|
43
|
+
/// **Phase 1 (This function)**: Called during Magnus initialization (no Tokio runtime)
|
|
44
|
+
/// - If TELEMETRY_ENABLED=false: Initialize console-only logging (safe, no runtime needed)
|
|
45
|
+
/// - If TELEMETRY_ENABLED=true: Skip initialization (will be done in phase 2)
|
|
46
|
+
///
|
|
47
|
+
/// **Phase 2**: Called in `bootstrap_worker()` after Tokio runtime creation
|
|
48
|
+
/// - Always call `init_tracing()` in `runtime.block_on()` context
|
|
49
|
+
/// - If console already initialized: Returns early (no-op)
|
|
50
|
+
/// - If not initialized (telemetry case): Initializes with OpenTelemetry in Tokio context
|
|
51
|
+
///
|
|
52
|
+
/// # Why This Pattern?
|
|
53
|
+
///
|
|
54
|
+
/// OpenTelemetry batch exporter requires a Tokio runtime context for async I/O.
|
|
55
|
+
/// During Magnus initialization, no Tokio runtime exists yet, so we defer full
|
|
56
|
+
/// initialization until after the runtime is created in `bootstrap_worker()`.
|
|
57
|
+
///
|
|
58
|
+
/// This pattern works for all FFI targets:
|
|
59
|
+
/// - Ruby (Magnus): Same pattern
|
|
60
|
+
/// - Python (PyO3): Same pattern
|
|
61
|
+
/// - WASM: Same pattern
|
|
62
|
+
pub fn init_ffi_logger() -> Result<(), Box<dyn std::error::Error>> {
|
|
63
|
+
// Check if telemetry is enabled
|
|
64
|
+
let telemetry_enabled = std::env::var("TELEMETRY_ENABLED")
|
|
65
|
+
.map(|v| v.to_lowercase() == "true")
|
|
66
|
+
.unwrap_or(false);
|
|
67
|
+
|
|
68
|
+
if telemetry_enabled {
|
|
69
|
+
// Phase 1: Telemetry enabled - skip logging init
|
|
70
|
+
// Will be initialized in bootstrap_worker() after runtime creation
|
|
71
|
+
println!("📡 TAS-65: Telemetry enabled - deferring logging init to runtime context");
|
|
72
|
+
} else {
|
|
73
|
+
// Phase 1: Telemetry disabled - safe to initialize console-only logging
|
|
74
|
+
tasker_shared::logging::init_console_only();
|
|
75
|
+
|
|
76
|
+
// Use unified logging macro
|
|
77
|
+
tasker_shared::log_ffi!(
|
|
78
|
+
info,
|
|
79
|
+
"FFI console logging initialized (no telemetry)",
|
|
80
|
+
component: "ffi_boundary"
|
|
81
|
+
);
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
Ok(())
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
/// Convert Ruby hash to Rust `HashMap` for structured fields
|
|
88
|
+
fn ruby_hash_to_map(hash: RHash) -> Result<HashMap<String, String>, Error> {
|
|
89
|
+
let mut map = HashMap::new();
|
|
90
|
+
|
|
91
|
+
hash.foreach(|key: Value, value: Value| {
|
|
92
|
+
let key_str = key.to_r_string()?.to_string()?;
|
|
93
|
+
let value_str = value.to_r_string()?.to_string()?;
|
|
94
|
+
map.insert(key_str, value_str);
|
|
95
|
+
Ok(magnus::r_hash::ForEach::Continue)
|
|
96
|
+
})?;
|
|
97
|
+
|
|
98
|
+
Ok(map)
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
/// Log ERROR level message with structured fields (Ruby FFI)
|
|
102
|
+
///
|
|
103
|
+
/// # Ruby Usage
|
|
104
|
+
/// ```ruby
|
|
105
|
+
/// TaskerCore.log_error("Task processing failed", {
|
|
106
|
+
/// correlation_id: correlation_id,
|
|
107
|
+
/// task_uuid: task_uuid,
|
|
108
|
+
/// error_message: error.message
|
|
109
|
+
/// })
|
|
110
|
+
/// ```
|
|
111
|
+
pub fn log_error(message: String, fields: RHash) -> Result<(), Error> {
|
|
112
|
+
let fields_map = ruby_hash_to_map(fields)?;
|
|
113
|
+
|
|
114
|
+
// Extract common fields for structured logging
|
|
115
|
+
let correlation_id = fields_map.get("correlation_id").cloned();
|
|
116
|
+
let task_uuid = fields_map.get("task_uuid").cloned();
|
|
117
|
+
let step_uuid = fields_map.get("step_uuid").cloned();
|
|
118
|
+
let namespace = fields_map.get("namespace").cloned();
|
|
119
|
+
let operation = fields_map
|
|
120
|
+
.get("operation")
|
|
121
|
+
.cloned()
|
|
122
|
+
.unwrap_or_else(|| "ruby_handler".to_string());
|
|
123
|
+
|
|
124
|
+
// Log with structured fields
|
|
125
|
+
error!(
|
|
126
|
+
correlation_id = correlation_id.as_deref(),
|
|
127
|
+
task_uuid = task_uuid.as_deref(),
|
|
128
|
+
step_uuid = step_uuid.as_deref(),
|
|
129
|
+
namespace = namespace.as_deref(),
|
|
130
|
+
operation = %operation,
|
|
131
|
+
component = "ruby_ffi",
|
|
132
|
+
"{}",
|
|
133
|
+
message
|
|
134
|
+
);
|
|
135
|
+
|
|
136
|
+
Ok(())
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
/// Log WARN level message with structured fields (Ruby FFI)
|
|
140
|
+
pub fn log_warn(message: String, fields: RHash) -> Result<(), Error> {
|
|
141
|
+
let fields_map = ruby_hash_to_map(fields)?;
|
|
142
|
+
|
|
143
|
+
let correlation_id = fields_map.get("correlation_id").cloned();
|
|
144
|
+
let task_uuid = fields_map.get("task_uuid").cloned();
|
|
145
|
+
let step_uuid = fields_map.get("step_uuid").cloned();
|
|
146
|
+
let namespace = fields_map.get("namespace").cloned();
|
|
147
|
+
let operation = fields_map
|
|
148
|
+
.get("operation")
|
|
149
|
+
.cloned()
|
|
150
|
+
.unwrap_or_else(|| "ruby_handler".to_string());
|
|
151
|
+
|
|
152
|
+
warn!(
|
|
153
|
+
correlation_id = correlation_id.as_deref(),
|
|
154
|
+
task_uuid = task_uuid.as_deref(),
|
|
155
|
+
step_uuid = step_uuid.as_deref(),
|
|
156
|
+
namespace = namespace.as_deref(),
|
|
157
|
+
operation = %operation,
|
|
158
|
+
component = "ruby_ffi",
|
|
159
|
+
"{}",
|
|
160
|
+
message
|
|
161
|
+
);
|
|
162
|
+
|
|
163
|
+
Ok(())
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
/// Log INFO level message with structured fields (Ruby FFI)
|
|
167
|
+
pub fn log_info(message: String, fields: RHash) -> Result<(), Error> {
|
|
168
|
+
let fields_map = ruby_hash_to_map(fields)?;
|
|
169
|
+
|
|
170
|
+
let correlation_id = fields_map.get("correlation_id").cloned();
|
|
171
|
+
let task_uuid = fields_map.get("task_uuid").cloned();
|
|
172
|
+
let step_uuid = fields_map.get("step_uuid").cloned();
|
|
173
|
+
let namespace = fields_map.get("namespace").cloned();
|
|
174
|
+
let operation = fields_map
|
|
175
|
+
.get("operation")
|
|
176
|
+
.cloned()
|
|
177
|
+
.unwrap_or_else(|| "ruby_handler".to_string());
|
|
178
|
+
|
|
179
|
+
info!(
|
|
180
|
+
correlation_id = correlation_id.as_deref(),
|
|
181
|
+
task_uuid = task_uuid.as_deref(),
|
|
182
|
+
step_uuid = step_uuid.as_deref(),
|
|
183
|
+
namespace = namespace.as_deref(),
|
|
184
|
+
operation = %operation,
|
|
185
|
+
component = "ruby_ffi",
|
|
186
|
+
"{}",
|
|
187
|
+
message
|
|
188
|
+
);
|
|
189
|
+
|
|
190
|
+
Ok(())
|
|
191
|
+
}
|
|
192
|
+
|
|
193
|
+
/// Log DEBUG level message with structured fields (Ruby FFI)
|
|
194
|
+
pub fn log_debug(message: String, fields: RHash) -> Result<(), Error> {
|
|
195
|
+
let fields_map = ruby_hash_to_map(fields)?;
|
|
196
|
+
|
|
197
|
+
let correlation_id = fields_map.get("correlation_id").cloned();
|
|
198
|
+
let task_uuid = fields_map.get("task_uuid").cloned();
|
|
199
|
+
let step_uuid = fields_map.get("step_uuid").cloned();
|
|
200
|
+
let namespace = fields_map.get("namespace").cloned();
|
|
201
|
+
let operation = fields_map
|
|
202
|
+
.get("operation")
|
|
203
|
+
.cloned()
|
|
204
|
+
.unwrap_or_else(|| "ruby_handler".to_string());
|
|
205
|
+
|
|
206
|
+
debug!(
|
|
207
|
+
correlation_id = correlation_id.as_deref(),
|
|
208
|
+
task_uuid = task_uuid.as_deref(),
|
|
209
|
+
step_uuid = step_uuid.as_deref(),
|
|
210
|
+
namespace = namespace.as_deref(),
|
|
211
|
+
operation = %operation,
|
|
212
|
+
component = "ruby_ffi",
|
|
213
|
+
"{}",
|
|
214
|
+
message
|
|
215
|
+
);
|
|
216
|
+
|
|
217
|
+
Ok(())
|
|
218
|
+
}
|
|
219
|
+
|
|
220
|
+
/// Log TRACE level message with structured fields (Ruby FFI)
|
|
221
|
+
pub fn log_trace(message: String, fields: RHash) -> Result<(), Error> {
|
|
222
|
+
let fields_map = ruby_hash_to_map(fields)?;
|
|
223
|
+
|
|
224
|
+
let correlation_id = fields_map.get("correlation_id").cloned();
|
|
225
|
+
let task_uuid = fields_map.get("task_uuid").cloned();
|
|
226
|
+
let step_uuid = fields_map.get("step_uuid").cloned();
|
|
227
|
+
let namespace = fields_map.get("namespace").cloned();
|
|
228
|
+
let operation = fields_map
|
|
229
|
+
.get("operation")
|
|
230
|
+
.cloned()
|
|
231
|
+
.unwrap_or_else(|| "ruby_handler".to_string());
|
|
232
|
+
|
|
233
|
+
trace!(
|
|
234
|
+
correlation_id = correlation_id.as_deref(),
|
|
235
|
+
task_uuid = task_uuid.as_deref(),
|
|
236
|
+
step_uuid = step_uuid.as_deref(),
|
|
237
|
+
namespace = namespace.as_deref(),
|
|
238
|
+
operation = %operation,
|
|
239
|
+
component = "ruby_ffi",
|
|
240
|
+
"{}",
|
|
241
|
+
message
|
|
242
|
+
);
|
|
243
|
+
|
|
244
|
+
Ok(())
|
|
245
|
+
}
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
//! # Global Event System
|
|
2
|
+
//!
|
|
3
|
+
//! Provides a global singleton `WorkerEventSystem` that can be shared between
|
|
4
|
+
//! the `WorkerProcessor` and our Rust event handlers.
|
|
5
|
+
|
|
6
|
+
use std::sync::Arc;
|
|
7
|
+
use tasker_shared::events::WorkerEventSystem;
|
|
8
|
+
|
|
9
|
+
/// Global worker event system singleton
|
|
10
|
+
pub static GLOBAL_EVENT_SYSTEM: std::sync::LazyLock<Arc<WorkerEventSystem>> =
|
|
11
|
+
std::sync::LazyLock::new(|| Arc::new(WorkerEventSystem::new()));
|
|
12
|
+
|
|
13
|
+
/// Get the global worker event system
|
|
14
|
+
pub fn get_global_event_system() -> Arc<WorkerEventSystem> {
|
|
15
|
+
GLOBAL_EVENT_SYSTEM.clone()
|
|
16
|
+
}
|
|
@@ -0,0 +1,319 @@
|
|
|
1
|
+
//! # TAS-65 Phase 4.1: Ruby FFI Bindings for In-Process Domain Events
|
|
2
|
+
//!
|
|
3
|
+
//! Exposes the fast in-process event bus to Ruby for subscribing to domain events.
|
|
4
|
+
//! Ruby handlers can receive domain events with `delivery_mode: fast` for internal
|
|
5
|
+
//! processing like metrics, notifications, and logging integrations.
|
|
6
|
+
//!
|
|
7
|
+
//! ## Architecture
|
|
8
|
+
//!
|
|
9
|
+
//! ```text
|
|
10
|
+
//! EventRouter
|
|
11
|
+
//! ↓ (delivery_mode: fast)
|
|
12
|
+
//! InProcessEventBus
|
|
13
|
+
//! ↓
|
|
14
|
+
//! Broadcast Channel
|
|
15
|
+
//! ↓
|
|
16
|
+
//! Ruby FFI poll_in_process_events()
|
|
17
|
+
//! ↓
|
|
18
|
+
//! Ruby handlers (Sentry, DataDog, Slack, etc.)
|
|
19
|
+
//! ```
|
|
20
|
+
//!
|
|
21
|
+
//! ## Usage
|
|
22
|
+
//!
|
|
23
|
+
//! ```ruby
|
|
24
|
+
//! # Poll for fast domain events
|
|
25
|
+
//! loop do
|
|
26
|
+
//! events = TaskerCore::FFI.poll_in_process_events(10)
|
|
27
|
+
//! break if events.empty?
|
|
28
|
+
//!
|
|
29
|
+
//! events.each do |event|
|
|
30
|
+
//! puts "Received: #{event[:event_name]}"
|
|
31
|
+
//! # Forward to integration (Sentry, DataDog, etc.)
|
|
32
|
+
//! end
|
|
33
|
+
//! end
|
|
34
|
+
//! ```
|
|
35
|
+
|
|
36
|
+
use crate::bridge::WORKER_SYSTEM;
|
|
37
|
+
use chrono::{DateTime, Utc};
|
|
38
|
+
use magnus::{
|
|
39
|
+
function, prelude::*, Error as MagnusError, ExceptionClass, RHash, RModule, Ruby,
|
|
40
|
+
Value as RValue,
|
|
41
|
+
};
|
|
42
|
+
use tasker_shared::events::domain_events::DomainEvent;
|
|
43
|
+
use tokio::sync::broadcast;
|
|
44
|
+
use tracing::{debug, error, trace, warn};
|
|
45
|
+
|
|
46
|
+
/// Helper to get RuntimeError exception class (magnus 0.8 API)
|
|
47
|
+
fn runtime_error_class() -> ExceptionClass {
|
|
48
|
+
Ruby::get()
|
|
49
|
+
.expect("Ruby runtime should be available")
|
|
50
|
+
.exception_runtime_error()
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
/// Maximum events to return in a single poll (safety limit)
///
/// Caps the `max_events` argument of `poll_in_process_events` so a single
/// FFI call can never request an unbounded batch.
const MAX_POLL_BATCH_SIZE: i64 = 100;
|
|
55
|
+
|
|
56
|
+
/// Convert a DomainEvent to a Ruby hash
|
|
57
|
+
///
|
|
58
|
+
/// Transforms the Rust DomainEvent structure into a Ruby hash with all
|
|
59
|
+
/// relevant fields for Ruby-side processing.
|
|
60
|
+
fn domain_event_to_ruby_hash(ruby: &Ruby, event: &DomainEvent) -> Result<RHash, MagnusError> {
|
|
61
|
+
let hash = ruby.hash_new();
|
|
62
|
+
|
|
63
|
+
// Core event fields
|
|
64
|
+
hash.aset("event_id", event.event_id.to_string())?;
|
|
65
|
+
hash.aset("event_name", event.event_name.clone())?;
|
|
66
|
+
hash.aset("event_version", event.event_version.clone())?;
|
|
67
|
+
|
|
68
|
+
// Metadata
|
|
69
|
+
let metadata = ruby.hash_new();
|
|
70
|
+
metadata.aset("task_uuid", event.metadata.task_uuid.to_string())?;
|
|
71
|
+
metadata.aset("step_uuid", event.metadata.step_uuid.map(|u| u.to_string()))?;
|
|
72
|
+
metadata.aset("step_name", event.metadata.step_name.clone())?;
|
|
73
|
+
metadata.aset("namespace", event.metadata.namespace.clone())?;
|
|
74
|
+
metadata.aset("correlation_id", event.metadata.correlation_id.to_string())?;
|
|
75
|
+
metadata.aset("fired_at", format_datetime(&event.metadata.fired_at))?;
|
|
76
|
+
metadata.aset("fired_by", event.metadata.fired_by.clone())?;
|
|
77
|
+
hash.aset("metadata", metadata)?;
|
|
78
|
+
|
|
79
|
+
// Payload - business-specific data as JSON string for Ruby parsing
|
|
80
|
+
// Using JSON string because Ruby can easily parse it with JSON.parse
|
|
81
|
+
let payload_json = serde_json::to_string(&event.payload.payload).map_err(|e| {
|
|
82
|
+
MagnusError::new(
|
|
83
|
+
runtime_error_class(),
|
|
84
|
+
format!("Failed to serialize payload: {}", e),
|
|
85
|
+
)
|
|
86
|
+
})?;
|
|
87
|
+
hash.aset("business_payload", payload_json)?;
|
|
88
|
+
|
|
89
|
+
// Execution result summary (most commonly needed fields)
|
|
90
|
+
let execution = ruby.hash_new();
|
|
91
|
+
execution.aset("success", event.payload.execution_result.success)?;
|
|
92
|
+
execution.aset("status", event.payload.execution_result.status.clone())?;
|
|
93
|
+
execution.aset(
|
|
94
|
+
"step_uuid",
|
|
95
|
+
event.payload.execution_result.step_uuid.to_string(),
|
|
96
|
+
)?;
|
|
97
|
+
if let Some(ref error) = event.payload.execution_result.error {
|
|
98
|
+
// Convert error to JSON string for Ruby consumption
|
|
99
|
+
let error_json = serde_json::to_string(error).map_err(|e| {
|
|
100
|
+
MagnusError::new(
|
|
101
|
+
runtime_error_class(),
|
|
102
|
+
format!("Failed to serialize error: {}", e),
|
|
103
|
+
)
|
|
104
|
+
})?;
|
|
105
|
+
execution.aset("error", error_json)?;
|
|
106
|
+
}
|
|
107
|
+
hash.aset("execution_result", execution)?;
|
|
108
|
+
|
|
109
|
+
// Task/step context summary
|
|
110
|
+
let context = ruby.hash_new();
|
|
111
|
+
context.aset(
|
|
112
|
+
"task_uuid",
|
|
113
|
+
event
|
|
114
|
+
.payload
|
|
115
|
+
.task_sequence_step
|
|
116
|
+
.task
|
|
117
|
+
.task
|
|
118
|
+
.task_uuid
|
|
119
|
+
.to_string(),
|
|
120
|
+
)?;
|
|
121
|
+
context.aset(
|
|
122
|
+
"task_name",
|
|
123
|
+
event.payload.task_sequence_step.task.task_name.clone(),
|
|
124
|
+
)?;
|
|
125
|
+
context.aset(
|
|
126
|
+
"namespace",
|
|
127
|
+
event.payload.task_sequence_step.task.namespace_name.clone(),
|
|
128
|
+
)?;
|
|
129
|
+
context.aset(
|
|
130
|
+
"step_name",
|
|
131
|
+
event.payload.task_sequence_step.workflow_step.name.clone(),
|
|
132
|
+
)?;
|
|
133
|
+
hash.aset("context", context)?;
|
|
134
|
+
|
|
135
|
+
Ok(hash)
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
/// Format datetime for Ruby
///
/// Uses RFC 3339 so the Ruby side can parse the value with standard
/// time-parsing helpers.
fn format_datetime(dt: &DateTime<Utc>) -> String {
    dt.to_rfc3339()
}
|
|
142
|
+
|
|
143
|
+
/// FFI function to poll for in-process domain events
///
/// Non-blocking poll that returns up to `max_events` domain events from the
/// fast in-process event bus. Returns empty array if no events are available.
///
/// # Arguments
///
/// * `max_events` - Maximum number of events to return (capped at 100)
///
/// # Returns
///
/// Array of Ruby hashes, each representing a domain event with:
/// - `event_id`: String UUID
/// - `event_name`: String (e.g., "payment.processed")
/// - `event_version`: String
/// - `metadata`: Hash with task_uuid, step_uuid, namespace, correlation_id, etc.
/// - `business_payload`: JSON string of business-specific data
/// - `execution_result`: Hash with success, status, step_uuid, error
/// - `context`: Hash with task_uuid, task_name, namespace, step_name
///
/// # Errors
///
/// Raises `RuntimeError` in Ruby when the worker system is not bootstrapped,
/// the in-process event bus was never initialized, or a lock is poisoned.
///
/// # Ruby Example
///
/// ```ruby
/// # Poll up to 10 events
/// events = TaskerCore::FFI.poll_in_process_events(10)
///
/// events.each do |event|
///   puts "Event: #{event[:event_name]}"
///   payload = JSON.parse(event[:business_payload])
///   # Process event...
/// end
/// ```
pub fn poll_in_process_events(max_events: i64) -> Result<RValue, MagnusError> {
    let ruby = Ruby::get().map_err(|e| {
        MagnusError::new(runtime_error_class(), format!("Failed to get Ruby: {}", e))
    })?;

    // Cap max_events for safety (1 to MAX_POLL_BATCH_SIZE)
    let max_events = max_events.clamp(1, MAX_POLL_BATCH_SIZE) as usize;

    // NOTE(review): two locks are taken here (WORKER_SYSTEM, then the
    // receiver) and held for the duration of the poll; the loop below only
    // uses non-blocking try_recv, so hold time is expected to be short.
    let handle_guard = WORKER_SYSTEM.lock().map_err(|e| {
        error!("Failed to acquire worker system lock: {}", e);
        MagnusError::new(runtime_error_class(), "Lock acquisition failed")
    })?;

    let handle = handle_guard.as_ref().ok_or_else(|| {
        MagnusError::new(
            runtime_error_class(),
            "Worker system not running - call bootstrap_worker first",
        )
    })?;

    // Get the FFI receiver
    let ffi_receiver_guard = handle.in_process_event_receiver.as_ref().ok_or_else(|| {
        MagnusError::new(
            runtime_error_class(),
            "In-process event bus not initialized",
        )
    })?;

    let mut receiver = ffi_receiver_guard.lock().map_err(|e| {
        error!("Failed to acquire event receiver lock: {}", e);
        MagnusError::new(runtime_error_class(), "Event receiver lock failed")
    })?;

    // Collect events using try_recv (non-blocking)
    let events = ruby.ary_new();
    let mut received_count = 0;

    while received_count < max_events {
        match receiver.try_recv() {
            Ok(event) => {
                trace!(
                    event_id = %event.event_id,
                    event_name = %event.event_name,
                    "Polled in-process domain event for Ruby"
                );

                match domain_event_to_ruby_hash(&ruby, &event) {
                    Ok(hash) => {
                        events.push(hash)?;
                        received_count += 1;
                    }
                    Err(e) => {
                        // A conversion failure drops only this event;
                        // remaining events in the channel are still polled.
                        warn!(
                            event_id = %event.event_id,
                            error = %e,
                            "Failed to convert domain event to Ruby hash - skipping"
                        );
                        // Continue processing other events
                    }
                }
            }
            Err(broadcast::error::TryRecvError::Empty) => {
                // No more events available
                break;
            }
            Err(broadcast::error::TryRecvError::Closed) => {
                warn!("In-process event channel closed");
                break;
            }
            Err(broadcast::error::TryRecvError::Lagged(count)) => {
                // The broadcast channel overwrote `count` events this
                // receiver never saw; log and keep draining what remains.
                warn!(
                    lagged_count = count,
                    "In-process event receiver lagged - some events were dropped"
                );
                // Continue receiving remaining events
            }
        }
    }

    if received_count > 0 {
        debug!(
            count = received_count,
            "Polled in-process domain events for Ruby"
        );
    }

    Ok(events.as_value())
}
|
|
263
|
+
|
|
264
|
+
/// FFI function to get in-process event bus statistics
|
|
265
|
+
///
|
|
266
|
+
/// Returns statistics about the in-process event bus including subscriber
|
|
267
|
+
/// counts and dispatch metrics.
|
|
268
|
+
///
|
|
269
|
+
/// # Returns
|
|
270
|
+
///
|
|
271
|
+
/// Ruby hash with:
|
|
272
|
+
/// - `enabled`: Boolean - whether in-process events are enabled
|
|
273
|
+
/// - `ffi_subscriber_count`: Integer - number of FFI subscribers
|
|
274
|
+
/// - Additional stats when available
|
|
275
|
+
pub fn get_in_process_event_stats() -> Result<RValue, MagnusError> {
|
|
276
|
+
let ruby = Ruby::get().map_err(|e| {
|
|
277
|
+
MagnusError::new(runtime_error_class(), format!("Failed to get Ruby: {}", e))
|
|
278
|
+
})?;
|
|
279
|
+
|
|
280
|
+
let hash = ruby.hash_new();
|
|
281
|
+
|
|
282
|
+
let handle_guard = WORKER_SYSTEM.lock().map_err(|e| {
|
|
283
|
+
error!("Failed to acquire worker system lock: {}", e);
|
|
284
|
+
MagnusError::new(runtime_error_class(), "Lock acquisition failed")
|
|
285
|
+
})?;
|
|
286
|
+
|
|
287
|
+
match handle_guard.as_ref() {
|
|
288
|
+
Some(handle) => {
|
|
289
|
+
hash.aset("enabled", handle.in_process_event_receiver.is_some())?;
|
|
290
|
+
|
|
291
|
+
// If we have access to the event bus stats through the handle,
|
|
292
|
+
// add them here. For now, just report enabled status.
|
|
293
|
+
if handle.in_process_event_receiver.is_some() {
|
|
294
|
+
hash.aset("status", "active")?;
|
|
295
|
+
} else {
|
|
296
|
+
hash.aset("status", "not_initialized")?;
|
|
297
|
+
}
|
|
298
|
+
}
|
|
299
|
+
None => {
|
|
300
|
+
hash.aset("enabled", false)?;
|
|
301
|
+
hash.aset("status", "worker_not_running")?;
|
|
302
|
+
}
|
|
303
|
+
}
|
|
304
|
+
|
|
305
|
+
Ok(hash.as_value())
|
|
306
|
+
}
|
|
307
|
+
|
|
308
|
+
/// Initialize the in-process event FFI module
///
/// Registers the event-polling and stats functions as singleton methods on
/// the given Ruby module (`poll_in_process_events` and
/// `in_process_event_stats`). The Ruby examples in this file assume the
/// caller passes the `TaskerCore::FFI` module — confirm at the call site.
pub fn init_in_process_event_ffi(module: &RModule) -> Result<(), MagnusError> {
    module.define_singleton_method(
        "poll_in_process_events",
        function!(poll_in_process_events, 1),
    )?;
    module.define_singleton_method(
        "in_process_event_stats",
        function!(get_in_process_event_stats, 0),
    )?;
    Ok(())
}
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
use magnus::{Error as MagnusError, Module, Ruby};
|
|
2
|
+
|
|
3
|
+
mod bootstrap;
|
|
4
|
+
mod bridge;
|
|
5
|
+
mod client_ffi; // TAS-231: Client API FFI functions
|
|
6
|
+
mod conversions;
|
|
7
|
+
mod diagnostics; // System diagnostics for troubleshooting
|
|
8
|
+
mod event_publisher_ffi; // TAS-65 Phase 2.4a: Domain event publishing FFI
|
|
9
|
+
mod ffi_logging;
|
|
10
|
+
mod global_event_system;
|
|
11
|
+
mod in_process_event_ffi; // TAS-65 Phase 4.1: In-process event polling FFI
|
|
12
|
+
mod observability_ffi; // TAS-77: Observability services FFI
|
|
13
|
+
|
|
14
|
+
// TAS-67: DomainEventCallback is now provided by tasker-worker (shared implementation)
|
|
15
|
+
|
|
16
|
+
// TAS-67: event_handler module removed - replaced by FfiDispatchChannel in bridge.rs
|
|
17
|
+
|
|
18
|
+
#[magnus::init]
|
|
19
|
+
fn init(ruby: &Ruby) -> Result<(), MagnusError> {
|
|
20
|
+
// Initialize logging
|
|
21
|
+
ffi_logging::init_ffi_logger().map_err(|err| {
|
|
22
|
+
MagnusError::new(
|
|
23
|
+
ruby.exception_runtime_error(),
|
|
24
|
+
format!("Failed to initialize logging, {err}"),
|
|
25
|
+
)
|
|
26
|
+
})?;
|
|
27
|
+
|
|
28
|
+
let module = ruby.define_module("TaskerCore")?;
|
|
29
|
+
let ffi_module = module.define_module("FFI")?;
|
|
30
|
+
|
|
31
|
+
// Initialize bridge with all lifecycle methods
|
|
32
|
+
bridge::init_bridge(&ffi_module)?;
|
|
33
|
+
|
|
34
|
+
// Add diagnostic function for troubleshooting
|
|
35
|
+
ffi_module.define_module_function(
|
|
36
|
+
"system_diagnostics",
|
|
37
|
+
magnus::function!(diagnostics::system_diagnostics, 0),
|
|
38
|
+
)?;
|
|
39
|
+
|
|
40
|
+
Ok(())
|
|
41
|
+
}
|