create-ekka-desktop-app 0.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +137 -0
- package/bin/cli.js +72 -0
- package/package.json +23 -0
- package/template/branding/app.json +6 -0
- package/template/branding/icon.icns +0 -0
- package/template/eslint.config.js +98 -0
- package/template/index.html +29 -0
- package/template/package.json +40 -0
- package/template/src/app/App.tsx +24 -0
- package/template/src/demo/DemoApp.tsx +260 -0
- package/template/src/demo/components/Banner.tsx +82 -0
- package/template/src/demo/components/EmptyState.tsx +61 -0
- package/template/src/demo/components/InfoPopover.tsx +171 -0
- package/template/src/demo/components/InfoTooltip.tsx +76 -0
- package/template/src/demo/components/LearnMore.tsx +98 -0
- package/template/src/demo/components/NodeCredentialsOnboarding.tsx +219 -0
- package/template/src/demo/components/SetupWizard.tsx +48 -0
- package/template/src/demo/components/StatusBadge.tsx +83 -0
- package/template/src/demo/components/index.ts +10 -0
- package/template/src/demo/hooks/index.ts +6 -0
- package/template/src/demo/hooks/useAuditEvents.ts +30 -0
- package/template/src/demo/layout/Shell.tsx +110 -0
- package/template/src/demo/layout/Sidebar.tsx +192 -0
- package/template/src/demo/pages/AuditLogPage.tsx +235 -0
- package/template/src/demo/pages/DocGenPage.tsx +874 -0
- package/template/src/demo/pages/HomeSetupPage.tsx +182 -0
- package/template/src/demo/pages/LoginPage.tsx +192 -0
- package/template/src/demo/pages/PathPermissionsPage.tsx +873 -0
- package/template/src/demo/pages/RunnerPage.tsx +445 -0
- package/template/src/demo/pages/SystemPage.tsx +557 -0
- package/template/src/demo/pages/VaultPage.tsx +805 -0
- package/template/src/ekka/__tests__/demo-backend.test.ts +187 -0
- package/template/src/ekka/audit/index.ts +7 -0
- package/template/src/ekka/audit/store.ts +68 -0
- package/template/src/ekka/audit/types.ts +22 -0
- package/template/src/ekka/auth/client.ts +212 -0
- package/template/src/ekka/auth/index.ts +30 -0
- package/template/src/ekka/auth/storage.ts +114 -0
- package/template/src/ekka/auth/types.ts +67 -0
- package/template/src/ekka/backend/demo.ts +151 -0
- package/template/src/ekka/backend/interface.ts +36 -0
- package/template/src/ekka/config.ts +48 -0
- package/template/src/ekka/constants.ts +143 -0
- package/template/src/ekka/errors.ts +54 -0
- package/template/src/ekka/index.ts +516 -0
- package/template/src/ekka/internal/backend.ts +156 -0
- package/template/src/ekka/internal/index.ts +7 -0
- package/template/src/ekka/ops/auth.ts +29 -0
- package/template/src/ekka/ops/debug.ts +68 -0
- package/template/src/ekka/ops/home.ts +101 -0
- package/template/src/ekka/ops/index.ts +16 -0
- package/template/src/ekka/ops/nodeCredentials.ts +131 -0
- package/template/src/ekka/ops/nodeSession.ts +145 -0
- package/template/src/ekka/ops/paths.ts +183 -0
- package/template/src/ekka/ops/runner.ts +86 -0
- package/template/src/ekka/ops/runtime.ts +31 -0
- package/template/src/ekka/ops/setup.ts +47 -0
- package/template/src/ekka/ops/vault.ts +459 -0
- package/template/src/ekka/ops/workflowRuns.ts +116 -0
- package/template/src/ekka/types.ts +82 -0
- package/template/src/ekka/utils/idempotency.ts +14 -0
- package/template/src/ekka/utils/index.ts +7 -0
- package/template/src/ekka/utils/time.ts +77 -0
- package/template/src/main.tsx +12 -0
- package/template/src/vite-env.d.ts +12 -0
- package/template/src-tauri/Cargo.toml +41 -0
- package/template/src-tauri/build.rs +3 -0
- package/template/src-tauri/capabilities/default.json +11 -0
- package/template/src-tauri/icons/icon.icns +0 -0
- package/template/src-tauri/icons/icon.png +0 -0
- package/template/src-tauri/resources/ekka-engine-bootstrap +0 -0
- package/template/src-tauri/src/bootstrap.rs +37 -0
- package/template/src-tauri/src/commands.rs +1215 -0
- package/template/src-tauri/src/device_secret.rs +111 -0
- package/template/src-tauri/src/engine_process.rs +538 -0
- package/template/src-tauri/src/grants.rs +129 -0
- package/template/src-tauri/src/handlers/home.rs +65 -0
- package/template/src-tauri/src/handlers/mod.rs +7 -0
- package/template/src-tauri/src/handlers/paths.rs +128 -0
- package/template/src-tauri/src/handlers/vault.rs +680 -0
- package/template/src-tauri/src/main.rs +243 -0
- package/template/src-tauri/src/node_auth.rs +858 -0
- package/template/src-tauri/src/node_credentials.rs +541 -0
- package/template/src-tauri/src/node_runner.rs +882 -0
- package/template/src-tauri/src/node_vault_crypto.rs +113 -0
- package/template/src-tauri/src/node_vault_store.rs +267 -0
- package/template/src-tauri/src/ops/auth.rs +50 -0
- package/template/src-tauri/src/ops/home.rs +251 -0
- package/template/src-tauri/src/ops/mod.rs +7 -0
- package/template/src-tauri/src/ops/runtime.rs +21 -0
- package/template/src-tauri/src/state.rs +639 -0
- package/template/src-tauri/src/types.rs +84 -0
- package/template/src-tauri/tauri.conf.json +41 -0
- package/template/tsconfig.json +26 -0
- package/template/tsconfig.tsbuildinfo +1 -0
- package/template/vite.config.ts +34 -0
package/template/src-tauri/src/node_runner.rs

@@ -0,0 +1,882 @@

```rust
//! Desktop Node Session Runner
//!
//! Runner loop that uses Ed25519 node session authentication instead of internal service keys.
//!
//! ## Architecture
//!
//! - Bootstrap node session FIRST before starting runner
//! - Runner uses session token for all engine calls
//! - Session is refreshed automatically when expired
//! - Tenant/workspace comes from session (EKKA decides scope)
//!
//! ## Security
//!
//! - NO internal service keys used
//! - NO environment variable credentials
//! - Session tokens held in memory only
```
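The "bootstrap session first" rule above implies a specific startup order. A minimal sketch of the intended call sequence, assuming a `NodeSessionHolder::new()` constructor and the literal paths shown (neither appears in this file; the holder is defined in `node_auth.rs`):

```rust
// Hypothetical startup sketch; NodeSessionHolder::new() and the argument
// values are assumptions, not taken from this file.
use std::sync::Arc;

async fn boot(state: RunnerState, config: NodeSessionRunnerConfig) {
    // 1. Establish the node session BEFORE the runner polls anything,
    //    e.g. by seeding the holder via the bootstrap/refresh flow.
    let holder = Arc::new(NodeSessionHolder::new());

    // 2. Only then start the polling loop (start_node_runner is defined
    //    at the bottom of this file).
    let shutdown_tx = start_node_runner(
        state,
        holder,
        config,
        "/home/user/.ekka".into(), // home_path (assumed)
        None,                      // device_fingerprint
        None,                      // user_sub
    )
    .await;

    // Tokens live in memory only; signalling shutdown_tx stops the loop.
    let _ = shutdown_tx;
}
```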
```rust
#![allow(dead_code)] // API types and fields may not all be used yet

use crate::node_auth::{
    refresh_node_session, NodeSession, NodeSessionHolder, NodeSessionRunnerConfig,
};
use crate::state::RunnerState;
// Use ekka_runner_local for enhanced executor with debug bundle support
use ekka_runner_local::dispatch::{classify_error, dispatch_task};
use ekka_runner_local::types::{EngineContext, TaskExecutionContext};
use reqwest::Client;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use tracing::{error, info, warn};
use uuid::Uuid;

const DEFAULT_NODE_URL: &str = "http://127.0.0.1:7777";
const POLL_INTERVAL_SECS: u64 = 5;
const MAX_POLL_LIMIT: u32 = 10;
const RUNNER_ID_PREFIX: &str = "ekka-node-runner";

// =============================================================================
// Types (duplicated from ekka-runner-core to avoid internal key dependency)
// =============================================================================

#[derive(Debug, Deserialize)]
struct EnginePollResponse {
    tasks: Vec<EngineTaskInfo>,
}

#[derive(Debug, Clone, Deserialize)]
struct EngineTaskInfo {
    id: String,
    task_type: String,
    task_subtype: Option<String>,
}

#[derive(Debug, Deserialize)]
struct EngineClaimResponse {
    input_json: serde_json::Value,
}

#[derive(Debug, Serialize)]
struct EngineCompleteRequest {
    runner_id: String,
    output: EngineCompleteOutput,
    #[serde(skip_serializing_if = "Option::is_none")]
    duration_ms: Option<u64>,
}

#[derive(Debug, Serialize)]
struct EngineCompleteOutput {
    decision: String,
    reason: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    proposed_patch: Option<Vec<serde_json::Value>>,
}

#[derive(Debug, Serialize)]
struct EngineFailRequest {
    runner_id: String,
    error: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    error_code: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    retryable: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    duration_ms: Option<u64>,
}
```
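The `skip_serializing_if` attributes mean `None` fields are omitted from the JSON body entirely, not sent as `null`. A quick illustration (the field values are made up; only the shape matters):

```rust
// Illustrative only: what EngineCompleteRequest serializes to.
let req = EngineCompleteRequest {
    runner_id: "ekka-node-runner-1a2b3c4d".to_string(),
    output: EngineCompleteOutput {
        decision: "ACCEPT".to_string(),
        reason: "Task executed successfully".to_string(),
        proposed_patch: None, // omitted from the JSON entirely
    },
    duration_ms: None, // also omitted
};
assert_eq!(
    serde_json::to_string(&req).unwrap(),
    r#"{"runner_id":"ekka-node-runner-1a2b3c4d","output":{"decision":"ACCEPT","reason":"Task executed successfully"}}"#
);
```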
```rust
// =============================================================================
// Callback Trait
// =============================================================================

pub trait NodeRunnerCallback: Send + Sync {
    fn on_start(&self, runner_id: &str);
    fn on_poll(&self);
    fn on_claim(&self, task_id: &str);
    fn on_complete(&self, task_id: &str);
    fn on_error(&self, error: &str);
    fn on_stop(&self);
}
```
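Because the trait is object-safe and `Send + Sync`, alternative sinks are easy to plug in alongside the two implementations this file ships. A hypothetical test double (not part of the package) might just count events with atomics:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Hypothetical test double; not shipped in the package.
#[derive(Default)]
struct CountingCallback {
    polls: AtomicUsize,
    errors: AtomicUsize,
}

impl NodeRunnerCallback for CountingCallback {
    fn on_start(&self, _: &str) {}
    fn on_poll(&self) {
        self.polls.fetch_add(1, Ordering::Relaxed);
    }
    fn on_claim(&self, _: &str) {}
    fn on_complete(&self, _: &str) {}
    fn on_error(&self, _: &str) {
        self.errors.fetch_add(1, Ordering::Relaxed);
    }
    fn on_stop(&self) {}
}
```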
```rust
/// Desktop callbacks that update RunnerState
pub struct DesktopNodeRunnerCallbacks {
    state: RunnerState,
}

impl DesktopNodeRunnerCallbacks {
    pub fn new(state: RunnerState) -> Self {
        Self { state }
    }
}

impl NodeRunnerCallback for DesktopNodeRunnerCallbacks {
    fn on_start(&self, runner_id: &str) {
        info!(op = "node_runner.start", runner_id = %runner_id, "Node runner started");
        self.state.start(runner_id);
    }

    fn on_poll(&self) {
        self.state.record_poll();
    }

    fn on_claim(&self, task_id: &str) {
        info!(op = "node_runner.claim", task_id = %&task_id[..8.min(task_id.len())], "Task claimed");
        self.state.record_claim(task_id);
    }

    fn on_complete(&self, task_id: &str) {
        info!(op = "node_runner.complete", task_id = %&task_id[..8.min(task_id.len())], "Task completed");
        self.state.record_complete(task_id);
    }

    fn on_error(&self, error: &str) {
        warn!(op = "node_runner.error", "Runner error occurred");
        self.state.record_error(error);
    }

    fn on_stop(&self) {
        info!(op = "node_runner.stop", "Node runner stopped");
        self.state.stop();
    }
}

// =============================================================================
// Node Session Runner
// =============================================================================

struct NodeSessionRunner {
    client: Client,
    engine_url: String,
    node_url: String,
    node_id: Uuid,
    runner_id: String,
    session_holder: Arc<NodeSessionHolder>,
    home_path: PathBuf,
    device_fingerprint: Option<String>,
    /// User subject (from JWT) for PathGuard grant validation
    user_sub: Option<String>,
}

impl NodeSessionRunner {
    fn new(
        config: &NodeSessionRunnerConfig,
        session_holder: Arc<NodeSessionHolder>,
        home_path: PathBuf,
        device_fingerprint: Option<String>,
        user_sub: Option<String>,
    ) -> Self {
        let client = Client::builder()
            .timeout(Duration::from_secs(60))
            .build()
            .expect("Failed to build HTTP client");

        let runner_id = format!("{}-{}", RUNNER_ID_PREFIX, &Uuid::new_v4().to_string()[..8]);

        Self {
            client,
            engine_url: config.engine_url.clone(),
            node_url: config.node_url.clone(),
            node_id: config.node_id,
            runner_id,
            session_holder,
            home_path,
            device_fingerprint,
            user_sub,
        }
    }

    /// Get current valid session, refreshing if needed
    ///
    /// IMPORTANT: Uses spawn_blocking to avoid Tokio runtime panic.
    /// The refresh_node_session function uses reqwest::blocking::Client internally,
    /// which creates its own runtime. Calling it directly in async context causes:
    /// "Cannot drop a runtime in a context where blocking is not allowed"
    async fn get_session(&self) -> Result<NodeSession, String> {
        // Check if we have a valid session
        if let Some(session) = self.session_holder.get_valid() {
            return Ok(session);
        }

        // Need to refresh - use spawn_blocking to avoid runtime panic
        info!(op = "node_runner.refresh_session.start", "Refreshing node session");

        let home_path = self.home_path.clone();
        let node_id = self.node_id;
        let engine_url = self.engine_url.clone();
        let device_fingerprint = self.device_fingerprint.clone();

        let session = tokio::task::spawn_blocking(move || {
            refresh_node_session(
                &home_path,
                &node_id,
                &engine_url,
                device_fingerprint.as_deref(),
            )
        })
        .await
        .map_err(|e| format!("Session refresh task failed: {}", e))?
        .map_err(|e| {
            error!(op = "node_runner.refresh_session.failed", error = %e, "Session refresh failed");
            format!("Session refresh failed: {}", e)
        })?;

        info!(op = "node_runner.refresh_session.ok", session_id = %session.session_id, "Session refreshed successfully");
        self.session_holder.set(session.clone());
        Ok(session)
    }
```
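The `spawn_blocking` caveat in `get_session` generalizes: any function that builds a `reqwest::blocking::Client` spins up its own Tokio runtime, and dropping that runtime on an async worker thread panics with the quoted message. A standalone sketch of the safe pattern (`fetch_blocking` is a hypothetical stand-in for `refresh_node_session`):

```rust
// Sketch: wrapping a blocking-reqwest call so it is safe to invoke
// from async code. fetch_blocking stands in for any function that
// uses reqwest::blocking::Client internally.
fn fetch_blocking(url: String) -> Result<String, String> {
    reqwest::blocking::get(&url)
        .and_then(|r| r.text())
        .map_err(|e| e.to_string())
}

async fn fetch_safely(url: String) -> Result<String, String> {
    // WRONG: calling fetch_blocking(url) directly here would panic with
    // "Cannot drop a runtime in a context where blocking is not allowed".
    tokio::task::spawn_blocking(move || fetch_blocking(url))
        .await
        .map_err(|e| format!("join error: {}", e))? // task panicked or was cancelled
}
```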
```rust
    /// Get security headers using current session
    async fn security_headers(&self) -> Result<Vec<(&'static str, String)>, String> {
        let session = self.get_session().await?;

        Ok(vec![
            ("Authorization", format!("Bearer {}", session.token)),
            ("X-EKKA-PROOF-TYPE", "node_session".to_string()),
            ("X-REQUEST-ID", Uuid::new_v4().to_string()),
            ("X-EKKA-CORRELATION-ID", Uuid::new_v4().to_string()),
            ("X-EKKA-MODULE", "engine.runner_tasks".to_string()),
            ("X-EKKA-CLIENT", "ekka-desktop".to_string()),
            ("X-EKKA-CLIENT-VERSION", "0.2.0".to_string()),
            ("X-EKKA-NODE-ID", self.node_id.to_string()),
        ])
    }

    async fn poll_tasks(&self) -> Result<Vec<EngineTaskInfo>, String> {
        let session = self.get_session().await?;

        let url = format!(
            "{}/engine/runner-tasks?status=pending&limit={}&tenant_id={}&workspace_id={}",
            self.engine_url, MAX_POLL_LIMIT, session.tenant_id, session.workspace_id
        );

        let headers = self.security_headers().await?;
        let mut req = self.client.get(&url);
        for (k, v) in headers {
            req = req.header(k, v);
        }
        req = req.header("X-EKKA-ACTION", "list");

        let response = req
            .send()
            .await
            .map_err(|e| format!("Poll failed: {}", e))?;

        if !response.status().is_success() {
            let status = response.status();
            let body = response.text().await.unwrap_or_default();
            return Err(format!(
                "Poll failed ({}): {}",
                status,
                body.chars().take(100).collect::<String>()
            ));
        }

        let poll: EnginePollResponse = response
            .json()
            .await
            .map_err(|e| format!("Parse poll response: {}", e))?;

        Ok(poll.tasks)
    }
```
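For reference, `poll_tasks` deserializes a response of roughly this shape into `EnginePollResponse` (the values are illustrative, not captured from a live engine; serde ignores any extra fields by default):

```rust
// Illustrative payload that EnginePollResponse accepts.
let raw = r#"{
    "tasks": [
        {
            "id": "0b9f3c6e-1234-5678-9abc-def012345678",
            "task_type": "runner_task",
            "task_subtype": "prompt_run"
        }
    ]
}"#;
let poll: EnginePollResponse = serde_json::from_str(raw).unwrap();
assert_eq!(poll.tasks.len(), 1);
```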
```rust
    async fn claim_task(&self, task_id: &str) -> Result<EngineClaimResponse, String> {
        let session = self.get_session().await?;

        let url = format!(
            "{}/engine/runner-tasks/{}/claim?tenant_id={}&workspace_id={}",
            self.engine_url, task_id, session.tenant_id, session.workspace_id
        );

        let headers = self.security_headers().await?;
        let mut req = self.client.post(&url);
        for (k, v) in headers {
            req = req.header(k, v);
        }
        req = req.header("X-EKKA-ACTION", "claim");

        let response = req
            .json(&serde_json::json!({ "runner_id": self.runner_id }))
            .send()
            .await
            .map_err(|e| format!("Claim failed: {}", e))?;

        if !response.status().is_success() {
            let status = response.status();
            let body = response.text().await.unwrap_or_default();
            return Err(format!(
                "Claim failed ({}): {}",
                status,
                body.chars().take(100).collect::<String>()
            ));
        }

        response
            .json()
            .await
            .map_err(|e| format!("Parse claim response: {}", e))
    }

    async fn complete_task(
        &self,
        task_id: &str,
        output: EngineCompleteOutput,
        duration_ms: Option<u64>,
    ) -> Result<(), String> {
        let session = self.get_session().await?;

        let url = format!(
            "{}/engine/runner-tasks/{}/complete?tenant_id={}&workspace_id={}",
            self.engine_url, task_id, session.tenant_id, session.workspace_id
        );

        let headers = self.security_headers().await?;
        let mut req = self.client.post(&url);
        for (k, v) in headers {
            req = req.header(k, v);
        }
        req = req.header("X-EKKA-ACTION", "complete");

        let body = EngineCompleteRequest {
            runner_id: self.runner_id.clone(),
            output,
            duration_ms,
        };

        // DEBUG: Log FULL request details before sending
        let body_json_str = serde_json::to_string(&body).unwrap_or_default();
        tracing::info!(
            op = "node_runner.complete.debug",
            url = %url,
            body_json = %body_json_str,
            "Complete request - FULL BODY"
        );

        let response = req
            .json(&body)
            .send()
            .await
            .map_err(|e| format!("Complete failed: {}", e))?;

        if !response.status().is_success() {
            let status = response.status();
            let body_text = response.text().await.unwrap_or_default();
            tracing::error!(
                op = "node_runner.complete.response_error",
                status = %status,
                full_response = %body_text,
                "Complete failed - FULL RESPONSE"
            );
            return Err(format!(
                "Complete failed ({}): {}",
                status,
                body_text
            ));
        }

        Ok(())
    }

    async fn fail_task(
        &self,
        task_id: &str,
        error: &str,
        code: &str,
        retryable: bool,
    ) -> Result<(), String> {
        let session = self.get_session().await?;

        let url = format!(
            "{}/engine/runner-tasks/{}/fail?tenant_id={}&workspace_id={}",
            self.engine_url, task_id, session.tenant_id, session.workspace_id
        );

        let headers = self.security_headers().await?;
        let mut req = self.client.post(&url);
        for (k, v) in headers {
            req = req.header(k, v);
        }
        req = req.header("X-EKKA-ACTION", "fail");

        let body = EngineFailRequest {
            runner_id: self.runner_id.clone(),
            error: error.to_string(),
            error_code: Some(code.to_string()),
            retryable: Some(retryable),
            duration_ms: None,
        };

        let response = req
            .json(&body)
            .send()
            .await
            .map_err(|e| format!("Fail failed: {}", e))?;

        if !response.status().is_success() {
            let status = response.status();
            return Err(format!("Fail failed ({})", status));
        }

        Ok(())
    }

    async fn heartbeat(&self, task_id: &str) -> Result<(), String> {
        let session = self.get_session().await?;

        let url = format!(
            "{}/engine/runner-tasks/{}/heartbeat?tenant_id={}&workspace_id={}",
            self.engine_url, task_id, session.tenant_id, session.workspace_id
        );

        let headers = self.security_headers().await?;
        let mut req = self.client.post(&url);
        for (k, v) in headers {
            req = req.header(k, v);
        }
        req = req.header("X-EKKA-ACTION", "heartbeat");

        let response = req
            .json(&serde_json::json!({ "runner_id": self.runner_id }))
            .send()
            .await
            .map_err(|e| format!("Heartbeat failed: {}", e))?;

        if !response.status().is_success() {
            return Err(format!("Heartbeat failed ({})", response.status()));
        }

        Ok(())
    }

    async fn process_task(&self, task: &EngineTaskInfo, cb: &Arc<dyn NodeRunnerCallback>) {
        let task_id = &task.id;
        let task_id_short = &task_id[..8.min(task_id.len())];

        info!(
            op = "node_runner.task.start",
            task_id = %task_id_short,
            task_type = %task.task_type,
            task_subtype = ?task.task_subtype,
            "Processing task"
        );

        // Claim
        let claim_result = match self.claim_task(task_id).await {
            Ok(r) => r,
            Err(e) => {
                warn!(
                    op = "node_runner.task.claim_failed",
                    task_id = %task_id_short,
                    error = %e,
                    "Claim failed"
                );
                cb.on_error(&e);
                return;
            }
        };

        cb.on_claim(task_id);
        info!(
            op = "node_runner.task.claimed",
            task_id = %task_id_short,
            "Task claimed"
        );

        // Build execution context
        let ctx = TaskExecutionContext::new(task_id.clone(), claim_result.input_json);

        // Get session for engine context
        let session = match self.get_session().await {
            Ok(s) => s,
            Err(e) => {
                error!(
                    op = "node_runner.task.session_error",
                    task_id = %task_id_short,
                    error = %e,
                    "Failed to get session for execution"
                );
                let _ = self
                    .fail_task(task_id, &e, "SESSION_ERROR", true)
                    .await;
                cb.on_error(&e);
                return;
            }
        };

        // Build engine context for prompt_run executor with node session auth
        // Inject ekka_home_path so PathGuard doesn't need EKKA_HOME env var
        // Inject user_sub so PathGuard grant validation matches user's grants
        let mut engine_ctx = EngineContext::with_node_session(
            self.engine_url.clone(),
            session.token.clone(),
            session.tenant_id.to_string(),
            session.workspace_id.to_string(),
        )
        .set_ekka_home_path(self.home_path.clone());

        if let Some(ref sub) = self.user_sub {
            engine_ctx = engine_ctx.set_user_sub(sub.clone());
        }

        // Build heartbeat function
        let heartbeat_task_id = task_id.clone();
        let heartbeat_self = NodeSessionRunnerHeartbeat {
            client: self.client.clone(),
            engine_url: self.engine_url.clone(),
            runner_id: self.runner_id.clone(),
            session_holder: self.session_holder.clone(),
            home_path: self.home_path.clone(),
            node_id: self.node_id,
            device_fingerprint: self.device_fingerprint.clone(),
        };

        let heartbeat_fn: Arc<
            dyn Fn() -> std::pin::Pin<
                    Box<dyn std::future::Future<Output = Result<(), String>> + Send>,
                > + Send
                + Sync,
        > = Arc::new(move || {
            let task_id = heartbeat_task_id.clone();
            let hb = heartbeat_self.clone();

            Box::pin(async move { hb.send_heartbeat(&task_id).await })
        });
```
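The `heartbeat_fn` above type-erases an async closure so the dispatcher can accept any heartbeat implementation: each call clones the captured state and returns a fresh boxed future. The same pattern in isolation, with a shorter alias (the alias name and `make_thunk` are ours, not the package's):

```rust
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;

// Alias name is ours; the shape matches the heartbeat_fn type above.
type AsyncThunk =
    Arc<dyn Fn() -> Pin<Box<dyn Future<Output = Result<(), String>> + Send>> + Send + Sync>;

fn make_thunk(msg: String) -> AsyncThunk {
    Arc::new(move || {
        // Clone captured state per call: every invocation yields a new future.
        let msg = msg.clone();
        Box::pin(async move {
            println!("tick: {}", msg);
            Ok(())
        })
    })
}
```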
```rust
        // Dispatch to actual executor
        let start = std::time::Instant::now();
        let result = dispatch_task(
            task.task_subtype.as_deref(),
            &self.client,
            &self.node_url,
            "", // session_id not used for prompt_run
            Some(&engine_ctx),
            &ctx,
            Some(heartbeat_fn),
        )
        .await;

        let duration_ms = start.elapsed().as_millis() as u64;

        // Handle result
        match result {
            Ok(envelope) => {
                // Check if executor returned success or failure
                let success = envelope
                    .get("success")
                    .and_then(|v| v.as_bool())
                    .unwrap_or(false);

                let (decision, reason) = if success {
                    ("ACCEPT".to_string(), "Task executed successfully".to_string())
                } else {
                    let failure_code = envelope
                        .get("failure_code")
                        .and_then(|v| v.as_str())
                        .unwrap_or("UNKNOWN");
                    (
                        "REJECT".to_string(),
                        format!("Task failed: {}", failure_code),
                    )
                };

                info!(
                    op = "node_runner.task.executed",
                    task_id = %task_id_short,
                    success = %success,
                    duration_ms = %duration_ms,
                    "Task execution completed"
                );

                let output = EngineCompleteOutput {
                    decision,
                    reason,
                    proposed_patch: Some(vec![envelope]),
                };

                if let Err(e) = self.complete_task(task_id, output, Some(duration_ms)).await {
                    error!(
                        op = "node_runner.task.complete_failed",
                        task_id = %task_id_short,
                        error = %e,
                        "Complete failed"
                    );
                    cb.on_error(&e);
                } else {
                    cb.on_complete(task_id);
                }
            }
            Err(e) => {
                warn!(
                    op = "node_runner.task.failed",
                    task_id = %task_id_short,
                    error = %e,
                    duration_ms = %duration_ms,
                    "Task execution failed"
                );

                let (code, retryable) = classify_error(&e);

                if let Err(fail_err) = self.fail_task(task_id, &e, code, retryable).await {
                    error!(
                        op = "node_runner.task.fail_failed",
                        task_id = %task_id_short,
                        error = %fail_err,
                        "Fail request failed"
                    );
                }
                cb.on_error(&e);
            }
        }
    }
}

// =============================================================================
// Heartbeat Helper
// =============================================================================

/// Helper struct to send heartbeats from the executor
#[derive(Clone)]
struct NodeSessionRunnerHeartbeat {
    client: Client,
    engine_url: String,
    runner_id: String,
    session_holder: Arc<NodeSessionHolder>,
    home_path: PathBuf,
    node_id: Uuid,
    device_fingerprint: Option<String>,
}

impl NodeSessionRunnerHeartbeat {
    async fn send_heartbeat(&self, task_id: &str) -> Result<(), String> {
        // Get current session (refresh if needed)
        let session = if let Some(s) = self.session_holder.get_valid() {
            s
        } else {
            // Try to refresh - use spawn_blocking to avoid Tokio runtime panic
            // The refresh_node_session function uses reqwest::blocking::Client internally
            info!(op = "node_runner.heartbeat.refresh_session.start", "Refreshing session for heartbeat");

            let home_path = self.home_path.clone();
            let node_id = self.node_id;
            let engine_url = self.engine_url.clone();
            let device_fingerprint = self.device_fingerprint.clone();

            let session = tokio::task::spawn_blocking(move || {
                refresh_node_session(
                    &home_path,
                    &node_id,
                    &engine_url,
                    device_fingerprint.as_deref(),
                )
            })
            .await
            .map_err(|e| format!("Session refresh task failed: {}", e))?
            .map_err(|e| {
                error!(op = "node_runner.heartbeat.refresh_session.failed", error = %e, "Session refresh for heartbeat failed");
                format!("Session refresh failed: {}", e)
            })?;

            info!(op = "node_runner.heartbeat.refresh_session.ok", session_id = %session.session_id, "Session refreshed for heartbeat");
            self.session_holder.set(session.clone());
            session
        };

        let url = format!(
            "{}/engine/runner-tasks/{}/heartbeat?tenant_id={}&workspace_id={}",
            self.engine_url, task_id, session.tenant_id, session.workspace_id
        );

        let task_id_short = &task_id[..8.min(task_id.len())];

        // CRITICAL: Include all security envelope headers (securityEnvelope middleware requires all)
        // Previously missing: X-REQUEST-ID, X-EKKA-CORRELATION-ID, X-EKKA-MODULE, X-EKKA-CLIENT, X-EKKA-CLIENT-VERSION
        let response = self
            .client
            .post(&url)
            .header("Authorization", format!("Bearer {}", session.token))
            .header("X-EKKA-PROOF-TYPE", "node_session")
            .header("X-REQUEST-ID", Uuid::new_v4().to_string())
            .header("X-EKKA-CORRELATION-ID", Uuid::new_v4().to_string())
            .header("X-EKKA-MODULE", "engine.runner_tasks")
            .header("X-EKKA-ACTION", "heartbeat")
            .header("X-EKKA-CLIENT", "ekka-desktop")
            .header("X-EKKA-CLIENT-VERSION", "0.2.0")
            .header("X-EKKA-NODE-ID", self.node_id.to_string())
            .json(&serde_json::json!({ "runner_id": self.runner_id }))
            .send()
            .await
            .map_err(|e| format!("Heartbeat failed: {}", e))?;

        let status = response.status();
        if !status.is_success() {
            let body = response.text().await.unwrap_or_default();
            let body_trunc = if body.len() > 200 {
                format!("{}...", &body[..200])
            } else {
                body
            };
            warn!(
                op = "prompt_run.heartbeat.failed",
                task_id = %task_id_short,
                http_status = %status.as_u16(),
                response_body = %body_trunc,
                "Heartbeat request failed"
            );
            return Err(format!("Heartbeat failed ({}) {}", status, body_trunc));
        }

        info!(
            op = "prompt_run.heartbeat.ok",
            task_id = %task_id_short,
            http_status = %status.as_u16(),
            "Heartbeat succeeded"
        );

        Ok(())
    }
}

// =============================================================================
// Public API
// =============================================================================

/// Start the node session runner loop
///
/// This is the replacement for the internal-key based runner.
/// Requires a valid node session to be established first.
pub async fn run_node_session_runner_loop(
    config: NodeSessionRunnerConfig,
    session_holder: Arc<NodeSessionHolder>,
    home_path: PathBuf,
    device_fingerprint: Option<String>,
    user_sub: Option<String>,
    state_cb: Option<Arc<dyn NodeRunnerCallback>>,
    mut shutdown_rx: tokio::sync::watch::Receiver<bool>,
) -> Result<(), String> {
    let runner = NodeSessionRunner::new(&config, session_holder, home_path, device_fingerprint, user_sub);
    let cb = state_cb.unwrap_or_else(|| Arc::new(NoOpCallback));

    cb.on_start(&runner.runner_id);

    info!(
        op = "node_runner.start",
        runner_id = %runner.runner_id,
        node_id = %runner.node_id,
        "Node session runner starting"
    );

    loop {
        // Check for shutdown signal
        if *shutdown_rx.borrow() {
            info!(op = "node_runner.shutdown", "Shutdown signal received");
            cb.on_stop();
            break;
        }

        match runner.poll_tasks().await {
            Ok(tasks) => {
                cb.on_poll();

                if tasks.is_empty() {
                    // Wait for next poll or shutdown
                    tokio::select! {
                        _ = tokio::time::sleep(Duration::from_secs(POLL_INTERVAL_SECS)) => {}
                        _ = shutdown_rx.changed() => {
                            if *shutdown_rx.borrow() {
                                info!(op = "node_runner.shutdown", "Shutdown during poll wait");
                                cb.on_stop();
                                break;
                            }
                        }
                    }
                    continue;
                }

                info!(
                    op = "node_runner.poll.found",
                    count = tasks.len(),
                    "Found pending tasks"
                );

                for task in tasks {
                    // Check shutdown before processing each task
                    if *shutdown_rx.borrow() {
                        info!(op = "node_runner.shutdown", "Shutdown before task processing");
                        cb.on_stop();
                        return Ok(());
                    }
                    runner.process_task(&task, &cb).await;
                }
            }
            Err(e) => {
                error!(op = "node_runner.poll.error", error = %e, "Poll failed");
                cb.on_error(&e);
                tokio::time::sleep(Duration::from_secs(POLL_INTERVAL_SECS)).await;
            }
        }

        tokio::time::sleep(Duration::from_secs(1)).await;
    }

    Ok(())
}
```
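A minimal shutdown sketch for the loop above, assuming a session holder that is already seeded (construction details are not shown in this file) and an assumed home path:

```rust
// Sketch: cooperative shutdown via the watch channel the loop expects.
// `holder` and `config` construction are assumptions.
async fn run_with_shutdown(
    config: NodeSessionRunnerConfig,
    holder: Arc<NodeSessionHolder>,
) -> Result<(), String> {
    let (tx, rx) = tokio::sync::watch::channel(false);

    let handle = tokio::spawn(run_node_session_runner_loop(
        config,
        holder,
        std::path::PathBuf::from("/tmp/ekka-home"), // home_path (assumed)
        None, // device_fingerprint
        None, // user_sub
        None, // state_cb -> NoOpCallback is used
        rx,
    ));

    // ... later, e.g. on app exit:
    let _ = tx.send(true); // the loop observes this on its next borrow()/changed()

    handle.await.map_err(|e| format!("join error: {}", e))?
}
```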
```rust
/// No-op callback for when no state tracking is needed
struct NoOpCallback;

impl NodeRunnerCallback for NoOpCallback {
    fn on_start(&self, _: &str) {}
    fn on_poll(&self) {}
    fn on_claim(&self, _: &str) {}
    fn on_complete(&self, _: &str) {}
    fn on_error(&self, _: &str) {}
    fn on_stop(&self) {}
}

/// Start the node runner after session is established
pub async fn start_node_runner(
    runner_state: RunnerState,
    session_holder: Arc<NodeSessionHolder>,
    config: NodeSessionRunnerConfig,
    home_path: PathBuf,
    device_fingerprint: Option<String>,
    user_sub: Option<String>,
) -> Option<tokio::sync::watch::Sender<bool>> {
    info!(
        op = "node_runner.init",
        node_id = %config.node_id,
        tenant_id = %config.tenant_id,
        workspace_id = %config.workspace_id,
        "Starting node session runner"
    );

    // Create shutdown channel
    let (shutdown_tx, shutdown_rx) = tokio::sync::watch::channel(false);

    // Create callbacks
    let callbacks = Arc::new(DesktopNodeRunnerCallbacks::new(runner_state));

    // Spawn the runner loop
    tokio::spawn(async move {
        if let Err(e) = run_node_session_runner_loop(
            config,
            session_holder,
            home_path,
            device_fingerprint,
            user_sub,
            Some(callbacks),
            shutdown_rx,
        )
        .await
        {
            warn!(
                op = "node_runner.error",
                error = %e,
                "Node runner loop exited with error"
            );
        }
    });

    Some(shutdown_tx)
}
```