@epic-cloudcontrol/daemon 0.2.1 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/__tests__/config.test.d.ts +1 -0
- package/dist/__tests__/config.test.js +26 -0
- package/dist/__tests__/model-router.test.d.ts +1 -0
- package/dist/__tests__/model-router.test.js +59 -0
- package/dist/__tests__/profile.test.d.ts +1 -0
- package/dist/__tests__/profile.test.js +53 -0
- package/dist/__tests__/sandbox.test.d.ts +1 -0
- package/dist/__tests__/sandbox.test.js +78 -0
- package/dist/__tests__/version.test.d.ts +1 -0
- package/dist/__tests__/version.test.js +11 -0
- package/dist/browser.d.ts +7 -0
- package/dist/browser.js +56 -0
- package/dist/cli.js +17 -13
- package/dist/logger.d.ts +13 -0
- package/dist/logger.js +64 -0
- package/dist/mcp-server.js +8 -6
- package/dist/model-router.js +43 -7
- package/dist/models/gemini-api.d.ts +24 -0
- package/dist/models/gemini-api.js +134 -0
- package/dist/models/openai.d.ts +24 -0
- package/dist/models/openai.js +135 -0
- package/dist/multi-profile.js +9 -7
- package/dist/step-runner.d.ts +34 -0
- package/dist/step-runner.js +94 -0
- package/dist/task-executor.js +64 -27
- package/package.json +13 -2
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Google Gemini API Adapter
|
|
3
|
+
*
|
|
4
|
+
* Calls the Google Generative AI API directly (no SDK dependency).
|
|
5
|
+
* Supports Gemini 2.5 Pro, Flash, and any model on the API.
|
|
6
|
+
*
|
|
7
|
+
* Requires: GOOGLE_API_KEY environment variable.
|
|
8
|
+
*/
|
|
9
|
+
import { buildSandboxPrompt, getDefaultSandboxConfig } from "../sandbox.js";
|
|
10
|
+
const GEMINI_API_URL = "https://generativelanguage.googleapis.com/v1beta/models";
|
|
11
|
+
export class GeminiAPIAdapter {
    model;
    apiKey;
    sandboxConfig;
    /**
     * @param {string} [model] - Gemini model name; defaults to "gemini-2.5-pro".
     * @param {string} [apiKey] - Explicit API key; falls back to GOOGLE_API_KEY, then "".
     * @param {object} [sandboxConfig] - Sandbox rules; defaults to getDefaultSandboxConfig().
     */
    constructor(model = "gemini-2.5-pro", apiKey, sandboxConfig) {
        this.model = model;
        this.apiKey = apiKey || process.env.GOOGLE_API_KEY || "";
        this.sandboxConfig = sandboxConfig || getDefaultSandboxConfig();
    }
    /**
     * Run a task as a single generateContent call and parse the reply.
     *
     * The assistant reply is scanned for [HUMAN_REQUIRED] and [RESULT]
     * markers. This method never rejects: API and network failures are caught
     * and returned as a failed ExecutionResult with the message in result.error.
     *
     * @param {object} task - { title, description?, taskType?, context?, processHint?, humanContext? }
     * @returns {Promise<object>} ExecutionResult-shaped object:
     *   { success, dialogue, result, metadata, humanRequired? }
     */
    async execute(task) {
        const dialogue = [];
        const startTime = Date.now();
        let totalTokens = 0;
        const sandboxRules = buildSandboxPrompt(this.sandboxConfig);
        const systemPrompt = `You are a CloudControl task executor. You complete tasks efficiently and report results clearly.

When you encounter something you cannot complete (CAPTCHA, phone verification, account creation, ambiguous decision), respond with:
[HUMAN_REQUIRED]: <reason>

Always end your final response with:
[RESULT]: <brief summary of what was accomplished>
${sandboxRules}`;
        let userMessage = `## Task: ${task.title}\n`;
        if (task.description)
            userMessage += `\n${task.description}\n`;
        if (task.taskType)
            userMessage += `\nTask type: ${task.taskType}\n`;
        if (task.processHint)
            userMessage += `\nProcess hint: ${task.processHint}\n`;
        if (task.context) {
            userMessage += `\nContext:\n\`\`\`json\n${JSON.stringify(task.context, null, 2)}\n\`\`\`\n`;
        }
        if (task.humanContext) {
            userMessage += `\nHuman feedback from previous attempt:\n${task.humanContext}\n`;
        }
        dialogue.push({
            role: "user",
            content: userMessage,
            timestamp: new Date().toISOString(),
        });
        try {
            const url = `${GEMINI_API_URL}/${this.model}:generateContent`;
            const response = await fetch(url, {
                method: "POST",
                headers: {
                    "Content-Type": "application/json",
                    // Authenticate via header instead of a `?key=` query parameter so
                    // the key cannot leak through URL logging, proxies, or referrers.
                    "x-goog-api-key": this.apiKey,
                },
                body: JSON.stringify({
                    systemInstruction: { parts: [{ text: systemPrompt }] },
                    contents: [{ role: "user", parts: [{ text: userMessage }] }],
                    generationConfig: {
                        maxOutputTokens: 4096,
                    },
                }),
            });
            if (!response.ok) {
                const err = await response.text();
                throw new Error(`Gemini API ${response.status}: ${err}`);
            }
            const data = await response.json();
            // Skip parts without a string `text` (previously they joined as "undefined").
            const assistantContent = data.candidates?.[0]?.content?.parts
                ?.filter((p) => typeof p.text === "string")
                .map((p) => p.text)
                .join("\n") || "";
            totalTokens = data.usageMetadata?.totalTokenCount || 0;
            dialogue.push({
                role: "assistant",
                content: assistantContent,
                timestamp: new Date().toISOString(),
            });
            // Human-escalation marker takes precedence over the result marker.
            const humanMatch = assistantContent.match(/\[HUMAN_REQUIRED\]:\s*(.*)/s);
            if (humanMatch) {
                return {
                    success: false,
                    dialogue,
                    result: { raw_response: assistantContent },
                    metadata: {
                        model: this.model,
                        tokens_used: totalTokens,
                        duration_ms: Date.now() - startTime,
                    },
                    humanRequired: {
                        reason: humanMatch[1].trim(),
                        context: assistantContent,
                    },
                };
            }
            // Without a [RESULT] marker, fall back to the first 500 chars of the reply.
            const resultMatch = assistantContent.match(/\[RESULT\]:\s*(.*)/s);
            const resultSummary = resultMatch
                ? resultMatch[1].trim()
                : assistantContent.slice(0, 500);
            return {
                success: true,
                dialogue,
                result: {
                    summary: resultSummary,
                    raw_response: assistantContent,
                },
                metadata: {
                    model: this.model,
                    tokens_used: totalTokens,
                    duration_ms: Date.now() - startTime,
                },
            };
        }
        catch (error) {
            const errMsg = error instanceof Error ? error.message : "Unknown error";
            dialogue.push({
                role: "system",
                content: `Execution error: ${errMsg}`,
                timestamp: new Date().toISOString(),
            });
            return {
                success: false,
                dialogue,
                result: { error: errMsg },
                metadata: {
                    model: this.model,
                    tokens_used: totalTokens,
                    duration_ms: Date.now() - startTime,
                },
            };
        }
    }
}
|
|
134
|
+
//# sourceMappingURL=gemini-api.js.map
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI GPT Adapter
|
|
3
|
+
*
|
|
4
|
+
* Calls the OpenAI Chat Completions API directly (no SDK dependency).
|
|
5
|
+
* Supports GPT-4o, GPT-4o-mini, and any model available on the API.
|
|
6
|
+
*
|
|
7
|
+
* Requires: OPENAI_API_KEY environment variable.
|
|
8
|
+
*/
|
|
9
|
+
import { type SandboxConfig } from "../sandbox.js";
|
|
10
|
+
import type { ExecutionResult } from "./claude.js";
|
|
11
|
+
/**
 * Adapter that executes a CloudControl task through the OpenAI Chat
 * Completions API (see openai.js for the implementation).
 */
export declare class OpenAIAdapter {
    /** OpenAI model name; the implementation defaults to "gpt-4o". */
    private model;
    /** API key; resolved from OPENAI_API_KEY when not passed explicitly. */
    private apiKey;
    /** Sandbox rules folded into the system prompt; defaults via getDefaultSandboxConfig(). */
    private sandboxConfig;
    constructor(model?: string, apiKey?: string, sandboxConfig?: SandboxConfig);
    /**
     * Execute a task as a single chat-completion call.
     *
     * The implementation never rejects for API/network errors — failures are
     * returned as an ExecutionResult with success: false.
     */
    execute(task: {
        title: string;
        description?: string | null;
        taskType?: string | null;
        context?: Record<string, unknown> | null;
        processHint?: string | null;
        humanContext?: string | null;
    }): Promise<ExecutionResult>;
}
|
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI GPT Adapter
|
|
3
|
+
*
|
|
4
|
+
* Calls the OpenAI Chat Completions API directly (no SDK dependency).
|
|
5
|
+
* Supports GPT-4o, GPT-4o-mini, and any model available on the API.
|
|
6
|
+
*
|
|
7
|
+
* Requires: OPENAI_API_KEY environment variable.
|
|
8
|
+
*/
|
|
9
|
+
import { buildSandboxPrompt, getDefaultSandboxConfig } from "../sandbox.js";
|
|
10
|
+
const OPENAI_API_URL = "https://api.openai.com/v1/chat/completions";
|
|
11
|
+
export class OpenAIAdapter {
    model;
    apiKey;
    sandboxConfig;
    /**
     * @param {string} [model] - OpenAI model name; defaults to "gpt-4o".
     * @param {string} [apiKey] - Explicit API key; falls back to OPENAI_API_KEY, then "".
     * @param {object} [sandboxConfig] - Sandbox rules; defaults to getDefaultSandboxConfig().
     */
    constructor(model = "gpt-4o", apiKey, sandboxConfig) {
        this.model = model;
        this.apiKey = apiKey || process.env.OPENAI_API_KEY || "";
        this.sandboxConfig = sandboxConfig || getDefaultSandboxConfig();
    }
    /**
     * Run a task as a single chat-completion call and parse the reply.
     *
     * The assistant reply is scanned for [HUMAN_REQUIRED] and [RESULT]
     * markers. This method never rejects: API and network failures are caught
     * and returned as a failed ExecutionResult with the message in result.error.
     *
     * @param {object} task - { title, description?, taskType?, context?, processHint?, humanContext? }
     * @returns {Promise<object>} ExecutionResult-shaped object:
     *   { success, dialogue, result, metadata, humanRequired? }
     */
    async execute(task) {
        const dialogue = [];
        const startTime = Date.now();
        let totalTokens = 0;
        const sandboxRules = buildSandboxPrompt(this.sandboxConfig);
        const systemPrompt = `You are a CloudControl task executor. You complete tasks efficiently and report results clearly.

When you encounter something you cannot complete (CAPTCHA, phone verification, account creation, ambiguous decision), respond with:
[HUMAN_REQUIRED]: <reason>

Always end your final response with:
[RESULT]: <brief summary of what was accomplished>
${sandboxRules}`;
        let userMessage = `## Task: ${task.title}\n`;
        if (task.description)
            userMessage += `\n${task.description}\n`;
        if (task.taskType)
            userMessage += `\nTask type: ${task.taskType}\n`;
        if (task.processHint)
            userMessage += `\nProcess hint: ${task.processHint}\n`;
        if (task.context) {
            userMessage += `\nContext:\n\`\`\`json\n${JSON.stringify(task.context, null, 2)}\n\`\`\`\n`;
        }
        if (task.humanContext) {
            userMessage += `\nHuman feedback from previous attempt:\n${task.humanContext}\n`;
        }
        dialogue.push({
            role: "user",
            content: userMessage,
            timestamp: new Date().toISOString(),
        });
        try {
            const response = await fetch(OPENAI_API_URL, {
                method: "POST",
                headers: {
                    "Authorization": `Bearer ${this.apiKey}`,
                    "Content-Type": "application/json",
                },
                body: JSON.stringify({
                    model: this.model,
                    max_tokens: 4096,
                    messages: [
                        { role: "system", content: systemPrompt },
                        { role: "user", content: userMessage },
                    ],
                }),
            });
            if (!response.ok) {
                const err = await response.text();
                throw new Error(`OpenAI API ${response.status}: ${err}`);
            }
            const data = await response.json();
            // Optional-chain `choices` as well: an error-shaped JSON payload may
            // omit it entirely, and the old `data.choices[0]` threw a TypeError
            // instead of flowing into the catch-as-result path below.
            const assistantContent = data.choices?.[0]?.message?.content || "";
            totalTokens = data.usage?.total_tokens || 0;
            dialogue.push({
                role: "assistant",
                content: assistantContent,
                timestamp: new Date().toISOString(),
            });
            // Human-escalation marker takes precedence over the result marker.
            const humanMatch = assistantContent.match(/\[HUMAN_REQUIRED\]:\s*(.*)/s);
            if (humanMatch) {
                return {
                    success: false,
                    dialogue,
                    result: { raw_response: assistantContent },
                    metadata: {
                        model: this.model,
                        tokens_used: totalTokens,
                        duration_ms: Date.now() - startTime,
                    },
                    humanRequired: {
                        reason: humanMatch[1].trim(),
                        context: assistantContent,
                    },
                };
            }
            // Without a [RESULT] marker, fall back to the first 500 chars of the reply.
            const resultMatch = assistantContent.match(/\[RESULT\]:\s*(.*)/s);
            const resultSummary = resultMatch
                ? resultMatch[1].trim()
                : assistantContent.slice(0, 500);
            return {
                success: true,
                dialogue,
                result: {
                    summary: resultSummary,
                    raw_response: assistantContent,
                },
                metadata: {
                    model: this.model,
                    tokens_used: totalTokens,
                    duration_ms: Date.now() - startTime,
                },
            };
        }
        catch (error) {
            const errMsg = error instanceof Error ? error.message : "Unknown error";
            dialogue.push({
                role: "system",
                content: `Execution error: ${errMsg}`,
                timestamp: new Date().toISOString(),
            });
            return {
                success: false,
                dialogue,
                result: { error: errMsg },
                metadata: {
                    model: this.model,
                    tokens_used: totalTokens,
                    duration_ms: Date.now() - startTime,
                },
            };
        }
    }
}
|
|
135
|
+
//# sourceMappingURL=openai.js.map
|
package/dist/multi-profile.js
CHANGED
|
@@ -4,6 +4,8 @@ import { listProfiles } from "./profile.js";
|
|
|
4
4
|
import { TaskExecutor } from "./task-executor.js";
|
|
5
5
|
import { fetchWithRetry } from "./retry.js";
|
|
6
6
|
import { DAEMON_VERSION } from "./version.js";
|
|
7
|
+
import { createLogger } from "./logger.js";
|
|
8
|
+
const log = createLogger("daemon");
|
|
7
9
|
export async function startAllProfiles(overrides) {
|
|
8
10
|
const profiles = listProfiles();
|
|
9
11
|
if (profiles.length === 0) {
|
|
@@ -26,7 +28,7 @@ export async function startAllProfiles(overrides) {
|
|
|
26
28
|
model: overrides.model,
|
|
27
29
|
});
|
|
28
30
|
if (!config.apiKey) {
|
|
29
|
-
|
|
31
|
+
log.info(`[${name}] Skipping — no API key`);
|
|
30
32
|
continue;
|
|
31
33
|
}
|
|
32
34
|
const executor = new TaskExecutor(config);
|
|
@@ -67,10 +69,10 @@ export async function startAllProfiles(overrides) {
|
|
|
67
69
|
const data = await res.json();
|
|
68
70
|
pw.workerId = data.worker.id;
|
|
69
71
|
executor.setWorkerId(data.worker.id);
|
|
70
|
-
|
|
72
|
+
log.info(`[${pw.teamName}] Registered as ${pw.workerId.slice(0, 8)}...`);
|
|
71
73
|
}
|
|
72
74
|
catch (err) {
|
|
73
|
-
|
|
75
|
+
log.error(`[${pw.teamName}] Registration failed: ${err.message}`);
|
|
74
76
|
continue;
|
|
75
77
|
}
|
|
76
78
|
// Set up polling
|
|
@@ -85,12 +87,12 @@ export async function startAllProfiles(overrides) {
|
|
|
85
87
|
sendHeartbeat(pw, "busy");
|
|
86
88
|
const task = await executor.claimTask(tasks[0].id);
|
|
87
89
|
if (task) {
|
|
88
|
-
|
|
90
|
+
log.info(`[${pw.teamName}] Claimed: ${task.title}`);
|
|
89
91
|
await executor.executeTask(task);
|
|
90
92
|
}
|
|
91
93
|
}
|
|
92
94
|
catch (err) {
|
|
93
|
-
|
|
95
|
+
log.error(`[${pw.teamName}] Execution error: ${err.message}`);
|
|
94
96
|
}
|
|
95
97
|
finally {
|
|
96
98
|
pw.executing = false;
|
|
@@ -106,10 +108,10 @@ export async function startAllProfiles(overrides) {
|
|
|
106
108
|
workers.push(pw);
|
|
107
109
|
}
|
|
108
110
|
if (workers.length === 0) {
|
|
109
|
-
|
|
111
|
+
log.error("No workers started. Check your profiles.");
|
|
110
112
|
process.exit(1);
|
|
111
113
|
}
|
|
112
|
-
|
|
114
|
+
log.info(`Running ${workers.length} worker(s). Press Ctrl+C to stop.`);
|
|
113
115
|
return () => {
|
|
114
116
|
for (const pw of workers) {
|
|
115
117
|
if (pw.pollTimer)
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Process YAML step runner.
|
|
3
|
+
* Executes process steps in sequence, pausing at human gates.
|
|
4
|
+
*/
|
|
5
|
+
import type { ModelAdapter, TaskInput } from "./model-router.js";
|
|
6
|
+
import type { ExecutionResult } from "./models/claude.js";
|
|
7
|
+
/** One step of a process YAML definition, executed by runProcessSteps. */
export interface ProcessStep {
    /** Unique step identifier, echoed into StepRunResult.results. */
    id: string;
    /** Executor kind; "api" and "shell" are not yet implemented (see step-runner.js). */
    type: "browser" | "ai" | "human" | "api" | "shell";
    action?: string;
    /** For "ai" steps, prepended to the task context; for "human" steps, used as the gate reason. */
    prompt?: string;
    target?: string;
    selector?: string;
    /** "approval_required" on a "human" step pauses the run and escalates. */
    gate?: "none" | "approval_required";
    description?: string;
}
/** Outcome of running a sequence of process steps. */
export interface StepRunResult {
    /** True only when every step ran to the end without a gate or failure. */
    completed: boolean;
    /** Index of the step where the run stopped, or steps.length on completion. */
    stepIndex: number;
    /** Per-step outcomes in execution order. */
    results: Array<{
        stepId: string;
        status: string;
        output?: string;
    }>;
    /** Present when the run paused at a human gate or an AI escalation. */
    humanRequired?: {
        reason: string;
        stepId: string;
    };
    /** Underlying model result, when one was produced. */
    finalResult?: ExecutionResult;
}
/**
 * Execute process steps in sequence, pausing at human gates.
 * submitActivity posts a progress line before each step (see step-runner.js).
 */
export declare function runProcessSteps(steps: ProcessStep[], task: TaskInput, adapter: {
    adapter: ModelAdapter;
    name: string;
}, submitActivity: (content: string) => Promise<void>): Promise<StepRunResult>;
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Process YAML step runner.
|
|
3
|
+
* Executes process steps in sequence, pausing at human gates.
|
|
4
|
+
*/
|
|
5
|
+
import { createLogger } from "./logger.js";
|
|
6
|
+
const log = createLogger("steps");
|
|
7
|
+
/**
 * Execute process steps in sequence, pausing at human gates.
 *
 * Each step's outcome is appended to `results`. The run stops early when an
 * "ai" step fails or escalates, or when a "human" step has an approval gate.
 *
 * @param {Array} steps - ProcessStep objects to run in order.
 * @param {object} task - Task being executed (title/description feed AI steps).
 * @param {{adapter: object, name: string}} adapter - Selected model adapter and its name.
 * @param {(content: string) => Promise<void>} submitActivity - Progress reporter, awaited before each step.
 * @returns {Promise<object>} StepRunResult.
 */
export async function runProcessSteps(steps, task, adapter, submitActivity) {
    const results = [];
    for (let i = 0; i < steps.length; i++) {
        const step = steps[i];
        log.info(`Step ${i + 1}/${steps.length}: [${step.type}] ${step.id}`);
        await submitActivity(`Step ${i + 1}/${steps.length}: [${step.type}] ${step.id}`);
        switch (step.type) {
            case "ai": {
                // Give the model the step prompt plus overall task context.
                const prompt = step.prompt
                    ? `${step.prompt}\n\nTask context: ${task.title}\n${task.description || ""}`
                    : `${task.title}\n${task.description || ""}`;
                const result = await adapter.adapter.execute({
                    ...task,
                    description: prompt,
                    processHint: `You are on step ${i + 1} of ${steps.length}: "${step.id}". ${step.description || ""}`,
                });
                results.push({
                    stepId: step.id,
                    status: result.success ? "completed" : "failed",
                    output: result.result?.summary || JSON.stringify(result.result),
                });
                // Model escalated — stop and surface the human-required reason.
                if (result.humanRequired) {
                    return {
                        completed: false,
                        stepIndex: i,
                        results,
                        humanRequired: { reason: result.humanRequired.reason, stepId: step.id },
                        finalResult: result,
                    };
                }
                // Hard failure — stop the run; later steps likely depend on this one.
                if (!result.success) {
                    return { completed: false, stepIndex: i, results, finalResult: result };
                }
                break;
            }
            case "human": {
                if (step.gate === "approval_required") {
                    log.info(`Human gate at step ${step.id} — pausing for approval`);
                    return {
                        completed: false,
                        stepIndex: i,
                        results,
                        humanRequired: {
                            reason: step.prompt || step.description || `Step "${step.id}" requires human approval`,
                            stepId: step.id,
                        },
                    };
                }
                results.push({ stepId: step.id, status: "skipped", output: "No gate required" });
                break;
            }
            case "browser": {
                // Playwright is an optional dependency — degrade to a skip if absent.
                try {
                    const { executeBrowserStep } = await import("./browser.js");
                    const output = await executeBrowserStep(step);
                    results.push({ stepId: step.id, status: "completed", output });
                }
                catch {
                    log.warn(`Browser step "${step.id}" skipped — Playwright not installed`);
                    results.push({ stepId: step.id, status: "skipped", output: "Playwright not installed" });
                }
                break;
            }
            case "api":
            case "shell": {
                log.warn(`Step type "${step.type}" not yet implemented — skipping "${step.id}"`);
                results.push({ stepId: step.id, status: "skipped", output: `${step.type} steps not yet implemented` });
                break;
            }
            default: {
                // Previously an unknown type fell through the switch silently and
                // left no entry in `results`; record it so every step is accounted for.
                log.warn(`Unknown step type "${step.type}" — skipping "${step.id}"`);
                results.push({ stepId: step.id, status: "skipped", output: `Unknown step type: ${step.type}` });
                break;
            }
        }
    }
    // Summarize with the output of the last step that actually completed.
    const lastAiResult = results.filter((r) => r.status === "completed").pop();
    return {
        completed: true,
        stepIndex: steps.length,
        results,
        finalResult: {
            success: true,
            dialogue: [],
            result: {
                summary: lastAiResult?.output || "All process steps completed",
                steps: results,
            },
            metadata: { model: adapter.name, tokens_used: 0, duration_ms: 0 },
        },
    };
}
|
|
94
|
+
//# sourceMappingURL=step-runner.js.map
|
package/dist/task-executor.js
CHANGED
|
@@ -1,11 +1,14 @@
|
|
|
1
1
|
import { ModelRouter } from "./model-router.js";
|
|
2
2
|
import { fetchWithRetry } from "./retry.js";
|
|
3
3
|
import { cleanupTaskTmpDir } from "./sandbox.js";
|
|
4
|
+
import { createLogger } from "./logger.js";
|
|
5
|
+
import { runProcessSteps } from "./step-runner.js";
|
|
6
|
+
const log = createLogger("executor");
|
|
4
7
|
const retryOpts = {
|
|
5
8
|
maxRetries: 3,
|
|
6
9
|
baseDelayMs: 1000,
|
|
7
10
|
onRetry: (attempt, err) => {
|
|
8
|
-
|
|
11
|
+
log.info(`Retry attempt ${attempt}: ${err.message}`);
|
|
9
12
|
},
|
|
10
13
|
};
|
|
11
14
|
export class TaskExecutor {
|
|
@@ -42,21 +45,21 @@ export class TaskExecutor {
|
|
|
42
45
|
return task;
|
|
43
46
|
}
|
|
44
47
|
catch (err) {
|
|
45
|
-
|
|
48
|
+
log.warn(`Failed to claim task ${taskId}: ${err.message}`);
|
|
46
49
|
return null;
|
|
47
50
|
}
|
|
48
51
|
}
|
|
49
52
|
async executeTask(task) {
|
|
50
|
-
|
|
51
|
-
// Update status to running
|
|
53
|
+
log.info(`Executing: ${task.title}`);
|
|
52
54
|
await this.updateTaskStatus(task.id, "running");
|
|
53
55
|
// Resolve process for this task type (if any)
|
|
56
|
+
let processSteps = null;
|
|
54
57
|
if (task.taskType) {
|
|
55
|
-
const
|
|
56
|
-
if (
|
|
57
|
-
|
|
58
|
-
task.processHint =
|
|
59
|
-
|
|
58
|
+
const processData = await this.resolveProcess(task.taskType);
|
|
59
|
+
if (processData) {
|
|
60
|
+
processSteps = processData.steps;
|
|
61
|
+
task.processHint = processData.hint;
|
|
62
|
+
log.info(`Process loaded for type "${task.taskType}" (${processData.steps.length} steps)`);
|
|
60
63
|
}
|
|
61
64
|
}
|
|
62
65
|
// Fetch any needed secrets
|
|
@@ -65,30 +68,65 @@ export class TaskExecutor {
|
|
|
65
68
|
const value = await this.fetchSecret(key, task.id);
|
|
66
69
|
if (value) {
|
|
67
70
|
this.secrets.set(key, value);
|
|
68
|
-
|
|
71
|
+
log.info(`Secret "${key}" loaded`);
|
|
69
72
|
}
|
|
70
73
|
}
|
|
71
74
|
}
|
|
72
75
|
// Select model based on task's hint
|
|
73
|
-
const
|
|
74
|
-
|
|
76
|
+
const model = this.router.select(task.modelHint);
|
|
77
|
+
log.info(`Model: ${model.name} (hint: ${task.modelHint || "auto"})`);
|
|
75
78
|
// Pass taskId to adapter for sandbox isolation
|
|
76
|
-
if ("setTaskId" in adapter && typeof adapter.setTaskId === "function") {
|
|
77
|
-
adapter.setTaskId(task.id);
|
|
79
|
+
if ("setTaskId" in model.adapter && typeof model.adapter.setTaskId === "function") {
|
|
80
|
+
model.adapter.setTaskId(task.id);
|
|
78
81
|
}
|
|
79
|
-
// Execute with selected model adapter
|
|
80
82
|
let result;
|
|
81
83
|
try {
|
|
82
|
-
|
|
84
|
+
if (processSteps && processSteps.length > 0) {
|
|
85
|
+
// Step-by-step process execution
|
|
86
|
+
log.info(`Running ${processSteps.length} process steps`);
|
|
87
|
+
const stepResult = await runProcessSteps(processSteps, task, model, async (content) => {
|
|
88
|
+
try {
|
|
89
|
+
await fetchWithRetry(`${this.config.apiUrl}/api/tasks/${task.id}/activity`, {
|
|
90
|
+
method: "POST",
|
|
91
|
+
headers: this.headers,
|
|
92
|
+
body: JSON.stringify({ activityType: "progress", content, workerId: this.workerId }),
|
|
93
|
+
}, { maxRetries: 1, baseDelayMs: 500 });
|
|
94
|
+
}
|
|
95
|
+
catch { /* non-fatal */ }
|
|
96
|
+
});
|
|
97
|
+
if (stepResult.humanRequired) {
|
|
98
|
+
result = stepResult.finalResult || {
|
|
99
|
+
success: false,
|
|
100
|
+
dialogue: [],
|
|
101
|
+
result: { steps: stepResult.results },
|
|
102
|
+
metadata: { model: model.name, tokens_used: 0, duration_ms: 0 },
|
|
103
|
+
humanRequired: { reason: stepResult.humanRequired.reason, context: `Paused at step: ${stepResult.humanRequired.stepId}` },
|
|
104
|
+
};
|
|
105
|
+
}
|
|
106
|
+
else if (stepResult.finalResult) {
|
|
107
|
+
result = stepResult.finalResult;
|
|
108
|
+
}
|
|
109
|
+
else {
|
|
110
|
+
result = {
|
|
111
|
+
success: stepResult.completed,
|
|
112
|
+
dialogue: [],
|
|
113
|
+
result: { steps: stepResult.results },
|
|
114
|
+
metadata: { model: model.name, tokens_used: 0, duration_ms: 0 },
|
|
115
|
+
};
|
|
116
|
+
}
|
|
117
|
+
}
|
|
118
|
+
else {
|
|
119
|
+
// No process steps — single AI call (existing behavior)
|
|
120
|
+
result = await model.adapter.execute(task);
|
|
121
|
+
}
|
|
83
122
|
}
|
|
84
123
|
finally {
|
|
85
|
-
// Flush secrets and cleanup sandbox tmpdir
|
|
86
124
|
this.secrets.clear();
|
|
87
125
|
cleanupTaskTmpDir(task.id);
|
|
88
126
|
}
|
|
89
127
|
// Submit result
|
|
90
128
|
if (result.humanRequired) {
|
|
91
|
-
|
|
129
|
+
log.info(`Task requires human input: ${result.humanRequired.reason}`);
|
|
92
130
|
await this.submitResult(task.id, {
|
|
93
131
|
status: "human_required",
|
|
94
132
|
result: result.result,
|
|
@@ -98,7 +136,7 @@ export class TaskExecutor {
|
|
|
98
136
|
});
|
|
99
137
|
}
|
|
100
138
|
else if (result.success) {
|
|
101
|
-
|
|
139
|
+
log.info("Task completed successfully");
|
|
102
140
|
await this.submitResult(task.id, {
|
|
103
141
|
status: "completed",
|
|
104
142
|
result: result.result,
|
|
@@ -107,7 +145,7 @@ export class TaskExecutor {
|
|
|
107
145
|
});
|
|
108
146
|
}
|
|
109
147
|
else {
|
|
110
|
-
|
|
148
|
+
log.info("Task failed");
|
|
111
149
|
await this.submitResult(task.id, {
|
|
112
150
|
status: "failed",
|
|
113
151
|
result: result.result,
|
|
@@ -124,7 +162,7 @@ export class TaskExecutor {
|
|
|
124
162
|
headers: this.headers,
|
|
125
163
|
body: JSON.stringify({ workerId: this.workerId, ...result }),
|
|
126
164
|
}, retryOpts);
|
|
127
|
-
|
|
165
|
+
log.info(`Task ${taskId} submitted as ${result.status}`);
|
|
128
166
|
}
|
|
129
167
|
async updateTaskStatus(taskId, status) {
|
|
130
168
|
try {
|
|
@@ -136,7 +174,7 @@ export class TaskExecutor {
|
|
|
136
174
|
}
|
|
137
175
|
catch {
|
|
138
176
|
// Status update failure is non-fatal — task will still be processed
|
|
139
|
-
|
|
177
|
+
log.warn(`Failed to update status to ${status}`);
|
|
140
178
|
}
|
|
141
179
|
}
|
|
142
180
|
async resolveProcess(taskType) {
|
|
@@ -145,7 +183,7 @@ export class TaskExecutor {
|
|
|
145
183
|
const { parsed } = await res.json();
|
|
146
184
|
if (!parsed?.steps?.length)
|
|
147
185
|
return null;
|
|
148
|
-
//
|
|
186
|
+
// Build text hint for AI context
|
|
149
187
|
const lines = [`Follow this process (${parsed.name} v${parsed.version}):\n`];
|
|
150
188
|
for (let i = 0; i < parsed.steps.length; i++) {
|
|
151
189
|
const step = parsed.steps[i];
|
|
@@ -160,13 +198,12 @@ export class TaskExecutor {
|
|
|
160
198
|
desc += ` [REQUIRES HUMAN APPROVAL]`;
|
|
161
199
|
lines.push(desc);
|
|
162
200
|
}
|
|
163
|
-
if (parsed.credentialsNeeded?.length
|
|
201
|
+
if (parsed.credentialsNeeded?.length) {
|
|
164
202
|
lines.push(`\nCredentials available: ${parsed.credentialsNeeded.join(", ")}`);
|
|
165
203
|
}
|
|
166
|
-
return lines.join("\n");
|
|
204
|
+
return { steps: parsed.steps, hint: lines.join("\n") };
|
|
167
205
|
}
|
|
168
206
|
catch {
|
|
169
|
-
// Process resolution failure is non-fatal
|
|
170
207
|
return null;
|
|
171
208
|
}
|
|
172
209
|
}
|
|
@@ -177,7 +214,7 @@ export class TaskExecutor {
|
|
|
177
214
|
return value;
|
|
178
215
|
}
|
|
179
216
|
catch (err) {
|
|
180
|
-
|
|
217
|
+
log.warn(`Failed to fetch secret "${key}": ${err.message}`);
|
|
181
218
|
return null;
|
|
182
219
|
}
|
|
183
220
|
}
|