@epic-cloudcontrol/daemon 0.2.0 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/__tests__/config.test.d.ts +1 -0
- package/dist/__tests__/config.test.js +26 -0
- package/dist/__tests__/model-router.test.d.ts +1 -0
- package/dist/__tests__/model-router.test.js +59 -0
- package/dist/__tests__/profile.test.d.ts +1 -0
- package/dist/__tests__/profile.test.js +53 -0
- package/dist/__tests__/sandbox.test.d.ts +1 -0
- package/dist/__tests__/sandbox.test.js +78 -0
- package/dist/__tests__/version.test.d.ts +1 -0
- package/dist/__tests__/version.test.js +11 -0
- package/dist/browser.d.ts +7 -0
- package/dist/browser.js +56 -0
- package/dist/cli.js +17 -13
- package/dist/logger.d.ts +13 -0
- package/dist/logger.js +64 -0
- package/dist/mcp-server.js +8 -6
- package/dist/model-router.js +43 -7
- package/dist/models/gemini-api.d.ts +24 -0
- package/dist/models/gemini-api.js +134 -0
- package/dist/models/openai.d.ts +24 -0
- package/dist/models/openai.js +135 -0
- package/dist/multi-profile.js +9 -7
- package/dist/sandbox.js +3 -6
- package/dist/step-runner.d.ts +34 -0
- package/dist/step-runner.js +94 -0
- package/dist/task-executor.js +64 -27
- package/package.json +13 -2
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Google Gemini API Adapter
|
|
3
|
+
*
|
|
4
|
+
* Calls the Google Generative AI API directly (no SDK dependency).
|
|
5
|
+
* Supports Gemini 2.5 Pro, Flash, and any model on the API.
|
|
6
|
+
*
|
|
7
|
+
* Requires: GOOGLE_API_KEY environment variable.
|
|
8
|
+
*/
|
|
9
|
+
import { buildSandboxPrompt, getDefaultSandboxConfig } from "../sandbox.js";
|
|
10
|
+
const GEMINI_API_URL = "https://generativelanguage.googleapis.com/v1beta/models";
|
|
11
|
+
export class GeminiAPIAdapter {
    model;
    apiKey;
    sandboxConfig;
    /**
     * @param {string} [model="gemini-2.5-pro"] - Gemini model name, used in the API path.
     * @param {string} [apiKey] - API key; falls back to GOOGLE_API_KEY, then "".
     * @param {object} [sandboxConfig] - Sandbox rules; defaults to getDefaultSandboxConfig().
     */
    constructor(model = "gemini-2.5-pro", apiKey, sandboxConfig) {
        this.model = model;
        this.apiKey = apiKey || process.env.GOOGLE_API_KEY || "";
        this.sandboxConfig = sandboxConfig || getDefaultSandboxConfig();
    }
    /**
     * Execute a task with a single generateContent call.
     *
     * Builds a system prompt (executor instructions + sandbox rules) and a user
     * message from the task fields, then parses the reply for the
     * [HUMAN_REQUIRED] / [RESULT] markers.
     *
     * @param {{title: string, description?: string|null, taskType?: string|null,
     *          context?: Record<string, unknown>|null, processHint?: string|null,
     *          humanContext?: string|null}} task
     * @returns {Promise<object>} ExecutionResult-shaped object. Transport/API
     *          failures are returned as { success: false, result: { error } }
     *          rather than thrown.
     */
    async execute(task) {
        const dialogue = [];
        const startTime = Date.now();
        let totalTokens = 0;
        const sandboxRules = buildSandboxPrompt(this.sandboxConfig);
        const systemPrompt = `You are a CloudControl task executor. You complete tasks efficiently and report results clearly.

When you encounter something you cannot complete (CAPTCHA, phone verification, account creation, ambiguous decision), respond with:
[HUMAN_REQUIRED]: <reason>

Always end your final response with:
[RESULT]: <brief summary of what was accomplished>
${sandboxRules}`;
        let userMessage = `## Task: ${task.title}\n`;
        if (task.description)
            userMessage += `\n${task.description}\n`;
        if (task.taskType)
            userMessage += `\nTask type: ${task.taskType}\n`;
        if (task.processHint)
            userMessage += `\nProcess hint: ${task.processHint}\n`;
        if (task.context) {
            userMessage += `\nContext:\n\`\`\`json\n${JSON.stringify(task.context, null, 2)}\n\`\`\`\n`;
        }
        if (task.humanContext) {
            userMessage += `\nHuman feedback from previous attempt:\n${task.humanContext}\n`;
        }
        dialogue.push({
            role: "user",
            content: userMessage,
            timestamp: new Date().toISOString(),
        });
        try {
            // Send the key as the x-goog-api-key header rather than a ?key= query
            // parameter so the secret does not leak into proxy/server access logs
            // that record request URLs.
            const url = `${GEMINI_API_URL}/${this.model}:generateContent`;
            const response = await fetch(url, {
                method: "POST",
                headers: {
                    "Content-Type": "application/json",
                    "x-goog-api-key": this.apiKey,
                },
                body: JSON.stringify({
                    systemInstruction: { parts: [{ text: systemPrompt }] },
                    contents: [{ role: "user", parts: [{ text: userMessage }] }],
                    generationConfig: {
                        maxOutputTokens: 4096,
                    },
                }),
            });
            if (!response.ok) {
                const err = await response.text();
                throw new Error(`Gemini API ${response.status}: ${err}`);
            }
            const data = await response.json();
            // Non-text parts carry no `text` property; drop them so the string
            // "undefined" is never joined into the transcript.
            const assistantContent = data.candidates?.[0]?.content?.parts
                ?.map((p) => p.text)
                .filter((t) => typeof t === "string")
                .join("\n") || "";
            totalTokens = data.usageMetadata?.totalTokenCount || 0;
            dialogue.push({
                role: "assistant",
                content: assistantContent,
                timestamp: new Date().toISOString(),
            });
            // Check for human required
            const humanMatch = assistantContent.match(/\[HUMAN_REQUIRED\]:\s*(.*)/s);
            if (humanMatch) {
                return {
                    success: false,
                    dialogue,
                    result: { raw_response: assistantContent },
                    metadata: {
                        model: this.model,
                        tokens_used: totalTokens,
                        duration_ms: Date.now() - startTime,
                    },
                    humanRequired: {
                        reason: humanMatch[1].trim(),
                        context: assistantContent,
                    },
                };
            }
            // Prefer the explicit [RESULT] marker; otherwise fall back to a
            // truncated copy of the raw reply as the summary.
            const resultMatch = assistantContent.match(/\[RESULT\]:\s*(.*)/s);
            const resultSummary = resultMatch
                ? resultMatch[1].trim()
                : assistantContent.slice(0, 500);
            return {
                success: true,
                dialogue,
                result: {
                    summary: resultSummary,
                    raw_response: assistantContent,
                },
                metadata: {
                    model: this.model,
                    tokens_used: totalTokens,
                    duration_ms: Date.now() - startTime,
                },
            };
        }
        catch (error) {
            const errMsg = error instanceof Error ? error.message : "Unknown error";
            dialogue.push({
                role: "system",
                content: `Execution error: ${errMsg}`,
                timestamp: new Date().toISOString(),
            });
            return {
                success: false,
                dialogue,
                result: { error: errMsg },
                metadata: {
                    model: this.model,
                    tokens_used: totalTokens,
                    duration_ms: Date.now() - startTime,
                },
            };
        }
    }
}
|
|
134
|
+
//# sourceMappingURL=gemini-api.js.map
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI GPT Adapter
|
|
3
|
+
*
|
|
4
|
+
* Calls the OpenAI Chat Completions API directly (no SDK dependency).
|
|
5
|
+
* Supports GPT-4o, GPT-4o-mini, and any model available on the API.
|
|
6
|
+
*
|
|
7
|
+
* Requires: OPENAI_API_KEY environment variable.
|
|
8
|
+
*/
|
|
9
|
+
import { type SandboxConfig } from "../sandbox.js";
|
|
10
|
+
import type { ExecutionResult } from "./claude.js";
|
|
11
|
+
/**
 * Adapter that executes CloudControl tasks against the OpenAI Chat
 * Completions API. Runtime implementation lives in openai.js.
 */
export declare class OpenAIAdapter {
    private model;
    private apiKey;
    private sandboxConfig;
    /**
     * model defaults to "gpt-4o"; apiKey falls back to the OPENAI_API_KEY
     * env var; sandboxConfig falls back to the package default sandbox.
     */
    constructor(model?: string, apiKey?: string, sandboxConfig?: SandboxConfig);
    /**
     * Run one task and resolve with an ExecutionResult. API/transport
     * failures are reported inside the result rather than thrown.
     */
    execute(task: {
        title: string;
        description?: string | null;
        taskType?: string | null;
        context?: Record<string, unknown> | null;
        processHint?: string | null;
        humanContext?: string | null;
    }): Promise<ExecutionResult>;
}
|
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI GPT Adapter
|
|
3
|
+
*
|
|
4
|
+
* Calls the OpenAI Chat Completions API directly (no SDK dependency).
|
|
5
|
+
* Supports GPT-4o, GPT-4o-mini, and any model available on the API.
|
|
6
|
+
*
|
|
7
|
+
* Requires: OPENAI_API_KEY environment variable.
|
|
8
|
+
*/
|
|
9
|
+
import { buildSandboxPrompt, getDefaultSandboxConfig } from "../sandbox.js";
|
|
10
|
+
const OPENAI_API_URL = "https://api.openai.com/v1/chat/completions";
|
|
11
|
+
export class OpenAIAdapter {
    model;
    apiKey;
    sandboxConfig;
    /**
     * @param {string} [model="gpt-4o"] - Chat Completions model name.
     * @param {string} [apiKey] - API key; falls back to OPENAI_API_KEY, then "".
     * @param {object} [sandboxConfig] - Sandbox rules; defaults to getDefaultSandboxConfig().
     */
    constructor(model = "gpt-4o", apiKey, sandboxConfig) {
        this.model = model;
        this.apiKey = apiKey || process.env.OPENAI_API_KEY || "";
        this.sandboxConfig = sandboxConfig || getDefaultSandboxConfig();
    }
    /**
     * Execute a task with a single chat-completions call.
     *
     * Builds a system prompt (executor instructions + sandbox rules) and a user
     * message from the task fields, then parses the reply for the
     * [HUMAN_REQUIRED] / [RESULT] markers.
     *
     * @param {{title: string, description?: string|null, taskType?: string|null,
     *          context?: Record<string, unknown>|null, processHint?: string|null,
     *          humanContext?: string|null}} task
     * @returns {Promise<object>} ExecutionResult-shaped object. Transport/API
     *          failures are returned as { success: false, result: { error } }
     *          rather than thrown.
     */
    async execute(task) {
        const dialogue = [];
        const startTime = Date.now();
        let totalTokens = 0;
        const sandboxRules = buildSandboxPrompt(this.sandboxConfig);
        const systemPrompt = `You are a CloudControl task executor. You complete tasks efficiently and report results clearly.

When you encounter something you cannot complete (CAPTCHA, phone verification, account creation, ambiguous decision), respond with:
[HUMAN_REQUIRED]: <reason>

Always end your final response with:
[RESULT]: <brief summary of what was accomplished>
${sandboxRules}`;
        let userMessage = `## Task: ${task.title}\n`;
        if (task.description)
            userMessage += `\n${task.description}\n`;
        if (task.taskType)
            userMessage += `\nTask type: ${task.taskType}\n`;
        if (task.processHint)
            userMessage += `\nProcess hint: ${task.processHint}\n`;
        if (task.context) {
            userMessage += `\nContext:\n\`\`\`json\n${JSON.stringify(task.context, null, 2)}\n\`\`\`\n`;
        }
        if (task.humanContext) {
            userMessage += `\nHuman feedback from previous attempt:\n${task.humanContext}\n`;
        }
        dialogue.push({
            role: "user",
            content: userMessage,
            timestamp: new Date().toISOString(),
        });
        try {
            const response = await fetch(OPENAI_API_URL, {
                method: "POST",
                headers: {
                    "Authorization": `Bearer ${this.apiKey}`,
                    "Content-Type": "application/json",
                },
                body: JSON.stringify({
                    model: this.model,
                    max_tokens: 4096,
                    messages: [
                        { role: "system", content: systemPrompt },
                        { role: "user", content: userMessage },
                    ],
                }),
            });
            if (!response.ok) {
                const err = await response.text();
                throw new Error(`OpenAI API ${response.status}: ${err}`);
            }
            const data = await response.json();
            // Guard `choices` itself: an unexpected response shape must fall
            // through to "" instead of throwing a TypeError (matches the
            // defensive access used by the Gemini adapter).
            const assistantContent = data.choices?.[0]?.message?.content || "";
            totalTokens = data.usage?.total_tokens || 0;
            dialogue.push({
                role: "assistant",
                content: assistantContent,
                timestamp: new Date().toISOString(),
            });
            // Check for human required
            const humanMatch = assistantContent.match(/\[HUMAN_REQUIRED\]:\s*(.*)/s);
            if (humanMatch) {
                return {
                    success: false,
                    dialogue,
                    result: { raw_response: assistantContent },
                    metadata: {
                        model: this.model,
                        tokens_used: totalTokens,
                        duration_ms: Date.now() - startTime,
                    },
                    humanRequired: {
                        reason: humanMatch[1].trim(),
                        context: assistantContent,
                    },
                };
            }
            // Prefer the explicit [RESULT] marker; otherwise fall back to a
            // truncated copy of the raw reply as the summary.
            const resultMatch = assistantContent.match(/\[RESULT\]:\s*(.*)/s);
            const resultSummary = resultMatch
                ? resultMatch[1].trim()
                : assistantContent.slice(0, 500);
            return {
                success: true,
                dialogue,
                result: {
                    summary: resultSummary,
                    raw_response: assistantContent,
                },
                metadata: {
                    model: this.model,
                    tokens_used: totalTokens,
                    duration_ms: Date.now() - startTime,
                },
            };
        }
        catch (error) {
            const errMsg = error instanceof Error ? error.message : "Unknown error";
            dialogue.push({
                role: "system",
                content: `Execution error: ${errMsg}`,
                timestamp: new Date().toISOString(),
            });
            return {
                success: false,
                dialogue,
                result: { error: errMsg },
                metadata: {
                    model: this.model,
                    tokens_used: totalTokens,
                    duration_ms: Date.now() - startTime,
                },
            };
        }
    }
}
|
|
135
|
+
//# sourceMappingURL=openai.js.map
|
package/dist/multi-profile.js
CHANGED
|
@@ -4,6 +4,8 @@ import { listProfiles } from "./profile.js";
|
|
|
4
4
|
import { TaskExecutor } from "./task-executor.js";
|
|
5
5
|
import { fetchWithRetry } from "./retry.js";
|
|
6
6
|
import { DAEMON_VERSION } from "./version.js";
|
|
7
|
+
import { createLogger } from "./logger.js";
|
|
8
|
+
const log = createLogger("daemon");
|
|
7
9
|
export async function startAllProfiles(overrides) {
|
|
8
10
|
const profiles = listProfiles();
|
|
9
11
|
if (profiles.length === 0) {
|
|
@@ -26,7 +28,7 @@ export async function startAllProfiles(overrides) {
|
|
|
26
28
|
model: overrides.model,
|
|
27
29
|
});
|
|
28
30
|
if (!config.apiKey) {
|
|
29
|
-
|
|
31
|
+
log.info(`[${name}] Skipping — no API key`);
|
|
30
32
|
continue;
|
|
31
33
|
}
|
|
32
34
|
const executor = new TaskExecutor(config);
|
|
@@ -67,10 +69,10 @@ export async function startAllProfiles(overrides) {
|
|
|
67
69
|
const data = await res.json();
|
|
68
70
|
pw.workerId = data.worker.id;
|
|
69
71
|
executor.setWorkerId(data.worker.id);
|
|
70
|
-
|
|
72
|
+
log.info(`[${pw.teamName}] Registered as ${pw.workerId.slice(0, 8)}...`);
|
|
71
73
|
}
|
|
72
74
|
catch (err) {
|
|
73
|
-
|
|
75
|
+
log.error(`[${pw.teamName}] Registration failed: ${err.message}`);
|
|
74
76
|
continue;
|
|
75
77
|
}
|
|
76
78
|
// Set up polling
|
|
@@ -85,12 +87,12 @@ export async function startAllProfiles(overrides) {
|
|
|
85
87
|
sendHeartbeat(pw, "busy");
|
|
86
88
|
const task = await executor.claimTask(tasks[0].id);
|
|
87
89
|
if (task) {
|
|
88
|
-
|
|
90
|
+
log.info(`[${pw.teamName}] Claimed: ${task.title}`);
|
|
89
91
|
await executor.executeTask(task);
|
|
90
92
|
}
|
|
91
93
|
}
|
|
92
94
|
catch (err) {
|
|
93
|
-
|
|
95
|
+
log.error(`[${pw.teamName}] Execution error: ${err.message}`);
|
|
94
96
|
}
|
|
95
97
|
finally {
|
|
96
98
|
pw.executing = false;
|
|
@@ -106,10 +108,10 @@ export async function startAllProfiles(overrides) {
|
|
|
106
108
|
workers.push(pw);
|
|
107
109
|
}
|
|
108
110
|
if (workers.length === 0) {
|
|
109
|
-
|
|
111
|
+
log.error("No workers started. Check your profiles.");
|
|
110
112
|
process.exit(1);
|
|
111
113
|
}
|
|
112
|
-
|
|
114
|
+
log.info(`Running ${workers.length} worker(s). Press Ctrl+C to stop.`);
|
|
113
115
|
return () => {
|
|
114
116
|
for (const pw of workers) {
|
|
115
117
|
if (pw.pollTimer)
|
package/dist/sandbox.js
CHANGED
|
@@ -7,6 +7,9 @@
|
|
|
7
7
|
* - Environment variable filtering
|
|
8
8
|
* - Output size limits
|
|
9
9
|
*/
|
|
10
|
+
import os from "os";
|
|
11
|
+
import fs from "fs";
|
|
12
|
+
import path from "path";
|
|
10
13
|
const DEFAULT_BLOCKED_COMMANDS = [
|
|
11
14
|
"rm -rf /",
|
|
12
15
|
"rm -rf ~",
|
|
@@ -144,9 +147,6 @@ export function truncateOutput(output, maxBytes) {
|
|
|
144
147
|
* Returns the path. Caller is responsible for cleanup.
|
|
145
148
|
*/
|
|
146
149
|
export function createTaskTmpDir(taskId) {
|
|
147
|
-
const os = require("os");
|
|
148
|
-
const fs = require("fs");
|
|
149
|
-
const path = require("path");
|
|
150
150
|
const dir = path.join(os.tmpdir(), `cloudcontrol-${taskId.slice(0, 8)}`);
|
|
151
151
|
if (!fs.existsSync(dir)) {
|
|
152
152
|
fs.mkdirSync(dir, { recursive: true, mode: 0o700 });
|
|
@@ -157,9 +157,6 @@ export function createTaskTmpDir(taskId) {
|
|
|
157
157
|
* Clean up a task's temp directory.
|
|
158
158
|
*/
|
|
159
159
|
export function cleanupTaskTmpDir(taskId) {
|
|
160
|
-
const os = require("os");
|
|
161
|
-
const fs = require("fs");
|
|
162
|
-
const path = require("path");
|
|
163
160
|
const dir = path.join(os.tmpdir(), `cloudcontrol-${taskId.slice(0, 8)}`);
|
|
164
161
|
try {
|
|
165
162
|
fs.rmSync(dir, { recursive: true, force: true });
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Process YAML step runner.
|
|
3
|
+
* Executes process steps in sequence, pausing at human gates.
|
|
4
|
+
*/
|
|
5
|
+
import type { ModelAdapter, TaskInput } from "./model-router.js";
|
|
6
|
+
import type { ExecutionResult } from "./models/claude.js";
|
|
7
|
+
/** One step parsed from a process YAML definition. */
export interface ProcessStep {
    /** Unique step identifier, used in logs and per-step results. */
    id: string;
    /** Executor kind; "api" and "shell" are not yet implemented and are skipped at runtime. */
    type: "browser" | "ai" | "human" | "api" | "shell";
    /** Step action verb — presumably consumed by the browser executor; confirm against browser.js. */
    action?: string;
    /** Instruction text; for "ai" steps it is prepended to the task context, for "human" gates it is used as the approval reason. */
    prompt?: string;
    /** Target (e.g. URL) for browser steps — assumption; confirm against browser.js. */
    target?: string;
    /** CSS selector for browser steps — assumption; confirm against browser.js. */
    selector?: string;
    /** "approval_required" pauses the run at this step; "none" (or absent) lets it pass. */
    gate?: "none" | "approval_required";
    /** Human-readable description, surfaced in process hints and gate reasons. */
    description?: string;
}
|
|
17
|
+
/** Outcome of running a sequence of process steps. */
export interface StepRunResult {
    /** True only when every step ran to the end without failure or a human gate. */
    completed: boolean;
    /** Index of the step where the run stopped, or steps.length when completed. */
    stepIndex: number;
    /** Per-step outcomes, in execution order. */
    results: Array<{
        stepId: string;
        /** "completed" | "failed" | "skipped". */
        status: string;
        output?: string;
    }>;
    /** Present when the run paused for a human gate or [HUMAN_REQUIRED] reply. */
    humanRequired?: {
        reason: string;
        stepId: string;
    };
    /** Model-level result of the last AI step, or a synthesized result on full completion. */
    finalResult?: ExecutionResult;
}
|
|
31
|
+
/**
 * Run process steps in sequence, reporting progress via submitActivity.
 * Stops early when a step fails or requires human input; otherwise resolves
 * with completed: true and a synthesized final ExecutionResult.
 */
export declare function runProcessSteps(steps: ProcessStep[], task: TaskInput, adapter: {
    adapter: ModelAdapter;
    name: string;
}, submitActivity: (content: string) => Promise<void>): Promise<StepRunResult>;
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Process YAML step runner.
|
|
3
|
+
* Executes process steps in sequence, pausing at human gates.
|
|
4
|
+
*/
|
|
5
|
+
import { createLogger } from "./logger.js";
|
|
6
|
+
const log = createLogger("steps");
|
|
7
|
+
/**
 * Execute process steps in sequence, pausing at human gates.
 *
 * @param {Array<object>} steps - ProcessStep objects parsed from process YAML.
 * @param {object} task - Task input (title, description, etc.) passed to AI steps.
 * @param {{adapter: object, name: string}} adapter - Named model adapter used for "ai" steps.
 * @param {(content: string) => Promise<void>} submitActivity - Progress reporter; awaited per step.
 * @returns {Promise<object>} StepRunResult — completed=false with humanRequired
 *          when a gate or [HUMAN_REQUIRED] pauses the run, completed=false with
 *          finalResult when an AI step fails, completed=true otherwise.
 */
export async function runProcessSteps(steps, task, adapter, submitActivity) {
    const results = [];
    for (let i = 0; i < steps.length; i++) {
        const step = steps[i];
        log.info(`Step ${i + 1}/${steps.length}: [${step.type}] ${step.id}`);
        await submitActivity(`Step ${i + 1}/${steps.length}: [${step.type}] ${step.id}`);
        switch (step.type) {
            case "ai": {
                const prompt = step.prompt
                    ? `${step.prompt}\n\nTask context: ${task.title}\n${task.description || ""}`
                    : `${task.title}\n${task.description || ""}`;
                const result = await adapter.adapter.execute({
                    ...task,
                    description: prompt,
                    processHint: `You are on step ${i + 1} of ${steps.length}: "${step.id}". ${step.description || ""}`,
                });
                results.push({
                    stepId: step.id,
                    status: result.success ? "completed" : "failed",
                    output: result.result?.summary || JSON.stringify(result.result),
                });
                // The model itself can demand human input mid-process.
                if (result.humanRequired) {
                    return {
                        completed: false,
                        stepIndex: i,
                        results,
                        humanRequired: { reason: result.humanRequired.reason, stepId: step.id },
                        finalResult: result,
                    };
                }
                if (!result.success) {
                    return { completed: false, stepIndex: i, results, finalResult: result };
                }
                break;
            }
            case "human": {
                if (step.gate === "approval_required") {
                    log.info(`Human gate at step ${step.id} — pausing for approval`);
                    return {
                        completed: false,
                        stepIndex: i,
                        results,
                        humanRequired: {
                            reason: step.prompt || step.description || `Step "${step.id}" requires human approval`,
                            stepId: step.id,
                        },
                    };
                }
                results.push({ stepId: step.id, status: "skipped", output: "No gate required" });
                break;
            }
            case "browser": {
                // browser.js pulls in Playwright, an optional dependency; a failed
                // dynamic import means it is absent, so the step is skipped.
                try {
                    const { executeBrowserStep } = await import("./browser.js");
                    const output = await executeBrowserStep(step);
                    results.push({ stepId: step.id, status: "completed", output });
                }
                catch {
                    log.warn(`Browser step "${step.id}" skipped — Playwright not installed`);
                    results.push({ stepId: step.id, status: "skipped", output: "Playwright not installed" });
                }
                break;
            }
            case "api":
            case "shell": {
                log.warn(`Step type "${step.type}" not yet implemented — skipping "${step.id}"`);
                results.push({ stepId: step.id, status: "skipped", output: `${step.type} steps not yet implemented` });
                break;
            }
            default: {
                // Steps come from YAML, so an unrecognized type can reach us at
                // runtime; record it instead of silently dropping the step.
                log.warn(`Unknown step type "${step.type}" — skipping "${step.id}"`);
                results.push({ stepId: step.id, status: "skipped", output: `Unknown step type: ${step.type}` });
                break;
            }
        }
    }
    const lastAiResult = results.filter((r) => r.status === "completed").pop();
    return {
        completed: true,
        stepIndex: steps.length,
        results,
        finalResult: {
            success: true,
            dialogue: [],
            result: {
                summary: lastAiResult?.output || "All process steps completed",
                steps: results,
            },
            metadata: { model: adapter.name, tokens_used: 0, duration_ms: 0 },
        },
    };
}
|
|
94
|
+
//# sourceMappingURL=step-runner.js.map
|