@eclipse-lyra/extension-ai-system 0.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents/agent-registry.d.ts +8 -0
- package/dist/agents/agent-registry.d.ts.map +1 -0
- package/dist/agents/index.d.ts +6 -0
- package/dist/agents/index.d.ts.map +1 -0
- package/dist/agents/message-processor.d.ts +9 -0
- package/dist/agents/message-processor.d.ts.map +1 -0
- package/dist/agents/orchestrator.d.ts +12 -0
- package/dist/agents/orchestrator.d.ts.map +1 -0
- package/dist/agents/prompt-builder.d.ts +21 -0
- package/dist/agents/prompt-builder.d.ts.map +1 -0
- package/dist/agents/reviewer.d.ts +15 -0
- package/dist/agents/reviewer.d.ts.map +1 -0
- package/dist/ai-service-CGdlV3FV.js +1731 -0
- package/dist/ai-service-CGdlV3FV.js.map +1 -0
- package/dist/ai-system-extension-CPLV13Lk.js +2394 -0
- package/dist/ai-system-extension-CPLV13Lk.js.map +1 -0
- package/dist/ai-system-extension.d.ts +1 -0
- package/dist/ai-system-extension.d.ts.map +1 -0
- package/dist/api.d.ts +8 -0
- package/dist/api.d.ts.map +1 -0
- package/dist/api.js +46 -0
- package/dist/api.js.map +1 -0
- package/dist/chat-provider-contributions.d.ts +2 -0
- package/dist/chat-provider-contributions.d.ts.map +1 -0
- package/dist/core/constants.d.ts +21 -0
- package/dist/core/constants.d.ts.map +1 -0
- package/dist/core/index.d.ts +4 -0
- package/dist/core/index.d.ts.map +1 -0
- package/dist/core/interfaces.d.ts +138 -0
- package/dist/core/interfaces.d.ts.map +1 -0
- package/dist/core/message-utils.d.ts +4 -0
- package/dist/core/message-utils.d.ts.map +1 -0
- package/dist/core/types.d.ts +128 -0
- package/dist/core/types.d.ts.map +1 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +10 -0
- package/dist/index.js.map +1 -0
- package/dist/prompt-enhancer-contributions.d.ts +2 -0
- package/dist/prompt-enhancer-contributions.d.ts.map +1 -0
- package/dist/providers/base-provider.d.ts +15 -0
- package/dist/providers/base-provider.d.ts.map +1 -0
- package/dist/providers/index.d.ts +9 -0
- package/dist/providers/index.d.ts.map +1 -0
- package/dist/providers/ollama-provider.d.ts +7 -0
- package/dist/providers/ollama-provider.d.ts.map +1 -0
- package/dist/providers/openai-provider.d.ts +7 -0
- package/dist/providers/openai-provider.d.ts.map +1 -0
- package/dist/providers/provider-factory.d.ts +10 -0
- package/dist/providers/provider-factory.d.ts.map +1 -0
- package/dist/providers/provider-utils.d.ts +5 -0
- package/dist/providers/provider-utils.d.ts.map +1 -0
- package/dist/providers/streaming/ollama-parser.d.ts +9 -0
- package/dist/providers/streaming/ollama-parser.d.ts.map +1 -0
- package/dist/providers/streaming/sse-parser.d.ts +10 -0
- package/dist/providers/streaming/sse-parser.d.ts.map +1 -0
- package/dist/providers/streaming/stream-parser.d.ts +10 -0
- package/dist/providers/streaming/stream-parser.d.ts.map +1 -0
- package/dist/service/ai-service.d.ts +47 -0
- package/dist/service/ai-service.d.ts.map +1 -0
- package/dist/service/token-usage-tracker.d.ts +16 -0
- package/dist/service/token-usage-tracker.d.ts.map +1 -0
- package/dist/task/task-checkpoint.d.ts +12 -0
- package/dist/task/task-checkpoint.d.ts.map +1 -0
- package/dist/task/task-plan.d.ts +7 -0
- package/dist/task/task-plan.d.ts.map +1 -0
- package/dist/task/task-runner.d.ts +10 -0
- package/dist/task/task-runner.d.ts.map +1 -0
- package/dist/tools/index.d.ts +6 -0
- package/dist/tools/index.d.ts.map +1 -0
- package/dist/tools/token-estimator.d.ts +12 -0
- package/dist/tools/token-estimator.d.ts.map +1 -0
- package/dist/tools/tool-call-accumulator.d.ts +9 -0
- package/dist/tools/tool-call-accumulator.d.ts.map +1 -0
- package/dist/tools/tool-executor.d.ts +13 -0
- package/dist/tools/tool-executor.d.ts.map +1 -0
- package/dist/tools/tool-name-utils.d.ts +2 -0
- package/dist/tools/tool-name-utils.d.ts.map +1 -0
- package/dist/tools/tool-registry.d.ts +7 -0
- package/dist/tools/tool-registry.d.ts.map +1 -0
- package/dist/utils/token-estimator.d.ts +2 -0
- package/dist/utils/token-estimator.d.ts.map +1 -0
- package/dist/utils/tool-detector.d.ts +10 -0
- package/dist/utils/tool-detector.d.ts.map +1 -0
- package/dist/view/agent-group-manager.d.ts +37 -0
- package/dist/view/agent-group-manager.d.ts.map +1 -0
- package/dist/view/aiview.d.ts +42 -0
- package/dist/view/aiview.d.ts.map +1 -0
- package/dist/view/components/ai-agent-response-group.d.ts +18 -0
- package/dist/view/components/ai-agent-response-group.d.ts.map +1 -0
- package/dist/view/components/ai-chat-input.d.ts +20 -0
- package/dist/view/components/ai-chat-input.d.ts.map +1 -0
- package/dist/view/components/ai-chat-message.d.ts +22 -0
- package/dist/view/components/ai-chat-message.d.ts.map +1 -0
- package/dist/view/components/ai-config-editor.d.ts +33 -0
- package/dist/view/components/ai-config-editor.d.ts.map +1 -0
- package/dist/view/components/ai-empty-state.d.ts +13 -0
- package/dist/view/components/ai-empty-state.d.ts.map +1 -0
- package/dist/view/components/ai-tool-approval.d.ts +26 -0
- package/dist/view/components/ai-tool-approval.d.ts.map +1 -0
- package/dist/view/components/index.d.ts +7 -0
- package/dist/view/components/index.d.ts.map +1 -0
- package/dist/view/provider-manager.d.ts +30 -0
- package/dist/view/provider-manager.d.ts.map +1 -0
- package/dist/view/session-manager.d.ts +25 -0
- package/dist/view/session-manager.d.ts.map +1 -0
- package/dist/view/stream-manager.d.ts +23 -0
- package/dist/view/stream-manager.d.ts.map +1 -0
- package/dist/view/task-progress-panel.d.ts +14 -0
- package/dist/view/task-progress-panel.d.ts.map +1 -0
- package/dist/view/token-usage.d.ts +18 -0
- package/dist/view/token-usage.d.ts.map +1 -0
- package/dist/view/workspace-panel.d.ts +15 -0
- package/dist/view/workspace-panel.d.ts.map +1 -0
- package/dist/workflows/base-sequential-workflow.d.ts +13 -0
- package/dist/workflows/base-sequential-workflow.d.ts.map +1 -0
- package/dist/workflows/conditional-workflow.d.ts +7 -0
- package/dist/workflows/conditional-workflow.d.ts.map +1 -0
- package/dist/workflows/index.d.ts +10 -0
- package/dist/workflows/index.d.ts.map +1 -0
- package/dist/workflows/orchestrated-workflow.d.ts +10 -0
- package/dist/workflows/orchestrated-workflow.d.ts.map +1 -0
- package/dist/workflows/parallel-workflow.d.ts +6 -0
- package/dist/workflows/parallel-workflow.d.ts.map +1 -0
- package/dist/workflows/pipeline-workflow.d.ts +7 -0
- package/dist/workflows/pipeline-workflow.d.ts.map +1 -0
- package/dist/workflows/review-workflow.d.ts +10 -0
- package/dist/workflows/review-workflow.d.ts.map +1 -0
- package/dist/workflows/sequential-workflow.d.ts +7 -0
- package/dist/workflows/sequential-workflow.d.ts.map +1 -0
- package/dist/workflows/workflow-engine.d.ts +8 -0
- package/dist/workflows/workflow-engine.d.ts.map +1 -0
- package/dist/workflows/workflow-strategy.d.ts +8 -0
- package/dist/workflows/workflow-strategy.d.ts.map +1 -0
- package/dist/workspace/workspace.d.ts +20 -0
- package/dist/workspace/workspace.d.ts.map +1 -0
- package/package.json +34 -0
|
@@ -0,0 +1,1731 @@
|
|
|
1
|
+
import { contributionRegistry, createLogger, commandRegistry, appSettings, publish, persistenceService, subscribe, TOPIC_SETTINGS_CHANGED, logger as logger$1, rootContext } from "@eclipse-lyra/core";
|
|
2
|
+
// Pub/sub topics published by the AI service while streaming a response.
const TOPIC_AI_STREAM_STARTED = "events/aiservice/streamStarted";
const TOPIC_AI_STREAM_CHUNK = "events/aiservice/streamChunk";
const TOPIC_AI_STREAM_COMPLETE = "events/aiservice/streamComplete";
const TOPIC_AI_STREAM_ERROR = "events/aiservice/streamError";
// Published when the persisted AI configuration changes.
const TOPIC_AICONFIG_CHANGED = "events/aiservice/aiConfigChanged";
// Pub/sub topics describing the lifecycle of a multi-agent workflow.
const TOPIC_AGENT_WORKFLOW_STARTED = "events/aiservice/agentWorkflowStarted";
const TOPIC_AGENT_WORKFLOW_COMPLETE = "events/aiservice/agentWorkflowComplete";
const TOPIC_AGENT_WORKFLOW_ERROR = "events/aiservice/agentWorkflowError";
// Contribution-point ids used with the contribution registry.
const CID_AGENTS = "aiservice.agents";
const CID_CHAT_PROVIDERS = "aiservice.chatProviders";
const CID_PROMPT_ENHANCERS = "aiservice.promptEnhancers";
// Persistence key under which the AI configuration is stored.
const KEY_AI_CONFIG = "aiConfig";
// Agent role used when no explicit role is requested.
const DEFAULT_AGENT_ROLE = "appsupport";
// Initial configuration used when no stored config exists yet.
const AI_CONFIG_TEMPLATE = {
  "defaultProvider": "openai",
  "providers": [],
  "requireToolApproval": true
};
// Upper bound on tool-call round trips within a single chat turn.
const MAX_TOOL_ITERATIONS = 10;
// Number of recent tool-call signatures retained (presumably for repeated-call
// detection — confirm against the consumer of this constant).
const MAX_RECENT_TOOL_CALLS = 5;
|
|
22
|
+
/**
 * Base class for newline-delimited response-stream parsers.
 * Subclasses implement `processLine(line)` as an async generator that turns
 * one raw line into zero or more stream chunks, and may set `this.usage`
 * while parsing so the terminal "done" chunk carries token usage.
 */
class StreamParser {
  constructor() {
    this.decoder = new TextDecoder();
    this.usage = null;
  }

  /**
   * Drains a ReadableStream reader, splitting the decoded text on "\n" and
   * delegating every non-blank line to `processLine`. A trailing partial
   * line is flushed at end of stream, a final done chunk is emitted, and the
   * reader lock is always released.
   */
  async *readLines(reader) {
    let pending = "";
    this.usage = null;
    try {
      for (;;) {
        const { done, value } = await reader.read();
        if (done) break;
        pending += this.decoder.decode(value, { stream: true });
        const pieces = pending.split("\n");
        // The last piece may be an incomplete line; keep it for next round.
        pending = pieces.pop() ?? "";
        for (const piece of pieces) {
          if (piece.trim()) {
            yield* this.processLine(piece);
          }
        }
      }
      if (pending.trim()) {
        yield* this.processLine(pending);
      }
      yield this.makeDoneChunk();
    } finally {
      reader.releaseLock();
    }
  }

  /** Builds the terminal chunk, attaching token usage when one was captured. */
  makeDoneChunk() {
    const doneChunk = { type: "done", content: "" };
    if (this.usage) {
      doneChunk.metadata = { usage: this.usage };
    }
    return doneChunk;
  }
}
|
|
53
|
+
/**
 * Parser for OpenAI-style Server-Sent Events streams ("data: {json}" lines).
 * Emits token / error / done chunks and captures token usage when the stream
 * reports it.
 */
class SSEParser extends StreamParser {
  /** Entry point: delegates line splitting to StreamParser.readLines. */
  async *parse(reader) {
    yield* this.readLines(reader);
  }
  /**
   * Handles one SSE line. Lines without a "data: " prefix (comments,
   * keep-alives) are ignored, "[DONE]" terminates the stream, and malformed
   * JSON is silently skipped so one bad frame cannot abort the stream.
   */
  async *processLine(line) {
    if (!line.startsWith("data: ")) return;
    const data = line.slice(6).trim();
    if (data === "[DONE]") {
      yield this.makeDoneChunk();
      return;
    }
    try {
      const json = JSON.parse(data);
      if (json.error) {
        yield { type: "error", content: json.error.message || "Unknown error", metadata: json.error };
        return;
      }
      this.extractUsage(json);
      const chunk = this.parseChunk(json);
      if (chunk) yield chunk;
    } catch {
      // Deliberate: unparseable frames are dropped; the stream continues.
    }
  }
  /** Captures OpenAI-style usage counters for the final done chunk. */
  extractUsage(json) {
    if (!json.usage) return;
    const u = json.usage;
    this.usage = {
      promptTokens: u.prompt_tokens || 0,
      completionTokens: u.completion_tokens || 0,
      totalTokens: u.total_tokens || 0,
      estimated: false
    };
  }
  /**
   * Converts one parsed frame into a stream chunk: content deltas become
   * token chunks; tool calls are emitted either complete (attached to a full
   * message) or as incremental fragments (attached to a delta). Returns null
   * for frames carrying neither.
   */
  parseChunk(json) {
    const delta = json.choices?.[0]?.delta;
    const choice = json.choices?.[0];
    if (delta?.content) {
      return {
        type: "token",
        content: delta.content,
        message: { role: delta.role || "assistant", content: choice?.message?.content || delta.content }
      };
    }
    if (choice?.message?.tool_calls) {
      // Complete (non-streamed) tool calls: empty arguments default to "{}".
      const toolCalls = this.parseToolCalls(choice.message.tool_calls, true);
      if (toolCalls.length > 0) return { type: "token", content: "", toolCalls };
    } else if (delta?.tool_calls || choice?.delta?.tool_calls) {
      // Incremental fragments: argument strings arrive piecewise.
      const toolCalls = this.parseToolCalls(delta?.tool_calls || choice?.delta?.tool_calls || [], false);
      if (toolCalls.length > 0) return { type: "token", content: "", toolCalls };
    }
    return null;
  }
  /**
   * Normalizes raw tool-call entries: guarantees an id (synthesized from the
   * provider's stream index when absent) and preserves that index as
   * `_index` so the accumulator can merge fragments. `isComplete` selects
   * the empty-arguments default: "{}" for complete calls, "" for fragments.
   */
  parseToolCalls(toolCalls, isComplete) {
    return toolCalls.filter((tc) => tc.function !== void 0).map((tc, idx) => ({
      id: tc.id || `call_${tc.index !== void 0 ? tc.index : idx}_${Date.now()}`,
      type: "function",
      function: {
        name: tc.function?.name || "",
        arguments: tc.function?.arguments || (isComplete ? "{}" : "")
      },
      _index: tc.index !== void 0 ? tc.index : idx
    }));
  }
}
|
|
117
|
+
/**
 * Parser for Ollama's newline-delimited JSON streaming format: each line is
 * a standalone JSON object; `done: true` marks the end of the stream and
 * carries the token counters.
 */
class OllamaParser extends StreamParser {
  /** Entry point: delegates line splitting to StreamParser.readLines. */
  async *parse(reader) {
    yield* this.readLines(reader);
  }
  /** Handles one JSON line; malformed lines are silently skipped. */
  async *processLine(line) {
    try {
      const json = JSON.parse(line);
      if (json.error) {
        yield { type: "error", content: json.error, metadata: json };
        return;
      }
      if (json.done) {
        // Final frame: usage counters ride on the same object.
        this.extractUsage(json);
        yield this.makeDoneChunk();
        return;
      }
      const tokenChunk = this.parseToken(json);
      if (tokenChunk) yield tokenChunk;
    } catch {
      // Deliberate: unparseable lines are dropped; the stream continues.
    }
  }
  /** Maps Ollama's eval counters onto the shared usage shape. */
  extractUsage(json) {
    if (json.prompt_eval_count === void 0 && json.eval_count === void 0) return;
    const promptTokens = json.prompt_eval_count || 0;
    const completionTokens = json.eval_count || 0;
    this.usage = { promptTokens, completionTokens, totalTokens: promptTokens + completionTokens, estimated: false };
  }
  /**
   * Extracts a token chunk from either the chat shape (`message.content`)
   * or the generate shape (`response`); returns null when neither is set.
   */
  parseToken(json) {
    if (json.message?.content) {
      return { type: "token", content: json.message.content, message: { role: json.message.role || "assistant", content: json.message.content } };
    }
    if (json.response) {
      return { type: "token", content: json.response, message: { role: "assistant", content: json.response } };
    }
    return null;
  }
}
|
|
154
|
+
/**
 * Streams a chat completion and collects all token chunks into one string.
 * @param {Array<object>} messages - Chat messages to send.
 * @param {object} chatConfig - Provider configuration (must carry `model`).
 * @param {object} providerFactory - Factory used to resolve the provider.
 * @returns {Promise<string>} Concatenated content of every "token" chunk.
 */
async function streamToText(messages, chatConfig, providerFactory) {
  const provider = providerFactory.getProvider(chatConfig);
  const request = { model: chatConfig.model, messages, chatConfig };
  const parts = [];
  for await (const chunk of provider.stream(request)) {
    if (chunk.type === "token") {
      parts.push(chunk.content);
    }
  }
  return parts.join("");
}
|
|
162
|
+
/**
 * Derives a provider's base URL from its chat-completions endpoint by
 * stripping known completion-path suffixes, falling back to the URL origin.
 * @param {string|null|undefined} endpoint - Full chat endpoint URL.
 * @returns {string|null} Base URL, or null for empty/unparseable input.
 */
function extractBaseUrl(endpoint) {
  if (!endpoint) return null;
  // Most specific suffix first: "/api/v1/chat/completions" contains
  // "/v1/chat/completions", so the longer form must be checked before the
  // shorter one. (The original checked the shorter form first, making the
  // "/api/v1/chat/completions" branch unreachable and leaving a stray
  // "/api" segment on such endpoints.)
  const completionPaths = [
    "/api/v1/chat/completions",
    "/v1/chat/completions",
    "/api/chat/completion"
  ];
  for (const path of completionPaths) {
    if (endpoint.includes(path)) {
      return endpoint.replace(path, "");
    }
  }
  try {
    const url = new URL(endpoint);
    return `${url.protocol}//${url.host}`;
  } catch {
    return null;
  }
}
|
|
180
|
+
/**
 * Shared implementation for chat providers: parser selection, model
 * discovery, and streaming completions. Subclasses only decide which
 * endpoints they handle (`canHandle`) and set their `name`.
 */
class BaseProvider {
  /**
   * Picks the stream parser: SSE for event-stream responses or endpoints
   * mentioning "openai"; otherwise Ollama's newline-delimited JSON.
   */
  createParser(contentType, endpoint) {
    if (contentType.includes("text/event-stream") || endpoint.includes("openai")) {
      return new SSEParser();
    }
    return new OllamaParser();
  }
  /**
   * Fetches `<base>/v1/models` and returns `{id, name}` entries.
   * Best-effort: returns [] on any failure (no endpoint, bad base URL,
   * HTTP error, network error) rather than throwing.
   */
  async getAvailableModels(chatProvider) {
    if (!chatProvider.chatApiEndpoint) return [];
    const baseUrl = extractBaseUrl(chatProvider.chatApiEndpoint);
    if (!baseUrl) return [];
    try {
      const headers = { "Content-Type": "application/json" };
      if (chatProvider.apiKey) {
        headers["Authorization"] = `Bearer ${chatProvider.apiKey}`;
      }
      const response = await fetch(`${baseUrl}/v1/models`, { method: "GET", headers });
      if (!response.ok) return [];
      const data = await response.json();
      return (data.data || []).map((m) => ({ id: m.id, name: m.name || m.id }));
    } catch {
      // Model discovery is optional; treat any error as "no models".
      return [];
    }
  }
  /**
   * Streams a chat completion from the configured endpoint.
   * Yields parsed chunks ({type: "token" | "error" | "done", ...}); all
   * failures are reported as "error" chunks rather than thrown.
   * Cancellation is supported via `params.signal` (AbortSignal).
   */
  async *stream(params) {
    const requestBody = {
      model: params.model,
      stream: true,
      messages: params.messages,
      // Config parameters are spread last, so they can override the fields
      // above (later object-spread keys win).
      ...params.chatConfig.parameters
    };
    if (params.tools && params.tools.length > 0) {
      requestBody.tools = params.tools;
      requestBody.tool_choice = "auto";
    }
    const response = await fetch(params.chatConfig.chatApiEndpoint, {
      method: "POST",
      headers: {
        "Authorization": `Bearer ${params.chatConfig.apiKey}`,
        "Content-Type": "application/json",
        "Accept": "text/event-stream"
      },
      body: JSON.stringify(requestBody),
      signal: params.signal
    });
    if (!response.ok) {
      const errorText = await response.text().catch(() => "Unknown error");
      yield { type: "error", content: `HTTP ${response.status}: ${errorText}`, metadata: { status: response.status } };
      return;
    }
    if (!response.body) {
      yield { type: "error", content: "Response body is null or empty", metadata: { status: response.status } };
      return;
    }
    const reader = response.body.getReader();
    if (!reader) {
      yield { type: "error", content: "Response body is not readable" };
      return;
    }
    // Choose parser from the response headers, falling back to the endpoint
    // hint (see createParser).
    const contentType = response.headers.get("content-type") || "";
    const parser = this.createParser(contentType, params.chatConfig.chatApiEndpoint);
    try {
      for await (const chunk of parser.parse(reader)) {
        yield chunk;
      }
    } catch (error) {
      yield {
        type: "error",
        content: error instanceof Error ? error.message : "Failed to parse response stream",
        metadata: { error, contentType }
      };
    }
  }
}
|
|
254
|
+
/**
 * OpenAI-compatible chat provider; all streaming behavior is inherited
 * from BaseProvider.
 */
class OpenAIProvider extends BaseProvider {
  constructor() {
    super(...arguments);
    this.name = "openai";
  }

  /**
   * Matches endpoints that mention "openai" or expose the standard
   * OpenAI-style "v1/chat/completions" path.
   */
  canHandle(chatProvider) {
    const endpoint = chatProvider.chatApiEndpoint;
    const markers = ["openai", "v1/chat/completions"];
    return markers.some((marker) => endpoint.includes(marker));
  }
}
|
|
263
|
+
/**
 * Ollama chat provider; all streaming behavior is inherited from
 * BaseProvider.
 */
class OllamaProvider extends BaseProvider {
  constructor() {
    super(...arguments);
    this.name = "ollama";
  }

  /**
   * Matches providers explicitly named "ollama" (case-insensitive) or whose
   * endpoint mentions "ollama" / Ollama's default port "localhost:11434".
   */
  canHandle(chatProvider) {
    if (chatProvider.name.toLowerCase() === "ollama") {
      return true;
    }
    const endpoint = chatProvider.chatApiEndpoint;
    return endpoint.includes("ollama") || endpoint.includes("localhost:11434");
  }
}
|
|
272
|
+
/**
 * Registry of chat provider implementations; resolves which one should
 * serve a given chat configuration.
 */
class ProviderFactory {
  constructor() {
    // Built-ins are registered up front; OpenAI (first) is also the fallback.
    this.providers = [new OpenAIProvider(), new OllamaProvider()];
  }

  /** Adds a custom provider to the lookup list. */
  registerProvider(provider) {
    this.providers.push(provider);
  }

  /**
   * Returns the first provider whose canHandle accepts the configuration,
   * falling back to the first registered provider.
   */
  getProvider(chatProvider) {
    const match = this.providers.find((candidate) => candidate.canHandle(chatProvider));
    return match ?? this.providers[0];
  }

  /** Returns a defensive copy of the provider list. */
  getAllProviders() {
    return this.providers.slice();
  }
}
|
|
288
|
+
/**
 * Looks up agent contributions from the global contribution registry and
 * selects the ones applicable to a given context and (optional) role set.
 */
class AgentRegistry {
  /** Returns every registered agent contribution. */
  getAgentContributions() {
    return contributionRegistry.getContributions(CID_AGENTS);
  }

  /**
   * Keeps contributions whose optional canHandle(context) accepts the
   * context, ordered by descending priority (missing priority counts as 0).
   */
  filterAndSortAgents(contributions, context) {
    const applicable = contributions.filter(
      (contribution) => !contribution.canHandle || contribution.canHandle(context)
    );
    return applicable.sort(
      (left, right) => (right.priority || 0) - (left.priority || 0)
    );
  }

  /**
   * Resolves the agents that should answer in the given context.
   * @throws Error when no agents are registered, when none of the requested
   *   roles match, or when no agent can handle the context.
   */
  getMatchingAgents(context, roles) {
    const contributions = this.getAgentContributions();
    if (contributions.length === 0) {
      throw new Error("No agents are registered. The App Support agent should be available from the AI system extension.");
    }
    const hasRoleFilter = Boolean(roles?.length);
    const candidates = hasRoleFilter
      ? contributions.filter((contribution) => roles.includes(contribution.role))
      : contributions;
    const active = this.filterAndSortAgents(candidates, context);
    if (active.length === 0) {
      const available = contributions.map((contribution) => contribution.role).join(", ");
      if (hasRoleFilter) {
        throw new Error(`No agents found for requested roles: ${roles.join(", ")}. Available: ${available}`);
      }
      throw new Error(`No agents can handle the current context. Available: ${available}`);
    }
    return active;
  }
}
|
|
311
|
+
/**
 * Reduces an internal chat message to the fields accepted by chat-completion
 * APIs: role and content, plus tool linkage fields when present and truthy.
 */
function sanitizeMessageForAPI(message) {
  const { role, content } = message;
  const apiMessage = { role, content };
  if ("tool_call_id" in message && message.tool_call_id) {
    apiMessage.tool_call_id = message.tool_call_id;
  }
  if ("tool_calls" in message && message.tool_calls) {
    apiMessage.tool_calls = message.tool_calls;
  }
  return apiMessage;
}

/** Sanitizes an entire conversation (see sanitizeMessageForAPI). */
function sanitizeMessagesForAPI(messages) {
  return messages.map((message) => sanitizeMessageForAPI(message));
}
|
|
327
|
+
// Logger for the tool-detection heuristic below.
const logger = createLogger("ToolDetector");
// Prompts that are plain greetings never need tools.
const GREETINGS = ["hello", "hi", "hey", "thanks", "thank you", "bye", "goodbye"];
// Verbs suggesting the user wants the assistant to perform an action.
const ACTION_KEYWORDS = [
  "create",
  "open",
  "delete",
  "read",
  "write",
  "edit",
  "save",
  "rename",
  "move",
  "copy",
  "list",
  "show",
  "display",
  "run",
  "execute",
  "build",
  "add",
  "remove",
  "update",
  "modify",
  "change",
  "set",
  "get",
  "find",
  "search",
  "filter",
  "sort",
  "install",
  "uninstall",
  "load",
  "import",
  "export",
  "generate",
  "make",
  "do",
  "perform",
  "call",
  "invoke"
];
// Nouns naming application artifacts that an action could target.
const CONTEXT_KEYWORDS = [
  "file",
  "folder",
  "directory",
  "workspace",
  "editor",
  "map",
  "layer",
  "command",
  "tool",
  "extension",
  "script",
  "code",
  "project"
];
|
|
384
|
+
/**
 * Heuristic detector deciding whether a user prompt likely requires tool
 * (function) calling, so tools are only attached to requests that need them.
 */
class ToolDetector {
  /**
   * Returns true when the prompt contains an action keyword combined with
   * either a context keyword or enough length to suggest a real task.
   * Empty prompts and plain greetings never need tools.
   * @param {string} prompt - Raw user prompt.
   * @returns {boolean}
   */
  needsTools(prompt) {
    if (!prompt?.trim()) return false;
    const normalized = prompt.toLowerCase().trim();
    if (GREETINGS.some((g) => normalized === g || normalized.startsWith(g + " "))) return false;
    // Fix: match keywords against the lower-cased prompt. The original
    // compared against the raw prompt, so e.g. "Create a file" missed the
    // "create" action keyword (String.includes is case-sensitive).
    const hasAction = ACTION_KEYWORDS.some((k) => normalized.includes(k));
    const hasContext = CONTEXT_KEYWORDS.some((k) => normalized.includes(k));
    const needsTools = hasAction && (hasContext || prompt.length > 20);
    if (needsTools) logger.info(`Heuristic: needsTools=true (action+context or long prompt)`);
    return needsTools;
  }
  /** No resources to release; present to satisfy the disposable contract. */
  dispose() {
  }
}
// Shared singleton used by the prompt-building pipeline.
const toolDetector = new ToolDetector();
|
|
399
|
+
/**
 * Assembles the outbound request for an agent turn: sanitizes and rewrites
 * the chat history, resolves the tool list, and builds the system prompt
 * through a chain of prompt enhancers.
 */
class PromptBuilder {
  constructor(toolRegistry) {
    this.toolRegistry = toolRegistry;
    // Enhancers registered directly on this builder (see addEnhancer).
    this.enhancers = [];
  }
  /** Registers a prompt enhancer applied to every agent's system prompt. */
  addEnhancer(enhancer) {
    this.enhancers.push(enhancer);
  }
  /**
   * Resolves the agent's system prompt (a string or a factory function) and
   * pipes it through all enhancers — the agent's own, the builder's, and the
   * contributed ones — in descending priority order. A throwing enhancer is
   * logged and skipped; a non-string result is ignored.
   * @throws Error when the contribution has no usable system prompt.
   */
  async getSysPrompt(contribution, context) {
    let sysPrompt = contribution.sysPrompt;
    if (typeof sysPrompt === "function") sysPrompt = sysPrompt();
    if (!sysPrompt || typeof sysPrompt !== "string") {
      throw new Error(`Agent "${contribution.role}" is missing a system prompt.`);
    }
    const allEnhancers = [
      ...contribution.promptEnhancers || [],
      ...this.enhancers,
      ...this.getContributedEnhancers()
    ].sort((a, b) => (b.priority || 0) - (a.priority || 0));
    let enhanced = sysPrompt;
    for (const enhancer of allEnhancers) {
      try {
        const result = await enhancer.enhance(enhanced, context);
        if (result && typeof result === "string") enhanced = result;
      } catch (err) {
        console.warn("[PromptBuilder] Enhancer failed:", err);
      }
    }
    return enhanced;
  }
  /**
   * Recasts a multi-agent transcript from one agent's point of view: its own
   * messages become "assistant", user messages stay as-is, and other agents'
   * replies are wrapped as quoted user messages.
   */
  rewriteChatHistoryForAgent(history, targetRole) {
    return history.map((m) => {
      if (m.role === "user") return { role: m.role, content: m.content };
      if (m.role === targetRole) return { role: "assistant", content: m.content };
      return { role: "user", content: `***Agent '${m.role}' replied:***
${m.content}` };
    });
  }
  /** Collects enhancers contributed via the prompt-enhancer contribution point. */
  getContributedEnhancers() {
    const contributions = contributionRegistry.getContributions(CID_PROMPT_ENHANCERS);
    // Contribution-level priority (if set) overrides the enhancer's own.
    return contributions.map((c) => ({ ...c.enhancer, priority: c.priority ?? c.enhancer.priority }));
  }
  /**
   * Produces the final payload for an agent turn: runs the optional
   * beforeSend hook, sanitizes and rewrites history, resolves the tool list
   * (honoring smart detection on the last message), and prepends the
   * enhanced system prompt.
   * @returns {Promise<{messages: Array, userPromptIndex: number, tools: Array|undefined}>}
   */
  async build(contribution, messages, context, hooks) {
    if (hooks?.beforeSend) await hooks.beforeSend(messages, context);
    const sanitized = sanitizeMessagesForAPI(messages);
    const rewritten = this.rewriteChatHistoryForAgent(sanitized, contribution.role);
    // The agent's tool config may itself be a (possibly async) factory.
    let toolsConfig = contribution.tools;
    if (typeof toolsConfig === "function") toolsConfig = await toolsConfig();
    let tools;
    if (toolsConfig?.enabled) {
      if (toolsConfig.smartToolDetection) {
        // Only attach tools when the latest message looks like it needs them.
        const lastUser = messages[messages.length - 1];
        if (toolDetector.needsTools(lastUser?.content || "")) {
          tools = this.toolRegistry.getAvailableTools(context, toolsConfig.commandFilter);
        }
      } else {
        tools = this.toolRegistry.getAvailableTools(context, toolsConfig.commandFilter);
      }
    }
    const sysPrompt = await this.getSysPrompt(contribution, context);
    rewritten.unshift({ role: "system", content: sysPrompt });
    return { messages: rewritten, userPromptIndex: rewritten.length - 1, tools };
  }
}
|
|
463
|
+
/**
 * Runs chat messages through a prioritized chain of message processors.
 */
class MessageProcessorService {
  constructor() {
    this.processors = [];
  }

  /** Registers a service-wide processor applied to every message. */
  addProcessor(processor) {
    this.processors.push(processor);
  }

  /**
   * Applies the agent's own processors plus the service-wide ones, highest
   * priority first (missing priority counts as 0), each receiving the
   * previous processor's output. The input message is shallow-copied and
   * never mutated.
   */
  async process(message, contribution, context) {
    const agentProcessors = contribution.messageProcessors || [];
    const chain = [...agentProcessors, ...this.processors].sort(
      (left, right) => (right.priority || 0) - (left.priority || 0)
    );
    let current = { ...message };
    for (const processor of chain) {
      current = await processor.process(current, context);
    }
    return current;
  }
}
|
|
482
|
+
/**
 * Merges streamed tool-call fragments (matched by stream index or call id)
 * into complete tool calls with fully assembled argument strings.
 */
class ToolCallAccumulator {
  constructor() {
    // Partial/complete tool calls keyed by their resolved id.
    this.accumulatedToolCalls = /* @__PURE__ */ new Map();
    // Maps a provider-assigned stream index to the resolved call id.
    this.toolCallIndexMap = /* @__PURE__ */ new Map();
  }

  /** Folds the tool-call fragments of one stream chunk into the accumulator. */
  processChunk(chunk) {
    if (chunk.type !== "token" || !chunk.toolCalls?.length) return;
    for (const fragment of chunk.toolCalls) {
      const streamIndex = fragment._index;
      const fragmentId = fragment.id;
      let targetId;
      let existing;
      if (streamIndex !== void 0 && this.toolCallIndexMap.has(streamIndex)) {
        // Continuation of a call already seen at this stream index.
        targetId = this.toolCallIndexMap.get(streamIndex);
        existing = this.accumulatedToolCalls.get(targetId);
      } else if (fragmentId && this.accumulatedToolCalls.has(fragmentId)) {
        // Continuation matched by id.
        targetId = fragmentId;
        existing = this.accumulatedToolCalls.get(targetId);
      } else {
        // First fragment of a new call; synthesize an id if none was sent.
        targetId = fragmentId || `call_${streamIndex !== void 0 ? streamIndex : Date.now()}_${Math.random()}`;
        existing = void 0;
      }
      if (existing) {
        // Merge: first non-empty name/type wins, argument text concatenates.
        this.accumulatedToolCalls.set(targetId, {
          id: targetId,
          type: fragment.type || existing.type,
          function: {
            name: fragment.function.name || existing.function.name,
            arguments: (existing.function.arguments || "") + (fragment.function.arguments || "")
          }
        });
        if (streamIndex !== void 0 && !this.toolCallIndexMap.has(streamIndex)) {
          this.toolCallIndexMap.set(streamIndex, targetId);
        }
      } else {
        this.accumulatedToolCalls.set(targetId, { ...fragment, id: targetId });
        if (streamIndex !== void 0) {
          this.toolCallIndexMap.set(streamIndex, targetId);
        }
      }
    }
  }

  /**
   * Returns the completed tool calls, dropping any without a non-blank name
   * and defaulting blank argument strings to "{}".
   */
  getFinalToolCalls() {
    const completed = [];
    for (const call of this.accumulatedToolCalls.values()) {
      if (!(call.function.name?.trim().length > 0)) continue;
      completed.push({
        ...call,
        function: { ...call.function, arguments: call.function.arguments?.trim() || "{}" }
      });
    }
    return completed;
  }

  /** Clears all accumulated state so the instance can be reused. */
  reset() {
    this.accumulatedToolCalls.clear();
    this.toolCallIndexMap.clear();
  }
}
|
|
535
|
+
/**
 * Converts an arbitrary command id into an API-safe function name:
 * characters outside [a-zA-Z0-9_-] become "_", names not starting with a
 * letter get a "cmd_" prefix, runs of underscores collapse to one, and a
 * single leading/trailing underscore is stripped.
 */
function sanitizeFunctionName(name) {
  const safeChars = name.replace(/[^a-zA-Z0-9_-]/g, "_");
  const letterStart = safeChars.replace(/^[^a-zA-Z]/, "cmd_$&");
  const collapsed = letterStart.replace(/_+/g, "_");
  return collapsed.replace(/^_|_$/g, "");
}
|
|
538
|
+
class ToolExecutor {
|
|
539
|
+
findCommand(toolCall, context) {
|
|
540
|
+
const sanitizedName = toolCall.function.name;
|
|
541
|
+
const direct = commandRegistry.getCommand(sanitizedName);
|
|
542
|
+
if (direct) return direct;
|
|
543
|
+
const allCommands = commandRegistry.listCommands();
|
|
544
|
+
for (const [commandId, command] of Object.entries(allCommands)) {
|
|
545
|
+
if (sanitizeFunctionName(commandId) === sanitizedName) {
|
|
546
|
+
return command;
|
|
547
|
+
}
|
|
548
|
+
}
|
|
549
|
+
return null;
|
|
550
|
+
}
|
|
551
|
+
parseArguments(argsStr) {
|
|
552
|
+
if (!argsStr?.trim() || argsStr === "{}") return {};
|
|
553
|
+
try {
|
|
554
|
+
const parsed = JSON.parse(argsStr);
|
|
555
|
+
return parsed && typeof parsed === "object" ? parsed : {};
|
|
556
|
+
} catch {
|
|
557
|
+
return {};
|
|
558
|
+
}
|
|
559
|
+
}
|
|
560
|
+
sanitizeArguments(args, command) {
|
|
561
|
+
if (!command?.parameters || !args || typeof args !== "object") return args || {};
|
|
562
|
+
const sanitizedArgs = {};
|
|
563
|
+
command.parameters.forEach((param) => {
|
|
564
|
+
const sanitizedParamName = sanitizeFunctionName(param.name);
|
|
565
|
+
if (sanitizedParamName in args) {
|
|
566
|
+
sanitizedArgs[param.name] = args[sanitizedParamName];
|
|
567
|
+
}
|
|
568
|
+
});
|
|
569
|
+
return sanitizedArgs;
|
|
570
|
+
}
|
|
571
|
+
/**
 * Executes a single tool call against the command registry.
 * Resolves the command, parses and re-maps the arguments, runs the command
 * in a fresh execution context, and wraps the outcome in a result envelope.
 * Never throws: failures are returned as `{ id, result: null, error }`.
 * @returns {Promise<{id: string, result: object|null, error?: string}>}
 */
async executeToolCall(toolCall, context) {
  try {
    const command = this.findCommand(toolCall, context);
    // Fall back to the raw function name when no registered command matched.
    const commandId = command?.id || toolCall.function.name;
    const args = this.parseArguments(toolCall.function.arguments || "{}");
    const sanitizedArgs = this.sanitizeArguments(args, command);
    // Fresh registry context merged over the caller's; params spread last.
    const freshContext = commandRegistry.createExecutionContext(sanitizedArgs);
    const execContext = { ...context, ...freshContext, params: sanitizedArgs };
    const commandResult = await commandRegistry.execute(commandId, execContext);
    const commandName = command?.name || commandId;
    const resultMessage = {
      success: true,
      message: `Command "${commandName}" executed successfully`,
      command: commandId
    };
    if (Object.keys(sanitizedArgs).length > 0) {
      resultMessage.parameters = sanitizedArgs;
    }
    if (commandResult != null) {
      let resolved = commandResult;
      // Defensive: unwrap a promise nested inside the awaited result.
      if (resolved instanceof Promise) resolved = await resolved;
      resultMessage.result = resolved;
      if (command?.output?.length) {
        // Describe the command's declared outputs for the model.
        resultMessage.output = command.output.map((v) => `${v.name}: ${v.description || v.type || "value"}`).join(", ");
      }
    }
    return { id: toolCall.id, result: resultMessage };
  } catch (error) {
    // Best-effort second lookup so the error message can name the command.
    let command = null;
    try {
      command = this.findCommand(toolCall, context);
    } catch {
    }
    const errorMessage = error instanceof Error ? error.message : String(error);
    const commandName = command?.name || toolCall.function.name;
    let detailedError = errorMessage;
    if (errorMessage.includes("No handler found") || errorMessage.includes("No handlers registered")) {
      detailedError = `Command "${commandName}" cannot be executed. ${errorMessage}.`;
    }
    return { id: toolCall.id, result: null, error: detailedError };
  }
}
|
|
613
|
+
async executeToolCalls(toolCalls, context) {
|
|
614
|
+
const results = [];
|
|
615
|
+
for (const toolCall of toolCalls) {
|
|
616
|
+
results.push(await this.executeToolCall(toolCall, context));
|
|
617
|
+
}
|
|
618
|
+
return results;
|
|
619
|
+
}
|
|
620
|
+
/** Create a fresh accumulator for assembling streamed tool-call deltas. */
createToolCallAccumulator() {
return new ToolCallAccumulator();
}
|
|
623
|
+
createToolCallSignature(toolCall) {
|
|
624
|
+
let args = {};
|
|
625
|
+
try {
|
|
626
|
+
const parsed = JSON.parse(toolCall.function.arguments || "{}");
|
|
627
|
+
args = parsed && typeof parsed === "object" ? parsed : {};
|
|
628
|
+
} catch {
|
|
629
|
+
args = {};
|
|
630
|
+
}
|
|
631
|
+
const sortedArgs = Object.keys(args).sort().reduce((acc, key) => {
|
|
632
|
+
acc[key] = args[key];
|
|
633
|
+
return acc;
|
|
634
|
+
}, {});
|
|
635
|
+
return `${toolCall.function.name}:${JSON.stringify(sortedArgs)}`;
|
|
636
|
+
}
|
|
637
|
+
}
|
|
638
|
+
/**
 * Converts registry commands into the function-tool schema expected by
 * chat providers (`{ type: "function", function: {...} }`).
 */
class ToolRegistry {
  /**
   * Describe one command as a function tool definition. Parameter names
   * are sanitized the same way argument keys are mapped back on execution.
   * @param {object} command - registry command with optional `parameters`
   * @param {object} context - accepted for signature compatibility; unused here
   * @returns {object} tool definition
   */
  commandToTool(command, context) {
    const properties = {};
    const required = [];
    for (const param of command.parameters ?? []) {
      const key = sanitizeFunctionName(param.name);
      const schema = {
        type: param.type || "string",
        description: param.description
      };
      if (param.allowedValues) {
        schema.enum = param.allowedValues;
      }
      properties[key] = schema;
      if (param.required === true) {
        required.push(key);
      }
    }
    return {
      type: "function",
      function: {
        name: sanitizeFunctionName(command.id),
        description: command.description || command.name,
        parameters: { type: "object", properties, required }
      }
    };
  }
  /**
   * List every registered command as a tool, optionally filtered.
   * @param {object} context - passed to the filter and converter
   * @param {(cmd: object, ctx: object) => boolean} [commandFilter]
   * @returns {object[]} tool definitions
   */
  getAvailableTools(context, commandFilter) {
    const all = Object.values(commandRegistry.listCommands());
    const selected = commandFilter
      ? all.filter((cmd) => commandFilter(cmd, context))
      : all;
    return selected.map((cmd) => this.commandToTool(cmd, context));
  }
}
|
|
671
|
+
/**
 * Runs every agent contribution concurrently against the same chat
 * history. Failures are isolated per agent: each error is recorded in
 * `results.errors` and reported via `options.onAgentError`, without
 * aborting the other agents.
 */
class ParallelWorkflowStrategy {
  async execute(contributions, options, results, executeAgent) {
    const chatConfig = options.chatConfig;
    if (!chatConfig) throw new Error("Chat config is required");
    const runOne = async (contrib) => {
      try {
        await executeAgent(contrib, options.chatContext.history, results.sharedState, chatConfig, options, results);
      } catch (error) {
        const err = error instanceof Error ? error : new Error(String(error));
        results.errors.set(contrib.role, err);
        options.onAgentError?.(contrib.role, err);
      }
    };
    await Promise.all(contributions.map(runOne));
  }
}
|
|
686
|
+
/**
 * Shared helpers for workflow strategies that run agents one after
 * another, threading accumulated state and prior agent output forward.
 */
class BaseSequentialWorkflow {
  /**
   * Build the context handed to the next agent: accumulated state, the
   * live call-context proxy, and a summary (role + content) of every
   * agent that has already produced a message.
   */
  createAgentContextWithPreviousAgents(accumulatedState, options, results) {
    const previousAgents = [];
    for (const [role, msg] of results.messages.entries()) {
      previousAgents.push({ role, content: msg.content });
    }
    return {
      ...accumulatedState,
      ...options.callContext.getProxy(),
      previousAgents
    };
  }
  /**
   * Record an agent's final message: append it to the running transcript,
   * fold the agent context (plus the message) into the accumulated state,
   * and mirror that state onto `results.sharedState`.
   * @returns {{currentMessages: Array, accumulatedState: object}}
   */
  updateWorkflowState(finalMessage, currentMessages, accumulatedState, agentContext, results) {
    currentMessages.push(finalMessage);
    const nextState = { ...accumulatedState, ...agentContext, message: finalMessage };
    results.sharedState = nextState;
    return { currentMessages, accumulatedState: nextState };
  }
}
|
|
704
|
+
/**
 * Runs agents strictly in order. Each agent sees every message produced
 * so far; the first error or empty agent result stops the chain.
 */
class SequentialWorkflowStrategy extends BaseSequentialWorkflow {
  async execute(contributions, options, results, executeAgent) {
    const chatConfig = options.chatConfig;
    if (!chatConfig) throw new Error("Chat config is required");
    let messages = [...options.chatContext.history];
    let state = { ...results.sharedState };
    for (const contrib of contributions) {
      try {
        const agentContext = this.createAgentContextWithPreviousAgents(state, options, results);
        const finalMessage = await executeAgent(contrib, messages, state, chatConfig, options, results);
        // An agent returning nothing ends the workflow early.
        if (!finalMessage) break;
        const next = this.updateWorkflowState(finalMessage, messages, state, agentContext, results);
        messages = next.currentMessages;
        state = next.accumulatedState;
      } catch (error) {
        const err = error instanceof Error ? error : new Error(String(error));
        results.errors.set(contrib.role, err);
        options.onAgentError?.(contrib.role, err);
        break;
      }
    }
  }
}
|
|
727
|
+
/**
 * Sequential workflow where each agent may opt out: a contribution with a
 * `canHandle` predicate is skipped when the predicate rejects the current
 * agent context. Unlike the plain sequential strategy, an agent error is
 * recorded but does NOT stop the remaining agents.
 */
class ConditionalWorkflowStrategy extends BaseSequentialWorkflow {
  async execute(contributions, options, results, executeAgent) {
    const chatConfig = options.chatConfig;
    if (!chatConfig) throw new Error("Chat config is required");
    let messages = [...options.chatContext.history];
    let state = { ...results.sharedState };
    for (const contrib of contributions) {
      try {
        const agentContext = this.createAgentContextWithPreviousAgents(state, options, results);
        const wantsTurn = !contrib.canHandle || contrib.canHandle(agentContext);
        if (!wantsTurn) continue;
        const finalMessage = await executeAgent(contrib, messages, state, chatConfig, options, results);
        // An agent returning nothing ends the workflow early.
        if (!finalMessage) break;
        const next = this.updateWorkflowState(finalMessage, messages, state, agentContext, results);
        messages = next.currentMessages;
        state = next.accumulatedState;
      } catch (error) {
        const err = error instanceof Error ? error : new Error(String(error));
        results.errors.set(contrib.role, err);
        options.onAgentError?.(contrib.role, err);
      }
    }
  }
}
|
|
750
|
+
/**
 * Dependency-aware workflow: agents are grouped into "waves" using their
 * consumes/produces artifact declarations. Waves run sequentially; the
 * agents inside a wave run concurrently. After each wave, every message
 * its agents produced is appended to the shared transcript.
 */
class PipelineWorkflowStrategy {
  async execute(contributions, options, results, executeAgent) {
    const chatConfig = options.chatConfig;
    if (!chatConfig) throw new Error("Chat config is required");
    const transcript = [...options.chatContext.history];
    for (const wave of this.buildTopoOrder(contributions)) {
      await Promise.all(wave.map(async (contrib) => {
        try {
          await executeAgent(contrib, transcript, results.sharedState, chatConfig, options, results);
        } catch (error) {
          const err = error instanceof Error ? error : new Error(String(error));
          results.errors.set(contrib.role, err);
          options.onAgentError?.(contrib.role, err);
        }
      }));
      for (const contrib of wave) {
        const msg = results.messages.get(contrib.role);
        if (msg) transcript.push(msg);
      }
    }
  }
  /**
   * Group contributions into execution waves. A contribution joins a wave
   * once every artifact it consumes has been produced by an already-placed
   * contribution. If no progress can be made (cycle or missing producer),
   * the remaining contributions are flushed into one final wave so the
   * workflow still terminates.
   */
  buildTopoOrder(contributions) {
    const waves = [];
    const placed = new Set();
    while (placed.size < contributions.length) {
      const producedSoFar = contributions
        .filter((c) => placed.has(c.role))
        .flatMap((c) => c.produces || []);
      const ready = contributions.filter((c) => {
        if (placed.has(c.role)) return false;
        if (!c.consumes?.length) return true;
        return c.consumes.every((artifact) => producedSoFar.includes(artifact));
      });
      if (ready.length === 0) {
        waves.push(contributions.filter((c) => !placed.has(c.role)));
        break;
      }
      waves.push(ready);
      for (const c of ready) placed.add(c.role);
    }
    return waves;
  }
}
|
|
791
|
+
/**
 * Create a new task plan from orchestrator-proposed steps.
 * Every step starts out "pending" with a zero revision count; the plan
 * itself starts in "planning" status with matching created/updated stamps.
 * @param {string} originalPrompt - the user request being decomposed
 * @param {object[]} steps - step templates (id/role/subTask/dependsOn/...)
 * @returns {object} the new plan with a unique id
 */
function createTaskPlan(originalPrompt, steps) {
  const timestamp = Date.now();
  const suffix = Math.random().toString(36).slice(2, 9);
  return {
    id: `plan-${timestamp}-${suffix}`,
    originalPrompt,
    steps: steps.map((step) => ({ ...step, status: "pending", revisions: 0 })),
    status: "planning",
    createdAt: timestamp,
    updatedAt: timestamp
  };
}
|
|
802
|
+
/**
 * Pending steps whose dependencies have all completed.
 * @param {object} plan - task plan with a `steps` array
 * @returns {object[]} steps that can start now
 */
function getNextRunnableSteps(plan) {
  const done = new Set();
  for (const step of plan.steps) {
    if (step.status === "completed") done.add(step.id);
  }
  return plan.steps.filter(
    (step) => step.status === "pending" && step.dependsOn.every((id) => done.has(id))
  );
}
|
|
806
|
+
/** True when every step either completed or was skipped (vacuously true for no steps). */
function isPlanComplete(plan) {
  for (const step of plan.steps) {
    if (step.status !== "completed" && step.status !== "skipped") return false;
  }
  return true;
}
|
|
809
|
+
/** True as soon as any step in the plan has failed. */
function isPlanFailed(plan) {
  for (const step of plan.steps) {
    if (step.status === "failed") return true;
  }
  return false;
}
|
|
812
|
+
const ORCHESTRATOR_SYS_PROMPT = `You are a task orchestrator. Given a user's complex request, decompose it into a structured execution plan.
|
|
813
|
+
|
|
814
|
+
Respond with ONLY a JSON object matching this schema (no markdown, no explanation):
|
|
815
|
+
{
|
|
816
|
+
"steps": [
|
|
817
|
+
{
|
|
818
|
+
"id": "step-1",
|
|
819
|
+
"role": "<agent role>",
|
|
820
|
+
"subTask": "<specific instruction for this step>",
|
|
821
|
+
"dependsOn": [],
|
|
822
|
+
"consumes": [],
|
|
823
|
+
"produces": ["<artifact-id>"]
|
|
824
|
+
}
|
|
825
|
+
]
|
|
826
|
+
}
|
|
827
|
+
|
|
828
|
+
Rules:
|
|
829
|
+
- Each step must have a unique id (step-1, step-2, ...)
|
|
830
|
+
- "role" must match an available agent role
|
|
831
|
+
- "dependsOn" lists step IDs that must complete before this step
|
|
832
|
+
- "consumes" and "produces" are artifact IDs
|
|
833
|
+
- Steps with no dependencies can run in parallel
|
|
834
|
+
- Keep the plan minimal — only as many steps as needed`;
|
|
835
|
+
/**
 * Ask the model to decompose `options.prompt` into a task plan, offering
 * the non-orchestrator agents as available roles. If the response yields
 * no parsable JSON plan, falls back to a single-step plan that hands the
 * whole prompt to the first available agent (or DEFAULT_AGENT_ROLE).
 * @param {object} options - { prompt, availableAgents, chatConfig, context, executeCompletion }
 * @returns {Promise<object>} a TaskPlan (see createTaskPlan)
 */
async function orchestrateTask(options) {
// One "- role: description" line per non-orchestrator agent.
const agentList = options.availableAgents.filter((a) => !a.isOrchestrator).map((a) => `- ${a.role}: ${a.description}`).join("\n");
const messages = [
{ role: "system", content: `${ORCHESTRATOR_SYS_PROMPT}

Available agents:
${agentList}` },
{ role: "user", content: options.prompt }
];
const responseText = await options.executeCompletion(messages, options.chatConfig);
try {
// Greedy match: takes from the first "{" to the last "}" in the reply.
const jsonMatch = responseText.match(/\{[\s\S]*\}/);
if (!jsonMatch) throw new Error("No JSON found in orchestrator response");
const parsed = JSON.parse(jsonMatch[0]);
return createTaskPlan(options.prompt, parsed.steps || []);
} catch (error) {
// Unparsable plan: degrade to a single step for the first usable agent.
const defaultAgent = options.availableAgents.find((a) => !a.isOrchestrator);
return createTaskPlan(options.prompt, [{
id: "step-1",
role: defaultAgent?.role || DEFAULT_AGENT_ROLE,
subTask: options.prompt,
dependsOn: [],
consumes: [],
produces: ["step-1-result"]
}]);
}
}
|
|
862
|
+
/**
 * In-memory shared workspace for one orchestrated task: holds the plan,
 * the artifacts produced by its steps, and a per-role mailbox (with a
 * broadcast channel keyed "__broadcast__") for inter-agent messages.
 * Serializable via toJSON/fromJSON for checkpointing.
 */
class TaskWorkspace {
  constructor(taskId, plan) {
    this.artifacts = new Map();
    this.mailbox = new Map();
    this.taskId = taskId;
    this.plan = plan;
  }
  /** Store (or overwrite) an artifact under its id. */
  putArtifact(artifact) {
    this.artifacts.set(artifact.id, artifact);
  }
  /** Look up an artifact by id; undefined when absent. */
  getArtifact(id) {
    return this.artifacts.get(id);
  }
  /** All artifacts of a given type. */
  getArtifactsByType(type) {
    const matches = [];
    for (const artifact of this.artifacts.values()) {
      if (artifact.type === type) matches.push(artifact);
    }
    return matches;
  }
  /** All artifacts produced by a given agent role. */
  getArtifactsProducedBy(role) {
    const matches = [];
    for (const artifact of this.artifacts.values()) {
      if (artifact.producedBy === role) matches.push(artifact);
    }
    return matches;
  }
  /** Queue a message; `to: "*"` routes it to the broadcast channel. */
  postMessage(message) {
    const channel = message.to === "*" ? "__broadcast__" : message.to;
    const queue = this.mailbox.get(channel) || [];
    queue.push(message);
    this.mailbox.set(channel, queue);
  }
  /** A role's direct messages followed by all broadcast messages. */
  readMessages(recipientRole) {
    const direct = this.mailbox.get(recipientRole) || [];
    const broadcast = this.mailbox.get("__broadcast__") || [];
    return direct.concat(broadcast);
  }
  /** Drop a role's direct messages (broadcasts are left untouched). */
  clearMessages(recipientRole) {
    this.mailbox.delete(recipientRole);
  }
  /**
   * Update a step's status, optionally attaching its result artifact
   * (also stored in the artifact map); bumps plan.updatedAt.
   * Unknown step ids are ignored.
   */
  updateStepStatus(stepId, status, result) {
    const step = this.plan.steps.find((s) => s.id === stepId);
    if (!step) return;
    step.status = status;
    if (result) {
      step.result = result;
      this.putArtifact(result);
    }
    this.plan.updatedAt = Date.now();
  }
  /** Pending steps whose dependencies have all completed. */
  getNextRunnableSteps() {
    const done = new Set();
    for (const step of this.plan.steps) {
      if (step.status === "completed") done.add(step.id);
    }
    return this.plan.steps.filter(
      (step) => step.status === "pending" && step.dependsOn.every((dep) => done.has(dep))
    );
  }
  /** Plain-object snapshot suitable for persistence. */
  toJSON() {
    return {
      taskId: this.taskId,
      plan: this.plan,
      artifacts: [...this.artifacts.values()],
      mailbox: Object.fromEntries(this.mailbox.entries())
    };
  }
  /** Rebuild a workspace from a toJSON() snapshot. */
  static fromJSON(data) {
    const workspace = new TaskWorkspace(data.taskId, data.plan);
    for (const artifact of data.artifacts || []) {
      workspace.artifacts.set(artifact.id, artifact);
    }
    for (const [channel, messages] of Object.entries(data.mailbox || {})) {
      workspace.mailbox.set(channel, messages);
    }
    return workspace;
  }
}
|
|
932
|
+
// Storage key prefix for individual task checkpoints (one entry per taskId).
const CHECKPOINT_KEY_PREFIX = "ai_task_checkpoint_";
// Storage key holding the array of taskIds that currently have checkpoints.
const REGISTRY_KEY = "ai_task_checkpoint_registry";
|
|
934
|
+
/**
 * Persists task workspaces through `appSettings` so an interrupted task
 * can be resumed later. A side registry array tracks which taskIds have
 * checkpoints.
 */
class TaskCheckpointService {
  /** Serialize and store the workspace under its task id. */
  async save(workspace) {
    await appSettings.set(`${CHECKPOINT_KEY_PREFIX}${workspace.taskId}`, workspace.toJSON());
  }
  /** Rebuild a workspace from storage, or null when no checkpoint exists. */
  async restore(taskId) {
    const stored = await appSettings.get(`${CHECKPOINT_KEY_PREFIX}${taskId}`);
    if (!stored) return null;
    return TaskWorkspace.fromJSON(stored);
  }
  /** Clear a stored checkpoint by overwriting its entry with undefined. */
  async delete(taskId) {
    await appSettings.set(`${CHECKPOINT_KEY_PREFIX}${taskId}`, void 0);
  }
  /** All taskIds currently recorded in the checkpoint registry. */
  async listCheckpoints() {
    return this.getRegistry();
  }
  /** Add a taskId to the registry unless it is already present. */
  async registerCheckpoint(taskId) {
    const known = await this.getRegistry();
    if (known.includes(taskId)) return;
    known.push(taskId);
    await appSettings.set(REGISTRY_KEY, known);
  }
  /** Remove a taskId from the registry. */
  async unregisterCheckpoint(taskId) {
    const known = await this.getRegistry();
    await appSettings.set(REGISTRY_KEY, known.filter((id) => id !== taskId));
  }
  /** Load the registry array, defaulting to empty when unset. */
  async getRegistry() {
    return await appSettings.get(REGISTRY_KEY) || [];
  }
}
|
|
967
|
+
// Shared singleton; TaskRunner saves checkpoints through this instance.
const taskCheckpointService = new TaskCheckpointService();
|
|
968
|
+
/**
 * Drives a task plan to completion: repeatedly runs every currently
 * runnable step (in parallel within a round) via the injected
 * `executeStep`, checkpointing the workspace after each completed step.
 * Stops when the plan is complete, any step fails, no steps are runnable,
 * or the abort signal fires (status "paused").
 */
class TaskRunner {
constructor(executeStep) {
// (step, workspace, options) => Promise<artifact>
this.executeStep = executeStep;
}
/**
 * Execute the workspace's plan.
 * @param {TaskWorkspace} workspace
 * @param {object} options - { signal?, onStepStart?, onStepComplete?, onStepError?, ... }
 * @returns {Promise<{plan, artifacts, errors: Map<string, Error>}>}
 */
async run(workspace, options) {
const plan = workspace.plan;
plan.status = "running";
// Checkpoint the transition into "running" before any step executes.
await taskCheckpointService.save(workspace);
const errors = /* @__PURE__ */ new Map();
while (true) {
if (options.signal?.aborted) {
plan.status = "paused";
break;
}
const runnableSteps = getNextRunnableSteps(plan);
// No runnable steps: either done, or blocked on failed/unmet deps.
if (runnableSteps.length === 0) break;
await Promise.all(runnableSteps.map(async (step) => {
workspace.updateStepStatus(step.id, "running");
options.onStepStart?.(step);
try {
const artifact = await this.executeStep(step, workspace, options);
workspace.updateStepStatus(step.id, "completed", artifact);
options.onStepComplete?.(step, artifact);
// Persist after each successful step so a crash loses at most one step.
await taskCheckpointService.save(workspace);
} catch (error) {
const err = error instanceof Error ? error : new Error(String(error));
workspace.updateStepStatus(step.id, "failed");
errors.set(step.id, err);
options.onStepError?.(step, err);
}
}));
if (isPlanFailed(plan)) {
plan.status = "failed";
break;
}
if (isPlanComplete(plan)) {
plan.status = "completed";
break;
}
}
// NOTE(review): the final status change (paused/failed/completed) is not
// checkpointed here — a restored checkpoint may still read "running".
// Confirm whether a trailing save is intended.
const artifacts = plan.steps.filter((s) => s.result).map((s) => s.result);
return { plan, artifacts, errors };
}
}
|
|
1012
|
+
/**
 * Workflow that first asks the model to decompose the latest user prompt
 * into a task plan (orchestrateTask), then executes the plan's steps via
 * TaskRunner, mapping each step's role back to an agent contribution.
 * All step outputs are concatenated into one "orchestrator" message.
 */
class OrchestratedWorkflowStrategy {
async execute(contributions, options, results, executeAgent) {
const chatConfig = options.chatConfig;
if (!chatConfig) throw new Error("Chat config is required");
// The last history entry is taken as the user request to be planned.
const prompt = options.chatContext.history[options.chatContext.history.length - 1]?.content || "";
const providerFactory = new ProviderFactory();
const plan = await orchestrateTask({
prompt,
availableAgents: contributions,
chatConfig,
context: options.callContext.getProxy(),
executeCompletion: (messages, cfg) => streamToText(messages, cfg, providerFactory)
});
const workspace = new TaskWorkspace(`wf-${Date.now()}`, plan);
const byRole = new Map(contributions.map((c) => [c.role, c]));
// Step executor: run the matching agent (falling back to the first
// contribution) on the step's sub-task and wrap the reply as an artifact.
const runner = new TaskRunner(async (step, ws, _opts) => {
const contrib = byRole.get(step.role) || contributions[0];
const stepMessages = [
...options.chatContext.history,
{ role: "user", content: step.subTask }
];
const finalMessage = await executeAgent(contrib, stepMessages, results.sharedState, chatConfig, options, results);
const content = finalMessage?.content || "";
const artifact = {
id: step.produces[0] || `${step.id}-result`,
type: "text",
content,
producedBy: step.role,
createdAt: Date.now()
};
return artifact;
});
const taskResult = await runner.run(workspace, {
prompt,
chatConfig,
callContext: options.callContext,
signal: options.signal
});
// Merge all non-empty step outputs into one assistant message.
const combined = taskResult.artifacts.map((a) => a.content).filter(Boolean).join("\n\n");
if (combined) {
results.messages.set("orchestrator", { role: "assistant", content: combined });
}
}
}
|
|
1056
|
+
const REVIEWER_SYS_PROMPT = `You are a quality reviewer. Evaluate the provided artifact against the original task.
|
|
1057
|
+
|
|
1058
|
+
Respond with ONLY a JSON object:
|
|
1059
|
+
{
|
|
1060
|
+
"verdict": "approved" | "needs-revision",
|
|
1061
|
+
"score": 0-100,
|
|
1062
|
+
"notes": "<feedback for revision, empty if approved>"
|
|
1063
|
+
}`;
|
|
1064
|
+
/**
 * Ask the model to grade an artifact against the original task. The
 * reviewer must answer with a bare JSON verdict; any failure (request
 * error, no JSON, bad JSON) degrades to an automatic approval with score
 * 70 so a flaky reviewer can never stall the workflow.
 * @param {object} options - { artifact, originalTask, chatConfig, executeCompletion }
 * @returns {Promise<{verdict: "approved"|"needs-revision", score: number, notes: string}>}
 */
async function reviewArtifact(options) {
  const messages = [
    { role: "system", content: REVIEWER_SYS_PROMPT },
    { role: "user", content: `Original task: ${options.originalTask}

Artifact to review:
${options.artifact.content}` }
  ];
  try {
    const responseText = await options.executeCompletion(messages, options.chatConfig);
    const match = responseText.match(/\{[\s\S]*\}/);
    if (!match) throw new Error("No JSON in reviewer response");
    const parsed = JSON.parse(match[0]);
    const verdict = parsed.verdict === "approved" ? "approved" : "needs-revision";
    const score = typeof parsed.score === "number" ? parsed.score : 50;
    return { verdict, score, notes: parsed.notes || "" };
  } catch {
    // Unreadable reviewer output: treat as a pass rather than stalling.
    return { verdict: "approved", score: 70, notes: "" };
  }
}
|
|
1086
|
+
/**
 * Produce/review loop: the first contribution drafts a reply; a reviewer
 * contribution (one whose `reviewerFor` includes the producer's role)
 * grades it via reviewArtifact. On "needs-revision" the producer is
 * re-run with the reviewer's notes appended, up to `maxRevisions` times
 * (default 2). Without a reviewer, the first draft is accepted as-is.
 */
class ReviewWorkflowStrategy {
async execute(contributions, options, results, executeAgent) {
const chatConfig = options.chatConfig;
if (!chatConfig) throw new Error("Chat config is required");
const providerFactory = new ProviderFactory();
// Convention: the producer is always the first contribution.
const producer = contributions[0];
const reviewer = contributions.find((c) => c.reviewerFor?.includes(producer.role));
const maxRevisions = producer.maxRevisions ?? 2;
let currentMessages = [...options.chatContext.history];
let revisions = 0;
while (revisions <= maxRevisions) {
const finalMessage = await executeAgent(producer, currentMessages, results.sharedState, chatConfig, options, results);
if (!finalMessage) break;
// No reviewer configured: accept the first draft unconditionally.
if (!reviewer) {
results.messages.set(producer.role, finalMessage);
break;
}
const artifact = {
id: `draft-${revisions}`,
type: "text",
content: finalMessage.content,
producedBy: producer.role,
createdAt: Date.now()
};
// The last history entry is taken as the task being reviewed against.
const originalPrompt = options.chatContext.history[options.chatContext.history.length - 1]?.content || "";
const review = await reviewArtifact({
artifact,
originalTask: originalPrompt,
chatConfig,
executeCompletion: (messages, cfg) => streamToText(messages, cfg, providerFactory)
});
// Accept on approval, or when the revision budget is exhausted.
if (review.verdict === "approved" || revisions >= maxRevisions) {
results.messages.set(producer.role, finalMessage);
break;
}
// Re-prompt the producer with its own draft plus the reviewer's notes.
currentMessages = [
...options.chatContext.history,
finalMessage,
{ role: "user", content: `Please revise based on this feedback: ${review.notes}` }
];
revisions++;
}
}
}
|
|
1130
|
+
/**
 * Dispatches multi-agent workflow runs to a named execution strategy and
 * publishes lifecycle events (started / complete / error) around each run.
 */
class WorkflowEngine {
  constructor() {
    this.strategies = new Map();
    this.strategies.set("parallel", new ParallelWorkflowStrategy());
    this.strategies.set("sequential", new SequentialWorkflowStrategy());
    this.strategies.set("conditional", new ConditionalWorkflowStrategy());
    this.strategies.set("pipeline", new PipelineWorkflowStrategy());
    this.strategies.set("orchestrated", new OrchestratedWorkflowStrategy());
    this.strategies.set("review", new ReviewWorkflowStrategy());
  }
  /** Register (or replace) a strategy under a name. */
  registerStrategy(name, strategy) {
    this.strategies.set(name, strategy);
  }
  /**
   * Run the contributions under the strategy named by `options.execution`
   * (default "parallel"). Returns the shared results object; rethrows any
   * strategy-level error after publishing a workflow-error event.
   */
  async execute(contributions, options, executeAgent) {
    const workflowId = `workflow-${Date.now()}-${Math.random()}`;
    const execution = options.execution || "parallel";
    const results = {
      messages: new Map(),
      sharedState: { ...(options.sharedState || {}) },
      errors: new Map()
    };
    publish(TOPIC_AGENT_WORKFLOW_STARTED, { workflowId, options });
    try {
      const strategy = this.strategies.get(execution);
      if (!strategy) {
        throw new Error(`Unknown workflow execution strategy: ${execution}`);
      }
      await strategy.execute(contributions, options, results, executeAgent);
      publish(TOPIC_AGENT_WORKFLOW_COMPLETE, { workflowId, results });
      return results;
    } catch (error) {
      const err = error instanceof Error ? error : new Error(String(error));
      publish(TOPIC_AGENT_WORKFLOW_ERROR, { workflowId, error: err });
      throw err;
    }
  }
}
|
|
1166
|
+
/**
 * Heuristic token estimator (no tokenizer dependency). Counts roughly one
 * token per 4 characters plus a 0.3-token per-word adjustment, with fixed
 * overheads for message framing, tool definitions and tool calls.
 * Estimates are approximate — suitable for budgeting, not billing.
 */
class _TokenEstimator {
  /** Rough characters-per-token ratio used by the heuristic. */
  static AVERAGE_CHARS_PER_TOKEN = 4;
  /** Flat token cost assumed for each tool (function) definition. */
  static TOOL_DEFINITION_OVERHEAD = 50;
  /** Flat token cost assumed for each emitted tool call. */
  static TOOL_CALL_OVERHEAD = 10;
  /** Flat token cost assumed for each chat message wrapper. */
  static MESSAGE_OVERHEAD = 4;
  /** Estimate tokens for a string; blank input is 0, anything else at least 1. */
  static estimateTokens(text) {
    if (!text?.trim()) return 0;
    const trimmed = text.trim();
    const charTokens = trimmed.length / this.AVERAGE_CHARS_PER_TOKEN;
    const wordCount = trimmed.split(/\s+/).filter((w) => w.length > 0).length;
    return Math.max(1, Math.ceil(charTokens + wordCount * 0.3));
  }
  /** Estimate tokens for one chat message, including tool-call payloads. */
  static estimateMessageTokens(message) {
    let tokens = this.MESSAGE_OVERHEAD;
    if (message.content) tokens += this.estimateTokens(message.content);
    if (message.role) tokens += this.estimateTokens(message.role);
    for (const tc of message.tool_calls ?? []) {
      tokens += this.estimateTokens(tc.function.name || "");
      tokens += this.estimateTokens(tc.function.arguments || "{}");
      tokens += this.TOOL_CALL_OVERHEAD;
    }
    if (message.tool_call_id) tokens += this.estimateTokens(message.tool_call_id);
    return tokens;
  }
  /** Estimate prompt-side tokens: all messages plus tool definitions. */
  static estimatePromptTokens(messages, tools) {
    let total = 0;
    for (const message of messages) {
      total += this.estimateMessageTokens(message);
    }
    for (const tool of tools ?? []) {
      total += this.TOOL_DEFINITION_OVERHEAD;
      total += this.estimateTokens(tool.function.name || "");
      total += this.estimateTokens(tool.function.description || "");
      if (tool.function.parameters) {
        total += this.estimateTokens(JSON.stringify(tool.function.parameters));
      }
    }
    return total;
  }
  /** Estimate completion-side tokens: content plus any tool calls. */
  static estimateCompletionTokens(content, toolCalls) {
    let tokens = this.estimateTokens(content);
    for (const tc of toolCalls ?? []) {
      tokens += this.TOOL_CALL_OVERHEAD;
      tokens += this.estimateTokens(tc.function?.name || "");
      tokens += this.estimateTokens(tc.function?.arguments || "{}");
    }
    return tokens;
  }
}
let TokenEstimator = _TokenEstimator;
|
|
1211
|
+
// Persistence key for accumulated token usage across sessions.
const TOKEN_USAGE_KEY = "ai_token_usage";
// Zeroed usage record; spread-copied whenever a fresh counter is needed.
const EMPTY_USAGE = { promptTokens: 0, completionTokens: 0, totalTokens: 0, requestCount: 0 };
|
|
1213
|
+
/**
 * Accumulates prompt/completion token usage per provider (plus a grand
 * total) and persists it through `persistenceService`. Data is loaded
 * lazily on first use; `loadPromise` de-duplicates concurrent first loads
 * so the store is read only once.
 */
class TokenUsageTracker {
  constructor() {
    this.data = null;
    this.loadPromise = null;
  }
  /**
   * Lazily load usage data from persistence.
   *
   * Fix: the previous implementation assigned a default object BEFORE the
   * `if (!this.data)` check, so the "initialize and persist defaults"
   * branch was unreachable dead code and a fresh store was never written
   * back. The default snapshot is now persisted the first time no stored
   * data is found.
   */
  async loadData() {
    if (this.data) return this.data;
    if (this.loadPromise) return this.loadPromise;
    this.loadPromise = (async () => {
      const stored = await persistenceService.getObject(TOKEN_USAGE_KEY);
      if (stored) {
        this.data = stored;
      } else {
        this.data = { providers: {}, total: { ...EMPTY_USAGE }, lastUpdated: Date.now() };
        await this.saveData();
      }
      this.loadPromise = null;
      return this.data;
    })();
    return this.loadPromise;
  }
  /** Stamp `lastUpdated` and write the current snapshot to persistence. */
  async saveData() {
    if (!this.data) return;
    this.data.lastUpdated = Date.now();
    await persistenceService.persistObject(TOKEN_USAGE_KEY, this.data);
  }
  /**
   * Add one request's usage to the provider's counters and the grand
   * total, then persist.
   * @param {string} providerName
   * @param {{promptTokens: number, completionTokens: number, totalTokens: number}} usage
   */
  async recordUsage(providerName, usage) {
    await this.loadData();
    if (!this.data) return;
    this.data.providers[providerName] ??= { ...EMPTY_USAGE };
    const provider = this.data.providers[providerName];
    provider.promptTokens += usage.promptTokens;
    provider.completionTokens += usage.completionTokens;
    provider.totalTokens += usage.totalTokens;
    provider.requestCount += 1;
    this.data.total.promptTokens += usage.promptTokens;
    this.data.total.completionTokens += usage.completionTokens;
    this.data.total.totalTokens += usage.totalTokens;
    this.data.total.requestCount += 1;
    await this.saveData();
  }
  /** Usage for one provider, or null when none has been recorded. */
  async getProviderUsage(providerName) {
    await this.loadData();
    return this.data?.providers[providerName] || null;
  }
  /** Map of provider name -> usage record. */
  async getAllProviderUsage() {
    await this.loadData();
    return this.data?.providers || {};
  }
  /** Aggregate usage across all providers. */
  async getTotalUsage() {
    await this.loadData();
    return this.data?.total || { ...EMPTY_USAGE };
  }
  /** Discard all usage and persist an empty snapshot. */
  async reset() {
    this.data = { providers: {}, total: { ...EMPTY_USAGE }, lastUpdated: Date.now() };
    await this.saveData();
  }
  /**
   * Remove one provider's usage, subtracting its counters from the grand
   * total before deleting its entry; no-op for unknown providers.
   */
  async resetProvider(providerName) {
    await this.loadData();
    if (!this.data) return;
    const provider = this.data.providers[providerName];
    if (!provider) return;
    this.data.total.promptTokens -= provider.promptTokens;
    this.data.total.completionTokens -= provider.completionTokens;
    this.data.total.totalTokens -= provider.totalTokens;
    this.data.total.requestCount -= provider.requestCount;
    delete this.data.providers[providerName];
    await this.saveData();
  }
}
|
|
1282
|
+
// Shared singleton tracking token usage for the whole extension.
const tokenUsageTracker = new TokenUsageTracker();
|
|
1283
|
+
// Central AI facade for the bundle: owns provider/agent/tool registries,
// streams completions, runs agent workflows with tool-call loops, and
// plans/executes/resumes multi-step tasks with checkpoints.
class AIService {
  constructor() {
    // requestId -> AbortController for in-flight streamCompletion calls.
    this.activeRequests = /* @__PURE__ */ new Map();
    // taskId -> AbortController for running executeTask invocations.
    this.activeTasks = /* @__PURE__ */ new Map();
    this.toolRegistry = new ToolRegistry();
    this.providerFactory = new ProviderFactory();
    this.agentRegistry = new AgentRegistry();
    this._promptBuilder = new PromptBuilder(this.toolRegistry);
    this.messageProcessor = new MessageProcessorService();
    this.toolExecutor = new ToolExecutor();
    this.workflowEngine = new WorkflowEngine();
    // On settings change, drop the cached config and re-load in the
    // background. NOTE(review): `.then()` with no handlers leaves any
    // rejection unhandled; performConfigCheck has no catch, so a failed
    // appSettings read could surface as an unhandled rejection — verify.
    subscribe(TOPIC_SETTINGS_CHANGED, () => {
      this.aiConfig = void 0;
      this.configCheckPromise = void 0;
      this.checkAIConfig().then();
    });
    // Eagerly warm the config cache at construction (fire-and-forget).
    this.checkAIConfig().then();
  }
  get promptBuilder() {
    return this._promptBuilder;
  }
  getAgentContributions() {
    return this.agentRegistry.getAgentContributions();
  }
  // List configured providers (from cached/loaded aiConfig).
  async getProviders() {
    await this.checkAIConfig();
    return this.aiConfig?.providers || [];
  }
  // Resolve the default provider config: the one named by
  // aiConfig.defaultProvider if present, otherwise the first provider.
  // May return undefined when no providers are configured.
  async getDefaultProvider() {
    await this.checkAIConfig();
    const providers = await this.getProviders();
    if (this.aiConfig?.defaultProvider) {
      const config = providers.find((p) => p.name === this.aiConfig?.defaultProvider);
      if (config) return config;
    }
    return providers[0];
  }
  // Persist a new default provider name and return the resolved config.
  // Note: the name is stored without checking it matches a known provider.
  async setDefaultProvider(defaultProviderName) {
    await this.checkAIConfig();
    if (this.aiConfig) {
      this.aiConfig.defaultProvider = defaultProviderName;
      await appSettings.set(KEY_AI_CONFIG, this.aiConfig);
    }
    return this.getDefaultProvider();
  }
  // Wrap a plain prompt string as a user chat message.
  createMessage(prompt) {
    return { role: "user", content: prompt };
  }
  registerStreamingFetcher(provider) {
    this.providerFactory.registerProvider(provider);
  }
  // Providers contributed by other extensions via the contribution registry.
  getContributedProviders() {
    const contributions = contributionRegistry.getContributions(CID_CHAT_PROVIDERS);
    return contributions.map((c) => c.provider);
  }
  // Append contributed providers not already present (matched by name).
  // Returns the original array unchanged when nothing is missing.
  mergeProviders(existing, contributed) {
    const existingNames = new Set(existing.map((p) => p.name));
    const missing = contributed.filter((p) => !existingNames.has(p.name));
    return missing.length > 0 ? [...existing, ...missing] : existing;
  }
  // First-run path: seed settings from the template plus contributed
  // providers, then read it back (so the returned value is the stored form).
  async createInitialConfig() {
    const initialConfig = { ...AI_CONFIG_TEMPLATE, providers: this.getContributedProviders() };
    await appSettings.set(KEY_AI_CONFIG, initialConfig);
    return appSettings.get(KEY_AI_CONFIG);
  }
  // Merge newly contributed providers into a stored config; persists only
  // when the provider list actually grew.
  async updateConfigWithMissingProviders(config) {
    const merged = this.mergeProviders(config.providers, this.getContributedProviders());
    if (merged.length === config.providers.length) return config;
    const updated = { ...config, providers: merged };
    await appSettings.set(KEY_AI_CONFIG, updated);
    return updated;
  }
  // Ensure aiConfig is loaded. De-duplicates concurrent callers by
  // sharing a single in-flight performConfigCheck promise.
  async checkAIConfig() {
    if (this.aiConfig) return;
    if (this.configCheckPromise) return this.configCheckPromise;
    this.configCheckPromise = this.performConfigCheck();
    return this.configCheckPromise;
  }
  // Load (or create) the config and broadcast the change; always clears
  // the in-flight marker, even on failure.
  async performConfigCheck() {
    try {
      this.aiConfig = await appSettings.get(KEY_AI_CONFIG);
      this.aiConfig = this.aiConfig ? await this.updateConfigWithMissingProviders(this.aiConfig) : await this.createInitialConfig();
      publish(TOPIC_AICONFIG_CHANGED, this.aiConfig);
    } finally {
      this.configCheckPromise = void 0;
    }
  }
  // Flatten shared workflow state + the call-context proxy + extras into a
  // single context object handed to agents/tools. Later spreads win.
  createAgentContext(sharedState, callContext, additional = {}) {
    return { ...sharedState, ...callContext.getProxy(), ...additional };
  }
  // Stream one completion from a provider as an async generator of chunks.
  // Yields "token"/"done"/"error"/other chunks; the generator's RETURN
  // value is { message, tokenUsage } (consumed via iterator.next().value
  // in handleStreamingPromptDirect, not via for-await).
  async *streamCompletion(options) {
    const requestId = `${Date.now()}-${Math.random()}`;
    const abortController = new AbortController();
    this.activeRequests.set(requestId, abortController);
    // Forward an external abort into our controller. NOTE(review): if
    // options.signal is already aborted at this point, the listener never
    // fires — presumably the provider checks the signal itself; verify.
    if (options.signal) {
      options.signal.addEventListener("abort", () => abortController.abort());
    }
    const effectiveSignal = options.signal || abortController.signal;
    try {
      options.onStatus?.("starting");
      publish(TOPIC_AI_STREAM_STARTED, { requestId, options });
      const chatConfig = options.chatConfig || await this.getDefaultProvider();
      const messages = sanitizeMessagesForAPI(options.chatContext.history);
      const provider = this.providerFactory.getProvider(chatConfig);
      const accumulator = this.toolExecutor.createToolCallAccumulator();
      let accumulatedContent = "";
      let accumulatedRole = "assistant";
      let tokenUsage;
      for await (const chunk of provider.stream({
        model: chatConfig.model,
        messages,
        chatConfig,
        tools: options.tools,
        signal: effectiveSignal
      })) {
        if (chunk.type === "error") {
          options.onStatus?.("error");
          publish(TOPIC_AI_STREAM_ERROR, { requestId, chunk });
          yield chunk;
          break;
        }
        if (chunk.type === "token") {
          accumulator.processChunk(chunk);
          // Tool-call deltas are accumulated separately; only plain text
          // chunks contribute to the visible content.
          if (!chunk.toolCalls?.length) accumulatedContent += chunk.content;
          if (chunk.message?.role) accumulatedRole = chunk.message.role;
          if (chunk.content) options.onToken?.(chunk.content);
          options.onStatus?.("streaming");
          options.onProgress?.({ received: accumulatedContent.length });
          publish(TOPIC_AI_STREAM_CHUNK, { requestId, chunk });
          yield chunk;
        } else if (chunk.type === "done") {
          if (chunk.metadata?.usage) tokenUsage = chunk.metadata.usage;
          options.onStatus?.("complete");
          publish(TOPIC_AI_STREAM_COMPLETE, { requestId });
          yield chunk;
          break;
        } else {
          // Unknown chunk types are passed through untouched.
          yield chunk;
        }
      }
      const finalToolCalls = accumulator.getFinalToolCalls();
      const finalMessage = {
        role: accumulatedRole,
        content: accumulatedContent,
        ...finalToolCalls.length > 0 && { toolCalls: finalToolCalls }
      };
      // If the provider reported no usage, estimate it locally and mark
      // the record as estimated.
      if (!tokenUsage) {
        const promptTokens = TokenEstimator.estimatePromptTokens(messages, options.tools);
        const completionTokens = TokenEstimator.estimateCompletionTokens(accumulatedContent, finalToolCalls);
        tokenUsage = { promptTokens, completionTokens, totalTokens: promptTokens + completionTokens, estimated: true };
      }
      // Fire-and-forget persistence; failures are logged, never thrown.
      tokenUsageTracker.recordUsage(chatConfig.name, tokenUsage).catch((err) => {
        logger$1.error(`Failed to record token usage: ${err instanceof Error ? err.message : String(err)}`);
      });
      return { message: finalMessage, tokenUsage };
    } catch (error) {
      // Aborts rethrow without yielding an error chunk.
      if (error instanceof Error && error.name === "AbortError") {
        options.onStatus?.("error");
        publish(TOPIC_AI_STREAM_ERROR, { requestId, error: "Request cancelled" });
        throw error;
      }
      options.onStatus?.("error");
      const errorMessage = error instanceof Error ? error.message : String(error);
      publish(TOPIC_AI_STREAM_ERROR, { requestId, error: errorMessage });
      yield { type: "error", content: errorMessage, metadata: { error } };
      throw error;
    } finally {
      this.activeRequests.delete(requestId);
    }
  }
  // Drive streamCompletion to completion and return only the final
  // message. Manually pumps the iterator so the generator's RETURN value
  // (lost to for-await) can be captured.
  async handleStreamingPromptDirect(options) {
    const stream = this.streamCompletion(options);
    let lastValue;
    while (true) {
      lastValue = await stream.next();
      if (lastValue.done) return lastValue.value.message;
      const chunk = lastValue.value;
      if (chunk.type === "error") throw new Error(chunk.content);
      if (chunk.type === "done") {
        // One more next() after "done" should surface the return value.
        const final = await stream.next();
        if (final.done && final.value) return final.value.message;
        if (!final.done) continue;
        throw new Error("Stream completed without return value");
      }
    }
  }
  // High-level chat entry point: match agents against the latest user
  // prompt and run them as a parallel workflow. Returns a single message
  // when exactly one agent ran, otherwise an array of messages.
  async handleStreamingPrompt(options) {
    const callContext = options.callContext || rootContext.createChild({});
    const agentContext = this.createAgentContext(
      {},
      callContext,
      { userPrompt: options.chatContext.history[options.chatContext.history.length - 1]?.content || "" }
    );
    const matchingAgents = this.agentRegistry.getMatchingAgents(agentContext);
    // Fall back to the generic "assistant" role when nothing matches.
    const roles = matchingAgents.length > 0 ? matchingAgents.map((a) => a.role) : ["assistant"];
    const workflowResult = await this.executeAgentWorkflow({
      chatContext: options.chatContext,
      chatConfig: options.chatConfig,
      callContext,
      execution: "parallel",
      stream: options.stream,
      signal: options.signal,
      // Per-agent callbacks are collapsed to role-less consumer callbacks.
      onToken: (_role, token) => options.onToken?.(token),
      onStatus: (_role, status) => options.onStatus?.(status),
      roles
    });
    const messages = Array.from(workflowResult.messages.values());
    return messages.length === 1 ? messages[0] : messages;
  }
  // Abort one in-flight streaming request. Returns false if unknown.
  cancelRequest(requestId) {
    const controller = this.activeRequests.get(requestId);
    if (!controller) return false;
    controller.abort();
    this.activeRequests.delete(requestId);
    return true;
  }
  // Run matching agents through the workflow engine, delegating each
  // agent invocation back to executeAgent.
  async executeAgentWorkflow(options) {
    const agentContext = this.createAgentContext(options.sharedState || {}, options.callContext);
    const matchingAgents = this.agentRegistry.getMatchingAgents(agentContext, options.roles);
    return this.workflowEngine.execute(
      matchingAgents,
      options,
      (contrib, messages, sharedState, chatConfig, workflowOptions, results) => this.executeAgent(contrib, messages, sharedState, chatConfig, workflowOptions, results)
    );
  }
  // Execute one agent: build its prompt, stream a completion, loop over
  // tool calls (with optional user approval) up to MAX_TOOL_ITERATIONS,
  // post-process the result, and record it into `results`.
  async executeAgent(contrib, messages, sharedState, chatConfig, options, results) {
    options.onAgentStart?.(contrib.role);
    const agentContext = this.createAgentContext(sharedState, options.callContext, {
      userPrompt: messages[messages.length - 1]?.content || ""
    });
    const { messages: preparedMessages, tools } = await this._promptBuilder.build(
      contrib,
      messages,
      agentContext,
      contrib.hooks
    );
    // Strip prepared messages down to the wire shape, keeping tool linkage
    // fields only when present.
    const chatMessages = preparedMessages.map((msg) => {
      const chatMsg = { role: msg.role, content: msg.content };
      if (msg.tool_call_id) chatMsg.tool_call_id = msg.tool_call_id;
      if (msg.tool_calls) chatMsg.tool_calls = msg.tool_calls;
      return chatMsg;
    });
    let rawMessage = await this.handleStreamingPromptDirect({
      chatContext: { history: chatMessages },
      chatConfig,
      callContext: options.callContext,
      stream: options.stream ?? true,
      signal: options.signal,
      onToken: options.onToken ? (token) => options.onToken(contrib.role, token) : void 0,
      tools
    });
    let toolCallIteration = 0;
    const conversationHistory = [...preparedMessages];
    // Keep resolving tool calls until the model answers with plain text
    // or the iteration cap is hit.
    while (rawMessage.toolCalls && rawMessage.toolCalls.length > 0) {
      toolCallIteration++;
      if (toolCallIteration > MAX_TOOL_ITERATIONS) {
        console.warn(`[AIService] Max tool call iterations reached`);
        break;
      }
      let toolResults;
      if (options.requireToolApproval && options.onToolApprovalRequest) {
        // Human-readable "name(k=v, ...)" summary for the approval prompt.
        const toolCallDescriptions = rawMessage.toolCalls.map((tc) => {
          let parsedArgs = {};
          try {
            parsedArgs = JSON.parse(tc.function.arguments || "{}");
          } catch {
            // Malformed arguments: describe the call with no args.
          }
          return `${tc.function.name}(${Object.entries(parsedArgs).map(([k, v]) => `${k}=${v}`).join(", ")})`;
        }).join(", ");
        const approvalRequest = {
          toolCalls: rawMessage.toolCalls,
          message: `The AI wants to execute: ${toolCallDescriptions}`
        };
        const approved = await options.onToolApprovalRequest(contrib.role, approvalRequest);
        if (!approved) {
          // Denied: synthesize cancelled results so the model is told why.
          toolResults = rawMessage.toolCalls.map((tc) => ({
            id: tc.id,
            result: { success: false, message: "Tool execution cancelled by user", cancelled: true }
          }));
        } else {
          toolResults = await this.toolExecutor.executeToolCalls(rawMessage.toolCalls, agentContext);
        }
      } else {
        toolResults = await this.toolExecutor.executeToolCalls(rawMessage.toolCalls, agentContext);
      }
      const toolMessages = toolResults.map((tr) => ({
        role: "tool",
        content: tr.error ? JSON.stringify({ error: tr.error }) : JSON.stringify(tr.result),
        tool_call_id: tr.id
      }));
      const assistantMessage = {
        role: "assistant",
        content: rawMessage.content || ""
      };
      if (rawMessage.toolCalls?.length) {
        // Drop tool calls with blank names; normalize missing arguments.
        assistantMessage.tool_calls = rawMessage.toolCalls.filter((tc) => tc.function.name?.trim()).map((tc) => ({ id: tc.id, type: tc.type, function: { name: tc.function.name, arguments: tc.function.arguments || "{}" } }));
      }
      conversationHistory.push(assistantMessage, ...toolMessages);
      // Re-prompt with the extended history (note: no onToken forwarding
      // on follow-up turns, unlike the initial call).
      rawMessage = await this.handleStreamingPromptDirect({
        chatContext: {
          history: conversationHistory.map((m) => ({
            role: m.role,
            content: m.content,
            ...m.tool_call_id && { tool_call_id: m.tool_call_id },
            ...m.tool_calls && { tool_calls: m.tool_calls }
          }))
        },
        chatConfig,
        callContext: options.callContext,
        stream: options.stream ?? true,
        signal: options.signal,
        tools
      });
      if (rawMessage.content?.trim() && !rawMessage.toolCalls?.length) break;
    }
    const processedMessage = await this.messageProcessor.process(
      rawMessage,
      contrib,
      this.createAgentContext(sharedState, options.callContext, { message: rawMessage })
    );
    if (contrib.hooks?.afterReceive) {
      await contrib.hooks.afterReceive(processedMessage, this.createAgentContext(sharedState, options.callContext));
    }
    // The final message carries the agent's role, not "assistant".
    const finalMessage = { role: contrib.role, content: processedMessage.content };
    results.messages.set(contrib.role, finalMessage);
    options.onAgentComplete?.(contrib.role, finalMessage);
    return finalMessage;
  }
  // Ask the orchestrator to break a prompt into a task plan using the
  // available agent contributions.
  async planTask(prompt, context) {
    const chatConfig = await this.getDefaultProvider();
    const contributions = this.agentRegistry.getAgentContributions();
    // NOTE(review): this child context is created but never used —
    // presumably dead code or relied on for a registration side effect;
    // verify against rootContext.createChild's implementation.
    rootContext.createChild({});
    return orchestrateTask({
      prompt,
      availableAgents: contributions,
      chatConfig,
      context,
      executeCompletion: (messages, cfg) => streamToText(messages, cfg, this.providerFactory)
    });
  }
  // Plan and run a multi-step task: checkpoint it, run each step through
  // the workflow engine, and clean up the checkpoint on success (the
  // checkpoint is intentionally kept on failure so resumeTask can pick
  // it up).
  async executeTask(options) {
    const chatConfig = options.chatConfig || await this.getDefaultProvider();
    const callContext = options.callContext || rootContext.createChild({});
    const contributions = this.agentRegistry.getAgentContributions();
    const plan = await this.planTask(options.prompt, callContext.getProxy());
    options.onPlanReady?.(plan);
    const workspace = new TaskWorkspace(`task-${Date.now()}`, plan);
    await taskCheckpointService.registerCheckpoint(workspace.taskId);
    const abortController = new AbortController();
    this.activeTasks.set(workspace.taskId, abortController);
    const taskOptions = { ...options, signal: options.signal ?? abortController.signal };
    const byRole = new Map(contributions.map((c) => [c.role, c]));
    const runner = new TaskRunner(this.createStepExecutor(byRole, contributions, chatConfig, callContext, taskOptions));
    try {
      const result = await runner.run(workspace, taskOptions);
      await taskCheckpointService.unregisterCheckpoint(workspace.taskId);
      return result;
    } finally {
      this.activeTasks.delete(workspace.taskId);
    }
  }
  // Restore a checkpointed task workspace and continue running it.
  async resumeTask(taskId, options) {
    const workspace = await taskCheckpointService.restore(taskId);
    if (!workspace) throw new Error(`No checkpoint found for task ${taskId}`);
    const chatConfig = options.chatConfig || await this.getDefaultProvider();
    const callContext = options.callContext || rootContext.createChild({});
    const contributions = this.agentRegistry.getAgentContributions();
    const byRole = new Map(contributions.map((c) => [c.role, c]));
    const runner = new TaskRunner(this.createStepExecutor(byRole, contributions, chatConfig, callContext, options));
    return runner.run(workspace, options);
  }
  // Build the per-step executor handed to TaskRunner: run the step's
  // agent (falling back to the first contribution for unknown roles) and
  // wrap its output as a text artifact.
  createStepExecutor(byRole, contributions, chatConfig, callContext, options) {
    return async (step, _ws, _opts) => {
      const contrib = byRole.get(step.role) || contributions[0];
      const stepMessages = [
        ...options.chatContext?.history || [],
        { role: "user", content: step.subTask }
      ];
      const workflowResult = await this.workflowEngine.execute(
        [contrib],
        { chatContext: { history: stepMessages }, chatConfig, callContext, execution: "parallel", stream: true, signal: options.signal, roles: [contrib.role] },
        (c, msgs, state, cfg, wopts, results) => this.executeAgent(c, msgs, state, cfg, wopts, results)
      );
      const msg = workflowResult.messages.get(contrib.role);
      return {
        id: step.produces[0] || `${step.id}-result`,
        type: "text",
        content: msg?.content || "",
        producedBy: step.role,
        createdAt: Date.now()
      };
    };
  }
  // Abort a running task, if it is still active. Unlike cancelRequest,
  // this returns nothing.
  cancelTask(taskId) {
    const controller = this.activeTasks.get(taskId);
    if (controller) {
      controller.abort();
      this.activeTasks.delete(taskId);
    }
  }
}
|
|
1684
|
+
// Module-level singleton AIService instance shared by the bundle's consumers.
const aiService = new AIService();
// Single-letter export aliases generated by the bundler's minifier —
// presumably imported by name from sibling chunks; do not rename or
// reorder by hand.
export {
  AI_CONFIG_TEMPLATE as A,
  BaseProvider as B,
  CID_AGENTS as C,
  DEFAULT_AGENT_ROLE as D,
  aiService as E,
  extractBaseUrl as F,
  sanitizeFunctionName as G,
  streamToText as H,
  EMPTY_USAGE as I,
  tokenUsageTracker as J,
  KEY_AI_CONFIG as K,
  MAX_RECENT_TOOL_CALLS as M,
  OllamaParser as O,
  ParallelWorkflowStrategy as P,
  ReviewWorkflowStrategy as R,
  SSEParser as S,
  TOPIC_AGENT_WORKFLOW_COMPLETE as T,
  WorkflowEngine as W,
  AgentRegistry as a,
  BaseSequentialWorkflow as b,
  CID_CHAT_PROVIDERS as c,
  CID_PROMPT_ENHANCERS as d,
  ConditionalWorkflowStrategy as e,
  MAX_TOOL_ITERATIONS as f,
  MessageProcessorService as g,
  OllamaProvider as h,
  OpenAIProvider as i,
  OrchestratedWorkflowStrategy as j,
  PipelineWorkflowStrategy as k,
  PromptBuilder as l,
  ProviderFactory as m,
  SequentialWorkflowStrategy as n,
  StreamParser as o,
  TOPIC_AGENT_WORKFLOW_ERROR as p,
  TOPIC_AGENT_WORKFLOW_STARTED as q,
  TOPIC_AICONFIG_CHANGED as r,
  TOPIC_AI_STREAM_CHUNK as s,
  TOPIC_AI_STREAM_COMPLETE as t,
  TOPIC_AI_STREAM_ERROR as u,
  TOPIC_AI_STREAM_STARTED as v,
  TokenEstimator as w,
  ToolCallAccumulator as x,
  ToolExecutor as y,
  ToolRegistry as z
};
//# sourceMappingURL=ai-service-CGdlV3FV.js.map
|