@kispace-io/extension-ai-system 0.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +20 -0
- package/src/agents/agent-registry.ts +65 -0
- package/src/agents/index.ts +4 -0
- package/src/agents/message-processor.ts +50 -0
- package/src/agents/prompt-builder.ts +167 -0
- package/src/ai-system-extension.ts +104 -0
- package/src/aisystem.json +154 -0
- package/src/chat-provider-contributions.ts +95 -0
- package/src/core/constants.ts +23 -0
- package/src/core/index.ts +6 -0
- package/src/core/interfaces.ts +137 -0
- package/src/core/types.ts +126 -0
- package/src/general-assistant-prompt.txt +14 -0
- package/src/i18n.json +11 -0
- package/src/index.ts +13 -0
- package/src/prompt-enhancer-contributions.ts +29 -0
- package/src/providers/index.ts +5 -0
- package/src/providers/ollama-provider.ts +13 -0
- package/src/providers/openai-provider.ts +12 -0
- package/src/providers/provider-factory.ts +36 -0
- package/src/providers/provider.ts +156 -0
- package/src/providers/streaming/ollama-parser.ts +114 -0
- package/src/providers/streaming/sse-parser.ts +152 -0
- package/src/providers/streaming/stream-parser.ts +16 -0
- package/src/register.ts +16 -0
- package/src/service/ai-service.ts +744 -0
- package/src/service/token-usage-tracker.ts +139 -0
- package/src/tools/index.ts +4 -0
- package/src/tools/tool-call-accumulator.ts +81 -0
- package/src/tools/tool-executor.ts +174 -0
- package/src/tools/tool-registry.ts +70 -0
- package/src/translation.ts +3 -0
- package/src/utils/token-estimator.ts +87 -0
- package/src/utils/tool-detector.ts +144 -0
- package/src/view/agent-group-manager.ts +146 -0
- package/src/view/components/ai-agent-response-card.ts +198 -0
- package/src/view/components/ai-agent-response-group.ts +220 -0
- package/src/view/components/ai-chat-input.ts +131 -0
- package/src/view/components/ai-chat-message.ts +615 -0
- package/src/view/components/ai-empty-state.ts +52 -0
- package/src/view/components/ai-loading-indicator.ts +91 -0
- package/src/view/components/index.ts +7 -0
- package/src/view/components/k-ai-config-editor.ts +828 -0
- package/src/view/index.ts +6 -0
- package/src/view/k-aiview.ts +901 -0
- package/src/view/k-token-usage.ts +220 -0
- package/src/view/provider-manager.ts +196 -0
- package/src/view/session-manager.ts +255 -0
- package/src/view/stream-manager.ts +123 -0
- package/src/workflows/conditional-workflow.ts +98 -0
- package/src/workflows/index.ts +6 -0
- package/src/workflows/parallel-workflow.ts +45 -0
- package/src/workflows/sequential-workflow.ts +95 -0
- package/src/workflows/workflow-engine.ts +63 -0
- package/src/workflows/workflow-strategy.ts +21 -0
- package/tsconfig.json +12 -0
|
@@ -0,0 +1,744 @@
|
|
|
1
|
+
import { DependencyContext, rootContext } from "@kispace-io/core";
|
|
2
|
+
import { publish, subscribe } from "@kispace-io/core";
|
|
3
|
+
import { appSettings, TOPIC_SETTINGS_CHANGED } from "@kispace-io/core";
|
|
4
|
+
import { contributionRegistry } from "@kispace-io/core";
|
|
5
|
+
import type { ExecutionContext } from "@kispace-io/core";
|
|
6
|
+
import { logger } from "@kispace-io/core";
|
|
7
|
+
|
|
8
|
+
import {
|
|
9
|
+
TOPIC_AI_STREAM_STARTED,
|
|
10
|
+
TOPIC_AI_STREAM_CHUNK,
|
|
11
|
+
TOPIC_AI_STREAM_COMPLETE,
|
|
12
|
+
TOPIC_AI_STREAM_ERROR,
|
|
13
|
+
TOPIC_AICONFIG_CHANGED,
|
|
14
|
+
KEY_AI_CONFIG,
|
|
15
|
+
AI_CONFIG_TEMPLATE,
|
|
16
|
+
MAX_TOOL_ITERATIONS,
|
|
17
|
+
CID_CHAT_PROVIDERS
|
|
18
|
+
} from "../core/constants";
|
|
19
|
+
|
|
20
|
+
import type {
|
|
21
|
+
ChatMessage,
|
|
22
|
+
ChatHistory,
|
|
23
|
+
ChatProvider,
|
|
24
|
+
AIConfig,
|
|
25
|
+
StreamChunk,
|
|
26
|
+
ToolResult,
|
|
27
|
+
AgentWorkflowOptions,
|
|
28
|
+
AgentWorkflowResult,
|
|
29
|
+
UserAttentionRequest,
|
|
30
|
+
UserAttentionHandler,
|
|
31
|
+
AIServiceOptions,
|
|
32
|
+
AIServiceResult
|
|
33
|
+
} from "../core";
|
|
34
|
+
|
|
35
|
+
import type {
|
|
36
|
+
AgentContribution,
|
|
37
|
+
IProvider,
|
|
38
|
+
ChatProviderContribution
|
|
39
|
+
} from "../core/interfaces";
|
|
40
|
+
|
|
41
|
+
import { ProviderFactory } from "../providers/provider-factory";
|
|
42
|
+
import { AgentRegistry } from "../agents/agent-registry";
|
|
43
|
+
import { PromptBuilder } from "../agents/prompt-builder";
|
|
44
|
+
import { MessageProcessorService } from "../agents/message-processor";
|
|
45
|
+
import { ToolExecutor } from "../tools/tool-executor";
|
|
46
|
+
import { WorkflowEngine } from "../workflows/workflow-engine";
|
|
47
|
+
import { ToolRegistry } from "../tools/tool-registry";
|
|
48
|
+
import { TokenEstimator } from "../utils/token-estimator";
|
|
49
|
+
import { tokenUsageTracker } from "./token-usage-tracker";
|
|
50
|
+
import type { TokenUsage } from "../core/types";
|
|
51
|
+
|
|
52
|
+
/**
 * Central orchestration service for AI chat: resolves provider configuration,
 * streams completions, executes tool calls, and runs multi-agent workflows.
 */
export class AIService {
  // Cached AI configuration; cleared on settings change and re-read lazily.
  private aiConfig?: AIConfig;
  // In-flight config load; de-duplicates concurrent checkAIConfig() calls.
  private configCheckPromise?: Promise<void>;
  private providerFactory: ProviderFactory;
  private agentRegistry: AgentRegistry;
  private promptBuilder: PromptBuilder;
  private messageProcessor: MessageProcessorService;
  private toolExecutor: ToolExecutor;
  private workflowEngine: WorkflowEngine;
  private toolRegistry: ToolRegistry;
  // Abort controllers for in-flight streaming requests, keyed by request id.
  private activeRequests: Map<string, AbortController> = new Map();
|
|
63
|
+
|
|
64
|
+
/**
 * Wires up collaborating services and eagerly loads the AI configuration.
 * The config cache is invalidated whenever application settings change.
 */
constructor() {
  this.providerFactory = new ProviderFactory();
  this.agentRegistry = new AgentRegistry();
  this.promptBuilder = new PromptBuilder();
  this.messageProcessor = new MessageProcessorService();
  this.toolExecutor = new ToolExecutor();
  this.workflowEngine = new WorkflowEngine();
  this.toolRegistry = new ToolRegistry();

  subscribe(TOPIC_SETTINGS_CHANGED, () => {
    // Drop the cached config (and any in-flight load) so the next access re-reads settings.
    this.aiConfig = undefined;
    this.configCheckPromise = undefined;
    // Fire-and-forget reload; log failures instead of leaving an unhandled
    // rejection (the original `.then()` attached no handlers at all).
    this.checkAIConfig().catch(err => {
      logger.error(`AI config check failed: ${err instanceof Error ? err.message : String(err)}`);
    });
  });

  this.checkAIConfig().catch(err => {
    logger.error(`AI config check failed: ${err instanceof Error ? err.message : String(err)}`);
  });
}
|
|
81
|
+
|
|
82
|
+
/** Expose every agent contribution currently known to the registry. */
getAgentContributions(): AgentContribution[] {
  const contributions = this.agentRegistry.getAgentContributions();
  return contributions;
}
|
|
85
|
+
|
|
86
|
+
async getProviders(): Promise<ChatProvider[]> {
|
|
87
|
+
await this.checkAIConfig();
|
|
88
|
+
return this.aiConfig?.providers || [];
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
async getDefaultProvider(): Promise<ChatProvider> {
|
|
92
|
+
await this.checkAIConfig();
|
|
93
|
+
const providers = await this.getProviders();
|
|
94
|
+
if (this.aiConfig?.defaultProvider) {
|
|
95
|
+
const config = providers.find(p => p.name === this.aiConfig?.defaultProvider);
|
|
96
|
+
if (config) {
|
|
97
|
+
return config;
|
|
98
|
+
}
|
|
99
|
+
}
|
|
100
|
+
return providers[0];
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
async setDefaultProvider(defaultProviderName: string): Promise<ChatProvider> {
|
|
104
|
+
await this.checkAIConfig();
|
|
105
|
+
if (this.aiConfig) {
|
|
106
|
+
this.aiConfig.defaultProvider = defaultProviderName;
|
|
107
|
+
await appSettings.set(KEY_AI_CONFIG, this.aiConfig);
|
|
108
|
+
}
|
|
109
|
+
return this.getDefaultProvider();
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
/** Wrap a raw prompt string as a user-role chat message. */
createMessage(prompt: string): ChatMessage {
  const message: ChatMessage = { role: "user", content: prompt };
  return message;
}
|
|
118
|
+
|
|
119
|
+
registerStreamingFetcher(provider: IProvider): void {
|
|
120
|
+
this.providerFactory.registerProvider(provider);
|
|
121
|
+
}
|
|
122
|
+
|
|
123
|
+
/** Collect chat providers contributed through the contribution registry. */
private getContributedProviders(): ChatProvider[] {
  const entries = contributionRegistry.getContributions(CID_CHAT_PROVIDERS) as ChatProviderContribution[];
  const providers: ChatProvider[] = [];
  for (const entry of entries) {
    providers.push(entry.provider);
  }
  return providers;
}
|
|
127
|
+
|
|
128
|
+
/**
 * Append contributed providers whose names are not already configured.
 * Returns the original array unchanged when nothing is missing.
 */
private mergeProviders(existing: ChatProvider[], contributed: ChatProvider[]): ChatProvider[] {
  const knownNames = new Set(existing.map(p => p.name));
  const additions: ChatProvider[] = [];
  for (const provider of contributed) {
    if (!knownNames.has(provider.name)) {
      additions.push(provider);
    }
  }
  if (additions.length === 0) {
    return existing;
  }
  return [...existing, ...additions];
}
|
|
133
|
+
|
|
134
|
+
private async createInitialConfig(): Promise<AIConfig> {
|
|
135
|
+
const contributedProviders = this.getContributedProviders();
|
|
136
|
+
const initialConfig: AIConfig = {
|
|
137
|
+
...AI_CONFIG_TEMPLATE,
|
|
138
|
+
providers: contributedProviders
|
|
139
|
+
};
|
|
140
|
+
await appSettings.set(KEY_AI_CONFIG, initialConfig);
|
|
141
|
+
return await appSettings.get(KEY_AI_CONFIG);
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
private async updateConfigWithMissingProviders(config: AIConfig): Promise<AIConfig> {
|
|
145
|
+
const contributedProviders = this.getContributedProviders();
|
|
146
|
+
const mergedProviders = this.mergeProviders(config.providers, contributedProviders);
|
|
147
|
+
|
|
148
|
+
if (mergedProviders.length !== config.providers.length) {
|
|
149
|
+
const updatedConfig: AIConfig = {
|
|
150
|
+
...config,
|
|
151
|
+
providers: mergedProviders
|
|
152
|
+
};
|
|
153
|
+
await appSettings.set(KEY_AI_CONFIG, updatedConfig);
|
|
154
|
+
return updatedConfig;
|
|
155
|
+
}
|
|
156
|
+
|
|
157
|
+
return config;
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
private async checkAIConfig(): Promise<void> {
|
|
161
|
+
if (this.aiConfig) {
|
|
162
|
+
return;
|
|
163
|
+
}
|
|
164
|
+
|
|
165
|
+
if (this.configCheckPromise) {
|
|
166
|
+
return this.configCheckPromise;
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
this.configCheckPromise = this.performConfigCheck();
|
|
170
|
+
return this.configCheckPromise;
|
|
171
|
+
}
|
|
172
|
+
|
|
173
|
+
private async performConfigCheck(): Promise<void> {
|
|
174
|
+
try {
|
|
175
|
+
this.aiConfig = await appSettings.get(KEY_AI_CONFIG);
|
|
176
|
+
|
|
177
|
+
if (!this.aiConfig) {
|
|
178
|
+
this.aiConfig = await this.createInitialConfig();
|
|
179
|
+
} else {
|
|
180
|
+
this.aiConfig = await this.updateConfigWithMissingProviders(this.aiConfig);
|
|
181
|
+
}
|
|
182
|
+
|
|
183
|
+
publish(TOPIC_AICONFIG_CHANGED, this.aiConfig);
|
|
184
|
+
} finally {
|
|
185
|
+
this.configCheckPromise = undefined;
|
|
186
|
+
}
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
/**
 * Compose an agent execution context from shared workflow state, the
 * dependency-injection proxy of the call context, and optional extras.
 * Later spreads win on key collisions (additional > proxy > shared state).
 */
private createAgentContext(
  sharedState: ExecutionContext,
  callContext: DependencyContext,
  additional: Partial<ExecutionContext> = {}
): ExecutionContext {
  const proxy = callContext.getProxy();
  return { ...sharedState, ...proxy, ...additional };
}
|
|
200
|
+
|
|
201
|
+
/** Re-stamp a processed message with the contributing agent's role. */
private createFinalMessage(contrib: AgentContribution, processedMessage: ChatMessage): ChatMessage {
  const { content, actions, requiresAttention, attentionRequests, canContinue } = processedMessage;
  return {
    role: contrib.role,
    content,
    actions,
    requiresAttention,
    attentionRequests,
    canContinue
  };
}
|
|
211
|
+
|
|
212
|
+
/**
 * Process any user-attention requests attached to an agent's message.
 *
 * Gathers requests from the message itself and from its actions, records them
 * on the workflow result, notifies the optional onAttentionRequest callback,
 * and then either pauses the workflow (pauseOnAttention) or dispatches each
 * request to the configured userAttentionHandler.
 *
 * @returns true when the workflow may continue; false when it was paused or a
 *          required confirmation/input was denied or missing.
 */
private async handleUserAttention(
  role: string,
  message: ChatMessage,
  options: AgentWorkflowOptions,
  results: AgentWorkflowResult
): Promise<boolean> {
  // Nothing to do when the message needs no attention or no handler exists.
  if (!message.requiresAttention || !options.userAttentionHandler) {
    return true;
  }

  const attentionRequests: UserAttentionRequest[] = [];

  if (message.attentionRequests) {
    attentionRequests.push(...message.attentionRequests);
  }

  // Actions may carry their own attention requests; collect those too.
  if (message.actions) {
    for (const action of message.actions) {
      if (action.requiresAttention && action.attentionRequest) {
        attentionRequests.push(action.attentionRequest);
      }
    }
  }

  if (attentionRequests.length === 0) {
    return true;
  }

  // Record pending requests per role so a paused workflow can be resumed.
  if (!results.pendingAttention) {
    results.pendingAttention = new Map();
  }
  results.pendingAttention.set(role, attentionRequests);

  if (options.onAttentionRequest) {
    for (const request of attentionRequests) {
      options.onAttentionRequest(role, request);
    }
  }

  // Pause mode: stop here and hand back a continuation token instead of
  // resolving the requests inline.
  if (options.pauseOnAttention) {
    results.paused = true;
    results.continuationToken = `${role}-${Date.now()}-${Math.random()}`;
    return false;
  }

  const handler = options.userAttentionHandler;
  const agentContext = this.createAgentContext(
    options.sharedState || {},
    options.callContext,
    { message }
  );

  for (const request of attentionRequests) {
    if (handler.onAttentionRequest) {
      // Generic handler takes precedence over the typed fallbacks below.
      const result = await handler.onAttentionRequest(request, agentContext);
      // Explicit false/null on a required request blocks continuation.
      if (request.requiresAction && (result === false || result === null)) {
        return false;
      }
      // String results are stashed in the context for downstream agents.
      if (result && typeof result === 'string') {
        agentContext[`attention_${request.type}_result`] = result;
      }
    } else {
      // Fall back to per-type handlers; unknown types are silently skipped.
      switch (request.type) {
        case 'confirmation':
          if (handler.onConfirmation) {
            const confirmed = await handler.onConfirmation(request.message, agentContext);
            if (!confirmed && request.requiresAction) {
              return false;
            }
          }
          break;
        case 'input':
          if (handler.onInput) {
            const input = await handler.onInput(request.message, undefined, agentContext);
            if (!input && request.requiresAction) {
              return false;
            }
            if (input) {
              agentContext[`attention_input_result`] = input;
            }
          }
          break;
      }
    }
  }

  return true;
}
|
|
300
|
+
|
|
301
|
+
async *streamCompletion(options: AIServiceOptions): AsyncGenerator<StreamChunk, AIServiceResult> {
|
|
302
|
+
const requestId = `${Date.now()}-${Math.random()}`;
|
|
303
|
+
const abortController = new AbortController();
|
|
304
|
+
this.activeRequests.set(requestId, abortController);
|
|
305
|
+
|
|
306
|
+
if (options.signal) {
|
|
307
|
+
options.signal.addEventListener('abort', () => {
|
|
308
|
+
abortController.abort();
|
|
309
|
+
});
|
|
310
|
+
}
|
|
311
|
+
|
|
312
|
+
const effectiveSignal = options.signal || abortController.signal;
|
|
313
|
+
|
|
314
|
+
try {
|
|
315
|
+
options.onStatus?.('starting');
|
|
316
|
+
publish(TOPIC_AI_STREAM_STARTED, { requestId, options });
|
|
317
|
+
|
|
318
|
+
const chatConfig = options.chatConfig || await this.getDefaultProvider();
|
|
319
|
+
const messages = this.sanitizeMessagesForAPI(options.chatContext.history);
|
|
320
|
+
|
|
321
|
+
const provider = this.providerFactory.getProvider(chatConfig);
|
|
322
|
+
|
|
323
|
+
const accumulator = this.toolExecutor.createToolCallAccumulator();
|
|
324
|
+
let accumulatedContent = '';
|
|
325
|
+
let accumulatedMessage: Partial<ChatMessage> = {
|
|
326
|
+
role: 'assistant',
|
|
327
|
+
content: ''
|
|
328
|
+
};
|
|
329
|
+
let tokenUsage: TokenUsage | undefined;
|
|
330
|
+
|
|
331
|
+
for await (const chunk of provider.stream({
|
|
332
|
+
model: chatConfig.model,
|
|
333
|
+
messages,
|
|
334
|
+
chatConfig,
|
|
335
|
+
tools: options.tools,
|
|
336
|
+
signal: effectiveSignal
|
|
337
|
+
})) {
|
|
338
|
+
if (chunk.type === 'error') {
|
|
339
|
+
options.onStatus?.('error');
|
|
340
|
+
publish(TOPIC_AI_STREAM_ERROR, { requestId, chunk });
|
|
341
|
+
yield chunk;
|
|
342
|
+
break;
|
|
343
|
+
}
|
|
344
|
+
|
|
345
|
+
if (chunk.type === 'token') {
|
|
346
|
+
accumulator.processChunk(chunk);
|
|
347
|
+
|
|
348
|
+
if (!chunk.toolCalls || chunk.toolCalls.length === 0) {
|
|
349
|
+
accumulatedContent += chunk.content;
|
|
350
|
+
accumulatedMessage.content = accumulatedContent;
|
|
351
|
+
}
|
|
352
|
+
|
|
353
|
+
if (chunk.message?.role) {
|
|
354
|
+
accumulatedMessage.role = chunk.message.role;
|
|
355
|
+
}
|
|
356
|
+
|
|
357
|
+
if (chunk.content) {
|
|
358
|
+
options.onToken?.(chunk.content);
|
|
359
|
+
}
|
|
360
|
+
options.onStatus?.('streaming');
|
|
361
|
+
options.onProgress?.({ received: accumulatedContent.length });
|
|
362
|
+
|
|
363
|
+
publish(TOPIC_AI_STREAM_CHUNK, { requestId, chunk });
|
|
364
|
+
yield chunk;
|
|
365
|
+
} else if (chunk.type === 'done') {
|
|
366
|
+
if (chunk.metadata?.usage) {
|
|
367
|
+
tokenUsage = chunk.metadata.usage as TokenUsage;
|
|
368
|
+
}
|
|
369
|
+
options.onStatus?.('complete');
|
|
370
|
+
publish(TOPIC_AI_STREAM_COMPLETE, { requestId });
|
|
371
|
+
yield chunk;
|
|
372
|
+
break;
|
|
373
|
+
} else {
|
|
374
|
+
yield chunk;
|
|
375
|
+
}
|
|
376
|
+
}
|
|
377
|
+
|
|
378
|
+
const finalToolCalls = accumulator.getFinalToolCalls();
|
|
379
|
+
const finalMessage: ChatMessage = {
|
|
380
|
+
role: accumulatedMessage.role || 'assistant',
|
|
381
|
+
content: accumulatedContent || '',
|
|
382
|
+
...(finalToolCalls.length > 0 && { toolCalls: finalToolCalls })
|
|
383
|
+
};
|
|
384
|
+
|
|
385
|
+
if (!tokenUsage) {
|
|
386
|
+
const promptTokens = TokenEstimator.estimatePromptTokens(messages, options.tools);
|
|
387
|
+
const completionTokens = TokenEstimator.estimateCompletionTokens(
|
|
388
|
+
accumulatedContent,
|
|
389
|
+
finalToolCalls
|
|
390
|
+
);
|
|
391
|
+
tokenUsage = {
|
|
392
|
+
promptTokens,
|
|
393
|
+
completionTokens,
|
|
394
|
+
totalTokens: promptTokens + completionTokens,
|
|
395
|
+
estimated: true
|
|
396
|
+
};
|
|
397
|
+
}
|
|
398
|
+
|
|
399
|
+
tokenUsageTracker.recordUsage(chatConfig.name, tokenUsage).catch(err => {
|
|
400
|
+
logger.error(`Failed to record token usage: ${err instanceof Error ? err.message : String(err)}`);
|
|
401
|
+
});
|
|
402
|
+
|
|
403
|
+
return {
|
|
404
|
+
message: finalMessage,
|
|
405
|
+
tokenUsage
|
|
406
|
+
};
|
|
407
|
+
} catch (error) {
|
|
408
|
+
if (error instanceof Error && error.name === 'AbortError') {
|
|
409
|
+
options.onStatus?.('error');
|
|
410
|
+
publish(TOPIC_AI_STREAM_ERROR, { requestId, error: 'Request cancelled' });
|
|
411
|
+
throw error;
|
|
412
|
+
}
|
|
413
|
+
|
|
414
|
+
options.onStatus?.('error');
|
|
415
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
416
|
+
publish(TOPIC_AI_STREAM_ERROR, { requestId, error: errorMessage });
|
|
417
|
+
|
|
418
|
+
yield {
|
|
419
|
+
type: 'error',
|
|
420
|
+
content: errorMessage,
|
|
421
|
+
metadata: { error }
|
|
422
|
+
};
|
|
423
|
+
|
|
424
|
+
throw error;
|
|
425
|
+
} finally {
|
|
426
|
+
this.activeRequests.delete(requestId);
|
|
427
|
+
}
|
|
428
|
+
}
|
|
429
|
+
|
|
430
|
+
/**
 * Drive streamCompletion() to completion and return only the final message.
 *
 * Drains the async generator, discarding intermediate chunks; an 'error'
 * chunk is converted into a thrown Error. After a 'done' chunk one more
 * next() is needed to reach the generator's return value.
 *
 * @throws Error when the stream yields an error chunk or finishes without a
 *         return value.
 */
async handleStreamingPromptDirect(
  options: AIServiceOptions & { tools?: import("../core/types").ToolDefinition[] }
): Promise<ChatMessage> {
  const stream = this.streamCompletion(options);
  let lastValue: IteratorResult<StreamChunk, AIServiceResult>;

  while (true) {
    lastValue = await stream.next();

    // Generator returned: its return value carries the final AIServiceResult.
    if (lastValue.done) {
      return lastValue.value.message;
    }

    const chunk = lastValue.value;
    if (chunk.type === 'error') {
      throw new Error(chunk.content);
    }

    if (chunk.type === 'done') {
      // The 'done' chunk is yielded before the generator returns, so one
      // more next() is required to obtain the return value.
      const final = await stream.next();
      if (final.done && final.value) {
        return final.value.message;
      }
      if (!final.done) {
        // Generator yielded again after 'done' — keep draining.
        continue;
      }
      throw new Error('Stream completed without return value');
    }
  }
}
|
|
460
|
+
|
|
461
|
+
/**
 * Reduce a message to the fields the provider API understands:
 * role and content, plus optional tool-call linkage when present.
 */
private sanitizeMessageForAPI(message: ChatMessage | import("../core/types").ApiMessage): import("../core/types").ApiMessage {
  const sanitized: import("../core/types").ApiMessage = {
    role: message.role,
    content: message.content
  };

  if ('tool_call_id' in message && message.tool_call_id) {
    sanitized.tool_call_id = message.tool_call_id;
  }

  const toolCalls = 'tool_calls' in message ? (message as any).tool_calls : undefined;
  if (toolCalls) {
    sanitized.tool_calls = toolCalls;
  }

  return sanitized;
}
|
|
477
|
+
|
|
478
|
+
/** Sanitize each message in a history for transmission to the provider API. */
private sanitizeMessagesForAPI(messages: (ChatMessage | import("../core/types").ApiMessage)[]): import("../core/types").ApiMessage[] {
  const sanitized: import("../core/types").ApiMessage[] = [];
  for (const message of messages) {
    sanitized.push(this.sanitizeMessageForAPI(message));
  }
  return sanitized;
}
|
|
481
|
+
|
|
482
|
+
/**
 * High-level entry point: route a chat prompt through the agent workflow.
 *
 * Selects agents whose conditions match the current context (falling back to
 * a plain 'assistant' role when none match), runs them in parallel, and
 * forwards token/status callbacks from the per-role workflow callbacks to the
 * caller's role-agnostic ones.
 *
 * @returns a single message when exactly one agent responded, otherwise the
 *          array of agent messages.
 */
async handleStreamingPrompt(options: AIServiceOptions): Promise<ChatMessage | ChatMessage[]> {
  const callContext = options.callContext || rootContext.createChild({});
  // The last history entry is treated as the user's current prompt.
  const agentContext = this.createAgentContext(
    {},
    callContext,
    { userPrompt: options.chatContext.history[options.chatContext.history.length - 1]?.content || "" }
  );

  const matchingAgents = this.agentRegistry.getMatchingAgents(agentContext);
  const roles = matchingAgents.length > 0
    ? matchingAgents.map(a => a.role)
    : ['assistant'];

  const workflowResult = await this.executeAgentWorkflow({
    chatContext: options.chatContext,
    chatConfig: options.chatConfig,
    callContext: callContext,
    execution: 'parallel',
    stream: options.stream,
    signal: options.signal,
    // Per-role callbacks are collapsed onto the caller's role-agnostic ones.
    onToken: (role: string, token: string) => {
      options.onToken?.(token);
    },
    onStatus: (role: string, status: 'starting' | 'streaming' | 'complete' | 'error') => {
      options.onStatus?.(status);
    },
    roles
  });

  const agentMessages = Array.from(workflowResult.messages.values());
  return agentMessages.length === 1 ? agentMessages[0] : agentMessages;
}
|
|
514
|
+
|
|
515
|
+
async continueWorkflow(
|
|
516
|
+
continuationToken: string,
|
|
517
|
+
userResponses: Map<string, any>,
|
|
518
|
+
previousOptions: AgentWorkflowOptions
|
|
519
|
+
): Promise<AgentWorkflowResult> {
|
|
520
|
+
const results: AgentWorkflowResult = {
|
|
521
|
+
messages: new Map(),
|
|
522
|
+
sharedState: { ...previousOptions.sharedState, ...Object.fromEntries(userResponses) },
|
|
523
|
+
errors: new Map()
|
|
524
|
+
};
|
|
525
|
+
|
|
526
|
+
const options: AgentWorkflowOptions = {
|
|
527
|
+
...previousOptions,
|
|
528
|
+
sharedState: results.sharedState,
|
|
529
|
+
pauseOnAttention: false
|
|
530
|
+
};
|
|
531
|
+
|
|
532
|
+
return this.executeAgentWorkflow(options);
|
|
533
|
+
}
|
|
534
|
+
|
|
535
|
+
/**
 * Abort an in-flight streaming request by id.
 * @returns true when a matching request was found and aborted.
 */
cancelRequest(requestId: string): boolean {
  const controller = this.activeRequests.get(requestId);
  if (!controller) {
    return false;
  }
  controller.abort();
  this.activeRequests.delete(requestId);
  return true;
}
|
|
544
|
+
|
|
545
|
+
async executeAgentWorkflow(options: AgentWorkflowOptions): Promise<AgentWorkflowResult> {
|
|
546
|
+
const agentContext = this.createAgentContext(
|
|
547
|
+
options.sharedState || {},
|
|
548
|
+
options.callContext
|
|
549
|
+
);
|
|
550
|
+
|
|
551
|
+
const matchingAgents = this.agentRegistry.getMatchingAgents(agentContext, options.roles);
|
|
552
|
+
|
|
553
|
+
return this.workflowEngine.execute(
|
|
554
|
+
matchingAgents,
|
|
555
|
+
options,
|
|
556
|
+
async (contrib, messages, sharedState, chatConfig, workflowOptions, results) => {
|
|
557
|
+
return this.executeAgent(
|
|
558
|
+
contrib,
|
|
559
|
+
messages,
|
|
560
|
+
sharedState,
|
|
561
|
+
chatConfig,
|
|
562
|
+
workflowOptions,
|
|
563
|
+
results
|
|
564
|
+
);
|
|
565
|
+
}
|
|
566
|
+
);
|
|
567
|
+
}
|
|
568
|
+
|
|
569
|
+
/**
 * Execute a single agent: build its prompt, stream a completion, run any
 * requested tool calls in a bounded loop, post-process the result, and handle
 * user-attention requests.
 *
 * @returns the agent's final message, or null when the workflow was paused
 *          by an unresolved attention request.
 */
private async executeAgent(
  contrib: AgentContribution,
  messages: ChatMessage[],
  sharedState: ExecutionContext,
  chatConfig: ChatProvider,
  options: AgentWorkflowOptions,
  results: AgentWorkflowResult
): Promise<ChatMessage | null> {
  options.onAgentStart?.(contrib.role);

  // The last incoming message is treated as the user's current prompt.
  const agentContext = this.createAgentContext(sharedState, options.callContext, {
    userPrompt: messages[messages.length - 1]?.content || ""
  });

  const { messages: preparedMessages, tools } = await this.promptBuilder.build(
    contrib,
    messages,
    agentContext,
    contrib.hooks
  );

  // Convert prepared API messages to ChatMessages, preserving tool linkage.
  const chatMessages: ChatMessage[] = preparedMessages.map(msg => {
    const chatMsg: ChatMessage = { role: msg.role, content: msg.content };
    if (msg.tool_call_id) {
      (chatMsg as any).tool_call_id = msg.tool_call_id;
    }
    if (msg.tool_calls) {
      (chatMsg as any).tool_calls = msg.tool_calls;
    }
    return chatMsg;
  });

  let rawMessage = await this.handleStreamingPromptDirect({
    chatContext: { history: chatMessages },
    chatConfig,
    callContext: options.callContext,
    stream: options.stream ?? true,
    signal: options.signal,
    tools
  });

  let toolCallIteration = 0;
  const conversationHistory: import("../core/types").ApiMessage[] = [...preparedMessages];

  // Tool-call loop: execute requested tools, feed results back to the model,
  // repeat until it answers with content or MAX_TOOL_ITERATIONS is hit.
  while (rawMessage.toolCalls && rawMessage.toolCalls.length > 0) {
    toolCallIteration++;

    if (toolCallIteration > MAX_TOOL_ITERATIONS) {
      console.warn(`[AIService] Maximum tool call iterations (${MAX_TOOL_ITERATIONS}) reached`);
      break;
    }

    let toolResults: ToolResult[] = [];

    if (options.requireToolApproval && options.onToolApprovalRequest) {
      // Build a human-readable "name(arg=value, ...)" summary for approval UI.
      const toolCallDescriptions = rawMessage.toolCalls.map(tc => {
        const args = tc.function.arguments || "{}";
        let parsedArgs: any = {};
        try {
          parsedArgs = JSON.parse(args);
        } catch (e) {
          // Unparseable arguments render as an empty argument list.
          parsedArgs = {};
        }
        return `${tc.function.name}(${Object.entries(parsedArgs).map(([k, v]) => `${k}=${v}`).join(", ")})`;
      }).join(", ");

      const approvalRequest: import("../core/interfaces").ToolApprovalRequest = {
        toolCalls: rawMessage.toolCalls,
        message: `The AI wants to execute: ${toolCallDescriptions}`
      };

      const approved = await options.onToolApprovalRequest(contrib.role, approvalRequest);
      if (!approved) {
        // Denied: synthesize cancelled results so the model learns the
        // tools did not run, rather than dropping the calls silently.
        toolResults = rawMessage.toolCalls.map(tc => ({
          id: tc.id,
          result: {
            success: false,
            message: "Tool execution cancelled by user",
            cancelled: true
          }
        }));
      } else {
        toolResults = await this.toolExecutor.executeToolCalls(rawMessage.toolCalls, agentContext);
      }
    } else {
      toolResults = await this.toolExecutor.executeToolCalls(rawMessage.toolCalls, agentContext);
    }

    // One tool-role message per result, linked back via tool_call_id.
    const toolMessages: import("../core/types").ApiMessage[] = toolResults.map((tr) => ({
      role: "tool",
      content: tr.error ? JSON.stringify({ error: tr.error }) : JSON.stringify(tr.result),
      tool_call_id: tr.id
    }));

    const assistantMessage: any = {
      role: "assistant",
      content: rawMessage.content || ""
    };

    if (rawMessage.toolCalls && rawMessage.toolCalls.length > 0) {
      // Drop calls with empty function names; default arguments to "{}".
      assistantMessage.tool_calls = rawMessage.toolCalls
        .filter(tc => tc.function.name && tc.function.name.trim().length > 0)
        .map(tc => ({
          id: tc.id,
          type: tc.type,
          function: {
            name: tc.function.name,
            arguments: tc.function.arguments || "{}"
          }
        }));
    }

    conversationHistory.push(assistantMessage, ...toolMessages);

    const updatedMessages = conversationHistory;

    // Re-prompt the model with the tool results appended to the history.
    rawMessage = await this.handleStreamingPromptDirect({
      chatContext: {
        history: updatedMessages.map(m => ({
          role: m.role,
          content: m.content,
          ...(m.tool_call_id && { tool_call_id: m.tool_call_id }),
          ...(m.tool_calls && { tool_calls: m.tool_calls })
        } as ChatMessage))
      },
      chatConfig,
      callContext: options.callContext,
      stream: options.stream ?? true,
      signal: options.signal,
      tools
    });

    const hasContent = rawMessage.content && rawMessage.content.trim().length > 0;
    const hasToolCalls = rawMessage.toolCalls && rawMessage.toolCalls.length > 0;

    // Plain content with no further tool calls means the agent is done.
    if (hasContent && !hasToolCalls) {
      break;
    }

    // Content alongside tool calls is treated as completion (calls ignored).
    if (hasContent && hasToolCalls) {
      console.warn(`[AIService] Model provided content but also called tools - treating as completion`);
      break;
    }
  }

  const processedMessage = await this.messageProcessor.process(
    rawMessage,
    contrib,
    this.createAgentContext(sharedState, options.callContext, { message: rawMessage })
  );

  if (contrib.hooks?.afterReceive) {
    await contrib.hooks.afterReceive(processedMessage, this.createAgentContext(sharedState, options.callContext));
  }

  const finalMessage = this.createFinalMessage(contrib, processedMessage);

  const canContinue = await this.handleUserAttention(
    contrib.role,
    finalMessage,
    options,
    results
  );

  // A paused workflow contributes no message; callers resume it later.
  if (!canContinue && options.pauseOnAttention) {
    return null;
  }

  results.messages.set(contrib.role, finalMessage);
  options.onAgentComplete?.(contrib.role, finalMessage);
  return finalMessage;
}
|
|
741
|
+
}
|
|
742
|
+
|
|
743
|
+
// Shared singleton instance used across the extension.
export const aiService = new AIService();
|
|
744
|
+
|