@liquidmetal-ai/precip 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.prettierrc +9 -0
- package/CHANGELOG.md +8 -0
- package/eslint.config.mjs +28 -0
- package/package.json +53 -0
- package/src/engine/agent.ts +478 -0
- package/src/engine/llm-provider.test.ts +275 -0
- package/src/engine/llm-provider.ts +330 -0
- package/src/engine/stream-parser.ts +170 -0
- package/src/index.ts +142 -0
- package/src/mounts/mount-manager.test.ts +516 -0
- package/src/mounts/mount-manager.ts +327 -0
- package/src/mounts/mount-registry.ts +196 -0
- package/src/mounts/zod-to-string.test.ts +154 -0
- package/src/mounts/zod-to-string.ts +213 -0
- package/src/presets/agent-tools.ts +57 -0
- package/src/presets/index.ts +5 -0
- package/src/sandbox/README.md +1321 -0
- package/src/sandbox/bridges/README.md +571 -0
- package/src/sandbox/bridges/actor.test.ts +229 -0
- package/src/sandbox/bridges/actor.ts +195 -0
- package/src/sandbox/bridges/bridge-fixes.test.ts +614 -0
- package/src/sandbox/bridges/bucket.test.ts +300 -0
- package/src/sandbox/bridges/cleanup-reproduction.test.ts +225 -0
- package/src/sandbox/bridges/console-multiple.test.ts +187 -0
- package/src/sandbox/bridges/console.test.ts +157 -0
- package/src/sandbox/bridges/console.ts +122 -0
- package/src/sandbox/bridges/fetch.ts +93 -0
- package/src/sandbox/bridges/index.ts +78 -0
- package/src/sandbox/bridges/readable-stream.ts +323 -0
- package/src/sandbox/bridges/response.test.ts +154 -0
- package/src/sandbox/bridges/response.ts +123 -0
- package/src/sandbox/bridges/review-fixes.test.ts +331 -0
- package/src/sandbox/bridges/search.test.ts +475 -0
- package/src/sandbox/bridges/search.ts +264 -0
- package/src/sandbox/bridges/shared/body-methods.ts +93 -0
- package/src/sandbox/bridges/shared/cleanup.ts +112 -0
- package/src/sandbox/bridges/shared/convert.ts +76 -0
- package/src/sandbox/bridges/shared/headers.ts +181 -0
- package/src/sandbox/bridges/shared/index.ts +36 -0
- package/src/sandbox/bridges/shared/json-helpers.ts +77 -0
- package/src/sandbox/bridges/shared/path-parser.ts +109 -0
- package/src/sandbox/bridges/shared/promise-helper.ts +108 -0
- package/src/sandbox/bridges/shared/registry-setup.ts +84 -0
- package/src/sandbox/bridges/shared/response-object.ts +280 -0
- package/src/sandbox/bridges/shared/result-builder.ts +130 -0
- package/src/sandbox/bridges/shared/scope-helpers.ts +44 -0
- package/src/sandbox/bridges/shared/stream-reader.ts +90 -0
- package/src/sandbox/bridges/storage-bridge.test.ts +893 -0
- package/src/sandbox/bridges/storage.ts +421 -0
- package/src/sandbox/bridges/text-decoder.ts +190 -0
- package/src/sandbox/bridges/text-encoder.ts +102 -0
- package/src/sandbox/bridges/types.ts +39 -0
- package/src/sandbox/bridges/utils.ts +123 -0
- package/src/sandbox/index.ts +6 -0
- package/src/sandbox/quickjs-wasm.d.ts +9 -0
- package/src/sandbox/sandbox.test.ts +191 -0
- package/src/sandbox/sandbox.ts +831 -0
- package/src/sandbox/test-helper.ts +43 -0
- package/src/sandbox/test-mocks.ts +154 -0
- package/src/sandbox/user-stream.test.ts +77 -0
- package/src/skills/frontmatter.test.ts +305 -0
- package/src/skills/frontmatter.ts +200 -0
- package/src/skills/index.ts +9 -0
- package/src/skills/skills-loader.test.ts +237 -0
- package/src/skills/skills-loader.ts +200 -0
- package/src/tools/actor-storage-tools.ts +250 -0
- package/src/tools/code-tools.test.ts +199 -0
- package/src/tools/code-tools.ts +444 -0
- package/src/tools/file-tools.ts +206 -0
- package/src/tools/registry.ts +125 -0
- package/src/tools/script-tools.ts +145 -0
- package/src/tools/smartbucket-tools.ts +203 -0
- package/src/tools/sql-tools.ts +213 -0
- package/src/tools/tool-factory.ts +119 -0
- package/src/types.ts +512 -0
- package/tsconfig.eslint.json +5 -0
- package/tsconfig.json +15 -0
- package/vitest.config.ts +33 -0
package/.prettierrc
ADDED
package/CHANGELOG.md
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
import sharedConfig from '@liquidmetal-ai/eslint-config/eslint.config.mjs';

/** Enable type-aware linting for TypeScript sources via the lint tsconfig. */
const typescriptProjectOverride = {
  files: ['**/*.ts'],
  languageOptions: {
    parserOptions: {
      project: './tsconfig.eslint.json'
    }
  }
};

/** Rules this package intentionally relaxes relative to the shared config. */
const relaxedRules = {
  rules: {
    '@typescript-eslint/no-explicit-any': 'off',
    '@typescript-eslint/no-floating-promises': 'off'
  }
};

/** Vitest config is a config file, not part of the TypeScript project. */
const vitestIgnore = {
  ignores: ['vitest.config.ts']
};

/**
 * @type {import('eslint').Linter.Config[]}
 */
const config = [...sharedConfig, typescriptProjectOverride, relaxedRules, vitestIgnore];

export default config;
|
package/package.json
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@liquidmetal-ai/precip",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "Agent SDK for building sophisticated LLM-powered agents on Raindrop",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "./dist/index.js",
|
|
7
|
+
"types": "./dist/index.d.ts",
|
|
8
|
+
"exports": {
|
|
9
|
+
".": {
|
|
10
|
+
"types": "./dist/index.d.ts",
|
|
11
|
+
"default": "./dist/index.js"
|
|
12
|
+
},
|
|
13
|
+
"./skills/frontmatter": {
|
|
14
|
+
"types": "./dist/skills/frontmatter.d.ts",
|
|
15
|
+
"default": "./dist/skills/frontmatter.js"
|
|
16
|
+
}
|
|
17
|
+
},
|
|
18
|
+
"keywords": [
|
|
19
|
+
"agent",
|
|
20
|
+
"llm",
|
|
21
|
+
"ai",
|
|
22
|
+
"raindrop"
|
|
23
|
+
],
|
|
24
|
+
"author": "LiquidMetal AI",
|
|
25
|
+
"license": "MIT",
|
|
26
|
+
"dependencies": {
|
|
27
|
+
"@jitl/quickjs-ng-wasmfile-release-sync": "^0.31.0",
|
|
28
|
+
"quickjs-emscripten-core": "^0.31.0",
|
|
29
|
+
"zod": "^3"
|
|
30
|
+
},
|
|
31
|
+
"devDependencies": {
|
|
32
|
+
"@typescript-eslint/eslint-plugin": "^8.7.0",
|
|
33
|
+
"@typescript-eslint/parser": "^8.7.0",
|
|
34
|
+
"eslint": "^9.15.0",
|
|
35
|
+
"globals": "^17.3.0",
|
|
36
|
+
"prettier": "^3.0.0",
|
|
37
|
+
"shx": "^0.4.0",
|
|
38
|
+
"typescript": "^5",
|
|
39
|
+
"vitest": "^3.1.3",
|
|
40
|
+
"@liquidmetal-ai/typescript-config": "0.0.0",
|
|
41
|
+
"@liquidmetal-ai/eslint-config": "0.0.0"
|
|
42
|
+
},
|
|
43
|
+
"peerDependencies": {
|
|
44
|
+
"@liquidmetal-ai/raindrop-framework": "0.18.0"
|
|
45
|
+
},
|
|
46
|
+
"scripts": {
|
|
47
|
+
"build": "shx rm -rf dist && tsc -b",
|
|
48
|
+
"test": "vitest run",
|
|
49
|
+
"test:watch": "vitest",
|
|
50
|
+
"lint": "eslint . --max-warnings=0",
|
|
51
|
+
"format": "prettier --write \"**/*.{ts,tsx,md}\""
|
|
52
|
+
}
|
|
53
|
+
}
|
|
@@ -0,0 +1,478 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Precip Agent - Multi-turn conversation agent with tool calling
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import type {
|
|
6
|
+
AgentConfig,
|
|
7
|
+
AgentResponse,
|
|
8
|
+
AgentEvent,
|
|
9
|
+
LLMMessage,
|
|
10
|
+
ToolContext,
|
|
11
|
+
ToolCallResult,
|
|
12
|
+
ConversationState,
|
|
13
|
+
Logger
|
|
14
|
+
} from '../types.js';
|
|
15
|
+
import { createLLMProvider } from './llm-provider.js';
|
|
16
|
+
import { MountManager } from '../mounts/mount-manager.js';
|
|
17
|
+
import { ToolRegistry } from '../tools/registry.js';
|
|
18
|
+
import { discoverSkills, generateSkillsPrompt } from '../skills/index.js';
|
|
19
|
+
|
|
20
|
+
export class PrecipAgent {
|
|
21
|
+
private llmProvider: ReturnType<typeof createLLMProvider>;
|
|
22
|
+
private mountManager: MountManager;
|
|
23
|
+
private toolRegistry: ToolRegistry;
|
|
24
|
+
private systemPrompt: string;
|
|
25
|
+
private maxTurns: number;
|
|
26
|
+
private logger?: Logger;
|
|
27
|
+
private conversationState: ConversationState;
|
|
28
|
+
private config: AgentConfig;
|
|
29
|
+
private initialized: boolean = false;
|
|
30
|
+
|
|
31
|
+
constructor(config: AgentConfig) {
|
|
32
|
+
this.config = config;
|
|
33
|
+
this.logger = config.logger;
|
|
34
|
+
this.maxTurns = config.maxTurns || 10;
|
|
35
|
+
|
|
36
|
+
// Initialize LLM provider
|
|
37
|
+
this.llmProvider = createLLMProvider(config.llm, this.logger, config.retry);
|
|
38
|
+
|
|
39
|
+
// Initialize mount manager
|
|
40
|
+
this.mountManager = new MountManager(config.mounts || {}, this.logger);
|
|
41
|
+
|
|
42
|
+
// Build system prompt with mounts description (skills added async via init())
|
|
43
|
+
this.systemPrompt = this.buildSystemPrompt(
|
|
44
|
+
config.system || 'You are a helpful assistant.',
|
|
45
|
+
''
|
|
46
|
+
);
|
|
47
|
+
|
|
48
|
+
// Initialize tool registry
|
|
49
|
+
this.toolRegistry = new ToolRegistry(this.logger);
|
|
50
|
+
|
|
51
|
+
// Register tools
|
|
52
|
+
if (config.tools) {
|
|
53
|
+
for (const tool of config.tools) {
|
|
54
|
+
if (typeof tool === 'function') {
|
|
55
|
+
// Tool class constructor
|
|
56
|
+
const toolContext: ToolContext = {
|
|
57
|
+
mounts: this.mountManager,
|
|
58
|
+
logger: this.logger
|
|
59
|
+
};
|
|
60
|
+
const toolInstance = new (tool as any)(toolContext);
|
|
61
|
+
this.toolRegistry.register(toolInstance);
|
|
62
|
+
} else {
|
|
63
|
+
// Tool instance
|
|
64
|
+
this.toolRegistry.register(tool);
|
|
65
|
+
}
|
|
66
|
+
}
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
// Initialize conversation state
|
|
70
|
+
this.conversationState = {
|
|
71
|
+
messages: [],
|
|
72
|
+
turnCount: 0,
|
|
73
|
+
totalTokens: 0
|
|
74
|
+
};
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
/**
|
|
78
|
+
* Initialize async components (skills discovery).
|
|
79
|
+
* Called automatically on first run() if not called explicitly.
|
|
80
|
+
*
|
|
81
|
+
* Call this explicitly if you want to control when skill discovery happens:
|
|
82
|
+
* ```typescript
|
|
83
|
+
* const agent = new PrecipAgent(config);
|
|
84
|
+
* await agent.init(); // discover skills now
|
|
85
|
+
* const response = await agent.run("...");
|
|
86
|
+
* ```
|
|
87
|
+
*/
|
|
88
|
+
async init(): Promise<void> {
|
|
89
|
+
if (this.initialized) return;
|
|
90
|
+
|
|
91
|
+
if (this.config.skills) {
|
|
92
|
+
const skills = await discoverSkills(
|
|
93
|
+
this.mountManager,
|
|
94
|
+
this.config.skills,
|
|
95
|
+
this.logger
|
|
96
|
+
);
|
|
97
|
+
|
|
98
|
+
const skillsPrompt = generateSkillsPrompt(skills);
|
|
99
|
+
this.systemPrompt = this.buildSystemPrompt(
|
|
100
|
+
this.config.system || 'You are a helpful assistant.',
|
|
101
|
+
skillsPrompt
|
|
102
|
+
);
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
this.initialized = true;
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
/**
|
|
109
|
+
* Build system prompt with mounts description and skills appended
|
|
110
|
+
*/
|
|
111
|
+
private buildSystemPrompt(baseSystemPrompt: string, skillsPrompt: string): string {
|
|
112
|
+
const mountsDescription = this.mountManager.getMountsDescription();
|
|
113
|
+
|
|
114
|
+
let prompt = baseSystemPrompt;
|
|
115
|
+
|
|
116
|
+
if (mountsDescription) {
|
|
117
|
+
prompt += mountsDescription;
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
if (skillsPrompt) {
|
|
121
|
+
prompt += skillsPrompt;
|
|
122
|
+
}
|
|
123
|
+
|
|
124
|
+
return prompt;
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
/**
|
|
128
|
+
* Run the agent with a user message (non-streaming).
|
|
129
|
+
* Internally consumes runStream() and returns the final response.
|
|
130
|
+
*/
|
|
131
|
+
async run(userMessage: string): Promise<AgentResponse> {
|
|
132
|
+
const stream = this.runStream(userMessage);
|
|
133
|
+
let result: IteratorResult<AgentEvent, AgentResponse>;
|
|
134
|
+
do {
|
|
135
|
+
result = await stream.next();
|
|
136
|
+
} while (!result.done);
|
|
137
|
+
return result.value;
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
/**
|
|
141
|
+
* Run the agent with streaming events.
|
|
142
|
+
* Yields AgentEvent at each step; returns AgentResponse when done.
|
|
143
|
+
*/
|
|
144
|
+
async *runStream(userMessage: string): AsyncGenerator<AgentEvent, AgentResponse, undefined> {
|
|
145
|
+
// Auto-initialize if not done yet (discovers skills)
|
|
146
|
+
await this.init();
|
|
147
|
+
|
|
148
|
+
// Reset conversation state for new run
|
|
149
|
+
this.conversationState = {
|
|
150
|
+
messages: [
|
|
151
|
+
{ role: 'system', content: this.systemPrompt },
|
|
152
|
+
{ role: 'user', content: userMessage }
|
|
153
|
+
],
|
|
154
|
+
turnCount: 0,
|
|
155
|
+
totalTokens: 0
|
|
156
|
+
};
|
|
157
|
+
|
|
158
|
+
this.logger?.info?.('Starting agent run', { message: userMessage });
|
|
159
|
+
|
|
160
|
+
return yield* this.executeMultiTurnStream();
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
/**
|
|
164
|
+
* Continue an existing conversation (non-streaming).
|
|
165
|
+
*/
|
|
166
|
+
async continue(userMessage: string): Promise<AgentResponse> {
|
|
167
|
+
const stream = this.continueStream(userMessage);
|
|
168
|
+
let result: IteratorResult<AgentEvent, AgentResponse>;
|
|
169
|
+
do {
|
|
170
|
+
result = await stream.next();
|
|
171
|
+
} while (!result.done);
|
|
172
|
+
return result.value;
|
|
173
|
+
}
|
|
174
|
+
|
|
175
|
+
/**
|
|
176
|
+
* Continue an existing conversation with streaming events.
|
|
177
|
+
*/
|
|
178
|
+
async *continueStream(userMessage: string): AsyncGenerator<AgentEvent, AgentResponse, undefined> {
|
|
179
|
+
// Add user message to existing conversation
|
|
180
|
+
this.conversationState.messages.push({
|
|
181
|
+
role: 'user',
|
|
182
|
+
content: userMessage
|
|
183
|
+
});
|
|
184
|
+
|
|
185
|
+
this.logger?.info?.('Continuing conversation', { message: userMessage });
|
|
186
|
+
|
|
187
|
+
// Continue the multi-turn loop WITHOUT resetting state
|
|
188
|
+
return yield* this.executeMultiTurnStream();
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
/**
|
|
192
|
+
* Core multi-turn loop as an async generator.
|
|
193
|
+
* Yields AgentEvent at each step; returns the final AgentResponse.
|
|
194
|
+
*/
|
|
195
|
+
private async *executeMultiTurnStream(): AsyncGenerator<AgentEvent, AgentResponse, undefined> {
|
|
196
|
+
const allToolResults: ToolCallResult[] = [];
|
|
197
|
+
let finalContent = '';
|
|
198
|
+
let finalFinishReason: string | null = null;
|
|
199
|
+
|
|
200
|
+
// Multi-turn conversation loop
|
|
201
|
+
while (this.conversationState.turnCount < this.maxTurns) {
|
|
202
|
+
this.conversationState.turnCount++;
|
|
203
|
+
const currentTurn = this.conversationState.turnCount;
|
|
204
|
+
|
|
205
|
+
// Get tool definitions
|
|
206
|
+
const toolDefinitions = this.toolRegistry.getToolDefinitions();
|
|
207
|
+
|
|
208
|
+
// Call LLM
|
|
209
|
+
const llmResponse = await this.llmProvider.chat(
|
|
210
|
+
this.conversationState.messages,
|
|
211
|
+
toolDefinitions.length > 0 ? toolDefinitions : undefined
|
|
212
|
+
);
|
|
213
|
+
|
|
214
|
+
// Update token usage
|
|
215
|
+
if (llmResponse.usage) {
|
|
216
|
+
this.conversationState.totalTokens += llmResponse.usage.totalTokens;
|
|
217
|
+
}
|
|
218
|
+
|
|
219
|
+
finalContent = llmResponse.content || '';
|
|
220
|
+
finalFinishReason = llmResponse.finishReason;
|
|
221
|
+
|
|
222
|
+
// Add assistant message to history
|
|
223
|
+
const assistantMessage: LLMMessage = {
|
|
224
|
+
role: 'assistant',
|
|
225
|
+
content: llmResponse.content
|
|
226
|
+
};
|
|
227
|
+
|
|
228
|
+
if (llmResponse.toolCalls.length > 0) {
|
|
229
|
+
assistantMessage.tool_calls = llmResponse.toolCalls;
|
|
230
|
+
}
|
|
231
|
+
|
|
232
|
+
this.conversationState.messages.push(assistantMessage);
|
|
233
|
+
|
|
234
|
+
// Yield LLM response event
|
|
235
|
+
yield {
|
|
236
|
+
type: 'llm_response',
|
|
237
|
+
turn: currentTurn,
|
|
238
|
+
message: llmResponse.content || undefined,
|
|
239
|
+
finishReason: llmResponse.finishReason,
|
|
240
|
+
usage: llmResponse.usage,
|
|
241
|
+
};
|
|
242
|
+
|
|
243
|
+
// If no tool calls, we're done!
|
|
244
|
+
if (llmResponse.toolCalls.length === 0) {
|
|
245
|
+
this.logger?.info?.('Agent run complete', {
|
|
246
|
+
turns: currentTurn,
|
|
247
|
+
toolCallsTotal: allToolResults.length
|
|
248
|
+
});
|
|
249
|
+
|
|
250
|
+
// Yield turn_complete
|
|
251
|
+
yield { type: 'turn_complete', turn: currentTurn };
|
|
252
|
+
break;
|
|
253
|
+
}
|
|
254
|
+
|
|
255
|
+
// Execute tool calls
|
|
256
|
+
for (const toolCall of llmResponse.toolCalls) {
|
|
257
|
+
let params: any = {};
|
|
258
|
+
try {
|
|
259
|
+
params = JSON.parse(toolCall.function.arguments);
|
|
260
|
+
} catch (_e) {
|
|
261
|
+
// Malformed tool arguments — report to the LLM so it can self-correct
|
|
262
|
+
this.logger?.warn?.('Failed to parse tool arguments', {
|
|
263
|
+
tool: toolCall.function.name,
|
|
264
|
+
arguments: toolCall.function.arguments,
|
|
265
|
+
});
|
|
266
|
+
|
|
267
|
+
const parseError = `Failed to parse tool arguments: invalid JSON in arguments string`;
|
|
268
|
+
|
|
269
|
+
allToolResults.push({
|
|
270
|
+
turn: currentTurn,
|
|
271
|
+
toolCallId: toolCall.id,
|
|
272
|
+
toolName: toolCall.function.name,
|
|
273
|
+
arguments: {},
|
|
274
|
+
result: null,
|
|
275
|
+
success: false,
|
|
276
|
+
error: parseError,
|
|
277
|
+
});
|
|
278
|
+
|
|
279
|
+
this.conversationState.messages.push({
|
|
280
|
+
role: 'tool',
|
|
281
|
+
tool_call_id: toolCall.id,
|
|
282
|
+
content: JSON.stringify({ error: parseError }),
|
|
283
|
+
});
|
|
284
|
+
|
|
285
|
+
yield {
|
|
286
|
+
type: 'tool_call_start',
|
|
287
|
+
turn: currentTurn,
|
|
288
|
+
toolCall: {
|
|
289
|
+
id: toolCall.id,
|
|
290
|
+
name: toolCall.function.name,
|
|
291
|
+
arguments: {},
|
|
292
|
+
},
|
|
293
|
+
};
|
|
294
|
+
|
|
295
|
+
yield {
|
|
296
|
+
type: 'tool_call_end',
|
|
297
|
+
turn: currentTurn,
|
|
298
|
+
toolResult: {
|
|
299
|
+
id: toolCall.id,
|
|
300
|
+
name: toolCall.function.name,
|
|
301
|
+
result: null,
|
|
302
|
+
success: false,
|
|
303
|
+
error: parseError,
|
|
304
|
+
},
|
|
305
|
+
};
|
|
306
|
+
|
|
307
|
+
continue;
|
|
308
|
+
}
|
|
309
|
+
|
|
310
|
+
// Yield tool_call_start
|
|
311
|
+
yield {
|
|
312
|
+
type: 'tool_call_start',
|
|
313
|
+
turn: currentTurn,
|
|
314
|
+
toolCall: {
|
|
315
|
+
id: toolCall.id,
|
|
316
|
+
name: toolCall.function.name,
|
|
317
|
+
arguments: params,
|
|
318
|
+
},
|
|
319
|
+
};
|
|
320
|
+
|
|
321
|
+
try {
|
|
322
|
+
// Execute tool
|
|
323
|
+
const toolContext: ToolContext = {
|
|
324
|
+
mounts: this.mountManager as any, // Pass mount manager
|
|
325
|
+
logger: this.logger
|
|
326
|
+
};
|
|
327
|
+
|
|
328
|
+
const result = await this.toolRegistry.execute(
|
|
329
|
+
toolCall.function.name,
|
|
330
|
+
params,
|
|
331
|
+
toolContext
|
|
332
|
+
);
|
|
333
|
+
|
|
334
|
+
// Record tool result
|
|
335
|
+
const toolResult: ToolCallResult = {
|
|
336
|
+
turn: currentTurn,
|
|
337
|
+
toolCallId: toolCall.id,
|
|
338
|
+
toolName: toolCall.function.name,
|
|
339
|
+
arguments: params,
|
|
340
|
+
result: result.result,
|
|
341
|
+
success: result.success,
|
|
342
|
+
error: result.error
|
|
343
|
+
};
|
|
344
|
+
|
|
345
|
+
allToolResults.push(toolResult);
|
|
346
|
+
|
|
347
|
+
// Add tool result to message history
|
|
348
|
+
const resultContent = result.success
|
|
349
|
+
? this.truncateForContext(result.result, 4000)
|
|
350
|
+
: JSON.stringify({ error: result.error });
|
|
351
|
+
|
|
352
|
+
this.conversationState.messages.push({
|
|
353
|
+
role: 'tool',
|
|
354
|
+
tool_call_id: toolCall.id,
|
|
355
|
+
content: resultContent
|
|
356
|
+
});
|
|
357
|
+
|
|
358
|
+
// Yield tool_call_end
|
|
359
|
+
yield {
|
|
360
|
+
type: 'tool_call_end',
|
|
361
|
+
turn: currentTurn,
|
|
362
|
+
toolResult: {
|
|
363
|
+
id: toolCall.id,
|
|
364
|
+
name: toolCall.function.name,
|
|
365
|
+
result: result.result,
|
|
366
|
+
success: result.success,
|
|
367
|
+
error: result.error,
|
|
368
|
+
},
|
|
369
|
+
};
|
|
370
|
+
} catch (error) {
|
|
371
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
372
|
+
|
|
373
|
+
this.logger?.error?.('Tool execution error', {
|
|
374
|
+
tool: toolCall.function.name,
|
|
375
|
+
error: errorMessage
|
|
376
|
+
});
|
|
377
|
+
|
|
378
|
+
// Record error
|
|
379
|
+
allToolResults.push({
|
|
380
|
+
turn: currentTurn,
|
|
381
|
+
toolCallId: toolCall.id,
|
|
382
|
+
toolName: toolCall.function.name,
|
|
383
|
+
arguments: params,
|
|
384
|
+
result: null,
|
|
385
|
+
success: false,
|
|
386
|
+
error: errorMessage
|
|
387
|
+
});
|
|
388
|
+
|
|
389
|
+
// Add error to message history
|
|
390
|
+
this.conversationState.messages.push({
|
|
391
|
+
role: 'tool',
|
|
392
|
+
tool_call_id: toolCall.id,
|
|
393
|
+
content: JSON.stringify({ error: errorMessage })
|
|
394
|
+
});
|
|
395
|
+
|
|
396
|
+
// Yield tool_call_end with error
|
|
397
|
+
yield {
|
|
398
|
+
type: 'tool_call_end',
|
|
399
|
+
turn: currentTurn,
|
|
400
|
+
toolResult: {
|
|
401
|
+
id: toolCall.id,
|
|
402
|
+
name: toolCall.function.name,
|
|
403
|
+
result: null,
|
|
404
|
+
success: false,
|
|
405
|
+
error: errorMessage,
|
|
406
|
+
},
|
|
407
|
+
};
|
|
408
|
+
}
|
|
409
|
+
}
|
|
410
|
+
|
|
411
|
+
// Yield turn_complete
|
|
412
|
+
yield { type: 'turn_complete', turn: currentTurn };
|
|
413
|
+
|
|
414
|
+
// Continue to next turn...
|
|
415
|
+
}
|
|
416
|
+
|
|
417
|
+
// Check if we hit max turns
|
|
418
|
+
if (this.conversationState.turnCount >= this.maxTurns) {
|
|
419
|
+
this.logger?.warn?.('Agent hit max turns limit', {
|
|
420
|
+
maxTurns: this.maxTurns
|
|
421
|
+
});
|
|
422
|
+
}
|
|
423
|
+
|
|
424
|
+
return {
|
|
425
|
+
message: finalContent,
|
|
426
|
+
toolCalls: allToolResults.length > 0 ? allToolResults : undefined,
|
|
427
|
+
turns: this.conversationState.turnCount,
|
|
428
|
+
finishReason: finalFinishReason,
|
|
429
|
+
usage:
|
|
430
|
+
this.conversationState.totalTokens > 0
|
|
431
|
+
? {
|
|
432
|
+
promptTokens: 0, // Not tracked separately
|
|
433
|
+
completionTokens: 0,
|
|
434
|
+
totalTokens: this.conversationState.totalTokens
|
|
435
|
+
}
|
|
436
|
+
: undefined
|
|
437
|
+
};
|
|
438
|
+
}
|
|
439
|
+
|
|
440
|
+
/**
|
|
441
|
+
* Get current conversation state
|
|
442
|
+
*/
|
|
443
|
+
getConversation(): ConversationState {
|
|
444
|
+
return { ...this.conversationState };
|
|
445
|
+
}
|
|
446
|
+
|
|
447
|
+
/**
|
|
448
|
+
* Reset conversation
|
|
449
|
+
*/
|
|
450
|
+
reset(): void {
|
|
451
|
+
this.conversationState = {
|
|
452
|
+
messages: [],
|
|
453
|
+
turnCount: 0,
|
|
454
|
+
totalTokens: 0
|
|
455
|
+
};
|
|
456
|
+
}
|
|
457
|
+
|
|
458
|
+
/**
|
|
459
|
+
* Get the full system prompt (base prompt + mounts description)
|
|
460
|
+
* Useful for debugging and testing
|
|
461
|
+
*/
|
|
462
|
+
getSystemPrompt(): string {
|
|
463
|
+
return this.systemPrompt;
|
|
464
|
+
}
|
|
465
|
+
|
|
466
|
+
/**
|
|
467
|
+
* Truncate large results for context management
|
|
468
|
+
*/
|
|
469
|
+
private truncateForContext(obj: any, maxLength: number = 4000): string {
|
|
470
|
+
const str = JSON.stringify(obj);
|
|
471
|
+
if (str.length <= maxLength) {
|
|
472
|
+
return str;
|
|
473
|
+
}
|
|
474
|
+
|
|
475
|
+
const truncated = str.substring(0, maxLength);
|
|
476
|
+
return truncated + `... [TRUNCATED: ${str.length - maxLength} more characters]`;
|
|
477
|
+
}
|
|
478
|
+
}
|