agentic-ai-framework 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +626 -0
- package/index.js +84 -0
- package/package.json +38 -0
- package/src/agent/Agent.js +278 -0
- package/src/agent/AgentConfig.js +88 -0
- package/src/agent/AgentRunner.js +256 -0
- package/src/llm/BaseLLMProvider.js +78 -0
- package/src/llm/LLMRouter.js +80 -0
- package/src/llm/providers/ClaudeProvider.js +307 -0
- package/src/llm/providers/GrokProvider.js +208 -0
- package/src/llm/providers/OpenAIProvider.js +194 -0
- package/src/memory/FileStore.js +102 -0
- package/src/memory/MemoryManager.js +55 -0
- package/src/memory/SessionMemory.js +124 -0
- package/src/prompt/PromptBuilder.js +95 -0
- package/src/prompt/PromptTemplate.js +58 -0
- package/src/team/AgentTeam.js +308 -0
- package/src/team/TeamResult.js +60 -0
- package/src/tool/Tool.js +138 -0
- package/src/tool/ToolRegistry.js +81 -0
- package/src/utils/errors.js +46 -0
- package/src/utils/logger.js +33 -0
|
@@ -0,0 +1,194 @@
|
|
|
1
|
+
// OpenAI LLM Provider
|
|
2
|
+
// Uses the OpenAI Chat Completions API (same format as Grok)
|
|
3
|
+
|
|
4
|
+
import { BaseLLMProvider } from '../BaseLLMProvider.js';
|
|
5
|
+
|
|
6
|
+
const OPENAI_API_BASE = 'https://api.openai.com/v1';
|
|
7
|
+
const DEFAULT_MODEL = 'gpt-4o';
|
|
8
|
+
const DEFAULT_TIMEOUT = 60000;
|
|
9
|
+
|
|
10
|
+
export class OpenAIProvider extends BaseLLMProvider {
  /**
   * @param {string} apiKey - OpenAI API key
   * @param {string} [model] - Model to use
   * @param {Object} [opts] - Additional options
   * @param {string} [opts.baseUrl] - Override base URL (useful for Azure or local proxies)
   * @param {number} [opts.timeout] - Request timeout in ms
   */
  constructor(apiKey, model = DEFAULT_MODEL, opts = {}) {
    super();
    if (!apiKey) throw new Error('OpenAIProvider: apiKey is required');
    this.apiKey = apiKey;
    this.model = model;
    this.baseUrl = opts.baseUrl ?? OPENAI_API_BASE;
    this.timeout = opts.timeout ?? DEFAULT_TIMEOUT;
  }

  /**
   * POST a JSON body to an API endpoint with timeout and error mapping.
   * @param {string} endpoint - Path appended to baseUrl, e.g. '/chat/completions'
   * @param {Object} body - Request payload (JSON-serialized)
   * @returns {Promise<Object>} Parsed JSON response body
   * @throws {Error} On non-2xx status (message includes status + API error text),
   *   or 'OpenAI API request timed out' when the timeout elapses.
   */
  async _makeRequest(endpoint, body) {
    const url = `${this.baseUrl}${endpoint}`;
    const controller = new AbortController();
    const timeoutId = setTimeout(() => controller.abort(), this.timeout);

    try {
      const response = await fetch(url, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          Authorization: `Bearer ${this.apiKey}`,
        },
        body: JSON.stringify(body),
        signal: controller.signal,
      });

      if (!response.ok) {
        const errorBody = await response.text();
        // Prefer the structured API error message; fall back to raw text
        // (error responses are not always JSON, e.g. from proxies).
        let errorMessage;
        try {
          const errorJson = JSON.parse(errorBody);
          errorMessage = errorJson.error?.message || errorBody;
        } catch {
          errorMessage = errorBody;
        }
        throw new Error(`OpenAI API error (${response.status}): ${errorMessage}`);
      }

      return await response.json();
    } catch (error) {
      // AbortError here means our own timeout fired, not a caller cancel.
      if (error.name === 'AbortError') throw new Error('OpenAI API request timed out');
      throw error;
    } finally {
      // Single cleanup point instead of clearing in both success and error paths.
      clearTimeout(timeoutId);
    }
  }

  /**
   * Build the messages array: an explicit options.messages wins (multi-turn /
   * tool loops); otherwise synthesize [system?, user] from the prompt.
   */
  _buildMessages(prompt, options) {
    if (options.messages && Array.isArray(options.messages)) return options.messages;
    const messages = [];
    if (options.systemPrompt) messages.push({ role: 'system', content: options.systemPrompt });
    messages.push({ role: 'user', content: prompt });
    return messages;
  }

  /**
   * Convert framework tool descriptors into OpenAI function-tool format.
   * @param {Array<{name: string, description: string, parameters?: Object}>} tools
   * @returns {Array<Object>} Empty array when no tools given.
   */
  _formatTools(tools) {
    if (!tools?.length) return [];
    return tools.map(tool => ({
      type: 'function',
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.parameters || { type: 'object', properties: {} },
      },
    }));
  }

  /**
   * Run one chat completion.
   *
   * @param {string} prompt - User prompt (ignored when options.messages is set)
   * @param {Object} [options]
   * @param {Array}  [options.messages] - Full message array (overrides prompt)
   * @param {string} [options.systemPrompt]
   * @param {string} [options.model]
   * @param {number} [options.temperature=0.1]
   * @param {number} [options.maxTokens=4096]
   * @param {boolean}[options.enableTools] - Attach options.tools for function calling
   * @param {Array}  [options.tools]
   * @param {boolean}[options.jsonMode] - Request a JSON object response (ignored with tools)
   * @returns {Promise<{content, parsed, usage, model, finishReason, toolCalls, messages}>}
   * @throws {Error} On API/network errors, missing choices, or unparseable JSON in jsonMode.
   */
  async complete(prompt, options = {}) {
    const messages = this._buildMessages(prompt, options);

    const requestBody = {
      model: options.model || this.model,
      messages,
      temperature: options.temperature ?? 0.1,
      max_tokens: options.maxTokens ?? 4096,
    };

    if (options.enableTools && options.tools) {
      const tools = this._formatTools(options.tools);
      // Fix: the API rejects an empty `tools` array, so only attach
      // tools/tool_choice when at least one tool definition is present.
      if (tools.length > 0) {
        requestBody.tools = tools;
        requestBody.tool_choice = 'auto';
      }
    }

    // jsonMode and tool calling are mutually exclusive on the request.
    if (options.jsonMode && !options.enableTools) {
      requestBody.response_format = { type: 'json_object' };
    }

    const response = await this._makeRequest('/chat/completions', requestBody);
    const choice = response.choices?.[0];
    if (!choice) throw new Error('No completion returned from OpenAI API');

    const content = choice.message?.content || '';
    const rawToolCalls = choice.message?.tool_calls;
    const hasToolCalls = !!(rawToolCalls?.length > 0);
    const updatedMessages = [...messages];

    let toolCalls;
    if (hasToolCalls) {
      toolCalls = rawToolCalls.map(tc => {
        // Arguments arrive as a JSON string; malformed JSON degrades to {}.
        let args;
        try {
          args = typeof tc.function.arguments === 'string'
            ? JSON.parse(tc.function.arguments)
            : tc.function.arguments ?? {};
        } catch {
          args = {};
        }
        return { id: tc.id, name: tc.function.name, arguments: args };
      });
      // Echo the raw tool_calls back into the transcript so the caller can
      // append tool results and continue the loop.
      updatedMessages.push({
        role: 'assistant',
        content: content || null,
        tool_calls: rawToolCalls,
      });
    } else {
      updatedMessages.push({ role: 'assistant', content });
    }

    let parsed = null;
    if (options.jsonMode && content && !hasToolCalls) {
      try {
        parsed = JSON.parse(content);
      } catch (error) {
        throw new Error(`Failed to parse JSON response: ${error.message}`);
      }
    }

    return {
      content,
      parsed,
      usage: {
        promptTokens: response.usage?.prompt_tokens || 0,
        completionTokens: response.usage?.completion_tokens || 0,
        totalTokens: response.usage?.total_tokens || 0,
      },
      model: response.model || this.model,
      finishReason: choice.finish_reason || 'unknown',
      toolCalls: toolCalls?.length ? toolCalls : undefined,
      messages: updatedMessages,
    };
  }

  /**
   * Complete with a JSON schema hint. When tools are enabled, jsonMode cannot
   * be used, so the schema is not enforced and parsing is best-effort.
   * Otherwise the schema is embedded in the prompt, jsonMode is forced, and
   * required top-level fields are validated.
   * @throws {Error} When the parsed response is missing schema.required fields.
   */
  async completeWithSchema(prompt, schema, options = {}) {
    if (options.enableTools) {
      const response = await this.complete(prompt, { ...options, jsonMode: false });
      if (response.content && !response.toolCalls) {
        // Best-effort parse: the model may answer in prose mid tool loop.
        try { response.parsed = JSON.parse(response.content); } catch {}
      }
      return response;
    }

    const schemaPrompt = `${prompt}\n\nYou MUST respond with a valid JSON object that matches this structure:\n${JSON.stringify(schema, null, 2)}\n\nRespond ONLY with the JSON object, no additional text.`;
    const response = await this.complete(schemaPrompt, { ...options, jsonMode: true });

    if (response.parsed && schema?.required) {
      const missing = schema.required.filter(f => !(f in response.parsed));
      if (missing.length > 0) {
        throw new Error(`Response missing required fields: ${missing.join(', ')}`);
      }
    }

    return response;
  }

  /**
   * Cheap live-connectivity probe; returns true iff a tiny completion succeeds.
   * Never throws.
   * @returns {Promise<boolean>}
   */
  async testConnection() {
    try {
      const response = await this.complete("Say 'ok'", { maxTokens: 10, temperature: 0 });
      return !!response.content;
    } catch {
      return false;
    }
  }

  /**
   * Static list of known-good models (not fetched from the API).
   * @returns {string[]}
   */
  listModels() {
    return ['gpt-4o', 'gpt-4o-mini', 'gpt-4-turbo', 'gpt-3.5-turbo'];
  }
}
|
|
193
|
+
|
|
194
|
+
export default OpenAIProvider;
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
import fs from 'fs/promises';
|
|
2
|
+
import path from 'path';
|
|
3
|
+
import { MemoryError } from '../utils/errors.js';
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* Persists session snapshots as JSON files on disk.
|
|
7
|
+
* One file per session: {dir}/{sessionId}.json
|
|
8
|
+
*
|
|
9
|
+
* This is the only shared resource between concurrent requests.
|
|
10
|
+
* Since each session has its own file, different sessions never conflict.
|
|
11
|
+
* Same-session concurrent writes are an application-level concern.
|
|
12
|
+
*/
|
|
13
|
+
export class FileStore {
  /**
   * @param {string} dir - Directory where session files are stored.
   *   Created automatically if it doesn't exist.
   */
  constructor(dir) {
    if (!dir) throw new Error('FileStore: dir is required');
    this._dir = dir;
  }

  /**
   * Map a session id to its on-disk path.
   * basename() strips any directory components (path traversal), then any
   * remaining non [a-zA-Z0-9_-] characters are replaced with '_'.
   */
  _filePath(sessionId) {
    const safe = path.basename(sessionId).replace(/[^a-zA-Z0-9_\-]/g, '_');
    return path.join(this._dir, `${safe}.json`);
  }

  /**
   * Load a session snapshot from disk.
   * @param {string} sessionId
   * @returns {Promise<Object | null>} Snapshot, or null if session does not exist
   * @throws {MemoryError} On read/parse failures other than a missing file.
   */
  async load(sessionId) {
    try {
      const raw = await fs.readFile(this._filePath(sessionId), 'utf-8');
      return JSON.parse(raw);
    } catch (err) {
      if (err.code === 'ENOENT') return null; // session doesn't exist yet
      throw new MemoryError(`Failed to load session "${sessionId}"`, {
        cause: err,
        sessionId,
      });
    }
  }

  /**
   * Save a session snapshot to disk atomically (write temp file, then rename).
   * @param {string} sessionId
   * @param {Object} snapshot
   * @throws {MemoryError} On any write/rename failure.
   */
  async save(sessionId, snapshot) {
    const filePath = this._filePath(sessionId);
    // Fix: include a random component in the temp name. A timestamp alone
    // collides when two saves for the same session land in the same
    // millisecond, letting them scribble over each other's temp file.
    const tmpPath = `${filePath}.${Date.now()}.${Math.random().toString(36).slice(2)}.tmp`;
    try {
      await fs.mkdir(this._dir, { recursive: true });
      // Write to temp file first, then atomically rename to prevent corruption
      await fs.writeFile(tmpPath, JSON.stringify(snapshot, null, 2), 'utf-8');
      await fs.rename(tmpPath, filePath);
    } catch (err) {
      // Clean up temp file if write or rename failed
      try { await fs.unlink(tmpPath); } catch { /* ignore */ }
      throw new MemoryError(`Failed to save session "${sessionId}"`, {
        cause: err,
        sessionId,
      });
    }
  }

  /**
   * Delete a session file from disk. Deleting a non-existent session is a no-op.
   * @param {string} sessionId
   * @throws {MemoryError} On failures other than a missing file.
   */
  async delete(sessionId) {
    try {
      await fs.unlink(this._filePath(sessionId));
    } catch (err) {
      if (err.code !== 'ENOENT') {
        throw new MemoryError(`Failed to delete session "${sessionId}"`, {
          cause: err,
          sessionId,
        });
      }
    }
  }

  /**
   * List all session IDs stored in the directory.
   * Returns [] if the directory has never been created.
   * @returns {Promise<string[]>}
   * @throws {MemoryError} On directory-read failures other than ENOENT.
   */
  async list() {
    try {
      const files = await fs.readdir(this._dir);
      return files
        .filter(f => f.endsWith('.json'))
        .map(f => f.slice(0, -5)); // strip .json
    } catch (err) {
      if (err.code === 'ENOENT') return [];
      throw new MemoryError('Failed to list sessions', { cause: err });
    }
  }
}
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
import { FileStore } from './FileStore.js';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Facade over FileStore that provides load/save/delete operations
|
|
5
|
+
* on SessionMemory snapshots.
|
|
6
|
+
*
|
|
7
|
+
* Usage:
|
|
8
|
+
* const manager = new MemoryManager({ dir: './sessions' });
|
|
9
|
+
* const snapshot = await manager.load('session-123'); // null if new
|
|
10
|
+
* await manager.save('session-123', memory.snapshot());
|
|
11
|
+
* await manager.delete('session-123');
|
|
12
|
+
*/
|
|
13
|
+
export class MemoryManager {
  /**
   * @param {Object} options
   * @param {string} options.dir - Directory for session JSON files
   */
  constructor({ dir }) {
    this._backend = new FileStore(dir);
  }

  /**
   * Fetch a previously saved session snapshot.
   * @param {string} sessionId
   * @returns {Promise<{ history: Array, context: Object } | null>} null when
   *   no snapshot has been saved for this id yet.
   */
  async load(sessionId) {
    return this._backend.load(sessionId);
  }

  /**
   * Write a session snapshot to durable storage.
   * @param {string} sessionId
   * @param {{ history: Array, context: Object }} snapshot
   */
  async save(sessionId, snapshot) {
    return this._backend.save(sessionId, snapshot);
  }

  /**
   * Remove a persisted session.
   * @param {string} sessionId
   */
  async delete(sessionId) {
    return this._backend.delete(sessionId);
  }

  /**
   * Enumerate the ids of every persisted session.
   * @returns {Promise<string[]>}
   */
  async list() {
    return this._backend.list();
  }
}
|
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* In-memory storage for a single agent session.
|
|
3
|
+
*
|
|
4
|
+
* Holds two things:
|
|
5
|
+
* - history: conversation messages [{role, content}] inserted between system
|
|
6
|
+
* prompt and new user message on each agent.run() call.
|
|
7
|
+
* - context: key-value working state that is injected into the system prompt
|
|
8
|
+
* template as ${key} variables.
|
|
9
|
+
*
|
|
10
|
+
* Cross-session persistence is handled externally by MemoryManager.
|
|
11
|
+
*/
|
|
12
|
+
export class SessionMemory {
  /**
   * @param {Object} [options]
   * @param {number} [options.maxMessages=50] - Max conversation messages to keep.
   *   Trimming always removes a user+assistant pair so the transcript stays aligned.
   */
  constructor({ maxMessages = 50 } = {}) {
    this._messages = []; // Array<{ role: 'user' | 'assistant', content: string }>
    this._state = {};    // Record<string, any>
    this._limit = maxMessages;
  }

  // ── History ───────────────────────────────────────────────────────────────

  /**
   * Record one completed user/assistant exchange.
   * @param {string} userInput
   * @param {string} assistantOutput
   */
  appendExchange(userInput, assistantOutput) {
    this._messages.push(
      { role: 'user', content: userInput },
      { role: 'assistant', content: assistantOutput },
    );
    // Evict the oldest pair until we are back under the cap.
    while (this._messages.length > this._limit) {
      this._messages.splice(0, 2);
    }
  }

  /**
   * A defensive copy of the conversation so far.
   * @returns {Array<{ role: string, content: string }>}
   */
  getHistory() {
    return this._messages.slice();
  }

  /**
   * How many messages are currently held.
   * @returns {number}
   */
  get historyLength() {
    return this._messages.length;
  }

  // ── Working context ───────────────────────────────────────────────────────

  /**
   * Store a named value in the working context. These values end up injected
   * into the system prompt as ${key} variables, so they must survive JSON
   * serialization.
   * @param {string} key
   * @param {*} value
   * @throws {Error} For functions, symbols, and undefined.
   */
  setContext(key, value) {
    const kind = typeof value;
    if (kind === 'function' || kind === 'symbol' || kind === 'undefined') {
      throw new Error(`SessionMemory.setContext: value for "${key}" must be JSON-serializable (got ${kind})`);
    }
    this._state[key] = value;
  }

  /**
   * Look up a single context value.
   * @param {string} key
   * @returns {*}
   */
  getContext(key) {
    return this._state[key];
  }

  /**
   * Shallow copy of the whole working context.
   * @returns {Record<string, any>}
   */
  getContextSnapshot() {
    return { ...this._state };
  }

  // ── Lifecycle ─────────────────────────────────────────────────────────────

  /**
   * Wipe both history and context.
   */
  clear() {
    this._messages = [];
    this._state = {};
  }

  /**
   * Serialize the full session state for persistence.
   * @returns {{ version: number, history: Array, context: Record<string, any>, savedAt: string }}
   */
  snapshot() {
    return {
      version: 1,
      history: this._messages.slice(),
      context: { ...this._state },
      savedAt: new Date().toISOString(),
    };
  }

  /**
   * Replace session state from a snapshot (e.g., loaded from disk).
   * Missing or malformed fields reset to empty.
   * @param {{ history?: Array, context?: Record<string, any> }} snapshot
   */
  restore(snapshot) {
    const history = snapshot?.history;
    const context = snapshot?.context;
    this._messages = Array.isArray(history) ? history.slice() : [];
    this._state = context && typeof context === 'object' ? { ...context } : {};
  }
}
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
import { PromptTemplate } from './PromptTemplate.js';
|
|
2
|
+
|
|
3
|
+
// ── Built-in CoT blocks ──────────────────────────────────────────────────────
|
|
4
|
+
|
|
5
|
+
/**
 * Built-in Chain-of-Thought prompt fragments, keyed by cotStyle.
 * PromptBuilder appends one of these to the rendered system prompt when
 * CoT is enabled in 'prompt' mode (zero extra LLM calls).
 * NOTE: these are runtime strings — the leading blank line and exact
 * wording are part of the emitted prompt.
 */
const COT_BLOCKS = {
  // Default style: a linear, numbered reasoning scaffold.
  'step-by-step': `

### Reasoning Approach
Before producing your final answer, reason step by step through the problem:
1. Identify what is being asked.
2. Determine what information or tool results you need.
3. Plan your approach.
4. Execute and verify your reasoning before responding.
`,
  // Alternative style: weigh the available approaches before answering.
  'pros-cons': `

### Reasoning Approach
Before answering, briefly consider:
- What approaches are available?
- What are the trade-offs of each?
- Which approach best fits the question?
Then proceed with the best approach.
`,
};
|
|
25
|
+
|
|
26
|
+
/**
|
|
27
|
+
* Assembles the system prompt by rendering a PromptTemplate and optionally
|
|
28
|
+
* appending a Chain-of-Thought block.
|
|
29
|
+
*
|
|
30
|
+
* CoT modes:
|
|
31
|
+
* - 'prompt' (default): CoT block appended to the rendered system prompt.
|
|
32
|
+
* Zero extra LLM calls. Controlled by cotStyle.
|
|
33
|
+
* - 'reflect': No CoT appended here; AgentRunner makes a second LLM call
|
|
34
|
+
* after the tool-calling loop to verify the answer. PromptBuilder is not
|
|
35
|
+
* involved in that step.
|
|
36
|
+
*/
|
|
37
|
+
export class PromptBuilder {
  /**
   * @param {Object} options
   * @param {string} [options.systemPromptTemplate] - Inline template string
   * @param {string} [options.systemPromptFile] - Path to template file
   * @param {boolean} [options.cotEnabled=true] - Whether to inject CoT
   * @param {string} [options.cotMode='prompt'] - 'prompt' | 'reflect'
   * @param {string} [options.cotStyle='step-by-step'] - 'step-by-step' | 'pros-cons' | 'custom'
   * @param {string} [options.cotCustomInstructions] - Custom CoT text (when cotStyle='custom')
   */
  constructor({
    systemPromptTemplate,
    systemPromptFile,
    cotEnabled = true,
    cotMode = 'prompt',
    cotStyle = 'step-by-step',
    cotCustomInstructions = null,
  } = {}) {
    this._template = new PromptTemplate({
      inline: systemPromptTemplate,
      file: systemPromptFile,
    });

    this._cotEnabled = cotEnabled;
    this._cotMode = cotMode;
    this._cotBlock = this._selectCotBlock({ cotEnabled, cotMode, cotStyle, cotCustomInstructions });
  }

  /**
   * Resolve which CoT text (possibly none) gets appended to the system prompt.
   * Only 'prompt' mode appends anything here; 'reflect' is handled by
   * AgentRunner with a second LLM call.
   * @returns {string} The block to append, or '' when CoT is off / reflect-mode.
   */
  _selectCotBlock({ cotEnabled, cotMode, cotStyle, cotCustomInstructions }) {
    if (!cotEnabled || cotMode !== 'prompt') return '';
    if (cotCustomInstructions) {
      return `\n\n### Reasoning Approach\n${cotCustomInstructions}\n`;
    }
    // Unknown styles silently fall back to the default scaffold.
    return COT_BLOCKS[cotStyle] ?? COT_BLOCKS['step-by-step'];
  }

  /**
   * Whether this builder is configured for reflect-mode CoT.
   * AgentRunner checks this to know whether to run the reflection call.
   * @returns {boolean}
   */
  get isReflectMode() {
    return this._cotEnabled && this._cotMode === 'reflect';
  }

  /**
   * Render the template with the given variables and append the CoT block.
   * @param {Object} [opts]
   * @param {Record<string, string | number | boolean>} [opts.vars={}] - Template variables
   * @returns {Promise<string>} The final system prompt string.
   */
  async build({ vars = {} } = {}) {
    const base = await this._template.render(vars);
    return base + this._cotBlock;
  }
}
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
import fs from 'fs/promises';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Loads a prompt template from an inline string or a file on disk.
|
|
5
|
+
* Supports ${variable} interpolation.
|
|
6
|
+
*
|
|
7
|
+
* Template variables that are not present in vars are left as-is.
|
|
8
|
+
*/
|
|
9
|
+
export class PromptTemplate {
  /**
   * @param {Object} options
   * @param {string} [options.inline] - Inline template string
   * @param {string} [options.file] - Absolute path to a .md or .txt template file
   * @throws {Error} When neither source is provided.
   */
  constructor({ inline, file } = {}) {
    if (!inline && !file) {
      throw new Error('PromptTemplate: provide either inline or file');
    }
    this._inline = inline ?? null;
    this._file = file ?? null;
    this._raw = null; // raw template string, cached after first load
  }

  /**
   * Return the raw template string, loading it from the inline source or
   * from disk on first use and caching it thereafter.
   * @returns {Promise<string>}
   */
  async _load() {
    if (this._raw === null) {
      this._raw = this._inline
        ? this._inline
        : await fs.readFile(this._file, 'utf-8');
    }
    return this._raw;
  }

  /**
   * Drop the cached template so the next render re-reads the source
   * (useful if the file changes on disk).
   */
  invalidateCache() {
    this._raw = null;
  }

  /**
   * Substitute ${key} placeholders with values from vars.
   * Placeholders with no matching key are left untouched.
   * @param {Record<string, string | number | boolean>} [vars={}]
   * @returns {Promise<string>} The rendered template.
   */
  async render(vars = {}) {
    const source = await this._load();
    const fill = (placeholder, name) =>
      name in vars ? String(vars[name]) : placeholder;
    return source.replace(/\$\{([\w.\-]+)\}/g, fill);
  }
}
|