@hyperspaceng/neural-agent-core 0.64.1 → 0.65.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +34 -24
- package/dist/agent.d.ts +82 -141
- package/dist/agent.d.ts.map +1 -1
- package/dist/agent.js +301 -328
- package/dist/agent.js.map +1 -1
- package/dist/types.d.ts +44 -8
- package/dist/types.d.ts.map +1 -1
- package/dist/types.js.map +1 -1
- package/package.json +2 -2
package/dist/agent.js
CHANGED
@@ -1,306 +1,366 @@
-/**
- * Agent class that uses the agent-loop directly.
- * No transport abstraction - calls streamSimple via the loop.
- */
-import { getModel, streamSimple, } from "@hyperspaceng/neural-ai";
+import { streamSimple, } from "@hyperspaceng/neural-ai";
 import { runAgentLoop, runAgentLoopContinue } from "./agent-loop.js";
-/**
- * Default convertToLlm: Keep only LLM-compatible messages, convert attachments.
- */
 function defaultConvertToLlm(messages) {
-    return messages.filter((
+    return messages.filter((message) => message.role === "user" || message.role === "assistant" || message.role === "toolResult");
 }
-
-
-
-
-
-
-
+const EMPTY_USAGE = {
+    input: 0,
+    output: 0,
+    cacheRead: 0,
+    cacheWrite: 0,
+    totalTokens: 0,
+    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
+};
+const DEFAULT_MODEL = {
+    id: "unknown",
+    name: "unknown",
+    api: "unknown",
+    provider: "unknown",
+    baseUrl: "",
+    reasoning: false,
+    input: [],
+    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+    contextWindow: 0,
+    maxTokens: 0,
+};
+function createMutableAgentState(initialState) {
+    let tools = initialState?.tools?.slice() ?? [];
+    let messages = initialState?.messages?.slice() ?? [];
+    return {
+        systemPrompt: initialState?.systemPrompt ?? "",
+        model: initialState?.model ?? DEFAULT_MODEL,
+        thinkingLevel: initialState?.thinkingLevel ?? "off",
+        get tools() {
+            return tools;
+        },
+        set tools(nextTools) {
+            tools = nextTools.slice();
+        },
+        get messages() {
+            return messages;
+        },
+        set messages(nextMessages) {
+            messages = nextMessages.slice();
+        },
         isStreaming: false,
-
+        streamingMessage: undefined,
         pendingToolCalls: new Set(),
-
+        errorMessage: undefined,
     };
+}
+class PendingMessageQueue {
+    mode;
+    messages = [];
+    constructor(mode) {
+        this.mode = mode;
+    }
+    enqueue(message) {
+        this.messages.push(message);
+    }
+    hasItems() {
+        return this.messages.length > 0;
+    }
+    drain() {
+        if (this.mode === "all") {
+            const drained = this.messages.slice();
+            this.messages = [];
+            return drained;
+        }
+        const first = this.messages[0];
+        if (!first) {
+            return [];
+        }
+        this.messages = this.messages.slice(1);
+        return [first];
+    }
+    clear() {
+        this.messages = [];
+    }
+}
+/**
+ * Stateful wrapper around the low-level agent loop.
+ *
+ * `Agent` owns the current transcript, emits lifecycle events, executes tools,
+ * and exposes queueing APIs for steering and follow-up messages.
+ */
+export class Agent {
+    _state;
     listeners = new Set();
-
+    steeringQueue;
+    followUpQueue;
     convertToLlm;
     transformContext;
-    steeringQueue = [];
-    followUpQueue = [];
-    steeringMode;
-    followUpMode;
     streamFn;
-    _sessionId;
     getApiKey;
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        this.
-        this.
-        this.
-        this.
-        this.
-        this.
-        this.
-        this.
-        this.
-        this.
-
-
-
-
-
-        return this._sessionId;
-    }
-    /**
-     * Set the session ID for provider caching.
-     * Call this when switching sessions (new session, branch, resume).
-     */
-    set sessionId(value) {
-        this._sessionId = value;
-    }
-    /**
-     * Get the current thinking budgets.
-     */
-    get thinkingBudgets() {
-        return this._thinkingBudgets;
-    }
-    /**
-     * Set custom thinking budgets for token-based providers.
-     */
-    set thinkingBudgets(value) {
-        this._thinkingBudgets = value;
-    }
-    /**
-     * Get the current preferred transport.
-     */
-    get transport() {
-        return this._transport;
-    }
-    /**
-     * Set the preferred transport.
-     */
-    setTransport(value) {
-        this._transport = value;
+    onPayload;
+    beforeToolCall;
+    afterToolCall;
+    activeRun;
+    /** Session identifier forwarded to providers for cache-aware backends. */
+    sessionId;
+    /** Optional per-level thinking token budgets forwarded to the stream function. */
+    thinkingBudgets;
+    /** Preferred transport forwarded to the stream function. */
+    transport;
+    /** Optional cap for provider-requested retry delays. */
+    maxRetryDelayMs;
+    /** Tool execution strategy for assistant messages that contain multiple tool calls. */
+    toolExecution;
+    constructor(options = {}) {
+        this._state = createMutableAgentState(options.initialState);
+        this.convertToLlm = options.convertToLlm ?? defaultConvertToLlm;
+        this.transformContext = options.transformContext;
+        this.streamFn = options.streamFn ?? streamSimple;
+        this.getApiKey = options.getApiKey;
+        this.onPayload = options.onPayload;
+        this.beforeToolCall = options.beforeToolCall;
+        this.afterToolCall = options.afterToolCall;
+        this.steeringQueue = new PendingMessageQueue(options.steeringMode ?? "one-at-a-time");
+        this.followUpQueue = new PendingMessageQueue(options.followUpMode ?? "one-at-a-time");
+        this.sessionId = options.sessionId;
+        this.thinkingBudgets = options.thinkingBudgets;
+        this.transport = options.transport ?? "sse";
+        this.maxRetryDelayMs = options.maxRetryDelayMs;
+        this.toolExecution = options.toolExecution ?? "parallel";
     }
     /**
-     *
+     * Subscribe to agent lifecycle events.
+     *
+     * Listener promises are awaited in subscription order and are included in
+     * the current run's settlement. Listeners also receive the active abort
+     * signal for the current run.
+     *
+     * `agent_end` is the final emitted event for a run, but the agent does not
+     * become idle until all awaited listeners for that event have settled.
      */
-
-
+    subscribe(listener) {
+        this.listeners.add(listener);
+        return () => this.listeners.delete(listener);
     }
     /**
-     *
-     *
+     * Current agent state.
+     *
+     * Assigning `state.tools` or `state.messages` copies the provided top-level array.
     */
-    set maxRetryDelayMs(value) {
-        this._maxRetryDelayMs = value;
-    }
-    get toolExecution() {
-        return this._toolExecution;
-    }
-    setToolExecution(value) {
-        this._toolExecution = value;
-    }
-    setBeforeToolCall(value) {
-        this._beforeToolCall = value;
-    }
-    setAfterToolCall(value) {
-        this._afterToolCall = value;
-    }
     get state() {
         return this._state;
     }
-
-
-
-    }
-    // State mutators
-    setSystemPrompt(v) {
-        this._state.systemPrompt = v;
-    }
-    setModel(m) {
-        this._state.model = m;
-    }
-    setThinkingLevel(l) {
-        this._state.thinkingLevel = l;
-    }
-    setSteeringMode(mode) {
-        this.steeringMode = mode;
+    /** Controls how queued steering messages are drained. */
+    set steeringMode(mode) {
+        this.steeringQueue.mode = mode;
     }
-
-        return this.
+    get steeringMode() {
+        return this.steeringQueue.mode;
     }
-
-
+    /** Controls how queued follow-up messages are drained. */
+    set followUpMode(mode) {
+        this.followUpQueue.mode = mode;
     }
-
-        return this.
+    get followUpMode() {
+        return this.followUpQueue.mode;
    }
-
-
+    /** Queue a message to be injected after the current assistant turn finishes. */
+    steer(message) {
+        this.steeringQueue.enqueue(message);
     }
-
-
-
-    appendMessage(m) {
-        this._state.messages = [...this._state.messages, m];
-    }
-    /**
-     * Queue a steering message while the agent is running.
-     * Delivered after the current assistant turn finishes executing its tool calls,
-     * before the next LLM call.
-     */
-    steer(m) {
-        this.steeringQueue.push(m);
-    }
-    /**
-     * Queue a follow-up message to be processed after the agent finishes.
-     * Delivered only when agent has no more tool calls or steering messages.
-     */
-    followUp(m) {
-        this.followUpQueue.push(m);
+    /** Queue a message to run only after the agent would otherwise stop. */
+    followUp(message) {
+        this.followUpQueue.enqueue(message);
     }
+    /** Remove all queued steering messages. */
     clearSteeringQueue() {
-        this.steeringQueue
+        this.steeringQueue.clear();
     }
+    /** Remove all queued follow-up messages. */
     clearFollowUpQueue() {
-        this.followUpQueue
+        this.followUpQueue.clear();
     }
+    /** Remove all queued steering and follow-up messages. */
     clearAllQueues() {
-        this.
-        this.
+        this.clearSteeringQueue();
+        this.clearFollowUpQueue();
     }
+    /** Returns true when either queue still contains pending messages. */
     hasQueuedMessages() {
-        return this.steeringQueue.
-    }
-    dequeueSteeringMessages() {
-        if (this.steeringMode === "one-at-a-time") {
-            if (this.steeringQueue.length > 0) {
-                const first = this.steeringQueue[0];
-                this.steeringQueue = this.steeringQueue.slice(1);
-                return [first];
-            }
-            return [];
-        }
-        const steering = this.steeringQueue.slice();
-        this.steeringQueue = [];
-        return steering;
-    }
-    dequeueFollowUpMessages() {
-        if (this.followUpMode === "one-at-a-time") {
-            if (this.followUpQueue.length > 0) {
-                const first = this.followUpQueue[0];
-                this.followUpQueue = this.followUpQueue.slice(1);
-                return [first];
-            }
-            return [];
-        }
-        const followUp = this.followUpQueue.slice();
-        this.followUpQueue = [];
-        return followUp;
-    }
-    clearMessages() {
-        this._state.messages = [];
+        return this.steeringQueue.hasItems() || this.followUpQueue.hasItems();
     }
-    /**
+    /** Active abort signal for the current run, if any. */
     get signal() {
-        return this.abortController
+        return this.activeRun?.abortController.signal;
     }
+    /** Abort the current run, if one is active. */
     abort() {
-        this.abortController
+        this.activeRun?.abortController.abort();
     }
+    /**
+     * Resolve when the current run and all awaited event listeners have finished.
+     *
+     * This resolves after `agent_end` listeners settle.
+     */
     waitForIdle() {
-        return this.
+        return this.activeRun?.promise ?? Promise.resolve();
    }
+    /** Clear transcript state, runtime state, and queued messages. */
     reset() {
         this._state.messages = [];
         this._state.isStreaming = false;
-        this._state.
+        this._state.streamingMessage = undefined;
         this._state.pendingToolCalls = new Set();
-        this._state.
-        this.
-        this.
+        this._state.errorMessage = undefined;
+        this.clearFollowUpQueue();
+        this.clearSteeringQueue();
     }
     async prompt(input, images) {
-        if (this.
+        if (this.activeRun) {
             throw new Error("Agent is already processing a prompt. Use steer() or followUp() to queue messages, or wait for completion.");
         }
-        const
-
-            throw new Error("No model configured");
-        let msgs;
-        if (Array.isArray(input)) {
-            msgs = input;
-        }
-        else if (typeof input === "string") {
-            const content = [{ type: "text", text: input }];
-            if (images && images.length > 0) {
-                content.push(...images);
-            }
-            msgs = [
-                {
-                    role: "user",
-                    content,
-                    timestamp: Date.now(),
-                },
-            ];
-        }
-        else {
-            msgs = [input];
-        }
-        await this._runLoop(msgs);
+        const messages = this.normalizePromptInput(input, images);
+        await this.runPromptMessages(messages);
     }
-    /**
-     * Continue from current context (used for retries and resuming queued messages).
-     */
+    /** Continue from the current transcript. The last message must be a user or tool-result message. */
     async continue() {
-        if (this.
+        if (this.activeRun) {
             throw new Error("Agent is already processing. Wait for completion before continuing.");
         }
-        const
-        if (
+        const lastMessage = this._state.messages[this._state.messages.length - 1];
+        if (!lastMessage) {
             throw new Error("No messages to continue from");
         }
-        if (
-            const queuedSteering = this.
+        if (lastMessage.role === "assistant") {
+            const queuedSteering = this.steeringQueue.drain();
             if (queuedSteering.length > 0) {
-                await this.
+                await this.runPromptMessages(queuedSteering, { skipInitialSteeringPoll: true });
                 return;
             }
-            const
-            if (
-                await this.
+            const queuedFollowUps = this.followUpQueue.drain();
+            if (queuedFollowUps.length > 0) {
+                await this.runPromptMessages(queuedFollowUps);
                 return;
             }
             throw new Error("Cannot continue from message role: assistant");
         }
-        await this.
+        await this.runContinuation();
     }
-
+    normalizePromptInput(input, images) {
+        if (Array.isArray(input)) {
+            return input;
+        }
+        if (typeof input !== "string") {
+            return [input];
+        }
+        const content = [{ type: "text", text: input }];
+        if (images && images.length > 0) {
+            content.push(...images);
+        }
+        return [{ role: "user", content, timestamp: Date.now() }];
+    }
+    async runPromptMessages(messages, options = {}) {
+        await this.runWithLifecycle(async (signal) => {
+            await runAgentLoop(messages, this.createContextSnapshot(), this.createLoopConfig(options), (event) => this.processEvents(event), signal, this.streamFn);
+        });
+    }
+    async runContinuation() {
+        await this.runWithLifecycle(async (signal) => {
+            await runAgentLoopContinue(this.createContextSnapshot(), this.createLoopConfig(), (event) => this.processEvents(event), signal, this.streamFn);
+        });
+    }
+    createContextSnapshot() {
+        return {
+            systemPrompt: this._state.systemPrompt,
+            messages: this._state.messages.slice(),
+            tools: this._state.tools.slice(),
+        };
+    }
+    createLoopConfig(options = {}) {
+        let skipInitialSteeringPoll = options.skipInitialSteeringPoll === true;
+        return {
+            model: this._state.model,
+            reasoning: this._state.thinkingLevel === "off" ? undefined : this._state.thinkingLevel,
+            sessionId: this.sessionId,
+            onPayload: this.onPayload,
+            transport: this.transport,
+            thinkingBudgets: this.thinkingBudgets,
+            maxRetryDelayMs: this.maxRetryDelayMs,
+            toolExecution: this.toolExecution,
+            beforeToolCall: this.beforeToolCall,
+            afterToolCall: this.afterToolCall,
+            convertToLlm: this.convertToLlm,
+            transformContext: this.transformContext,
+            getApiKey: this.getApiKey,
+            getSteeringMessages: async () => {
+                if (skipInitialSteeringPoll) {
+                    skipInitialSteeringPoll = false;
+                    return [];
+                }
+                return this.steeringQueue.drain();
+            },
+            getFollowUpMessages: async () => this.followUpQueue.drain(),
+        };
+    }
+    async runWithLifecycle(executor) {
+        if (this.activeRun) {
+            throw new Error("Agent is already processing.");
+        }
+        const abortController = new AbortController();
+        let resolvePromise = () => { };
+        const promise = new Promise((resolve) => {
+            resolvePromise = resolve;
+        });
+        this.activeRun = { promise, resolve: resolvePromise, abortController };
+        this._state.isStreaming = true;
+        this._state.streamingMessage = undefined;
+        this._state.errorMessage = undefined;
+        try {
+            await executor(abortController.signal);
+        }
+        catch (error) {
+            await this.handleRunFailure(error, abortController.signal.aborted);
+        }
+        finally {
+            this.finishRun();
+        }
+    }
+    async handleRunFailure(error, aborted) {
+        const failureMessage = {
+            role: "assistant",
+            content: [{ type: "text", text: "" }],
+            api: this._state.model.api,
+            provider: this._state.model.provider,
+            model: this._state.model.id,
+            usage: EMPTY_USAGE,
+            stopReason: aborted ? "aborted" : "error",
+            errorMessage: error instanceof Error ? error.message : String(error),
+            timestamp: Date.now(),
+        };
+        this._state.messages.push(failureMessage);
+        this._state.errorMessage = failureMessage.errorMessage;
+        await this.processEvents({ type: "agent_end", messages: [failureMessage] });
+    }
+    finishRun() {
+        this._state.isStreaming = false;
+        this._state.streamingMessage = undefined;
+        this._state.pendingToolCalls = new Set();
+        this.activeRun?.resolve();
+        this.activeRun = undefined;
+    }
+    /**
+     * Reduce internal state for a loop event, then await listeners.
+     *
+     * `agent_end` only means no further loop events will be emitted. The run is
+     * considered idle later, after all awaited listeners for `agent_end` finish
+     * and `finishRun()` clears runtime-owned state.
+     */
+    async processEvents(event) {
         switch (event.type) {
             case "message_start":
-                this._state.
+                this._state.streamingMessage = event.message;
                 break;
             case "message_update":
-                this._state.
+                this._state.streamingMessage = event.message;
                 break;
             case "message_end":
-                this._state.
-                this.
+                this._state.streamingMessage = undefined;
+                this._state.messages.push(event.message);
                 break;
             case "tool_execution_start": {
                 const pendingToolCalls = new Set(this._state.pendingToolCalls);
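The hunk above replaces the 0.64.x setter-style configuration with constructor options and queue-backed steering/follow-up handling. A minimal usage sketch assembled only from the names visible in this hunk; it assumes the package root re-exports `Agent`, and it omits a real model/tool setup (without `initialState.model` the state falls back to the placeholder "unknown" model), so treat it as an illustration rather than the package's documented API:

import { Agent } from "@hyperspaceng/neural-agent-core"; // assumed root export

const agent = new Agent({
    initialState: { systemPrompt: "You are a release-notes assistant." }, // a real model/tools would be supplied here
    steeringMode: "one-at-a-time", // drain() hands the loop at most one queued steering message per poll
    followUpMode: "all",           // drain() hands over every queued follow-up at once
    toolExecution: "parallel",
    transport: "sse",
});

const unsubscribe = agent.subscribe(async (event, signal) => {
    // Listener promises are awaited; the agent is not idle until agent_end listeners settle.
    if (event.type === "message_end") {
        console.log(event.message);
    }
});

const run = agent.prompt("Summarize the changes in this release.");
// Steering messages are injected after the current assistant turn finishes.
agent.steer({ role: "user", content: [{ type: "text", text: "Keep it to three bullet points." }], timestamp: Date.now() });
await run;
await agent.waitForIdle();
unsubscribe();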
@@ -316,106 +376,19 @@ export class Agent {
             }
             case "turn_end":
                 if (event.message.role === "assistant" && event.message.errorMessage) {
-                    this._state.
+                    this._state.errorMessage = event.message.errorMessage;
                 }
                 break;
             case "agent_end":
-                this._state.
-                this._state.streamMessage = null;
+                this._state.streamingMessage = undefined;
                 break;
         }
-        this.
-
-
-     * Run the agent loop.
-     * If messages are provided, starts a new conversation turn with those messages.
-     * Otherwise, continues from existing context.
-     */
-    async _runLoop(messages, options) {
-        const model = this._state.model;
-        if (!model)
-            throw new Error("No model configured");
-        this.runningPrompt = new Promise((resolve) => {
-            this.resolveRunningPrompt = resolve;
-        });
-        this.abortController = new AbortController();
-        this._state.isStreaming = true;
-        this._state.streamMessage = null;
-        this._state.error = undefined;
-        const reasoning = this._state.thinkingLevel === "off" ? undefined : this._state.thinkingLevel;
-        const context = {
-            systemPrompt: this._state.systemPrompt,
-            messages: this._state.messages.slice(),
-            tools: this._state.tools,
-        };
-        let skipInitialSteeringPoll = options?.skipInitialSteeringPoll === true;
-        const config = {
-            model,
-            reasoning,
-            sessionId: this._sessionId,
-            onPayload: this._onPayload,
-            transport: this._transport,
-            thinkingBudgets: this._thinkingBudgets,
-            maxRetryDelayMs: this._maxRetryDelayMs,
-            toolExecution: this._toolExecution,
-            beforeToolCall: this._beforeToolCall,
-            afterToolCall: this._afterToolCall,
-            convertToLlm: this.convertToLlm,
-            transformContext: this.transformContext,
-            getApiKey: this.getApiKey,
-            getSteeringMessages: async () => {
-                if (skipInitialSteeringPoll) {
-                    skipInitialSteeringPoll = false;
-                    return [];
-                }
-                return this.dequeueSteeringMessages();
-            },
-            getFollowUpMessages: async () => this.dequeueFollowUpMessages(),
-        };
-        try {
-            if (messages) {
-                await runAgentLoop(messages, context, config, async (event) => this._processLoopEvent(event), this.abortController.signal, this.streamFn);
-            }
-            else {
-                await runAgentLoopContinue(context, config, async (event) => this._processLoopEvent(event), this.abortController.signal, this.streamFn);
-            }
-        }
-        catch (err) {
-            const errorMsg = {
-                role: "assistant",
-                content: [{ type: "text", text: "" }],
-                api: model.api,
-                provider: model.provider,
-                model: model.id,
-                usage: {
-                    input: 0,
-                    output: 0,
-                    cacheRead: 0,
-                    cacheWrite: 0,
-                    totalTokens: 0,
-                    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
-                },
-                stopReason: this.abortController?.signal.aborted ? "aborted" : "error",
-                errorMessage: err?.message || String(err),
-                timestamp: Date.now(),
-            };
-            this.appendMessage(errorMsg);
-            this._state.error = err?.message || String(err);
-            this.emit({ type: "agent_end", messages: [errorMsg] });
-        }
-        finally {
-            this._state.isStreaming = false;
-            this._state.streamMessage = null;
-            this._state.pendingToolCalls = new Set();
-            this.abortController = undefined;
-            this.resolveRunningPrompt?.();
-            this.runningPrompt = undefined;
-            this.resolveRunningPrompt = undefined;
+        const signal = this.activeRun?.abortController.signal;
+        if (!signal) {
+            throw new Error("Agent listener invoked outside active run");
         }
-    }
-    emit(e) {
         for (const listener of this.listeners) {
-            listener(
+            await listener(event, signal);
         }
     }
 }