@lightining/general.ai 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +178 -0
- package/README.md +809 -0
- package/dist/defaults.d.ts +7 -0
- package/dist/defaults.js +58 -0
- package/dist/defaults.js.map +1 -0
- package/dist/endpoint-adapters.d.ts +12 -0
- package/dist/endpoint-adapters.js +225 -0
- package/dist/endpoint-adapters.js.map +1 -0
- package/dist/general-ai.d.ts +13 -0
- package/dist/general-ai.js +54 -0
- package/dist/general-ai.js.map +1 -0
- package/dist/index.d.ts +8 -0
- package/dist/index.js +8 -0
- package/dist/index.js.map +1 -0
- package/dist/memory.d.ts +6 -0
- package/dist/memory.js +10 -0
- package/dist/memory.js.map +1 -0
- package/dist/prompts/endpoint-chat-completions.txt +7 -0
- package/dist/prompts/endpoint-responses.txt +7 -0
- package/dist/prompts/identity.txt +17 -0
- package/dist/prompts/memory.txt +9 -0
- package/dist/prompts/personality.txt +9 -0
- package/dist/prompts/protocol.txt +54 -0
- package/dist/prompts/safety.txt +10 -0
- package/dist/prompts/task.txt +7 -0
- package/dist/prompts/thinking.txt +10 -0
- package/dist/prompts/tools-subagents.txt +18 -0
- package/dist/prompts.d.ts +11 -0
- package/dist/prompts.js +126 -0
- package/dist/prompts.js.map +1 -0
- package/dist/protocol.d.ts +15 -0
- package/dist/protocol.js +393 -0
- package/dist/protocol.js.map +1 -0
- package/dist/runtime.d.ts +20 -0
- package/dist/runtime.js +871 -0
- package/dist/runtime.js.map +1 -0
- package/dist/tools.d.ts +21 -0
- package/dist/tools.js +49 -0
- package/dist/tools.js.map +1 -0
- package/dist/types.d.ts +358 -0
- package/dist/types.js +2 -0
- package/dist/types.js.map +1 -0
- package/dist/utils.d.ts +14 -0
- package/dist/utils.js +115 -0
- package/dist/utils.js.map +1 -0
- package/package.json +63 -0
package/dist/runtime.js
ADDED
|
@@ -0,0 +1,871 @@
|
|
|
1
|
+
import { DEFAULT_LIMITS, DEFAULT_SAFETY, DEFAULT_THINKING, } from "./defaults.js";
|
|
2
|
+
import { compileMessagesForChatCompletions, compileMessagesForResponses, extractChatTextDelta, extractTextFromChatCompletion, extractTextFromResponse, getReservedRequestKeys, stripReservedRequestKeys, RESERVED_AGENT_CHAT_KEYS, RESERVED_AGENT_RESPONSE_KEYS, } from "./endpoint-adapters.js";
|
|
3
|
+
import { renderPromptSections } from "./prompts.js";
|
|
4
|
+
import { parseProtocol, ProtocolStreamParser, validateProtocolSequence } from "./protocol.js";
|
|
5
|
+
import { aggregateUsage, buildMemorySnapshot, cloneMessage, jsonStringify, mergeStringLists, summarizeMessages, } from "./utils.js";
|
|
6
|
+
/**
 * Normalize a tool/subagent registry input into a plain name-keyed record.
 * Accepts a nullish value (empty registry), an array of named entries
 * (keyed by each entry's `name`), or an existing record (shallow-copied so
 * the caller's object is never mutated).
 */
function toRegistry(value) {
    if (!value) {
        return {};
    }
    if (Array.isArray(value)) {
        const registry = {};
        for (const entry of value) {
            registry[entry.name] = entry;
        }
        return registry;
    }
    return Object.assign({}, value);
}
|
|
15
|
+
/**
 * Merge two prompt-override objects map-by-map. Each of the four override
 * maps (sections, raw, data, blocks) is shallow-merged with `extra` keys
 * winning over `base` keys; missing inputs behave as empty maps.
 */
function mergePromptOverrides(base, extra) {
    const mergeMap = (key) => ({ ...base?.[key], ...extra?.[key] });
    return {
        sections: mergeMap("sections"),
        raw: mergeMap("raw"),
        data: mergeMap("data"),
        blocks: mergeMap("blocks"),
    };
}
|
|
35
|
+
/**
 * Shallow-merge any number of partial config records into a fresh object.
 * Later records win on key conflicts; nullish records are skipped
 * (Object.assign ignores null/undefined sources).
 */
function mergeConfigRecords(...records) {
    const merged = {};
    for (const record of records) {
        Object.assign(merged, record);
    }
    return merged;
}
|
|
38
|
+
/**
 * Resolve caller-supplied agent params against instance defaults
 * (deps.defaults.agent) and library built-ins (DEFAULT_SAFETY,
 * DEFAULT_THINKING, DEFAULT_LIMITS).
 *
 * Precedence everywhere is: runtime params > deps defaults > built-ins.
 * The input objects are not mutated; a fully-populated params object is
 * returned for use by the rest of the runtime.
 */
function normalizeAgentParams(deps, params) {
    const defaults = deps.defaults?.agent;
    // Safety input/output are merged separately so a partial override of
    // one nested map keeps the unrelated built-in fields intact.
    const safetyInput = {
        ...DEFAULT_SAFETY.input,
        ...defaults?.safety?.input,
        ...params.safety?.input,
    };
    const safetyOutput = {
        ...DEFAULT_SAFETY.output,
        ...defaults?.safety?.output,
        ...params.safety?.output,
    };
    const thinking = {
        ...DEFAULT_THINKING,
        ...defaults?.thinking,
        ...params.thinking,
        // checkpoints is replaced wholesale (not merged element-wise):
        // first defined value in params > defaults > built-in wins.
        checkpoints: params.thinking?.checkpoints ?? defaults?.thinking?.checkpoints ?? DEFAULT_THINKING.checkpoints,
    };
    const limits = {
        ...DEFAULT_LIMITS,
        ...defaults?.limits,
        ...params.limits,
    };
    const safety = {
        ...DEFAULT_SAFETY,
        ...defaults?.safety,
        ...params.safety,
        // Re-attach the field-by-field merged nested maps, overriding the
        // shallow spreads above.
        input: safetyInput,
        output: safetyOutput,
    };
    // Registries may be supplied as arrays or records; normalize both
    // levels before merging so runtime entries shadow default entries.
    const defaultTools = toRegistry(defaults?.tools?.registry);
    const runtimeTools = toRegistry(params.tools?.registry);
    const defaultSubagents = toRegistry(defaults?.subagents?.registry);
    const runtimeSubagents = toRegistry(params.subagents?.registry);
    const memoryConfig = {
        enabled: params.memory?.enabled ?? defaults?.memory?.enabled ?? true,
        sessionId: params.memory?.sessionId ?? defaults?.memory?.sessionId,
        load: params.memory?.load ?? defaults?.memory?.load ?? true,
        save: params.memory?.save ?? defaults?.memory?.save ?? true,
        // Adapter falls back to the dependency-injected one; may still be
        // undefined if neither is configured.
        adapter: params.memory?.adapter ?? deps.memoryAdapter,
        prompt: params.memory?.prompt ?? defaults?.memory?.prompt,
    };
    return {
        ...params,
        // debug cascades through four levels of configuration.
        debug: params.debug ?? defaults?.debug ?? deps.defaults?.debug ?? deps.debug,
        safety,
        thinking,
        limits,
        prompts: mergePromptOverrides(defaults?.prompts, params.prompts),
        tools: {
            enabled: params.tools?.enabled ?? defaults?.tools?.enabled ?? true,
            registry: {
                ...defaultTools,
                ...runtimeTools,
            },
            prompt: params.tools?.prompt ?? defaults?.tools?.prompt,
        },
        subagents: {
            enabled: params.subagents?.enabled ?? defaults?.subagents?.enabled ?? true,
            registry: {
                ...defaultSubagents,
                ...runtimeSubagents,
            },
            prompt: params.subagents?.prompt ?? defaults?.subagents?.prompt,
        },
        memory: memoryConfig,
        personality: {
            enabled: params.personality?.enabled ?? defaults?.personality?.enabled ?? true,
            profile: params.personality?.profile ?? defaults?.personality?.profile,
            persona: mergeConfigRecords(defaults?.personality?.persona, params.personality?.persona),
            style: mergeConfigRecords(defaults?.personality?.style, params.personality?.style),
            behavior: mergeConfigRecords(defaults?.personality?.behavior, params.personality?.behavior),
            boundaries: mergeConfigRecords(defaults?.personality?.boundaries, params.personality?.boundaries),
            instructions: params.personality?.instructions ??
                defaults?.personality?.instructions,
            prompt: params.personality?.prompt ?? defaults?.personality?.prompt,
        },
    };
}
|
|
117
|
+
/**
 * Render a flat key/value map as a titled bullet list:
 * "Title:" followed by one "- key: value" line per entry.
 * Returns "" for an empty map so callers can filter the section out.
 */
function renderConfigMap(title, value) {
    const pairs = Object.entries(value);
    if (pairs.length === 0) {
        return "";
    }
    const bullets = pairs.map(([key, entry]) => `- ${key}: ${entry}`);
    return [`${title}:`, ...bullets].join("\n");
}
|
|
124
|
+
/**
 * Describe the configured protocol tools for injection into the system
 * prompt: one bullet per tool, plus optional schema, access, and metadata
 * detail lines. Disabled or empty tool configs get a fixed notice.
 */
function renderToolsBlock(tools) {
    const registered = Object.values(tools.registry);
    if (!tools.enabled || registered.length === 0) {
        return "No General.AI protocol tools are configured for this run.";
    }
    const lines = ["Available protocol tools:"];
    for (const tool of registered) {
        lines.push(`- ${tool.name}: ${tool.description}`);
        if (tool.inputSchema) {
            lines.push(` Input schema: ${jsonStringify(tool.inputSchema)}`);
        }
        if (tool.access) {
            const grant = tool.access.subagents;
            let subagentsAccess;
            if (grant === undefined) {
                subagentsAccess = "all configured subagents";
            } else if (Array.isArray(grant)) {
                subagentsAccess = grant.join(", ");
            } else if (grant) {
                subagentsAccess = "all configured subagents";
            } else {
                subagentsAccess = "disabled";
            }
            lines.push(` Access: root=${String(tool.access.root ?? true)}, subagents=${subagentsAccess}`);
        }
        if (tool.metadata && Object.keys(tool.metadata).length > 0) {
            lines.push(` Metadata: ${jsonStringify(tool.metadata)}`);
        }
    }
    return lines.join("\n");
}
|
|
150
|
+
/**
 * Decide whether a tool may be exposed to a named subagent.
 * Rules (matching the original precedence):
 * - no `subagents` setting: allowed unless `root` is explicitly false;
 * - boolean `subagents`: wholesale allow/deny;
 * - array `subagents`: allow-list of subagent names.
 */
function isToolAllowedForSubagent(tool, subagentName) {
    const grant = tool.access?.subagents;
    if (grant === undefined) {
        // With no explicit subagent grant, root:false means fully private.
        return tool.access?.root !== false;
    }
    if (typeof grant === "boolean") {
        return grant;
    }
    return grant.includes(subagentName);
}
|
|
163
|
+
/**
 * Project a tools config down to what a named subagent may use.
 * A disabled config yields an empty registry; otherwise the registry is
 * filtered through isToolAllowedForSubagent and the resulting `enabled`
 * flag reflects whether any tool survived the filter.
 */
function filterToolsForSubagent(tools, subagentName) {
    if (!tools.enabled) {
        return { enabled: false, registry: {}, prompt: tools.prompt };
    }
    const registry = {};
    for (const [name, tool] of Object.entries(tools.registry)) {
        if (isToolAllowedForSubagent(tool, subagentName)) {
            registry[name] = tool;
        }
    }
    return {
        enabled: Object.keys(registry).length > 0,
        registry,
        prompt: tools.prompt,
    };
}
|
|
178
|
+
/**
 * Describe the configured protocol subagents for prompt injection:
 * one "- name: description" bullet per subagent, or a fixed notice when
 * subagents are disabled or none are registered.
 */
function renderSubagentsBlock(subagents) {
    const entries = Object.values(subagents.registry);
    if (!subagents.enabled || entries.length === 0) {
        return "No General.AI protocol subagents are configured for this run.";
    }
    const bullets = entries.map((subagent) => `- ${subagent.name}: ${subagent.description}`);
    return ["Available protocol subagents:", ...bullets].join("\n");
}
|
|
188
|
+
/**
 * Render the personality configuration as prompt text. When the override
 * is disabled (or absent) a fixed fallback sentence is returned; otherwise
 * the populated sections are joined with blank lines, skipping empties.
 */
function renderPersonalityBlock(params) {
    const personality = params.personality;
    if (!personality?.enabled) {
        return "No custom personality override is active. Stay direct, accurate, and adaptive.";
    }
    const sections = [];
    if (personality.profile) {
        sections.push(`Profile: ${personality.profile}`);
    }
    sections.push(renderConfigMap("Persona", personality.persona ?? {}));
    sections.push(renderConfigMap("Style", personality.style ?? {}));
    sections.push(renderConfigMap("Behavior", personality.behavior ?? {}));
    sections.push(renderConfigMap("Boundaries", personality.boundaries ?? {}));
    if (personality.instructions) {
        sections.push(`Instructions:\n${personality.instructions}`);
    }
    sections.push(personality.prompt ?? "");
    return sections.filter(Boolean).join("\n\n");
}
|
|
207
|
+
/**
 * Render the effective safety configuration as prompt text: mode line,
 * input/output enablement, then any optional instruction/prompt sections,
 * joined with blank lines.
 */
function renderSafetyBlock(params) {
    const safety = params.safety;
    const mode = safety.enabled ? safety.mode : "off";
    const blocks = [
        `Safety mode: ${mode}`,
        `Input safety enabled: ${String(safety.input.enabled)}`,
        `Output safety enabled: ${String(safety.output.enabled)}`,
    ];
    if (safety.input.instructions) {
        blocks.push(`Input safety instructions:\n${safety.input.instructions}`);
    }
    if (safety.output.instructions) {
        blocks.push(`Output safety instructions:\n${safety.output.instructions}`);
    }
    if (safety.prompt) {
        blocks.push(safety.prompt);
    }
    return blocks.join("\n\n");
}
|
|
222
|
+
/**
 * Render the thinking configuration as prompt text: enablement, strategy,
 * effort, a bullet list of checkpoints, and an optional extra prompt,
 * joined with blank lines.
 */
function renderThinkingBlock(params) {
    const thinking = params.thinking;
    const checkpointList = thinking.checkpoints
        .map((checkpoint) => `- ${checkpoint}`)
        .join("\n");
    const parts = [
        `Thinking enabled: ${String(thinking.enabled)}`,
        `Thinking strategy: ${thinking.strategy}`,
        `Thinking effort: ${thinking.effort}`,
        `Thinking checkpoints:\n${checkpointList}`,
    ];
    if (thinking.prompt) {
        parts.push(thinking.prompt);
    }
    return parts.join("\n\n");
}
|
|
234
|
+
/**
 * Render a loaded memory snapshot as prompt text (summary, preference and
 * note bullet lists, optional memory prompt). Returns a fixed notice when
 * memory is disabled or no snapshot has been loaded.
 */
function renderMemoryBlock(params, snapshot) {
    if (!params.memory.enabled || !snapshot) {
        return "No memory snapshot is currently loaded.";
    }
    const toBullets = (items) => items.map((item) => `- ${item}`).join("\n");
    const sections = [];
    if (snapshot.summary) {
        sections.push(`Summary:\n${snapshot.summary}`);
    }
    if (snapshot.preferences?.length) {
        sections.push(`Preferences:\n${toBullets(snapshot.preferences)}`);
    }
    if (snapshot.notes?.length) {
        sections.push(`Notes:\n${toBullets(snapshot.notes)}`);
    }
    if (params.memory.prompt) {
        sections.push(params.memory.prompt);
    }
    return sections.join("\n\n");
}
|
|
251
|
+
/**
 * Render the per-run task context: endpoint, model, chat-role
 * compatibility mode, a preview of the conversation, and any run metadata
 * as a bullet list (with a fixed sentence when no metadata exists).
 */
function renderTaskBlock(params) {
    const metadataEntries = Object.entries(params.metadata ?? {});
    let metadata = "No additional run metadata was provided.";
    if (metadataEntries.length > 0) {
        metadata = metadataEntries
            .map(([key, value]) => `- ${key}: ${value}`)
            .join("\n");
    }
    const parts = [
        `Endpoint: ${params.endpoint}`,
        `Model: ${params.model}`,
        `Chat role mode: ${params.compatibility?.chatRoleMode ?? "modern"}`,
        `Conversation preview:\n${summarizeMessages(params.messages)}`,
        `Run metadata:\n${metadata}`,
    ];
    return parts.join("\n\n");
}
|
|
265
|
+
export class AgentRuntime {
|
|
266
|
+
deps;
|
|
267
|
+
depth;
|
|
268
|
+
#params;
|
|
269
|
+
#history;
|
|
270
|
+
#memorySnapshot = null;
|
|
271
|
+
#memoryLoaded = false;
|
|
272
|
+
#prompt;
|
|
273
|
+
#promptPromise;
|
|
274
|
+
#events = [];
|
|
275
|
+
#rawOutputs = [];
|
|
276
|
+
#cleanedChunks = [];
|
|
277
|
+
#warnings = [];
|
|
278
|
+
#endpointResults = [];
|
|
279
|
+
#step = 0;
|
|
280
|
+
#toolCallCount = 0;
|
|
281
|
+
#subagentCallCount = 0;
|
|
282
|
+
#protocolErrorCount = 0;
|
|
283
|
+
#notes = [];
|
|
284
|
+
/**
 * @param deps  Injected runtime services (openai client, promptPack,
 *              memoryAdapter, runSubagent, defaults) — shape defined by
 *              the caller; see normalizeAgentParams for what is read.
 * @param params Raw run parameters; merged with defaults immediately.
 * @param depth Subagent nesting level; 0 for the root agent.
 */
constructor(deps, params, depth = 0) {
    this.deps = deps;
    this.depth = depth;
    // Resolve params against deps/library defaults once, up front.
    this.#params = normalizeAgentParams(deps, params);
    // Clone incoming messages so the caller's objects are never mutated
    // as the runtime appends retry/tool-result history.
    this.#history = params.messages.map(cloneMessage);
}
|
|
290
|
+
/**
 * Load the memory snapshot (if configured) and return the fully rendered
 * prompt sections without executing any model steps. Useful for
 * inspecting/debugging the prompt a run would use.
 */
async renderPrompts() {
    await this.#ensureMemory();
    return await this.#ensurePrompt();
}
|
|
294
|
+
// True while the recoverable-error retry budget
// (limits.maxProtocolErrors) has not been exhausted.
#canRetryProtocolError() {
    return this.#protocolErrorCount < this.#params.limits.maxProtocolErrors;
}
|
|
297
|
+
/**
 * Record a recoverable failure and, if the retry budget allows, append
 * corrective context to the conversation so the next step can recover.
 *
 * @param reason    Machine-readable failure category (e.g. "tool_call_failure").
 * @param detail    Human-readable detail included in the warning and the
 *                  developer retry message.
 * @param rawOutput The model's failed output, if any; preserved in history
 *                  as assistant commentary so the model can see it.
 * @returns true when a retry was queued; false when the budget is spent
 *          (callers then escalate to a thrown error).
 */
#enqueueRetry(reason, detail, rawOutput) {
    if (!this.#canRetryProtocolError()) {
        return false;
    }
    this.#protocolErrorCount += 1;
    this.#warnings.push(`Retrying after recoverable runtime issue ${this.#protocolErrorCount}/${this.#params.limits.maxProtocolErrors}: ${reason}. ${detail}`);
    if (rawOutput) {
        this.#history.push({
            role: "assistant",
            phase: "commentary",
            content: rawOutput,
        });
    }
    // Developer-role instructions telling the model exactly how to retry.
    this.#history.push({
        role: "developer",
        content: [
            `Recoverable runtime issue: ${reason}.`,
            detail,
            "Retry the request from the latest valid state.",
            "Follow the General.AI protocol exactly.",
            "Emit every protocol marker on its own line starting at column 1.",
            "Do not place multiple markers on the same line.",
            "If you call a tool or subagent, emit only that marker and then stop the turn.",
            "If a previous tool or subagent already succeeded, do not repeat it unless the new request explicitly requires another call.",
        ].join("\n\n"),
    });
    return true;
}
|
|
325
|
+
/**
 * Execute the agent loop without streaming. Repeats model steps until the
 * model stops requesting tools/subagents, maxSteps is hit, or an
 * unrecoverable failure occurs, then finalizes and returns the result.
 * Each failure class (parse failure, protocol error event, tool failure,
 * subagent failure) goes through #enqueueRetry before throwing.
 */
async run() {
    await this.#ensureMemory();
    const prompt = await this.#ensurePrompt();
    // Agent mode owns some request keys; warn about any the caller set.
    const strippedRequestKeys = getReservedRequestKeys(this.#params.endpoint, this.#params.request);
    this.#warnings.push(...strippedRequestKeys.map((key) => `Reserved request key '${key}' was ignored in agent mode.`));
    while (this.#step < this.#params.limits.maxSteps) {
        this.#step += 1;
        const stepResult = await this.#runSingleStep();
        // Parse failures are signaled via a warning with a known prefix
        // produced by #parseRawOutput.
        const parseFailure = stepResult.parsed.warnings.find((warning) => warning.startsWith("Protocol parse failure on step"));
        if (parseFailure) {
            if (this.#enqueueRetry("protocol_parse_failure", parseFailure, stepResult.rawOutput)) {
                continue;
            }
            throw new Error(parseFailure);
        }
        // Only successfully parsed steps are recorded in the transcript.
        this.#recordStep(stepResult.parsed, stepResult.rawOutput, stepResult.endpointResult);
        if (stepResult.action?.kind === "error") {
            const detail = jsonStringify(stepResult.action.payload);
            if (this.#enqueueRetry("protocol_error_event", detail, stepResult.rawOutput)) {
                continue;
            }
            throw new Error(`Model emitted an unrecoverable protocol error event: ${detail}`);
        }
        if (stepResult.action?.kind === "call_tool") {
            try {
                await this.#handleToolCall(stepResult.action.name, stepResult.action.arguments);
            }
            catch (error) {
                const detail = error instanceof Error ? error.message : String(error);
                if (this.#enqueueRetry("tool_call_failure", detail, stepResult.rawOutput)) {
                    continue;
                }
                throw error;
            }
            continue;
        }
        if (stepResult.action?.kind === "call_subagent") {
            try {
                await this.#handleSubagentCall(stepResult.action.name, stepResult.action.arguments);
            }
            catch (error) {
                const detail = error instanceof Error ? error.message : String(error);
                if (this.#enqueueRetry("subagent_call_failure", detail, stepResult.rawOutput)) {
                    continue;
                }
                throw error;
            }
            continue;
        }
        // No actionable event: the model produced its final output.
        break;
    }
    if (this.#step >= this.#params.limits.maxSteps) {
        this.#warnings.push(`Agent stopped after reaching maxSteps=${this.#params.limits.maxSteps}.`);
    }
    return await this.#finalize(prompt, strippedRequestKeys);
}
|
|
381
|
+
/**
 * Streaming variant of run(): an async generator mirroring the same agent
 * loop, yielding lifecycle events (run_started, prompt_rendered,
 * step_started, raw/writing deltas, protocol events, tool/subagent
 * start+result, warnings, run_completed) as they happen.
 * Returns the same final result object run() produces.
 */
async *stream() {
    yield {
        type: "run_started",
        endpoint: this.#params.endpoint,
        model: this.#params.model,
    };
    await this.#ensureMemory();
    const prompt = await this.#ensurePrompt();
    yield {
        type: "prompt_rendered",
        prompt,
    };
    // Same reserved-key handling as run(), but each warning is also
    // surfaced as a stream event.
    const strippedRequestKeys = getReservedRequestKeys(this.#params.endpoint, this.#params.request);
    for (const key of strippedRequestKeys) {
        const message = `Reserved request key '${key}' was ignored in agent mode.`;
        this.#warnings.push(message);
        yield {
            type: "warning",
            message,
        };
    }
    while (this.#step < this.#params.limits.maxSteps) {
        this.#step += 1;
        yield {
            type: "step_started",
            step: this.#step,
        };
        const stepResult = await this.#runSingleStreamingStep();
        // NOTE: deltas are re-emitted after the step's stream completed,
        // not forwarded live from the underlying SDK stream.
        for (const text of stepResult.rawDeltas) {
            yield {
                type: "raw_text_delta",
                step: this.#step,
                text,
            };
        }
        for (const delta of stepResult.parsed.deltas) {
            if (delta.block === "writing") {
                yield {
                    type: "writing_delta",
                    step: this.#step,
                    text: delta.text,
                };
            }
        }
        // Failure handling mirrors run(); each retry also yields a warning.
        const parseFailure = stepResult.parsed.warnings.find((warning) => warning.startsWith("Protocol parse failure on step"));
        if (parseFailure) {
            if (this.#enqueueRetry("protocol_parse_failure", parseFailure, stepResult.rawOutput)) {
                yield {
                    type: "warning",
                    message: parseFailure,
                };
                continue;
            }
            throw new Error(parseFailure);
        }
        this.#recordStep(stepResult.parsed, stepResult.rawOutput, stepResult.endpointResult);
        for (const event of stepResult.parsed.events) {
            yield {
                type: "protocol_event",
                step: this.#step,
                event,
            };
        }
        if (stepResult.action?.kind === "error") {
            const detail = jsonStringify(stepResult.action.payload);
            if (this.#enqueueRetry("protocol_error_event", detail, stepResult.rawOutput)) {
                yield {
                    type: "warning",
                    message: `Retrying after model protocol error: ${detail}`,
                };
                continue;
            }
            throw new Error(`Model emitted an unrecoverable protocol error event: ${detail}`);
        }
        if (stepResult.action?.kind === "call_tool") {
            yield {
                type: "tool_started",
                step: this.#step,
                name: stepResult.action.name,
                arguments: stepResult.action.arguments,
            };
            let result;
            try {
                result = await this.#handleToolCall(stepResult.action.name, stepResult.action.arguments);
            }
            catch (error) {
                const detail = error instanceof Error ? error.message : String(error);
                if (this.#enqueueRetry("tool_call_failure", detail, stepResult.rawOutput)) {
                    yield {
                        type: "warning",
                        message: `Retrying after tool failure: ${detail}`,
                    };
                    continue;
                }
                throw error;
            }
            yield {
                type: "tool_result",
                step: this.#step,
                name: stepResult.action.name,
                result,
            };
            continue;
        }
        if (stepResult.action?.kind === "call_subagent") {
            yield {
                type: "subagent_started",
                step: this.#step,
                name: stepResult.action.name,
                arguments: stepResult.action.arguments,
            };
            let result;
            try {
                result = await this.#handleSubagentCall(stepResult.action.name, stepResult.action.arguments);
            }
            catch (error) {
                const detail = error instanceof Error ? error.message : String(error);
                if (this.#enqueueRetry("subagent_call_failure", detail, stepResult.rawOutput)) {
                    yield {
                        type: "warning",
                        message: `Retrying after subagent failure: ${detail}`,
                    };
                    continue;
                }
                throw error;
            }
            yield {
                type: "subagent_result",
                step: this.#step,
                name: stepResult.action.name,
                result,
            };
            continue;
        }
        // No actionable event: the model produced its final output.
        break;
    }
    if (this.#step >= this.#params.limits.maxSteps) {
        const message = `Agent stopped after reaching maxSteps=${this.#params.limits.maxSteps}.`;
        this.#warnings.push(message);
        yield {
            type: "warning",
            message,
        };
    }
    const result = await this.#finalize(prompt, strippedRequestKeys);
    yield {
        type: "run_completed",
        result,
    };
    return result;
}
|
|
532
|
+
async #ensureMemory() {
|
|
533
|
+
if (!this.#params.memory.enabled || !this.#params.memory.sessionId || !this.#params.memory.load) {
|
|
534
|
+
return;
|
|
535
|
+
}
|
|
536
|
+
if (this.#memoryLoaded) {
|
|
537
|
+
return;
|
|
538
|
+
}
|
|
539
|
+
this.#memorySnapshot = await this.#params.memory.adapter.load({
|
|
540
|
+
sessionId: this.#params.memory.sessionId,
|
|
541
|
+
});
|
|
542
|
+
this.#memoryLoaded = true;
|
|
543
|
+
}
|
|
544
|
+
/**
 * Render (and memoize) the system prompt sections. The endpoint that is
 * NOT in use gets its guidance section blanked out, unless a runtime
 * section override explicitly replaces it. Returns the rendered prompt.
 */
async #ensurePrompt() {
    const endpointSections = this.#params.endpoint === "responses"
        ? { endpoint_chat_completions: "" }
        : { endpoint_responses: "" };
    // ??= memoizes the first render; later calls reuse the same promise,
    // so config/memory state captured here is frozen for the run.
    this.#promptPromise ??= renderPromptSections({
        promptPack: this.deps.promptPack,
        runtimeOverrides: {
            ...this.#params.prompts,
            sections: {
                // Runtime section overrides win over the endpoint blanking.
                ...endpointSections,
                ...this.#params.prompts.sections,
            },
        },
        context: {
            // Scalar template data available to the prompt pack.
            data: {
                endpoint: this.#params.endpoint,
                model: this.#params.model,
                safety_mode: this.#params.safety.enabled ? this.#params.safety.mode : "off",
                thinking_strategy: this.#params.thinking.strategy,
                debug_enabled: this.#params.debug,
            },
            // Pre-rendered multi-line blocks injected into the prompt.
            blocks: {
                personality_config: renderPersonalityBlock(this.#params),
                safety_config: renderSafetyBlock(this.#params),
                thinking_config: renderThinkingBlock(this.#params),
                tools_registry: renderToolsBlock(this.#params.tools),
                subagents_registry: renderSubagentsBlock(this.#params.subagents),
                memory_context: renderMemoryBlock(this.#params, this.#memorySnapshot),
                task_context: renderTaskBlock(this.#params),
            },
        },
    });
    this.#prompt = await this.#promptPromise;
    return this.#prompt;
}
|
|
579
|
+
async #runSingleStep() {
|
|
580
|
+
if (this.#params.endpoint === "responses") {
|
|
581
|
+
const body = this.#buildResponsesRequest(false);
|
|
582
|
+
const result = await this.deps.openai.responses.create(body);
|
|
583
|
+
const rawOutput = extractTextFromResponse(result);
|
|
584
|
+
const parsed = this.#parseRawOutput(rawOutput);
|
|
585
|
+
return {
|
|
586
|
+
rawOutput,
|
|
587
|
+
parsed,
|
|
588
|
+
action: parsed.events.find((event) => event.kind === "call_tool" || event.kind === "call_subagent"
|
|
589
|
+
? true
|
|
590
|
+
: event.kind === "error"),
|
|
591
|
+
endpointResult: result,
|
|
592
|
+
};
|
|
593
|
+
}
|
|
594
|
+
const body = this.#buildChatRequest(false);
|
|
595
|
+
const result = await this.deps.openai.chat.completions.create(body);
|
|
596
|
+
const rawOutput = extractTextFromChatCompletion(result);
|
|
597
|
+
const parsed = this.#parseRawOutput(rawOutput);
|
|
598
|
+
return {
|
|
599
|
+
rawOutput,
|
|
600
|
+
parsed,
|
|
601
|
+
action: parsed.events.find((event) => event.kind === "call_tool" || event.kind === "call_subagent"
|
|
602
|
+
? true
|
|
603
|
+
: event.kind === "error"),
|
|
604
|
+
endpointResult: result,
|
|
605
|
+
};
|
|
606
|
+
}
|
|
607
|
+
/**
 * Execute one streaming model step. Text deltas are fed incrementally
 * into a ProtocolStreamParser while also being collected verbatim, then
 * the finished parse is returned in the same shape as #runSingleStep plus
 * the ordered raw delta list.
 */
async #runSingleStreamingStep() {
    const rawDeltas = [];
    const parser = new ProtocolStreamParser({ step: this.#step });
    if (this.#params.endpoint === "responses") {
        const stream = this.deps.openai.responses.stream(this.#buildResponsesRequest(true));
        for await (const event of stream) {
            // Only output-text deltas carry protocol content.
            if (event.type !== "response.output_text.delta") {
                continue;
            }
            rawDeltas.push(event.delta);
            parser.push(event.delta);
        }
        const endpointResult = await stream.finalResponse();
        // Fall back to the final aggregated text if no deltas arrived.
        const rawOutput = rawDeltas.join("") || endpointResult.output_text;
        const parsed = parser.end();
        return {
            rawOutput,
            rawDeltas,
            parsed,
            // Matches call_tool, call_subagent, or error: the expression
            // parses as `(a || b) ? true : c`, i.e. a || b || c.
            action: parsed.events.find((event) => event.kind === "call_tool" || event.kind === "call_subagent"
                ? true
                : event.kind === "error"),
            endpointResult,
        };
    }
    const stream = this.deps.openai.chat.completions.stream(this.#buildChatRequest(true));
    for await (const chunk of stream) {
        const delta = extractChatTextDelta(chunk);
        if (!delta) {
            continue;
        }
        rawDeltas.push(delta);
        parser.push(delta);
    }
    const endpointResult = stream.currentChatCompletionSnapshot;
    const rawOutput = rawDeltas.join("");
    const parsed = parser.end();
    return {
        rawOutput,
        rawDeltas,
        parsed,
        // Same actionable-event predicate as the responses branch above.
        action: parsed.events.find((event) => event.kind === "call_tool" || event.kind === "call_subagent"
            ? true
            : event.kind === "error"),
        endpointResult,
    };
}
|
|
654
|
+
#parseRawOutput(rawOutput) {
|
|
655
|
+
try {
|
|
656
|
+
return parseProtocol(rawOutput, { step: this.#step });
|
|
657
|
+
}
|
|
658
|
+
catch (error) {
|
|
659
|
+
const message = `Protocol parse failure on step ${this.#step}: ${error instanceof Error ? error.message : String(error)}`;
|
|
660
|
+
this.#warnings.push(message);
|
|
661
|
+
return {
|
|
662
|
+
events: [
|
|
663
|
+
{
|
|
664
|
+
kind: "writing",
|
|
665
|
+
content: rawOutput,
|
|
666
|
+
step: this.#step,
|
|
667
|
+
},
|
|
668
|
+
],
|
|
669
|
+
deltas: [
|
|
670
|
+
{
|
|
671
|
+
type: "writing_delta",
|
|
672
|
+
block: "writing",
|
|
673
|
+
text: rawOutput,
|
|
674
|
+
},
|
|
675
|
+
],
|
|
676
|
+
warnings: [message],
|
|
677
|
+
};
|
|
678
|
+
}
|
|
679
|
+
}
|
|
680
|
+
#recordStep(parsed, rawOutput, endpointResult) {
|
|
681
|
+
this.#rawOutputs.push(rawOutput);
|
|
682
|
+
this.#endpointResults.push(endpointResult);
|
|
683
|
+
this.#warnings.push(...parsed.warnings);
|
|
684
|
+
this.#warnings.push(...validateProtocolSequence(parsed.events, this.#params.safety.enabled && this.#params.safety.mode !== "off"));
|
|
685
|
+
for (const event of parsed.events) {
|
|
686
|
+
this.#events.push(event);
|
|
687
|
+
if (event.kind === "writing") {
|
|
688
|
+
this.#cleanedChunks.push(event.content);
|
|
689
|
+
}
|
|
690
|
+
}
|
|
691
|
+
}
|
|
692
|
+
/**
 * Execute a protocol tool call requested by the model.
 * Validates availability (tools enabled, name registered) and the
 * maxToolCalls budget, runs the tool with a runtime context object, then
 * appends both the model's raw turn (as assistant commentary) and a
 * developer message carrying the tool result so the next step continues
 * from it. Throws on any validation or execution failure; run()/stream()
 * convert that into a retry via #enqueueRetry when budget remains.
 */
async #handleToolCall(name, args) {
    if (!this.#params.tools.enabled) {
        throw new Error(`Tool '${name}' was requested but tools are disabled.`);
    }
    if (this.#toolCallCount >= this.#params.limits.maxToolCalls) {
        throw new Error(`Tool limit exceeded for '${name}'.`);
    }
    const tool = this.#params.tools.registry[name];
    if (!tool) {
        throw new Error(`Unknown tool '${name}'.`);
    }
    // Count the call before executing so failed calls also consume budget.
    this.#toolCallCount += 1;
    const result = await tool.execute(args, {
        openai: this.deps.openai,
        endpoint: this.#params.endpoint,
        model: this.#params.model,
        step: this.#step,
        sessionId: this.#params.memory.sessionId,
        params: this.#params,
    });
    this.#notes.push(`Tool ${name} result: ${jsonStringify(result)}`);
    // Preserve the model turn that issued the call in history.
    this.#history.push({
        role: "assistant",
        phase: "commentary",
        content: this.#rawOutputs.at(-1) ?? "",
    });
    // Feed the result back as a developer message for the next step.
    this.#history.push({
        role: "developer",
        content: [
            `Tool result for "${name}":`,
            jsonStringify({
                ok: true,
                name,
                arguments: args,
                result,
            }),
            "Continue from the latest state. Do not repeat completed tool calls.",
        ].join("\n\n"),
    });
    return result;
}
|
|
733
|
+
// Dispatches a protocol subagent call: validates availability and budget,
// builds the child-run parameters (subagent-specific settings override the
// parent's via `??` fallbacks), runs the child via deps.runSubagent, and
// records the outcome in notes and conversation history.
async #handleSubagentCall(name, args) {
    // Guard: subagents must be globally enabled for this run.
    if (!this.#params.subagents.enabled) {
        throw new Error(`Subagent '${name}' was requested but subagents are disabled.`);
    }
    // Guard: per-run budget on the number of subagent invocations.
    if (this.#subagentCallCount >= this.#params.limits.maxSubagentCalls) {
        throw new Error(`Subagent limit exceeded for '${name}'.`);
    }
    // Guard: the requested subagent must be registered by name.
    const subagent = this.#params.subagents.registry[name];
    if (!subagent) {
        throw new Error(`Unknown subagent '${name}'.`);
    }
    // Count the call before executing so failures still consume budget.
    this.#subagentCallCount += 1;
    // The payload becomes the child's user message: strings pass through
    // verbatim, anything else is serialized to JSON.
    const payloadText = typeof args === "string"
        ? args
        : jsonStringify(args);
    const result = await this.deps.runSubagent({
        // Endpoint/model fall back to the parent run's when not overridden.
        endpoint: subagent.endpoint ?? this.#params.endpoint,
        model: subagent.model ?? this.#params.model,
        messages: [
            {
                role: "developer",
                content: [
                    `You are the specialized General.AI protocol subagent "${name}".`,
                    subagent.instructions,
                    "Emit every protocol marker on its own line, starting at column 1.",
                    "Do not place multiple protocol markers on the same line.",
                    // Tell the child whether it may recurse into its own subagents.
                    subagent.subagents?.enabled
                        ? "Only use configured nested subagents when they are materially necessary."
                        : "No nested protocol subagents are available in this run. Solve the task directly.",
                ].join("\n\n"),
            },
            {
                role: "user",
                content: payloadText,
            },
        ],
        personality: subagent.personality ?? this.#params.personality,
        safety: subagent.safety ?? this.#params.safety,
        thinking: subagent.thinking ?? this.#params.thinking,
        // Subagent-specific tool config is normalized through the same path
        // as the parent's, then filtered (presumably to block self-recursion
        // through the same tool surface — confirm in filterToolsForSubagent).
        tools: filterToolsForSubagent(subagent.tools
            ? normalizeAgentParams(this.deps, {
                ...this.#params,
                tools: subagent.tools,
            }).tools
            : this.#params.tools, name),
        // Nested subagents are off by default unless the child declares them.
        subagents: subagent.subagents ?? { enabled: false, registry: {} },
        prompts: mergePromptOverrides(this.#params.prompts, subagent.prompts),
        limits: subagent.limits ?? this.#params.limits,
        request: subagent.request ?? this.#params.request,
        compatibility: this.#params.compatibility,
        metadata: {
            parent_subagent: name,
            depth: String(this.depth + 1),
        },
    }, this.depth + 1); // second arg: recursion depth of the child run
    this.#notes.push(`Subagent ${name} result: ${result.cleaned}`);
    // Record the assistant turn that triggered the call (latest raw output)...
    this.#history.push({
        role: "assistant",
        phase: "commentary",
        content: this.#rawOutputs.at(-1) ?? "",
    });
    // ...then feed the child's cleaned result back as a developer message so
    // the next step continues from it without re-issuing the call.
    this.#history.push({
        role: "developer",
        content: [
            `Subagent result for "${name}":`,
            jsonStringify({
                ok: true,
                name,
                arguments: args,
                cleaned: result.cleaned,
            }),
            "Continue from the latest state. Do not repeat completed subagent calls.",
        ].join("\n\n"),
    });
    return result;
}
|
|
809
|
+
async #finalize(prompt, strippedRequestKeys) {
|
|
810
|
+
const cleaned = this.#cleanedChunks.join("");
|
|
811
|
+
const output = this.#rawOutputs.join("\n\n");
|
|
812
|
+
if (this.#params.memory.enabled && this.#params.memory.save && this.#params.memory.sessionId) {
|
|
813
|
+
const snapshot = buildMemorySnapshot(this.#history, cleaned, this.#notes, this.#memorySnapshot);
|
|
814
|
+
await this.#params.memory.adapter.save({
|
|
815
|
+
sessionId: this.#params.memory.sessionId,
|
|
816
|
+
snapshot,
|
|
817
|
+
});
|
|
818
|
+
this.#memorySnapshot = snapshot;
|
|
819
|
+
}
|
|
820
|
+
return {
|
|
821
|
+
output,
|
|
822
|
+
cleaned,
|
|
823
|
+
events: [...this.#events],
|
|
824
|
+
meta: {
|
|
825
|
+
warnings: mergeStringLists(this.#warnings),
|
|
826
|
+
prompt,
|
|
827
|
+
strippedRequestKeys,
|
|
828
|
+
stepCount: this.#step,
|
|
829
|
+
toolCallCount: this.#toolCallCount,
|
|
830
|
+
subagentCallCount: this.#subagentCallCount,
|
|
831
|
+
protocolErrorCount: this.#protocolErrorCount,
|
|
832
|
+
memorySessionId: this.#params.memory.sessionId,
|
|
833
|
+
endpointResults: [...this.#endpointResults],
|
|
834
|
+
},
|
|
835
|
+
usage: aggregateUsage(this.#endpointResults),
|
|
836
|
+
endpointResult: this.#endpointResults.at(-1),
|
|
837
|
+
};
|
|
838
|
+
}
|
|
839
|
+
#buildResponsesRequest(stream) {
|
|
840
|
+
const request = stripReservedRequestKeys(this.#params.request?.responses, RESERVED_AGENT_RESPONSE_KEYS);
|
|
841
|
+
const instructions = this.#prompt?.fullPrompt ?? "";
|
|
842
|
+
return {
|
|
843
|
+
...(request ?? {}),
|
|
844
|
+
model: this.#params.model,
|
|
845
|
+
instructions,
|
|
846
|
+
input: compileMessagesForResponses(this.#history),
|
|
847
|
+
...(this.#params.metadata ? { metadata: this.#params.metadata } : {}),
|
|
848
|
+
...(stream ? { stream: true } : {}),
|
|
849
|
+
};
|
|
850
|
+
}
|
|
851
|
+
#buildChatRequest(stream) {
|
|
852
|
+
const request = stripReservedRequestKeys(this.#params.request?.chat_completions, RESERVED_AGENT_CHAT_KEYS);
|
|
853
|
+
const promptMessage = {
|
|
854
|
+
role: this.#params.compatibility?.chatRoleMode === "classic"
|
|
855
|
+
? "system"
|
|
856
|
+
: "developer",
|
|
857
|
+
content: this.#prompt?.fullPrompt ?? "",
|
|
858
|
+
};
|
|
859
|
+
return {
|
|
860
|
+
...(request ?? {}),
|
|
861
|
+
model: this.#params.model,
|
|
862
|
+
messages: [
|
|
863
|
+
promptMessage,
|
|
864
|
+
...compileMessagesForChatCompletions(this.#history, this.#params.compatibility),
|
|
865
|
+
],
|
|
866
|
+
...(this.#params.metadata ? { metadata: this.#params.metadata } : {}),
|
|
867
|
+
...(stream ? { stream: true } : {}),
|
|
868
|
+
};
|
|
869
|
+
}
|
|
870
|
+
}
|
|
871
|
+
//# sourceMappingURL=runtime.js.map
|