@iinm/plain-agent 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.config/agents.library/code-simplifier.md +5 -0
- package/.config/agents.library/qa-engineer.md +74 -0
- package/.config/agents.library/software-architect.md +278 -0
- package/.config/agents.predefined/worker.md +3 -0
- package/.config/config.predefined.json +825 -0
- package/.config/prompts.library/code-review.md +8 -0
- package/.config/prompts.library/feature-dev.md +6 -0
- package/.config/prompts.predefined/shortcuts/commit-by-user.md +9 -0
- package/.config/prompts.predefined/shortcuts/commit.md +10 -0
- package/.config/prompts.predefined/shortcuts/general-question.md +6 -0
- package/LICENSE +21 -0
- package/README.md +624 -0
- package/bin/plain +3 -0
- package/bin/plain-interrupt +6 -0
- package/bin/plain-notify-desktop +19 -0
- package/bin/plain-notify-terminal-bell +3 -0
- package/package.json +57 -0
- package/sandbox/bin/plain-sandbox +972 -0
- package/src/agent.d.ts +48 -0
- package/src/agent.mjs +159 -0
- package/src/agentLoop.mjs +369 -0
- package/src/agentState.mjs +41 -0
- package/src/cliArgs.mjs +45 -0
- package/src/cliFormatter.mjs +217 -0
- package/src/cliInteractive.mjs +739 -0
- package/src/config.d.ts +48 -0
- package/src/config.mjs +168 -0
- package/src/context/consumeInterruptMessage.mjs +30 -0
- package/src/context/loadAgentRoles.mjs +272 -0
- package/src/context/loadPrompts.mjs +312 -0
- package/src/context/loadUserMessageContext.mjs +147 -0
- package/src/env.mjs +46 -0
- package/src/main.mjs +202 -0
- package/src/mcp.mjs +202 -0
- package/src/model.d.ts +109 -0
- package/src/modelCaller.mjs +29 -0
- package/src/modelDefinition.d.ts +73 -0
- package/src/prompt.mjs +128 -0
- package/src/providers/anthropic.d.ts +248 -0
- package/src/providers/anthropic.mjs +596 -0
- package/src/providers/gemini.d.ts +208 -0
- package/src/providers/gemini.mjs +752 -0
- package/src/providers/openai.d.ts +281 -0
- package/src/providers/openai.mjs +551 -0
- package/src/providers/openaiCompatible.d.ts +147 -0
- package/src/providers/openaiCompatible.mjs +658 -0
- package/src/providers/platform/azure.mjs +42 -0
- package/src/providers/platform/bedrock.mjs +74 -0
- package/src/providers/platform/googleCloud.mjs +34 -0
- package/src/subagent.mjs +247 -0
- package/src/tmpfile.mjs +27 -0
- package/src/tool.d.ts +74 -0
- package/src/toolExecutor.mjs +236 -0
- package/src/toolInputValidator.mjs +183 -0
- package/src/toolUseApprover.mjs +98 -0
- package/src/tools/askGoogle.mjs +135 -0
- package/src/tools/delegateToSubagent.d.ts +4 -0
- package/src/tools/delegateToSubagent.mjs +48 -0
- package/src/tools/execCommand.d.ts +22 -0
- package/src/tools/execCommand.mjs +200 -0
- package/src/tools/fetchWebPage.mjs +96 -0
- package/src/tools/patchFile.d.ts +4 -0
- package/src/tools/patchFile.mjs +96 -0
- package/src/tools/reportAsSubagent.d.ts +3 -0
- package/src/tools/reportAsSubagent.mjs +44 -0
- package/src/tools/tavilySearch.d.ts +6 -0
- package/src/tools/tavilySearch.mjs +57 -0
- package/src/tools/tmuxCommand.d.ts +14 -0
- package/src/tools/tmuxCommand.mjs +194 -0
- package/src/tools/writeFile.d.ts +4 -0
- package/src/tools/writeFile.mjs +56 -0
- package/src/utils/evalJSONConfig.mjs +48 -0
- package/src/utils/matchValue.d.ts +6 -0
- package/src/utils/matchValue.mjs +40 -0
- package/src/utils/noThrow.mjs +31 -0
- package/src/utils/notify.mjs +28 -0
- package/src/utils/parseFileRange.mjs +18 -0
- package/src/utils/readFileRange.mjs +33 -0
- package/src/utils/retryOnError.mjs +41 -0
|
@@ -0,0 +1,658 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @import { ModelInput, Message, MessageContentText, AssistantMessage, ModelOutput, PartialMessageContent, MessageContentThinking, MessageContentToolUse } from "../model"
|
|
3
|
+
* @import { OpenAIAssistantMessage, OpenAIMessage, OpenAIMessageToolCall, OpenAICompatibleModelConfig, OpenAIToolDefinition, OpenAIStreamData, OpenAIChatCompletion, OpenAIMessageContentImage, OpenAIChatCompletionRequest } from "./openaiCompatible"
|
|
4
|
+
* @import { ToolDefinition } from "../tool"
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
import { styleText } from "node:util";
|
|
8
|
+
import { Sha256 } from "@aws-crypto/sha256-js";
|
|
9
|
+
import { fromIni } from "@aws-sdk/credential-providers";
|
|
10
|
+
import { HttpRequest } from "@smithy/protocol-http";
|
|
11
|
+
import { SignatureV4 } from "@smithy/signature-v4";
|
|
12
|
+
import { noThrow } from "../utils/noThrow.mjs";
|
|
13
|
+
import { retryOnError } from "../utils/retryOnError.mjs";
|
|
14
|
+
import { readBedrockStreamEvents } from "./platform/bedrock.mjs";
|
|
15
|
+
import { getGoogleCloudAccessToken } from "./platform/googleCloud.mjs";
|
|
16
|
+
|
|
17
|
+
/**
 * Calls a chat-completions API that speaks the OpenAI wire protocol and
 * reassembles the streamed response into a single ModelOutput.
 *
 * Platform handling (selected by platformConfig.name):
 * - "openai":    POST {baseURL}/v1/chat/completions with a Bearer API key.
 * - "bedrock":   POST the invoke-with-response-stream endpoint; the request
 *                is SigV4-signed with credentials from the configured AWS
 *                profile, and the body omits "model" (it is in the URL).
 * - "vertex-ai": POST the OpenAPI-compatible endpoint with a Google Cloud
 *                access token.
 *
 * Retry behavior: network timeouts are retried up to 5 attempts via
 * retryOnError; HTTP 429/5xx responses and stream-assembly failures recurse
 * with exponential backoff (2s doubling, capped at 16s).
 * NOTE(review): the recursive 429/5xx path has no cap on retryCount, so it
 * retries indefinitely — confirm this is intended.
 *
 * @param {import("../modelDefinition").PlatformConfig} platformConfig platform name, base URL, credentials, custom headers
 * @param {OpenAICompatibleModelConfig} modelConfig model name plus request parameters forwarded verbatim
 * @param {ModelInput} input generic messages, tool definitions and optional streaming callback
 * @param {number} retryCount internal backoff counter; external callers omit it
 * @returns {Promise<ModelOutput | Error>} assembled output, or the Error captured by noThrow
 */
export async function callOpenAICompatibleModel(
  platformConfig,
  modelConfig,
  input,
  retryCount = 0,
) {
  // Backoff for the recursive retry paths: 2, 4, 8, then 16 seconds.
  const retryInterval = Math.min(2 * 2 ** retryCount, 16);

  // noThrow converts any exception below into a returned Error value.
  return await noThrow(async () => {
    const messages = convertGenericMessageToOpenAIFormat(input.messages);
    const tools = convertGenericeToolDefinitionToOpenAIFormat(
      input.tools || [],
    );

    // Endpoint path differs per platform.
    const url = (() => {
      const baseURL = platformConfig.baseURL;

      switch (platformConfig.name) {
        case "openai":
          return `${baseURL}/v1/chat/completions`;
        case "bedrock":
          return `${baseURL}/model/${modelConfig.model}/invoke-with-response-stream`;
        case "vertex-ai":
          return `${baseURL}/endpoints/openapi/chat/completions`;
        default:
          throw new Error(`Unsupported platform: ${platformConfig.name}`);
      }
    })();

    // Auth headers per platform; Bedrock gets no auth header here because
    // the whole request is SigV4-signed later in runFetchForBedrock.
    /** @type {Record<string,string>} */
    const headers = await (async () => {
      switch (platformConfig.name) {
        case "openai":
          return {
            ...platformConfig.customHeaders,
            Authorization: `Bearer ${platformConfig.apiKey}`,
          };
        case "bedrock":
          return platformConfig.customHeaders ?? {};
        case "vertex-ai":
          return {
            ...platformConfig.customHeaders,
            Authorization: `Bearer ${await getGoogleCloudAccessToken()}`,
          };
      }
    })();

    // Request body per platform. Bedrock's invoke API carries the model in
    // the URL and streams via the endpoint itself, so "model"/"stream" are
    // omitted from its body.
    const { model: _, ...modelConfigWithoutName } = modelConfig;
    const platformRequest = (() => {
      switch (platformConfig.name) {
        case "openai":
          return {
            ...modelConfig,
            stream: true,
          };
        case "bedrock":
          return {
            ...modelConfigWithoutName,
          };
        case "vertex-ai":
          return {
            ...modelConfig,
            // NOTE(review): "model" is already present via the spread above;
            // this reassignment looks redundant — confirm.
            model: modelConfig.model,
            stream: true,
          };
      }
    })();

    /** @type {OpenAIChatCompletionRequest} */
    const request = {
      ...platformRequest,
      messages,
      // Omit "tools" entirely when none are defined.
      tools: tools.length ? tools : undefined,
      stream_options: {
        include_usage: true, // ask the API to report token usage in the stream
      },
    };

    const runFetchDefault = async () =>
      fetch(url, {
        method: "POST",
        headers: {
          ...headers,
          "Content-Type": "application/json",
        },
        body: JSON.stringify(request),
        signal: AbortSignal.timeout(120 * 1000),
      });

    // bedrock + sso profile: sign the request with SigV4 using credentials
    // resolved from the shared AWS config/credentials files.
    const runFetchForBedrock = async () => {
      // Region parsed from the standard bedrock-runtime hostname; empty
      // string when the base URL does not match that pattern.
      const region =
        url.match(/bedrock-runtime\.([\w-]+)\.amazonaws\.com/)?.[1] ?? "";
      const urlParsed = new URL(url);
      const { hostname, pathname } = urlParsed;

      const signer = new SignatureV4({
        credentials: fromIni({
          // The name check only narrows the config union type; this helper
          // is selected exclusively for the bedrock platform below.
          profile:
            platformConfig.name === "bedrock" ? platformConfig.awsProfile : "",
        }),
        region,
        service: "bedrock",
        sha256: Sha256,
      });

      const req = new HttpRequest({
        protocol: "https:",
        method: "POST",
        hostname,
        path: pathname,
        headers: {
          host: hostname, // host header must be part of the signature
          "Content-Type": "application/json",
        },
        body: JSON.stringify(request),
      });

      const signed = await signer.sign(req);

      // Replay the signed request (method/headers/body) through fetch.
      return fetch(url, {
        method: signed.method,
        headers: signed.headers,
        body: signed.body,
        signal: AbortSignal.timeout(120 * 1000),
      });
    };

    const runFetch =
      platformConfig.name === "bedrock" ? runFetchForBedrock : runFetchDefault;

    // Network-level retries (timeouts only); HTTP status handling is below.
    const response = await retryOnError(() => runFetch(), {
      shouldRetry: (err) => err instanceof Error && err.name === "TimeoutError",
      beforeRetry: (err, interval) => {
        console.error(
          styleText(
            "yellow",
            `Failed to call model: ${String(err)}; Retry in ${interval} seconds...`,
          ),
        );
        return Promise.resolve();
      },
      initialInterval: 2,
      maxInterval: 16,
      multiplier: 2,
      maxAttempt: 5,
    });

    // Rate limiting / server errors: back off and recurse with a larger
    // retryCount (see the NOTE(review) above about the missing retry cap).
    if (response.status === 429 || response.status >= 500) {
      console.error(
        styleText(
          "yellow",
          `Model rate limit exceeded. Retry in ${retryInterval} seconds...`,
        ),
      );
      await new Promise((resolve) => setTimeout(resolve, retryInterval * 1000));
      return callOpenAICompatibleModel(
        platformConfig,
        modelConfig,
        input,
        retryCount + 1,
      );
    }

    if (response.status !== 200) {
      throw new Error(
        `Failed to call OpenAI compatible model: status=${response.status}, body=${await response.text()}`,
      );
    }

    if (!response.body) {
      throw new Error("Response body is empty");
    }

    // Bedrock wraps the stream in its own event framing; everything else
    // uses plain OpenAI server-sent events.
    const reader = response.body.getReader();
    const eventStreamReader =
      platformConfig.name === "bedrock"
        ? /** @type {typeof readOpenAIStreamData} */ (readBedrockStreamEvents)
        : readOpenAIStreamData;

    // Collect every chunk (for final reassembly) while forwarding partial
    // content events to the caller's streaming callback.
    /** @type {OpenAIStreamData[]} */
    const dataList = [];
    /** @type {PartialMessageContent | undefined} */
    let previousPartialContent;
    for await (const data of eventStreamReader(reader)) {
      dataList.push(data);

      const partialContents = convertOpenAIStreamDataToAgentPartialContent(
        data,
        previousPartialContent,
      );

      if (partialContents.length) {
        previousPartialContent = partialContents.at(-1);
      }

      if (input.onPartialMessageContent) {
        for (const partialContent of partialContents) {
          input.onPartialMessageContent(partialContent);
        }
      }
    }

    // Fold the streamed chunks back into one completion; a malformed stream
    // is treated like a transient failure and retried.
    const chatCompletion = convertOpenAIStreamDataToChatCompletion(dataList);
    if (chatCompletion instanceof Error) {
      console.error(
        styleText(
          "yellow",
          `Failed to process stream: ${chatCompletion.message}; Retry in ${retryInterval} seconds...`,
        ),
      );
      await new Promise((resolve) => setTimeout(resolve, retryInterval * 1000));
      return callOpenAICompatibleModel(
        platformConfig,
        modelConfig,
        input,
        retryCount + 1,
      );
    }

    const openAIAssistantMessage = chatCompletion.choices[0].message;

    return {
      message: convertOpenAIAssistantMessageToGenericFormat(
        openAIAssistantMessage,
      ),
      providerTokenUsage: chatCompletion.usage,
    };
  });
}
|
|
254
|
+
|
|
255
|
+
/**
 * Translates the agent's provider-agnostic message history into the OpenAI
 * chat-completions message format.
 *
 * Per role:
 * - system:    text parts are forwarded as-is.
 * - user:      each tool_result becomes a "tool" message (images inside a
 *              tool result are replaced by numbered placeholders, because
 *              tool messages carry text only); the referenced images are
 *              then delivered in a follow-up "user" message, and finally
 *              the user's own text/image parts form another "user" message.
 * - assistant: thinking/text parts are joined (multiples are logged as
 *              unsupported) and tool_use parts become function tool calls.
 *
 * @param {Message[]} genericMessages
 * @returns {OpenAIMessage[]}
 */
function convertGenericMessageToOpenAIFormat(genericMessages) {
  /** @type {OpenAIMessage[]} */
  const converted = [];

  for (const message of genericMessages) {
    switch (message.role) {
      case "system": {
        converted.push({
          role: "system",
          content: message.content.map(({ text }) => ({ type: "text", text })),
        });
        break;
      }

      case "user": {
        const toolResults = message.content.filter(
          (part) => part.type === "tool_result",
        );
        const directParts = message.content.filter(
          (part) => part.type === "text" || part.type === "image",
        );

        // 1) One "tool" message per tool result; images are numbered across
        //    all results of this message and replaced by placeholders.
        let imageCounter = 0;
        for (const result of toolResults) {
          const pieces = [];
          for (const part of result.content) {
            if (part.type === "text") {
              pieces.push(part.text);
            } else if (part.type === "image") {
              imageCounter += 1;
              pieces.push(
                `(Image [${imageCounter}] omitted. See next message from user.)`,
              );
            } else {
              throw new Error(
                `Unsupported content part: ${JSON.stringify(part)}`,
              );
            }
          }
          converted.push({
            role: "tool",
            tool_call_id: result.toolUseId,
            content: pieces.join("\n\n"),
          });
        }

        // 2) The images referenced by the placeholders, as one user message.
        /** @type {OpenAIMessageContentImage[]} */
        const images = toolResults.flatMap((result) =>
          result.content
            .filter((part) => part.type === "image")
            .map((part) => ({
              type: "image_url",
              image_url: { url: `data:${part.mimeType};base64,${part.data}` },
            })),
        );
        if (images.length > 0) {
          converted.push({ role: "user", content: images });
        }

        // 3) The user's own text/image parts.
        if (directParts.length > 0) {
          converted.push({
            role: "user",
            content: directParts.map((part) => {
              switch (part.type) {
                case "text":
                  return { type: "text", text: part.text };
                case "image":
                  return {
                    type: "image_url",
                    image_url: {
                      url: `data:${part.mimeType};base64,${part.data}`,
                    },
                  };
                default:
                  // Unreachable after the filter above; keeps types narrow.
                  throw new Error(
                    `Unsupported content part: ${JSON.stringify(part)}`,
                  );
              }
            }),
          });
        }

        break;
      }

      case "assistant": {
        const collect = (type) =>
          message.content.filter((part) => part.type === type);

        /** @type {MessageContentThinking[]} */
        const thinkingParts = collect("thinking");
        if (thinkingParts.length > 1) {
          console.error(
            `OpenAI Unsupported message format: ${JSON.stringify(message)}`,
          );
        }
        const thinking = thinkingParts.map((part) => part.thinking).join("\n");

        /** @type {MessageContentText[]} */
        const textParts = collect("text");
        if (textParts.length > 1) {
          console.error(
            `OpenAI Unsupported message format: ${JSON.stringify(message)}`,
          );
        }
        const text = textParts.map((part) => part.text).join("\n");

        /** @type {MessageContentToolUse[]} */
        const toolUseParts = collect("tool_use");

        /** @type {OpenAIMessageToolCall[]} */
        const toolCalls = toolUseParts.map((part) => ({
          id: part.toolUseId,
          type: "function",
          function: {
            name: part.toolName,
            arguments: JSON.stringify(part.input),
          },
        }));

        converted.push({
          role: "assistant",
          reasoning_content: thinking || undefined,
          content: text || undefined,
          tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
        });
      }
    }
  }

  return converted;
}
|
|
404
|
+
|
|
405
|
+
/**
 * Maps the agent's generic tool definitions onto the OpenAI "function"
 * tool-definition shape.
 *
 * @param {ToolDefinition[]} genericToolDefs
 * @returns {OpenAIToolDefinition[]}
 */
function convertGenericeToolDefinitionToOpenAIFormat(genericToolDefs) {
  return genericToolDefs.map(({ name, description, inputSchema }) => ({
    type: "function",
    function: {
      name,
      description,
      parameters: inputSchema,
    },
  }));
}
|
|
425
|
+
|
|
426
|
+
/**
 * Converts an OpenAI assistant message back into the agent's generic
 * assistant-message shape (thinking / text / tool_use content parts).
 *
 * Tool-call arguments arrive as a JSON string; when parsing fails, the
 * parse error text is preserved as `{ err }` instead of throwing so the
 * failure is visible downstream. Non-"function" tool calls are rejected.
 *
 * @param {OpenAIAssistantMessage} openAIAsistantMessage
 * @returns {AssistantMessage}
 */
function convertOpenAIAssistantMessageToGenericFormat(openAIAsistantMessage) {
  const { reasoning_content, content: text, tool_calls } =
    openAIAsistantMessage;

  /** @type {AssistantMessage["content"]} */
  const content = [];

  if (reasoning_content) {
    content.push({ type: "thinking", thinking: reasoning_content });
  }

  if (text) {
    content.push({ type: "text", text });
  }

  if (tool_calls) {
    for (const toolCall of tool_calls) {
      if (toolCall.type !== "function") {
        throw new Error(
          `Unsupported tool call type: ${JSON.stringify(toolCall)}`,
        );
      }

      /** @type {Record<string, unknown>} */
      let args;
      try {
        args = JSON.parse(toolCall.function.arguments);
      } catch (err) {
        args = { err: String(err) };
      }

      content.push({
        type: "tool_use",
        toolUseId: toolCall.id,
        toolName: toolCall.function.name,
        input: args,
      });
    }
  }

  return { role: "assistant", content };
}
|
|
474
|
+
|
|
475
|
+
/**
 * Folds a list of streamed chunks into a single non-streaming chat
 * completion object.
 *
 * The first chunk supplies the envelope (id, model, ...) and its delta is
 * used directly as the accumulator for the merged assistant message; every
 * later chunk appends its reasoning/content text, merges tool-call argument
 * fragments by index, and records finish_reason/usage as they arrive.
 *
 * @param {OpenAIStreamData[]} dataList
 * @returns {OpenAIChatCompletion | Error} Error when the stream held no
 *   usable first chunk/choice.
 */
function convertOpenAIStreamDataToChatCompletion(dataList) {
  const [head, ...rest] = dataList;
  if (!head) {
    return new Error("No data found in the stream");
  }

  const headChoice = head.choices.at(0);
  if (!headChoice) {
    return new Error("No choice found in the first data");
  }

  // The first delta doubles as the accumulator for the merged message.
  const message = /** @type {OpenAIAssistantMessage} */ (headChoice.delta);

  /** @type {Partial<OpenAIChatCompletion>} */
  const completion = {
    ...head,
    choices: [
      {
        index: headChoice.index,
        message,
        finish_reason: /** @type {string} */ (headChoice.finish_reason),
      },
    ],
  };

  for (const chunk of rest) {
    const choice = chunk?.choices.at(0);
    if (choice) {
      const { delta } = choice;

      if (delta.reasoning_content) {
        message.reasoning_content =
          (message.reasoning_content ?? "") + delta.reasoning_content;
      }

      if (delta.content) {
        message.content = (message.content ?? "") + delta.content;
      }

      if (delta.tool_calls) {
        for (const fragment of delta.tool_calls) {
          const existing = message.tool_calls?.at(fragment.index);
          if (!existing) {
            // First fragment for this index: adopt it whole; its arguments
            // are the initial accumulator value.
            if (!message.tool_calls) {
              message.tool_calls = [];
            }
            /** @type {OpenAIMessageToolCall[]} */ (message.tool_calls).push(
              /** @type {OpenAIMessageToolCall} */ (fragment),
            );
          } else if (fragment.function) {
            existing.function.arguments =
              (existing.function.arguments ?? "") +
              fragment.function.arguments;
          }
        }
      }

      if (choice.finish_reason && completion.choices) {
        completion.choices[0].finish_reason = choice.finish_reason;
      }
    }

    if (chunk.usage) {
      completion.usage = chunk.usage;
    }
  }

  return /** @type {OpenAIChatCompletion} */ (completion);
}
|
|
551
|
+
|
|
552
|
+
/**
 * Derives incremental UI content events from one streamed chunk.
 *
 * Each delta field (reasoning, text, first tool call) becomes a partial
 * content event whose position is "start" when the content type changes and
 * "delta" while it continues; a finish_reason emits a "stop" for the
 * previous type ("unknown" if there was none). When the content type
 * switches mid-stream, a synthetic "stop" for the previous type is
 * prepended so consumers can close the prior section.
 *
 * @param {OpenAIStreamData} data
 * @param {PartialMessageContent | undefined} previousPartialContent
 * @returns {PartialMessageContent[]}
 */
function convertOpenAIStreamDataToAgentPartialContent(
  data,
  previousPartialContent,
) {
  /** @type {PartialMessageContent[]} */
  const events = [];
  const choice = data.choices.at(0);

  // "delta" continues the previous section; "start" opens a new one.
  const positionFor = (type) =>
    previousPartialContent?.type === type ? "delta" : "start";

  if (choice?.delta.reasoning_content) {
    events.push({
      type: "thinking",
      content: choice.delta.reasoning_content,
      position: positionFor("thinking"),
    });
  }

  if (choice?.delta.content) {
    events.push({
      type: "text",
      content: choice.delta.content,
      position: positionFor("text"),
    });
  }

  if (choice?.delta.tool_calls) {
    const call = choice.delta.tool_calls.at(0);
    events.push({
      type: "tool_use",
      content: [call?.function?.name, call?.function?.arguments].join(" "),
      position: positionFor("tool_use"),
    });
  }

  if (choice?.finish_reason) {
    events.push({
      type: previousPartialContent?.type || "unknown",
      position: "stop",
    });
  }

  // Close out the previous content type when a different one begins.
  if (
    events.length &&
    previousPartialContent &&
    events[0].position !== "stop" &&
    events[0].type !== previousPartialContent.type
  ) {
    events.unshift({ type: previousPartialContent.type, position: "stop" });
  }

  return events;
}
|
|
613
|
+
|
|
614
|
+
/**
 * Parses an OpenAI server-sent-events byte stream into JSON chunks.
 *
 * Bytes are buffered across reads until a blank line (LF LF) terminates an
 * event; each "data: ..." event is JSON-parsed and yielded. The
 * "data: [DONE]" sentinel stops processing of the current batch of events.
 *
 * NOTE(review): events are assumed to be separated by "\n\n"; a server
 * emitting CRLF-separated events would not be split here — confirm upstream.
 *
 * @param {ReadableStreamDefaultReader<Uint8Array>} reader
 */
async function* readOpenAIStreamData(reader) {
  const LF = "\n".charCodeAt(0);
  const decoder = new TextDecoder();
  let pending = new Uint8Array();

  for (;;) {
    const { done, value } = await reader.read();
    if (done) {
      break;
    }

    // Append the new bytes to whatever is left from the previous read.
    const combined = new Uint8Array(pending.length + value.length);
    combined.set(pending);
    combined.set(value, pending.length);
    pending = combined;

    // Locate every blank-line boundary currently in the buffer.
    const boundaries = [];
    for (let i = 0; i + 1 < pending.length; i++) {
      if (pending[i] === LF && pending[i + 1] === LF) {
        boundaries.push(i);
      }
    }

    for (let i = 0; i < boundaries.length; i++) {
      const start = i === 0 ? 0 : boundaries[i - 1] + 2;
      const event = decoder.decode(pending.slice(start, boundaries[i]));
      if (event === "data: [DONE]") {
        break;
      }
      if (event.startsWith("data: ")) {
        /** @type {OpenAIStreamData} */
        const parsed = JSON.parse(event.slice("data: ".length));
        yield parsed;
      }
    }

    // Keep only the bytes after the last complete event.
    if (boundaries.length) {
      pending = pending.slice(boundaries[boundaries.length - 1] + 2);
    }
  }
}
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
import { execFile } from "node:child_process";
|
|
2
|
+
|
|
3
|
+
/**
 * Fetches an Azure access token for the Cognitive Services resource by
 * shelling out to the Azure CLI (`az account get-access-token`).
 *
 * @param {{azureConfigDir: string}=} config - Optional override for the
 *   Azure CLI configuration directory (exported as AZURE_CONFIG_DIR).
 * @returns {Promise<string>} The trimmed access token. Rejects when the CLI
 *   is missing, not logged in, exits non-zero, or exceeds the 10s timeout.
 */
export async function getAzureAccessToken(config) {
  /** @type {string} */
  const stdout = await new Promise((resolve, reject) => {
    execFile(
      "az",
      [
        "account",
        "get-access-token",
        "--resource",
        "https://cognitiveservices.azure.com",
        "--query",
        "accessToken",
        "--output",
        "tsv",
      ],
      {
        shell: false,
        timeout: 10 * 1000,
        // Merge with the parent environment instead of replacing it:
        // execFile's `env` option substitutes the child's entire
        // environment, so passing only AZURE_CONFIG_DIR would strip
        // PATH/HOME and break the `az` invocation.
        env: config
          ? {
              ...process.env,
              AZURE_CONFIG_DIR: config.azureConfigDir,
            }
          : undefined,
      },
      (error, stdout, _stderr) => {
        if (error) {
          reject(error);
          return;
        }
        resolve(stdout.trim());
      },
    );
  });

  return stdout;
}
|