@agenr/agenr-plugin 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +12 -0
- package/dist/anthropic-RE4XNAKE.js +5515 -0
- package/dist/azure-openai-responses-IQLXOCZS.js +190 -0
- package/dist/chunk-6DQXEU2A.js +32306 -0
- package/dist/chunk-EAQYK3U2.js +41 -0
- package/dist/chunk-HNWLZUWE.js +31 -0
- package/dist/chunk-JRUUYSFL.js +262 -0
- package/dist/chunk-OLOUBEE5.js +14022 -0
- package/dist/chunk-P5HNPYGQ.js +174 -0
- package/dist/chunk-RD7BUOBD.js +416 -0
- package/dist/chunk-RWWH2U4W.js +7056 -0
- package/dist/chunk-SEOMNQGB.js +86 -0
- package/dist/chunk-SQLXP7LT.js +4792 -0
- package/dist/chunk-URGOKODJ.js +17 -0
- package/dist/dist-R6ESEJ6P.js +1244 -0
- package/dist/google-NAVXTQLO.js +371 -0
- package/dist/google-gemini-cli-NKYJWHX2.js +712 -0
- package/dist/google-vertex-ZBJ2EDRH.js +414 -0
- package/dist/index.js +15942 -0
- package/dist/mistral-SBQYC4J5.js +38407 -0
- package/dist/multipart-parser-DV373IRF.js +371 -0
- package/dist/openai-codex-responses-XN3T3DEN.js +712 -0
- package/dist/openai-completions-75ZFOFU6.js +657 -0
- package/dist/openai-responses-DCK4BVNT.js +198 -0
- package/dist/src-T5RRS2HN.js +1408 -0
- package/openclaw.plugin.json +86 -0
- package/package.json +31 -0
|
@@ -0,0 +1,657 @@
|
|
|
1
|
+
import {
|
|
2
|
+
buildCopilotDynamicHeaders,
|
|
3
|
+
hasCopilotVisionInput
|
|
4
|
+
} from "./chunk-HNWLZUWE.js";
|
|
5
|
+
import {
|
|
6
|
+
OpenAI
|
|
7
|
+
} from "./chunk-RWWH2U4W.js";
|
|
8
|
+
import {
|
|
9
|
+
parseStreamingJson
|
|
10
|
+
} from "./chunk-JRUUYSFL.js";
|
|
11
|
+
import {
|
|
12
|
+
getEnvApiKey
|
|
13
|
+
} from "./chunk-SEOMNQGB.js";
|
|
14
|
+
import {
|
|
15
|
+
buildBaseOptions,
|
|
16
|
+
clampReasoning,
|
|
17
|
+
sanitizeSurrogates,
|
|
18
|
+
transformMessages
|
|
19
|
+
} from "./chunk-P5HNPYGQ.js";
|
|
20
|
+
import {
|
|
21
|
+
AssistantMessageEventStream,
|
|
22
|
+
calculateCost,
|
|
23
|
+
supportsXhigh
|
|
24
|
+
} from "./chunk-OLOUBEE5.js";
|
|
25
|
+
import "./chunk-EAQYK3U2.js";
|
|
26
|
+
|
|
27
|
+
// ../../node_modules/.pnpm/@mariozechner+pi-ai@0.63.2_@modelcontextprotocol+sdk@1.27.1_zod@4.3.6__ws@8.20.0_zod@4.3.6/node_modules/@mariozechner/pi-ai/dist/providers/openai-completions.js
|
|
28
|
+
// Returns true when the conversation already contains tool activity:
// either a tool-result message, or an assistant message whose content
// includes at least one tool call. Used to decide whether an empty
// `tools` array must still be sent so the provider accepts the history.
function hasToolHistory(messages) {
  return messages.some(
    (msg) =>
      msg.role === "toolResult" ||
      (msg.role === "assistant" && msg.content.some((block) => block.type === "toolCall"))
  );
}
|
|
41
|
+
// Streams an OpenAI-style chat completion and re-emits it as an
// AssistantMessageEventStream of block-level events (text / thinking /
// toolCall, each with start/delta/end), finishing with "done" or "error".
// The async IIFE runs in the background; the stream is returned immediately.
var streamOpenAICompletions = (model, context, options) => {
  const stream = new AssistantMessageEventStream();
  (async () => {
    // Accumulator for the final assistant message; mutated in place and
    // attached to every event as `partial` so consumers see live state.
    const output = {
      role: "assistant",
      content: [],
      api: model.api,
      provider: model.provider,
      model: model.id,
      usage: {
        input: 0,
        output: 0,
        cacheRead: 0,
        cacheWrite: 0,
        totalTokens: 0,
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }
      },
      stopReason: "stop",
      timestamp: Date.now()
    };
    try {
      const apiKey = options?.apiKey || getEnvApiKey(model.provider) || "";
      const client = createClient(model, context, apiKey, options?.headers);
      let params = buildParams(model, context, options);
      // Caller may inspect/replace the outgoing payload; undefined means "keep as built".
      const nextParams = await options?.onPayload?.(params, model);
      if (nextParams !== void 0) {
        params = nextParams;
      }
      const openaiStream = await client.chat.completions.create(params, { signal: options?.signal });
      stream.push({ type: "start", partial: output });
      // The block currently receiving deltas; a change of block type (or
      // tool-call id) closes it via finishCurrentBlock before opening a new one.
      let currentBlock = null;
      const blocks = output.content;
      const blockIndex = () => blocks.length - 1;
      // Emits the matching *_end event for the open block. For tool calls,
      // finalizes `arguments` from the buffered partial JSON and drops the buffer.
      const finishCurrentBlock = (block) => {
        if (block) {
          if (block.type === "text") {
            stream.push({
              type: "text_end",
              contentIndex: blockIndex(),
              content: block.text,
              partial: output
            });
          } else if (block.type === "thinking") {
            stream.push({
              type: "thinking_end",
              contentIndex: blockIndex(),
              content: block.thinking,
              partial: output
            });
          } else if (block.type === "toolCall") {
            block.arguments = parseStreamingJson(block.partialArgs);
            delete block.partialArgs;
            stream.push({
              type: "toolcall_end",
              contentIndex: blockIndex(),
              toolCall: block,
              partial: output
            });
          }
        }
      };
      for await (const chunk of openaiStream) {
        if (!chunk || typeof chunk !== "object")
          continue;
        // First chunk's id becomes the response id; later ids are ignored.
        output.responseId ||= chunk.id;
        if (chunk.usage) {
          output.usage = parseChunkUsage(chunk.usage, model);
        }
        const choice = Array.isArray(chunk.choices) ? chunk.choices[0] : void 0;
        if (!choice)
          continue;
        // Some providers report usage on the choice rather than the chunk.
        if (!chunk.usage && choice.usage) {
          output.usage = parseChunkUsage(choice.usage, model);
        }
        if (choice.finish_reason) {
          const finishReasonResult = mapStopReason(choice.finish_reason);
          output.stopReason = finishReasonResult.stopReason;
          if (finishReasonResult.errorMessage) {
            output.errorMessage = finishReasonResult.errorMessage;
          }
        }
        if (choice.delta) {
          // --- plain text deltas ---
          if (choice.delta.content !== null && choice.delta.content !== void 0 && choice.delta.content.length > 0) {
            if (!currentBlock || currentBlock.type !== "text") {
              finishCurrentBlock(currentBlock);
              currentBlock = { type: "text", text: "" };
              output.content.push(currentBlock);
              stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output });
            }
            if (currentBlock.type === "text") {
              currentBlock.text += choice.delta.content;
              stream.push({
                type: "text_delta",
                contentIndex: blockIndex(),
                delta: choice.delta.content,
                partial: output
              });
            }
          }
          // --- reasoning deltas: providers disagree on the field name, so
          // probe the known aliases and use the first non-empty one ---
          const reasoningFields = ["reasoning_content", "reasoning", "reasoning_text"];
          let foundReasoningField = null;
          for (const field of reasoningFields) {
            if (choice.delta[field] !== null && choice.delta[field] !== void 0 && choice.delta[field].length > 0) {
              if (!foundReasoningField) {
                foundReasoningField = field;
                break;
              }
            }
          }
          if (foundReasoningField) {
            if (!currentBlock || currentBlock.type !== "thinking") {
              finishCurrentBlock(currentBlock);
              currentBlock = {
                type: "thinking",
                thinking: "",
                // Remember which alias carried the reasoning so it can be
                // echoed back on the same field when replaying history.
                thinkingSignature: foundReasoningField
              };
              output.content.push(currentBlock);
              stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output });
            }
            if (currentBlock.type === "thinking") {
              const delta = choice.delta[foundReasoningField];
              currentBlock.thinking += delta;
              stream.push({
                type: "thinking_delta",
                contentIndex: blockIndex(),
                delta,
                partial: output
              });
            }
          }
          // --- tool-call deltas: a new id (or non-toolCall current block)
          // opens a fresh block; arguments arrive as partial JSON text ---
          if (choice?.delta?.tool_calls) {
            for (const toolCall of choice.delta.tool_calls) {
              if (!currentBlock || currentBlock.type !== "toolCall" || toolCall.id && currentBlock.id !== toolCall.id) {
                finishCurrentBlock(currentBlock);
                currentBlock = {
                  type: "toolCall",
                  id: toolCall.id || "",
                  name: toolCall.function?.name || "",
                  arguments: {},
                  partialArgs: ""
                };
                output.content.push(currentBlock);
                stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output });
              }
              if (currentBlock.type === "toolCall") {
                if (toolCall.id)
                  currentBlock.id = toolCall.id;
                if (toolCall.function?.name)
                  currentBlock.name = toolCall.function.name;
                let delta = "";
                if (toolCall.function?.arguments) {
                  delta = toolCall.function.arguments;
                  currentBlock.partialArgs += toolCall.function.arguments;
                  // Best-effort parse of the incomplete JSON so `arguments`
                  // is always usable mid-stream.
                  currentBlock.arguments = parseStreamingJson(currentBlock.partialArgs);
                }
                stream.push({
                  type: "toolcall_delta",
                  contentIndex: blockIndex(),
                  delta,
                  partial: output
                });
              }
            }
          }
          // --- encrypted reasoning details (OpenRouter-style): attach the
          // raw detail as thoughtSignature on the matching tool call ---
          const reasoningDetails = choice.delta.reasoning_details;
          if (reasoningDetails && Array.isArray(reasoningDetails)) {
            for (const detail of reasoningDetails) {
              if (detail.type === "reasoning.encrypted" && detail.id && detail.data) {
                const matchingToolCall = output.content.find((b) => b.type === "toolCall" && b.id === detail.id);
                if (matchingToolCall) {
                  matchingToolCall.thoughtSignature = JSON.stringify(detail);
                }
              }
            }
          }
        }
      }
      finishCurrentBlock(currentBlock);
      // Abort/error stop reasons are surfaced as thrown errors so the
      // single catch path below emits the terminal "error" event.
      if (options?.signal?.aborted) {
        throw new Error("Request was aborted");
      }
      if (output.stopReason === "aborted") {
        throw new Error("Request was aborted");
      }
      if (output.stopReason === "error") {
        throw new Error(output.errorMessage || "Provider returned an error stop reason");
      }
      stream.push({ type: "done", reason: output.stopReason, message: output });
      stream.end();
    } catch (error) {
      for (const block of output.content)
        delete block.index;
      output.stopReason = options?.signal?.aborted ? "aborted" : "error";
      output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);
      // Some gateways tuck the raw provider response under error.metadata.raw;
      // append it for diagnosability.
      const rawMetadata = error?.error?.metadata?.raw;
      if (rawMetadata)
        output.errorMessage += `
${rawMetadata}`;
      stream.push({ type: "error", reason: output.stopReason, error: output });
      stream.end();
    }
  })();
  return stream;
};
|
|
246
|
+
// Convenience wrapper around streamOpenAICompletions: resolves the API key,
// builds the base request options, and clamps the reasoning effort for
// models that do not support the "xhigh" level. Throws when no key is found.
var streamSimpleOpenAICompletions = (model, context, options) => {
  const key = options?.apiKey || getEnvApiKey(model.provider);
  if (!key) {
    throw new Error(`No API key for provider: ${model.provider}`);
  }
  const base = buildBaseOptions(model, options, key);
  const requested = options?.reasoning;
  const effort = supportsXhigh(model) ? requested : clampReasoning(requested);
  return streamOpenAICompletions(model, context, {
    ...base,
    reasoningEffort: effort,
    toolChoice: options?.toolChoice
  });
};
|
|
260
|
+
// Builds the OpenAI SDK client for this model: resolves the API key
// (falling back to OPENAI_API_KEY), merges model headers with GitHub
// Copilot dynamic headers (when applicable) and caller-supplied headers,
// later sources winning. Throws when no key can be resolved.
function createClient(model, context, apiKey, optionsHeaders) {
  let key = apiKey;
  if (!key) {
    if (!process.env.OPENAI_API_KEY) {
      throw new Error("OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.");
    }
    key = process.env.OPENAI_API_KEY;
  }
  const copilotHeaders =
    model.provider === "github-copilot"
      ? buildCopilotDynamicHeaders({
          messages: context.messages,
          hasImages: hasCopilotVisionInput(context.messages)
        })
      : void 0;
  // Spread order mirrors the original precedence: model < copilot < options.
  const headers = { ...model.headers, ...copilotHeaders, ...optionsHeaders };
  return new OpenAI({
    apiKey: key,
    baseURL: model.baseUrl,
    dangerouslyAllowBrowser: true,
    defaultHeaders: headers
  });
}
|
|
286
|
+
// Assembles the chat.completions request body for this model, applying the
// provider compatibility profile: usage-in-streaming, store, max-tokens
// field name, tools, and the provider-specific thinking/reasoning knobs.
function buildParams(model, context, options) {
  const compat = getCompat(model);
  const messages = convertMessages(model, context, compat);
  maybeAddOpenRouterAnthropicCacheControl(model, messages);
  const params = {
    model: model.id,
    messages,
    stream: true
  };
  if (compat.supportsUsageInStreaming !== false) {
    params.stream_options = { include_usage: true };
  }
  if (compat.supportsStore) {
    params.store = false;
  }
  if (options?.maxTokens) {
    const field = compat.maxTokensField === "max_tokens" ? "max_tokens" : "max_completion_tokens";
    params[field] = options.maxTokens;
  }
  if (options?.temperature !== void 0) {
    params.temperature = options.temperature;
  }
  if (context.tools) {
    params.tools = convertTools(context.tools, compat);
  } else if (hasToolHistory(context.messages)) {
    // History already contains tool activity, so an (empty) tools array
    // must still be sent for the provider to accept it.
    params.tools = [];
  }
  if (options?.toolChoice) {
    params.tool_choice = options.toolChoice;
  }
  const wantsThinking = !!options?.reasoningEffort;
  const format = compat.thinkingFormat;
  if (model.reasoning && (format === "zai" || format === "qwen")) {
    // zai and qwen share the same flat enable_thinking toggle.
    params.enable_thinking = wantsThinking;
  } else if (model.reasoning && format === "qwen-chat-template") {
    params.chat_template_kwargs = { enable_thinking: wantsThinking };
  } else if (model.reasoning && format === "openrouter") {
    params.reasoning = wantsThinking
      ? { effort: mapReasoningEffort(options.reasoningEffort, compat.reasoningEffortMap) }
      : { effort: "none" };
  } else if (wantsThinking && model.reasoning && compat.supportsReasoningEffort) {
    params.reasoning_effort = mapReasoningEffort(options.reasoningEffort, compat.reasoningEffortMap);
  }
  if (model.baseUrl.includes("openrouter.ai") && model.compat?.openRouterRouting) {
    params.provider = model.compat.openRouterRouting;
  }
  if (model.baseUrl.includes("ai-gateway.vercel.sh") && model.compat?.vercelGatewayRouting) {
    const { only, order } = model.compat.vercelGatewayRouting;
    if (only || order) {
      const gateway = {};
      if (only)
        gateway.only = only;
      if (order)
        gateway.order = order;
      params.providerOptions = { gateway };
    }
  }
  return params;
}
|
|
353
|
+
// Translates a reasoning-effort level through the provider's override map;
// levels with no override (or a nullish one) pass through unchanged.
function mapReasoningEffort(effort, reasoningEffortMap) {
  const mapped = reasoningEffortMap[effort];
  return mapped ?? effort;
}
|
|
356
|
+
// For Anthropic models routed through OpenRouter, marks the last text part
// of the most recent user/assistant message with an ephemeral cache_control
// breakpoint (mutates `messages` in place). No-op for other providers/models.
function maybeAddOpenRouterAnthropicCacheControl(model, messages) {
  if (model.provider !== "openrouter")
    return;
  if (!model.id.startsWith("anthropic/"))
    return;
  for (let idx = messages.length - 1; idx >= 0; idx--) {
    const message = messages[idx];
    if (message.role !== "user" && message.role !== "assistant")
      continue;
    const body = message.content;
    if (typeof body === "string") {
      // Promote a plain-string message to a parts array so the breakpoint
      // can be attached.
      message.content = [
        { type: "text", text: body, cache_control: { type: "ephemeral" } }
      ];
      return;
    }
    if (!Array.isArray(body))
      continue;
    // Tag the last text part only; non-text parts are skipped.
    for (let k = body.length - 1; k >= 0; k--) {
      const part = body[k];
      if (part?.type === "text") {
        part.cache_control = { type: "ephemeral" };
        return;
      }
    }
  }
}
|
|
381
|
+
// Converts the internal message history into the OpenAI chat.completions
// message format, honoring the provider compatibility profile (developer
// role, tool-result names, filler assistant turns, thinking-as-text, etc.).
function convertMessages(model, context, compat) {
  const params = [];
  // Some gateways encode extra data after a "|" in tool-call ids; strip it
  // and sanitize/truncate so the id is acceptable downstream. OpenAI proper
  // additionally caps ids at 40 characters.
  const normalizeToolCallId = (id) => {
    if (id.includes("|")) {
      const [callId] = id.split("|");
      return callId.replace(/[^a-zA-Z0-9_-]/g, "_").slice(0, 40);
    }
    if (model.provider === "openai")
      return id.length > 40 ? id.slice(0, 40) : id;
    return id;
  };
  const transformedMessages = transformMessages(context.messages, model, (id) => normalizeToolCallId(id));
  if (context.systemPrompt) {
    // Reasoning models that support it take the system prompt on the
    // "developer" role instead of "system".
    const useDeveloperRole = model.reasoning && compat.supportsDeveloperRole;
    const role = useDeveloperRole ? "developer" : "system";
    params.push({ role, content: sanitizeSurrogates(context.systemPrompt) });
  }
  // Tracks the previous emitted role so a filler assistant turn can be
  // inserted between a tool result and a user message when required.
  let lastRole = null;
  for (let i = 0; i < transformedMessages.length; i++) {
    const msg = transformedMessages[i];
    if (compat.requiresAssistantAfterToolResult && lastRole === "toolResult" && msg.role === "user") {
      params.push({
        role: "assistant",
        content: "I have processed the tool results."
      });
    }
    if (msg.role === "user") {
      if (typeof msg.content === "string") {
        params.push({
          role: "user",
          content: sanitizeSurrogates(msg.content)
        });
      } else {
        // Structured user content: text parts are sanitized, everything else
        // is treated as an image and converted to a data-URL part.
        const content = msg.content.map((item) => {
          if (item.type === "text") {
            return {
              type: "text",
              text: sanitizeSurrogates(item.text)
            };
          } else {
            return {
              type: "image_url",
              image_url: {
                url: `data:${item.mimeType};base64,${item.data}`
              }
            };
          }
        });
        // Drop images for models without image input; skip the message
        // entirely if nothing remains.
        const filteredContent = !model.input.includes("image") ? content.filter((c) => c.type !== "image_url") : content;
        if (filteredContent.length === 0)
          continue;
        params.push({
          role: "user",
          content: filteredContent
        });
      }
    } else if (msg.role === "assistant") {
      // Some providers reject null content; use "" as the placeholder then.
      const assistantMsg = {
        role: "assistant",
        content: compat.requiresAssistantAfterToolResult ? "" : null
      };
      const textBlocks = msg.content.filter((b) => b.type === "text");
      const nonEmptyTextBlocks = textBlocks.filter((b) => b.text && b.text.trim().length > 0);
      if (nonEmptyTextBlocks.length > 0) {
        assistantMsg.content = nonEmptyTextBlocks.map((b) => sanitizeSurrogates(b.text)).join("");
      }
      const thinkingBlocks = msg.content.filter((b) => b.type === "thinking");
      const nonEmptyThinkingBlocks = thinkingBlocks.filter((b) => b.thinking && b.thinking.trim().length > 0);
      if (nonEmptyThinkingBlocks.length > 0) {
        if (compat.requiresThinkingAsText) {
          // Replay thinking as a leading text part when the provider has no
          // dedicated reasoning field.
          const thinkingText = nonEmptyThinkingBlocks.map((b) => b.thinking).join("\n\n");
          const textContent = assistantMsg.content;
          if (textContent) {
            textContent.unshift({ type: "text", text: thinkingText });
          } else {
            assistantMsg.content = [{ type: "text", text: thinkingText }];
          }
        } else {
          // Otherwise echo the thinking back on the same field name it
          // originally arrived on (stored as thinkingSignature).
          const signature = nonEmptyThinkingBlocks[0].thinkingSignature;
          if (signature && signature.length > 0) {
            assistantMsg[signature] = nonEmptyThinkingBlocks.map((b) => b.thinking).join("\n");
          }
        }
      }
      const toolCalls = msg.content.filter((b) => b.type === "toolCall");
      if (toolCalls.length > 0) {
        assistantMsg.tool_calls = toolCalls.map((tc) => ({
          id: tc.id,
          type: "function",
          function: {
            name: tc.name,
            arguments: JSON.stringify(tc.arguments)
          }
        }));
        // Re-attach encrypted reasoning details (stored as JSON strings on
        // thoughtSignature); unparseable entries are silently dropped.
        const reasoningDetails = toolCalls.filter((tc) => tc.thoughtSignature).map((tc) => {
          try {
            return JSON.parse(tc.thoughtSignature);
          } catch {
            return null;
          }
        }).filter(Boolean);
        if (reasoningDetails.length > 0) {
          assistantMsg.reasoning_details = reasoningDetails;
        }
      }
      // Skip assistant turns that ended up with no content and no tool calls.
      const content = assistantMsg.content;
      const hasContent = content !== null && content !== void 0 && (typeof content === "string" ? content.length > 0 : content.length > 0);
      if (!hasContent && !assistantMsg.tool_calls) {
        continue;
      }
      params.push(assistantMsg);
    } else if (msg.role === "toolResult") {
      // Consume the whole run of consecutive toolResult messages; images in
      // any of them are collected and re-sent as a follow-up user message
      // (the "tool" role cannot carry images).
      const imageBlocks = [];
      let j = i;
      for (; j < transformedMessages.length && transformedMessages[j].role === "toolResult"; j++) {
        const toolMsg = transformedMessages[j];
        const textResult = toolMsg.content.filter((c) => c.type === "text").map((c) => c.text).join("\n");
        const hasImages = toolMsg.content.some((c) => c.type === "image");
        const hasText = textResult.length > 0;
        const toolResultMsg = {
          role: "tool",
          content: sanitizeSurrogates(hasText ? textResult : "(see attached image)"),
          tool_call_id: toolMsg.toolCallId
        };
        if (compat.requiresToolResultName && toolMsg.toolName) {
          toolResultMsg.name = toolMsg.toolName;
        }
        params.push(toolResultMsg);
        if (hasImages && model.input.includes("image")) {
          for (const block of toolMsg.content) {
            if (block.type === "image") {
              imageBlocks.push({
                type: "image_url",
                image_url: {
                  url: `data:${block.mimeType};base64,${block.data}`
                }
              });
            }
          }
        }
      }
      // Resume the outer loop after the run (loop increment adds 1 back).
      i = j - 1;
      if (imageBlocks.length > 0) {
        if (compat.requiresAssistantAfterToolResult) {
          params.push({
            role: "assistant",
            content: "I have processed the tool results."
          });
        }
        params.push({
          role: "user",
          content: [
            {
              type: "text",
              text: "Attached image(s) from tool result:"
            },
            ...imageBlocks
          ]
        });
        lastRole = "user";
      } else {
        lastRole = "toolResult";
      }
      continue;
    }
    lastRole = msg.role;
  }
  return params;
}
|
|
550
|
+
// Converts internal tool definitions to the OpenAI function-tool format.
// `parameters` is passed through untouched (TypeBox already emits JSON
// Schema). `strict: false` is only included when the provider tolerates
// the field — some reject unknown keys.
function convertTools(tools, compat) {
  const includeStrict = compat.supportsStrictMode !== false;
  return tools.map((tool) => {
    const fn = {
      name: tool.name,
      description: tool.description,
      parameters: tool.parameters
    };
    if (includeStrict) {
      fn.strict = false;
    }
    return { type: "function", function: fn };
  });
}
|
|
563
|
+
// Normalizes a raw OpenAI usage payload into the internal usage shape.
// Cached prompt tokens are split out of `input`, and reasoning tokens are
// folded into `output`. Cost fields are then filled in by calculateCost.
function parseChunkUsage(rawUsage, model) {
  const cacheRead = rawUsage.prompt_tokens_details?.cached_tokens || 0;
  const reasoning = rawUsage.completion_tokens_details?.reasoning_tokens || 0;
  const inputTokens = (rawUsage.prompt_tokens || 0) - cacheRead;
  const outTokens = (rawUsage.completion_tokens || 0) + reasoning;
  const usage = {
    input: inputTokens,
    output: outTokens,
    cacheRead,
    cacheWrite: 0,
    totalTokens: inputTokens + outTokens + cacheRead,
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }
  };
  calculateCost(model, usage);
  return usage;
}
|
|
579
|
+
// Maps a provider finish_reason to the internal stop reason. Unknown (and
// explicitly erroneous) reasons become an "error" stop with a diagnostic
// message carrying the raw value.
function mapStopReason(reason) {
  if (reason === null)
    return { stopReason: "stop" };
  const known = {
    stop: "stop",
    end: "stop",
    length: "length",
    function_call: "toolUse",
    tool_calls: "toolUse"
  };
  if (Object.hasOwn(known, reason)) {
    return { stopReason: known[reason] };
  }
  // Covers content_filter, network_error, and anything unrecognized.
  return {
    stopReason: "error",
    errorMessage: `Provider finish_reason: ${reason}`
  };
}
|
|
602
|
+
// Infers a provider compatibility profile from the model's provider name
// and base URL. These defaults can be overridden per-model via getCompat.
function detectCompat(model) {
  const { provider, baseUrl } = model;
  const hosted = (fragment) => baseUrl.includes(fragment);
  const isZai = provider === "zai" || hosted("api.z.ai");
  const isGrok = provider === "xai" || hosted("api.x.ai");
  const isGroq = provider === "groq" || hosted("groq.com");
  // Providers known to reject OpenAI-specific extensions like `store`
  // and the "developer" role.
  const isNonStandard =
    provider === "cerebras" || hosted("cerebras.ai") ||
    isGrok ||
    hosted("chutes.ai") ||
    hosted("deepseek.com") ||
    isZai ||
    provider === "opencode" || hosted("opencode.ai");
  // Groq's qwen3-32b only accepts "default" as a reasoning effort.
  const effortMap =
    isGroq && model.id === "qwen/qwen3-32b"
      ? { minimal: "default", low: "default", medium: "default", high: "default", xhigh: "default" }
      : {};
  let thinkingFormat = "openai";
  if (isZai) {
    thinkingFormat = "zai";
  } else if (provider === "openrouter" || hosted("openrouter.ai")) {
    thinkingFormat = "openrouter";
  }
  return {
    supportsStore: !isNonStandard,
    supportsDeveloperRole: !isNonStandard,
    supportsReasoningEffort: !isGrok && !isZai,
    reasoningEffortMap: effortMap,
    supportsUsageInStreaming: true,
    maxTokensField: hosted("chutes.ai") ? "max_tokens" : "max_completion_tokens",
    requiresToolResultName: false,
    requiresAssistantAfterToolResult: false,
    requiresThinkingAsText: false,
    thinkingFormat,
    openRouterRouting: {},
    vercelGatewayRouting: {},
    supportsStrictMode: true
  };
}
|
|
633
|
+
// Resolves the effective compatibility profile: per-model overrides from
// model.compat take precedence field-by-field over the detected defaults.
function getCompat(model) {
  const detected = detectCompat(model);
  const overrides = model.compat;
  if (!overrides)
    return detected;
  return {
    supportsStore: overrides.supportsStore ?? detected.supportsStore,
    supportsDeveloperRole: overrides.supportsDeveloperRole ?? detected.supportsDeveloperRole,
    supportsReasoningEffort: overrides.supportsReasoningEffort ?? detected.supportsReasoningEffort,
    reasoningEffortMap: overrides.reasoningEffortMap ?? detected.reasoningEffortMap,
    supportsUsageInStreaming: overrides.supportsUsageInStreaming ?? detected.supportsUsageInStreaming,
    maxTokensField: overrides.maxTokensField ?? detected.maxTokensField,
    requiresToolResultName: overrides.requiresToolResultName ?? detected.requiresToolResultName,
    requiresAssistantAfterToolResult: overrides.requiresAssistantAfterToolResult ?? detected.requiresAssistantAfterToolResult,
    requiresThinkingAsText: overrides.requiresThinkingAsText ?? detected.requiresThinkingAsText,
    thinkingFormat: overrides.thinkingFormat ?? detected.thinkingFormat,
    openRouterRouting: overrides.openRouterRouting ?? {},
    vercelGatewayRouting: overrides.vercelGatewayRouting ?? detected.vercelGatewayRouting,
    supportsStrictMode: overrides.supportsStrictMode ?? detected.supportsStrictMode
  };
}
|
|
653
|
+
export {
|
|
654
|
+
convertMessages,
|
|
655
|
+
streamOpenAICompletions,
|
|
656
|
+
streamSimpleOpenAICompletions
|
|
657
|
+
};
|