@draht/ai 2026.3.2-2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1185 -0
- package/dist/api-registry.d.ts +20 -0
- package/dist/api-registry.d.ts.map +1 -0
- package/dist/api-registry.js +44 -0
- package/dist/api-registry.js.map +1 -0
- package/dist/cli.d.ts +3 -0
- package/dist/cli.d.ts.map +1 -0
- package/dist/cli.js +116 -0
- package/dist/cli.js.map +1 -0
- package/dist/env-api-keys.d.ts +9 -0
- package/dist/env-api-keys.d.ts.map +1 -0
- package/dist/env-api-keys.js +99 -0
- package/dist/env-api-keys.js.map +1 -0
- package/dist/index.d.ts +22 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +21 -0
- package/dist/index.js.map +1 -0
- package/dist/models.d.ts +24 -0
- package/dist/models.d.ts.map +1 -0
- package/dist/models.generated.d.ts +13133 -0
- package/dist/models.generated.d.ts.map +1 -0
- package/dist/models.generated.js +12939 -0
- package/dist/models.generated.js.map +1 -0
- package/dist/models.js +55 -0
- package/dist/models.js.map +1 -0
- package/dist/providers/amazon-bedrock.d.ts +15 -0
- package/dist/providers/amazon-bedrock.d.ts.map +1 -0
- package/dist/providers/amazon-bedrock.js +585 -0
- package/dist/providers/amazon-bedrock.js.map +1 -0
- package/dist/providers/anthropic.d.ts +33 -0
- package/dist/providers/anthropic.d.ts.map +1 -0
- package/dist/providers/anthropic.js +729 -0
- package/dist/providers/anthropic.js.map +1 -0
- package/dist/providers/azure-openai-responses.d.ts +15 -0
- package/dist/providers/azure-openai-responses.d.ts.map +1 -0
- package/dist/providers/azure-openai-responses.js +184 -0
- package/dist/providers/azure-openai-responses.js.map +1 -0
- package/dist/providers/github-copilot-headers.d.ts +8 -0
- package/dist/providers/github-copilot-headers.d.ts.map +1 -0
- package/dist/providers/github-copilot-headers.js +29 -0
- package/dist/providers/github-copilot-headers.js.map +1 -0
- package/dist/providers/google-gemini-cli.d.ts +74 -0
- package/dist/providers/google-gemini-cli.d.ts.map +1 -0
- package/dist/providers/google-gemini-cli.js +735 -0
- package/dist/providers/google-gemini-cli.js.map +1 -0
- package/dist/providers/google-shared.d.ts +65 -0
- package/dist/providers/google-shared.d.ts.map +1 -0
- package/dist/providers/google-shared.js +306 -0
- package/dist/providers/google-shared.js.map +1 -0
- package/dist/providers/google-vertex.d.ts +15 -0
- package/dist/providers/google-vertex.d.ts.map +1 -0
- package/dist/providers/google-vertex.js +371 -0
- package/dist/providers/google-vertex.js.map +1 -0
- package/dist/providers/google.d.ts +13 -0
- package/dist/providers/google.d.ts.map +1 -0
- package/dist/providers/google.js +352 -0
- package/dist/providers/google.js.map +1 -0
- package/dist/providers/openai-codex-responses.d.ts +9 -0
- package/dist/providers/openai-codex-responses.d.ts.map +1 -0
- package/dist/providers/openai-codex-responses.js +699 -0
- package/dist/providers/openai-codex-responses.js.map +1 -0
- package/dist/providers/openai-completions.d.ts +15 -0
- package/dist/providers/openai-completions.d.ts.map +1 -0
- package/dist/providers/openai-completions.js +712 -0
- package/dist/providers/openai-completions.js.map +1 -0
- package/dist/providers/openai-responses-shared.d.ts +17 -0
- package/dist/providers/openai-responses-shared.d.ts.map +1 -0
- package/dist/providers/openai-responses-shared.js +427 -0
- package/dist/providers/openai-responses-shared.js.map +1 -0
- package/dist/providers/openai-responses.d.ts +13 -0
- package/dist/providers/openai-responses.d.ts.map +1 -0
- package/dist/providers/openai-responses.js +198 -0
- package/dist/providers/openai-responses.js.map +1 -0
- package/dist/providers/register-builtins.d.ts +3 -0
- package/dist/providers/register-builtins.d.ts.map +1 -0
- package/dist/providers/register-builtins.js +63 -0
- package/dist/providers/register-builtins.js.map +1 -0
- package/dist/providers/simple-options.d.ts +8 -0
- package/dist/providers/simple-options.d.ts.map +1 -0
- package/dist/providers/simple-options.js +35 -0
- package/dist/providers/simple-options.js.map +1 -0
- package/dist/providers/transform-messages.d.ts +8 -0
- package/dist/providers/transform-messages.d.ts.map +1 -0
- package/dist/providers/transform-messages.js +155 -0
- package/dist/providers/transform-messages.js.map +1 -0
- package/dist/stream.d.ts +9 -0
- package/dist/stream.d.ts.map +1 -0
- package/dist/stream.js +28 -0
- package/dist/stream.js.map +1 -0
- package/dist/types.d.ts +279 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +2 -0
- package/dist/types.js.map +1 -0
- package/dist/utils/event-stream.d.ts +21 -0
- package/dist/utils/event-stream.d.ts.map +1 -0
- package/dist/utils/event-stream.js +81 -0
- package/dist/utils/event-stream.js.map +1 -0
- package/dist/utils/http-proxy.d.ts +2 -0
- package/dist/utils/http-proxy.d.ts.map +1 -0
- package/dist/utils/http-proxy.js +15 -0
- package/dist/utils/http-proxy.js.map +1 -0
- package/dist/utils/json-parse.d.ts +9 -0
- package/dist/utils/json-parse.d.ts.map +1 -0
- package/dist/utils/json-parse.js +29 -0
- package/dist/utils/json-parse.js.map +1 -0
- package/dist/utils/oauth/anthropic.d.ts +17 -0
- package/dist/utils/oauth/anthropic.d.ts.map +1 -0
- package/dist/utils/oauth/anthropic.js +104 -0
- package/dist/utils/oauth/anthropic.js.map +1 -0
- package/dist/utils/oauth/github-copilot.d.ts +30 -0
- package/dist/utils/oauth/github-copilot.d.ts.map +1 -0
- package/dist/utils/oauth/github-copilot.js +281 -0
- package/dist/utils/oauth/github-copilot.js.map +1 -0
- package/dist/utils/oauth/google-antigravity.d.ts +26 -0
- package/dist/utils/oauth/google-antigravity.d.ts.map +1 -0
- package/dist/utils/oauth/google-antigravity.js +373 -0
- package/dist/utils/oauth/google-antigravity.js.map +1 -0
- package/dist/utils/oauth/google-gemini-cli.d.ts +26 -0
- package/dist/utils/oauth/google-gemini-cli.d.ts.map +1 -0
- package/dist/utils/oauth/google-gemini-cli.js +478 -0
- package/dist/utils/oauth/google-gemini-cli.js.map +1 -0
- package/dist/utils/oauth/index.d.ts +62 -0
- package/dist/utils/oauth/index.d.ts.map +1 -0
- package/dist/utils/oauth/index.js +133 -0
- package/dist/utils/oauth/index.js.map +1 -0
- package/dist/utils/oauth/openai-codex.d.ts +34 -0
- package/dist/utils/oauth/openai-codex.d.ts.map +1 -0
- package/dist/utils/oauth/openai-codex.js +380 -0
- package/dist/utils/oauth/openai-codex.js.map +1 -0
- package/dist/utils/oauth/pkce.d.ts +13 -0
- package/dist/utils/oauth/pkce.d.ts.map +1 -0
- package/dist/utils/oauth/pkce.js +31 -0
- package/dist/utils/oauth/pkce.js.map +1 -0
- package/dist/utils/oauth/types.d.ts +47 -0
- package/dist/utils/oauth/types.d.ts.map +1 -0
- package/dist/utils/oauth/types.js +2 -0
- package/dist/utils/oauth/types.js.map +1 -0
- package/dist/utils/overflow.d.ts +52 -0
- package/dist/utils/overflow.d.ts.map +1 -0
- package/dist/utils/overflow.js +115 -0
- package/dist/utils/overflow.js.map +1 -0
- package/dist/utils/sanitize-unicode.d.ts +22 -0
- package/dist/utils/sanitize-unicode.d.ts.map +1 -0
- package/dist/utils/sanitize-unicode.js +26 -0
- package/dist/utils/sanitize-unicode.js.map +1 -0
- package/dist/utils/typebox-helpers.d.ts +17 -0
- package/dist/utils/typebox-helpers.d.ts.map +1 -0
- package/dist/utils/typebox-helpers.js +21 -0
- package/dist/utils/typebox-helpers.js.map +1 -0
- package/dist/utils/validation.d.ts +18 -0
- package/dist/utils/validation.d.ts.map +1 -0
- package/dist/utils/validation.js +72 -0
- package/dist/utils/validation.js.map +1 -0
- package/package.json +67 -0
|
@@ -0,0 +1,712 @@
|
|
|
1
|
+
import OpenAI from "openai";
|
|
2
|
+
import { getEnvApiKey } from "../env-api-keys.js";
|
|
3
|
+
import { calculateCost, supportsXhigh } from "../models.js";
|
|
4
|
+
import { AssistantMessageEventStream } from "../utils/event-stream.js";
|
|
5
|
+
import { parseStreamingJson } from "../utils/json-parse.js";
|
|
6
|
+
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
|
|
7
|
+
import { buildCopilotDynamicHeaders, hasCopilotVisionInput } from "./github-copilot-headers.js";
|
|
8
|
+
import { buildBaseOptions, clampReasoning } from "./simple-options.js";
|
|
9
|
+
import { transformMessages } from "./transform-messages.js";
|
|
10
|
+
/**
 * Normalize a tool call ID to the exactly-9-character alphanumeric form
 * Mistral requires. Strips everything outside [a-zA-Z0-9], then truncates or
 * pads with a fixed alphabetic suffix so the result is always 9 characters.
 * The mapping is deterministic, so a tool call and its matching tool result
 * normalize to the same ID.
 */
function normalizeMistralToolId(id) {
    // Keep only alphanumeric characters (Mistral rejects anything else).
    const alnum = id.replace(/[^a-zA-Z0-9]/g, "");
    if (alnum.length > 9) {
        return alnum.slice(0, 9);
    }
    // padEnd with a 9-char pad source reproduces padding.slice(0, 9 - len)
    // exactly (the pad string is never repeated); a length-9 input is returned
    // unchanged.
    return alnum.padEnd(9, "ABCDEFGHI");
}
|
|
28
|
+
/**
 * Whether the conversation already contains tool activity: a toolResult
 * message, or an assistant message with at least one toolCall content block.
 * Needed because Anthropic (behind a proxy) requires the tools param to be
 * present whenever the history references tool_calls or tool role messages.
 */
function hasToolHistory(messages) {
    return messages.some((msg) => {
        if (msg.role === "toolResult") {
            return true;
        }
        return msg.role === "assistant" && msg.content.some((block) => block.type === "toolCall");
    });
}
|
|
46
|
+
/**
 * Stream a chat completion from an OpenAI-compatible /chat/completions endpoint.
 *
 * Returns an AssistantMessageEventStream immediately; the request itself runs
 * in a detached async IIFE that pushes incremental events (start,
 * text/thinking/toolcall start/delta/end, then done or error) while
 * accumulating the complete assistant message in `output`. All failures are
 * reported through the stream as an "error" event — nothing is thrown to the
 * caller.
 *
 * @param model   Model descriptor (id, api, provider, baseUrl, headers, ...).
 * @param context Request context (systemPrompt, messages, tools).
 * @param options Optional apiKey, headers, signal, maxTokens, temperature,
 *                toolChoice, reasoningEffort, onPayload callback.
 */
export const streamOpenAICompletions = (model, context, options) => {
    const stream = new AssistantMessageEventStream();
    // Fire-and-forget worker; every pushed event carries `output` as `partial`.
    (async () => {
        // Accumulated assistant message, mutated in place as chunks arrive.
        const output = {
            role: "assistant",
            content: [],
            api: model.api,
            provider: model.provider,
            model: model.id,
            usage: {
                input: 0,
                output: 0,
                cacheRead: 0,
                cacheWrite: 0,
                totalTokens: 0,
                cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
            },
            stopReason: "stop",
            timestamp: Date.now(),
        };
        try {
            const apiKey = options?.apiKey || getEnvApiKey(model.provider) || "";
            const client = createClient(model, context, apiKey, options?.headers);
            const params = buildParams(model, context, options);
            // Let callers observe the exact request payload (e.g. for logging).
            options?.onPayload?.(params);
            const openaiStream = await client.chat.completions.create(params, { signal: options?.signal });
            stream.push({ type: "start", partial: output });
            // The content block currently being streamed (text, thinking, or toolCall).
            let currentBlock = null;
            const blocks = output.content;
            const blockIndex = () => blocks.length - 1;
            // Emit the matching *_end event for the block we were building; for
            // tool calls, parse the accumulated JSON argument fragments first.
            const finishCurrentBlock = (block) => {
                if (block) {
                    if (block.type === "text") {
                        stream.push({
                            type: "text_end",
                            contentIndex: blockIndex(),
                            content: block.text,
                            partial: output,
                        });
                    }
                    else if (block.type === "thinking") {
                        stream.push({
                            type: "thinking_end",
                            contentIndex: blockIndex(),
                            content: block.thinking,
                            partial: output,
                        });
                    }
                    else if (block.type === "toolCall") {
                        block.arguments = parseStreamingJson(block.partialArgs);
                        // partialArgs is only a streaming scratch buffer; drop it
                        // from the finished block.
                        delete block.partialArgs;
                        stream.push({
                            type: "toolcall_end",
                            contentIndex: blockIndex(),
                            toolCall: block,
                            partial: output,
                        });
                    }
                }
            };
            for await (const chunk of openaiStream) {
                if (chunk.usage) {
                    const cachedTokens = chunk.usage.prompt_tokens_details?.cached_tokens || 0;
                    const reasoningTokens = chunk.usage.completion_tokens_details?.reasoning_tokens || 0;
                    // OpenAI includes cached tokens in prompt_tokens, so subtract
                    // to get the non-cached input count.
                    const input = (chunk.usage.prompt_tokens || 0) - cachedTokens;
                    const outputTokens = (chunk.usage.completion_tokens || 0) + reasoningTokens;
                    output.usage = {
                        input,
                        output: outputTokens,
                        cacheRead: cachedTokens,
                        cacheWrite: 0,
                        // Compute totalTokens ourselves since we add reasoning_tokens
                        // to output and some providers (e.g., Groq) don't include
                        // them in total_tokens.
                        totalTokens: input + outputTokens + cachedTokens,
                        cost: {
                            input: 0,
                            output: 0,
                            cacheRead: 0,
                            cacheWrite: 0,
                            total: 0,
                        },
                    };
                    // Return value unused — calculateCost fills usage.cost in place.
                    calculateCost(model, output.usage);
                }
                const choice = chunk.choices?.[0];
                if (!choice)
                    continue;
                if (choice.finish_reason) {
                    output.stopReason = mapStopReason(choice.finish_reason);
                }
                if (choice.delta) {
                    // --- Plain text content ---
                    if (choice.delta.content !== null &&
                        choice.delta.content !== undefined &&
                        choice.delta.content.length > 0) {
                        if (!currentBlock || currentBlock.type !== "text") {
                            finishCurrentBlock(currentBlock);
                            currentBlock = { type: "text", text: "" };
                            output.content.push(currentBlock);
                            stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output });
                        }
                        if (currentBlock.type === "text") {
                            currentBlock.text += choice.delta.content;
                            stream.push({
                                type: "text_delta",
                                contentIndex: blockIndex(),
                                delta: choice.delta.content,
                                partial: output,
                            });
                        }
                    }
                    // --- Reasoning / thinking content ---
                    // Some endpoints return reasoning in reasoning_content (llama.cpp),
                    // or reasoning (other openai compatible endpoints).
                    // Use the first non-empty reasoning field to avoid duplication
                    // (e.g., chutes.ai returns both reasoning_content and reasoning
                    // with the same content).
                    const reasoningFields = ["reasoning_content", "reasoning", "reasoning_text"];
                    let foundReasoningField = null;
                    for (const field of reasoningFields) {
                        if (choice.delta[field] !== null &&
                            choice.delta[field] !== undefined &&
                            choice.delta[field].length > 0) {
                            if (!foundReasoningField) {
                                foundReasoningField = field;
                                break;
                            }
                        }
                    }
                    if (foundReasoningField) {
                        if (!currentBlock || currentBlock.type !== "thinking") {
                            finishCurrentBlock(currentBlock);
                            currentBlock = {
                                type: "thinking",
                                thinking: "",
                                // Remember which field carried the reasoning so it can
                                // be written back on the next turn (see convertMessages).
                                thinkingSignature: foundReasoningField,
                            };
                            output.content.push(currentBlock);
                            stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output });
                        }
                        if (currentBlock.type === "thinking") {
                            const delta = choice.delta[foundReasoningField];
                            currentBlock.thinking += delta;
                            stream.push({
                                type: "thinking_delta",
                                contentIndex: blockIndex(),
                                delta,
                                partial: output,
                            });
                        }
                    }
                    // --- Tool calls ---
                    if (choice?.delta?.tool_calls) {
                        for (const toolCall of choice.delta.tool_calls) {
                            // A fresh id (or a non-toolCall current block) starts a new call.
                            if (!currentBlock ||
                                currentBlock.type !== "toolCall" ||
                                (toolCall.id && currentBlock.id !== toolCall.id)) {
                                finishCurrentBlock(currentBlock);
                                currentBlock = {
                                    type: "toolCall",
                                    id: toolCall.id || "",
                                    name: toolCall.function?.name || "",
                                    arguments: {},
                                    partialArgs: "",
                                };
                                output.content.push(currentBlock);
                                stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output });
                            }
                            if (currentBlock.type === "toolCall") {
                                if (toolCall.id)
                                    currentBlock.id = toolCall.id;
                                if (toolCall.function?.name)
                                    currentBlock.name = toolCall.function.name;
                                let delta = "";
                                if (toolCall.function?.arguments) {
                                    delta = toolCall.function.arguments;
                                    currentBlock.partialArgs += toolCall.function.arguments;
                                    // Best-effort parse of the possibly-incomplete JSON so
                                    // partial events expose usable arguments.
                                    currentBlock.arguments = parseStreamingJson(currentBlock.partialArgs);
                                }
                                stream.push({
                                    type: "toolcall_delta",
                                    contentIndex: blockIndex(),
                                    delta,
                                    partial: output,
                                });
                            }
                        }
                    }
                    // --- Encrypted reasoning details ---
                    // Attach encrypted reasoning to its matching tool call as
                    // thoughtSignature so convertMessages can replay it via
                    // reasoning_details on the next turn.
                    const reasoningDetails = choice.delta.reasoning_details;
                    if (reasoningDetails && Array.isArray(reasoningDetails)) {
                        for (const detail of reasoningDetails) {
                            if (detail.type === "reasoning.encrypted" && detail.id && detail.data) {
                                const matchingToolCall = output.content.find((b) => b.type === "toolCall" && b.id === detail.id);
                                if (matchingToolCall) {
                                    matchingToolCall.thoughtSignature = JSON.stringify(detail);
                                }
                            }
                        }
                    }
                }
            }
            finishCurrentBlock(currentBlock);
            if (options?.signal?.aborted) {
                throw new Error("Request was aborted");
            }
            if (output.stopReason === "aborted" || output.stopReason === "error") {
                throw new Error("An unknown error occurred");
            }
            stream.push({ type: "done", reason: output.stopReason, message: output });
            stream.end();
        }
        catch (error) {
            // NOTE(review): nothing in this function sets block.index — presumably
            // defensive cleanup for a field added elsewhere; confirm before removing.
            for (const block of output.content)
                delete block.index;
            output.stopReason = options?.signal?.aborted ? "aborted" : "error";
            output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);
            // Some providers via OpenRouter give additional information in this field.
            const rawMetadata = error?.error?.metadata?.raw;
            if (rawMetadata)
                output.errorMessage += `\n${rawMetadata}`;
            stream.push({ type: "error", reason: output.stopReason, error: output });
            stream.end();
        }
    })();
    return stream;
};
|
|
269
|
+
/**
 * Convenience wrapper around streamOpenAICompletions: resolves the API key
 * (explicit option first, then environment), builds the shared base options,
 * and clamps the reasoning effort for models without "xhigh" support.
 * Throws if no API key can be resolved for the model's provider.
 */
export const streamSimpleOpenAICompletions = (model, context, options) => {
    const apiKey = options?.apiKey || getEnvApiKey(model.provider);
    if (!apiKey) {
        throw new Error(`No API key for provider: ${model.provider}`);
    }
    const baseOptions = buildBaseOptions(model, options, apiKey);
    // Models that don't support "xhigh" get their reasoning level clamped.
    const effort = supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning);
    return streamOpenAICompletions(model, context, {
        ...baseOptions,
        reasoningEffort: effort,
        toolChoice: options?.toolChoice,
    });
};
|
|
283
|
+
/**
 * Build an OpenAI SDK client for the given model/provider.
 * Falls back to OPENAI_API_KEY when no key was resolved, starts from the
 * model's default headers, layers GitHub Copilot dynamic headers on top when
 * applicable, and lets caller-supplied headers override everything else.
 */
function createClient(model, context, apiKey, optionsHeaders) {
    if (!apiKey) {
        if (!process.env.OPENAI_API_KEY) {
            throw new Error("OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.");
        }
        apiKey = process.env.OPENAI_API_KEY;
    }
    const headers = { ...model.headers };
    // GitHub Copilot needs request-dependent headers (e.g. vision support).
    if (model.provider === "github-copilot") {
        const hasImages = hasCopilotVisionInput(context.messages);
        Object.assign(headers, buildCopilotDynamicHeaders({
            messages: context.messages,
            hasImages,
        }));
    }
    // Caller-supplied headers are merged last so they win over defaults.
    if (optionsHeaders) {
        Object.assign(headers, optionsHeaders);
    }
    return new OpenAI({
        apiKey,
        baseURL: model.baseUrl,
        dangerouslyAllowBrowser: true,
        defaultHeaders: headers,
    });
}
|
|
310
|
+
/**
 * Translate our request into OpenAI chat.completions parameters, applying
 * per-provider compatibility quirks from getCompat: max-token field name,
 * streaming usage support, store flag, thinking/reasoning flags, and
 * OpenRouter / Vercel AI Gateway provider-routing preferences.
 */
function buildParams(model, context, options) {
    const compat = getCompat(model);
    const messages = convertMessages(model, context, compat);
    maybeAddOpenRouterAnthropicCacheControl(model, messages);
    const params = { model: model.id, messages, stream: true };
    // Most providers can report token usage on the final stream chunk.
    if (compat.supportsUsageInStreaming !== false) {
        params.stream_options = { include_usage: true };
    }
    if (compat.supportsStore) {
        params.store = false;
    }
    if (options?.maxTokens) {
        // Legacy providers still take max_tokens; OpenAI moved to max_completion_tokens.
        const field = compat.maxTokensField === "max_tokens" ? "max_tokens" : "max_completion_tokens";
        params[field] = options.maxTokens;
    }
    if (options?.temperature !== undefined) {
        params.temperature = options.temperature;
    }
    if (context.tools) {
        params.tools = convertTools(context.tools, compat);
    }
    else if (hasToolHistory(context.messages)) {
        // Anthropic (via LiteLLM/proxy) requires a tools param whenever the
        // conversation already contains tool_calls / tool results.
        params.tools = [];
    }
    if (options?.toolChoice) {
        params.tool_choice = options.toolChoice;
    }
    if ((compat.thinkingFormat === "zai" || compat.thinkingFormat === "qwen") && model.reasoning) {
        // Both Z.ai and Qwen toggle thinking via enable_thinking: boolean.
        params.enable_thinking = !!options?.reasoningEffort;
    }
    else if (options?.reasoningEffort && model.reasoning && compat.supportsReasoningEffort) {
        // OpenAI-style reasoning_effort.
        params.reasoning_effort = options.reasoningEffort;
    }
    // OpenRouter provider routing preferences.
    if (model.baseUrl.includes("openrouter.ai") && model.compat?.openRouterRouting) {
        params.provider = model.compat.openRouterRouting;
    }
    // Vercel AI Gateway provider routing preferences.
    if (model.baseUrl.includes("ai-gateway.vercel.sh") && model.compat?.vercelGatewayRouting) {
        const { only, order } = model.compat.vercelGatewayRouting;
        if (only || order) {
            const gateway = {};
            if (only) {
                gateway.only = only;
            }
            if (order) {
                gateway.order = order;
            }
            params.providerOptions = { gateway };
        }
    }
    return params;
}
|
|
372
|
+
/**
 * For Anthropic models routed through OpenRouter, mark the last cacheable
 * text part with an Anthropic-style cache_control breakpoint. Walks the
 * messages backwards and mutates the first user/assistant text content it
 * finds; string content is promoted to a single text part. No-op for any
 * other provider/model combination.
 */
function maybeAddOpenRouterAnthropicCacheControl(model, messages) {
    if (model.provider !== "openrouter" || !model.id.startsWith("anthropic/")) {
        return;
    }
    for (let i = messages.length - 1; i >= 0; i--) {
        const msg = messages[i];
        if (msg.role !== "user" && msg.role !== "assistant") {
            continue;
        }
        if (typeof msg.content === "string") {
            // Promote string content to a text part carrying the breakpoint.
            msg.content = [{ type: "text", text: msg.content, cache_control: { type: "ephemeral" } }];
            return;
        }
        if (Array.isArray(msg.content)) {
            // Attach the breakpoint to the last text part of this message, if any;
            // otherwise keep walking to an earlier message.
            for (let j = msg.content.length - 1; j >= 0; j--) {
                const part = msg.content[j];
                if (part?.type === "text") {
                    part.cache_control = { type: "ephemeral" };
                    return;
                }
            }
        }
    }
}
|
|
400
|
+
/**
 * Convert the internal message history into OpenAI chat.completions messages,
 * applying per-provider compatibility quirks from `compat`:
 * - tool call IDs normalized (Mistral 9-char IDs, pipe-separated Responses IDs),
 * - system prompt sent with "developer" role for reasoning models that support it,
 * - synthetic assistant bridges where providers forbid user-after-tool-result,
 * - image parts dropped for models without image input,
 * - tool-result images re-sent as a follow-up user message, since the "tool"
 *   role in this API carries only text.
 */
export function convertMessages(model, context, compat) {
    const params = [];
    const normalizeToolCallId = (id) => {
        if (compat.requiresMistralToolIds)
            return normalizeMistralToolId(id);
        // Handle pipe-separated IDs from OpenAI Responses API.
        // Format: {call_id}|{id} where {id} can be 400+ chars with special chars (+, /, =).
        // These come from providers like github-copilot, openai-codex, opencode.
        // Extract just the call_id part and normalize it.
        if (id.includes("|")) {
            const [callId] = id.split("|");
            // Sanitize to allowed chars and truncate to 40 chars (OpenAI limit).
            return callId.replace(/[^a-zA-Z0-9_-]/g, "_").slice(0, 40);
        }
        if (model.provider === "openai")
            return id.length > 40 ? id.slice(0, 40) : id;
        return id;
    };
    const transformedMessages = transformMessages(context.messages, model, (id) => normalizeToolCallId(id));
    if (context.systemPrompt) {
        // Reasoning models that support it receive the system prompt as "developer".
        const useDeveloperRole = model.reasoning && compat.supportsDeveloperRole;
        const role = useDeveloperRole ? "developer" : "system";
        params.push({ role: role, content: sanitizeSurrogates(context.systemPrompt) });
    }
    let lastRole = null;
    for (let i = 0; i < transformedMessages.length; i++) {
        const msg = transformedMessages[i];
        // Some providers (e.g. Mistral/Devstral) don't allow user messages directly
        // after tool results. Insert a synthetic assistant message to bridge the gap.
        if (compat.requiresAssistantAfterToolResult && lastRole === "toolResult" && msg.role === "user") {
            params.push({
                role: "assistant",
                content: "I have processed the tool results.",
            });
        }
        if (msg.role === "user") {
            if (typeof msg.content === "string") {
                params.push({
                    role: "user",
                    content: sanitizeSurrogates(msg.content),
                });
            }
            else {
                // Structured content: map our text/image blocks to OpenAI parts.
                const content = msg.content.map((item) => {
                    if (item.type === "text") {
                        return {
                            type: "text",
                            text: sanitizeSurrogates(item.text),
                        };
                    }
                    else {
                        return {
                            type: "image_url",
                            image_url: {
                                url: `data:${item.mimeType};base64,${item.data}`,
                            },
                        };
                    }
                });
                // Drop images for models that cannot accept image input.
                const filteredContent = !model.input.includes("image")
                    ? content.filter((c) => c.type !== "image_url")
                    : content;
                // Skip the message entirely if filtering left nothing to send.
                if (filteredContent.length === 0)
                    continue;
                params.push({
                    role: "user",
                    content: filteredContent,
                });
            }
        }
        else if (msg.role === "assistant") {
            // Some providers (e.g. Mistral) don't accept null content, use empty string instead.
            const assistantMsg = {
                role: "assistant",
                content: compat.requiresAssistantAfterToolResult ? "" : null,
            };
            const textBlocks = msg.content.filter((b) => b.type === "text");
            // Filter out empty text blocks to avoid API validation errors.
            const nonEmptyTextBlocks = textBlocks.filter((b) => b.text && b.text.trim().length > 0);
            if (nonEmptyTextBlocks.length > 0) {
                // GitHub Copilot requires assistant content as a string, not an array.
                // Sending as array causes Claude models to re-answer all previous prompts.
                if (model.provider === "github-copilot") {
                    assistantMsg.content = nonEmptyTextBlocks.map((b) => sanitizeSurrogates(b.text)).join("");
                }
                else {
                    assistantMsg.content = nonEmptyTextBlocks.map((b) => {
                        return { type: "text", text: sanitizeSurrogates(b.text) };
                    });
                }
            }
            // Handle thinking blocks.
            const thinkingBlocks = msg.content.filter((b) => b.type === "thinking");
            // Filter out empty thinking blocks to avoid API validation errors.
            const nonEmptyThinkingBlocks = thinkingBlocks.filter((b) => b.thinking && b.thinking.trim().length > 0);
            if (nonEmptyThinkingBlocks.length > 0) {
                if (compat.requiresThinkingAsText) {
                    // Convert thinking blocks to plain text (no tags, to avoid the
                    // model mimicking them) and prepend before the answer text.
                    const thinkingText = nonEmptyThinkingBlocks.map((b) => b.thinking).join("\n\n");
                    const textContent = assistantMsg.content;
                    if (textContent) {
                        textContent.unshift({ type: "text", text: thinkingText });
                    }
                    else {
                        assistantMsg.content = [{ type: "text", text: thinkingText }];
                    }
                }
                else {
                    // Use the signature from the first thinking block if available
                    // (for llama.cpp server + gpt-oss). The signature is the name of
                    // the provider's reasoning field (e.g. "reasoning_content"), so
                    // the thinking text is written back under that same key.
                    const signature = nonEmptyThinkingBlocks[0].thinkingSignature;
                    if (signature && signature.length > 0) {
                        assistantMsg[signature] = nonEmptyThinkingBlocks.map((b) => b.thinking).join("\n");
                    }
                }
            }
            const toolCalls = msg.content.filter((b) => b.type === "toolCall");
            if (toolCalls.length > 0) {
                assistantMsg.tool_calls = toolCalls.map((tc) => ({
                    id: tc.id,
                    type: "function",
                    function: {
                        name: tc.name,
                        arguments: JSON.stringify(tc.arguments),
                    },
                }));
                // Replay encrypted reasoning captured as thoughtSignature (JSON text)
                // back to the provider as reasoning_details; unparseable entries are
                // silently dropped.
                const reasoningDetails = toolCalls
                    .filter((tc) => tc.thoughtSignature)
                    .map((tc) => {
                    try {
                        return JSON.parse(tc.thoughtSignature);
                    }
                    catch {
                        return null;
                    }
                })
                    .filter(Boolean);
                if (reasoningDetails.length > 0) {
                    assistantMsg.reasoning_details = reasoningDetails;
                }
            }
            // Skip assistant messages that have no content and no tool calls.
            // Mistral explicitly requires "either content or tool_calls, but not none".
            // Other providers also don't accept empty assistant messages.
            // This handles aborted assistant responses that got no content.
            const content = assistantMsg.content;
            const hasContent = content !== null &&
                content !== undefined &&
                (typeof content === "string" ? content.length > 0 : content.length > 0);
            if (!hasContent && !assistantMsg.tool_calls) {
                continue;
            }
            params.push(assistantMsg);
        }
        else if (msg.role === "toolResult") {
            // Consume the whole run of consecutive toolResult messages starting at i,
            // collecting any images so they can be re-sent as a user message after.
            const imageBlocks = [];
            let j = i;
            for (; j < transformedMessages.length && transformedMessages[j].role === "toolResult"; j++) {
                const toolMsg = transformedMessages[j];
                // Extract text and image content.
                const textResult = toolMsg.content
                    .filter((c) => c.type === "text")
                    .map((c) => c.text)
                    .join("\n");
                const hasImages = toolMsg.content.some((c) => c.type === "image");
                // Always send tool result with text (or a placeholder if only images).
                const hasText = textResult.length > 0;
                // Some providers (e.g. Mistral) require the 'name' field in tool results.
                const toolResultMsg = {
                    role: "tool",
                    content: sanitizeSurrogates(hasText ? textResult : "(see attached image)"),
                    tool_call_id: toolMsg.toolCallId,
                };
                if (compat.requiresToolResultName && toolMsg.toolName) {
                    toolResultMsg.name = toolMsg.toolName;
                }
                params.push(toolResultMsg);
                if (hasImages && model.input.includes("image")) {
                    for (const block of toolMsg.content) {
                        if (block.type === "image") {
                            imageBlocks.push({
                                type: "image_url",
                                image_url: {
                                    url: `data:${block.mimeType};base64,${block.data}`,
                                },
                            });
                        }
                    }
                }
            }
            // The loop increment will step past the consumed run.
            i = j - 1;
            if (imageBlocks.length > 0) {
                // The "tool" role cannot carry images, so attach them via a user
                // message — bridged with a synthetic assistant turn where required.
                if (compat.requiresAssistantAfterToolResult) {
                    params.push({
                        role: "assistant",
                        content: "I have processed the tool results.",
                    });
                }
                params.push({
                    role: "user",
                    content: [
                        {
                            type: "text",
                            text: "Attached image(s) from tool result:",
                        },
                        ...imageBlocks,
                    ],
                });
                lastRole = "user";
            }
            else {
                lastRole = "toolResult";
            }
            continue;
        }
        lastRole = msg.role;
    }
    return params;
}
|
|
618
|
+
/**
 * Convert internal tool definitions into the OpenAI chat-completions
 * `tools` array shape.
 *
 * @param {Array<{name: string, description: string, parameters: object}>} tools
 *   Tool definitions; `parameters` is already JSON Schema (TypeBox output).
 * @param {object} compat - Resolved provider compatibility flags.
 * @returns {Array<object>} Tools in `{ type: "function", function: {...} }` form.
 */
function convertTools(tools, compat) {
    const converted = [];
    for (const tool of tools) {
        const fn = {
            name: tool.name,
            description: tool.description,
            parameters: tool.parameters, // TypeBox already generates JSON Schema
        };
        // Only include strict if provider supports it. Some reject unknown fields.
        if (compat.supportsStrictMode !== false) {
            fn.strict = false;
        }
        converted.push({ type: "function", function: fn });
    }
    return converted;
}
|
|
630
|
+
/**
 * Map an OpenAI `finish_reason` value onto the internal stop-reason vocabulary.
 *
 * @param {string|null} reason - Provider finish reason; `null` is treated as a normal stop.
 * @returns {"stop"|"length"|"toolUse"|"error"} Internal stop reason.
 * @throws {Error} If the reason is not one of the known finish_reason values.
 */
function mapStopReason(reason) {
    if (reason === null)
        return "stop";
    // Map (not a plain object) so inherited keys like "toString" never match.
    const table = new Map([
        ["stop", "stop"],
        ["length", "length"],
        ["function_call", "toolUse"],
        ["tool_calls", "toolUse"],
        ["content_filter", "error"],
    ]);
    if (!table.has(reason)) {
        throw new Error(`Unhandled stop reason: ${reason}`);
    }
    return table.get(reason);
}
|
|
649
|
+
/**
 * Detect compatibility settings from provider and baseUrl for known providers.
 * Provider takes precedence over URL-based detection since it's explicitly configured.
 * Returns a fully resolved OpenAICompletionsCompat object with all fields set.
 *
 * @param {{provider: string, baseUrl: string}} model - Model descriptor to inspect.
 * @returns {object} Fully populated compatibility settings.
 */
function detectCompat(model) {
    const { provider, baseUrl } = model;
    // A provider is "matched" either by its explicit id or by its API host
    // appearing in the configured base URL.
    const matches = (id, hostFragment) => provider === id || baseUrl.includes(hostFragment);
    const isZai = matches("zai", "api.z.ai");
    const isGrok = matches("xai", "api.x.ai");
    const isMistral = matches("mistral", "mistral.ai");
    // Providers that diverge from the vanilla OpenAI completions contract
    // (no `store`, no developer role, etc.).
    const isNonStandard = matches("cerebras", "cerebras.ai") ||
        isGrok ||
        isMistral ||
        baseUrl.includes("chutes.ai") ||
        baseUrl.includes("deepseek.com") ||
        isZai ||
        matches("opencode", "opencode.ai");
    // These providers still expect the legacy `max_tokens` field.
    const useMaxTokens = isMistral || baseUrl.includes("chutes.ai");
    return {
        supportsStore: !isNonStandard,
        supportsDeveloperRole: !isNonStandard,
        supportsReasoningEffort: !isGrok && !isZai,
        supportsUsageInStreaming: true,
        maxTokensField: useMaxTokens ? "max_tokens" : "max_completion_tokens",
        requiresToolResultName: isMistral,
        requiresAssistantAfterToolResult: false, // Mistral no longer requires this as of Dec 2024
        requiresThinkingAsText: isMistral,
        requiresMistralToolIds: isMistral,
        thinkingFormat: isZai ? "zai" : "openai",
        openRouterRouting: {},
        vercelGatewayRouting: {},
        supportsStrictMode: true,
    };
}
|
|
688
|
+
/**
 * Get resolved compatibility settings for a model.
 * Uses explicit model.compat if provided, otherwise auto-detects from provider/URL.
 *
 * Every field of the detected baseline can be individually overridden by
 * `model.compat`; unset (nullish) overrides fall back to the detected value.
 *
 * @param {{compat?: object}} model - Model descriptor, optionally carrying explicit compat overrides.
 * @returns {object} Fully resolved compatibility settings.
 */
function getCompat(model) {
    const detected = detectCompat(model);
    if (!model.compat)
        return detected;
    // Fold overrides over the detected baseline, field by field. This also
    // fixes an inconsistency where `openRouterRouting` previously fell back to
    // a literal `{}` instead of `detected.openRouterRouting` like every other
    // field (behavior is unchanged today, since detectCompat returns `{}`).
    const resolved = {};
    for (const key of Object.keys(detected)) {
        resolved[key] = model.compat[key] ?? detected[key];
    }
    return resolved;
}
|
|
712
|
+
//# sourceMappingURL=openai-completions.js.map
|