@core-ai/openai 0.5.1 → 0.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +21 -0
- package/dist/chunk-ZHHJ76M7.js +416 -0
- package/dist/compat.d.ts +18 -0
- package/dist/compat.js +612 -0
- package/dist/index.d.ts +7 -1
- package/dist/index.js +529 -522
- package/dist/provider-options-DK-Tz0pz.d.ts +157 -0
- package/package.json +9 -4
package/dist/compat.js
ADDED
|
@@ -0,0 +1,612 @@
|
|
|
1
|
+
import {
|
|
2
|
+
clampReasoningEffort,
|
|
3
|
+
convertToolChoice,
|
|
4
|
+
convertTools,
|
|
5
|
+
createOpenAIEmbeddingModel,
|
|
6
|
+
createOpenAIImageModel,
|
|
7
|
+
createStructuredOutputOptions,
|
|
8
|
+
getOpenAIModelCapabilities,
|
|
9
|
+
getStructuredOutputToolName,
|
|
10
|
+
openaiCompatGenerateProviderOptionsSchema,
|
|
11
|
+
openaiCompatProviderOptionsSchema,
|
|
12
|
+
parseOpenAICompatGenerateProviderOptions,
|
|
13
|
+
safeParseJsonObject,
|
|
14
|
+
toOpenAIReasoningEffort,
|
|
15
|
+
validateOpenAIReasoningConfig,
|
|
16
|
+
wrapOpenAIError
|
|
17
|
+
} from "./chunk-ZHHJ76M7.js";
|
|
18
|
+
|
|
19
|
+
// src/compat/provider.ts
|
|
20
|
+
import OpenAI from "openai";
|
|
21
|
+
|
|
22
|
+
// src/compat/chat-model.ts
|
|
23
|
+
import {
|
|
24
|
+
StructuredOutputNoObjectGeneratedError,
|
|
25
|
+
StructuredOutputParseError,
|
|
26
|
+
StructuredOutputValidationError,
|
|
27
|
+
createObjectStream,
|
|
28
|
+
createChatStream
|
|
29
|
+
} from "@core-ai/core-ai";
|
|
30
|
+
|
|
31
|
+
// src/compat/chat-adapter.ts
|
|
32
|
+
// Convert a list of provider-agnostic chat messages into the OpenAI
// chat-completions wire format, preserving order.
function convertMessages(messages) {
  const converted = [];
  for (const message of messages) {
    converted.push(convertMessage(message));
  }
  return converted;
}
|
|
35
|
+
// Convert one provider-agnostic message into its OpenAI chat-completions
// equivalent. Any role other than system/user/assistant is treated as a
// tool-result message.
function convertMessage(message) {
  switch (message.role) {
    case "system":
      return { role: "system", content: message.content };
    case "user": {
      const content = typeof message.content === "string"
        ? message.content
        : message.content.map(convertUserContentPart);
      return { role: "user", content };
    }
    case "assistant": {
      // Single pass over the parts: collect visible text (reasoning is
      // re-wrapped in <thinking> tags) and tool calls separately.
      const textSegments = [];
      const calls = [];
      for (const part of message.parts) {
        if (part.type === "text") {
          textSegments.push(part.text);
        } else if (part.type === "reasoning" && part.text.length > 0) {
          textSegments.push(`<thinking>${part.text}</thinking>`);
        } else if (part.type === "tool-call") {
          calls.push(part.toolCall);
        }
      }
      const text = textSegments.join("\n\n");
      const result = {
        role: "assistant",
        // OpenAI expects null (not "") when the assistant emitted no text.
        content: text.length > 0 ? text : null
      };
      if (calls.length > 0) {
        result.tool_calls = calls.map((toolCall) => ({
          id: toolCall.id,
          type: "function",
          function: {
            name: toolCall.name,
            arguments: JSON.stringify(toolCall.arguments)
          }
        }));
      }
      return result;
    }
    default:
      return {
        role: "tool",
        tool_call_id: message.toolCallId,
        content: message.content
      };
  }
}
|
|
80
|
+
// Convert a single user content part (text, image, or file) into the
// OpenAI chat-completions content-part shape.
function convertUserContentPart(part) {
  if (part.type === "text") {
    return { type: "text", text: part.text };
  }
  if (part.type === "image") {
    const { source } = part;
    // URL sources pass through; inline sources become a base64 data URL.
    const url = source.type === "url"
      ? source.url
      : `data:${source.mediaType};base64,${source.data}`;
    return { type: "image_url", image_url: { url } };
  }
  // Fallback: treat the part as a file attachment.
  const file = { file_data: part.data };
  if (part.filename) {
    file.filename = part.filename;
  }
  return { type: "file", file };
}
|
|
104
|
+
// Build a non-streaming chat-completions request body.
function createGenerateRequest(modelId, options) {
  const streaming = false;
  return createRequest(modelId, options, streaming);
}
|
|
107
|
+
// Build a streaming chat-completions request body.
function createStreamRequest(modelId, options) {
  const streaming = true;
  return createRequest(modelId, options, streaming);
}
|
|
110
|
+
// Assemble the full chat-completions request: shared base fields, the
// parsed OpenAI provider options, and (when streaming) the stream flags.
function createRequest(modelId, options, stream) {
  const openaiOptions = parseOpenAICompatGenerateProviderOptions(options.providerOptions);
  const request = {
    ...createRequestBase(modelId, options),
    ...mapOpenAIProviderOptionsToRequestFields(openaiOptions)
  };
  if (stream) {
    request.stream = true;
    // Ask the API to append a final usage chunk to the stream.
    request.stream_options = { include_usage: true };
  }
  return request;
}
|
|
125
|
+
// Fields common to streaming and non-streaming requests: model, messages,
// tools, tool choice, reasoning, and sampling parameters.
function createRequestBase(modelId, options) {
  // Fails fast if the reasoning configuration is not valid for this model.
  validateOpenAIReasoningConfig(modelId, options);
  const request = {
    model: modelId,
    messages: convertMessages(options.messages),
    ...mapReasoningToRequestFields(modelId, options),
    ...mapSamplingToRequestFields(options)
  };
  const tools = options.tools;
  if (tools && Object.keys(tools).length > 0) {
    request.tools = convertTools(tools);
  }
  if (options.toolChoice) {
    request.tool_choice = convertToolChoice(options.toolChoice);
  }
  return request;
}
|
|
137
|
+
// Map optional sampling parameters to their snake_case request fields,
// omitting any that were not supplied (explicit 0 values are kept).
function mapSamplingToRequestFields(options) {
  const fields = {};
  if (options.temperature !== undefined) {
    fields.temperature = options.temperature;
  }
  if (options.maxTokens !== undefined) {
    fields.max_tokens = options.maxTokens;
  }
  if (options.topP !== undefined) {
    fields.top_p = options.topP;
  }
  return fields;
}
|
|
144
|
+
// Map parsed OpenAI provider options onto request fields. All checks are
// "defined" checks except stopSequences, which keeps the original truthy
// check (a missing/undefined array is simply omitted).
function mapOpenAIProviderOptionsToRequestFields(options) {
  const fields = {};
  if (options?.store !== undefined) {
    fields.store = options.store;
  }
  if (options?.serviceTier !== undefined) {
    fields.service_tier = options.serviceTier;
  }
  if (options?.parallelToolCalls !== undefined) {
    fields.parallel_tool_calls = options.parallelToolCalls;
  }
  if (options?.user !== undefined) {
    fields.user = options.user;
  }
  if (options?.stopSequences) {
    fields.stop = options.stopSequences;
  }
  if (options?.frequencyPenalty !== undefined) {
    fields.frequency_penalty = options.frequencyPenalty;
  }
  if (options?.presencePenalty !== undefined) {
    fields.presence_penalty = options.presencePenalty;
  }
  if (options?.seed !== undefined) {
    fields.seed = options.seed;
  }
  return fields;
}
|
|
156
|
+
// Map a chat-completions API response to the provider-agnostic generate
// result (parts, content, tool calls, finish reason, and usage).
function mapGenerateResponse(response) {
  const firstChoice = response.choices[0];
  if (!firstChoice) {
    // No choice at all: return an empty result with zeroed usage.
    return {
      parts: [],
      content: null,
      reasoning: null,
      toolCalls: [],
      finishReason: "unknown",
      usage: {
        inputTokens: 0,
        outputTokens: 0,
        inputTokenDetails: { cacheReadTokens: 0, cacheWriteTokens: 0 },
        outputTokenDetails: {}
      }
    };
  }
  const content = extractTextContent(firstChoice.message.content);
  const toolCalls = parseToolCalls(firstChoice.message.tool_calls);
  const apiUsage = response.usage;
  const reasoningTokens = apiUsage?.completion_tokens_details?.reasoning_tokens;
  const outputTokenDetails = {};
  if (reasoningTokens !== undefined) {
    outputTokenDetails.reasoningTokens = reasoningTokens;
  }
  return {
    parts: createAssistantParts(content, toolCalls),
    content,
    reasoning: null,
    toolCalls,
    finishReason: mapFinishReason(firstChoice.finish_reason),
    usage: {
      inputTokens: apiUsage?.prompt_tokens ?? 0,
      outputTokens: apiUsage?.completion_tokens ?? 0,
      inputTokenDetails: {
        // Chat completions only report cache reads; writes are not exposed.
        cacheReadTokens: apiUsage?.prompt_tokens_details?.cached_tokens ?? 0,
        cacheWriteTokens: 0
      },
      outputTokenDetails
    }
  };
}
|
|
199
|
+
// Extract function-type tool calls from the API response; other tool-call
// types (and a missing array) yield no entries.
function parseToolCalls(calls) {
  if (!calls) {
    return [];
  }
  return calls
    .filter((toolCall) => toolCall.type === "function")
    .map(mapFunctionToolCall);
}
|
|
210
|
+
// Map one API function tool call to the internal shape, parsing its
// JSON-encoded argument string into an object (best effort).
function mapFunctionToolCall(toolCall) {
  const { name, arguments: rawArguments } = toolCall.function;
  return {
    id: toolCall.id,
    name,
    arguments: safeParseJsonObject(rawArguments)
  };
}
|
|
217
|
+
// Translate an OpenAI finish_reason string to the internal finish reason.
// Unrecognized (or missing) values map to "unknown".
function mapFinishReason(reason) {
  switch (reason) {
    case "stop":
      return "stop";
    case "length":
      return "length";
    case "tool_calls":
    case "function_call":
      return "tool-calls";
    case "content_filter":
      return "content-filter";
    default:
      return "unknown";
  }
}
|
|
232
|
+
// Transform a raw chat-completions SSE chunk stream into the internal
// stream-event protocol: text-delta, tool-call-start/-delta/-end, and a
// single trailing finish event carrying the final reason and usage.
//
// Fix vs previous version: a brand-new tool call used to yield its first
// "tool-call-delta" BEFORE the "tool-call-start" that announces its id,
// so consumers could receive a delta for an unknown toolCallId. The start
// event is now emitted before any argument deltas for that call.
async function* transformStream(stream) {
  // Partial tool calls accumulated across chunks, keyed by delta index.
  const bufferedToolCalls = /* @__PURE__ */ new Map();
  // Ids for which a tool-call-end has already been yielded.
  const emittedToolCalls = /* @__PURE__ */ new Set();
  let finishReason = "unknown";
  let usage = {
    inputTokens: 0,
    outputTokens: 0,
    inputTokenDetails: {
      cacheReadTokens: 0,
      cacheWriteTokens: 0
    },
    outputTokenDetails: {}
  };
  for await (const chunk of stream) {
    if (chunk.usage) {
      // With stream_options.include_usage, usage arrives on a late chunk;
      // keep whatever the most recent chunk reported.
      const reasoningTokens = chunk.usage.completion_tokens_details?.reasoning_tokens;
      usage = {
        inputTokens: chunk.usage.prompt_tokens ?? 0,
        outputTokens: chunk.usage.completion_tokens ?? 0,
        inputTokenDetails: {
          cacheReadTokens: chunk.usage.prompt_tokens_details?.cached_tokens ?? 0,
          cacheWriteTokens: 0
        },
        outputTokenDetails: {
          ...reasoningTokens !== void 0 ? { reasoningTokens } : {}
        }
      };
    }
    const choice = chunk.choices[0];
    if (!choice) {
      continue;
    }
    if (choice.delta.content) {
      yield {
        type: "text-delta",
        text: choice.delta.content
      };
    }
    if (choice.delta.tool_calls) {
      for (const partialToolCall of choice.delta.tool_calls) {
        const wasNew = !bufferedToolCalls.has(partialToolCall.index);
        const current = bufferedToolCalls.get(
          partialToolCall.index
        ) ?? {
          id: partialToolCall.id ?? `tool-${partialToolCall.index}`,
          name: partialToolCall.function?.name ?? "",
          arguments: ""
        };
        // Later chunks may fill in or correct the id/name.
        if (partialToolCall.id) {
          current.id = partialToolCall.id;
        }
        if (partialToolCall.function?.name) {
          current.name = partialToolCall.function.name;
        }
        bufferedToolCalls.set(partialToolCall.index, current);
        // Announce the call before any of its argument deltas.
        if (wasNew) {
          yield {
            type: "tool-call-start",
            toolCallId: current.id,
            toolName: current.name
          };
        }
        if (partialToolCall.function?.arguments) {
          current.arguments += partialToolCall.function.arguments;
          yield {
            type: "tool-call-delta",
            toolCallId: current.id,
            argumentsDelta: partialToolCall.function.arguments
          };
        }
      }
    }
    if (choice.finish_reason) {
      finishReason = mapFinishReason(choice.finish_reason);
    }
    if (finishReason === "tool-calls") {
      // Once the model finishes with tool calls, flush every buffered call
      // exactly once with its fully accumulated arguments.
      for (const toolCall of bufferedToolCalls.values()) {
        if (emittedToolCalls.has(toolCall.id)) {
          continue;
        }
        emittedToolCalls.add(toolCall.id);
        yield {
          type: "tool-call-end",
          toolCall: {
            id: toolCall.id,
            name: toolCall.name,
            arguments: safeParseJsonObject(toolCall.arguments)
          }
        };
      }
    }
  }
  yield {
    type: "finish",
    finishReason,
    usage
  };
}
|
|
330
|
+
// Translate the abstract reasoning option into OpenAI's reasoning_effort
// field, clamped to the model's supported range. Returns an empty object
// when reasoning is absent or the model does not support effort levels.
function mapReasoningToRequestFields(modelId, options) {
  const reasoning = options.reasoning;
  if (!reasoning) {
    return {};
  }
  const { reasoning: reasoningCapabilities } = getOpenAIModelCapabilities(modelId);
  if (!reasoningCapabilities.supportsEffort) {
    return {};
  }
  const effort = clampReasoningEffort(
    reasoning.effort,
    reasoningCapabilities.supportedRange
  );
  return { reasoning_effort: toOpenAIReasoningEffort(effort) };
}
|
|
346
|
+
// Build the assistant message parts list: an optional leading text part
// followed by one tool-call part per tool call.
function createAssistantParts(content, toolCalls) {
  const textParts = content ? [{ type: "text", text: content }] : [];
  const callParts = toolCalls.map((toolCall) => ({
    type: "tool-call",
    toolCall
  }));
  return [...textParts, ...callParts];
}
|
|
362
|
+
// Pull a plain-text string out of a response content field, which may be a
// string, an array of content parts, or something else (-> null). An
// empty concatenation also yields null.
function extractTextContent(content) {
  if (typeof content === "string") {
    return content;
  }
  if (!Array.isArray(content)) {
    return null;
  }
  const segments = [];
  for (const item of content) {
    if (item && typeof item === "object" && typeof item.text === "string") {
      segments.push(item.text);
    }
  }
  const text = segments.join("");
  return text.length > 0 ? text : null;
}
|
|
378
|
+
|
|
379
|
+
// src/compat/chat-model.ts
|
|
380
|
+
// Build a chat model backed by the OpenAI Chat Completions API. The
// returned object exposes plain generate/stream calls plus structured
// output variants (generateObject/streamObject) implemented on top of
// them via the shared structured-output helpers.
function createOpenAICompatChatModel(client, modelId) {
  const provider = "openai";
  // Single call site for the completions endpoint; SDK errors are
  // normalized through wrapOpenAIError before reaching callers.
  async function callOpenAIChatCompletionsApi(request, signal) {
    try {
      return await client.chat.completions.create(request, {
        signal
      });
    } catch (error) {
      throw wrapOpenAIError(error);
    }
  }
  // One-shot generation: build the request, call the API, map the result.
  async function generateChat(options) {
    const request = createGenerateRequest(modelId, options);
    const response = await callOpenAIChatCompletionsApi(request, options.signal);
    return mapGenerateResponse(response);
  }
  // Streaming generation: the API call is deferred inside the factory so
  // the request is only issued when the chat stream is consumed.
  async function streamChat(options) {
    const request = createStreamRequest(modelId, options);
    return createChatStream(
      async () => transformStream(
        await callOpenAIChatCompletionsApi(request, options.signal)
      ),
      { signal: options.signal }
    );
  }
  return {
    provider,
    modelId,
    generate: generateChat,
    stream: streamChat,
    // Structured output via a regular generate call: the schema is turned
    // into tool/response options, then the object is extracted from the
    // designated tool call (or the raw text as a fallback).
    async generateObject(options) {
      const structuredOptions = createStructuredOutputOptions(options);
      const result = await generateChat(structuredOptions);
      const toolName = getStructuredOutputToolName(options);
      const object = extractStructuredObject(
        result,
        options.schema,
        provider,
        toolName
      );
      return {
        object,
        finishReason: result.finishReason,
        usage: result.usage
      };
    },
    // Streaming structured output: wraps the chat stream in a transform
    // that emits object deltas and a final validated object.
    async streamObject(options) {
      const structuredOptions = createStructuredOutputOptions(options);
      const stream = await streamChat(structuredOptions);
      const toolName = getStructuredOutputToolName(options);
      return createObjectStream(
        transformStructuredOutputStream(
          stream,
          options.schema,
          provider,
          toolName
        ),
        {
          signal: options.signal
        }
      );
    }
  };
}
|
|
444
|
+
// Recover the structured object from a generate result. Preference order:
// the arguments of the designated structured-output tool call, then the
// trimmed text content parsed as JSON. Throws
// StructuredOutputNoObjectGeneratedError when neither is present.
function extractStructuredObject(result, schema, provider, toolName) {
  const structuredToolCall = result.toolCalls.find(
    (toolCall) => toolCall.name === toolName
  );
  if (structuredToolCall) {
    return validateStructuredToolArguments(
      schema,
      structuredToolCall.arguments,
      provider
    );
  }
  const rawOutput = result.content?.trim();
  if (rawOutput) {
    return parseAndValidateStructuredPayload(schema, rawOutput, provider);
  }
  throw new StructuredOutputNoObjectGeneratedError(
    "model did not emit a structured object payload",
    provider
  );
}
|
|
464
|
+
// Transform a chat stream into a structured-output stream: text and
// tool-argument deltas become "object-delta" events, and a single
// validated "object" event is emitted either when the designated tool
// call completes or, as a fallback, at finish time from the buffered
// payload. Throws if the stream ends with no usable payload at all.
async function* transformStructuredOutputStream(stream, schema, provider, toolName) {
  // Set once a schema-validated object has been produced.
  let validatedObject;
  // Accumulated plain-text output, used as the last-resort payload.
  let contentBuffer = "";
  // Accumulated argument text per toolCallId (any tool, not just toolName).
  const toolArgumentDeltas = /* @__PURE__ */ new Map();
  for await (const event of stream) {
    if (event.type === "text-delta") {
      contentBuffer += event.text;
      yield {
        type: "object-delta",
        text: event.text
      };
      continue;
    }
    if (event.type === "tool-call-delta") {
      const previous = toolArgumentDeltas.get(event.toolCallId) ?? "";
      toolArgumentDeltas.set(
        event.toolCallId,
        `${previous}${event.argumentsDelta}`
      );
      yield {
        type: "object-delta",
        text: event.argumentsDelta
      };
      continue;
    }
    // The structured-output tool finished: validate its parsed arguments
    // and emit the object immediately.
    if (event.type === "tool-call-end" && event.toolCall.name === toolName) {
      validatedObject = validateStructuredToolArguments(
        schema,
        event.toolCall.arguments,
        provider
      );
      yield {
        type: "object",
        object: validatedObject
      };
      continue;
    }
    if (event.type === "finish") {
      // No object yet: fall back to whatever payload was buffered
      // (tool argument text first, then plain content).
      if (validatedObject === void 0) {
        const fallbackPayload = getFallbackStructuredPayload(
          contentBuffer,
          toolArgumentDeltas
        );
        if (!fallbackPayload) {
          throw new StructuredOutputNoObjectGeneratedError(
            "structured output stream ended without an object payload",
            provider
          );
        }
        validatedObject = parseAndValidateStructuredPayload(
          schema,
          fallbackPayload,
          provider
        );
        yield {
          type: "object",
          object: validatedObject
        };
      }
      yield {
        type: "finish",
        finishReason: event.finishReason,
        usage: event.usage
      };
    }
  }
}
|
|
531
|
+
// Choose a fallback raw payload when no structured tool call completed:
// the first non-blank accumulated tool-argument string wins, then the
// trimmed text content; undefined when both are blank.
function getFallbackStructuredPayload(contentBuffer, toolArgumentDeltas) {
  for (const delta of toolArgumentDeltas.values()) {
    const candidate = delta.trim();
    if (candidate) {
      return candidate;
    }
  }
  const trimmedContent = contentBuffer.trim();
  return trimmedContent || void 0;
}
|
|
544
|
+
// Validate already-parsed tool-call arguments against the schema; the
// JSON-stringified arguments serve as rawOutput for error reporting.
function validateStructuredToolArguments(schema, toolArguments, provider) {
  const rawOutput = JSON.stringify(toolArguments);
  return validateStructuredObject(schema, toolArguments, provider, rawOutput);
}
|
|
552
|
+
// Parse a raw JSON payload string and validate the result against the
// schema, keeping the raw text for error reporting.
function parseAndValidateStructuredPayload(schema, rawPayload, provider) {
  const parsedPayload = parseJson(rawPayload, provider);
  return validateStructuredObject(schema, parsedPayload, provider, rawPayload);
}
|
|
561
|
+
// Parse raw model output as JSON, converting any parse failure into a
// StructuredOutputParseError that carries the raw text and the cause.
function parseJson(rawOutput, provider) {
  try {
    return JSON.parse(rawOutput);
  } catch (error) {
    const details = { rawOutput, cause: error };
    throw new StructuredOutputParseError(
      "failed to parse structured output as JSON",
      provider,
      details
    );
  }
}
|
|
575
|
+
// Run the zod-style schema over a candidate value. Returns the parsed
// data on success; otherwise throws StructuredOutputValidationError with
// human-readable issue descriptions and the raw output.
function validateStructuredObject(schema, value, provider, rawOutput) {
  const parsed = schema.safeParse(value);
  if (!parsed.success) {
    throw new StructuredOutputValidationError(
      "structured output does not match schema",
      provider,
      formatZodIssues(parsed.error.issues),
      { rawOutput }
    );
  }
  return parsed.data;
}
|
|
589
|
+
// Render zod issues as "path: message" strings; an empty path (the value
// itself failed) is shown as "<root>".
function formatZodIssues(issues) {
  return issues.map((issue) => {
    const location = issue.path.length > 0
      ? issue.path.map((segment) => String(segment)).join(".")
      : "<root>";
    return `${location}: ${issue.message}`;
  });
}
|
|
595
|
+
|
|
596
|
+
// src/compat/provider.ts
|
|
597
|
+
// Provider factory: reuse an injected OpenAI client when supplied,
// otherwise construct one from apiKey/baseURL, and expose model builders.
function createOpenAICompat(options = {}) {
  let client = options.client;
  if (!client) {
    client = new OpenAI({
      apiKey: options.apiKey,
      baseURL: options.baseURL
    });
  }
  return {
    chatModel(modelId) {
      return createOpenAICompatChatModel(client, modelId);
    },
    embeddingModel(modelId) {
      return createOpenAIEmbeddingModel(client, modelId);
    },
    imageModel(modelId) {
      return createOpenAIImageModel(client, modelId);
    }
  };
}
|
|
608
|
+
export {
|
|
609
|
+
createOpenAICompat,
|
|
610
|
+
openaiCompatGenerateProviderOptionsSchema,
|
|
611
|
+
openaiCompatProviderOptionsSchema
|
|
612
|
+
};
|
package/dist/index.d.ts
CHANGED
|
@@ -1,5 +1,7 @@
|
|
|
1
1
|
import OpenAI from 'openai';
|
|
2
2
|
import { ChatModel, EmbeddingModel, ImageModel } from '@core-ai/core-ai';
|
|
3
|
+
export { O as OpenAICompatGenerateProviderOptions, a as OpenAICompatRequestOptions, c as OpenAIEmbedProviderOptions, d as OpenAIImageProviderOptions, e as OpenAIResponsesGenerateProviderOptions, f as OpenAIResponsesProviderOptions, o as openaiCompatGenerateProviderOptionsSchema, b as openaiCompatProviderOptionsSchema, g as openaiEmbedProviderOptionsSchema, h as openaiImageProviderOptionsSchema, i as openaiResponsesGenerateProviderOptionsSchema, j as openaiResponsesProviderOptionsSchema } from './provider-options-DK-Tz0pz.js';
|
|
4
|
+
import 'zod';
|
|
3
5
|
|
|
4
6
|
type OpenAIProviderOptions = {
|
|
5
7
|
apiKey?: string;
|
|
@@ -13,4 +15,8 @@ type OpenAIProvider = {
|
|
|
13
15
|
};
|
|
14
16
|
declare function createOpenAI(options?: OpenAIProviderOptions): OpenAIProvider;
|
|
15
17
|
|
|
16
|
-
|
|
18
|
+
type OpenAIReasoningMetadata = {
|
|
19
|
+
encryptedContent?: string;
|
|
20
|
+
};
|
|
21
|
+
|
|
22
|
+
export { type OpenAIProvider, type OpenAIProviderOptions, type OpenAIReasoningMetadata, createOpenAI };
|