@core-ai/openai 0.3.0 → 0.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +254 -19
- package/package.json +2 -3
package/dist/index.js
CHANGED
|
@@ -12,6 +12,140 @@ import {
|
|
|
12
12
|
|
|
13
13
|
// src/chat-adapter.ts
|
|
14
14
|
import { zodToJsonSchema } from "zod-to-json-schema";
|
|
15
|
+
import { ProviderError } from "@core-ai/core-ai";
|
|
16
|
+
|
|
17
|
+
// src/model-capabilities.ts
|
|
18
|
+
var DEFAULT_CAPABILITIES = {
|
|
19
|
+
reasoning: {
|
|
20
|
+
supportsEffort: true,
|
|
21
|
+
supportedRange: ["low", "medium", "high"],
|
|
22
|
+
restrictsSamplingParams: false
|
|
23
|
+
}
|
|
24
|
+
};
|
|
25
|
+
var MODEL_CAPABILITIES = {
|
|
26
|
+
"gpt-5.2": {
|
|
27
|
+
reasoning: {
|
|
28
|
+
supportsEffort: true,
|
|
29
|
+
supportedRange: ["low", "medium", "high", "max"],
|
|
30
|
+
restrictsSamplingParams: true
|
|
31
|
+
}
|
|
32
|
+
},
|
|
33
|
+
"gpt-5.2-codex": {
|
|
34
|
+
reasoning: {
|
|
35
|
+
supportsEffort: true,
|
|
36
|
+
supportedRange: ["low", "medium", "high", "max"],
|
|
37
|
+
restrictsSamplingParams: true
|
|
38
|
+
}
|
|
39
|
+
},
|
|
40
|
+
"gpt-5.2-pro": {
|
|
41
|
+
reasoning: {
|
|
42
|
+
supportsEffort: true,
|
|
43
|
+
supportedRange: ["low", "medium", "high", "max"],
|
|
44
|
+
restrictsSamplingParams: true
|
|
45
|
+
}
|
|
46
|
+
},
|
|
47
|
+
"gpt-5.1": {
|
|
48
|
+
reasoning: {
|
|
49
|
+
supportsEffort: true,
|
|
50
|
+
supportedRange: ["low", "medium", "high"],
|
|
51
|
+
restrictsSamplingParams: true
|
|
52
|
+
}
|
|
53
|
+
},
|
|
54
|
+
"gpt-5": {
|
|
55
|
+
reasoning: {
|
|
56
|
+
supportsEffort: true,
|
|
57
|
+
supportedRange: ["minimal", "low", "medium", "high"],
|
|
58
|
+
restrictsSamplingParams: true
|
|
59
|
+
}
|
|
60
|
+
},
|
|
61
|
+
"gpt-5-mini": {
|
|
62
|
+
reasoning: {
|
|
63
|
+
supportsEffort: true,
|
|
64
|
+
supportedRange: ["minimal", "low", "medium", "high"],
|
|
65
|
+
restrictsSamplingParams: true
|
|
66
|
+
}
|
|
67
|
+
},
|
|
68
|
+
"gpt-5-nano": {
|
|
69
|
+
reasoning: {
|
|
70
|
+
supportsEffort: true,
|
|
71
|
+
supportedRange: ["minimal", "low", "medium", "high"],
|
|
72
|
+
restrictsSamplingParams: true
|
|
73
|
+
}
|
|
74
|
+
},
|
|
75
|
+
o3: {
|
|
76
|
+
reasoning: {
|
|
77
|
+
supportsEffort: true,
|
|
78
|
+
supportedRange: ["low", "medium", "high"],
|
|
79
|
+
restrictsSamplingParams: false
|
|
80
|
+
}
|
|
81
|
+
},
|
|
82
|
+
"o3-mini": {
|
|
83
|
+
reasoning: {
|
|
84
|
+
supportsEffort: true,
|
|
85
|
+
supportedRange: ["low", "medium", "high"],
|
|
86
|
+
restrictsSamplingParams: false
|
|
87
|
+
}
|
|
88
|
+
},
|
|
89
|
+
"o4-mini": {
|
|
90
|
+
reasoning: {
|
|
91
|
+
supportsEffort: true,
|
|
92
|
+
supportedRange: ["low", "medium", "high"],
|
|
93
|
+
restrictsSamplingParams: false
|
|
94
|
+
}
|
|
95
|
+
},
|
|
96
|
+
o1: {
|
|
97
|
+
reasoning: {
|
|
98
|
+
supportsEffort: true,
|
|
99
|
+
supportedRange: ["low", "medium", "high"],
|
|
100
|
+
restrictsSamplingParams: false
|
|
101
|
+
}
|
|
102
|
+
},
|
|
103
|
+
"o1-mini": {
|
|
104
|
+
reasoning: {
|
|
105
|
+
supportsEffort: false,
|
|
106
|
+
supportedRange: [],
|
|
107
|
+
restrictsSamplingParams: false
|
|
108
|
+
}
|
|
109
|
+
}
|
|
110
|
+
};
|
|
111
|
+
var EFFORT_RANK = {
|
|
112
|
+
minimal: 0,
|
|
113
|
+
low: 1,
|
|
114
|
+
medium: 2,
|
|
115
|
+
high: 3,
|
|
116
|
+
max: 4
|
|
117
|
+
};
|
|
118
|
+
/**
 * Looks up reasoning capabilities for an OpenAI model id.
 * Date-suffixed snapshot ids resolve to their base entry via
 * normalizeModelId; unknown models fall back to DEFAULT_CAPABILITIES.
 */
function getOpenAIModelCapabilities(modelId) {
  return MODEL_CAPABILITIES[normalizeModelId(modelId)] ?? DEFAULT_CAPABILITIES;
}
|
|
122
|
+
// Strips a trailing 8-digit date snapshot suffix (e.g. "-20250807") so
// snapshot ids share the capability entry of their base model. Ids without
// such a suffix are returned unchanged.
function normalizeModelId(modelId) {
  const dateSuffix = /-\d{8}$/;
  return modelId.replace(dateSuffix, "");
}
|
|
125
|
+
/**
 * Clamps a requested reasoning effort to the nearest level a model supports.
 * An empty range means "no restriction", and an effort already in the range
 * passes through unchanged. Otherwise the supported level whose EFFORT_RANK
 * is closest to the requested one wins; earlier entries break ties.
 */
function clampReasoningEffort(effort, supportedRange) {
  if (supportedRange.length === 0 || supportedRange.includes(effort)) {
    return effort;
  }
  const targetRank = EFFORT_RANK[effort];
  const distanceTo = (level) => Math.abs(EFFORT_RANK[level] - targetRank);
  let best = supportedRange[0] ?? effort;
  let bestDistance = distanceTo(best);
  for (const candidate of supportedRange.slice(1)) {
    const candidateDistance = distanceTo(candidate);
    if (candidateDistance < bestDistance) {
      best = candidate;
      bestDistance = candidateDistance;
    }
  }
  return best;
}
|
|
141
|
+
// Translates the provider-neutral "max" effort level to OpenAI's wire value
// "xhigh"; every other level is passed through unchanged.
function toOpenAIReasoningEffort(effort) {
  return effort === "max" ? "xhigh" : effort;
}
|
|
147
|
+
|
|
148
|
+
// src/chat-adapter.ts
|
|
15
149
|
var DEFAULT_STRUCTURED_OUTPUT_TOOL_NAME = "core_ai_generate_object";
|
|
16
150
|
var DEFAULT_STRUCTURED_OUTPUT_TOOL_DESCRIPTION = "Return a JSON object that matches the requested schema.";
|
|
17
151
|
function convertMessages(messages) {
|
|
@@ -31,11 +165,15 @@ function convertMessage(message) {
|
|
|
31
165
|
};
|
|
32
166
|
}
|
|
33
167
|
if (message.role === "assistant") {
|
|
168
|
+
const text = message.parts.flatMap((part) => part.type === "text" ? [part.text] : []).join("");
|
|
169
|
+
const toolCalls = message.parts.flatMap(
|
|
170
|
+
(part) => part.type === "tool-call" ? [part.toolCall] : []
|
|
171
|
+
);
|
|
34
172
|
return {
|
|
35
173
|
role: "assistant",
|
|
36
|
-
content:
|
|
37
|
-
...
|
|
38
|
-
tool_calls:
|
|
174
|
+
content: text.length > 0 ? text : null,
|
|
175
|
+
...toolCalls.length > 0 ? {
|
|
176
|
+
tool_calls: toolCalls.map((toolCall) => ({
|
|
39
177
|
id: toolCall.id,
|
|
40
178
|
type: "function",
|
|
41
179
|
function: {
|
|
@@ -119,6 +257,7 @@ function createStructuredOutputOptions(options) {
|
|
|
119
257
|
type: "tool",
|
|
120
258
|
toolName
|
|
121
259
|
},
|
|
260
|
+
reasoning: options.reasoning,
|
|
122
261
|
config: options.config,
|
|
123
262
|
providerOptions: options.providerOptions,
|
|
124
263
|
signal: options.signal
|
|
@@ -141,11 +280,14 @@ function createStreamRequest(modelId, options) {
|
|
|
141
280
|
};
|
|
142
281
|
}
|
|
143
282
|
/**
 * Builds the request fields shared by generate and stream calls.
 * Validates reasoning-related config first (throws on sampling params the
 * model forbids alongside reasoning), then merges tools, tool choice,
 * reasoning, and generic config fields only when present.
 */
function createRequestBase(modelId, options) {
  validateOpenAIReasoningConfig(modelId, options);
  const request = {
    model: modelId,
    messages: convertMessages(options.messages)
  };
  const toolNames = options.tools ? Object.keys(options.tools) : [];
  if (toolNames.length > 0) {
    request.tools = convertTools(options.tools);
  }
  if (options.toolChoice) {
    request.tool_choice = convertToolChoice(options.toolChoice);
  }
  return {
    ...request,
    ...mapReasoningToRequestFields(modelId, options),
    ...mapConfigToRequestFields(options.config)
  };
}
|
|
@@ -163,27 +305,42 @@ function mapGenerateResponse(response) {
|
|
|
163
305
|
const firstChoice = response.choices[0];
|
|
164
306
|
if (!firstChoice) {
|
|
165
307
|
return {
|
|
308
|
+
parts: [],
|
|
166
309
|
content: null,
|
|
310
|
+
reasoning: null,
|
|
167
311
|
toolCalls: [],
|
|
168
312
|
finishReason: "unknown",
|
|
169
313
|
usage: {
|
|
170
314
|
inputTokens: 0,
|
|
171
315
|
outputTokens: 0,
|
|
172
|
-
|
|
173
|
-
|
|
316
|
+
inputTokenDetails: {
|
|
317
|
+
cacheReadTokens: 0,
|
|
318
|
+
cacheWriteTokens: 0
|
|
319
|
+
},
|
|
320
|
+
outputTokenDetails: {}
|
|
174
321
|
}
|
|
175
322
|
};
|
|
176
323
|
}
|
|
177
|
-
const reasoningTokens = response.usage?.completion_tokens_details?.reasoning_tokens
|
|
324
|
+
const reasoningTokens = response.usage?.completion_tokens_details?.reasoning_tokens;
|
|
325
|
+
const content = extractTextContent(firstChoice.message.content);
|
|
326
|
+
const toolCalls = parseToolCalls(firstChoice.message.tool_calls);
|
|
327
|
+
const parts = createAssistantParts(content, toolCalls);
|
|
178
328
|
return {
|
|
179
|
-
|
|
180
|
-
|
|
329
|
+
parts,
|
|
330
|
+
content,
|
|
331
|
+
reasoning: null,
|
|
332
|
+
toolCalls,
|
|
181
333
|
finishReason: mapFinishReason(firstChoice.finish_reason),
|
|
182
334
|
usage: {
|
|
183
335
|
inputTokens: response.usage?.prompt_tokens ?? 0,
|
|
184
336
|
outputTokens: response.usage?.completion_tokens ?? 0,
|
|
185
|
-
|
|
186
|
-
|
|
337
|
+
inputTokenDetails: {
|
|
338
|
+
cacheReadTokens: response.usage?.prompt_tokens_details?.cached_tokens ?? 0,
|
|
339
|
+
cacheWriteTokens: 0
|
|
340
|
+
},
|
|
341
|
+
outputTokenDetails: {
|
|
342
|
+
...reasoningTokens !== void 0 ? { reasoningTokens } : {}
|
|
343
|
+
}
|
|
187
344
|
}
|
|
188
345
|
};
|
|
189
346
|
}
|
|
@@ -227,16 +384,25 @@ async function* transformStream(stream) {
|
|
|
227
384
|
let usage = {
|
|
228
385
|
inputTokens: 0,
|
|
229
386
|
outputTokens: 0,
|
|
230
|
-
|
|
231
|
-
|
|
387
|
+
inputTokenDetails: {
|
|
388
|
+
cacheReadTokens: 0,
|
|
389
|
+
cacheWriteTokens: 0
|
|
390
|
+
},
|
|
391
|
+
outputTokenDetails: {}
|
|
232
392
|
};
|
|
233
393
|
for await (const chunk of stream) {
|
|
234
394
|
if (chunk.usage) {
|
|
395
|
+
const reasoningTokens = chunk.usage.completion_tokens_details?.reasoning_tokens;
|
|
235
396
|
usage = {
|
|
236
397
|
inputTokens: chunk.usage.prompt_tokens ?? 0,
|
|
237
398
|
outputTokens: chunk.usage.completion_tokens ?? 0,
|
|
238
|
-
|
|
239
|
-
|
|
399
|
+
inputTokenDetails: {
|
|
400
|
+
cacheReadTokens: chunk.usage.prompt_tokens_details?.cached_tokens ?? 0,
|
|
401
|
+
cacheWriteTokens: 0
|
|
402
|
+
},
|
|
403
|
+
outputTokenDetails: {
|
|
404
|
+
...reasoningTokens !== void 0 ? { reasoningTokens } : {}
|
|
405
|
+
}
|
|
240
406
|
};
|
|
241
407
|
}
|
|
242
408
|
const choice = chunk.choices[0];
|
|
@@ -245,7 +411,7 @@ async function* transformStream(stream) {
|
|
|
245
411
|
}
|
|
246
412
|
if (choice.delta.content) {
|
|
247
413
|
yield {
|
|
248
|
-
type: "
|
|
414
|
+
type: "text-delta",
|
|
249
415
|
text: choice.delta.content
|
|
250
416
|
};
|
|
251
417
|
}
|
|
@@ -320,15 +486,84 @@ function safeParseJsonObject(json) {
|
|
|
320
486
|
return {};
|
|
321
487
|
}
|
|
322
488
|
}
|
|
489
|
+
/**
 * Rejects sampling parameters that a reasoning-restricted model forbids.
 * No-op when reasoning is not requested or when the model places no
 * restriction on sampling params; otherwise throws ProviderError for
 * whichever of temperature / topP is set.
 */
function validateOpenAIReasoningConfig(modelId, options) {
  if (!options.reasoning) {
    return;
  }
  const capabilities = getOpenAIModelCapabilities(modelId);
  if (!capabilities.reasoning.restrictsSamplingParams) {
    return;
  }
  for (const param of ["temperature", "topP"]) {
    if (options.config?.[param] !== void 0) {
      throw new ProviderError(
        `OpenAI model "${modelId}" does not support ${param} when reasoning is enabled`,
        "openai"
      );
    }
  }
}
|
|
510
|
+
/**
 * Maps the provider-neutral reasoning option onto OpenAI request fields.
 * Returns an empty object when reasoning is absent or the model exposes no
 * effort control; otherwise clamps the requested effort to the model's
 * supported range and emits it as `reasoning_effort`.
 */
function mapReasoningToRequestFields(modelId, options) {
  const reasoning = options.reasoning;
  if (!reasoning) {
    return {};
  }
  const caps = getOpenAIModelCapabilities(modelId).reasoning;
  if (!caps.supportsEffort) {
    return {};
  }
  const effort = clampReasoningEffort(reasoning.effort, caps.supportedRange);
  return {
    reasoning_effort: toOpenAIReasoningEffort(effort)
  };
}
|
|
526
|
+
/**
 * Assembles the ordered parts list of an assistant message: one text part
 * when content is non-empty (truthy), followed by one tool-call part per
 * entry in toolCalls.
 */
function createAssistantParts(content, toolCalls) {
  const textParts = content ? [{ type: "text", text: content }] : [];
  const callParts = toolCalls.map((toolCall) => ({ type: "tool-call", toolCall }));
  return [...textParts, ...callParts];
}
|
|
542
|
+
/**
 * Extracts the plain-text content of a chat message.
 * Strings pass through as-is (including ""); for array content, the `text`
 * fields of object items are concatenated, with non-object items and
 * non-string `text` values skipped. Anything else — or an empty result —
 * yields null.
 */
function extractTextContent(content) {
  if (typeof content === "string") {
    return content;
  }
  if (!Array.isArray(content)) {
    return null;
  }
  const pieces = [];
  for (const item of content) {
    if (item && typeof item === "object" && typeof item.text === "string") {
      pieces.push(item.text);
    }
  }
  const joined = pieces.join("");
  return joined.length > 0 ? joined : null;
}
|
|
323
558
|
|
|
324
559
|
// src/openai-error.ts
|
|
325
560
|
import { APIError } from "openai";
|
|
326
|
-
import { ProviderError } from "@core-ai/core-ai";
|
|
561
|
+
import { ProviderError as ProviderError2 } from "@core-ai/core-ai";
|
|
327
562
|
function wrapOpenAIError(error) {
|
|
328
563
|
if (error instanceof APIError) {
|
|
329
|
-
return new
|
|
564
|
+
return new ProviderError2(error.message, "openai", error.status, error);
|
|
330
565
|
}
|
|
331
|
-
return new
|
|
566
|
+
return new ProviderError2(
|
|
332
567
|
error instanceof Error ? error.message : String(error),
|
|
333
568
|
"openai",
|
|
334
569
|
void 0,
|
|
@@ -419,7 +654,7 @@ async function* transformStructuredOutputStream(stream, schema, provider, toolNa
|
|
|
419
654
|
let contentBuffer = "";
|
|
420
655
|
const toolArgumentDeltas = /* @__PURE__ */ new Map();
|
|
421
656
|
for await (const event of stream) {
|
|
422
|
-
if (event.type === "
|
|
657
|
+
if (event.type === "text-delta") {
|
|
423
658
|
contentBuffer += event.text;
|
|
424
659
|
yield {
|
|
425
660
|
type: "object-delta",
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@core-ai/openai",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.5.1",
|
|
4
4
|
"description": "OpenAI provider package for @core-ai/core-ai",
|
|
5
5
|
"license": "MIT",
|
|
6
6
|
"author": "Omnifact (https://omnifact.ai)",
|
|
@@ -38,12 +38,11 @@
|
|
|
38
38
|
"build": "tsup",
|
|
39
39
|
"lint": "eslint src/ --max-warnings 0",
|
|
40
40
|
"check-types": "tsc --noEmit",
|
|
41
|
-
"prepublishOnly": "npm run build",
|
|
42
41
|
"test": "vitest run",
|
|
43
42
|
"test:watch": "vitest"
|
|
44
43
|
},
|
|
45
44
|
"dependencies": {
|
|
46
|
-
"@core-ai/core-ai": "^0.
|
|
45
|
+
"@core-ai/core-ai": "^0.5.1",
|
|
47
46
|
"openai": "^6.1.0",
|
|
48
47
|
"zod-to-json-schema": "^3.25.1"
|
|
49
48
|
},
|