@core-ai/openai 0.4.0 → 0.5.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +234 -19
  2. package/package.json +2 -3
package/dist/index.js CHANGED
@@ -12,6 +12,140 @@ import {
12
12
 
13
13
  // src/chat-adapter.ts
14
14
  import { zodToJsonSchema } from "zod-to-json-schema";
15
+ import { ProviderError } from "@core-ai/core-ai";
16
+
17
+ // src/model-capabilities.ts
18
+ var DEFAULT_CAPABILITIES = {
19
+ reasoning: {
20
+ supportsEffort: true,
21
+ supportedRange: ["low", "medium", "high"],
22
+ restrictsSamplingParams: false
23
+ }
24
+ };
25
+ var MODEL_CAPABILITIES = {
26
+ "gpt-5.2": {
27
+ reasoning: {
28
+ supportsEffort: true,
29
+ supportedRange: ["low", "medium", "high", "max"],
30
+ restrictsSamplingParams: true
31
+ }
32
+ },
33
+ "gpt-5.2-codex": {
34
+ reasoning: {
35
+ supportsEffort: true,
36
+ supportedRange: ["low", "medium", "high", "max"],
37
+ restrictsSamplingParams: true
38
+ }
39
+ },
40
+ "gpt-5.2-pro": {
41
+ reasoning: {
42
+ supportsEffort: true,
43
+ supportedRange: ["low", "medium", "high", "max"],
44
+ restrictsSamplingParams: true
45
+ }
46
+ },
47
+ "gpt-5.1": {
48
+ reasoning: {
49
+ supportsEffort: true,
50
+ supportedRange: ["low", "medium", "high"],
51
+ restrictsSamplingParams: true
52
+ }
53
+ },
54
+ "gpt-5": {
55
+ reasoning: {
56
+ supportsEffort: true,
57
+ supportedRange: ["minimal", "low", "medium", "high"],
58
+ restrictsSamplingParams: true
59
+ }
60
+ },
61
+ "gpt-5-mini": {
62
+ reasoning: {
63
+ supportsEffort: true,
64
+ supportedRange: ["minimal", "low", "medium", "high"],
65
+ restrictsSamplingParams: true
66
+ }
67
+ },
68
+ "gpt-5-nano": {
69
+ reasoning: {
70
+ supportsEffort: true,
71
+ supportedRange: ["minimal", "low", "medium", "high"],
72
+ restrictsSamplingParams: true
73
+ }
74
+ },
75
+ o3: {
76
+ reasoning: {
77
+ supportsEffort: true,
78
+ supportedRange: ["low", "medium", "high"],
79
+ restrictsSamplingParams: false
80
+ }
81
+ },
82
+ "o3-mini": {
83
+ reasoning: {
84
+ supportsEffort: true,
85
+ supportedRange: ["low", "medium", "high"],
86
+ restrictsSamplingParams: false
87
+ }
88
+ },
89
+ "o4-mini": {
90
+ reasoning: {
91
+ supportsEffort: true,
92
+ supportedRange: ["low", "medium", "high"],
93
+ restrictsSamplingParams: false
94
+ }
95
+ },
96
+ o1: {
97
+ reasoning: {
98
+ supportsEffort: true,
99
+ supportedRange: ["low", "medium", "high"],
100
+ restrictsSamplingParams: false
101
+ }
102
+ },
103
+ "o1-mini": {
104
+ reasoning: {
105
+ supportsEffort: false,
106
+ supportedRange: [],
107
+ restrictsSamplingParams: false
108
+ }
109
+ }
110
+ };
111
+ var EFFORT_RANK = {
112
+ minimal: 0,
113
+ low: 1,
114
+ medium: 2,
115
+ high: 3,
116
+ max: 4
117
+ };
118
+ function getOpenAIModelCapabilities(modelId) {
119
+ const normalizedModelId = normalizeModelId(modelId);
120
+ return MODEL_CAPABILITIES[normalizedModelId] ?? DEFAULT_CAPABILITIES;
121
+ }
122
+ function normalizeModelId(modelId) {
123
+ return modelId.replace(/-\d{8}$/, "");
124
+ }
125
+ function clampReasoningEffort(effort, supportedRange) {
126
+ if (supportedRange.length === 0 || supportedRange.includes(effort)) {
127
+ return effort;
128
+ }
129
+ const targetRank = EFFORT_RANK[effort];
130
+ let best = supportedRange[0] ?? effort;
131
+ let bestDistance = Math.abs(EFFORT_RANK[best] - targetRank);
132
+ for (const candidate of supportedRange) {
133
+ const distance = Math.abs(EFFORT_RANK[candidate] - targetRank);
134
+ if (distance < bestDistance) {
135
+ best = candidate;
136
+ bestDistance = distance;
137
+ }
138
+ }
139
+ return best;
140
+ }
141
+ function toOpenAIReasoningEffort(effort) {
142
+ if (effort === "max") {
143
+ return "xhigh";
144
+ }
145
+ return effort;
146
+ }
147
+
148
+ // src/chat-adapter.ts
15
149
  var DEFAULT_STRUCTURED_OUTPUT_TOOL_NAME = "core_ai_generate_object";
16
150
  var DEFAULT_STRUCTURED_OUTPUT_TOOL_DESCRIPTION = "Return a JSON object that matches the requested schema.";
17
151
  function convertMessages(messages) {
@@ -31,11 +165,15 @@ function convertMessage(message) {
31
165
  };
32
166
  }
33
167
  if (message.role === "assistant") {
168
+ const text = message.parts.flatMap((part) => part.type === "text" ? [part.text] : []).join("");
169
+ const toolCalls = message.parts.flatMap(
170
+ (part) => part.type === "tool-call" ? [part.toolCall] : []
171
+ );
34
172
  return {
35
173
  role: "assistant",
36
- content: message.content,
37
- ...message.toolCalls && message.toolCalls.length > 0 ? {
38
- tool_calls: message.toolCalls.map((toolCall) => ({
174
+ content: text.length > 0 ? text : null,
175
+ ...toolCalls.length > 0 ? {
176
+ tool_calls: toolCalls.map((toolCall) => ({
39
177
  id: toolCall.id,
40
178
  type: "function",
41
179
  function: {
@@ -119,6 +257,7 @@ function createStructuredOutputOptions(options) {
119
257
  type: "tool",
120
258
  toolName
121
259
  },
260
+ reasoning: options.reasoning,
122
261
  config: options.config,
123
262
  providerOptions: options.providerOptions,
124
263
  signal: options.signal
@@ -141,11 +280,14 @@ function createStreamRequest(modelId, options) {
141
280
  };
142
281
  }
143
282
  function createRequestBase(modelId, options) {
283
+ validateOpenAIReasoningConfig(modelId, options);
284
+ const reasoningFields = mapReasoningToRequestFields(modelId, options);
144
285
  return {
145
286
  model: modelId,
146
287
  messages: convertMessages(options.messages),
147
288
  ...options.tools && Object.keys(options.tools).length > 0 ? { tools: convertTools(options.tools) } : {},
148
289
  ...options.toolChoice ? { tool_choice: convertToolChoice(options.toolChoice) } : {},
290
+ ...reasoningFields,
149
291
  ...mapConfigToRequestFields(options.config)
150
292
  };
151
293
  }
@@ -163,7 +305,9 @@ function mapGenerateResponse(response) {
163
305
  const firstChoice = response.choices[0];
164
306
  if (!firstChoice) {
165
307
  return {
308
+ parts: [],
166
309
  content: null,
310
+ reasoning: null,
167
311
  toolCalls: [],
168
312
  finishReason: "unknown",
169
313
  usage: {
@@ -173,16 +317,19 @@ function mapGenerateResponse(response) {
173
317
  cacheReadTokens: 0,
174
318
  cacheWriteTokens: 0
175
319
  },
176
- outputTokenDetails: {
177
- reasoningTokens: 0
178
- }
320
+ outputTokenDetails: {}
179
321
  }
180
322
  };
181
323
  }
182
- const reasoningTokens = response.usage?.completion_tokens_details?.reasoning_tokens ?? 0;
324
+ const reasoningTokens = response.usage?.completion_tokens_details?.reasoning_tokens;
325
+ const content = extractTextContent(firstChoice.message.content);
326
+ const toolCalls = parseToolCalls(firstChoice.message.tool_calls);
327
+ const parts = createAssistantParts(content, toolCalls);
183
328
  return {
184
- content: firstChoice.message.content,
185
- toolCalls: parseToolCalls(firstChoice.message.tool_calls),
329
+ parts,
330
+ content,
331
+ reasoning: null,
332
+ toolCalls,
186
333
  finishReason: mapFinishReason(firstChoice.finish_reason),
187
334
  usage: {
188
335
  inputTokens: response.usage?.prompt_tokens ?? 0,
@@ -192,7 +339,7 @@ function mapGenerateResponse(response) {
192
339
  cacheWriteTokens: 0
193
340
  },
194
341
  outputTokenDetails: {
195
- reasoningTokens
342
+ ...reasoningTokens !== void 0 ? { reasoningTokens } : {}
196
343
  }
197
344
  }
198
345
  };
@@ -241,12 +388,11 @@ async function* transformStream(stream) {
241
388
  cacheReadTokens: 0,
242
389
  cacheWriteTokens: 0
243
390
  },
244
- outputTokenDetails: {
245
- reasoningTokens: 0
246
- }
391
+ outputTokenDetails: {}
247
392
  };
248
393
  for await (const chunk of stream) {
249
394
  if (chunk.usage) {
395
+ const reasoningTokens = chunk.usage.completion_tokens_details?.reasoning_tokens;
250
396
  usage = {
251
397
  inputTokens: chunk.usage.prompt_tokens ?? 0,
252
398
  outputTokens: chunk.usage.completion_tokens ?? 0,
@@ -255,7 +401,7 @@ async function* transformStream(stream) {
255
401
  cacheWriteTokens: 0
256
402
  },
257
403
  outputTokenDetails: {
258
- reasoningTokens: chunk.usage.completion_tokens_details?.reasoning_tokens ?? 0
404
+ ...reasoningTokens !== void 0 ? { reasoningTokens } : {}
259
405
  }
260
406
  };
261
407
  }
@@ -265,7 +411,7 @@ async function* transformStream(stream) {
265
411
  }
266
412
  if (choice.delta.content) {
267
413
  yield {
268
- type: "content-delta",
414
+ type: "text-delta",
269
415
  text: choice.delta.content
270
416
  };
271
417
  }
@@ -340,15 +486,84 @@ function safeParseJsonObject(json) {
340
486
  return {};
341
487
  }
342
488
  }
489
+ function validateOpenAIReasoningConfig(modelId, options) {
490
+ if (!options.reasoning) {
491
+ return;
492
+ }
493
+ const capabilities = getOpenAIModelCapabilities(modelId);
494
+ if (!capabilities.reasoning.restrictsSamplingParams) {
495
+ return;
496
+ }
497
+ if (options.config?.temperature !== void 0) {
498
+ throw new ProviderError(
499
+ `OpenAI model "${modelId}" does not support temperature when reasoning is enabled`,
500
+ "openai"
501
+ );
502
+ }
503
+ if (options.config?.topP !== void 0) {
504
+ throw new ProviderError(
505
+ `OpenAI model "${modelId}" does not support topP when reasoning is enabled`,
506
+ "openai"
507
+ );
508
+ }
509
+ }
510
+ function mapReasoningToRequestFields(modelId, options) {
511
+ if (!options.reasoning) {
512
+ return {};
513
+ }
514
+ const capabilities = getOpenAIModelCapabilities(modelId);
515
+ if (!capabilities.reasoning.supportsEffort) {
516
+ return {};
517
+ }
518
+ const clampedEffort = clampReasoningEffort(
519
+ options.reasoning.effort,
520
+ capabilities.reasoning.supportedRange
521
+ );
522
+ return {
523
+ reasoning_effort: toOpenAIReasoningEffort(clampedEffort)
524
+ };
525
+ }
526
+ function createAssistantParts(content, toolCalls) {
527
+ const parts = [];
528
+ if (content) {
529
+ parts.push({
530
+ type: "text",
531
+ text: content
532
+ });
533
+ }
534
+ for (const toolCall of toolCalls) {
535
+ parts.push({
536
+ type: "tool-call",
537
+ toolCall
538
+ });
539
+ }
540
+ return parts;
541
+ }
542
+ function extractTextContent(content) {
543
+ if (typeof content === "string") {
544
+ return content;
545
+ }
546
+ if (!Array.isArray(content)) {
547
+ return null;
548
+ }
549
+ const text = content.flatMap((item) => {
550
+ if (!item || typeof item !== "object") {
551
+ return [];
552
+ }
553
+ const textValue = item.text;
554
+ return typeof textValue === "string" ? [textValue] : [];
555
+ }).join("");
556
+ return text.length > 0 ? text : null;
557
+ }
343
558
 
344
559
  // src/openai-error.ts
345
560
  import { APIError } from "openai";
346
- import { ProviderError } from "@core-ai/core-ai";
561
+ import { ProviderError as ProviderError2 } from "@core-ai/core-ai";
347
562
  function wrapOpenAIError(error) {
348
563
  if (error instanceof APIError) {
349
- return new ProviderError(error.message, "openai", error.status, error);
564
+ return new ProviderError2(error.message, "openai", error.status, error);
350
565
  }
351
- return new ProviderError(
566
+ return new ProviderError2(
352
567
  error instanceof Error ? error.message : String(error),
353
568
  "openai",
354
569
  void 0,
@@ -439,7 +654,7 @@ async function* transformStructuredOutputStream(stream, schema, provider, toolNa
439
654
  let contentBuffer = "";
440
655
  const toolArgumentDeltas = /* @__PURE__ */ new Map();
441
656
  for await (const event of stream) {
442
- if (event.type === "content-delta") {
657
+ if (event.type === "text-delta") {
443
658
  contentBuffer += event.text;
444
659
  yield {
445
660
  type: "object-delta",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@core-ai/openai",
3
- "version": "0.4.0",
3
+ "version": "0.5.1",
4
4
  "description": "OpenAI provider package for @core-ai/core-ai",
5
5
  "license": "MIT",
6
6
  "author": "Omnifact (https://omnifact.ai)",
@@ -38,12 +38,11 @@
38
38
  "build": "tsup",
39
39
  "lint": "eslint src/ --max-warnings 0",
40
40
  "check-types": "tsc --noEmit",
41
- "prepublishOnly": "npm run build",
42
41
  "test": "vitest run",
43
42
  "test:watch": "vitest"
44
43
  },
45
44
  "dependencies": {
46
- "@core-ai/core-ai": "^0.4.0",
45
+ "@core-ai/core-ai": "^0.5.1",
47
46
  "openai": "^6.1.0",
48
47
  "zod-to-json-schema": "^3.25.1"
49
48
  },