@core-ai/openai 0.5.1 → 0.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -12,6 +12,8 @@ npm install @core-ai/core-ai @core-ai/openai zod
12
12
 
13
13
  ## Usage
14
14
 
15
+ The default entrypoint uses the OpenAI **Responses API**:
16
+
15
17
  ```ts
16
18
  import { generate } from '@core-ai/core-ai';
17
19
  import { createOpenAI } from '@core-ai/openai';
@@ -26,3 +28,22 @@ const result = await generate({
26
28
 
27
29
  console.log(result.content);
28
30
  ```
31
+
32
+ ## Chat Completions (Compat)
33
+
34
+ For the legacy Chat Completions API — useful for Azure OpenAI, proxies, or third-party OpenAI-compatible endpoints — import from `@core-ai/openai/compat`:
35
+
36
+ ```ts
37
+ import { generate } from '@core-ai/core-ai';
38
+ import { createOpenAICompat } from '@core-ai/openai/compat';
39
+
40
+ const openai = createOpenAICompat({ apiKey: process.env.OPENAI_API_KEY });
41
+ const model = openai.chatModel('gpt-5-mini');
42
+
43
+ const result = await generate({
44
+ model,
45
+ messages: [{ role: 'user', content: 'Hello!' }],
46
+ });
47
+
48
+ console.log(result.content);
49
+ ```
@@ -0,0 +1,416 @@
1
+ // src/provider-options.ts
2
+ import { z } from "zod";
3
// Provider-scoped options accepted on generate calls against the Responses API.
// `.strict()` makes unknown keys a parse error so option typos surface early.
var openaiResponsesGenerateProviderOptionsSchema = z.object({
  store: z.boolean().optional(),
  serviceTier: z.enum(["auto", "default", "flex", "scale", "priority"]).optional(),
  include: z.array(z.string()).optional(),
  parallelToolCalls: z.boolean().optional(),
  user: z.string().optional()
}).strict();
// Chat Completions (compat) variant: drops the Responses-only `include` field
// and adds sampling controls that only the Chat Completions API exposes.
var openaiCompatGenerateProviderOptionsSchema = openaiResponsesGenerateProviderOptionsSchema.omit({
  include: true
}).extend({
  stopSequences: z.array(z.string()).optional(),
  frequencyPenalty: z.number().optional(),
  presencePenalty: z.number().optional(),
  seed: z.number().int().optional()
}).strict();
// Options accepted by the embeddings endpoint.
var openaiEmbedProviderOptionsSchema = z.object({
  encodingFormat: z.enum(["float", "base64"]).optional(),
  user: z.string().optional()
}).strict();
// Options accepted by the image-generation endpoint. Ranges/enums mirror the
// OpenAI Images API (e.g. outputCompression is a 0-100 percentage).
var openaiImageProviderOptionsSchema = z.object({
  background: z.enum(["transparent", "opaque", "auto"]).optional(),
  moderation: z.enum(["low", "auto"]).optional(),
  outputCompression: z.number().int().min(0).max(100).optional(),
  outputFormat: z.enum(["png", "jpeg", "webp"]).optional(),
  quality: z.enum(["standard", "hd", "low", "medium", "high", "auto"]).optional(),
  responseFormat: z.enum(["url", "b64_json"]).optional(),
  style: z.enum(["vivid", "natural"]).optional(),
  user: z.string().optional()
}).strict();
32
// Pull the `openai` namespace out of the generic provider-options bag and
// validate it against the Responses-API generate schema.
// Returns undefined when no OpenAI-specific options were supplied.
function parseOpenAIResponsesGenerateProviderOptions(providerOptions) {
  const raw = providerOptions?.openai;
  return raw === undefined ? undefined : openaiResponsesGenerateProviderOptionsSchema.parse(raw);
}
39
// Pull the `openai` namespace out of the generic provider-options bag and
// validate it against the Chat Completions (compat) generate schema.
// Returns undefined when no OpenAI-specific options were supplied.
function parseOpenAICompatGenerateProviderOptions(providerOptions) {
  const raw = providerOptions?.openai;
  return raw === undefined ? undefined : openaiCompatGenerateProviderOptionsSchema.parse(raw);
}
46
// Pull the `openai` namespace out of the generic provider-options bag and
// validate it against the embeddings schema.
// Returns undefined when no OpenAI-specific options were supplied.
function parseOpenAIEmbedProviderOptions(providerOptions) {
  const raw = providerOptions?.openai;
  return raw === undefined ? undefined : openaiEmbedProviderOptionsSchema.parse(raw);
}
53
// Pull the `openai` namespace out of the generic provider-options bag and
// validate it against the image-generation schema.
// Returns undefined when no OpenAI-specific options were supplied.
function parseOpenAIImageProviderOptions(providerOptions) {
  const raw = providerOptions?.openai;
  return raw === undefined ? undefined : openaiImageProviderOptionsSchema.parse(raw);
}
60
// Public aliases for the generate schemas, re-exported under shorter names
// (kept for the package's public API surface).
var openaiResponsesProviderOptionsSchema = openaiResponsesGenerateProviderOptionsSchema;
var openaiCompatProviderOptionsSchema = openaiCompatGenerateProviderOptionsSchema;
62
+
63
+ // src/openai-error.ts
64
+ import { APIError } from "openai";
65
+ import { ProviderError } from "@core-ai/core-ai";
66
// Normalize any thrown value into a ProviderError tagged "openai".
// OpenAI SDK APIError instances keep their HTTP status; everything else is
// wrapped with an undefined status and the original value as the cause.
function wrapOpenAIError(error) {
  if (error instanceof APIError) {
    return new ProviderError(error.message, "openai", error.status, error);
  }
  const message = error instanceof Error ? error.message : String(error);
  return new ProviderError(message, "openai", void 0, error);
}
77
+
78
+ // src/embedding-model.ts
79
// Build a core-ai EmbeddingModel backed by the OpenAI embeddings endpoint.
// `client` is an OpenAI SDK instance; `modelId` is sent as-is on every call.
function createOpenAIEmbeddingModel(client, modelId) {
  return {
    provider: "openai",
    modelId,
    async embed(options) {
      try {
        const providerFields = mapOpenAIEmbedProviderOptionsToRequestFields(
          parseOpenAIEmbedProviderOptions(options.providerOptions)
        );
        const request = {
          model: modelId,
          input: options.input,
          ...(options.dimensions !== void 0 ? { dimensions: options.dimensions } : {}),
          ...providerFields
        };
        const response = await client.embeddings.create(request);
        // The API does not guarantee result order; sort a copy by index so
        // embeddings line up with the inputs.
        const ordered = [...response.data].sort((a, b) => a.index - b.index);
        return {
          embeddings: ordered.map((item) => item.embedding),
          usage: {
            inputTokens: response.usage.prompt_tokens
          }
        };
      } catch (error) {
        throw wrapOpenAIError(error);
      }
    }
  };
}
108
// Translate camelCase embed provider options into the snake_case request
// fields the OpenAI API expects, omitting anything left undefined.
function mapOpenAIEmbedProviderOptionsToRequestFields(options) {
  if (options === undefined) {
    return {};
  }
  const { encodingFormat, user } = options;
  return {
    ...(encodingFormat !== undefined ? { encoding_format: encodingFormat } : {}),
    ...(user !== undefined ? { user } : {})
  };
}
114
+
115
+ // src/image-model.ts
116
// Build a core-ai ImageModel backed by the OpenAI images endpoint.
// `client` is an OpenAI SDK instance; `modelId` is sent as-is on every call.
function createOpenAIImageModel(client, modelId) {
  return {
    provider: "openai",
    modelId,
    async generate(options) {
      try {
        const openaiOptions = parseOpenAIImageProviderOptions(options.providerOptions);
        const request = {
          model: modelId,
          prompt: options.prompt,
          ...(options.n !== void 0 ? { n: options.n } : {}),
          ...(options.size !== void 0 ? { size: options.size } : {}),
          ...mapOpenAIImageProviderOptionsToRequestFields(openaiOptions)
        };
        const response = await client.images.generate(request);
        // Depending on responseFormat the API returns either base64 payloads
        // or URLs; normalize null/missing fields to undefined for callers.
        const data = response.data ?? [];
        return {
          images: data.map((image) => ({
            base64: image.b64_json ?? void 0,
            url: image.url ?? void 0,
            revisedPrompt: image.revised_prompt ?? void 0
          }))
        };
      } catch (error) {
        throw wrapOpenAIError(error);
      }
    }
  };
}
150
// Translate camelCase image provider options into the snake_case request
// fields the OpenAI API expects, omitting anything left undefined.
function mapOpenAIImageProviderOptionsToRequestFields(options) {
  const pairs = [
    ["background", options?.background],
    ["moderation", options?.moderation],
    ["output_compression", options?.outputCompression],
    ["output_format", options?.outputFormat],
    ["quality", options?.quality],
    ["response_format", options?.responseFormat],
    ["style", options?.style],
    ["user", options?.user]
  ];
  return Object.fromEntries(pairs.filter(([, value]) => value !== undefined));
}
162
+
163
+ // src/shared/tools.ts
164
+ import { zodToJsonSchema } from "zod-to-json-schema";
165
// Fallback tool name/description for structured-output generation when the
// caller supplies no schemaName/schemaDescription (see createStructuredOutputOptions).
var DEFAULT_STRUCTURED_OUTPUT_TOOL_NAME = "core_ai_generate_object";
var DEFAULT_STRUCTURED_OUTPUT_TOOL_DESCRIPTION = "Return a JSON object that matches the requested schema.";
167
// Translate the core-ai tool map into OpenAI function-tool descriptors,
// converting each tool's zod parameter schema to JSON Schema.
function convertTools(tools) {
  return Object.values(tools).map((tool) => {
    const functionSpec = {
      name: tool.name,
      description: tool.description,
      parameters: zodToJsonSchema(tool.parameters)
    };
    return { type: "function", function: functionSpec };
  });
}
177
// Translate a core-ai tool choice into OpenAI's format: string modes
// ("auto", "none", ...) pass through; a specific tool selection becomes a
// function-choice object.
function convertToolChoice(choice) {
  if (typeof choice !== "string") {
    return {
      type: "function",
      function: { name: choice.toolName }
    };
  }
  return choice;
}
188
// Resolve the tool name used for structured output: the caller's trimmed
// schemaName when non-empty, otherwise the package default.
function getStructuredOutputToolName(options) {
  const trimmed = options.schemaName?.trim();
  if (trimmed) {
    return trimmed;
  }
  return DEFAULT_STRUCTURED_OUTPUT_TOOL_NAME;
}
191
// Rewrite a structured-output request as a forced tool call: a single
// synthetic tool whose parameters are the requested schema, with toolChoice
// pinned to it. All generation parameters are forwarded unchanged.
function createStructuredOutputOptions(options) {
  const toolName = getStructuredOutputToolName(options);
  const structuredOutputTool = {
    name: toolName,
    description: options.schemaDescription ?? DEFAULT_STRUCTURED_OUTPUT_TOOL_DESCRIPTION,
    parameters: options.schema
  };
  return {
    messages: options.messages,
    tools: { structured_output: structuredOutputTool },
    toolChoice: { type: "tool", toolName },
    reasoning: options.reasoning,
    temperature: options.temperature,
    maxTokens: options.maxTokens,
    topP: options.topP,
    providerOptions: options.providerOptions,
    signal: options.signal
  };
}
214
+
215
+ // src/model-capabilities.ts
216
// Fallback reasoning capabilities for model IDs not listed in
// MODEL_CAPABILITIES (unknown or future models).
var DEFAULT_CAPABILITIES = {
  reasoning: {
    supportsEffort: true,
    supportedRange: ["low", "medium", "high"],
    restrictsSamplingParams: false
  }
};
// Per-model reasoning capability table, keyed by normalized model ID
// (snapshot-date suffixes stripped — see normalizeModelId).
//   supportsEffort          — whether the model accepts a reasoning-effort setting
//   supportedRange          — accepted effort levels, lowest to highest
//                             (rank order defined by EFFORT_RANK)
//   restrictsSamplingParams — when true, temperature/topP are rejected while
//                             reasoning is enabled (validateOpenAIReasoningConfig)
var MODEL_CAPABILITIES = {
  "gpt-5.4": {
    reasoning: {
      supportsEffort: true,
      supportedRange: ["low", "medium", "high", "max"],
      restrictsSamplingParams: true
    }
  },
  "gpt-5.4-pro": {
    reasoning: {
      supportsEffort: true,
      supportedRange: ["low", "medium", "high", "max"],
      restrictsSamplingParams: true
    }
  },
  "gpt-5.2": {
    reasoning: {
      supportsEffort: true,
      supportedRange: ["low", "medium", "high", "max"],
      restrictsSamplingParams: true
    }
  },
  "gpt-5.2-codex": {
    reasoning: {
      supportsEffort: true,
      supportedRange: ["low", "medium", "high", "max"],
      restrictsSamplingParams: true
    }
  },
  "gpt-5.2-pro": {
    reasoning: {
      supportsEffort: true,
      supportedRange: ["low", "medium", "high", "max"],
      restrictsSamplingParams: true
    }
  },
  "gpt-5.1": {
    reasoning: {
      supportsEffort: true,
      supportedRange: ["low", "medium", "high"],
      restrictsSamplingParams: true
    }
  },
  // The gpt-5 family additionally accepts "minimal" effort.
  "gpt-5": {
    reasoning: {
      supportsEffort: true,
      supportedRange: ["minimal", "low", "medium", "high"],
      restrictsSamplingParams: true
    }
  },
  "gpt-5-mini": {
    reasoning: {
      supportsEffort: true,
      supportedRange: ["minimal", "low", "medium", "high"],
      restrictsSamplingParams: true
    }
  },
  "gpt-5-nano": {
    reasoning: {
      supportsEffort: true,
      supportedRange: ["minimal", "low", "medium", "high"],
      restrictsSamplingParams: true
    }
  },
  // o-series models allow sampling params alongside reasoning.
  o3: {
    reasoning: {
      supportsEffort: true,
      supportedRange: ["low", "medium", "high"],
      restrictsSamplingParams: false
    }
  },
  "o3-mini": {
    reasoning: {
      supportsEffort: true,
      supportedRange: ["low", "medium", "high"],
      restrictsSamplingParams: false
    }
  },
  "o4-mini": {
    reasoning: {
      supportsEffort: true,
      supportedRange: ["low", "medium", "high"],
      restrictsSamplingParams: false
    }
  },
  o1: {
    reasoning: {
      supportsEffort: true,
      supportedRange: ["low", "medium", "high"],
      restrictsSamplingParams: false
    }
  },
  // o1-mini is the one listed model with no effort control at all.
  "o1-mini": {
    reasoning: {
      supportsEffort: false,
      supportedRange: [],
      restrictsSamplingParams: false
    }
  }
};
323
// Ordinal ranking of reasoning-effort levels, lowest to highest. Used by
// clampReasoningEffort to pick the nearest supported level by distance.
var EFFORT_RANK = {
  minimal: 0,
  low: 1,
  medium: 2,
  high: 3,
  max: 4
};
330
// Look up reasoning capabilities for a model ID, stripping any snapshot
// suffix first; unknown models fall back to DEFAULT_CAPABILITIES.
function getOpenAIModelCapabilities(modelId) {
  const key = normalizeModelId(modelId);
  const capabilities = MODEL_CAPABILITIES[key];
  return capabilities ?? DEFAULT_CAPABILITIES;
}
334
// Strip a trailing snapshot-date suffix so capability lookup matches the base
// model family. OpenAI's published snapshot IDs use dashed ISO dates
// (e.g. "gpt-4.1-2025-04-14"), which the original compact-only pattern
// (/-\d{8}$/) never matched — those IDs silently fell through to
// DEFAULT_CAPABILITIES. Both forms are now handled; the compact 8-digit form
// is kept for backward compatibility.
function normalizeModelId(modelId) {
  return modelId.replace(/-\d{4}-\d{2}-\d{2}$/, "").replace(/-\d{8}$/, "");
}
337
// Map a requested reasoning-effort level onto the closest level the model
// supports (by EFFORT_RANK distance). An empty range or an already-supported
// level passes through unchanged; ties keep the earlier range entry.
function clampReasoningEffort(effort, supportedRange) {
  if (supportedRange.length === 0 || supportedRange.includes(effort)) {
    return effort;
  }
  const targetRank = EFFORT_RANK[effort];
  const distanceTo = (level) => Math.abs(EFFORT_RANK[level] - targetRank);
  let best = supportedRange[0] ?? effort;
  for (const candidate of supportedRange.slice(1)) {
    if (distanceTo(candidate) < distanceTo(best)) {
      best = candidate;
    }
  }
  return best;
}
353
// Translate core-ai's effort naming to OpenAI's wire format: "max" is sent
// as "xhigh"; every other level maps 1:1.
function toOpenAIReasoningEffort(effort) {
  return effort === "max" ? "xhigh" : effort;
}
359
+
360
+ // src/shared/utils.ts
361
+ import { ProviderError as ProviderError2 } from "@core-ai/core-ai";
362
// Parse a JSON string, returning {} for anything that is not a plain object:
// invalid JSON, arrays, primitives, and null all collapse to {}.
function safeParseJsonObject(json) {
  let parsed;
  try {
    parsed = JSON.parse(json);
  } catch {
    return {};
  }
  const isPlainObject = parsed !== null && typeof parsed === "object" && !Array.isArray(parsed);
  return isPlainObject ? parsed : {};
}
373
// Reject sampling parameters that the model forbids while reasoning is on.
// No-op when reasoning is disabled or the model permits sampling params;
// otherwise throws a ProviderError naming the offending parameter.
function validateOpenAIReasoningConfig(modelId, options) {
  if (!options.reasoning) {
    return;
  }
  const { reasoning } = getOpenAIModelCapabilities(modelId);
  if (!reasoning.restrictsSamplingParams) {
    return;
  }
  const rejectParam = (name) => {
    throw new ProviderError2(
      `OpenAI model "${modelId}" does not support ${name} when reasoning is enabled`,
      "openai"
    );
  };
  if (options.temperature !== void 0) {
    rejectParam("temperature");
  }
  if (options.topP !== void 0) {
    rejectParam("topP");
  }
}
394
+
395
+ export {
396
+ getOpenAIModelCapabilities,
397
+ clampReasoningEffort,
398
+ toOpenAIReasoningEffort,
399
+ convertTools,
400
+ convertToolChoice,
401
+ getStructuredOutputToolName,
402
+ createStructuredOutputOptions,
403
+ safeParseJsonObject,
404
+ validateOpenAIReasoningConfig,
405
+ openaiResponsesGenerateProviderOptionsSchema,
406
+ openaiCompatGenerateProviderOptionsSchema,
407
+ openaiEmbedProviderOptionsSchema,
408
+ openaiImageProviderOptionsSchema,
409
+ parseOpenAIResponsesGenerateProviderOptions,
410
+ parseOpenAICompatGenerateProviderOptions,
411
+ openaiResponsesProviderOptionsSchema,
412
+ openaiCompatProviderOptionsSchema,
413
+ wrapOpenAIError,
414
+ createOpenAIEmbeddingModel,
415
+ createOpenAIImageModel
416
+ };
@@ -0,0 +1,18 @@
1
+ import OpenAI from 'openai';
2
+ import { ChatModel, EmbeddingModel, ImageModel } from '@core-ai/core-ai';
3
+ export { O as OpenAICompatGenerateProviderOptions, a as OpenAICompatRequestOptions, o as openaiCompatGenerateProviderOptionsSchema, b as openaiCompatProviderOptionsSchema } from './provider-options-DK-Tz0pz.js';
4
+ import 'zod';
5
+
6
/**
 * Configuration for the Chat Completions–compatible provider.
 * Supply `apiKey`/`baseURL` to construct a client, or pass a pre-configured
 * OpenAI `client` instance (presumably takes precedence over apiKey/baseURL
 * when both are given — TODO confirm against the implementation).
 */
type OpenAICompatProviderOptions = {
    apiKey?: string;
    baseURL?: string;
    client?: OpenAI;
};
/** Factory surface exposing chat, embedding, and image models by model ID. */
type OpenAICompatProvider = {
    chatModel(modelId: string): ChatModel;
    embeddingModel(modelId: string): EmbeddingModel;
    imageModel(modelId: string): ImageModel;
};
/**
 * Create a provider backed by the legacy Chat Completions API — intended for
 * Azure OpenAI, proxies, and other OpenAI-compatible endpoints (per README).
 */
declare function createOpenAICompat(options?: OpenAICompatProviderOptions): OpenAICompatProvider;
17
+
18
+ export { type OpenAICompatProvider, type OpenAICompatProviderOptions, createOpenAICompat };