@core-ai/openai 0.4.0 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +21 -0
- package/dist/chunk-7CU5JW63.js +402 -0
- package/dist/compat.d.ts +18 -0
- package/dist/compat.js +612 -0
- package/dist/index.d.ts +7 -1
- package/dist/index.js +484 -315
- package/dist/provider-options-DK-Tz0pz.d.ts +157 -0
- package/package.json +9 -5
package/README.md
CHANGED
|
@@ -12,6 +12,8 @@ npm install @core-ai/core-ai @core-ai/openai zod
|
|
|
12
12
|
|
|
13
13
|
## Usage
|
|
14
14
|
|
|
15
|
+
The default entrypoint uses the OpenAI **Responses API**:
|
|
16
|
+
|
|
15
17
|
```ts
|
|
16
18
|
import { generate } from '@core-ai/core-ai';
|
|
17
19
|
import { createOpenAI } from '@core-ai/openai';
|
|
@@ -26,3 +28,22 @@ const result = await generate({
|
|
|
26
28
|
|
|
27
29
|
console.log(result.content);
|
|
28
30
|
```
|
|
31
|
+
|
|
32
|
+
## Chat Completions (Compat)
|
|
33
|
+
|
|
34
|
+
For the legacy Chat Completions API — useful for Azure OpenAI, proxies, or third-party OpenAI-compatible endpoints — import from `@core-ai/openai/compat`:
|
|
35
|
+
|
|
36
|
+
```ts
|
|
37
|
+
import { generate } from '@core-ai/core-ai';
|
|
38
|
+
import { createOpenAICompat } from '@core-ai/openai/compat';
|
|
39
|
+
|
|
40
|
+
const openai = createOpenAICompat({ apiKey: process.env.OPENAI_API_KEY });
|
|
41
|
+
const model = openai.chatModel('gpt-5-mini');
|
|
42
|
+
|
|
43
|
+
const result = await generate({
|
|
44
|
+
model,
|
|
45
|
+
messages: [{ role: 'user', content: 'Hello!' }],
|
|
46
|
+
});
|
|
47
|
+
|
|
48
|
+
console.log(result.content);
|
|
49
|
+
```
|
|
@@ -0,0 +1,402 @@
|
|
|
1
|
+
// src/provider-options.ts
import { z } from "zod";

/**
 * Provider-specific options accepted by Responses API generate calls.
 * `.strict()` rejects unknown keys so option typos surface as validation
 * errors instead of being silently dropped.
 */
var openaiResponsesGenerateProviderOptionsSchema = z.object({
  store: z.boolean().optional(),
  serviceTier: z.enum(["auto", "default", "flex", "scale", "priority"]).optional(),
  include: z.array(z.string()).optional(),
  parallelToolCalls: z.boolean().optional(),
  user: z.string().optional()
}).strict();

/**
 * Chat Completions (compat) variant of the generate options: drops the
 * Responses-only `include` field and adds the classic sampling knobs.
 */
var openaiCompatGenerateProviderOptionsSchema = openaiResponsesGenerateProviderOptionsSchema.omit({
  include: true
}).extend({
  stopSequences: z.array(z.string()).optional(),
  frequencyPenalty: z.number().optional(),
  presencePenalty: z.number().optional(),
  seed: z.number().int().optional()
}).strict();

/** Options accepted by the embeddings endpoint wrapper. */
var openaiEmbedProviderOptionsSchema = z.object({
  encodingFormat: z.enum(["float", "base64"]).optional(),
  user: z.string().optional()
}).strict();

/** Options accepted by the image-generation endpoint wrapper. */
var openaiImageProviderOptionsSchema = z.object({
  background: z.enum(["transparent", "opaque", "auto"]).optional(),
  moderation: z.enum(["low", "auto"]).optional(),
  outputCompression: z.number().int().min(0).max(100).optional(),
  outputFormat: z.enum(["png", "jpeg", "webp"]).optional(),
  quality: z.enum(["standard", "hd", "low", "medium", "high", "auto"]).optional(),
  responseFormat: z.enum(["url", "b64_json"]).optional(),
  style: z.enum(["vivid", "natural"]).optional(),
  user: z.string().optional()
}).strict();

/**
 * Build a parser that extracts the `openai` namespace from a providerOptions
 * bag and validates it against `schema`. Returns undefined when the caller
 * supplied no `openai` options at all; throws (via zod) on invalid options.
 */
function makeOpenAIProviderOptionsParser(schema) {
  return (providerOptions) => {
    const rawOptions = providerOptions?.openai;
    return rawOptions === undefined ? undefined : schema.parse(rawOptions);
  };
}

var parseOpenAIResponsesGenerateProviderOptions = makeOpenAIProviderOptionsParser(
  openaiResponsesGenerateProviderOptionsSchema
);
var parseOpenAICompatGenerateProviderOptions = makeOpenAIProviderOptionsParser(
  openaiCompatGenerateProviderOptionsSchema
);
var parseOpenAIEmbedProviderOptions = makeOpenAIProviderOptionsParser(
  openaiEmbedProviderOptionsSchema
);
var parseOpenAIImageProviderOptions = makeOpenAIProviderOptionsParser(
  openaiImageProviderOptionsSchema
);

// Aliases kept for the public schema names re-exported by the entrypoints.
var openaiResponsesProviderOptionsSchema = openaiResponsesGenerateProviderOptionsSchema;
var openaiCompatProviderOptionsSchema = openaiCompatGenerateProviderOptionsSchema;
|
|
62
|
+
|
|
63
|
+
// src/openai-error.ts
import { APIError } from "openai";
import { ProviderError } from "@core-ai/core-ai";

/**
 * Normalize any thrown value into a ProviderError tagged "openai".
 * SDK APIError instances keep their HTTP status; everything else is wrapped
 * with an undefined status and the original value preserved as the cause.
 */
function wrapOpenAIError(error) {
  if (error instanceof APIError) {
    return new ProviderError(error.message, "openai", error.status, error);
  }
  const message = error instanceof Error ? error.message : String(error);
  return new ProviderError(message, "openai", void 0, error);
}
|
|
77
|
+
|
|
78
|
+
// src/embedding-model.ts
/**
 * Wrap an OpenAI SDK client as a core-ai EmbeddingModel for `modelId`.
 * Any failure inside embed() is rethrown as a ProviderError via
 * wrapOpenAIError.
 */
function createOpenAIEmbeddingModel(client, modelId) {
  return {
    provider: "openai",
    modelId,
    async embed(options) {
      try {
        const providerFields = mapOpenAIEmbedProviderOptionsToRequestFields(
          parseOpenAIEmbedProviderOptions(options.providerOptions)
        );
        const response = await client.embeddings.create({
          model: modelId,
          input: options.input,
          ...(options.dimensions === void 0 ? {} : { dimensions: options.dimensions }),
          ...providerFields
        });
        // Sort a copy by index so embeddings line up with the input order.
        const ordered = [...response.data].sort((a, b) => a.index - b.index);
        return {
          embeddings: ordered.map((item) => item.embedding),
          usage: {
            inputTokens: response.usage.prompt_tokens
          }
        };
      } catch (error) {
        throw wrapOpenAIError(error);
      }
    }
  };
}

/**
 * Translate parsed camelCase embed options into the snake_case request
 * fields the embeddings endpoint expects; absent values are omitted
 * entirely rather than sent as undefined.
 */
function mapOpenAIEmbedProviderOptionsToRequestFields(options) {
  const entries = [];
  if (options?.encodingFormat !== void 0) {
    entries.push(["encoding_format", options.encodingFormat]);
  }
  if (options?.user !== void 0) {
    entries.push(["user", options.user]);
  }
  return Object.fromEntries(entries);
}
|
|
114
|
+
|
|
115
|
+
// src/image-model.ts
/**
 * Wrap an OpenAI SDK client as a core-ai ImageModel for `modelId`.
 * Any failure inside generate() is rethrown as a ProviderError via
 * wrapOpenAIError.
 */
function createOpenAIImageModel(client, modelId) {
  return {
    provider: "openai",
    modelId,
    async generate(options) {
      try {
        const providerFields = mapOpenAIImageProviderOptionsToRequestFields(
          parseOpenAIImageProviderOptions(options.providerOptions)
        );
        const request = {
          model: modelId,
          prompt: options.prompt,
          ...(options.n === void 0 ? {} : { n: options.n }),
          ...(options.size === void 0 ? {} : { size: options.size }),
          ...providerFields
        };
        const response = await client.images.generate(request);
        const data = response.data ?? [];
        return {
          // Normalize SDK nulls to undefined for the core-ai image shape.
          images: data.map((image) => ({
            base64: image.b64_json ?? void 0,
            url: image.url ?? void 0,
            revisedPrompt: image.revised_prompt ?? void 0
          }))
        };
      } catch (error) {
        throw wrapOpenAIError(error);
      }
    }
  };
}

/**
 * Translate parsed camelCase image options into the snake_case request
 * fields the images endpoint expects; absent values are omitted entirely
 * rather than sent as undefined.
 */
function mapOpenAIImageProviderOptionsToRequestFields(options) {
  const entries = [];
  if (options?.background !== void 0) entries.push(["background", options.background]);
  if (options?.moderation !== void 0) entries.push(["moderation", options.moderation]);
  if (options?.outputCompression !== void 0) entries.push(["output_compression", options.outputCompression]);
  if (options?.outputFormat !== void 0) entries.push(["output_format", options.outputFormat]);
  if (options?.quality !== void 0) entries.push(["quality", options.quality]);
  if (options?.responseFormat !== void 0) entries.push(["response_format", options.responseFormat]);
  if (options?.style !== void 0) entries.push(["style", options.style]);
  if (options?.user !== void 0) entries.push(["user", options.user]);
  return Object.fromEntries(entries);
}
|
|
162
|
+
|
|
163
|
+
// src/shared/tools.ts
|
|
164
|
+
import { zodToJsonSchema } from "zod-to-json-schema";
|
|
165
|
+
// Fallback identity used for the synthetic structured-output tool when the
// caller does not supply a schema name/description.
var DEFAULT_STRUCTURED_OUTPUT_TOOL_NAME = "core_ai_generate_object";
var DEFAULT_STRUCTURED_OUTPUT_TOOL_DESCRIPTION = "Return a JSON object that matches the requested schema.";

/**
 * Convert the core-ai tool map into OpenAI's function-tool wire format.
 * Tool parameter schemas (zod) are serialized to JSON Schema.
 */
function convertTools(tools) {
  const converted = [];
  for (const tool of Object.values(tools)) {
    converted.push({
      type: "function",
      function: {
        name: tool.name,
        description: tool.description,
        parameters: zodToJsonSchema(tool.parameters)
      }
    });
  }
  return converted;
}

/**
 * Convert a core-ai tool choice to OpenAI's representation: plain string
 * modes pass through unchanged, while a named-tool choice becomes a
 * function selector object.
 */
function convertToolChoice(choice) {
  if (typeof choice !== "string") {
    return {
      type: "function",
      function: {
        name: choice.toolName
      }
    };
  }
  return choice;
}
|
|
188
|
+
/**
 * Resolve the tool name used for structured output: the caller's trimmed
 * schemaName when non-empty, otherwise the package default.
 */
function getStructuredOutputToolName(options) {
  const trimmed = options.schemaName?.trim();
  return trimmed ? trimmed : DEFAULT_STRUCTURED_OUTPUT_TOOL_NAME;
}

/**
 * Rewrite a structured-output request as a forced tool call: the schema is
 * exposed as a single tool and toolChoice pins the model to it, while the
 * remaining generation settings are carried through unchanged.
 */
function createStructuredOutputOptions(options) {
  const toolName = getStructuredOutputToolName(options);
  const {
    messages,
    schema,
    schemaDescription,
    reasoning,
    temperature,
    maxTokens,
    topP,
    providerOptions,
    signal
  } = options;
  return {
    messages,
    tools: {
      structured_output: {
        name: toolName,
        description: schemaDescription ?? DEFAULT_STRUCTURED_OUTPUT_TOOL_DESCRIPTION,
        parameters: schema
      }
    },
    toolChoice: {
      type: "tool",
      toolName
    },
    reasoning,
    temperature,
    maxTokens,
    topP,
    providerOptions,
    signal
  };
}
|
|
214
|
+
|
|
215
|
+
// src/model-capabilities.ts
/** Capabilities assumed for any model id without an explicit entry below. */
var DEFAULT_CAPABILITIES = {
  reasoning: {
    supportsEffort: true,
    supportedRange: ["low", "medium", "high"],
    restrictsSamplingParams: false
  }
};
/**
 * Per-model reasoning capabilities, keyed by normalized (snapshot-free)
 * model id. `restrictsSamplingParams: true` marks models for which
 * temperature/topP must not be sent alongside reasoning (enforced by
 * validateOpenAIReasoningConfig).
 */
var MODEL_CAPABILITIES = {
  "gpt-5.2": {
    reasoning: { supportsEffort: true, supportedRange: ["low", "medium", "high", "max"], restrictsSamplingParams: true }
  },
  "gpt-5.2-codex": {
    reasoning: { supportsEffort: true, supportedRange: ["low", "medium", "high", "max"], restrictsSamplingParams: true }
  },
  "gpt-5.2-pro": {
    reasoning: { supportsEffort: true, supportedRange: ["low", "medium", "high", "max"], restrictsSamplingParams: true }
  },
  "gpt-5.1": {
    reasoning: { supportsEffort: true, supportedRange: ["low", "medium", "high"], restrictsSamplingParams: true }
  },
  "gpt-5": {
    reasoning: { supportsEffort: true, supportedRange: ["minimal", "low", "medium", "high"], restrictsSamplingParams: true }
  },
  "gpt-5-mini": {
    reasoning: { supportsEffort: true, supportedRange: ["minimal", "low", "medium", "high"], restrictsSamplingParams: true }
  },
  "gpt-5-nano": {
    reasoning: { supportsEffort: true, supportedRange: ["minimal", "low", "medium", "high"], restrictsSamplingParams: true }
  },
  o3: {
    reasoning: { supportsEffort: true, supportedRange: ["low", "medium", "high"], restrictsSamplingParams: false }
  },
  "o3-mini": {
    reasoning: { supportsEffort: true, supportedRange: ["low", "medium", "high"], restrictsSamplingParams: false }
  },
  "o4-mini": {
    reasoning: { supportsEffort: true, supportedRange: ["low", "medium", "high"], restrictsSamplingParams: false }
  },
  o1: {
    reasoning: { supportsEffort: true, supportedRange: ["low", "medium", "high"], restrictsSamplingParams: false }
  },
  "o1-mini": {
    reasoning: { supportsEffort: false, supportedRange: [], restrictsSamplingParams: false }
  }
};
/** Ordering of effort levels, used to find the nearest supported level. */
var EFFORT_RANK = {
  minimal: 0,
  low: 1,
  medium: 2,
  high: 3,
  max: 4
};
/**
 * Look up capabilities for `modelId` (snapshot suffixes stripped first),
 * falling back to DEFAULT_CAPABILITIES for unknown models.
 */
function getOpenAIModelCapabilities(modelId) {
  const normalizedModelId = normalizeModelId(modelId);
  return MODEL_CAPABILITIES[normalizedModelId] ?? DEFAULT_CAPABILITIES;
}
/**
 * Strip a trailing date snapshot suffix so dated snapshots resolve to their
 * base model's capabilities. Handles both the dashed form OpenAI publishes
 * ("gpt-4o-2024-08-06" -> "gpt-4o") and the compact "-YYYYMMDD" form.
 * Previously only the compact form was stripped, so dashed snapshot ids
 * silently fell back to DEFAULT_CAPABILITIES.
 */
function normalizeModelId(modelId) {
  return modelId.replace(/-\d{4}-\d{2}-\d{2}$/, "").replace(/-\d{8}$/, "");
}
/**
 * Clamp a requested reasoning effort to the nearest level in
 * `supportedRange` (by EFFORT_RANK distance; earlier entries win ties).
 * An empty range or an already-supported effort passes through unchanged.
 */
function clampReasoningEffort(effort, supportedRange) {
  if (supportedRange.length === 0 || supportedRange.includes(effort)) {
    return effort;
  }
  const targetRank = EFFORT_RANK[effort];
  let best = supportedRange[0] ?? effort;
  let bestDistance = Math.abs(EFFORT_RANK[best] - targetRank);
  for (const candidate of supportedRange) {
    const distance = Math.abs(EFFORT_RANK[candidate] - targetRank);
    if (distance < bestDistance) {
      best = candidate;
      bestDistance = distance;
    }
  }
  return best;
}
/** Map core-ai's "max" effort to OpenAI's "xhigh"; others pass through. */
function toOpenAIReasoningEffort(effort) {
  return effort === "max" ? "xhigh" : effort;
}
|
|
345
|
+
|
|
346
|
+
// src/shared/utils.ts
|
|
347
|
+
import { ProviderError as ProviderError2 } from "@core-ai/core-ai";
|
|
348
|
+
/**
 * Parse `json` and return it only when it is a plain (non-array, non-null)
 * object; any other value — or a parse failure — yields an empty object.
 * Deliberately never throws: callers treat malformed payloads as "no data".
 */
function safeParseJsonObject(json) {
  let parsed;
  try {
    parsed = JSON.parse(json);
  } catch {
    return {};
  }
  const isPlainObject = Boolean(parsed) && typeof parsed === "object" && !Array.isArray(parsed);
  return isPlainObject ? parsed : {};
}

/**
 * Reject temperature/topP for models whose capabilities mark sampling
 * params as restricted while reasoning is enabled. No-op when reasoning is
 * off or the model has no such restriction; throws ProviderError otherwise.
 */
function validateOpenAIReasoningConfig(modelId, options) {
  if (!options.reasoning) {
    return;
  }
  const { reasoning } = getOpenAIModelCapabilities(modelId);
  if (!reasoning.restrictsSamplingParams) {
    return;
  }
  if (options.temperature !== void 0) {
    throw new ProviderError2(
      `OpenAI model "${modelId}" does not support temperature when reasoning is enabled`,
      "openai"
    );
  }
  if (options.topP !== void 0) {
    throw new ProviderError2(
      `OpenAI model "${modelId}" does not support topP when reasoning is enabled`,
      "openai"
    );
  }
}
|
|
380
|
+
|
|
381
|
+
// Shared-chunk exports consumed by the package entrypoints.
// NOTE(review): parseOpenAIEmbedProviderOptions and
// parseOpenAIImageProviderOptions are defined above but not exported here —
// presumably internal-only; confirm against dist/index.js and dist/compat.js.
export {
  getOpenAIModelCapabilities,
  clampReasoningEffort,
  toOpenAIReasoningEffort,
  convertTools,
  convertToolChoice,
  getStructuredOutputToolName,
  createStructuredOutputOptions,
  safeParseJsonObject,
  validateOpenAIReasoningConfig,
  openaiResponsesGenerateProviderOptionsSchema,
  openaiCompatGenerateProviderOptionsSchema,
  openaiEmbedProviderOptionsSchema,
  openaiImageProviderOptionsSchema,
  parseOpenAIResponsesGenerateProviderOptions,
  parseOpenAICompatGenerateProviderOptions,
  openaiResponsesProviderOptionsSchema,
  openaiCompatProviderOptionsSchema,
  wrapOpenAIError,
  createOpenAIEmbeddingModel,
  createOpenAIImageModel
};
|
package/dist/compat.d.ts
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
import OpenAI from 'openai';
|
|
2
|
+
import { ChatModel, EmbeddingModel, ImageModel } from '@core-ai/core-ai';
|
|
3
|
+
export { O as OpenAICompatGenerateProviderOptions, a as OpenAICompatRequestOptions, o as openaiCompatGenerateProviderOptionsSchema, b as openaiCompatProviderOptionsSchema } from './provider-options-DK-Tz0pz.js';
|
|
4
|
+
import 'zod';
|
|
5
|
+
|
|
6
|
+
/**
 * Construction options for {@link createOpenAICompat}.
 *
 * NOTE(review): the interplay between `client` and `apiKey`/`baseURL` is not
 * visible from this declaration file — presumably a supplied `client` takes
 * precedence over constructing one; confirm in dist/compat.js.
 */
type OpenAICompatProviderOptions = {
  /** API key — presumably forwarded to the underlying OpenAI SDK client. */
  apiKey?: string;
  /** Endpoint override (Azure OpenAI, proxies, OpenAI-compatible servers). */
  baseURL?: string;
  /** Pre-configured OpenAI SDK client to use instead of constructing one. */
  client?: OpenAI;
};
/** Model factory surface returned by {@link createOpenAICompat}. */
type OpenAICompatProvider = {
  /** Chat model backed by the legacy Chat Completions API. */
  chatModel(modelId: string): ChatModel;
  /** Embedding model for the given model id. */
  embeddingModel(modelId: string): EmbeddingModel;
  /** Image-generation model for the given model id. */
  imageModel(modelId: string): ImageModel;
};
/**
 * Create an OpenAI provider that targets the Chat Completions API rather
 * than the Responses API used by the package's default entrypoint.
 */
declare function createOpenAICompat(options?: OpenAICompatProviderOptions): OpenAICompatProvider;

export { type OpenAICompatProvider, type OpenAICompatProviderOptions, createOpenAICompat };
|