@core-ai/openai 0.2.1 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +290 -77
  2. package/package.json +2 -2
package/dist/index.js CHANGED
@@ -2,12 +2,18 @@
2
2
  import OpenAI from "openai";
3
3
 
4
4
  // src/chat-model.ts
5
- import { createStreamResult } from "@core-ai/core-ai";
5
+ import {
6
+ StructuredOutputNoObjectGeneratedError,
7
+ StructuredOutputParseError,
8
+ StructuredOutputValidationError,
9
+ createObjectStreamResult,
10
+ createStreamResult
11
+ } from "@core-ai/core-ai";
6
12
 
7
13
  // src/chat-adapter.ts
8
- import { APIError } from "openai";
9
14
  import { zodToJsonSchema } from "zod-to-json-schema";
10
- import { ProviderError } from "@core-ai/core-ai";
15
+ var DEFAULT_STRUCTURED_OUTPUT_TOOL_NAME = "core_ai_generate_object";
16
+ var DEFAULT_STRUCTURED_OUTPUT_TOOL_DESCRIPTION = "Return a JSON object that matches the requested schema.";
11
17
  function convertMessages(messages) {
12
18
  return messages.map(convertMessage);
13
19
  }
@@ -91,38 +97,66 @@ function convertToolChoice(choice) {
91
97
  }
92
98
  };
93
99
  }
100
+ function getStructuredOutputToolName(options) {
101
+ const trimmedName = options.schemaName?.trim();
102
+ if (trimmedName && trimmedName.length > 0) {
103
+ return trimmedName;
104
+ }
105
+ return DEFAULT_STRUCTURED_OUTPUT_TOOL_NAME;
106
+ }
107
+ function createStructuredOutputOptions(options) {
108
+ const toolName = getStructuredOutputToolName(options);
109
+ return {
110
+ messages: options.messages,
111
+ tools: {
112
+ structured_output: {
113
+ name: toolName,
114
+ description: options.schemaDescription ?? DEFAULT_STRUCTURED_OUTPUT_TOOL_DESCRIPTION,
115
+ parameters: options.schema
116
+ }
117
+ },
118
+ toolChoice: {
119
+ type: "tool",
120
+ toolName
121
+ },
122
+ config: options.config,
123
+ providerOptions: options.providerOptions,
124
+ signal: options.signal
125
+ };
126
+ }
94
127
  function createGenerateRequest(modelId, options) {
95
128
  return {
96
- model: modelId,
97
- messages: convertMessages(options.messages),
98
- ...options.tools && Object.keys(options.tools).length > 0 ? { tools: convertTools(options.tools) } : {},
99
- ...options.toolChoice ? { tool_choice: convertToolChoice(options.toolChoice) } : {},
100
- ...options.config?.temperature !== void 0 ? { temperature: options.config.temperature } : {},
101
- ...options.config?.maxTokens !== void 0 ? { max_tokens: options.config.maxTokens } : {},
102
- ...options.config?.topP !== void 0 ? { top_p: options.config.topP } : {},
103
- ...options.config?.stopSequences ? { stop: options.config.stopSequences } : {},
104
- ...options.config?.frequencyPenalty !== void 0 ? { frequency_penalty: options.config.frequencyPenalty } : {},
105
- ...options.config?.presencePenalty !== void 0 ? { presence_penalty: options.config.presencePenalty } : {},
129
+ ...createRequestBase(modelId, options),
106
130
  ...options.providerOptions
107
131
  };
108
132
  }
109
133
  function createStreamRequest(modelId, options) {
110
134
  return {
111
- model: modelId,
112
- messages: convertMessages(options.messages),
135
+ ...createRequestBase(modelId, options),
113
136
  stream: true,
114
137
  stream_options: {
115
138
  include_usage: true
116
139
  },
140
+ ...options.providerOptions
141
+ };
142
+ }
143
+ function createRequestBase(modelId, options) {
144
+ return {
145
+ model: modelId,
146
+ messages: convertMessages(options.messages),
117
147
  ...options.tools && Object.keys(options.tools).length > 0 ? { tools: convertTools(options.tools) } : {},
118
148
  ...options.toolChoice ? { tool_choice: convertToolChoice(options.toolChoice) } : {},
119
- ...options.config?.temperature !== void 0 ? { temperature: options.config.temperature } : {},
120
- ...options.config?.maxTokens !== void 0 ? { max_tokens: options.config.maxTokens } : {},
121
- ...options.config?.topP !== void 0 ? { top_p: options.config.topP } : {},
122
- ...options.config?.stopSequences ? { stop: options.config.stopSequences } : {},
123
- ...options.config?.frequencyPenalty !== void 0 ? { frequency_penalty: options.config.frequencyPenalty } : {},
124
- ...options.config?.presencePenalty !== void 0 ? { presence_penalty: options.config.presencePenalty } : {},
125
- ...options.providerOptions
149
+ ...mapConfigToRequestFields(options.config)
150
+ };
151
+ }
152
+ function mapConfigToRequestFields(config) {
153
+ return {
154
+ ...config?.temperature !== void 0 ? { temperature: config.temperature } : {},
155
+ ...config?.maxTokens !== void 0 ? { max_tokens: config.maxTokens } : {},
156
+ ...config?.topP !== void 0 ? { top_p: config.topP } : {},
157
+ ...config?.stopSequences ? { stop: config.stopSequences } : {},
158
+ ...config?.frequencyPenalty !== void 0 ? { frequency_penalty: config.frequencyPenalty } : {},
159
+ ...config?.presencePenalty !== void 0 ? { presence_penalty: config.presencePenalty } : {}
126
160
  };
127
161
  }
128
162
  function mapGenerateResponse(response) {
@@ -135,8 +169,13 @@ function mapGenerateResponse(response) {
135
169
  usage: {
136
170
  inputTokens: 0,
137
171
  outputTokens: 0,
138
- reasoningTokens: 0,
139
- totalTokens: 0
172
+ inputTokenDetails: {
173
+ cacheReadTokens: 0,
174
+ cacheWriteTokens: 0
175
+ },
176
+ outputTokenDetails: {
177
+ reasoningTokens: 0
178
+ }
140
179
  }
141
180
  };
142
181
  }
@@ -148,8 +187,13 @@ function mapGenerateResponse(response) {
148
187
  usage: {
149
188
  inputTokens: response.usage?.prompt_tokens ?? 0,
150
189
  outputTokens: response.usage?.completion_tokens ?? 0,
151
- reasoningTokens,
152
- totalTokens: response.usage?.total_tokens ?? 0
190
+ inputTokenDetails: {
191
+ cacheReadTokens: response.usage?.prompt_tokens_details?.cached_tokens ?? 0,
192
+ cacheWriteTokens: 0
193
+ },
194
+ outputTokenDetails: {
195
+ reasoningTokens
196
+ }
153
197
  }
154
198
  };
155
199
  }
@@ -193,16 +237,26 @@ async function* transformStream(stream) {
193
237
  let usage = {
194
238
  inputTokens: 0,
195
239
  outputTokens: 0,
196
- reasoningTokens: 0,
197
- totalTokens: 0
240
+ inputTokenDetails: {
241
+ cacheReadTokens: 0,
242
+ cacheWriteTokens: 0
243
+ },
244
+ outputTokenDetails: {
245
+ reasoningTokens: 0
246
+ }
198
247
  };
199
248
  for await (const chunk of stream) {
200
249
  if (chunk.usage) {
201
250
  usage = {
202
251
  inputTokens: chunk.usage.prompt_tokens ?? 0,
203
252
  outputTokens: chunk.usage.completion_tokens ?? 0,
204
- reasoningTokens: chunk.usage.completion_tokens_details?.reasoning_tokens ?? 0,
205
- totalTokens: chunk.usage.total_tokens ?? 0
253
+ inputTokenDetails: {
254
+ cacheReadTokens: chunk.usage.prompt_tokens_details?.cached_tokens ?? 0,
255
+ cacheWriteTokens: 0
256
+ },
257
+ outputTokenDetails: {
258
+ reasoningTokens: chunk.usage.completion_tokens_details?.reasoning_tokens ?? 0
259
+ }
206
260
  };
207
261
  }
208
262
  const choice = chunk.choices[0];
@@ -217,7 +271,9 @@ async function* transformStream(stream) {
217
271
  }
218
272
  if (choice.delta.tool_calls) {
219
273
  for (const partialToolCall of choice.delta.tool_calls) {
220
- const current = bufferedToolCalls.get(partialToolCall.index) ?? {
274
+ const current = bufferedToolCalls.get(
275
+ partialToolCall.index
276
+ ) ?? {
221
277
  id: partialToolCall.id ?? `tool-${partialToolCall.index}`,
222
278
  name: partialToolCall.function?.name ?? "",
223
279
  arguments: ""
@@ -284,7 +340,11 @@ function safeParseJsonObject(json) {
284
340
  return {};
285
341
  }
286
342
  }
287
- function wrapError(error) {
343
+
344
+ // src/openai-error.ts
345
+ import { APIError } from "openai";
346
+ import { ProviderError } from "@core-ai/core-ai";
347
+ function wrapOpenAIError(error) {
288
348
  if (error instanceof APIError) {
289
349
  return new ProviderError(error.message, "openai", error.status, error);
290
350
  }
@@ -298,33 +358,210 @@ function wrapError(error) {
298
358
 
299
359
  // src/chat-model.ts
300
360
  function createOpenAIChatModel(client, modelId) {
361
+ const provider = "openai";
362
+ async function callOpenAIChatCompletionsApi(request) {
363
+ try {
364
+ return await client.chat.completions.create(
365
+ request
366
+ );
367
+ } catch (error) {
368
+ throw wrapOpenAIError(error);
369
+ }
370
+ }
371
+ async function generateChat(options) {
372
+ const request = createGenerateRequest(modelId, options);
373
+ const response = await callOpenAIChatCompletionsApi(request);
374
+ return mapGenerateResponse(response);
375
+ }
376
+ async function streamChat(options) {
377
+ const request = createStreamRequest(modelId, options);
378
+ const stream = await callOpenAIChatCompletionsApi(request);
379
+ return createStreamResult(transformStream(stream));
380
+ }
301
381
  return {
302
- provider: "openai",
382
+ provider,
303
383
  modelId,
304
- async generate(options) {
305
- try {
306
- const request = createGenerateRequest(modelId, options);
307
- const response = await client.chat.completions.create(request);
308
- return mapGenerateResponse(response);
309
- } catch (error) {
310
- throw wrapError(error);
311
- }
384
+ generate: generateChat,
385
+ stream: streamChat,
386
+ async generateObject(options) {
387
+ const structuredOptions = createStructuredOutputOptions(options);
388
+ const result = await generateChat(structuredOptions);
389
+ const toolName = getStructuredOutputToolName(options);
390
+ const object = extractStructuredObject(
391
+ result,
392
+ options.schema,
393
+ provider,
394
+ toolName
395
+ );
396
+ return {
397
+ object,
398
+ finishReason: result.finishReason,
399
+ usage: result.usage
400
+ };
312
401
  },
313
- async stream(options) {
314
- try {
315
- const request = createStreamRequest(modelId, options);
316
- const stream = await client.chat.completions.create(request);
317
- return createStreamResult(transformStream(stream));
318
- } catch (error) {
319
- throw wrapError(error);
320
- }
402
+ async streamObject(options) {
403
+ const structuredOptions = createStructuredOutputOptions(options);
404
+ const stream = await streamChat(structuredOptions);
405
+ const toolName = getStructuredOutputToolName(options);
406
+ return createObjectStreamResult(
407
+ transformStructuredOutputStream(
408
+ stream,
409
+ options.schema,
410
+ provider,
411
+ toolName
412
+ )
413
+ );
321
414
  }
322
415
  };
323
416
  }
417
+ function extractStructuredObject(result, schema, provider, toolName) {
418
+ const structuredToolCall = result.toolCalls.find(
419
+ (toolCall) => toolCall.name === toolName
420
+ );
421
+ if (structuredToolCall) {
422
+ return validateStructuredToolArguments(
423
+ schema,
424
+ structuredToolCall.arguments,
425
+ provider
426
+ );
427
+ }
428
+ const rawOutput = result.content?.trim();
429
+ if (rawOutput && rawOutput.length > 0) {
430
+ return parseAndValidateStructuredPayload(schema, rawOutput, provider);
431
+ }
432
+ throw new StructuredOutputNoObjectGeneratedError(
433
+ "model did not emit a structured object payload",
434
+ provider
435
+ );
436
+ }
437
+ async function* transformStructuredOutputStream(stream, schema, provider, toolName) {
438
+ let validatedObject;
439
+ let contentBuffer = "";
440
+ const toolArgumentDeltas = /* @__PURE__ */ new Map();
441
+ for await (const event of stream) {
442
+ if (event.type === "content-delta") {
443
+ contentBuffer += event.text;
444
+ yield {
445
+ type: "object-delta",
446
+ text: event.text
447
+ };
448
+ continue;
449
+ }
450
+ if (event.type === "tool-call-delta") {
451
+ const previous = toolArgumentDeltas.get(event.toolCallId) ?? "";
452
+ toolArgumentDeltas.set(
453
+ event.toolCallId,
454
+ `${previous}${event.argumentsDelta}`
455
+ );
456
+ yield {
457
+ type: "object-delta",
458
+ text: event.argumentsDelta
459
+ };
460
+ continue;
461
+ }
462
+ if (event.type === "tool-call-end" && event.toolCall.name === toolName) {
463
+ validatedObject = validateStructuredToolArguments(
464
+ schema,
465
+ event.toolCall.arguments,
466
+ provider
467
+ );
468
+ yield {
469
+ type: "object",
470
+ object: validatedObject
471
+ };
472
+ continue;
473
+ }
474
+ if (event.type === "finish") {
475
+ if (validatedObject === void 0) {
476
+ const fallbackPayload = getFallbackStructuredPayload(
477
+ contentBuffer,
478
+ toolArgumentDeltas
479
+ );
480
+ if (!fallbackPayload) {
481
+ throw new StructuredOutputNoObjectGeneratedError(
482
+ "structured output stream ended without an object payload",
483
+ provider
484
+ );
485
+ }
486
+ validatedObject = parseAndValidateStructuredPayload(
487
+ schema,
488
+ fallbackPayload,
489
+ provider
490
+ );
491
+ yield {
492
+ type: "object",
493
+ object: validatedObject
494
+ };
495
+ }
496
+ yield {
497
+ type: "finish",
498
+ finishReason: event.finishReason,
499
+ usage: event.usage
500
+ };
501
+ }
502
+ }
503
+ }
504
+ function getFallbackStructuredPayload(contentBuffer, toolArgumentDeltas) {
505
+ for (const delta of toolArgumentDeltas.values()) {
506
+ const trimmed = delta.trim();
507
+ if (trimmed.length > 0) {
508
+ return trimmed;
509
+ }
510
+ }
511
+ const trimmedContent = contentBuffer.trim();
512
+ if (trimmedContent.length > 0) {
513
+ return trimmedContent;
514
+ }
515
+ return void 0;
516
+ }
517
+ function validateStructuredToolArguments(schema, toolArguments, provider) {
518
+ return validateStructuredObject(
519
+ schema,
520
+ toolArguments,
521
+ provider,
522
+ JSON.stringify(toolArguments)
523
+ );
524
+ }
525
+ function parseAndValidateStructuredPayload(schema, rawPayload, provider) {
526
+ const parsedPayload = parseJson(rawPayload, provider);
527
+ return validateStructuredObject(schema, parsedPayload, provider, rawPayload);
528
+ }
529
+ function parseJson(rawOutput, provider) {
530
+ try {
531
+ return JSON.parse(rawOutput);
532
+ } catch (error) {
533
+ throw new StructuredOutputParseError(
534
+ "failed to parse structured output as JSON",
535
+ provider,
536
+ {
537
+ rawOutput,
538
+ cause: error
539
+ }
540
+ );
541
+ }
542
+ }
543
+ function validateStructuredObject(schema, value, provider, rawOutput) {
544
+ const parsed = schema.safeParse(value);
545
+ if (parsed.success) {
546
+ return parsed.data;
547
+ }
548
+ throw new StructuredOutputValidationError(
549
+ "structured output does not match schema",
550
+ provider,
551
+ formatZodIssues(parsed.error.issues),
552
+ {
553
+ rawOutput
554
+ }
555
+ );
556
+ }
557
+ function formatZodIssues(issues) {
558
+ return issues.map((issue) => {
559
+ const path = issue.path.length > 0 ? issue.path.map((segment) => String(segment)).join(".") : "<root>";
560
+ return `${path}: ${issue.message}`;
561
+ });
562
+ }
324
563
 
325
564
  // src/embedding-model.ts
326
- import { APIError as APIError2 } from "openai";
327
- import { ProviderError as ProviderError2 } from "@core-ai/core-ai";
328
565
  function createOpenAIEmbeddingModel(client, modelId) {
329
566
  return {
330
567
  provider: "openai",
@@ -344,26 +581,13 @@ function createOpenAIEmbeddingModel(client, modelId) {
344
581
  }
345
582
  };
346
583
  } catch (error) {
347
- throw wrapError2(error);
584
+ throw wrapOpenAIError(error);
348
585
  }
349
586
  }
350
587
  };
351
588
  }
352
- function wrapError2(error) {
353
- if (error instanceof APIError2) {
354
- return new ProviderError2(error.message, "openai", error.status, error);
355
- }
356
- return new ProviderError2(
357
- error instanceof Error ? error.message : String(error),
358
- "openai",
359
- void 0,
360
- error
361
- );
362
- }
363
589
 
364
590
  // src/image-model.ts
365
- import { APIError as APIError3 } from "openai";
366
- import { ProviderError as ProviderError3 } from "@core-ai/core-ai";
367
591
  function createOpenAIImageModel(client, modelId) {
368
592
  return {
369
593
  provider: "openai",
@@ -388,22 +612,11 @@ function createOpenAIImageModel(client, modelId) {
388
612
  }))
389
613
  };
390
614
  } catch (error) {
391
- throw wrapError3(error);
615
+ throw wrapOpenAIError(error);
392
616
  }
393
617
  }
394
618
  };
395
619
  }
396
- function wrapError3(error) {
397
- if (error instanceof APIError3) {
398
- return new ProviderError3(error.message, "openai", error.status, error);
399
- }
400
- return new ProviderError3(
401
- error instanceof Error ? error.message : String(error),
402
- "openai",
403
- void 0,
404
- error
405
- );
406
- }
407
620
 
408
621
  // src/provider.ts
409
622
  function createOpenAI(options = {}) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@core-ai/openai",
3
- "version": "0.2.1",
3
+ "version": "0.4.0",
4
4
  "description": "OpenAI provider package for @core-ai/core-ai",
5
5
  "license": "MIT",
6
6
  "author": "Omnifact (https://omnifact.ai)",
@@ -43,7 +43,7 @@
43
43
  "test:watch": "vitest"
44
44
  },
45
45
  "dependencies": {
46
- "@core-ai/core-ai": "^0.2.1",
46
+ "@core-ai/core-ai": "^0.4.0",
47
47
  "openai": "^6.1.0",
48
48
  "zod-to-json-schema": "^3.25.1"
49
49
  },