@aigne/openai 0.16.16 → 1.74.0-beta

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/README.md +11 -11
  2. package/dist/_virtual/rolldown_runtime.cjs +29 -0
  3. package/dist/index.cjs +10 -0
  4. package/dist/index.d.cts +4 -0
  5. package/dist/index.d.mts +4 -0
  6. package/dist/index.mjs +5 -0
  7. package/dist/openai-chat-model.cjs +371 -0
  8. package/dist/openai-chat-model.d.cts +165 -0
  9. package/dist/openai-chat-model.d.cts.map +1 -0
  10. package/dist/openai-chat-model.d.mts +165 -0
  11. package/dist/openai-chat-model.d.mts.map +1 -0
  12. package/dist/openai-chat-model.mjs +368 -0
  13. package/dist/openai-chat-model.mjs.map +1 -0
  14. package/dist/openai-image-model.cjs +123 -0
  15. package/dist/openai-image-model.d.cts +57 -0
  16. package/dist/openai-image-model.d.cts.map +1 -0
  17. package/dist/openai-image-model.d.mts +57 -0
  18. package/dist/openai-image-model.d.mts.map +1 -0
  19. package/dist/openai-image-model.mjs +123 -0
  20. package/dist/openai-image-model.mjs.map +1 -0
  21. package/dist/openai-video-model.cjs +112 -0
  22. package/dist/openai-video-model.d.cts +95 -0
  23. package/dist/openai-video-model.d.cts.map +1 -0
  24. package/dist/openai-video-model.d.mts +95 -0
  25. package/dist/openai-video-model.d.mts.map +1 -0
  26. package/dist/openai-video-model.mjs +112 -0
  27. package/dist/openai-video-model.mjs.map +1 -0
  28. package/dist/openai.cjs +14 -0
  29. package/dist/openai.mjs +13 -0
  30. package/dist/openai.mjs.map +1 -0
  31. package/package.json +29 -30
  32. package/CHANGELOG.md +0 -2448
  33. package/lib/cjs/index.d.ts +0 -3
  34. package/lib/cjs/index.js +0 -19
  35. package/lib/cjs/openai-chat-model.d.ts +0 -160
  36. package/lib/cjs/openai-chat-model.js +0 -465
  37. package/lib/cjs/openai-image-model.d.ts +0 -55
  38. package/lib/cjs/openai-image-model.js +0 -110
  39. package/lib/cjs/openai-video-model.d.ts +0 -92
  40. package/lib/cjs/openai-video-model.js +0 -118
  41. package/lib/cjs/openai.d.ts +0 -4
  42. package/lib/cjs/openai.js +0 -17
  43. package/lib/cjs/package.json +0 -3
  44. package/lib/dts/index.d.ts +0 -3
  45. package/lib/dts/openai-chat-model.d.ts +0 -160
  46. package/lib/dts/openai-image-model.d.ts +0 -55
  47. package/lib/dts/openai-video-model.d.ts +0 -92
  48. package/lib/dts/openai.d.ts +0 -4
  49. package/lib/esm/index.d.ts +0 -3
  50. package/lib/esm/index.js +0 -3
  51. package/lib/esm/openai-chat-model.d.ts +0 -160
  52. package/lib/esm/openai-chat-model.js +0 -459
  53. package/lib/esm/openai-image-model.d.ts +0 -55
  54. package/lib/esm/openai-image-model.js +0 -106
  55. package/lib/esm/openai-video-model.d.ts +0 -92
  56. package/lib/esm/openai-video-model.js +0 -114
  57. package/lib/esm/openai.d.ts +0 -4
  58. package/lib/esm/openai.js +0 -10
  59. package/lib/esm/package.json +0 -3
@@ -1,55 +0,0 @@
1
- import { type AgentInvokeOptions, ImageModel, type ImageModelInput, type ImageModelOptions, type ImageModelOutput } from "@aigne/core";
2
- import { type Camelize } from "@aigne/core/utils/camelize.js";
3
- import type OpenAI from "openai";
4
- import type { ClientOptions } from "openai";
5
- export interface OpenAIImageModelInput extends ImageModelInput, Camelize<Omit<OpenAI.ImageGenerateParams | OpenAI.ImageEditParams, "prompt" | "model" | "n" | "response_format">> {
6
- }
7
- export interface OpenAIImageModelOutput extends ImageModelOutput {
8
- }
9
- export interface OpenAIImageModelOptions extends ImageModelOptions<OpenAIImageModelInput, OpenAIImageModelOutput> {
10
- /**
11
- * API key for OpenAI API
12
- *
13
- * If not provided, will look for OPENAI_API_KEY in environment variables
14
- */
15
- apiKey?: string;
16
- /**
17
- * Base URL for OpenAI API
18
- *
19
- * Useful for proxies or alternate endpoints
20
- */
21
- baseURL?: string;
22
- /**
23
- * OpenAI model to use
24
- *
25
- * Defaults to 'dall-e-2'
26
- */
27
- model?: string;
28
- /**
29
- * Additional model options to control behavior
30
- */
31
- modelOptions?: Omit<Partial<OpenAIImageModelInput>, "model">;
32
- /**
33
- * Client options for OpenAI API
34
- */
35
- clientOptions?: Partial<ClientOptions>;
36
- }
37
- export declare class OpenAIImageModel extends ImageModel<OpenAIImageModelInput, OpenAIImageModelOutput> {
38
- options?: OpenAIImageModelOptions | undefined;
39
- constructor(options?: OpenAIImageModelOptions | undefined);
40
- protected _client?: OpenAI;
41
- protected apiKeyEnvName: string;
42
- get client(): OpenAI;
43
- get credential(): {
44
- url: string | undefined;
45
- apiKey: string | undefined;
46
- model: string;
47
- };
48
- get modelOptions(): Omit<Partial<OpenAIImageModelInput>, "model"> | undefined;
49
- /**
50
- * Process the input and generate a response
51
- * @param input The input to process
52
- * @returns The generated response
53
- */
54
- process(input: OpenAIImageModelInput, _options: AgentInvokeOptions): Promise<OpenAIImageModelOutput>;
55
- }
@@ -1,110 +0,0 @@
1
- "use strict";
2
- Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.OpenAIImageModel = void 0;
4
- const core_1 = require("@aigne/core");
5
- const camelize_js_1 = require("@aigne/core/utils/camelize.js");
6
- const type_utils_js_1 = require("@aigne/core/utils/type-utils.js");
7
- const zod_1 = require("zod");
8
- const openai_js_1 = require("./openai.js");
9
- const DEFAULT_MODEL = "dall-e-2";
10
- const OUTPUT_MIME_TYPE = "image/png";
11
- const SUPPORTED_PARAMS = {
12
- "dall-e-2": ["prompt", "size", "n"],
13
- "dall-e-3": ["prompt", "size", "n", "quality", "style", "user"],
14
- "gpt-image-1": [
15
- "prompt",
16
- "size",
17
- "background",
18
- "moderation",
19
- "outputCompression",
20
- "outputFormat",
21
- "quality",
22
- "user",
23
- "stream",
24
- ],
25
- };
26
- const SUPPORT_EDIT_MODELS = ["gpt-image-1"];
27
- const openAIImageModelInputSchema = core_1.imageModelInputSchema.extend({});
28
- const openAIImageModelOptionsSchema = zod_1.z.object({
29
- apiKey: zod_1.z.string().optional(),
30
- baseURL: zod_1.z.string().optional(),
31
- model: zod_1.z.string().optional(),
32
- modelOptions: zod_1.z.object({}).optional(),
33
- clientOptions: zod_1.z.object({}).optional(),
34
- });
35
- class OpenAIImageModel extends core_1.ImageModel {
36
- options;
37
- constructor(options) {
38
- super({
39
- ...options,
40
- inputSchema: openAIImageModelInputSchema,
41
- description: options?.description ?? "Draw or edit image by OpenAI image models",
42
- });
43
- this.options = options;
44
- if (options)
45
- (0, type_utils_js_1.checkArguments)(this.name, openAIImageModelOptionsSchema, options);
46
- }
47
- _client;
48
- apiKeyEnvName = "OPENAI_API_KEY";
49
- get client() {
50
- if (this._client)
51
- return this._client;
52
- const { apiKey, url } = this.credential;
53
- if (!apiKey)
54
- throw new Error(`${this.name} requires an API key. Please provide it via \`options.apiKey\`, or set the \`${this.apiKeyEnvName}\` environment variable`);
55
- this._client ??= new openai_js_1.CustomOpenAI({
56
- baseURL: url,
57
- apiKey,
58
- ...this.options?.clientOptions,
59
- });
60
- return this._client;
61
- }
62
- get credential() {
63
- return {
64
- url: this.options?.baseURL || process.env.OPENAI_BASE_URL,
65
- apiKey: this.options?.apiKey || process.env[this.apiKeyEnvName],
66
- model: this.options?.model || DEFAULT_MODEL,
67
- };
68
- }
69
- get modelOptions() {
70
- return this.options?.modelOptions;
71
- }
72
- /**
73
- * Process the input and generate a response
74
- * @param input The input to process
75
- * @returns The generated response
76
- */
77
- async process(input, _options) {
78
- const model = input.modelOptions?.model || this.credential.model;
79
- if (input.image?.length && !SUPPORT_EDIT_MODELS.includes(model)) {
80
- throw new Error(`Model ${model} does not support image editing`);
81
- }
82
- const body = {
83
- ...(0, camelize_js_1.snakelize)((0, type_utils_js_1.pick)({ ...this.modelOptions, ...input.modelOptions, ...input }, SUPPORTED_PARAMS[model] || SUPPORTED_PARAMS[DEFAULT_MODEL])),
84
- model,
85
- };
86
- const response = input.image?.length
87
- ? (await this.client.images.edit({
88
- ...body,
89
- image: await Promise.all(input.image.map((image) => this.transformFileType("file", image).then((file) => new File([Buffer.from(file.data, "base64")], file.filename || "image.png", {
90
- type: file.mimeType,
91
- })))),
92
- }, { stream: false }))
93
- : (await this.client.images.generate({ ...body }, { stream: false }));
94
- return {
95
- images: (response.data ?? []).map((image) => {
96
- if (image.url)
97
- return { type: "url", url: image.url, mimeType: OUTPUT_MIME_TYPE };
98
- if (image.b64_json)
99
- return { type: "file", data: image.b64_json, mimeType: OUTPUT_MIME_TYPE };
100
- throw new Error("Image response does not contain a valid URL or base64 data");
101
- }),
102
- usage: {
103
- inputTokens: response.usage?.input_tokens || 0,
104
- outputTokens: response.usage?.output_tokens || 0,
105
- },
106
- model,
107
- };
108
- }
109
- }
110
- exports.OpenAIImageModel = OpenAIImageModel;
@@ -1,92 +0,0 @@
1
- import { type AgentInvokeOptions, VideoModel, type VideoModelInput, type VideoModelOptions, type VideoModelOutput } from "@aigne/core";
2
- import type OpenAI from "openai";
3
- import type { ClientOptions } from "openai";
4
- /**
5
- * Input options for OpenAI Video Model
6
- */
7
- export interface OpenAIVideoModelInput extends VideoModelInput {
8
- /**
9
- * Sora model to use for video generation
10
- *
11
- * - `sora-2`: Standard version, lower cost
12
- * - `sora-2-pro`: Pro version, higher quality
13
- *
14
- * @default "sora-2"
15
- */
16
- model?: "sora-2" | "sora-2-pro";
17
- /**
18
- * Video resolution (width x height)
19
- *
20
- * - `720x1280`: Vertical video (9:16)
21
- * - `1280x720`: Horizontal video (16:9)
22
- * - `1024x1792`: Vertical video (9:16, higher resolution)
23
- * - `1792x1024`: Horizontal video (16:9, higher resolution)
24
- */
25
- size?: "720x1280" | "1280x720" | "1024x1792" | "1792x1024";
26
- /**
27
- * Video duration in seconds
28
- *
29
- * @default "4"
30
- */
31
- seconds?: "4" | "8" | "12";
32
- }
33
- /**
34
- * Output from OpenAI Video Model
35
- */
36
- export interface OpenAIVideoModelOutput extends VideoModelOutput {
37
- }
38
- /**
39
- * Configuration options for OpenAI Video Model
40
- */
41
- export interface OpenAIVideoModelOptions extends VideoModelOptions<OpenAIVideoModelInput, OpenAIVideoModelOutput> {
42
- /**
43
- * API key for OpenAI API
44
- *
45
- * If not provided, will look for OPENAI_API_KEY in environment variables
46
- */
47
- apiKey?: string;
48
- /**
49
- * Base URL for OpenAI API
50
- *
51
- * Useful for proxies or alternate endpoints
52
- */
53
- baseURL?: string;
54
- /**
55
- * OpenAI model to use
56
- *
57
- * Defaults to 'sora-2'
58
- */
59
- model?: string;
60
- /**
61
- * Additional model options to control behavior
62
- */
63
- modelOptions?: Omit<Partial<OpenAIVideoModelInput>, "model">;
64
- /**
65
- * Client options for OpenAI API
66
- */
67
- clientOptions?: Partial<ClientOptions>;
68
- /**
69
- * Polling interval in milliseconds for checking video generation status
70
- *
71
- * Defaults to 2000ms (2 seconds)
72
- */
73
- pollingInterval?: number;
74
- }
75
- export declare class OpenAIVideoModel extends VideoModel<OpenAIVideoModelInput, OpenAIVideoModelOutput> {
76
- options?: OpenAIVideoModelOptions | undefined;
77
- constructor(options?: OpenAIVideoModelOptions | undefined);
78
- /**
79
- * @hidden
80
- */
81
- protected _client?: OpenAI;
82
- protected apiKeyEnvName: string;
83
- get client(): OpenAI;
84
- get credential(): {
85
- url: string | undefined;
86
- apiKey: string | undefined;
87
- model: string;
88
- };
89
- get modelOptions(): Omit<Partial<OpenAIVideoModelInput>, "model"> | undefined;
90
- downloadToFile(videoId: string): Promise<string>;
91
- process(input: OpenAIVideoModelInput, _options: AgentInvokeOptions): Promise<OpenAIVideoModelOutput>;
92
- }
@@ -1,118 +0,0 @@
1
- "use strict";
2
- Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.OpenAIVideoModel = void 0;
4
- const core_1 = require("@aigne/core");
5
- const logger_js_1 = require("@aigne/core/utils/logger.js");
6
- const type_utils_js_1 = require("@aigne/core/utils/type-utils.js");
7
- const zod_1 = require("zod");
8
- const openai_js_1 = require("./openai.js");
9
- const DEFAULT_MODEL = "sora-2";
10
- const DEFAULT_SECONDS = 4;
11
- const openAIVideoModelInputSchema = core_1.videoModelInputSchema.extend({
12
- model: zod_1.z.enum(["sora-2", "sora-2-pro"]).optional(),
13
- seconds: zod_1.z.enum(["4", "8", "12"]).optional(),
14
- size: zod_1.z.enum(["720x1280", "1280x720", "1024x1792", "1792x1024"]).optional(),
15
- });
16
- const openAIVideoModelOptionsSchema = zod_1.z.object({
17
- apiKey: zod_1.z.string().optional(),
18
- baseURL: zod_1.z.string().optional(),
19
- model: zod_1.z.string().optional(),
20
- modelOptions: zod_1.z.object({}).optional(),
21
- clientOptions: zod_1.z.object({}).optional(),
22
- pollingInterval: zod_1.z.number().optional(),
23
- });
24
- class OpenAIVideoModel extends core_1.VideoModel {
25
- options;
26
- constructor(options) {
27
- super({
28
- ...options,
29
- description: options?.description ?? "Generate videos using OpenAI Sora models",
30
- inputSchema: openAIVideoModelInputSchema,
31
- });
32
- this.options = options;
33
- if (options)
34
- (0, type_utils_js_1.checkArguments)(this.name, openAIVideoModelOptionsSchema, options);
35
- }
36
- /**
37
- * @hidden
38
- */
39
- _client;
40
- apiKeyEnvName = "OPENAI_API_KEY";
41
- get client() {
42
- const { apiKey, url } = this.credential;
43
- if (!apiKey)
44
- throw new Error(`${this.name} requires an API key. Please provide it via \`options.apiKey\`, or set the \`${this.apiKeyEnvName}\` environment variable`);
45
- this._client ??= new openai_js_1.CustomOpenAI({
46
- baseURL: url,
47
- apiKey,
48
- ...this.options?.clientOptions,
49
- });
50
- return this._client;
51
- }
52
- get credential() {
53
- return {
54
- url: this.options?.baseURL || process.env.OPENAI_BASE_URL,
55
- apiKey: this.options?.apiKey || process.env[this.apiKeyEnvName],
56
- model: this.options?.model || DEFAULT_MODEL,
57
- };
58
- }
59
- get modelOptions() {
60
- return this.options?.modelOptions;
61
- }
62
- async downloadToFile(videoId) {
63
- logger_js_1.logger.debug("Downloading video content...");
64
- const content = await this.client.videos.downloadContent(videoId);
65
- const arrayBuffer = await content.arrayBuffer();
66
- const buffer = Buffer.from(arrayBuffer);
67
- return buffer.toString("base64");
68
- }
69
- async process(input, _options) {
70
- const model = input.model ?? input.modelOptions?.model ?? this.credential.model;
71
- const createParams = {
72
- model: model,
73
- prompt: input.prompt,
74
- };
75
- if (input.seconds)
76
- createParams.seconds = input.seconds;
77
- if (input.size)
78
- createParams.size = input.size;
79
- if (input.image) {
80
- createParams.input_reference = await this.transformFileType("file", input.image).then((file) => new File([Buffer.from(file.data, "base64")], file.filename || "image.png", {
81
- type: file.mimeType,
82
- }));
83
- }
84
- let video = await this.client.videos.create(createParams);
85
- logger_js_1.logger.debug(`Video generation started: ${video.id}`);
86
- const pollingInterval = this.options?.pollingInterval ?? 2000;
87
- while (video.status === "in_progress" || video.status === "queued") {
88
- await new Promise((resolve) => setTimeout(resolve, pollingInterval));
89
- video = await this.client.videos.retrieve(video.id);
90
- const progress = video.progress ?? 0;
91
- const statusText = video.status === "queued" ? "Queued" : "Processing";
92
- logger_js_1.logger.debug(`${statusText}: ${progress.toFixed(1)}%`);
93
- }
94
- if (video.status === "failed") {
95
- throw new Error(`Video generation failed: ${video.error?.message || "Unknown error"}`);
96
- }
97
- if (video.status !== "completed") {
98
- throw new Error(`Unexpected video status: ${video.status}`);
99
- }
100
- return {
101
- videos: [
102
- {
103
- type: "file",
104
- data: await this.downloadToFile(video.id),
105
- mimeType: "video/mp4",
106
- filename: `${video.id}.mp4`,
107
- },
108
- ],
109
- usage: {
110
- inputTokens: 0,
111
- outputTokens: 0,
112
- },
113
- model,
114
- seconds: input.seconds ? parseInt(input.seconds, 10) : DEFAULT_SECONDS,
115
- };
116
- }
117
- }
118
- exports.OpenAIVideoModel = OpenAIVideoModel;
@@ -1,4 +0,0 @@
1
- import OpenAI, { type APIError } from "openai";
2
- export declare class CustomOpenAI extends OpenAI {
3
- protected makeStatusError(status: number, error: object, message: string | undefined, headers: Headers): APIError;
4
- }
package/lib/cjs/openai.js DELETED
@@ -1,17 +0,0 @@
1
- "use strict";
2
- var __importDefault = (this && this.__importDefault) || function (mod) {
3
- return (mod && mod.__esModule) ? mod : { "default": mod };
4
- };
5
- Object.defineProperty(exports, "__esModule", { value: true });
6
- exports.CustomOpenAI = void 0;
7
- const openai_1 = __importDefault(require("openai"));
8
- // Use a custom OpenAI client to handle API errors for better error messages
9
- class CustomOpenAI extends openai_1.default {
10
- makeStatusError(status, error, message, headers) {
11
- if (!("error" in error) || typeof error.error !== "string") {
12
- message = JSON.stringify(error);
13
- }
14
- return super.makeStatusError(status, error, message, headers);
15
- }
16
- }
17
- exports.CustomOpenAI = CustomOpenAI;
@@ -1,3 +0,0 @@
1
- {
2
- "type": "commonjs"
3
- }
@@ -1,3 +0,0 @@
1
- export * from "./openai-chat-model.js";
2
- export * from "./openai-image-model.js";
3
- export * from "./openai-video-model.js";
@@ -1,160 +0,0 @@
1
- import { type AgentInvokeOptions, type AgentProcessResult, ChatModel, type ChatModelInput, type ChatModelInputMessage, type ChatModelInputTool, type ChatModelOptions, type ChatModelOutput } from "@aigne/core";
2
- import { type PromiseOrValue } from "@aigne/core/utils/type-utils.js";
3
- import type { ClientOptions, OpenAI } from "openai";
4
- import type { ChatCompletionMessageParam, ChatCompletionTool } from "openai/resources";
5
- import { z } from "zod";
6
- export interface OpenAIChatModelCapabilities {
7
- supportsNativeStructuredOutputs: boolean;
8
- supportsEndWithSystemMessage: boolean;
9
- supportsToolsUseWithJsonSchema: boolean;
10
- supportsParallelToolCalls: boolean;
11
- supportsToolsEmptyParameters: boolean;
12
- supportsToolStreaming: boolean;
13
- supportsTemperature: boolean;
14
- }
15
- /**
16
- * Configuration options for OpenAI Chat Model
17
- */
18
- export interface OpenAIChatModelOptions extends ChatModelOptions {
19
- /**
20
- * API key for OpenAI API
21
- *
22
- * If not provided, will look for OPENAI_API_KEY in environment variables
23
- */
24
- apiKey?: string;
25
- /**
26
- * Base URL for OpenAI API
27
- *
28
- * Useful for proxies or alternate endpoints
29
- */
30
- baseURL?: string;
31
- /**
32
- * Client options for OpenAI API
33
- */
34
- clientOptions?: Partial<ClientOptions>;
35
- }
36
- /**
37
- * @hidden
38
- */
39
- export declare const openAIChatModelOptionsSchema: z.ZodObject<{
40
- apiKey: z.ZodOptional<z.ZodString>;
41
- baseURL: z.ZodOptional<z.ZodString>;
42
- model: z.ZodOptional<z.ZodString>;
43
- modelOptions: z.ZodOptional<z.ZodObject<{
44
- model: z.ZodOptional<z.ZodString>;
45
- temperature: z.ZodOptional<z.ZodNumber>;
46
- topP: z.ZodOptional<z.ZodNumber>;
47
- frequencyPenalty: z.ZodOptional<z.ZodNumber>;
48
- presencePenalty: z.ZodOptional<z.ZodNumber>;
49
- parallelToolCalls: z.ZodDefault<z.ZodOptional<z.ZodBoolean>>;
50
- }, "strip", z.ZodTypeAny, {
51
- parallelToolCalls: boolean;
52
- model?: string | undefined;
53
- temperature?: number | undefined;
54
- topP?: number | undefined;
55
- frequencyPenalty?: number | undefined;
56
- presencePenalty?: number | undefined;
57
- }, {
58
- model?: string | undefined;
59
- temperature?: number | undefined;
60
- topP?: number | undefined;
61
- frequencyPenalty?: number | undefined;
62
- presencePenalty?: number | undefined;
63
- parallelToolCalls?: boolean | undefined;
64
- }>>;
65
- }, "strip", z.ZodTypeAny, {
66
- model?: string | undefined;
67
- apiKey?: string | undefined;
68
- baseURL?: string | undefined;
69
- modelOptions?: {
70
- parallelToolCalls: boolean;
71
- model?: string | undefined;
72
- temperature?: number | undefined;
73
- topP?: number | undefined;
74
- frequencyPenalty?: number | undefined;
75
- presencePenalty?: number | undefined;
76
- } | undefined;
77
- }, {
78
- model?: string | undefined;
79
- apiKey?: string | undefined;
80
- baseURL?: string | undefined;
81
- modelOptions?: {
82
- model?: string | undefined;
83
- temperature?: number | undefined;
84
- topP?: number | undefined;
85
- frequencyPenalty?: number | undefined;
86
- presencePenalty?: number | undefined;
87
- parallelToolCalls?: boolean | undefined;
88
- } | undefined;
89
- }>;
90
- /**
91
- * Implementation of the ChatModel interface for OpenAI's API
92
- *
93
- * This model provides access to OpenAI's capabilities including:
94
- * - Text generation
95
- * - Tool use with parallel tool calls
96
- * - JSON structured output
97
- * - Image understanding
98
- *
99
- * Default model: 'gpt-4o-mini'
100
- *
101
- * @example
102
- * Here's how to create and use an OpenAI chat model:
103
- * {@includeCode ../test/openai-chat-model.test.ts#example-openai-chat-model}
104
- *
105
- * @example
106
- * Here's an example with streaming response:
107
- * {@includeCode ../test/openai-chat-model.test.ts#example-openai-chat-model-stream}
108
- */
109
- export declare class OpenAIChatModel extends ChatModel {
110
- options?: OpenAIChatModelOptions | undefined;
111
- constructor(options?: OpenAIChatModelOptions | undefined);
112
- /**
113
- * @hidden
114
- */
115
- protected _client?: OpenAI;
116
- protected apiKeyEnvName: string;
117
- protected apiKeyDefault: string | undefined;
118
- protected supportsNativeStructuredOutputs: boolean;
119
- protected supportsToolsUseWithJsonSchema: boolean;
120
- protected supportsParallelToolCalls: boolean;
121
- protected supportsToolsEmptyParameters: boolean;
122
- protected supportsToolStreaming: boolean;
123
- protected supportsTemperature: boolean;
124
- get client(): OpenAI;
125
- get credential(): {
126
- url: string | undefined;
127
- apiKey: string | undefined;
128
- model: string;
129
- };
130
- /**
131
- * Process the input and generate a response
132
- * @param input The input to process
133
- * @returns The generated response
134
- */
135
- process(input: ChatModelInput, options: AgentInvokeOptions): PromiseOrValue<AgentProcessResult<ChatModelOutput>>;
136
- private getReasoningEffort;
137
- private _process;
138
- private getParallelToolCalls;
139
- protected getRunMessages(input: ChatModelInput): Promise<ChatCompletionMessageParam[]>;
140
- private getRunResponseFormat;
141
- private requestStructuredOutput;
142
- private extractResultFromStream;
143
- /**
144
- * Controls how optional fields are handled in JSON schema conversion
145
- * - "anyOf": All fields are required but can be null (default)
146
- * - "optional": Fields marked as optional in schema remain optional
147
- */
148
- protected optionalFieldMode?: "anyOf" | "optional";
149
- protected jsonSchemaToOpenAIJsonSchema(schema: Record<string, unknown>): Record<string, unknown>;
150
- }
151
- /**
152
- * @hidden
153
- */
154
- export declare function contentsFromInputMessages(messages: ChatModelInputMessage[]): Promise<ChatCompletionMessageParam[]>;
155
- /**
156
- * @hidden
157
- */
158
- export declare function toolsFromInputTools(tools?: ChatModelInputTool[], options?: {
159
- addTypeToEmptyParameters?: boolean;
160
- }): ChatCompletionTool[] | undefined;
@@ -1,55 +0,0 @@
1
- import { type AgentInvokeOptions, ImageModel, type ImageModelInput, type ImageModelOptions, type ImageModelOutput } from "@aigne/core";
2
- import { type Camelize } from "@aigne/core/utils/camelize.js";
3
- import type OpenAI from "openai";
4
- import type { ClientOptions } from "openai";
5
- export interface OpenAIImageModelInput extends ImageModelInput, Camelize<Omit<OpenAI.ImageGenerateParams | OpenAI.ImageEditParams, "prompt" | "model" | "n" | "response_format">> {
6
- }
7
- export interface OpenAIImageModelOutput extends ImageModelOutput {
8
- }
9
- export interface OpenAIImageModelOptions extends ImageModelOptions<OpenAIImageModelInput, OpenAIImageModelOutput> {
10
- /**
11
- * API key for OpenAI API
12
- *
13
- * If not provided, will look for OPENAI_API_KEY in environment variables
14
- */
15
- apiKey?: string;
16
- /**
17
- * Base URL for OpenAI API
18
- *
19
- * Useful for proxies or alternate endpoints
20
- */
21
- baseURL?: string;
22
- /**
23
- * OpenAI model to use
24
- *
25
- * Defaults to 'dall-e-2'
26
- */
27
- model?: string;
28
- /**
29
- * Additional model options to control behavior
30
- */
31
- modelOptions?: Omit<Partial<OpenAIImageModelInput>, "model">;
32
- /**
33
- * Client options for OpenAI API
34
- */
35
- clientOptions?: Partial<ClientOptions>;
36
- }
37
- export declare class OpenAIImageModel extends ImageModel<OpenAIImageModelInput, OpenAIImageModelOutput> {
38
- options?: OpenAIImageModelOptions | undefined;
39
- constructor(options?: OpenAIImageModelOptions | undefined);
40
- protected _client?: OpenAI;
41
- protected apiKeyEnvName: string;
42
- get client(): OpenAI;
43
- get credential(): {
44
- url: string | undefined;
45
- apiKey: string | undefined;
46
- model: string;
47
- };
48
- get modelOptions(): Omit<Partial<OpenAIImageModelInput>, "model"> | undefined;
49
- /**
50
- * Process the input and generate a response
51
- * @param input The input to process
52
- * @returns The generated response
53
- */
54
- process(input: OpenAIImageModelInput, _options: AgentInvokeOptions): Promise<OpenAIImageModelOutput>;
55
- }