@aigne/openai 0.16.16 → 1.74.0-beta

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/README.md +11 -11
  2. package/dist/_virtual/rolldown_runtime.cjs +29 -0
  3. package/dist/index.cjs +10 -0
  4. package/dist/index.d.cts +4 -0
  5. package/dist/index.d.mts +4 -0
  6. package/dist/index.mjs +5 -0
  7. package/dist/openai-chat-model.cjs +371 -0
  8. package/dist/openai-chat-model.d.cts +165 -0
  9. package/dist/openai-chat-model.d.cts.map +1 -0
  10. package/dist/openai-chat-model.d.mts +165 -0
  11. package/dist/openai-chat-model.d.mts.map +1 -0
  12. package/dist/openai-chat-model.mjs +368 -0
  13. package/dist/openai-chat-model.mjs.map +1 -0
  14. package/dist/openai-image-model.cjs +123 -0
  15. package/dist/openai-image-model.d.cts +57 -0
  16. package/dist/openai-image-model.d.cts.map +1 -0
  17. package/dist/openai-image-model.d.mts +57 -0
  18. package/dist/openai-image-model.d.mts.map +1 -0
  19. package/dist/openai-image-model.mjs +123 -0
  20. package/dist/openai-image-model.mjs.map +1 -0
  21. package/dist/openai-video-model.cjs +112 -0
  22. package/dist/openai-video-model.d.cts +95 -0
  23. package/dist/openai-video-model.d.cts.map +1 -0
  24. package/dist/openai-video-model.d.mts +95 -0
  25. package/dist/openai-video-model.d.mts.map +1 -0
  26. package/dist/openai-video-model.mjs +112 -0
  27. package/dist/openai-video-model.mjs.map +1 -0
  28. package/dist/openai.cjs +14 -0
  29. package/dist/openai.mjs +13 -0
  30. package/dist/openai.mjs.map +1 -0
  31. package/package.json +29 -30
  32. package/CHANGELOG.md +0 -2448
  33. package/lib/cjs/index.d.ts +0 -3
  34. package/lib/cjs/index.js +0 -19
  35. package/lib/cjs/openai-chat-model.d.ts +0 -160
  36. package/lib/cjs/openai-chat-model.js +0 -465
  37. package/lib/cjs/openai-image-model.d.ts +0 -55
  38. package/lib/cjs/openai-image-model.js +0 -110
  39. package/lib/cjs/openai-video-model.d.ts +0 -92
  40. package/lib/cjs/openai-video-model.js +0 -118
  41. package/lib/cjs/openai.d.ts +0 -4
  42. package/lib/cjs/openai.js +0 -17
  43. package/lib/cjs/package.json +0 -3
  44. package/lib/dts/index.d.ts +0 -3
  45. package/lib/dts/openai-chat-model.d.ts +0 -160
  46. package/lib/dts/openai-image-model.d.ts +0 -55
  47. package/lib/dts/openai-video-model.d.ts +0 -92
  48. package/lib/dts/openai.d.ts +0 -4
  49. package/lib/esm/index.d.ts +0 -3
  50. package/lib/esm/index.js +0 -3
  51. package/lib/esm/openai-chat-model.d.ts +0 -160
  52. package/lib/esm/openai-chat-model.js +0 -459
  53. package/lib/esm/openai-image-model.d.ts +0 -55
  54. package/lib/esm/openai-image-model.js +0 -106
  55. package/lib/esm/openai-video-model.d.ts +0 -92
  56. package/lib/esm/openai-video-model.js +0 -114
  57. package/lib/esm/openai.d.ts +0 -4
  58. package/lib/esm/openai.js +0 -10
  59. package/lib/esm/package.json +0 -3
package/dist/openai-video-model.d.mts ADDED
@@ -0,0 +1,95 @@
+ import { AgentInvokeOptions, VideoModel, VideoModelInput, VideoModelOptions, VideoModelOutput } from "@aigne/core";
+ import OpenAI, { ClientOptions } from "openai";
+
+ //#region src/openai-video-model.d.ts
+ /**
+ * Input options for OpenAI Video Model
+ */
+ interface OpenAIVideoModelInput extends VideoModelInput {
+ /**
+ * Sora model to use for video generation
+ *
+ * - `sora-2`: Standard version, lower cost
+ * - `sora-2-pro`: Pro version, higher quality
+ *
+ * @default "sora-2"
+ */
+ model?: "sora-2" | "sora-2-pro";
+ /**
+ * Video resolution (width x height)
+ *
+ * - `720x1280`: Vertical video (9:16)
+ * - `1280x720`: Horizontal video (16:9)
+ * - `1024x1792`: Vertical video (9:16, higher resolution)
+ * - `1792x1024`: Horizontal video (16:9, higher resolution)
+ */
+ size?: "720x1280" | "1280x720" | "1024x1792" | "1792x1024";
+ /**
+ * Video duration in seconds
+ *
+ * @default "4"
+ */
+ seconds?: "4" | "8" | "12";
+ }
+ /**
+ * Output from OpenAI Video Model
+ */
+ interface OpenAIVideoModelOutput extends VideoModelOutput {}
+ /**
+ * Configuration options for OpenAI Video Model
+ */
+ interface OpenAIVideoModelOptions extends VideoModelOptions<OpenAIVideoModelInput, OpenAIVideoModelOutput> {
+ /**
+ * API key for OpenAI API
+ *
+ * If not provided, will look for OPENAI_API_KEY in environment variables
+ */
+ apiKey?: string;
+ /**
+ * Base URL for OpenAI API
+ *
+ * Useful for proxies or alternate endpoints
+ */
+ baseURL?: string;
+ /**
+ * OpenAI model to use
+ *
+ * Defaults to 'sora-2'
+ */
+ model?: string;
+ /**
+ * Additional model options to control behavior
+ */
+ modelOptions?: Omit<Partial<OpenAIVideoModelInput>, "model">;
+ /**
+ * Client options for OpenAI API
+ */
+ clientOptions?: Partial<ClientOptions>;
+ /**
+ * Polling interval in milliseconds for checking video generation status
+ *
+ * Defaults to 2000ms (2 seconds)
+ */
+ pollingInterval?: number;
+ }
+ declare class OpenAIVideoModel extends VideoModel<OpenAIVideoModelInput, OpenAIVideoModelOutput> {
+ options?: OpenAIVideoModelOptions | undefined;
+ constructor(options?: OpenAIVideoModelOptions | undefined);
+ /**
+ * @hidden
+ */
+ protected _client?: OpenAI;
+ protected apiKeyEnvName: string;
+ get client(): OpenAI;
+ get credential(): {
+ url: string | undefined;
+ apiKey: string | undefined;
+ model: string;
+ };
+ get modelOptions(): Omit<Partial<OpenAIVideoModelInput>, "model"> | undefined;
+ downloadToFile(videoId: string): Promise<string>;
+ process(input: OpenAIVideoModelInput, _options: AgentInvokeOptions): Promise<OpenAIVideoModelOutput>;
+ }
+ //#endregion
+ export { OpenAIVideoModel, OpenAIVideoModelInput, OpenAIVideoModelOptions, OpenAIVideoModelOutput };
+ //# sourceMappingURL=openai-video-model.d.mts.map
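
These declarations are the public surface of the new video model. A minimal consumer sketch, assuming the package's top-level export and the `invoke` entry point that @aigne/core agents expose (neither is shown in this hunk):

import { OpenAIVideoModel } from "@aigne/openai";

// Configure the model; every option below maps to a field declared above.
const videoModel = new OpenAIVideoModel({
  apiKey: process.env.OPENAI_API_KEY, // optional: falls back to the OPENAI_API_KEY env var
  model: "sora-2",                    // or "sora-2-pro" for higher quality
  pollingInterval: 5000,              // poll generation status every 5 s instead of the 2 s default
});

// Sizes and durations are string enums, matching the declarations above.
const output = await videoModel.invoke({
  prompt: "A drone shot over a foggy pine forest at sunrise",
  size: "1280x720", // horizontal 16:9
  seconds: "8",
});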
package/dist/openai-video-model.d.mts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"openai-video-model.d.mts","names":[],"sources":["../src/openai-video-model.ts"],"mappings":";;;;;AAqBA;AAgCA;UAhCiB,qBAAA,SAA8B,eAAA;EAAA;AAgC/C;AAKA;;;;;;EArC+C,KAAA;EAAA;AAgC/C;AAKA;;;;;;EArC+C,IAAA;EAAA;AAgC/C;AAKA;;;EArC+C,OAAA;AAAA;AAAA;AAgC/C;AAKA;AArC+C,UAgC9B,sBAAA,SAA+B,gBAAA;AAAA;AAKhD;;AALgD,UAK/B,uBAAA,SACP,iBAAA,CAAkB,qBAAA,EAAuB,sBAAA;EAAA;;;;;EAAA,MAAA;EAAA;;;;;EAAA,OAAA;EAAA;;;;;EAAA,KAAA;EAAA;;;EAAA,YAAA,GAyBlC,IAAA,CAAK,OAAA,CAAQ,qBAAA;EAAA;;;EAAA,aAAA,GAKZ,OAAA,CAAQ,aAAA;EAAA;;;;AAyB1B;EAzB0B,eAAA;AAAA;AAAA,cAyBb,gBAAA,SAAyB,UAAA,CAAW,qBAAA,EAAuB,sBAAA;EAAA,OAAA,GAChC,uBAAA;EAAA,YAAA,OAAA,GAAA,uBAAA;EAAA;;;EAAA,UAAA,OAAA,GAYlB,MAAA;EAAA,UAAA,aAAA;EAAA,IAAA,OAAA,GAIV,MAAA;EAAA,IAAA,WAAA;IAAA,GAAA;IAAA,MAAA;IAAA,KAAA;EAAA;EAAA,IAAA,aAAA,GAuBM,IAAA,CAAA,OAAA,CAAA,qBAAA;EAAA,eAAA,OAAA,WAIuB,OAAA;EAAA,QAAA,KAAA,EAS9B,qBAAA,EAAA,QAAA,EACG,kBAAA,GACT,OAAA,CAAQ,sBAAA;AAAA"}
package/dist/openai-video-model.mjs ADDED
@@ -0,0 +1,112 @@
+ import { CustomOpenAI } from "./openai.mjs";
+ import { VideoModel, videoModelInputSchema } from "@aigne/core";
+ import { logger } from "@aigne/core/utils/logger";
+ import { checkArguments } from "@aigne/core/utils/type-utils";
+ import { z } from "zod";
+
+ //#region src/openai-video-model.ts
+ const DEFAULT_MODEL = "sora-2";
+ const DEFAULT_SECONDS = 4;
+ const openAIVideoModelInputSchema = videoModelInputSchema.extend({
+ model: z.enum(["sora-2", "sora-2-pro"]).optional(),
+ seconds: z.enum([
+ "4",
+ "8",
+ "12"
+ ]).optional(),
+ size: z.enum([
+ "720x1280",
+ "1280x720",
+ "1024x1792",
+ "1792x1024"
+ ]).optional()
+ });
+ const openAIVideoModelOptionsSchema = z.object({
+ apiKey: z.string().optional(),
+ baseURL: z.string().optional(),
+ model: z.string().optional(),
+ modelOptions: z.object({}).optional(),
+ clientOptions: z.object({}).optional(),
+ pollingInterval: z.number().optional()
+ });
+ var OpenAIVideoModel = class extends VideoModel {
+ constructor(options) {
+ super({
+ ...options,
+ description: options?.description ?? "Generate videos using OpenAI Sora models",
+ inputSchema: openAIVideoModelInputSchema
+ });
+ this.options = options;
+ if (options) checkArguments(this.name, openAIVideoModelOptionsSchema, options);
+ }
+ /**
+ * @hidden
+ */
+ _client;
+ apiKeyEnvName = "OPENAI_API_KEY";
+ get client() {
+ const { apiKey, url } = this.credential;
+ if (!apiKey) throw new Error(`${this.name} requires an API key. Please provide it via \`options.apiKey\`, or set the \`${this.apiKeyEnvName}\` environment variable`);
+ this._client ??= new CustomOpenAI({
+ baseURL: url,
+ apiKey,
+ ...this.options?.clientOptions
+ });
+ return this._client;
+ }
+ get credential() {
+ return {
+ url: this.options?.baseURL || process.env.OPENAI_BASE_URL,
+ apiKey: this.options?.apiKey || process.env[this.apiKeyEnvName],
+ model: this.options?.model || DEFAULT_MODEL
+ };
+ }
+ get modelOptions() {
+ return this.options?.modelOptions;
+ }
+ async downloadToFile(videoId) {
+ logger.debug("Downloading video content...");
+ const arrayBuffer = await (await this.client.videos.downloadContent(videoId)).arrayBuffer();
+ return Buffer.from(arrayBuffer).toString("base64");
+ }
+ async process(input, _options) {
+ const model = input.model ?? input.modelOptions?.model ?? this.credential.model;
+ const createParams = {
+ model,
+ prompt: input.prompt
+ };
+ if (input.seconds) createParams.seconds = input.seconds;
+ if (input.size) createParams.size = input.size;
+ if (input.image) createParams.input_reference = await this.transformFileType("file", input.image).then((file) => new File([Buffer.from(file.data, "base64")], file.filename || "image.png", { type: file.mimeType }));
+ let video = await this.client.videos.create(createParams);
+ logger.debug(`Video generation started: ${video.id}`);
+ const pollingInterval = this.options?.pollingInterval ?? 2e3;
+ while (video.status === "in_progress" || video.status === "queued") {
+ await new Promise((resolve) => setTimeout(resolve, pollingInterval));
+ video = await this.client.videos.retrieve(video.id);
+ const progress = video.progress ?? 0;
+ const statusText = video.status === "queued" ? "Queued" : "Processing";
+ logger.debug(`${statusText}: ${progress.toFixed(1)}%`);
+ }
+ if (video.status === "failed") throw new Error(`Video generation failed: ${video.error?.message || "Unknown error"}`);
+ if (video.status !== "completed") throw new Error(`Unexpected video status: ${video.status}`);
+ return {
+ videos: [{
+ type: "file",
+ data: await this.downloadToFile(video.id),
+ mimeType: "video/mp4",
+ filename: `${video.id}.mp4`
+ }],
+ usage: {
+ inputTokens: 0,
+ outputTokens: 0
+ },
+ model,
+ seconds: input.seconds ? parseInt(input.seconds, 10) : DEFAULT_SECONDS
+ };
+ }
+ };
+
+ //#endregion
+ export { OpenAIVideoModel };
+ //# sourceMappingURL=openai-video-model.mjs.map
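
The implementation above creates the video, polls `videos.retrieve` until the job leaves `queued`/`in_progress`, and returns the result base64-encoded via `downloadToFile`. A short sketch of persisting that output to disk, again assuming the `invoke` entry point from @aigne/core (the `videos` field shape is taken from the return value above):

import { writeFile } from "node:fs/promises";
import { OpenAIVideoModel } from "@aigne/openai";

const model = new OpenAIVideoModel();
const { videos } = await model.invoke({ prompt: "A paper boat drifting down a rain gutter" });

// Each entry is { type: "file", data: <base64>, mimeType: "video/mp4", filename: "<video id>.mp4" }.
for (const video of videos) {
  await writeFile(video.filename ?? "output.mp4", Buffer.from(video.data, "base64"));
}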
package/dist/openai-video-model.mjs.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"openai-video-model.mjs","names":[],"sources":["../src/openai-video-model.ts"],"sourcesContent":["import {\n type AgentInvokeOptions,\n VideoModel,\n type VideoModelInput,\n type VideoModelOptions,\n type VideoModelOutput,\n videoModelInputSchema,\n} from \"@aigne/core\";\nimport { logger } from \"@aigne/core/utils/logger\";\nimport { checkArguments } from \"@aigne/core/utils/type-utils\";\nimport type OpenAI from \"openai\";\nimport type { ClientOptions } from \"openai\";\nimport { type ZodType, z } from \"zod\";\nimport { CustomOpenAI } from \"./openai.js\";\n\nconst DEFAULT_MODEL = \"sora-2\";\nconst DEFAULT_SECONDS = 4;\n\n/**\n * Input options for OpenAI Video Model\n */\nexport interface OpenAIVideoModelInput extends VideoModelInput {\n /**\n * Sora model to use for video generation\n *\n * - `sora-2`: Standard version, lower cost\n * - `sora-2-pro`: Pro version, higher quality\n *\n * @default \"sora-2\"\n */\n model?: \"sora-2\" | \"sora-2-pro\";\n\n /**\n * Video resolution (width x height)\n *\n * - `720x1280`: Vertical video (9:16)\n * - `1280x720`: Horizontal video (16:9)\n * - `1024x1792`: Vertical video (9:16, higher resolution)\n * - `1792x1024`: Horizontal video (16:9, higher resolution)\n */\n size?: \"720x1280\" | \"1280x720\" | \"1024x1792\" | \"1792x1024\";\n\n /**\n * Video duration in seconds\n *\n * @default \"4\"\n */\n seconds?: \"4\" | \"8\" | \"12\";\n}\n\n/**\n * Output from OpenAI Video Model\n */\nexport interface OpenAIVideoModelOutput extends VideoModelOutput {}\n\n/**\n * Configuration options for OpenAI Video Model\n */\nexport interface OpenAIVideoModelOptions\n extends VideoModelOptions<OpenAIVideoModelInput, OpenAIVideoModelOutput> {\n /**\n * API key for OpenAI API\n *\n * If not provided, will look for OPENAI_API_KEY in environment variables\n */\n apiKey?: string;\n\n /**\n * Base URL for OpenAI API\n *\n * Useful for proxies or alternate endpoints\n */\n baseURL?: string;\n\n /**\n * OpenAI model to use\n *\n * Defaults to 'sora-2'\n */\n model?: string;\n\n /**\n * Additional model options to control behavior\n */\n modelOptions?: Omit<Partial<OpenAIVideoModelInput>, \"model\">;\n\n /**\n * Client options for OpenAI API\n */\n clientOptions?: Partial<ClientOptions>;\n\n /**\n * Polling interval in milliseconds for checking video generation status\n *\n * Defaults to 2000ms (2 seconds)\n */\n pollingInterval?: number;\n}\n\nconst openAIVideoModelInputSchema: ZodType<OpenAIVideoModelInput> = videoModelInputSchema.extend({\n model: z.enum([\"sora-2\", \"sora-2-pro\"]).optional(),\n seconds: z.enum([\"4\", \"8\", \"12\"]).optional(),\n size: z.enum([\"720x1280\", \"1280x720\", \"1024x1792\", \"1792x1024\"]).optional(),\n});\n\nconst openAIVideoModelOptionsSchema = z.object({\n apiKey: z.string().optional(),\n baseURL: z.string().optional(),\n model: z.string().optional(),\n modelOptions: z.object({}).optional(),\n clientOptions: z.object({}).optional(),\n pollingInterval: z.number().optional(),\n});\n\nexport class OpenAIVideoModel extends VideoModel<OpenAIVideoModelInput, OpenAIVideoModelOutput> {\n constructor(public override options?: OpenAIVideoModelOptions) {\n super({\n ...options,\n description: options?.description ?? 
\"Generate videos using OpenAI Sora models\",\n inputSchema: openAIVideoModelInputSchema,\n });\n if (options) checkArguments(this.name, openAIVideoModelOptionsSchema, options);\n }\n\n /**\n * @hidden\n */\n protected _client?: OpenAI;\n\n protected apiKeyEnvName = \"OPENAI_API_KEY\";\n\n get client() {\n const { apiKey, url } = this.credential;\n if (!apiKey)\n throw new Error(\n `${this.name} requires an API key. Please provide it via \\`options.apiKey\\`, or set the \\`${this.apiKeyEnvName}\\` environment variable`,\n );\n\n this._client ??= new CustomOpenAI({\n baseURL: url,\n apiKey,\n ...this.options?.clientOptions,\n });\n return this._client;\n }\n\n override get credential() {\n return {\n url: this.options?.baseURL || process.env.OPENAI_BASE_URL,\n apiKey: this.options?.apiKey || process.env[this.apiKeyEnvName],\n model: this.options?.model || DEFAULT_MODEL,\n };\n }\n\n get modelOptions() {\n return this.options?.modelOptions;\n }\n\n async downloadToFile(videoId: string): Promise<string> {\n logger.debug(\"Downloading video content...\");\n const content = await this.client.videos.downloadContent(videoId);\n const arrayBuffer = await content.arrayBuffer();\n const buffer = Buffer.from(arrayBuffer);\n return buffer.toString(\"base64\");\n }\n\n override async process(\n input: OpenAIVideoModelInput,\n _options: AgentInvokeOptions,\n ): Promise<OpenAIVideoModelOutput> {\n const model = input.model ?? input.modelOptions?.model ?? this.credential.model;\n\n const createParams: OpenAI.Videos.VideoCreateParams = {\n model: model as OpenAI.Videos.VideoModel,\n prompt: input.prompt,\n };\n\n if (input.seconds) createParams.seconds = input.seconds;\n if (input.size) createParams.size = input.size;\n if (input.image) {\n createParams.input_reference = await this.transformFileType(\"file\", input.image).then(\n (file) =>\n new File([Buffer.from(file.data, \"base64\")], file.filename || \"image.png\", {\n type: file.mimeType,\n }),\n );\n }\n\n let video = await this.client.videos.create(createParams);\n logger.debug(`Video generation started: ${video.id}`);\n\n const pollingInterval = this.options?.pollingInterval ?? 2000;\n while (video.status === \"in_progress\" || video.status === \"queued\") {\n await new Promise((resolve) => setTimeout(resolve, pollingInterval));\n video = await this.client.videos.retrieve(video.id);\n\n const progress = video.progress ?? 0;\n const statusText = video.status === \"queued\" ? \"Queued\" : \"Processing\";\n logger.debug(`${statusText}: ${progress.toFixed(1)}%`);\n }\n\n if (video.status === \"failed\") {\n throw new Error(`Video generation failed: ${video.error?.message || \"Unknown error\"}`);\n }\n\n if (video.status !== \"completed\") {\n throw new Error(`Unexpected video status: ${video.status}`);\n }\n\n return {\n videos: [\n {\n type: \"file\",\n data: await this.downloadToFile(video.id),\n mimeType: \"video/mp4\",\n filename: `${video.id}.mp4`,\n },\n ],\n usage: {\n inputTokens: 0,\n outputTokens: 0,\n },\n model,\n seconds: input.seconds ? 
parseInt(input.seconds, 10) : DEFAULT_SECONDS,\n };\n }\n}\n"],"mappings":";;;;;;;AAeA,MAAM,gBAAgB;AACtB,MAAM,kBAAkB;AAmFxB,MAAM,8BAA8D,sBAAsB,OAAO;CAC/F,OAAO,EAAE,KAAK,CAAC,UAAU,aAAa,CAAC,CAAC,UAAU;CAClD,SAAS,EAAE,KAAK;EAAC;EAAK;EAAK;EAAK,CAAC,CAAC,UAAU;CAC5C,MAAM,EAAE,KAAK;EAAC;EAAY;EAAY;EAAa;EAAY,CAAC,CAAC,UAAU;CAC5E,CAAC;AAEF,MAAM,gCAAgC,EAAE,OAAO;CAC7C,QAAQ,EAAE,QAAQ,CAAC,UAAU;CAC7B,SAAS,EAAE,QAAQ,CAAC,UAAU;CAC9B,OAAO,EAAE,QAAQ,CAAC,UAAU;CAC5B,cAAc,EAAE,OAAO,EAAE,CAAC,CAAC,UAAU;CACrC,eAAe,EAAE,OAAO,EAAE,CAAC,CAAC,UAAU;CACtC,iBAAiB,EAAE,QAAQ,CAAC,UAAU;CACvC,CAAC;AAEF,IAAa,mBAAb,cAAsC,WAA0D;CAC9F,YAAY,AAAgB,SAAmC;AAC7D,QAAM;GACJ,GAAG;GACH,aAAa,SAAS,eAAe;GACrC,aAAa;GACd,CAAC;EALwB;AAM1B,MAAI,QAAS,gBAAe,KAAK,MAAM,+BAA+B,QAAQ;;;;;CAMhF,AAAU;CAEV,AAAU,gBAAgB;CAE1B,IAAI,SAAS;EACX,MAAM,EAAE,QAAQ,QAAQ,KAAK;AAC7B,MAAI,CAAC,OACH,OAAM,IAAI,MACR,GAAG,KAAK,KAAK,+EAA+E,KAAK,cAAc,yBAChH;AAEH,OAAK,YAAY,IAAI,aAAa;GAChC,SAAS;GACT;GACA,GAAG,KAAK,SAAS;GAClB,CAAC;AACF,SAAO,KAAK;;CAGd,IAAa,aAAa;AACxB,SAAO;GACL,KAAK,KAAK,SAAS,WAAW,QAAQ,IAAI;GAC1C,QAAQ,KAAK,SAAS,UAAU,QAAQ,IAAI,KAAK;GACjD,OAAO,KAAK,SAAS,SAAS;GAC/B;;CAGH,IAAI,eAAe;AACjB,SAAO,KAAK,SAAS;;CAGvB,MAAM,eAAe,SAAkC;AACrD,SAAO,MAAM,+BAA+B;EAE5C,MAAM,cAAc,OADJ,MAAM,KAAK,OAAO,OAAO,gBAAgB,QAAQ,EAC/B,aAAa;AAE/C,SADe,OAAO,KAAK,YAAY,CACzB,SAAS,SAAS;;CAGlC,MAAe,QACb,OACA,UACiC;EACjC,MAAM,QAAQ,MAAM,SAAS,MAAM,cAAc,SAAS,KAAK,WAAW;EAE1E,MAAM,eAAgD;GAC7C;GACP,QAAQ,MAAM;GACf;AAED,MAAI,MAAM,QAAS,cAAa,UAAU,MAAM;AAChD,MAAI,MAAM,KAAM,cAAa,OAAO,MAAM;AAC1C,MAAI,MAAM,MACR,cAAa,kBAAkB,MAAM,KAAK,kBAAkB,QAAQ,MAAM,MAAM,CAAC,MAC9E,SACC,IAAI,KAAK,CAAC,OAAO,KAAK,KAAK,MAAM,SAAS,CAAC,EAAE,KAAK,YAAY,aAAa,EACzE,MAAM,KAAK,UACZ,CAAC,CACL;EAGH,IAAI,QAAQ,MAAM,KAAK,OAAO,OAAO,OAAO,aAAa;AACzD,SAAO,MAAM,6BAA6B,MAAM,KAAK;EAErD,MAAM,kBAAkB,KAAK,SAAS,mBAAmB;AACzD,SAAO,MAAM,WAAW,iBAAiB,MAAM,WAAW,UAAU;AAClE,SAAM,IAAI,SAAS,YAAY,WAAW,SAAS,gBAAgB,CAAC;AACpE,WAAQ,MAAM,KAAK,OAAO,OAAO,SAAS,MAAM,GAAG;GAEnD,MAAM,WAAW,MAAM,YAAY;GACnC,MAAM,aAAa,MAAM,WAAW,WAAW,WAAW;AAC1D,UAAO,MAAM,GAAG,WAAW,IAAI,SAAS,QAAQ,EAAE,CAAC,GAAG;;AAGxD,MAAI,MAAM,WAAW,SACnB,OAAM,IAAI,MAAM,4BAA4B,MAAM,OAAO,WAAW,kBAAkB;AAGxF,MAAI,MAAM,WAAW,YACnB,OAAM,IAAI,MAAM,4BAA4B,MAAM,SAAS;AAG7D,SAAO;GACL,QAAQ,CACN;IACE,MAAM;IACN,MAAM,MAAM,KAAK,eAAe,MAAM,GAAG;IACzC,UAAU;IACV,UAAU,GAAG,MAAM,GAAG;IACvB,CACF;GACD,OAAO;IACL,aAAa;IACb,cAAc;IACf;GACD;GACA,SAAS,MAAM,UAAU,SAAS,MAAM,SAAS,GAAG,GAAG;GACxD"}
package/dist/openai.cjs ADDED
@@ -0,0 +1,14 @@
+ const require_rolldown_runtime = require('./_virtual/rolldown_runtime.cjs');
+ let openai = require("openai");
+ openai = require_rolldown_runtime.__toESM(openai);
+
+ //#region src/openai.ts
+ var CustomOpenAI = class extends openai.default {
+ makeStatusError(status, error, message, headers) {
+ if (!("error" in error) || typeof error.error !== "string") message = JSON.stringify(error);
+ return super.makeStatusError(status, error, message, headers);
+ }
+ };
+
+ //#endregion
+ exports.CustomOpenAI = CustomOpenAI;
package/dist/openai.mjs ADDED
@@ -0,0 +1,13 @@
+ import OpenAI from "openai";
+
+ //#region src/openai.ts
+ var CustomOpenAI = class extends OpenAI {
+ makeStatusError(status, error, message, headers) {
+ if (!("error" in error) || typeof error.error !== "string") message = JSON.stringify(error);
+ return super.makeStatusError(status, error, message, headers);
+ }
+ };
+
+ //#endregion
+ export { CustomOpenAI };
+ //# sourceMappingURL=openai.mjs.map
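
Both the CJS and ESM builds ship the same `CustomOpenAI` wrapper: it only overrides `makeStatusError` so that, when the API error body has no string `error` field, the whole body is serialized into the message instead of leaving it empty or unhelpful. A standalone sketch of that fallback rule (a hypothetical helper written for illustration, not part of the package):

// Hypothetical illustration of the message fallback used in makeStatusError above.
function statusErrorMessage(body: object, message?: string): string | undefined {
  // Keep the caller-provided message only when the body already carries a string `error` field;
  // otherwise surface the full JSON body so the thrown APIError is self-describing.
  return "error" in body && typeof (body as { error?: unknown }).error === "string"
    ? message
    : JSON.stringify(body);
}

statusErrorMessage({ detail: "quota exceeded" });          // '{"detail":"quota exceeded"}'
statusErrorMessage({ error: "invalid_api_key" }, "401");   // "401"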
package/dist/openai.mjs.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"openai.mjs","names":[],"sources":["../src/openai.ts"],"sourcesContent":["import OpenAI, { type APIError } from \"openai\";\n\n// Use a custom OpenAI client to handle API errors for better error messages\nexport class CustomOpenAI extends OpenAI {\n protected override makeStatusError(\n status: number,\n error: object,\n message: string | undefined,\n headers: Headers,\n ): APIError {\n if (!(\"error\" in error) || typeof error.error !== \"string\") {\n message = JSON.stringify(error);\n }\n\n return super.makeStatusError(status, error, message, headers);\n }\n}\n"],"mappings":";;;AAGA,IAAa,eAAb,cAAkC,OAAO;CACvC,AAAmB,gBACjB,QACA,OACA,SACA,SACU;AACV,MAAI,EAAE,WAAW,UAAU,OAAO,MAAM,UAAU,SAChD,WAAU,KAAK,UAAU,MAAM;AAGjC,SAAO,MAAM,gBAAgB,QAAQ,OAAO,SAAS,QAAQ"}
package/package.json CHANGED
@@ -1,60 +1,59 @@
  {
  "name": "@aigne/openai",
- "version": "0.16.16",
+ "version": "1.74.0-beta",
  "description": "AIGNE OpenAI SDK for integrating with OpenAI's GPT models and API services",
+ "license": "Elastic-2.0",
  "publishConfig": {
  "access": "public"
  },
  "author": "Arcblock <blocklet@arcblock.io> https://github.com/blocklet",
  "homepage": "https://www.aigne.io/framework",
- "license": "Elastic-2.0",
  "repository": {
  "type": "git",
- "url": "git+https://github.com/AIGNE-io/aigne-framework"
+ "url": "git+https://github.com/ArcBlock/aigne-framework"
  },
  "bugs": {
- "url": "https://github.com/AIGNE-io/aigne-framework/issues"
+ "url": "https://github.com/ArcBlock/aigne-framework/issues"
+ },
+ "type": "module",
+ "main": "./dist/index.cjs",
+ "module": "./dist/index.mjs",
+ "types": "./dist/index.d.cts",
+ "exports": {
+ ".": {
+ "require": "./dist/index.cjs",
+ "import": "./dist/index.mjs"
+ },
+ "./*": "./*"
  },
  "files": [
- "lib/cjs",
- "lib/dts",
- "lib/esm",
+ "dist",
  "LICENSE",
  "README.md",
  "CHANGELOG.md"
  ],
- "type": "module",
- "main": "./lib/cjs/index.js",
- "module": "./lib/esm/index.js",
- "types": "./lib/dts/index.d.ts",
- "exports": {
- ".": {
- "import": "./lib/esm/index.js",
- "require": "./lib/cjs/index.js",
- "types": "./lib/dts/index.d.ts"
- }
- },
  "dependencies": {
  "@aigne/uuid": "^13.0.1",
  "openai": "^6.14.0",
  "zod": "^3.25.67",
- "@aigne/core": "^1.72.0",
- "@aigne/platform-helpers": "^0.6.7"
+ "@aigne/core": "^1.74.0-beta",
+ "@aigne/utils": "^1.74.0-beta"
  },
  "devDependencies": {
- "@types/bun": "^1.2.22",
- "@types/node": "^24.5.1",
+ "@types/bun": "^1.3.6",
  "npm-run-all": "^4.1.5",
- "rimraf": "^6.0.1",
- "typescript": "^5.9.2",
- "@aigne/test-utils": "^0.5.69"
+ "rimraf": "^6.1.2",
+ "tsdown": "0.20.0-beta.3",
+ "typescript": "5.9.2",
+ "@aigne/typescript-config": "0.0.0",
+ "@aigne/utils": "1.74.0-beta",
+ "@aigne/scripts": "0.0.0"
  },
  "scripts": {
- "lint": "tsc --noEmit",
- "build": "tsc --build scripts/tsconfig.build.json",
- "clean": "rimraf lib test/coverage",
+ "build": "tsdown",
+ "check-types": "tsc --noEmit",
+ "clean": "rimraf dist coverage",
  "test": "bun test",
- "test:coverage": "bun test --coverage --coverage-reporter=lcov --coverage-reporter=text",
- "postbuild": "node ../../scripts/post-build-lib.mjs"
+ "test:coverage": "bun test --coverage --coverage-reporter=lcov --coverage-reporter=text"
  }
  }
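
The package.json change swaps the tsc-built lib/cjs, lib/esm, and lib/dts trees for a single tsdown-built dist directory and routes resolution through the exports map: `import` resolves to ./dist/index.mjs, `require` to ./dist/index.cjs, and the "./*" wildcard keeps deep paths reachable. A sketch of what consumers see under the new layout (the deep path below is one of the dist files listed above and is illustrative only):

// ESM: resolved to ./dist/index.mjs via the "import" condition.
import { OpenAIVideoModel } from "@aigne/openai";

// CommonJS: resolved to ./dist/index.cjs via the "require" condition.
//   const { OpenAIVideoModel } = require("@aigne/openai");

// Deep imports still work through the "./*" wildcard export, e.g.:
//   import { OpenAIVideoModel } from "@aigne/openai/dist/openai-video-model.mjs";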