modelfusion 0.61.0 → 0.62.0
- package/README.md +2 -1
- package/model-provider/openai/OpenAICompletionModel.d.ts +10 -10
- package/model-provider/openai/OpenAICostCalculator.cjs +10 -0
- package/model-provider/openai/OpenAICostCalculator.js +10 -0
- package/model-provider/openai/OpenAIImageGenerationModel.d.ts +1 -1
- package/model-provider/openai/OpenAISpeechModel.cjs +93 -0
- package/model-provider/openai/OpenAISpeechModel.d.ts +52 -0
- package/model-provider/openai/OpenAISpeechModel.js +88 -0
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +16 -16
- package/model-provider/openai/index.cjs +2 -1
- package/model-provider/openai/index.d.ts +2 -1
- package/model-provider/openai/index.js +2 -1
- package/package.json +1 -1
package/README.md
CHANGED
@@ -14,6 +14,7 @@
 
 **ModelFusion** is a TypeScript library for building AI applications, chatbots, and agents.
 
+- **Vendor-neutral**: ModelFusion is a non-commercial open source project that is community-driven. You can use it with any supported vendor.
 - **Multimodal**: ModelFusion supports a wide range of models including text generation, image generation, text-to-speech, speech-to-text, and embedding models.
 - **Streaming**: ModelFusion supports streaming for many generation models, e.g. text streaming, structure streaming, and full duplex speech streaming.
 - **Utility functions**: ModelFusion provides functionality for tools and tool usage, vector indices, and guards functions.
@@ -109,7 +110,7 @@ const speech = await generateSpeech(
 );
 ```
 
-Providers: [Eleven Labs](https://modelfusion.dev/integration/model-provider/elevenlabs), [LMNT](https://modelfusion.dev/integration/model-provider/lmnt)
+Providers: [Eleven Labs](https://modelfusion.dev/integration/model-provider/elevenlabs), [LMNT](https://modelfusion.dev/integration/model-provider/lmnt), [OpenAI](https://modelfusion.dev/integration/model-provider/openai)
 
 #### streamSpeech
 
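With OpenAI added as a speech provider, `generateSpeech` can now target the new text-to-speech models. A minimal usage sketch, assuming `generateSpeech` and `OpenAISpeechModel` are re-exported from the package root (the model and voice names come from the `OpenAISpeechModel` declarations later in this diff):

```ts
import { generateSpeech, OpenAISpeechModel } from "modelfusion";

// Synthesize speech with the new OpenAI text-to-speech model.
// Resolves to a Buffer with mp3 audio (the default responseFormat).
const speech = await generateSpeech(
  new OpenAISpeechModel({ model: "tts-1", voice: "alloy" }),
  "Hello from ModelFusion!"
);
```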
package/model-provider/openai/OpenAICompletionModel.d.ts
CHANGED
@@ -150,18 +150,18 @@ export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletio
 object: "text_completion";
 usage: {
 prompt_tokens: number;
-completion_tokens: number;
 total_tokens: number;
+completion_tokens: number;
 };
 model: string;
 id: string;
-created: number;
 choices: {
 text: string;
 finish_reason: string;
 index: number;
 logprobs?: any;
 }[];
+created: number;
 };
 text: string;
 usage: {
@@ -212,45 +212,45 @@ declare const OpenAICompletionResponseSchema: z.ZodObject<{
 total_tokens: z.ZodNumber;
 }, "strip", z.ZodTypeAny, {
 prompt_tokens: number;
-completion_tokens: number;
 total_tokens: number;
+completion_tokens: number;
 }, {
 prompt_tokens: number;
-completion_tokens: number;
 total_tokens: number;
+completion_tokens: number;
 }>;
 }, "strip", z.ZodTypeAny, {
 object: "text_completion";
 usage: {
 prompt_tokens: number;
-completion_tokens: number;
 total_tokens: number;
+completion_tokens: number;
 };
 model: string;
 id: string;
-created: number;
 choices: {
 text: string;
 finish_reason: string;
 index: number;
 logprobs?: any;
 }[];
+created: number;
 }, {
 object: "text_completion";
 usage: {
 prompt_tokens: number;
-completion_tokens: number;
 total_tokens: number;
+completion_tokens: number;
 };
 model: string;
 id: string;
-created: number;
 choices: {
 text: string;
 finish_reason: string;
 index: number;
 logprobs?: any;
 }[];
+created: number;
 }>;
 export type OpenAICompletionResponse = z.infer<typeof OpenAICompletionResponseSchema>;
 export type OpenAITextResponseFormatType<T> = {
@@ -267,18 +267,18 @@ export declare const OpenAITextResponseFormat: {
 object: "text_completion";
 usage: {
 prompt_tokens: number;
-completion_tokens: number;
 total_tokens: number;
+completion_tokens: number;
 };
 model: string;
 id: string;
-created: number;
 choices: {
 text: string;
 finish_reason: string;
 index: number;
 logprobs?: any;
 }[];
+created: number;
 }>;
 };
 /**
package/model-provider/openai/OpenAICostCalculator.cjs
CHANGED
@@ -6,6 +6,7 @@ const OpenAITextEmbeddingModel_js_1 = require("./OpenAITextEmbeddingModel.cjs");
 const OpenAICompletionModel_js_1 = require("./OpenAICompletionModel.cjs");
 const OpenAITranscriptionModel_js_1 = require("./OpenAITranscriptionModel.cjs");
 const OpenAIChatModel_js_1 = require("./chat/OpenAIChatModel.cjs");
+const OpenAISpeechModel_js_1 = require("./OpenAISpeechModel.cjs");
 class OpenAICostCalculator {
 constructor() {
 Object.defineProperty(this, "provider", {
@@ -68,6 +69,15 @@ class OpenAICostCalculator {
 .response,
 });
 }
+case "generate-speech": {
+    if (model == null) {
+        return null;
+    }
+    return (0, OpenAISpeechModel_js_1.calculateOpenAISpeechCostInMillicents)({
+        model: model,
+        input: call.input,
+    });
+}
 }
 return null;
 }
package/model-provider/openai/OpenAICostCalculator.js
CHANGED
@@ -3,6 +3,7 @@ import { calculateOpenAIEmbeddingCostInMillicents, isOpenAIEmbeddingModel, } fro
 import { calculateOpenAICompletionCostInMillicents, isOpenAICompletionModel, } from "./OpenAICompletionModel.js";
 import { calculateOpenAITranscriptionCostInMillicents, } from "./OpenAITranscriptionModel.js";
 import { calculateOpenAIChatCostInMillicents, isOpenAIChatModel, } from "./chat/OpenAIChatModel.js";
+import { calculateOpenAISpeechCostInMillicents, } from "./OpenAISpeechModel.js";
 export class OpenAICostCalculator {
 constructor() {
 Object.defineProperty(this, "provider", {
@@ -65,6 +66,15 @@ export class OpenAICostCalculator {
 .response,
 });
 }
+case "generate-speech": {
+    if (model == null) {
+        return null;
+    }
+    return calculateOpenAISpeechCostInMillicents({
+        model: model,
+        input: call.input,
+    });
+}
 }
 return null;
 }
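The new `generate-speech` branch delegates to `calculateOpenAISpeechCostInMillicents`, which (per the `OpenAISpeechModel` source added in this release) prices a call per input character: 1.5 millicents per character for `tts-1` and 3 for `tts-1-hd`. A quick sketch of the arithmetic:

```ts
// cost = input.length * costInMillicentsPerCharacter
const input = "a".repeat(1000); // 1,000 characters

const costTts1 = input.length * 1.5; // 1500 millicents = 1.5 cents
const costTts1Hd = input.length * 3; // 3000 millicents = 3 cents
```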
package/model-provider/openai/OpenAIImageGenerationModel.d.ts
CHANGED
@@ -11,7 +11,7 @@ export declare const OPENAI_IMAGE_MODELS: {
 getCost(settings: OpenAIImageGenerationSettings): 2000 | 1800 | 1600 | null;
 };
 "dall-e-3": {
-getCost(settings: OpenAIImageGenerationSettings):
+getCost(settings: OpenAIImageGenerationSettings): 8000 | 4000 | 12000 | null;
 };
 };
 /**
package/model-provider/openai/OpenAISpeechModel.cjs
ADDED
@@ -0,0 +1,93 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.OpenAISpeechModel = exports.calculateOpenAISpeechCostInMillicents = exports.OPENAI_SPEECH_MODELS = void 0;
+const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
+const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
+const postToApi_js_1 = require("../../core/api/postToApi.cjs");
+const OpenAIApiConfiguration_js_1 = require("./OpenAIApiConfiguration.cjs");
+const OpenAIError_js_1 = require("./OpenAIError.cjs");
+/**
+ * @see https://openai.com/pricing
+ */
+exports.OPENAI_SPEECH_MODELS = {
+    "tts-1": {
+        costInMillicentsPerCharacter: 1.5, // = 1500 / 1000,
+    },
+    "tts-1-hd": {
+        costInMillicentsPerCharacter: 3, // = 3000 / 1000
+    },
+};
+const calculateOpenAISpeechCostInMillicents = ({ model, input, }) => {
+    if (!exports.OPENAI_SPEECH_MODELS[model]) {
+        return null;
+    }
+    return (input.length * exports.OPENAI_SPEECH_MODELS[model].costInMillicentsPerCharacter);
+};
+exports.calculateOpenAISpeechCostInMillicents = calculateOpenAISpeechCostInMillicents;
+/**
+ * Synthesize speech using the OpenAI API.
+ *
+ * @see https://platform.openai.com/docs/api-reference/audio/createSpeech
+ */
+class OpenAISpeechModel extends AbstractModel_js_1.AbstractModel {
+    constructor(settings) {
+        super({ settings });
+        Object.defineProperty(this, "provider", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "openai"
+        });
+    }
+    get voice() {
+        return this.settings.voice;
+    }
+    get modelName() {
+        return this.settings.model;
+    }
+    async callAPI(text, options) {
+        return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
+            retry: this.settings.api?.retry,
+            throttle: this.settings.api?.throttle,
+            call: async () => callOpenAITextToSpeechAPI({
+                ...this.settings,
+                abortSignal: options?.run?.abortSignal,
+                text,
+            }),
+        });
+    }
+    get settingsForEvent() {
+        return {
+            voice: this.settings.voice,
+            speed: this.settings.speed,
+            model: this.settings.model,
+            responseFormat: this.settings.responseFormat,
+        };
+    }
+    doGenerateSpeechStandard(text, options) {
+        return this.callAPI(text, options);
+    }
+    withSettings(additionalSettings) {
+        return new OpenAISpeechModel({
+            ...this.settings,
+            ...additionalSettings,
+        });
+    }
+}
+exports.OpenAISpeechModel = OpenAISpeechModel;
+async function callOpenAITextToSpeechAPI({ api = new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration(), abortSignal, text, voice, model, speed, responseFormat, }) {
+    return (0, postToApi_js_1.postJsonToApi)({
+        url: api.assembleUrl(`/audio/speech`),
+        headers: api.headers,
+        body: {
+            input: text,
+            voice,
+            speed,
+            model,
+            response_format: responseFormat,
+        },
+        failedResponseHandler: OpenAIError_js_1.failedOpenAICallResponseHandler,
+        successfulResponseHandler: (0, postToApi_js_1.createAudioMpegResponseHandler)(),
+        abortSignal,
+    });
+}
package/model-provider/openai/OpenAISpeechModel.d.ts
ADDED
@@ -0,0 +1,52 @@
+/// <reference types="node" />
+import { AbstractModel } from "../../model-function/AbstractModel.js";
+import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
+import { FunctionOptions } from "../../core/FunctionOptions.js";
+import { SpeechGenerationModel, SpeechGenerationModelSettings } from "../../model-function/generate-speech/SpeechGenerationModel.js";
+/**
+ * @see https://openai.com/pricing
+ */
+export declare const OPENAI_SPEECH_MODELS: {
+    "tts-1": {
+        costInMillicentsPerCharacter: number;
+    };
+    "tts-1-hd": {
+        costInMillicentsPerCharacter: number;
+    };
+};
+export type OpenAISpeechModelType = keyof typeof OPENAI_SPEECH_MODELS;
+export declare const calculateOpenAISpeechCostInMillicents: ({ model, input, }: {
+    model: OpenAISpeechModelType;
+    input: string;
+}) => number | null;
+export type OpenAISpeechVoice = "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer";
+type OpenAISpeechModelResponseFormat = "mp3" | "opus" | "aac" | "flac";
+export interface OpenAISpeechModelSettings extends SpeechGenerationModelSettings {
+    api?: ApiConfiguration;
+    voice: OpenAISpeechVoice;
+    model: OpenAISpeechModelType;
+    /**
+     * The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default.
+     */
+    speed?: number;
+    /**
+     * Defaults to mp3.
+     */
+    responseFormat?: OpenAISpeechModelResponseFormat;
+}
+/**
+ * Synthesize speech using the OpenAI API.
+ *
+ * @see https://platform.openai.com/docs/api-reference/audio/createSpeech
+ */
+export declare class OpenAISpeechModel extends AbstractModel<OpenAISpeechModelSettings> implements SpeechGenerationModel<OpenAISpeechModelSettings> {
+    constructor(settings: OpenAISpeechModelSettings);
+    readonly provider: "openai";
+    get voice(): OpenAISpeechVoice;
+    get modelName(): "tts-1" | "tts-1-hd";
+    private callAPI;
+    get settingsForEvent(): Partial<OpenAISpeechModelSettings>;
+    doGenerateSpeechStandard(text: string, options?: FunctionOptions): Promise<Buffer>;
+    withSettings(additionalSettings: Partial<OpenAISpeechModelSettings>): this;
+}
+export {};
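Based on the `OpenAISpeechModelSettings` interface above, a hypothetical configuration sketch (the import path assumes the openai provider is re-exported from the package root; the values are illustrative):

```ts
import { OpenAISpeechModel } from "modelfusion";

const ttsModel = new OpenAISpeechModel({
  model: "tts-1-hd",     // OpenAISpeechModelType: "tts-1" | "tts-1-hd"
  voice: "nova",         // OpenAISpeechVoice
  speed: 1.25,           // optional, 0.25 to 4.0 (default 1.0)
  responseFormat: "mp3", // optional: "mp3" | "opus" | "aac" | "flac"
});

// doGenerateSpeechStandard(text) resolves to a Buffer with the generated audio.
```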
package/model-provider/openai/OpenAISpeechModel.js
ADDED
@@ -0,0 +1,88 @@
+import { AbstractModel } from "../../model-function/AbstractModel.js";
+import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
+import { createAudioMpegResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
+import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
+import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
+/**
+ * @see https://openai.com/pricing
+ */
+export const OPENAI_SPEECH_MODELS = {
+    "tts-1": {
+        costInMillicentsPerCharacter: 1.5, // = 1500 / 1000,
+    },
+    "tts-1-hd": {
+        costInMillicentsPerCharacter: 3, // = 3000 / 1000
+    },
+};
+export const calculateOpenAISpeechCostInMillicents = ({ model, input, }) => {
+    if (!OPENAI_SPEECH_MODELS[model]) {
+        return null;
+    }
+    return (input.length * OPENAI_SPEECH_MODELS[model].costInMillicentsPerCharacter);
+};
+/**
+ * Synthesize speech using the OpenAI API.
+ *
+ * @see https://platform.openai.com/docs/api-reference/audio/createSpeech
+ */
+export class OpenAISpeechModel extends AbstractModel {
+    constructor(settings) {
+        super({ settings });
+        Object.defineProperty(this, "provider", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "openai"
+        });
+    }
+    get voice() {
+        return this.settings.voice;
+    }
+    get modelName() {
+        return this.settings.model;
+    }
+    async callAPI(text, options) {
+        return callWithRetryAndThrottle({
+            retry: this.settings.api?.retry,
+            throttle: this.settings.api?.throttle,
+            call: async () => callOpenAITextToSpeechAPI({
+                ...this.settings,
+                abortSignal: options?.run?.abortSignal,
+                text,
+            }),
+        });
+    }
+    get settingsForEvent() {
+        return {
+            voice: this.settings.voice,
+            speed: this.settings.speed,
+            model: this.settings.model,
+            responseFormat: this.settings.responseFormat,
+        };
+    }
+    doGenerateSpeechStandard(text, options) {
+        return this.callAPI(text, options);
+    }
+    withSettings(additionalSettings) {
+        return new OpenAISpeechModel({
+            ...this.settings,
+            ...additionalSettings,
+        });
+    }
+}
+async function callOpenAITextToSpeechAPI({ api = new OpenAIApiConfiguration(), abortSignal, text, voice, model, speed, responseFormat, }) {
+    return postJsonToApi({
+        url: api.assembleUrl(`/audio/speech`),
+        headers: api.headers,
+        body: {
+            input: text,
+            voice,
+            speed,
+            model,
+            response_format: responseFormat,
+        },
+        failedResponseHandler: failedOpenAICallResponseHandler,
+        successfulResponseHandler: createAudioMpegResponseHandler(),
+        abortSignal,
+    });
+}
package/model-provider/openai/chat/OpenAIChatModel.d.ts
CHANGED
@@ -175,12 +175,11 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
 object: "chat.completion";
 usage: {
 prompt_tokens: number;
-completion_tokens: number;
 total_tokens: number;
+completion_tokens: number;
 };
 model: string;
 id: string;
-created: number;
 choices: {
 message: {
 content: string | null;
@@ -194,6 +193,7 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
 index: number;
 logprobs?: any;
 }[];
+created: number;
 };
 text: string;
 usage: {
@@ -215,12 +215,11 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
 object: "chat.completion";
 usage: {
 prompt_tokens: number;
-completion_tokens: number;
 total_tokens: number;
+completion_tokens: number;
 };
 model: string;
 id: string;
-created: number;
 choices: {
 message: {
 content: string | null;
@@ -234,6 +233,7 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
 index: number;
 logprobs?: any;
 }[];
+created: number;
 };
 valueText: string;
 value: any;
@@ -249,12 +249,11 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
 object: "chat.completion";
 usage: {
 prompt_tokens: number;
-completion_tokens: number;
 total_tokens: number;
+completion_tokens: number;
 };
 model: string;
 id: string;
-created: number;
 choices: {
 message: {
 content: string | null;
@@ -268,6 +267,7 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
 index: number;
 logprobs?: any;
 }[];
+created: number;
 };
 structureAndText: {
 structure: null;
@@ -285,12 +285,11 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
 object: "chat.completion";
 usage: {
 prompt_tokens: number;
-completion_tokens: number;
 total_tokens: number;
+completion_tokens: number;
 };
 model: string;
 id: string;
-created: number;
 choices: {
 message: {
 content: string | null;
@@ -304,6 +303,7 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
 index: number;
 logprobs?: any;
 }[];
+created: number;
 };
 structureAndText: {
 structure: string;
@@ -401,23 +401,22 @@ declare const openAIChatResponseSchema: z.ZodObject<{
 total_tokens: z.ZodNumber;
 }, "strip", z.ZodTypeAny, {
 prompt_tokens: number;
-completion_tokens: number;
 total_tokens: number;
+completion_tokens: number;
 }, {
 prompt_tokens: number;
-completion_tokens: number;
 total_tokens: number;
+completion_tokens: number;
 }>;
 }, "strip", z.ZodTypeAny, {
 object: "chat.completion";
 usage: {
 prompt_tokens: number;
-completion_tokens: number;
 total_tokens: number;
+completion_tokens: number;
 };
 model: string;
 id: string;
-created: number;
 choices: {
 message: {
 content: string | null;
@@ -431,16 +430,16 @@ declare const openAIChatResponseSchema: z.ZodObject<{
 index: number;
 logprobs?: any;
 }[];
+created: number;
 }, {
 object: "chat.completion";
 usage: {
 prompt_tokens: number;
-completion_tokens: number;
 total_tokens: number;
+completion_tokens: number;
 };
 model: string;
 id: string;
-created: number;
 choices: {
 message: {
 content: string | null;
@@ -454,6 +453,7 @@ declare const openAIChatResponseSchema: z.ZodObject<{
 index: number;
 logprobs?: any;
 }[];
+created: number;
 }>;
 export type OpenAIChatResponse = z.infer<typeof openAIChatResponseSchema>;
 export type OpenAIChatResponseFormatType<T> = {
@@ -470,12 +470,11 @@ export declare const OpenAIChatResponseFormat: {
 object: "chat.completion";
 usage: {
 prompt_tokens: number;
-completion_tokens: number;
 total_tokens: number;
+completion_tokens: number;
 };
 model: string;
 id: string;
-created: number;
 choices: {
 message: {
 content: string | null;
@@ -489,6 +488,7 @@ export declare const OpenAIChatResponseFormat: {
 index: number;
 logprobs?: any;
 }[];
+created: number;
 }>;
 };
 /**
package/model-provider/openai/index.cjs
CHANGED
@@ -17,12 +17,13 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.OpenAIError = void 0;
 __exportStar(require("./AzureOpenAIApiConfiguration.cjs"), exports);
 __exportStar(require("./OpenAIApiConfiguration.cjs"), exports);
+__exportStar(require("./OpenAICompletionModel.cjs"), exports);
 __exportStar(require("./OpenAICostCalculator.cjs"), exports);
 var OpenAIError_js_1 = require("./OpenAIError.cjs");
 Object.defineProperty(exports, "OpenAIError", { enumerable: true, get: function () { return OpenAIError_js_1.OpenAIError; } });
 __exportStar(require("./OpenAIImageGenerationModel.cjs"), exports);
+__exportStar(require("./OpenAISpeechModel.cjs"), exports);
 __exportStar(require("./OpenAITextEmbeddingModel.cjs"), exports);
-__exportStar(require("./OpenAICompletionModel.cjs"), exports);
 __exportStar(require("./OpenAITranscriptionModel.cjs"), exports);
 __exportStar(require("./TikTokenTokenizer.cjs"), exports);
 __exportStar(require("./chat/OpenAIChatMessage.cjs"), exports);
package/model-provider/openai/index.d.ts
CHANGED
@@ -1,10 +1,11 @@
 export * from "./AzureOpenAIApiConfiguration.js";
 export * from "./OpenAIApiConfiguration.js";
+export * from "./OpenAICompletionModel.js";
 export * from "./OpenAICostCalculator.js";
 export { OpenAIError, OpenAIErrorData } from "./OpenAIError.js";
 export * from "./OpenAIImageGenerationModel.js";
+export * from "./OpenAISpeechModel.js";
 export * from "./OpenAITextEmbeddingModel.js";
-export * from "./OpenAICompletionModel.js";
 export * from "./OpenAITranscriptionModel.js";
 export * from "./TikTokenTokenizer.js";
 export * from "./chat/OpenAIChatMessage.js";
package/model-provider/openai/index.js
CHANGED
@@ -1,10 +1,11 @@
 export * from "./AzureOpenAIApiConfiguration.js";
 export * from "./OpenAIApiConfiguration.js";
+export * from "./OpenAICompletionModel.js";
 export * from "./OpenAICostCalculator.js";
 export { OpenAIError } from "./OpenAIError.js";
 export * from "./OpenAIImageGenerationModel.js";
+export * from "./OpenAISpeechModel.js";
 export * from "./OpenAITextEmbeddingModel.js";
-export * from "./OpenAICompletionModel.js";
 export * from "./OpenAITranscriptionModel.js";
 export * from "./TikTokenTokenizer.js";
 export * from "./chat/OpenAIChatMessage.js";
package/package.json
CHANGED