modelfusion 0.47.3 → 0.49.0
This diff shows the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only and reflects the changes between those versions.
- package/README.md +55 -33
- package/core/getRun.cjs +5 -3
- package/core/getRun.js +5 -3
- package/index.cjs +1 -0
- package/index.d.ts +1 -0
- package/index.js +1 -0
- package/model-function/AsyncIterableResultPromise.cjs +5 -5
- package/model-function/AsyncIterableResultPromise.d.ts +3 -3
- package/model-function/AsyncIterableResultPromise.js +5 -5
- package/model-function/Model.d.ts +1 -1
- package/model-function/ModelCallEvent.d.ts +5 -7
- package/model-function/embed/embed.cjs +3 -3
- package/model-function/embed/embed.js +3 -3
- package/model-function/{executeCall.cjs → executeStandardCall.cjs} +3 -3
- package/model-function/{executeCall.d.ts → executeStandardCall.d.ts} +1 -1
- package/model-function/{executeCall.js → executeStandardCall.js} +1 -1
- package/model-function/executeStreamCall.cjs +132 -0
- package/model-function/executeStreamCall.d.ts +20 -0
- package/model-function/executeStreamCall.js +128 -0
- package/model-function/generate-image/generateImage.cjs +2 -2
- package/model-function/generate-image/generateImage.js +2 -2
- package/model-function/generate-speech/SpeechGenerationEvent.d.ts +27 -0
- package/model-function/generate-speech/SpeechGenerationModel.d.ts +15 -0
- package/model-function/{synthesize-speech/synthesizeSpeech.cjs → generate-speech/generateSpeech.cjs} +7 -7
- package/model-function/{synthesize-speech/synthesizeSpeech.d.ts → generate-speech/generateSpeech.d.ts} +2 -2
- package/model-function/{synthesize-speech/synthesizeSpeech.js → generate-speech/generateSpeech.js} +5 -5
- package/model-function/generate-speech/index.cjs +20 -0
- package/model-function/generate-speech/index.d.ts +4 -0
- package/model-function/generate-speech/index.js +4 -0
- package/model-function/generate-speech/streamSpeech.cjs +34 -0
- package/model-function/generate-speech/streamSpeech.d.ts +8 -0
- package/model-function/generate-speech/streamSpeech.js +30 -0
- package/model-function/generate-structure/generateStructure.cjs +2 -2
- package/model-function/generate-structure/generateStructure.js +2 -2
- package/model-function/generate-structure/generateStructureOrText.cjs +2 -2
- package/model-function/generate-structure/generateStructureOrText.js +2 -2
- package/model-function/generate-structure/index.cjs +27 -0
- package/model-function/generate-structure/index.d.ts +11 -0
- package/model-function/generate-structure/index.js +11 -0
- package/model-function/generate-structure/streamStructure.cjs +28 -136
- package/model-function/generate-structure/streamStructure.js +27 -135
- package/model-function/generate-text/TextGenerationEvent.d.ts +6 -0
- package/model-function/generate-text/generateText.cjs +3 -3
- package/model-function/generate-text/generateText.d.ts +1 -1
- package/model-function/generate-text/generateText.js +3 -3
- package/model-function/generate-text/index.cjs +0 -1
- package/model-function/generate-text/index.d.ts +0 -1
- package/model-function/generate-text/index.js +0 -1
- package/model-function/generate-text/streamText.cjs +21 -128
- package/model-function/generate-text/streamText.js +20 -127
- package/model-function/generate-text/trimChatPrompt.cjs +1 -1
- package/model-function/generate-text/trimChatPrompt.d.ts +1 -1
- package/model-function/generate-text/trimChatPrompt.js +1 -1
- package/model-function/{transcribe-speech/transcribe.cjs → generate-transcription/generateTranscription.cjs} +6 -6
- package/model-function/{transcribe-speech/transcribe.d.ts → generate-transcription/generateTranscription.d.ts} +2 -2
- package/model-function/{transcribe-speech/transcribe.js → generate-transcription/generateTranscription.js} +4 -4
- package/model-function/index.cjs +5 -20
- package/model-function/index.d.ts +5 -20
- package/model-function/index.js +5 -20
- package/model-provider/elevenlabs/ElevenLabsApiConfiguration.cjs +3 -0
- package/model-provider/elevenlabs/ElevenLabsApiConfiguration.d.ts +1 -0
- package/model-provider/elevenlabs/ElevenLabsApiConfiguration.js +3 -0
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +191 -0
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts +39 -0
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +187 -0
- package/model-provider/elevenlabs/index.cjs +1 -1
- package/model-provider/elevenlabs/index.d.ts +1 -1
- package/model-provider/elevenlabs/index.js +1 -1
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.cjs +21 -2
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.d.ts +11 -6
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.js +21 -2
- package/model-provider/lmnt/{LmntSpeechSynthesisModel.cjs → LmntSpeechModel.cjs} +5 -5
- package/model-provider/lmnt/LmntSpeechModel.d.ts +26 -0
- package/model-provider/lmnt/{LmntSpeechSynthesisModel.js → LmntSpeechModel.js} +3 -3
- package/model-provider/lmnt/index.cjs +1 -1
- package/model-provider/lmnt/index.d.ts +1 -1
- package/model-provider/lmnt/index.js +1 -1
- package/model-provider/openai/{OpenAITextGenerationModel.cjs → OpenAICompletionModel.cjs} +17 -17
- package/model-provider/openai/{OpenAITextGenerationModel.d.ts → OpenAICompletionModel.d.ts} +25 -25
- package/model-provider/openai/{OpenAITextGenerationModel.js → OpenAICompletionModel.js} +12 -12
- package/model-provider/openai/OpenAICostCalculator.cjs +3 -3
- package/model-provider/openai/OpenAICostCalculator.js +3 -3
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +1 -1
- package/model-provider/openai/TikTokenTokenizer.d.ts +2 -2
- package/model-provider/openai/index.cjs +1 -1
- package/model-provider/openai/index.d.ts +1 -1
- package/model-provider/openai/index.js +1 -1
- package/package.json +3 -1
- package/ui/MediaSourceAppender.cjs +54 -0
- package/ui/MediaSourceAppender.d.ts +11 -0
- package/ui/MediaSourceAppender.js +50 -0
- package/ui/index.cjs +17 -0
- package/ui/index.d.ts +1 -0
- package/ui/index.js +1 -0
- package/util/SimpleWebSocket.cjs +41 -0
- package/util/SimpleWebSocket.d.ts +12 -0
- package/util/SimpleWebSocket.js +14 -0
- package/model-function/describe-image/ImageDescriptionEvent.d.ts +0 -18
- package/model-function/describe-image/ImageDescriptionModel.d.ts +0 -10
- package/model-function/describe-image/describeImage.cjs +0 -26
- package/model-function/describe-image/describeImage.d.ts +0 -9
- package/model-function/describe-image/describeImage.js +0 -22
- package/model-function/generate-text/TextStreamingEvent.cjs +0 -2
- package/model-function/generate-text/TextStreamingEvent.d.ts +0 -7
- package/model-function/generate-text/TextStreamingEvent.js +0 -1
- package/model-function/synthesize-speech/SpeechSynthesisEvent.cjs +0 -2
- package/model-function/synthesize-speech/SpeechSynthesisEvent.d.ts +0 -21
- package/model-function/synthesize-speech/SpeechSynthesisEvent.js +0 -1
- package/model-function/synthesize-speech/SpeechSynthesisModel.cjs +0 -2
- package/model-function/synthesize-speech/SpeechSynthesisModel.d.ts +0 -11
- package/model-function/synthesize-speech/SpeechSynthesisModel.js +0 -1
- package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.cjs +0 -79
- package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.d.ts +0 -30
- package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.js +0 -75
- package/model-provider/lmnt/LmntSpeechSynthesisModel.d.ts +0 -26
- /package/model-function/{describe-image/ImageDescriptionEvent.cjs → generate-speech/SpeechGenerationEvent.cjs} +0 -0
- /package/model-function/{describe-image/ImageDescriptionEvent.js → generate-speech/SpeechGenerationEvent.js} +0 -0
- /package/model-function/{describe-image/ImageDescriptionModel.cjs → generate-speech/SpeechGenerationModel.cjs} +0 -0
- /package/model-function/{describe-image/ImageDescriptionModel.js → generate-speech/SpeechGenerationModel.js} +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionEvent.cjs +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionEvent.d.ts +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionEvent.js +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionModel.cjs +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionModel.d.ts +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionModel.js +0 -0
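
The most visible API change in this release is the audio rename: `synthesizeSpeech` becomes `generateSpeech`, a streaming `streamSpeech` function is added, and the provider models are renamed to match (`ElevenLabsSpeechSynthesisModel` → `ElevenLabsSpeechModel`, `LmntSpeechSynthesisModel` → `LmntSpeechModel`). A minimal sketch of the new surface, assuming the renamed function keeps the call shape of its predecessor; the voice id is a placeholder:

```ts
import { ElevenLabsSpeechModel, generateSpeech, streamSpeech } from "modelfusion";

const model = new ElevenLabsSpeechModel({ voice: "your-voice-id" }); // placeholder voice id

// One-shot synthesis, previously synthesizeSpeech:
const speech = await generateSpeech(model, "Hello, world!");

// New in this release: incremental audio chunks via streamSpeech:
const speechStream = await streamSpeech(model, "Hello, world!");
for await (const part of speechStream) {
  // forward each audio chunk, e.g. to the new MediaSourceAppender (see below)
}
```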
package/model-provider/openai/{OpenAITextGenerationModel.d.ts → OpenAICompletionModel.d.ts}
CHANGED
@@ -81,25 +81,25 @@ export declare const OPENAI_TEXT_GENERATION_MODELS: {
         completionTokenCostInMillicents: number;
     };
 };
-export declare function getOpenAITextGenerationModelInformation(model: OpenAITextGenerationModelType): {
-    baseModel: OpenAITextGenerationBaseModelType;
+export declare function getOpenAICompletionModelInformation(model: OpenAICompletionModelType): {
+    baseModel: OpenAICompletionBaseModelType;
     isFineTuned: boolean;
     contextWindowSize: number;
     promptTokenCostInMillicents: number;
     completionTokenCostInMillicents: number;
 };
-type FineTuneableOpenAITextGenerationModelType = "davinci-002" | "babbage-002";
-type FineTunedOpenAITextGenerationModelType = `ft:${FineTuneableOpenAITextGenerationModelType}:${string}:${string}:${string}`;
-export type OpenAITextGenerationBaseModelType = keyof typeof OPENAI_TEXT_GENERATION_MODELS;
-export type OpenAITextGenerationModelType = OpenAITextGenerationBaseModelType | FineTunedOpenAITextGenerationModelType;
-export declare const isOpenAITextGenerationModel: (model: string) => model is OpenAITextGenerationModelType;
-export declare const calculateOpenAITextGenerationCostInMillicents: ({ model, response, }: {
-    model: OpenAITextGenerationModelType;
-    response: OpenAITextGenerationResponse;
+type FineTuneableOpenAICompletionModelType = "davinci-002" | "babbage-002";
+type FineTunedOpenAICompletionModelType = `ft:${FineTuneableOpenAICompletionModelType}:${string}:${string}:${string}`;
+export type OpenAICompletionBaseModelType = keyof typeof OPENAI_TEXT_GENERATION_MODELS;
+export type OpenAICompletionModelType = OpenAICompletionBaseModelType | FineTunedOpenAICompletionModelType;
+export declare const isOpenAICompletionModel: (model: string) => model is OpenAICompletionModelType;
+export declare const calculateOpenAICompletionCostInMillicents: ({ model, response, }: {
+    model: OpenAICompletionModelType;
+    response: OpenAICompletionResponse;
 }) => number;
-export interface OpenAITextGenerationCallSettings {
+export interface OpenAICompletionCallSettings {
     api?: ApiConfiguration;
-    model: OpenAITextGenerationModelType;
+    model: OpenAICompletionModelType;
     suffix?: string;
     maxTokens?: number;
     temperature?: number;
@@ -113,7 +113,7 @@ export interface OpenAITextGenerationCallSettings {
     bestOf?: number;
     logitBias?: Record<number, number>;
 }
-export interface OpenAITextGenerationModelSettings extends TextGenerationModelSettings, Omit<OpenAITextGenerationCallSettings, "stop" | "maxTokens"> {
+export interface OpenAICompletionModelSettings extends TextGenerationModelSettings, Omit<OpenAICompletionCallSettings, "stop" | "maxTokens"> {
     isUserIdForwardingEnabled?: boolean;
 }
 /**
@@ -122,7 +122,7 @@ export interface OpenAITextGenerationModelSettings extends TextGenerationModelSe
  * @see https://platform.openai.com/docs/api-reference/completions/create
  *
  * @example
- * const model = new OpenAITextGenerationModel({
+ * const model = new OpenAICompletionModel({
  *   model: "gpt-3.5-turbo-instruct",
  *   temperature: 0.7,
  *   maxCompletionTokens: 500,
@@ -134,17 +134,17 @@ export interface OpenAITextGenerationModelSettings extends TextGenerationModelSe
  *   "Write a short story about a robot learning to love:\n\n"
  * );
 */
-export declare class OpenAITextGenerationModel extends AbstractModel<OpenAITextGenerationModelSettings> implements TextStreamingModel<string, OpenAITextGenerationModelSettings> {
-    constructor(settings: OpenAITextGenerationModelSettings);
+export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletionModelSettings> implements TextStreamingModel<string, OpenAICompletionModelSettings> {
+    constructor(settings: OpenAICompletionModelSettings);
     readonly provider: "openai";
-    get modelName(): OpenAITextGenerationModelType;
+    get modelName(): OpenAICompletionModelType;
     readonly contextWindowSize: number;
     readonly tokenizer: TikTokenTokenizer;
     countPromptTokens(input: string): Promise<number>;
     callAPI<RESULT>(prompt: string, options: {
         responseFormat: OpenAITextResponseFormatType<RESULT>;
     } & FunctionOptions): Promise<RESULT>;
-    get settingsForEvent(): Partial<OpenAITextGenerationModelSettings>;
+    get settingsForEvent(): Partial<OpenAICompletionModelSettings>;
     doGenerateText(prompt: string, options?: FunctionOptions): Promise<{
         response: {
             object: "text_completion";
@@ -174,18 +174,18 @@ export declare class OpenAITextGenerationModel extends AbstractModel<OpenAITextG
     /**
      * Returns this model with an instruction prompt format.
      */
-    withInstructionPrompt(): PromptFormatTextStreamingModel<import("../../index.js").InstructionPrompt, string, OpenAITextGenerationModelSettings, this>;
+    withInstructionPrompt(): PromptFormatTextStreamingModel<import("../../index.js").InstructionPrompt, string, OpenAICompletionModelSettings, this>;
     /**
      * Returns this model with a chat prompt format.
      */
     withChatPrompt(options?: {
         user?: string;
         ai?: string;
-    }): PromptFormatTextStreamingModel<import("../../index.js").ChatPrompt, string, OpenAITextGenerationModelSettings, this>;
-    withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, OpenAITextGenerationModelSettings, this>;
-    withSettings(additionalSettings: Partial<OpenAITextGenerationModelSettings>): this;
+    }): PromptFormatTextStreamingModel<import("../../index.js").ChatPrompt, string, OpenAICompletionModelSettings, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, OpenAICompletionModelSettings, this>;
+    withSettings(additionalSettings: Partial<OpenAICompletionModelSettings>): this;
 }
-declare const openAITextGenerationResponseSchema: z.ZodObject<{
+declare const OpenAICompletionResponseSchema: z.ZodObject<{
     id: z.ZodString;
     object: z.ZodLiteral<"text_completion">;
     created: z.ZodNumber;
@@ -252,7 +252,7 @@ declare const openAITextGenerationResponseSchema: z.ZodObject<{
         logprobs?: any;
     }[];
 }>;
-export type OpenAITextGenerationResponse = z.infer<typeof openAITextGenerationResponseSchema>;
+export type OpenAICompletionResponse = z.infer<typeof OpenAICompletionResponseSchema>;
 export type OpenAITextResponseFormatType<T> = {
     stream: boolean;
     handler: ResponseHandler<T>;
@@ -292,7 +292,7 @@ export declare const OpenAITextResponseFormat: {
     }) => Promise<AsyncIterable<Delta<string>>>;
     };
 };
-export type OpenAITextGenerationDelta = Array<{
+export type OpenAICompletionDelta = Array<{
     content: string;
     isComplete: boolean;
     delta: string;
package/model-provider/openai/{OpenAITextGenerationModel.js → OpenAICompletionModel.js}
CHANGED
@@ -84,7 +84,7 @@ export const OPENAI_TEXT_GENERATION_MODELS = {
         completionTokenCostInMillicents: 0.04,
     },
 };
-export function getOpenAITextGenerationModelInformation(model) {
+export function getOpenAICompletionModelInformation(model) {
     // Model is already a base model:
     if (model in OPENAI_TEXT_GENERATION_MODELS) {
         const baseModelInformation = OPENAI_TEXT_GENERATION_MODELS[model];
@@ -111,11 +111,11 @@ export function getOpenAITextGenerationModelInformation(model) {
     }
     throw new Error(`Unknown OpenAI chat base model ${baseModel}.`);
 }
-export const isOpenAITextGenerationModel = (model) => model in OPENAI_TEXT_GENERATION_MODELS ||
+export const isOpenAICompletionModel = (model) => model in OPENAI_TEXT_GENERATION_MODELS ||
     model.startsWith("ft:davinci-002:") ||
     model.startsWith("ft:babbage-002:");
-export const calculateOpenAITextGenerationCostInMillicents = ({ model, response, }) => {
-    const modelInformation = getOpenAITextGenerationModelInformation(model);
+export const calculateOpenAICompletionCostInMillicents = ({ model, response, }) => {
+    const modelInformation = getOpenAICompletionModelInformation(model);
     return (response.usage.prompt_tokens *
         modelInformation.promptTokenCostInMillicents +
         response.usage.completion_tokens *
@@ -127,7 +127,7 @@ export const calculateOpenAITextGenerationCostInMillicents = ({ model, response,
  * @see https://platform.openai.com/docs/api-reference/completions/create
  *
  * @example
- * const model = new OpenAITextGenerationModel({
+ * const model = new OpenAICompletionModel({
  *   model: "gpt-3.5-turbo-instruct",
  *   temperature: 0.7,
  *   maxCompletionTokens: 500,
@@ -139,7 +139,7 @@ export const calculateOpenAITextGenerationCostInMillicents = ({ model, response,
  *   "Write a short story about a robot learning to love:\n\n"
  * );
 */
-export class OpenAITextGenerationModel extends AbstractModel {
+export class OpenAICompletionModel extends AbstractModel {
     constructor(settings) {
         super({ settings });
         Object.defineProperty(this, "provider", {
@@ -160,7 +160,7 @@ export class OpenAITextGenerationModel extends AbstractModel {
             writable: true,
             value: void 0
         });
-        const modelInformation = getOpenAITextGenerationModelInformation(this.settings.model);
+        const modelInformation = getOpenAICompletionModelInformation(this.settings.model);
         this.tokenizer = new TikTokenTokenizer({
             model: modelInformation.baseModel,
         });
@@ -189,7 +189,7 @@ export class OpenAITextGenerationModel extends AbstractModel {
         return callWithRetryAndThrottle({
             retry: callSettings.api?.retry,
             throttle: callSettings.api?.throttle,
-            call: async () => callOpenAITextGenerationAPI(callSettings),
+            call: async () => callOpenAICompletionAPI(callSettings),
         });
     }
     get settingsForEvent() {
@@ -254,10 +254,10 @@ export class OpenAITextGenerationModel extends AbstractModel {
         });
     }
     withSettings(additionalSettings) {
-        return new OpenAITextGenerationModel(Object.assign({}, this.settings, additionalSettings));
+        return new OpenAICompletionModel(Object.assign({}, this.settings, additionalSettings));
     }
 }
-const openAITextGenerationResponseSchema = z.object({
+const OpenAICompletionResponseSchema = z.object({
     id: z.string(),
     object: z.literal("text_completion"),
     created: z.number(),
@@ -274,7 +274,7 @@ const openAITextGenerationResponseSchema = z.object({
         total_tokens: z.number(),
     }),
 });
-async function callOpenAITextGenerationAPI({ api = new OpenAIApiConfiguration(), abortSignal, responseFormat, model, prompt, suffix, maxTokens, temperature, topP, n, logprobs, echo, stop, presencePenalty, frequencyPenalty, bestOf, logitBias, user, }) {
+async function callOpenAICompletionAPI({ api = new OpenAIApiConfiguration(), abortSignal, responseFormat, model, prompt, suffix, maxTokens, temperature, topP, n, logprobs, echo, stop, presencePenalty, frequencyPenalty, bestOf, logitBias, user, }) {
     // empty arrays are not allowed for stop:
     if (stop != null && Array.isArray(stop) && stop.length === 0) {
         stop = undefined;
@@ -311,7 +311,7 @@ export const OpenAITextResponseFormat = {
      */
     json: {
         stream: false,
-        handler: createJsonResponseHandler(openAITextGenerationResponseSchema),
+        handler: createJsonResponseHandler(OpenAICompletionResponseSchema),
     },
     /**
      * Returns an async iterable over the full deltas (all choices, including full current state at time of event)
package/model-provider/openai/OpenAICostCalculator.cjs
CHANGED
@@ -3,7 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.OpenAICostCalculator = void 0;
 const OpenAIImageGenerationModel_js_1 = require("./OpenAIImageGenerationModel.cjs");
 const OpenAITextEmbeddingModel_js_1 = require("./OpenAITextEmbeddingModel.cjs");
-const OpenAITextGenerationModel_js_1 = require("./OpenAITextGenerationModel.cjs");
+const OpenAICompletionModel_js_1 = require("./OpenAICompletionModel.cjs");
 const OpenAITranscriptionModel_js_1 = require("./OpenAITranscriptionModel.cjs");
 const OpenAIChatModel_js_1 = require("./chat/OpenAIChatModel.cjs");
 class OpenAICostCalculator {
@@ -50,8 +50,8 @@ class OpenAICostCalculator {
                 response: call.result.response,
             });
         }
-        if ((0, OpenAITextGenerationModel_js_1.isOpenAITextGenerationModel)(model)) {
-            return (0, OpenAITextGenerationModel_js_1.calculateOpenAITextGenerationCostInMillicents)({
+        if ((0, OpenAICompletionModel_js_1.isOpenAICompletionModel)(model)) {
+            return (0, OpenAICompletionModel_js_1.calculateOpenAICompletionCostInMillicents)({
                 model,
                 response: call.result.response,
             });

package/model-provider/openai/OpenAICostCalculator.js
CHANGED
@@ -1,6 +1,6 @@
 import { calculateOpenAIImageGenerationCostInMillicents, } from "./OpenAIImageGenerationModel.js";
 import { calculateOpenAIEmbeddingCostInMillicents, isOpenAIEmbeddingModel, } from "./OpenAITextEmbeddingModel.js";
-import { calculateOpenAITextGenerationCostInMillicents, isOpenAITextGenerationModel, } from "./OpenAITextGenerationModel.js";
+import { calculateOpenAICompletionCostInMillicents, isOpenAICompletionModel, } from "./OpenAICompletionModel.js";
 import { calculateOpenAITranscriptionCostInMillicents, } from "./OpenAITranscriptionModel.js";
 import { calculateOpenAIChatCostInMillicents, isOpenAIChatModel, } from "./chat/OpenAIChatModel.js";
 export class OpenAICostCalculator {
@@ -47,8 +47,8 @@ export class OpenAICostCalculator {
                 response: call.result.response,
             });
         }
-        if (isOpenAITextGenerationModel(model)) {
-            return calculateOpenAITextGenerationCostInMillicents({
+        if (isOpenAICompletionModel(model)) {
+            return calculateOpenAICompletionCostInMillicents({
                 model,
                 response: call.result.response,
             });
package/model-provider/openai/OpenAITranscriptionModel.d.ts
CHANGED
@@ -4,7 +4,7 @@ import { FunctionOptions } from "../../core/FunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { ResponseHandler } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
-import { TranscriptionModel, TranscriptionModelSettings } from "../../model-function/transcribe-speech/TranscriptionModel.js";
+import { TranscriptionModel, TranscriptionModelSettings } from "../../model-function/generate-transcription/TranscriptionModel.js";
 /**
  * @see https://openai.com/pricing
  */

package/model-provider/openai/TikTokenTokenizer.d.ts
CHANGED
@@ -1,6 +1,6 @@
 import { FullTokenizer } from "../../model-function/tokenize-text/Tokenizer.js";
 import { OpenAITextEmbeddingModelType } from "./OpenAITextEmbeddingModel.js";
-import { OpenAITextGenerationBaseModelType } from "./OpenAITextGenerationModel.js";
+import { OpenAICompletionBaseModelType } from "./OpenAICompletionModel.js";
 import { OpenAIChatBaseModelType } from "./chat/OpenAIChatModel.js";
 /**
  * TikToken tokenizer for OpenAI language models.
@@ -22,7 +22,7 @@ export declare class TikTokenTokenizer implements FullTokenizer {
      * Get a TikToken tokenizer for a specific model or encoding.
      */
     constructor(options: {
-        model: OpenAIChatBaseModelType | OpenAITextGenerationBaseModelType | OpenAITextEmbeddingModelType;
+        model: OpenAIChatBaseModelType | OpenAICompletionBaseModelType | OpenAITextEmbeddingModelType;
     });
     private readonly tiktoken;
     tokenize(text: string): Promise<number[]>;
package/model-provider/openai/index.cjs
CHANGED
@@ -22,7 +22,7 @@ var OpenAIError_js_1 = require("./OpenAIError.cjs");
 Object.defineProperty(exports, "OpenAIError", { enumerable: true, get: function () { return OpenAIError_js_1.OpenAIError; } });
 __exportStar(require("./OpenAIImageGenerationModel.cjs"), exports);
 __exportStar(require("./OpenAITextEmbeddingModel.cjs"), exports);
-__exportStar(require("./OpenAITextGenerationModel.cjs"), exports);
+__exportStar(require("./OpenAICompletionModel.cjs"), exports);
 __exportStar(require("./OpenAITranscriptionModel.cjs"), exports);
 __exportStar(require("./TikTokenTokenizer.cjs"), exports);
 __exportStar(require("./chat/OpenAIChatMessage.cjs"), exports);

package/model-provider/openai/index.d.ts
CHANGED
@@ -4,7 +4,7 @@ export * from "./OpenAICostCalculator.js";
 export { OpenAIError, OpenAIErrorData } from "./OpenAIError.js";
 export * from "./OpenAIImageGenerationModel.js";
 export * from "./OpenAITextEmbeddingModel.js";
-export * from "./OpenAITextGenerationModel.js";
+export * from "./OpenAICompletionModel.js";
 export * from "./OpenAITranscriptionModel.js";
 export * from "./TikTokenTokenizer.js";
 export * from "./chat/OpenAIChatMessage.js";

package/model-provider/openai/index.js
CHANGED
@@ -4,7 +4,7 @@ export * from "./OpenAICostCalculator.js";
 export { OpenAIError } from "./OpenAIError.js";
 export * from "./OpenAIImageGenerationModel.js";
 export * from "./OpenAITextEmbeddingModel.js";
-export * from "./OpenAITextGenerationModel.js";
+export * from "./OpenAICompletionModel.js";
 export * from "./OpenAITranscriptionModel.js";
 export * from "./TikTokenTokenizer.js";
 export * from "./chat/OpenAIChatMessage.js";
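
Taken together, the hunks above amount to a mechanical rename: `OpenAITextGenerationModel` and its companion types and helpers become `OpenAICompletion*`, with no visible change to settings or call shape. A hedged before/after sketch for client code, reusing the settings from the JSDoc example in the diff:

```ts
// 0.47.3:
// import { OpenAITextGenerationModel } from "modelfusion";
// const model = new OpenAITextGenerationModel({ model: "gpt-3.5-turbo-instruct", ... });

// 0.49.0 — same settings, new name:
import { OpenAICompletionModel } from "modelfusion";

const model = new OpenAICompletionModel({
  model: "gpt-3.5-turbo-instruct",
  temperature: 0.7,
  maxCompletionTokens: 500,
});
```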
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "modelfusion",
   "description": "Build multimodal applications, chatbots, and agents with JavaScript and TypeScript.",
-  "version": "0.47.3",
+  "version": "0.49.0",
   "author": "Lars Grammel",
   "license": "MIT",
   "keywords": [
@@ -57,6 +57,7 @@
     "js-tiktoken": "1.0.7",
     "nanoid": "3.3.6",
     "secure-json-parse": "2.7.0",
+    "ws": "8.14.2",
     "zod": "3.22.4",
     "zod-to-json-schema": "3.21.4"
   },
@@ -64,6 +65,7 @@
     "@tsconfig/recommended": "1.0.3",
     "@types/deep-equal": "^1.0.2",
     "@types/node": "18.11.9",
+    "@types/ws": "^8.5.7",
     "@typescript-eslint/eslint-plugin": "^6.1.0",
     "@typescript-eslint/parser": "^6.1.0",
     "copyfiles": "2.4.1",
package/ui/MediaSourceAppender.cjs
ADDED
@@ -0,0 +1,54 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.MediaSourceAppender = void 0;
+class MediaSourceAppender {
+    constructor(type) {
+        Object.defineProperty(this, "mediaSource", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: new MediaSource()
+        });
+        Object.defineProperty(this, "audioChunks", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: []
+        });
+        Object.defineProperty(this, "sourceBuffer", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.mediaSource.addEventListener("sourceopen", async () => {
+            this.sourceBuffer = this.mediaSource.addSourceBuffer(type);
+            this.sourceBuffer.addEventListener("updateend", () => {
+                this.tryAppendNextChunk();
+            });
+        });
+    }
+    tryAppendNextChunk() {
+        if (this.sourceBuffer != null &&
+            !this.sourceBuffer.updating &&
+            this.audioChunks.length > 0) {
+            this.sourceBuffer.appendBuffer(this.audioChunks.shift());
+        }
+    }
+    addBase64Data(base64Data) {
+        this.addData(Uint8Array.from(atob(base64Data), (char) => char.charCodeAt(0)).buffer);
+    }
+    addData(data) {
+        this.audioChunks.push(data);
+        this.tryAppendNextChunk();
+    }
+    close() {
+        if (this.mediaSource.readyState === "open") {
+            this.mediaSource.endOfStream();
+        }
+    }
+    get mediaSourceUrl() {
+        return URL.createObjectURL(this.mediaSource);
+    }
+}
+exports.MediaSourceAppender = MediaSourceAppender;
package/ui/MediaSourceAppender.d.ts
ADDED
@@ -0,0 +1,11 @@
+export declare class MediaSourceAppender {
+    private readonly mediaSource;
+    private readonly audioChunks;
+    private sourceBuffer?;
+    constructor(type: string);
+    private tryAppendNextChunk;
+    addBase64Data(base64Data: string): void;
+    addData(data: ArrayBuffer): void;
+    close(): void;
+    get mediaSourceUrl(): string;
+}

package/ui/MediaSourceAppender.js
ADDED
@@ -0,0 +1,50 @@
+export class MediaSourceAppender {
+    constructor(type) {
+        Object.defineProperty(this, "mediaSource", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: new MediaSource()
+        });
+        Object.defineProperty(this, "audioChunks", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: []
+        });
+        Object.defineProperty(this, "sourceBuffer", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.mediaSource.addEventListener("sourceopen", async () => {
+            this.sourceBuffer = this.mediaSource.addSourceBuffer(type);
+            this.sourceBuffer.addEventListener("updateend", () => {
+                this.tryAppendNextChunk();
+            });
+        });
+    }
+    tryAppendNextChunk() {
+        if (this.sourceBuffer != null &&
+            !this.sourceBuffer.updating &&
+            this.audioChunks.length > 0) {
+            this.sourceBuffer.appendBuffer(this.audioChunks.shift());
+        }
+    }
+    addBase64Data(base64Data) {
+        this.addData(Uint8Array.from(atob(base64Data), (char) => char.charCodeAt(0)).buffer);
+    }
+    addData(data) {
+        this.audioChunks.push(data);
+        this.tryAppendNextChunk();
+    }
+    close() {
+        if (this.mediaSource.readyState === "open") {
+            this.mediaSource.endOfStream();
+        }
+    }
+    get mediaSourceUrl() {
+        return URL.createObjectURL(this.mediaSource);
+    }
+}
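
`MediaSourceAppender` queues incoming audio chunks and appends them to a `SourceBuffer` only when it is idle, which is what the `updateend` listener and the `tryAppendNextChunk` guard implement. A sketch of how it might be wired to an `<audio>` element in the browser, assuming the class is re-exported from the package root (the root `index.*` files each gain one line in this release); the MIME type is an assumption:

```ts
import { MediaSourceAppender } from "modelfusion";

const appender = new MediaSourceAppender("audio/mpeg"); // MIME type is an assumption

// Point an <audio> element at the MediaSource-backed object URL:
const audio = new Audio();
audio.src = appender.mediaSourceUrl;
void audio.play();

// Feed chunks as they arrive, e.g. from a speech stream:
function onChunk(base64Audio: string) {
  appender.addBase64Data(base64Audio); // decodes base64 and queues the buffer
}

// Signal end-of-stream once the source is done:
function onDone() {
  appender.close();
}
```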
package/ui/index.cjs
ADDED
@@ -0,0 +1,17 @@
+"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __exportStar = (this && this.__exportStar) || function(m, exports) {
+    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+__exportStar(require("./MediaSourceAppender.cjs"), exports);

package/ui/index.d.ts
ADDED
@@ -0,0 +1 @@
+export * from "./MediaSourceAppender.js";

package/ui/index.js
ADDED
@@ -0,0 +1 @@
+export * from "./MediaSourceAppender.js";
package/util/SimpleWebSocket.cjs
ADDED
@@ -0,0 +1,41 @@
+"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || function (mod) {
+    if (mod && mod.__esModule) return mod;
+    var result = {};
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    __setModuleDefault(result, mod);
+    return result;
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.createSimpleWebSocket = void 0;
+/**
+ * Creates a simplified websocket connection. This function works in both Node.js and browser.
+ */
+async function createSimpleWebSocket(url) {
+    if (typeof window === "undefined") {
+        // Use ws library in Node.js:
+        const { default: WebSocket } = await Promise.resolve().then(() => __importStar(require("ws")));
+        return new WebSocket(url);
+    }
+    else {
+        // Use native WebSocket in browser:
+        return new WebSocket(url);
+    }
+}
+exports.createSimpleWebSocket = createSimpleWebSocket;
package/util/SimpleWebSocket.d.ts
ADDED
@@ -0,0 +1,12 @@
+export interface SimpleWebSocket {
+    send(data: string): void;
+    onmessage: ((event: MessageEvent) => void) | null;
+    onopen: ((event: Event) => void) | null;
+    onclose: ((event: CloseEvent) => void) | null;
+    onerror: ((event: Event) => void) | null;
+    close(code?: number, reason?: string): void;
+}
+/**
+ * Creates a simplified websocket connection. This function works in both Node.js and browser.
+ */
+export declare function createSimpleWebSocket(url: string): Promise<SimpleWebSocket>;

package/util/SimpleWebSocket.js
ADDED
@@ -0,0 +1,14 @@
+/**
+ * Creates a simplified websocket connection. This function works in both Node.js and browser.
+ */
+export async function createSimpleWebSocket(url) {
+    if (typeof window === "undefined") {
+        // Use ws library in Node.js:
+        const { default: WebSocket } = await import("ws");
+        return new WebSocket(url);
+    }
+    else {
+        // Use native WebSocket in browser:
+        return new WebSocket(url);
+    }
+}
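
`createSimpleWebSocket` papers over the Node.js/browser split: in Node.js it dynamically imports the new `ws` dependency (which explains the `package.json` additions above), while in the browser it falls back to the native `WebSocket`. A usage sketch against the `SimpleWebSocket` interface; the URL is a placeholder and the import path is an assumption, since the `util/` module may be internal:

```ts
import { createSimpleWebSocket } from "modelfusion/util/SimpleWebSocket.js"; // import path is an assumption

const socket = await createSimpleWebSocket("wss://example.com/stream"); // placeholder URL

socket.onopen = () => {
  socket.send(JSON.stringify({ text: "Hello, world!" }));
};
socket.onmessage = (event) => {
  console.log("received:", event.data);
};
socket.onclose = () => {
  console.log("connection closed");
};
```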
package/model-function/describe-image/ImageDescriptionEvent.d.ts
DELETED
@@ -1,18 +0,0 @@
-import { BaseModelCallFinishedEvent, BaseModelCallStartedEvent } from "../ModelCallEvent.js";
-export interface ImageDescriptionStartedEvent extends BaseModelCallStartedEvent {
-    functionType: "image-description";
-}
-export type ImageDescriptionFinishedEventResult = {
-    status: "success";
-    response: unknown;
-    value: string;
-} | {
-    status: "error";
-    error: unknown;
-} | {
-    status: "abort";
-};
-export interface ImageDescriptionFinishedEvent extends BaseModelCallFinishedEvent {
-    functionType: "image-description";
-    result: ImageDescriptionFinishedEventResult;
-}

package/model-function/describe-image/ImageDescriptionModel.d.ts
DELETED
@@ -1,10 +0,0 @@
-import { FunctionOptions } from "../../core/FunctionOptions.js";
-import { Model, ModelSettings } from "../Model.js";
-export interface ImageDescriptionModelSettings extends ModelSettings {
-}
-export interface ImageDescriptionModel<DATA, SETTINGS extends ImageDescriptionModelSettings = ImageDescriptionModelSettings> extends Model<SETTINGS> {
-    doDescribeImage: (data: DATA, options?: FunctionOptions) => PromiseLike<{
-        response: unknown;
-        description: string;
-    }>;
-}

package/model-function/describe-image/describeImage.cjs
DELETED
@@ -1,26 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.describeImage = void 0;
-const executeCall_js_1 = require("../executeCall.cjs");
-const ModelFunctionPromise_js_1 = require("../ModelFunctionPromise.cjs");
-/**
- * Describe an image as text.
- *
- * Depending on the model, this can be used for image captioning, for describing the contents of an image, or for OCR.
- */
-function describeImage(model, data, options) {
-    return new ModelFunctionPromise_js_1.ModelFunctionPromise((0, executeCall_js_1.executeCall)({
-        functionType: "image-description",
-        input: data,
-        model,
-        options,
-        generateResponse: async (options) => {
-            const result = await model.doDescribeImage(data, options);
-            return {
-                response: result.response,
-                extractedValue: result.description,
-            };
-        },
-    }));
-}
-exports.describeImage = describeImage;

package/model-function/describe-image/describeImage.d.ts
DELETED
@@ -1,9 +0,0 @@
-import { FunctionOptions } from "../../core/FunctionOptions.js";
-import { ModelFunctionPromise } from "../ModelFunctionPromise.js";
-import { ImageDescriptionModel, ImageDescriptionModelSettings } from "./ImageDescriptionModel.js";
-/**
- * Describe an image as text.
- *
- * Depending on the model, this can be used for image captioning, for describing the contents of an image, or for OCR.
- */
-export declare function describeImage<DATA>(model: ImageDescriptionModel<DATA, ImageDescriptionModelSettings>, data: DATA, options?: FunctionOptions): ModelFunctionPromise<string>;

package/model-function/describe-image/describeImage.js
DELETED
@@ -1,22 +0,0 @@
-import { executeCall } from "../executeCall.js";
-import { ModelFunctionPromise } from "../ModelFunctionPromise.js";
-/**
- * Describe an image as text.
- *
- * Depending on the model, this can be used for image captioning, for describing the contents of an image, or for OCR.
- */
-export function describeImage(model, data, options) {
-    return new ModelFunctionPromise(executeCall({
-        functionType: "image-description",
-        input: data,
-        model,
-        options,
-        generateResponse: async (options) => {
-            const result = await model.doDescribeImage(data, options);
-            return {
-                response: result.response,
-                extractedValue: result.description,
-            };
-        },
-    }));
-}