modelfusion 0.40.1 → 0.41.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +14 -7
- package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.d.ts +3 -3
- package/core/FunctionEvent.d.ts +1 -1
- package/model-function/AsyncIterableResultPromise.d.ts +1 -1
- package/model-function/Delta.d.ts +8 -0
- package/model-function/ModelCallEvent.d.ts +1 -1
- package/model-function/ModelCallMetadata.d.ts +13 -0
- package/model-function/describe-image/ImageDescriptionEvent.d.ts +1 -1
- package/model-function/describe-image/ImageDescriptionModel.d.ts +6 -4
- package/model-function/describe-image/describeImage.cjs +7 -2
- package/model-function/describe-image/describeImage.d.ts +2 -2
- package/model-function/describe-image/describeImage.js +7 -2
- package/model-function/embed/EmbeddingEvent.d.ts +1 -1
- package/model-function/embed/EmbeddingModel.d.ts +6 -4
- package/model-function/embed/embed.cjs +16 -11
- package/model-function/embed/embed.d.ts +3 -3
- package/model-function/embed/embed.js +16 -11
- package/model-function/executeCall.cjs +26 -30
- package/model-function/executeCall.d.ts +19 -28
- package/model-function/executeCall.js +26 -30
- package/model-function/generate-image/ImageGenerationEvent.d.ts +1 -1
- package/model-function/generate-image/ImageGenerationModel.d.ts +6 -4
- package/model-function/generate-image/generateImage.cjs +7 -2
- package/model-function/generate-image/generateImage.d.ts +2 -2
- package/model-function/generate-image/generateImage.js +7 -2
- package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +6 -5
- package/model-function/generate-structure/StructureFromTextGenerationModel.d.ts +7 -5
- package/model-function/generate-structure/StructureFromTextGenerationModel.js +6 -5
- package/model-function/generate-structure/StructureGenerationEvent.d.ts +1 -1
- package/model-function/generate-structure/StructureGenerationModel.d.ts +15 -18
- package/model-function/generate-structure/StructureOrTextGenerationModel.d.ts +19 -17
- package/model-function/generate-structure/generateStructure.cjs +10 -8
- package/model-function/generate-structure/generateStructure.d.ts +2 -2
- package/model-function/generate-structure/generateStructure.js +10 -8
- package/model-function/generate-structure/generateStructureOrText.cjs +15 -8
- package/model-function/generate-structure/generateStructureOrText.d.ts +4 -4
- package/model-function/generate-structure/generateStructureOrText.js +15 -8
- package/model-function/generate-structure/streamStructure.cjs +4 -16
- package/model-function/generate-structure/streamStructure.d.ts +3 -7
- package/model-function/generate-structure/streamStructure.js +4 -16
- package/model-function/generate-text/TextGenerationEvent.d.ts +1 -1
- package/model-function/generate-text/TextGenerationModel.d.ts +18 -19
- package/model-function/generate-text/generateText.cjs +8 -9
- package/model-function/generate-text/generateText.d.ts +2 -2
- package/model-function/generate-text/generateText.js +8 -9
- package/model-function/generate-text/streamText.cjs +8 -21
- package/model-function/generate-text/streamText.d.ts +3 -7
- package/model-function/generate-text/streamText.js +8 -21
- package/model-function/index.cjs +2 -2
- package/model-function/index.d.ts +2 -2
- package/model-function/index.js +2 -2
- package/model-function/synthesize-speech/SpeechSynthesisEvent.d.ts +1 -1
- package/model-function/synthesize-speech/SpeechSynthesisModel.d.ts +3 -3
- package/model-function/synthesize-speech/synthesizeSpeech.cjs +7 -2
- package/model-function/synthesize-speech/synthesizeSpeech.d.ts +2 -2
- package/model-function/synthesize-speech/synthesizeSpeech.js +7 -2
- package/model-function/transcribe-speech/TranscriptionEvent.d.ts +1 -1
- package/model-function/transcribe-speech/TranscriptionModel.d.ts +6 -4
- package/model-function/transcribe-speech/transcribe.cjs +7 -2
- package/model-function/transcribe-speech/transcribe.d.ts +2 -2
- package/model-function/transcribe-speech/transcribe.js +7 -2
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +14 -18
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +11 -9
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +14 -18
- package/model-provider/cohere/CohereTextEmbeddingModel.cjs +13 -16
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +12 -10
- package/model-provider/cohere/CohereTextEmbeddingModel.js +13 -16
- package/model-provider/cohere/CohereTextGenerationModel.cjs +25 -28
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +24 -22
- package/model-provider/cohere/CohereTextGenerationModel.js +25 -28
- package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.cjs +10 -17
- package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.d.ts +2 -2
- package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.js +10 -17
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.cjs +13 -16
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.d.ts +9 -7
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.js +13 -16
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +19 -25
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +8 -6
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +19 -25
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +18 -24
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +10 -8
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +18 -24
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +13 -16
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +8 -6
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +13 -16
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +27 -33
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +62 -60
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +27 -33
- package/model-provider/lmnt/LmntSpeechSynthesisModel.cjs +7 -12
- package/model-provider/lmnt/LmntSpeechSynthesisModel.d.ts +2 -2
- package/model-provider/lmnt/LmntSpeechSynthesisModel.js +7 -12
- package/model-provider/openai/OpenAIImageGenerationModel.cjs +8 -16
- package/model-provider/openai/OpenAIImageGenerationModel.d.ts +11 -11
- package/model-provider/openai/OpenAIImageGenerationModel.js +8 -16
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +18 -24
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +18 -16
- package/model-provider/openai/OpenAITextEmbeddingModel.js +18 -24
- package/model-provider/openai/OpenAITextGenerationModel.cjs +19 -26
- package/model-provider/openai/OpenAITextGenerationModel.d.ts +31 -33
- package/model-provider/openai/OpenAITextGenerationModel.js +19 -26
- package/model-provider/openai/OpenAITranscriptionModel.cjs +19 -28
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +27 -7
- package/model-provider/openai/OpenAITranscriptionModel.js +19 -28
- package/model-provider/openai/chat/OpenAIChatModel.cjs +76 -85
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +127 -50
- package/model-provider/openai/chat/OpenAIChatModel.js +77 -86
- package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +4 -3
- package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts +2 -2
- package/model-provider/openai/chat/OpenAIChatStreamIterable.js +2 -1
- package/model-provider/stability/StabilityImageGenerationModel.cjs +16 -21
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +13 -11
- package/model-provider/stability/StabilityImageGenerationModel.js +16 -21
- package/package.json +1 -1
- package/prompt/PromptFormatTextGenerationModel.cjs +2 -18
- package/prompt/PromptFormatTextGenerationModel.d.ts +14 -10
- package/prompt/PromptFormatTextGenerationModel.js +2 -18
- package/prompt/PromptFormatTextStreamingModel.cjs +31 -0
- package/prompt/PromptFormatTextStreamingModel.d.ts +13 -0
- package/prompt/PromptFormatTextStreamingModel.js +27 -0
- package/prompt/chat/trimChatPrompt.d.ts +2 -2
- package/prompt/index.cjs +1 -0
- package/prompt/index.d.ts +1 -0
- package/prompt/index.js +1 -0
- package/retriever/Retriever.d.ts +3 -6
- package/retriever/retrieve.cjs +2 -2
- package/retriever/retrieve.d.ts +3 -3
- package/retriever/retrieve.js +2 -2
- package/tool/executeTool.cjs +2 -2
- package/tool/executeTool.js +2 -2
- package/tool/useTool.cjs +2 -4
- package/tool/useTool.d.ts +2 -2
- package/tool/useTool.js +2 -4
- package/tool/useToolOrGenerateText.d.ts +2 -2
- package/util/SafeResult.d.ts +1 -1
- package/util/runSafe.cjs +1 -1
- package/util/runSafe.js +1 -1
- package/vector-index/VectorIndexRetriever.cjs +0 -7
- package/vector-index/VectorIndexRetriever.d.ts +5 -5
- package/vector-index/VectorIndexRetriever.js +0 -7
- package/vector-index/upsertIntoVectorIndex.d.ts +4 -4
- package/model-function/DeltaEvent.d.ts +0 -7
- package/model-function/ModelFunctionOptions.d.ts +0 -4
- /package/model-function/{DeltaEvent.cjs → Delta.cjs} +0 -0
- /package/model-function/{DeltaEvent.js → Delta.js} +0 -0
- /package/model-function/{ModelFunctionOptions.cjs → ModelCallMetadata.cjs} +0 -0
- /package/model-function/{ModelFunctionOptions.js → ModelCallMetadata.js} +0 -0
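Across the hunks below, one refactor dominates: the per-model `generateXxxResponse`/`extractXxx`/`extractUsage` method triples collapse into single `doGenerateText`, `doStreamText`, `doGenerateImage`, and `doEmbedValues` methods that return the raw provider response together with the extracted value; `ModelFunctionOptions<SETTINGS>` gives way to a plain `FunctionOptions`; and prompt-formatted streaming models move to the new `PromptFormatTextStreamingModel`. A minimal sketch of the new result shapes, reconstructed from the `.d.ts` hunks in this diff (the alias names are illustrative and not exported by the package):

```ts
// Result shapes of the 0.41.0 do* methods; only the field names are taken
// from this diff, the type aliases are illustrative.
type DoGenerateTextResult<RESPONSE> = {
  response: RESPONSE; // raw provider response, kept next to the value
  text: string;
  usage: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
};

type DoEmbedValuesResult<RESPONSE> = {
  response: RESPONSE;
  embeddings: number[][];
};

type DoGenerateImageResult<RESPONSE> = {
  response: RESPONSE;
  base64Image: string;
};
```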
package/model-provider/llamacpp/LlamaCppTextGenerationModel.js

@@ -5,7 +5,7 @@ import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postTo
 import { AsyncQueue } from "../../event-source/AsyncQueue.js";
 import { parseEventSourceStream } from "../../event-source/parseEventSourceStream.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
-import {
+import { PromptFormatTextStreamingModel } from "../../prompt/PromptFormatTextStreamingModel.js";
 import { LlamaCppApiConfiguration } from "./LlamaCppApiConfiguration.js";
 import { failedLlamaCppCallResponseHandler } from "./LlamaCppError.js";
 import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
@@ -33,25 +33,19 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
         return this.settings.contextWindowSize;
     }
     async callAPI(prompt, options) {
-        const { run, settings, responseFormat } = options;
-        const combinedSettings = {
-            ...this.settings,
-            ...settings,
-        };
-        const callSettings = {
-            ...combinedSettings,
-            // mapping
-            nPredict: combinedSettings.maxCompletionTokens,
-            stop: combinedSettings.stopSequences,
-            // other
-            abortSignal: run?.abortSignal,
-            prompt,
-            responseFormat,
-        };
         return callWithRetryAndThrottle({
-            retry:
-            throttle:
-            call: async () => callLlamaCppTextGenerationAPI(
+            retry: this.settings.api?.retry,
+            throttle: this.settings.api?.throttle,
+            call: async () => callLlamaCppTextGenerationAPI({
+                ...this.settings,
+                // mapping
+                nPredict: this.settings.maxCompletionTokens,
+                stop: this.settings.stopSequences,
+                // other
+                abortSignal: options.run?.abortSignal,
+                prompt,
+                responseFormat: options.responseFormat,
+            }),
         });
     }
     get settingsForEvent() {
@@ -81,16 +81,22 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
         const tokens = await this.tokenizer.tokenize(prompt);
         return tokens.length;
     }
-
-
+    async doGenerateText(prompt, options) {
+        const response = await this.callAPI(prompt, {
             ...options,
             responseFormat: LlamaCppTextGenerationResponseFormat.json,
         });
+        return {
+            response,
+            text: response.content,
+            usage: {
+                promptTokens: response.tokens_evaluated,
+                completionTokens: response.tokens_predicted,
+                totalTokens: response.tokens_evaluated + response.tokens_predicted,
+            },
+        };
     }
-
-        return response.content;
-    }
-    generateDeltaStreamResponse(prompt, options) {
+    doStreamText(prompt, options) {
         return this.callAPI(prompt, {
             ...options,
             responseFormat: LlamaCppTextGenerationResponseFormat.deltaIterable,
@@ -100,7 +100,7 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
         return fullDelta.delta;
     }
     withPromptFormat(promptFormat) {
-        return new
+        return new PromptFormatTextStreamingModel({
             model: this.withSettings({
                 stopSequences: [
                     ...(this.settings.stopSequences ?? []),
@@ -110,13 +110,6 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
             promptFormat,
         });
     }
-    extractUsage(response) {
-        return {
-            promptTokens: response.tokens_evaluated,
-            completionTokens: response.tokens_predicted,
-            totalTokens: response.tokens_evaluated + response.tokens_predicted,
-        };
-    }
     withSettings(additionalSettings) {
         return new LlamaCppTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
     }
@@ -235,6 +228,7 @@ async function createLlamaCppFullDeltaIterableQueue(stream) {
                     isComplete: eventData.stop,
                     delta: eventData.content,
                 },
+                valueDelta: eventData.content,
             });
             if (eventData.stop) {
                 queue.close();
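From the caller's side, the LlamaCpp model now produces text and token usage in one step. A hedged usage sketch: the root `"modelfusion"` export path and the settings literal are assumptions, while `doGenerateText` and its result fields come from the hunks above.

```ts
import { LlamaCppTextGenerationModel } from "modelfusion"; // export path assumed

async function demo() {
  // Settings shown are illustrative.
  const model = new LlamaCppTextGenerationModel({ maxCompletionTokens: 128 });

  // doGenerateText returns the raw response plus text and usage (per this diff).
  const { text, usage } = await model.doGenerateText("Write a haiku:");
  console.log(text, `(${usage.totalTokens} tokens)`);
}
```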

package/model-provider/lmnt/LmntSpeechSynthesisModel.cjs

@@ -25,19 +25,14 @@ class LmntSpeechSynthesisModel extends AbstractModel_js_1.AbstractModel {
         return this.settings.voice;
     }
     async callAPI(text, options) {
-        const run = options?.run;
-        const settings = options?.settings;
-        const callSettings = {
-            // copied settings:
-            ...this.settings,
-            ...settings,
-            abortSignal: run?.abortSignal,
-            text,
-        };
         return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
-            retry:
-            throttle:
-            call: async () => callLmntTextToSpeechAPI(
+            retry: this.settings.api?.retry,
+            throttle: this.settings.api?.throttle,
+            call: async () => callLmntTextToSpeechAPI({
+                ...this.settings,
+                abortSignal: options?.run?.abortSignal,
+                text,
+            }),
         });
     }
     get settingsForEvent() {
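This `callAPI` rewrite repeats in every provider in this release: the per-call `settings` merge is gone, and the retry/throttle handlers come from the model's `ApiConfiguration` (`this.settings.api`). The standalone sketch below mirrors the `{ retry, throttle, call }` argument shape visible above; the default handlers and the retry-wraps-throttle composition are assumptions, since the real `callWithRetryAndThrottle` is not part of this diff.

```ts
type RetryFunction = <OUTPUT>(fn: () => PromiseLike<OUTPUT>) => Promise<OUTPUT>;
type ThrottleFunction = <OUTPUT>(fn: () => PromiseLike<OUTPUT>) => Promise<OUTPUT>;

// Mirrors the call-site shape used throughout this release; the
// composition below is an assumption, not the package's implementation.
async function callWithRetryAndThrottle<OUTPUT>({
  retry = (fn) => Promise.resolve(fn()),
  throttle = (fn) => Promise.resolve(fn()),
  call,
}: {
  retry?: RetryFunction;
  throttle?: ThrottleFunction;
  call: () => PromiseLike<OUTPUT>;
}): Promise<OUTPUT> {
  return retry(() => throttle(call));
}
```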

package/model-provider/lmnt/LmntSpeechSynthesisModel.d.ts

@@ -1,7 +1,7 @@
 /// <reference types="node" />
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
-import {
+import { FunctionOptions } from "../../core/FunctionOptions.js";
 import { SpeechSynthesisModel, SpeechSynthesisModelSettings } from "../../model-function/synthesize-speech/SpeechSynthesisModel.js";
 export interface LmntSpeechSynthesisModelSettings extends SpeechSynthesisModelSettings {
     api?: ApiConfiguration;
@@ -21,6 +21,6 @@ export declare class LmntSpeechSynthesisModel extends AbstractModel<LmntSpeechSy
     get modelName(): string;
     private callAPI;
     get settingsForEvent(): Partial<LmntSpeechSynthesisModelSettings>;
-    generateSpeechResponse(text: string, options?:
+    generateSpeechResponse(text: string, options?: FunctionOptions): Promise<Buffer>;
     withSettings(additionalSettings: Partial<LmntSpeechSynthesisModelSettings>): this;
 }
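`FunctionOptions` (from `core/FunctionOptions.js`) replaces the former `ModelFunctionOptions<SETTINGS>` throughout these typings. Its declaration is not part of this diff; the partial shape below is inferred purely from the accesses that do appear in the hunks (`run?.abortSignal`, `run?.userId`):

```ts
// Inferred and deliberately incomplete: only the members this diff touches.
type FunctionOptions = {
  run?: {
    abortSignal?: AbortSignal;
    userId?: string;
    // ...further run-context fields not visible in this diff
  };
};
```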

package/model-provider/lmnt/LmntSpeechSynthesisModel.js

@@ -22,19 +22,14 @@ export class LmntSpeechSynthesisModel extends AbstractModel {
         return this.settings.voice;
     }
     async callAPI(text, options) {
-        const run = options?.run;
-        const settings = options?.settings;
-        const callSettings = {
-            // copied settings:
-            ...this.settings,
-            ...settings,
-            abortSignal: run?.abortSignal,
-            text,
-        };
         return callWithRetryAndThrottle({
-            retry:
-            throttle:
-            call: async () => callLmntTextToSpeechAPI(
+            retry: this.settings.api?.retry,
+            throttle: this.settings.api?.throttle,
+            call: async () => callLmntTextToSpeechAPI({
+                ...this.settings,
+                abortSignal: options?.run?.abortSignal,
+                text,
+            }),
         });
     }
     get settingsForEvent() {

package/model-provider/openai/OpenAIImageGenerationModel.cjs

@@ -46,17 +46,10 @@ class OpenAIImageGenerationModel extends AbstractModel_js_1.AbstractModel {
     }
     async callAPI(prompt, options) {
         const run = options?.run;
-        const settings = options?.settings;
         const responseFormat = options?.responseFormat;
-        const combinedSettings = {
-            ...this.settings,
-            ...settings,
-        };
         const callSettings = {
+            ...this.settings,
             user: this.settings.isUserIdForwardingEnabled ? run?.userId : undefined,
-            // Copied settings:
-            ...combinedSettings,
-            // other settings:
             abortSignal: run?.abortSignal,
             responseFormat,
             prompt,
@@ -74,16 +74,15 @@ class OpenAIImageGenerationModel extends AbstractModel_js_1.AbstractModel {
         ];
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
-
-
+    async doGenerateImage(prompt, options) {
+        const response = await this.callAPI(prompt, {
             responseFormat: exports.OpenAIImageGenerationResponseFormat.base64Json,
-
-            settings: options?.settings,
-            run: options?.run,
+            ...options,
         });
-
-
-
+        return {
+            response,
+            base64Image: response.data[0].b64_json,
+        };
     }
     withSettings(additionalSettings) {
         return new OpenAIImageGenerationModel(Object.assign({}, this.settings, additionalSettings));

package/model-provider/openai/OpenAIImageGenerationModel.d.ts

@@ -1,9 +1,9 @@
 import { z } from "zod";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
-import { ModelFunctionOptions } from "../../model-function/ModelFunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { ImageGenerationModel, ImageGenerationModelSettings } from "../../model-function/generate-image/ImageGenerationModel.js";
 import { ResponseHandler } from "../../core/api/postToApi.js";
+import { FunctionOptions } from "../../core/FunctionOptions.js";
 export interface OpenAIImageGenerationCallSettings {
     n?: number;
     size?: "256x256" | "512x512" | "1024x1024";
@@ -26,23 +26,23 @@ export interface OpenAIImageGenerationSettings extends ImageGenerationModelSetti
  * "the wicked witch of the west in the style of early 19th century painting"
  * );
  */
-export declare class OpenAIImageGenerationModel extends AbstractModel<OpenAIImageGenerationSettings> implements ImageGenerationModel<string,
+export declare class OpenAIImageGenerationModel extends AbstractModel<OpenAIImageGenerationSettings> implements ImageGenerationModel<string, OpenAIImageGenerationSettings> {
     constructor(settings: OpenAIImageGenerationSettings);
     readonly provider: "openai";
     readonly modelName: null;
     callAPI<RESULT>(prompt: string, options: {
         responseFormat: OpenAIImageGenerationResponseFormatType<RESULT>;
-    } &
-        user?: string;
-    }>>): Promise<RESULT>;
+    } & FunctionOptions): Promise<RESULT>;
     get settingsForEvent(): Partial<OpenAIImageGenerationSettings>;
-
-
-
-
-
+    doGenerateImage(prompt: string, options?: FunctionOptions): Promise<{
+        response: {
+            data: {
+                b64_json: string;
+            }[];
+            created: number;
+        };
+        base64Image: string;
     }>;
-    extractBase64Image(response: OpenAIImageGenerationBase64JsonResponse): string;
     withSettings(additionalSettings: Partial<OpenAIImageGenerationSettings>): this;
 }
 export type OpenAIImageGenerationResponseFormatType<T> = {
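Following the declaration above, image generation now hands back the base64 payload together with the raw response. A usage sketch; the settings literal and the root export are assumptions, while the method name, result fields, and example prompt come from this file:

```ts
import { OpenAIImageGenerationModel } from "modelfusion"; // export path assumed

async function paint() {
  const model = new OpenAIImageGenerationModel({ size: "512x512" }); // illustrative settings
  const { response, base64Image } = await model.doGenerateImage(
    "the wicked witch of the west in the style of early 19th century painting"
  );
  // base64Image is response.data[0].b64_json, per the implementation hunks.
  console.log(response.created, Buffer.from(base64Image, "base64").length);
}
```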

package/model-provider/openai/OpenAIImageGenerationModel.js

@@ -42,17 +42,10 @@ export class OpenAIImageGenerationModel extends AbstractModel {
     }
     async callAPI(prompt, options) {
         const run = options?.run;
-        const settings = options?.settings;
         const responseFormat = options?.responseFormat;
-        const combinedSettings = {
-            ...this.settings,
-            ...settings,
-        };
         const callSettings = {
+            ...this.settings,
             user: this.settings.isUserIdForwardingEnabled ? run?.userId : undefined,
-            // Copied settings:
-            ...combinedSettings,
-            // other settings:
             abortSignal: run?.abortSignal,
             responseFormat,
             prompt,
@@ -70,16 +70,15 @@ export class OpenAIImageGenerationModel extends AbstractModel {
         ];
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
-
-
+    async doGenerateImage(prompt, options) {
+        const response = await this.callAPI(prompt, {
             responseFormat: OpenAIImageGenerationResponseFormat.base64Json,
-
-            settings: options?.settings,
-            run: options?.run,
+            ...options,
         });
-
-
-
+        return {
+            response,
+            base64Image: response.data[0].b64_json,
+        };
     }
     withSettings(additionalSettings) {
         return new OpenAIImageGenerationModel(Object.assign({}, this.settings, additionalSettings));

package/model-provider/openai/OpenAITextEmbeddingModel.cjs

@@ -5,10 +5,10 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.OpenAITextEmbeddingModel = exports.calculateOpenAIEmbeddingCostInMillicents = exports.isOpenAIEmbeddingModel = exports.OPENAI_TEXT_EMBEDDING_MODELS = void 0;
 const zod_1 = __importDefault(require("zod"));
-const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
-const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
 const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../core/api/postToApi.cjs");
+const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
+const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
 const OpenAIApiConfiguration_js_1 = require("./OpenAIApiConfiguration.cjs");
 const OpenAIError_js_1 = require("./OpenAIError.cjs");
 const TikTokenTokenizer_js_1 = require("./TikTokenTokenizer.cjs");
@@ -91,37 +91,31 @@ class OpenAITextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
         return (0, countTokens_js_1.countTokens)(this.tokenizer, input);
     }
     async callAPI(texts, options) {
-        const run = options?.run;
-        const settings = options?.settings;
-        const combinedSettings = {
-            ...this.settings,
-            ...settings,
-        };
-        const callSettings = {
-            user: this.settings.isUserIdForwardingEnabled ? run?.userId : undefined,
-            // Copied settings:
-            ...combinedSettings,
-            // other settings:
-            abortSignal: run?.abortSignal,
-            input: texts,
-        };
         return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
-            retry:
-            throttle:
-            call: async () => callOpenAITextEmbeddingAPI(
+            retry: this.settings.api?.retry,
+            throttle: this.settings.api?.throttle,
+            call: async () => callOpenAITextEmbeddingAPI({
+                ...this.settings,
+                user: this.settings.isUserIdForwardingEnabled
+                    ? options?.run?.userId
+                    : undefined,
+                abortSignal: options?.run?.abortSignal,
+                input: texts,
+            }),
         });
     }
     get settingsForEvent() {
         return {};
     }
-
+    async doEmbedValues(texts, options) {
         if (texts.length > this.maxValuesPerCall) {
             throw new Error(`The OpenAI embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
         }
-
-
-
-
+        const response = await this.callAPI(texts, options);
+        return {
+            response,
+            embeddings: response.data.map((data) => data.embedding),
+        };
     }
     withSettings(additionalSettings) {
         return new OpenAITextEmbeddingModel(Object.assign({}, this.settings, additionalSettings));

package/model-provider/openai/OpenAITextEmbeddingModel.d.ts

@@ -1,7 +1,7 @@
 import z from "zod";
-import {
-import { ModelFunctionOptions } from "../../model-function/ModelFunctionOptions.js";
+import { FunctionOptions } from "../../core/FunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
+import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { EmbeddingModel, EmbeddingModelSettings } from "../../model-function/embed/EmbeddingModel.js";
 import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
 export declare const OPENAI_TEXT_EMBEDDING_MODELS: {
@@ -36,7 +36,7 @@ export interface OpenAITextEmbeddingModelSettings extends EmbeddingModelSettings
  * ]
  * );
  */
-export declare class OpenAITextEmbeddingModel extends AbstractModel<OpenAITextEmbeddingModelSettings> implements EmbeddingModel<string,
+export declare class OpenAITextEmbeddingModel extends AbstractModel<OpenAITextEmbeddingModelSettings> implements EmbeddingModel<string, OpenAITextEmbeddingModelSettings> {
     constructor(settings: OpenAITextEmbeddingModelSettings);
     readonly provider: "openai";
     get modelName(): "text-embedding-ada-002";
@@ -45,22 +45,24 @@ export declare class OpenAITextEmbeddingModel extends AbstractModel<OpenAITextEm
     readonly tokenizer: TikTokenTokenizer;
     readonly contextWindowSize: number;
     countTokens(input: string): Promise<number>;
-    callAPI(texts: Array<string>, options?:
+    callAPI(texts: Array<string>, options?: FunctionOptions): Promise<OpenAITextEmbeddingResponse>;
     get settingsForEvent(): Partial<OpenAITextEmbeddingModelSettings>;
-
-
-
-
-
-
+    doEmbedValues(texts: string[], options?: FunctionOptions): Promise<{
+        response: {
+            object: "list";
+            model: string;
+            usage: {
+                prompt_tokens: number;
+                total_tokens: number;
+            };
+            data: {
+                object: "embedding";
+                embedding: number[];
+                index: number;
+            }[];
         };
-
-        object: "embedding";
-        embedding: number[];
-        index: number;
-    }[];
+        embeddings: number[][];
     }>;
-    extractEmbeddings(response: OpenAITextEmbeddingResponse): number[][];
     withSettings(additionalSettings: OpenAITextEmbeddingModelSettings): this;
 }
 declare const openAITextEmbeddingResponseSchema: z.ZodObject<{
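The embedding model follows the same pattern: `doEmbedValues` returns the raw response next to the extracted vectors. A usage sketch (construction details and the root export are assumptions; note the `maxValuesPerCall` guard in the implementation hunks, which throws when too many texts are passed at once):

```ts
import { OpenAITextEmbeddingModel } from "modelfusion"; // export path assumed

async function embed(texts: string[]): Promise<number[][]> {
  const model = new OpenAITextEmbeddingModel({}); // settings shape assumed
  // Throws if texts.length exceeds model.maxValuesPerCall (see the hunks above).
  const { embeddings, response } = await model.doEmbedValues(texts);
  console.log(response.usage.total_tokens);
  return embeddings;
}
```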

package/model-provider/openai/OpenAITextEmbeddingModel.js

@@ -1,8 +1,8 @@
 import z from "zod";
-import { AbstractModel } from "../../model-function/AbstractModel.js";
-import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
 import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
+import { AbstractModel } from "../../model-function/AbstractModel.js";
+import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
 import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
 import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
 import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
@@ -83,37 +83,31 @@ export class OpenAITextEmbeddingModel extends AbstractModel {
         return countTokens(this.tokenizer, input);
     }
     async callAPI(texts, options) {
-        const run = options?.run;
-        const settings = options?.settings;
-        const combinedSettings = {
-            ...this.settings,
-            ...settings,
-        };
-        const callSettings = {
-            user: this.settings.isUserIdForwardingEnabled ? run?.userId : undefined,
-            // Copied settings:
-            ...combinedSettings,
-            // other settings:
-            abortSignal: run?.abortSignal,
-            input: texts,
-        };
         return callWithRetryAndThrottle({
-            retry:
-            throttle:
-            call: async () => callOpenAITextEmbeddingAPI(
+            retry: this.settings.api?.retry,
+            throttle: this.settings.api?.throttle,
+            call: async () => callOpenAITextEmbeddingAPI({
+                ...this.settings,
+                user: this.settings.isUserIdForwardingEnabled
+                    ? options?.run?.userId
+                    : undefined,
+                abortSignal: options?.run?.abortSignal,
+                input: texts,
+            }),
         });
     }
     get settingsForEvent() {
         return {};
     }
-
+    async doEmbedValues(texts, options) {
         if (texts.length > this.maxValuesPerCall) {
             throw new Error(`The OpenAI embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
         }
-
-
-
-
+        const response = await this.callAPI(texts, options);
+        return {
+            response,
+            embeddings: response.data.map((data) => data.embedding),
+        };
     }
     withSettings(additionalSettings) {
         return new OpenAITextEmbeddingModel(Object.assign({}, this.settings, additionalSettings));

package/model-provider/openai/OpenAITextGenerationModel.cjs

@@ -12,7 +12,7 @@ const AsyncQueue_js_1 = require("../../event-source/AsyncQueue.cjs");
 const parseEventSourceStream_js_1 = require("../../event-source/parseEventSourceStream.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
-const
+const PromptFormatTextStreamingModel_js_1 = require("../../prompt/PromptFormatTextStreamingModel.cjs");
 const OpenAIApiConfiguration_js_1 = require("./OpenAIApiConfiguration.cjs");
 const OpenAIError_js_1 = require("./OpenAIError.cjs");
 const TikTokenTokenizer_js_1 = require("./TikTokenTokenizer.cjs");
@@ -181,18 +181,14 @@ class OpenAITextGenerationModel extends AbstractModel_js_1.AbstractModel {
         return (0, countTokens_js_1.countTokens)(this.tokenizer, input);
     }
     async callAPI(prompt, options) {
-        const { run,
-        const combinedSettings = {
-            ...this.settings,
-            ...settings,
-        };
+        const { run, responseFormat } = options;
         const callSettings = {
             user: this.settings.isUserIdForwardingEnabled ? run?.userId : undefined,
             // Copied settings:
-            ...
+            ...this.settings,
             // map to OpenAI API names:
-            stop:
-            maxTokens:
+            stop: this.settings.stopSequences,
+            maxTokens: this.settings.maxCompletionTokens,
             // other settings:
             abortSignal: run?.abortSignal,
             prompt,
@@ -221,26 +221,29 @@ class OpenAITextGenerationModel extends AbstractModel_js_1.AbstractModel {
         ];
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
-
-
+    async doGenerateText(prompt, options) {
+        const response = await this.callAPI(prompt, {
             ...options,
             responseFormat: exports.OpenAITextResponseFormat.json,
         });
+        return {
+            response,
+            text: response.choices[0].text,
+            usage: {
+                promptTokens: response.usage.prompt_tokens,
+                completionTokens: response.usage.completion_tokens,
+                totalTokens: response.usage.total_tokens,
+            },
+        };
     }
-
-        return response.choices[0].text;
-    }
-    generateDeltaStreamResponse(prompt, options) {
+    doStreamText(prompt, options) {
         return this.callAPI(prompt, {
             ...options,
             responseFormat: exports.OpenAITextResponseFormat.deltaIterable,
         });
     }
-    extractTextDelta(fullDelta) {
-        return fullDelta[0].delta;
-    }
     withPromptFormat(promptFormat) {
-        return new
+        return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
             model: this.withSettings({
                 stopSequences: [
                     ...(this.settings.stopSequences ?? []),
@@ -250,13 +250,6 @@ class OpenAITextGenerationModel extends AbstractModel_js_1.AbstractModel {
             promptFormat,
         });
     }
-    extractUsage(response) {
-        return {
-            promptTokens: response.usage.prompt_tokens,
-            completionTokens: response.usage.completion_tokens,
-            totalTokens: response.usage.total_tokens,
-        };
-    }
     withSettings(additionalSettings) {
         return new OpenAITextGenerationModel(Object.assign({}, this.settings, additionalSettings));
     }
@@ -385,6 +377,7 @@ async function createOpenAITextFullDeltaIterableQueue(stream) {
             queue.push({
                 type: "delta",
                 fullDelta: streamDeltaDeepCopy,
+                valueDelta: streamDeltaDeepCopy[0].delta,
             });
         }
     }
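Both streaming queues in this diff (`createLlamaCppFullDeltaIterableQueue` and `createOpenAITextFullDeltaIterableQueue`) now push a plain-string `valueDelta` next to the provider-specific `fullDelta`; that is what allows the per-model `extractTextDelta` methods to be deleted. A sketch of a stream consumer, with the event type inferred from the two `queue.push` calls above (other event variants are assumptions):

```ts
// Inferred from the queue.push calls in this diff.
type TextDeltaEvent = { type: "delta"; fullDelta: unknown; valueDelta: string };

async function collectText(
  deltas: AsyncIterable<TextDeltaEvent>
): Promise<string> {
  let text = "";
  for await (const event of deltas) {
    if (event.type === "delta") {
      text += event.valueDelta; // uniform across providers after this change
    }
  }
  return text;
}
```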