modelfusion 0.21.0 → 0.23.0
This diff compares the publicly released contents of the package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
- package/README.md +1 -0
- package/composed-function/summarize/SummarizationFunction.d.ts +1 -1
- package/composed-function/summarize/summarizeRecursively.d.ts +1 -1
- package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.d.ts +1 -1
- package/{run → core}/DefaultRun.cjs +1 -1
- package/{run → core}/DefaultRun.js +1 -1
- package/{run → core}/FunctionEvent.d.ts +30 -6
- package/core/FunctionOptions.d.ts +33 -0
- package/core/GlobalFunctionLogging.cjs +12 -0
- package/core/GlobalFunctionLogging.d.ts +3 -0
- package/core/GlobalFunctionLogging.js +7 -0
- package/core/getFunctionCallLogger.cjs +74 -0
- package/core/getFunctionCallLogger.d.ts +3 -0
- package/core/getFunctionCallLogger.js +70 -0
- package/{run → core}/index.cjs +1 -1
- package/{run → core}/index.d.ts +1 -1
- package/{run → core}/index.js +1 -1
- package/index.cjs +1 -1
- package/index.d.ts +1 -1
- package/index.js +1 -1
- package/model-function/AbstractModel.d.ts +1 -0
- package/model-function/Model.d.ts +7 -2
- package/model-function/ModelCallEvent.d.ts +39 -5
- package/model-function/ModelFunctionOptions.d.ts +1 -1
- package/model-function/SuccessfulModelCall.cjs +4 -2
- package/model-function/SuccessfulModelCall.d.ts +1 -1
- package/model-function/SuccessfulModelCall.js +4 -2
- package/model-function/embed-text/TextEmbeddingEvent.d.ts +12 -12
- package/model-function/embed-text/TextEmbeddingModel.d.ts +1 -1
- package/model-function/embed-text/embedText.cjs +6 -61
- package/model-function/embed-text/embedText.d.ts +3 -3
- package/model-function/embed-text/embedText.js +6 -61
- package/model-function/executeCall.cjs +50 -30
- package/model-function/executeCall.d.ts +16 -22
- package/model-function/executeCall.js +48 -28
- package/model-function/generate-image/ImageGenerationEvent.d.ts +9 -11
- package/model-function/generate-image/generateImage.cjs +2 -27
- package/model-function/generate-image/generateImage.d.ts +1 -1
- package/model-function/generate-image/generateImage.js +2 -27
- package/model-function/generate-json/JsonGenerationEvent.d.ts +14 -11
- package/model-function/generate-json/JsonGenerationModel.d.ts +13 -0
- package/model-function/generate-json/JsonOrTextGenerationModel.d.ts +23 -0
- package/model-function/generate-json/JsonTextGenerationModel.cjs +3 -0
- package/model-function/generate-json/JsonTextGenerationModel.d.ts +6 -5
- package/model-function/generate-json/JsonTextGenerationModel.js +3 -0
- package/model-function/generate-json/generateJson.cjs +3 -27
- package/model-function/generate-json/generateJson.d.ts +2 -2
- package/model-function/generate-json/generateJson.js +3 -27
- package/model-function/generate-json/generateJsonOrText.cjs +3 -27
- package/model-function/generate-json/generateJsonOrText.d.ts +2 -2
- package/model-function/generate-json/generateJsonOrText.js +3 -27
- package/model-function/generate-text/TextGenerationEvent.d.ts +14 -11
- package/model-function/generate-text/TextGenerationModel.d.ts +5 -0
- package/model-function/generate-text/TextStreamingEvent.d.ts +4 -17
- package/model-function/generate-text/generateText.cjs +3 -27
- package/model-function/generate-text/generateText.d.ts +1 -1
- package/model-function/generate-text/generateText.js +3 -27
- package/model-function/generate-text/streamText.cjs +31 -33
- package/model-function/generate-text/streamText.d.ts +5 -5
- package/model-function/generate-text/streamText.js +31 -33
- package/model-function/index.cjs +3 -2
- package/model-function/index.d.ts +3 -2
- package/model-function/index.js +3 -2
- package/model-function/synthesize-speech/SpeechSynthesisEvent.d.ts +12 -11
- package/model-function/synthesize-speech/SpeechSynthesisModel.d.ts +1 -1
- package/model-function/synthesize-speech/synthesizeSpeech.cjs +2 -28
- package/model-function/synthesize-speech/synthesizeSpeech.d.ts +1 -1
- package/model-function/synthesize-speech/synthesizeSpeech.js +2 -28
- package/model-function/transcribe-speech/TranscriptionEvent.d.ts +9 -11
- package/model-function/transcribe-speech/TranscriptionModel.d.ts +1 -1
- package/model-function/transcribe-speech/transcribe.cjs +2 -27
- package/model-function/transcribe-speech/transcribe.d.ts +1 -1
- package/model-function/transcribe-speech/transcribe.js +2 -27
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +9 -0
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -0
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +9 -0
- package/model-provider/cohere/CohereTextEmbeddingModel.cjs +6 -0
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +4 -3
- package/model-provider/cohere/CohereTextEmbeddingModel.js +6 -0
- package/model-provider/cohere/CohereTextGenerationModel.cjs +20 -0
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +2 -1
- package/model-provider/cohere/CohereTextGenerationModel.js +20 -0
- package/model-provider/cohere/CohereTokenizer.d.ts +1 -1
- package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.cjs +8 -0
- package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.d.ts +1 -0
- package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.js +8 -0
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +7 -0
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -0
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +7 -0
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +16 -0
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +1 -0
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +16 -0
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +6 -0
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +1 -0
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +6 -0
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +31 -0
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +6 -0
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +31 -0
- package/model-provider/llamacpp/LlamaCppTokenizer.d.ts +1 -1
- package/model-provider/openai/OpenAIImageGenerationModel.cjs +8 -0
- package/model-provider/openai/OpenAIImageGenerationModel.d.ts +1 -0
- package/model-provider/openai/OpenAIImageGenerationModel.js +8 -0
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +5 -0
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +12 -11
- package/model-provider/openai/OpenAITextEmbeddingModel.js +5 -0
- package/model-provider/openai/OpenAITextGenerationModel.cjs +24 -0
- package/model-provider/openai/OpenAITextGenerationModel.d.ts +14 -8
- package/model-provider/openai/OpenAITextGenerationModel.js +24 -0
- package/model-provider/openai/OpenAITranscriptionModel.cjs +7 -0
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +2 -0
- package/model-provider/openai/OpenAITranscriptionModel.js +7 -0
- package/model-provider/openai/chat/OpenAIChatModel.cjs +20 -0
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +17 -11
- package/model-provider/openai/chat/OpenAIChatModel.js +20 -0
- package/model-provider/openai/chat/OpenAIChatPrompt.d.ts +2 -2
- package/model-provider/stability/StabilityImageGenerationModel.cjs +15 -0
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -0
- package/model-provider/stability/StabilityImageGenerationModel.js +15 -0
- package/package.json +1 -1
- package/prompt/PromptFormatTextGenerationModel.cjs +3 -0
- package/prompt/PromptFormatTextGenerationModel.d.ts +1 -0
- package/prompt/PromptFormatTextGenerationModel.js +3 -0
- package/text-chunk/split/SplitFunction.d.ts +1 -1
- package/tool/ExecuteToolEvent.d.ts +7 -16
- package/tool/Tool.d.ts +1 -1
- package/tool/WebSearchTool.cjs +25 -0
- package/tool/WebSearchTool.d.ts +56 -1
- package/tool/WebSearchTool.js +25 -0
- package/tool/executeTool.cjs +17 -8
- package/tool/executeTool.d.ts +1 -1
- package/tool/executeTool.js +17 -8
- package/tool/useTool.d.ts +2 -2
- package/tool/useToolOrGenerateText.d.ts +2 -2
- package/vector-index/VectorIndex.d.ts +1 -1
- package/vector-index/memory/MemoryVectorIndex.d.ts +1 -1
- package/vector-index/pinecone/PineconeVectorIndex.d.ts +1 -1
- package/model-function/generate-json/GenerateJsonModel.d.ts +0 -8
- package/model-function/generate-json/GenerateJsonOrTextModel.d.ts +0 -18
- package/run/ConsoleLogger.cjs +0 -9
- package/run/ConsoleLogger.d.ts +0 -5
- package/run/ConsoleLogger.js +0 -5
- package/run/FunctionOptions.d.ts +0 -19
- /package/{run → core}/DefaultRun.d.ts +0 -0
- /package/{run → core}/FunctionEvent.cjs +0 -0
- /package/{run → core}/FunctionEvent.js +0 -0
- /package/{run → core}/FunctionEventSource.cjs +0 -0
- /package/{run → core}/FunctionEventSource.d.ts +0 -0
- /package/{run → core}/FunctionEventSource.js +0 -0
- /package/{run → core}/FunctionObserver.cjs +0 -0
- /package/{run → core}/FunctionObserver.d.ts +0 -0
- /package/{run → core}/FunctionObserver.js +0 -0
- /package/{run → core}/FunctionOptions.cjs +0 -0
- /package/{run → core}/FunctionOptions.js +0 -0
- /package/{run → core}/GlobalFunctionObservers.cjs +0 -0
- /package/{run → core}/GlobalFunctionObservers.d.ts +0 -0
- /package/{run → core}/GlobalFunctionObservers.js +0 -0
- /package/{run → core}/Run.cjs +0 -0
- /package/{run → core}/Run.d.ts +0 -0
- /package/{run → core}/Run.js +0 -0
- /package/{run → core}/Vector.cjs +0 -0
- /package/{run → core}/Vector.d.ts +0 -0
- /package/{run → core}/Vector.js +0 -0
- /package/model-function/generate-json/{GenerateJsonModel.cjs → JsonGenerationModel.cjs} +0 -0
- /package/model-function/generate-json/{GenerateJsonModel.js → JsonGenerationModel.js} +0 -0
- /package/model-function/generate-json/{GenerateJsonOrTextModel.cjs → JsonOrTextGenerationModel.cjs} +0 -0
- /package/model-function/generate-json/{GenerateJsonOrTextModel.js → JsonOrTextGenerationModel.js} +0 -0
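
The file list above already tells most of the story: the `run/` directory moved to `core/`, the `GenerateJsonModel`/`GenerateJsonOrTextModel` types were renamed to `JsonGenerationModel`/`JsonOrTextGenerationModel`, and `run/ConsoleLogger` was removed in favor of the new `core/GlobalFunctionLogging` and `core/getFunctionCallLogger` files. A minimal migration sketch for code that used deep imports follows; the subpaths below are assumptions derived from the renames above, and imports from the package root re-export these symbols, so they should be unaffected:

```ts
// before (0.21.0) — deep imports into the old run/ directory (assumed paths):
// import { DefaultRun } from "modelfusion/run/DefaultRun.js";
// import { GenerateJsonModel } from "modelfusion/model-function/generate-json/GenerateJsonModel.js";

// after (0.23.0) — run/ became core/, and the JSON model types were renamed:
// import { DefaultRun } from "modelfusion/core/DefaultRun.js";
// import { JsonGenerationModel } from "modelfusion/model-function/generate-json/JsonGenerationModel.js";
```
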
@@ -40,10 +40,10 @@ export interface CohereTextGenerationModelSettings extends TextGenerationModelSe
     p?: number;
     frequencyPenalty?: number;
     presencePenalty?: number;
-    stopSequences?: string[];
     returnLikelihoods?: "GENERATION" | "ALL" | "NONE";
     logitBias?: Record<string, number>;
     truncate?: "NONE" | "START" | "END";
+    cohereStopSequences?: string[];
 }
 /**
  * Create a text generation model that calls the Cohere Co.Generate API.

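This is a breaking rename: the Cohere-specific `stopSequences` parameter becomes `cohereStopSequences`, freeing the standard `stopSequences` setting (inherited from `TextGenerationModelSettings`) to drive Cohere's `endSequences`, as the implementation hunks below show. A usage sketch; the model name is illustrative, not taken from this diff:

```ts
const model = new CohereTextGenerationModel({
  model: "command-nightly", // illustrative model name
  // standard cross-provider setting; mapped to Cohere's endSequences:
  stopSequences: ["\n\n"],
  // renamed from stopSequences in 0.21.0; passed through as Cohere's
  // native stopSequences parameter:
  cohereStopSequences: ["<end>"],
});
```
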
@@ -73,6 +73,7 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
     callAPI<RESPONSE>(prompt: string, options: {
         responseFormat: CohereTextGenerationResponseFormatType<RESPONSE>;
     } & ModelFunctionOptions<CohereTextGenerationModelSettings>): Promise<RESPONSE>;
+    get settingsForEvent(): Partial<CohereTextGenerationModelSettings>;
     generateTextResponse(prompt: string, options?: ModelFunctionOptions<CohereTextGenerationModelSettings>): Promise<{
         prompt: string;
         id: string;

@@ -96,6 +96,8 @@ export class CohereTextGenerationModel extends AbstractModel {
             // to exclude stop tokens from the generated text
             endSequences: combinedSettings.stopSequences,
             maxTokens: combinedSettings.maxCompletionTokens,
+            // mapped name because of conflict with stopSequences:
+            stopSequences: combinedSettings.cohereStopSequences,
             abortSignal: run?.abortSignal,
             prompt,
             responseFormat,

@@ -106,6 +108,24 @@ export class CohereTextGenerationModel extends AbstractModel {
             call: async () => callCohereTextGenerationAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        const eventSettingProperties = [
+            "maxCompletionTokens",
+            "stopSequences",
+            "baseUrl",
+            "numGenerations",
+            "temperature",
+            "k",
+            "p",
+            "frequencyPenalty",
+            "presencePenalty",
+            "returnLikelihoods",
+            "logitBias",
+            "truncate",
+            "cohereStopSequences",
+        ];
+        return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+    }
     generateTextResponse(prompt, options) {
         return this.callAPI(prompt, {
             ...options,

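The `settingsForEvent` getter added here recurs across every provider in this release: each model exposes the subset of its settings that is safe and useful to attach to function events for the new logging/observer infrastructure, either by whitelisting keys, as above, or by returning an explicit object. Reduced to its core, the whitelist variant is (a standalone sketch, not library code):

```ts
function pickEventSettings<SETTINGS extends Record<string, unknown>>(
  settings: SETTINGS,
  eventSettingProperties: string[]
): Partial<SETTINGS> {
  // keep only the whitelisted keys; everything else (API keys, retry and
  // throttle functions, etc.) stays out of the event payload
  return Object.fromEntries(
    Object.entries(settings).filter(([key]) =>
      eventSettingProperties.includes(key)
    )
  ) as Partial<SETTINGS>;
}
```
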
@@ -1,6 +1,6 @@
 import z from "zod";
 import { FullTokenizer } from "../../model-function/tokenize-text/Tokenizer.js";
-import { Run } from "../../
+import { Run } from "../../core/Run.js";
 import { RetryFunction } from "../../util/api/RetryFunction.js";
 import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
 import { CohereTextGenerationModelType } from "./CohereTextGenerationModel.js";

@@ -49,6 +49,14 @@ class ElevenLabsSpeechSynthesisModel extends AbstractModel_js_1.AbstractModel {
             }),
         });
     }
+    get settingsForEvent() {
+        return {
+            baseUrl: this.settings.baseUrl,
+            model: this.settings.model,
+            voice: this.settings.voice,
+            voiceSettings: this.settings.voiceSettings,
+        };
+    }
     generateSpeechResponse(text, options) {
         return this.callAPI(text, options);
     }

@@ -24,6 +24,7 @@ export declare class ElevenLabsSpeechSynthesisModel extends AbstractModel<Eleven
     readonly modelName: null;
     private get apiKey();
     private callAPI;
+    get settingsForEvent(): Partial<ElevenLabsSpeechSynthesisModelSettings>;
     generateSpeechResponse(text: string, options?: ModelFunctionOptions<ElevenLabsSpeechSynthesisModelSettings> | undefined): Promise<Buffer>;
     withSettings(additionalSettings: Partial<ElevenLabsSpeechSynthesisModelSettings>): this;
 }

@@ -46,6 +46,14 @@ export class ElevenLabsSpeechSynthesisModel extends AbstractModel {
             }),
         });
     }
+    get settingsForEvent() {
+        return {
+            baseUrl: this.settings.baseUrl,
+            model: this.settings.model,
+            voice: this.settings.voice,
+            voiceSettings: this.settings.voiceSettings,
+        };
+    }
     generateSpeechResponse(text, options) {
         return this.callAPI(text, options);
     }

@@ -104,6 +104,13 @@ class HuggingFaceTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
             call: async () => callHuggingFaceTextGenerationAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        return {
+            baseUrl: this.settings.baseUrl,
+            embeddingDimensions: this.settings.embeddingDimensions,
+            options: this.settings.options,
+        };
+    }
     generateEmbeddingResponse(texts, options) {
         return this.callAPI(texts, options);
     }

@@ -47,6 +47,7 @@ export declare class HuggingFaceTextEmbeddingModel extends AbstractModel<Hugging
     readonly tokenizer: undefined;
     private get apiKey();
     callAPI(texts: Array<string>, options?: ModelFunctionOptions<HuggingFaceTextEmbeddingModelSettings>): Promise<HuggingFaceTextEmbeddingResponse>;
+    get settingsForEvent(): Partial<HuggingFaceTextEmbeddingModelSettings>;
     readonly countPromptTokens: undefined;
     generateEmbeddingResponse(texts: string[], options?: ModelFunctionOptions<HuggingFaceTextEmbeddingModelSettings>): Promise<number[][]>;
     extractEmbeddings(response: HuggingFaceTextEmbeddingResponse): number[][];

@@ -98,6 +98,13 @@ export class HuggingFaceTextEmbeddingModel extends AbstractModel {
             call: async () => callHuggingFaceTextGenerationAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        return {
+            baseUrl: this.settings.baseUrl,
+            embeddingDimensions: this.settings.embeddingDimensions,
+            options: this.settings.options,
+        };
+    }
     generateEmbeddingResponse(texts, options) {
         return this.callAPI(texts, options);
     }

@@ -102,6 +102,22 @@ class HuggingFaceTextGenerationModel extends AbstractModel_js_1.AbstractModel {
             call: async () => callHuggingFaceTextGenerationAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        const eventSettingProperties = [
+            "stopSequences",
+            "maxCompletionTokens",
+            "baseUrl",
+            "topK",
+            "topP",
+            "temperature",
+            "repetitionPenalty",
+            "maxTime",
+            "numReturnSequences",
+            "doSample",
+            "options",
+        ];
+        return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+    }
     generateTextResponse(prompt, options) {
         return this.callAPI(prompt, options);
     }

@@ -50,6 +50,7 @@ export declare class HuggingFaceTextGenerationModel extends AbstractModel<Huggin
     readonly tokenizer: undefined;
     private get apiKey();
     callAPI(prompt: string, options?: ModelFunctionOptions<HuggingFaceTextGenerationModelSettings>): Promise<HuggingFaceTextGenerationResponse>;
+    get settingsForEvent(): Partial<HuggingFaceTextGenerationModelSettings>;
     readonly countPromptTokens: undefined;
     generateTextResponse(prompt: string, options?: ModelFunctionOptions<HuggingFaceTextGenerationModelSettings>): Promise<{
         generated_text: string;

@@ -96,6 +96,22 @@ export class HuggingFaceTextGenerationModel extends AbstractModel {
             call: async () => callHuggingFaceTextGenerationAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        const eventSettingProperties = [
+            "stopSequences",
+            "maxCompletionTokens",
+            "baseUrl",
+            "topK",
+            "topP",
+            "temperature",
+            "repetitionPenalty",
+            "maxTime",
+            "numReturnSequences",
+            "doSample",
+            "options",
+        ];
+        return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+    }
     generateTextResponse(prompt, options) {
         return this.callAPI(prompt, options);
     }

@@ -72,6 +72,12 @@ class LlamaCppTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
             call: async () => callLlamaCppEmbeddingAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        return {
+            baseUrl: this.settings.baseUrl,
+            embeddingDimensions: this.settings.embeddingDimensions,
+        };
+    }
     generateEmbeddingResponse(texts, options) {
         return this.callAPI(texts, options);
     }

@@ -24,6 +24,7 @@ export declare class LlamaCppTextEmbeddingModel extends AbstractModel<LlamaCppTe
     private readonly tokenizer;
     tokenize(text: string): Promise<number[]>;
     callAPI(texts: Array<string>, options?: ModelFunctionOptions<LlamaCppTextEmbeddingModelSettings>): Promise<LlamaCppTextEmbeddingResponse>;
+    get settingsForEvent(): Partial<LlamaCppTextEmbeddingModelSettings>;
     generateEmbeddingResponse(texts: string[], options?: ModelFunctionOptions<LlamaCppTextEmbeddingModelSettings>): Promise<{
         embedding: number[];
     }>;

@@ -66,6 +66,12 @@ export class LlamaCppTextEmbeddingModel extends AbstractModel {
             call: async () => callLlamaCppEmbeddingAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        return {
+            baseUrl: this.settings.baseUrl,
+            embeddingDimensions: this.settings.embeddingDimensions,
+        };
+    }
     generateEmbeddingResponse(texts, options) {
         return this.callAPI(texts, options);
     }

@@ -61,6 +61,30 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
             call: async () => callLlamaCppTextGenerationAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        const eventSettingProperties = [
+            "maxCompletionTokens",
+            "stopSequences",
+            "baseUrl",
+            "contextWindowSize",
+            "temperature",
+            "topK",
+            "topP",
+            "nKeep",
+            "tfsZ",
+            "typicalP",
+            "repeatPenalty",
+            "repeatLastN",
+            "penalizeNl",
+            "mirostat",
+            "mirostatTau",
+            "mirostatEta",
+            "seed",
+            "ignoreEos",
+            "logitBias",
+        ];
+        return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+    }
     async countPromptTokens(prompt) {
         const tokens = await this.tokenizer.tokenize(prompt);
         return tokens.length;

@@ -91,6 +115,13 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
             promptFormat,
         });
     }
+    extractUsage(response) {
+        return {
+            promptTokens: response.tokens_evaluated,
+            completionTokens: response.tokens_predicted,
+            totalTokens: response.tokens_evaluated + response.tokens_predicted,
+        };
+    }
     withSettings(additionalSettings) {
         return new LlamaCppTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
     }

@@ -47,6 +47,7 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
     callAPI<RESPONSE>(prompt: string, options: {
         responseFormat: LlamaCppTextGenerationResponseFormatType<RESPONSE>;
     } & ModelFunctionOptions<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): Promise<RESPONSE>;
+    get settingsForEvent(): Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>;
     countPromptTokens(prompt: string): Promise<number>;
     generateTextResponse(prompt: string, options?: ModelFunctionOptions<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): Promise<{
         model: string;

@@ -101,6 +102,11 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
     generateDeltaStreamResponse(prompt: string, options?: ModelFunctionOptions<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): Promise<AsyncIterable<DeltaEvent<LlamaCppTextGenerationDelta>>>;
     extractTextDelta(fullDelta: LlamaCppTextGenerationDelta): string | undefined;
     withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextGenerationModel<INPUT_PROMPT, string, LlamaCppTextGenerationResponse, LlamaCppTextGenerationDelta, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+    extractUsage(response: LlamaCppTextGenerationResponse): {
+        promptTokens: number;
+        completionTokens: number;
+        totalTokens: number;
+    };
     withSettings(additionalSettings: Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
 }
 declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{

@@ -55,6 +55,30 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
             call: async () => callLlamaCppTextGenerationAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        const eventSettingProperties = [
+            "maxCompletionTokens",
+            "stopSequences",
+            "baseUrl",
+            "contextWindowSize",
+            "temperature",
+            "topK",
+            "topP",
+            "nKeep",
+            "tfsZ",
+            "typicalP",
+            "repeatPenalty",
+            "repeatLastN",
+            "penalizeNl",
+            "mirostat",
+            "mirostatTau",
+            "mirostatEta",
+            "seed",
+            "ignoreEos",
+            "logitBias",
+        ];
+        return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+    }
     async countPromptTokens(prompt) {
         const tokens = await this.tokenizer.tokenize(prompt);
         return tokens.length;

@@ -85,6 +109,13 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
             promptFormat,
         });
     }
+    extractUsage(response) {
+        return {
+            promptTokens: response.tokens_evaluated,
+            completionTokens: response.tokens_predicted,
+            totalTokens: response.tokens_evaluated + response.tokens_predicted,
+        };
+    }
     withSettings(additionalSettings) {
         return new LlamaCppTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
     }

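`extractUsage` is the second recurring addition in this release: text generation models normalize their provider-specific token accounting (llama.cpp's `tokens_evaluated`/`tokens_predicted` here, OpenAI's `usage.prompt_tokens`/`completion_tokens`/`total_tokens` further down) into one shape. From the `.d.ts` hunks, that shape is:

```ts
// normalized usage shape returned by the new extractUsage implementations
// (the interface name is illustrative; the fields are taken from this diff)
interface TokenUsage {
  promptTokens: number;
  completionTokens: number;
  totalTokens: number;
}
```
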
@@ -1,6 +1,6 @@
 import z from "zod";
 import { BasicTokenizer } from "../../model-function/tokenize-text/Tokenizer.js";
-import { Run } from "../../
+import { Run } from "../../core/Run.js";
 import { RetryFunction } from "../../util/api/RetryFunction.js";
 import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
 export interface LlamaCppTokenizerSettings {

@@ -68,6 +68,14 @@ class OpenAIImageGenerationModel extends AbstractModel_js_1.AbstractModel {
             call: async () => callOpenAIImageGenerationAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        const eventSettingProperties = [
+            "baseUrl",
+            "n",
+            "size",
+        ];
+        return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+    }
     generateImageResponse(prompt, options) {
         return this.callAPI(prompt, {
             responseFormat: exports.OpenAIImageGenerationResponseFormat.base64Json,

@@ -35,6 +35,7 @@ export declare class OpenAIImageGenerationModel extends AbstractModel<OpenAIImag
     } & ModelFunctionOptions<Partial<OpenAIImageGenerationCallSettings & OpenAIModelSettings & {
         user?: string;
     }>>): Promise<RESULT>;
+    get settingsForEvent(): Partial<OpenAIImageGenerationSettings>;
     generateImageResponse(prompt: string, options?: ModelFunctionOptions<OpenAIImageGenerationSettings>): Promise<{
         data: {
             b64_json: string;

@@ -64,6 +64,14 @@ export class OpenAIImageGenerationModel extends AbstractModel {
             call: async () => callOpenAIImageGenerationAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        const eventSettingProperties = [
+            "baseUrl",
+            "n",
+            "size",
+        ];
+        return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+    }
     generateImageResponse(prompt, options) {
         return this.callAPI(prompt, {
             responseFormat: OpenAIImageGenerationResponseFormat.base64Json,

@@ -112,6 +112,11 @@ class OpenAITextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
             call: async () => callOpenAITextEmbeddingAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        return {
+            baseUrl: this.settings.baseUrl,
+        };
+    }
     generateEmbeddingResponse(texts, options) {
         if (texts.length > this.maxTextsPerCall) {
             throw new Error(`The OpenAI embedding API only supports ${this.maxTextsPerCall} texts per API call.`);

@@ -51,18 +51,19 @@ export declare class OpenAITextEmbeddingModel extends AbstractModel<OpenAITextEm
     private get apiKey();
     countTokens(input: string): Promise<number>;
     callAPI(text: string, options?: ModelFunctionOptions<OpenAITextEmbeddingModelSettings>): Promise<OpenAITextEmbeddingResponse>;
+    get settingsForEvent(): Partial<OpenAITextEmbeddingModelSettings>;
     generateEmbeddingResponse(texts: string[], options?: ModelFunctionOptions<OpenAITextEmbeddingModelSettings>): Promise<{
         object: "list";
         model: string;
+        usage: {
+            prompt_tokens: number;
+            total_tokens: number;
+        };
         data: {
             object: "embedding";
             embedding: number[];
             index: number;
         }[];
-        usage: {
-            prompt_tokens: number;
-            total_tokens: number;
-        };
     }>;
     extractEmbeddings(response: OpenAITextEmbeddingResponse): number[][];
     withSettings(additionalSettings: OpenAITextEmbeddingModelSettings): this;

@@ -96,27 +97,27 @@ declare const openAITextEmbeddingResponseSchema: z.ZodObject<{
 }, "strip", z.ZodTypeAny, {
     object: "list";
     model: string;
+    usage: {
+        prompt_tokens: number;
+        total_tokens: number;
+    };
     data: {
         object: "embedding";
         embedding: number[];
         index: number;
     }[];
+}, {
+    object: "list";
+    model: string;
     usage: {
         prompt_tokens: number;
         total_tokens: number;
     };
-}, {
-    object: "list";
-    model: string;
     data: {
         object: "embedding";
         embedding: number[];
         index: number;
     }[];
-    usage: {
-        prompt_tokens: number;
-        total_tokens: number;
-    };
 }>;
 export type OpenAITextEmbeddingResponse = z.infer<typeof openAITextEmbeddingResponseSchema>;
 export {};

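The large moved blocks in these schema typings are churn, not behavior: reordering keys in a zod object schema reorders the keys in the inferred type that the compiler prints into the `.d.ts`, while validation is unaffected. A minimal illustration:

```ts
import z from "zod";

// same validator either way; only the printed inferred type differs
const usage = z.object({ prompt_tokens: z.number(), total_tokens: z.number() });
const before = z.object({ data: z.array(z.number()), usage });
const after = z.object({ usage, data: z.array(z.number()) });
```
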
@@ -104,6 +104,11 @@ export class OpenAITextEmbeddingModel extends AbstractModel {
             call: async () => callOpenAITextEmbeddingAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        return {
+            baseUrl: this.settings.baseUrl,
+        };
+    }
     generateEmbeddingResponse(texts, options) {
         if (texts.length > this.maxTextsPerCall) {
             throw new Error(`The OpenAI embedding API only supports ${this.maxTextsPerCall} texts per API call.`);

@@ -155,6 +155,23 @@ class OpenAITextGenerationModel extends AbstractModel_js_1.AbstractModel {
             call: async () => callOpenAITextGenerationAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        const eventSettingProperties = [
+            "maxCompletionTokens",
+            "stopSequences",
+            "baseUrl",
+            "suffix",
+            "temperature",
+            "topP",
+            "n",
+            "logprobs",
+            "echo",
+            "presencePenalty",
+            "frequencyPenalty",
+            "bestOf",
+        ];
+        return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+    }
     generateTextResponse(prompt, options) {
         return this.callAPI(prompt, {
             ...options,

@@ -181,6 +198,13 @@ class OpenAITextGenerationModel extends AbstractModel_js_1.AbstractModel {
             promptFormat,
         });
     }
+    extractUsage(response) {
+        return {
+            promptTokens: response.usage.prompt_tokens,
+            completionTokens: response.usage.completion_tokens,
+            totalTokens: response.usage.total_tokens,
+        };
+    }
     withSettings(additionalSettings) {
         return new OpenAITextGenerationModel(Object.assign({}, this.settings, additionalSettings));
     }

@@ -120,16 +120,17 @@ export declare class OpenAITextGenerationModel extends AbstractModel<OpenAITextG
     } & ModelFunctionOptions<Partial<OpenAIImageGenerationCallSettings & OpenAIModelSettings & {
         user?: string;
     }>>): Promise<RESULT>;
+    get settingsForEvent(): Partial<OpenAITextGenerationModelSettings>;
     generateTextResponse(prompt: string, options?: ModelFunctionOptions<OpenAITextGenerationModelSettings>): Promise<{
         object: "text_completion";
         model: string;
-        id: string;
-        created: number;
         usage: {
             prompt_tokens: number;
             total_tokens: number;
             completion_tokens: number;
         };
+        id: string;
+        created: number;
         choices: {
             text: string;
             finish_reason: string;

|
|
141
142
|
generateDeltaStreamResponse(prompt: string, options?: ModelFunctionOptions<OpenAITextGenerationModelSettings>): Promise<AsyncIterable<DeltaEvent<OpenAITextGenerationDelta>>>;
|
142
143
|
extractTextDelta(fullDelta: OpenAITextGenerationDelta): string | undefined;
|
143
144
|
withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextGenerationModel<INPUT_PROMPT, string, OpenAITextGenerationResponse, OpenAITextGenerationDelta, OpenAITextGenerationModelSettings, this>;
|
145
|
+
extractUsage(response: OpenAITextGenerationResponse): {
|
146
|
+
promptTokens: number;
|
147
|
+
completionTokens: number;
|
148
|
+
totalTokens: number;
|
149
|
+
};
|
144
150
|
withSettings(additionalSettings: Partial<OpenAITextGenerationModelSettings>): this;
|
145
151
|
}
|
146
152
|
declare const openAITextGenerationResponseSchema: z.ZodObject<{
|
@@ -180,13 +186,13 @@ declare const openAITextGenerationResponseSchema: z.ZodObject<{
 }, "strip", z.ZodTypeAny, {
     object: "text_completion";
     model: string;
-    id: string;
-    created: number;
     usage: {
         prompt_tokens: number;
         total_tokens: number;
         completion_tokens: number;
     };
+    id: string;
+    created: number;
     choices: {
         text: string;
         finish_reason: string;

|
|
196
202
|
}, {
|
197
203
|
object: "text_completion";
|
198
204
|
model: string;
|
199
|
-
id: string;
|
200
|
-
created: number;
|
201
205
|
usage: {
|
202
206
|
prompt_tokens: number;
|
203
207
|
total_tokens: number;
|
204
208
|
completion_tokens: number;
|
205
209
|
};
|
210
|
+
id: string;
|
211
|
+
created: number;
|
206
212
|
choices: {
|
207
213
|
text: string;
|
208
214
|
finish_reason: string;
|
@@ -224,13 +230,13 @@ export declare const OpenAITextResponseFormat: {
|
|
224
230
|
handler: ResponseHandler<{
|
225
231
|
object: "text_completion";
|
226
232
|
model: string;
|
227
|
-
id: string;
|
228
|
-
created: number;
|
229
233
|
usage: {
|
230
234
|
prompt_tokens: number;
|
231
235
|
total_tokens: number;
|
232
236
|
completion_tokens: number;
|
233
237
|
};
|
238
|
+
id: string;
|
239
|
+
created: number;
|
234
240
|
choices: {
|
235
241
|
text: string;
|
236
242
|
finish_reason: string;
|
@@ -147,6 +147,23 @@ export class OpenAITextGenerationModel extends AbstractModel {
             call: async () => callOpenAITextGenerationAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        const eventSettingProperties = [
+            "maxCompletionTokens",
+            "stopSequences",
+            "baseUrl",
+            "suffix",
+            "temperature",
+            "topP",
+            "n",
+            "logprobs",
+            "echo",
+            "presencePenalty",
+            "frequencyPenalty",
+            "bestOf",
+        ];
+        return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+    }
     generateTextResponse(prompt, options) {
         return this.callAPI(prompt, {
             ...options,

@@ -173,6 +190,13 @@ export class OpenAITextGenerationModel extends AbstractModel {
             promptFormat,
         });
     }
+    extractUsage(response) {
+        return {
+            promptTokens: response.usage.prompt_tokens,
+            completionTokens: response.usage.completion_tokens,
+            totalTokens: response.usage.total_tokens,
+        };
+    }
     withSettings(additionalSettings) {
         return new OpenAITextGenerationModel(Object.assign({}, this.settings, additionalSettings));
     }

@@ -93,6 +93,13 @@ class OpenAITranscriptionModel extends AbstractModel_js_1.AbstractModel {
             call: async () => callOpenAITranscriptionAPI(callSettings),
         });
     }
+    getEventSettingProperties() {
+        return ["baseUrl"];
+    }
+    get settingsForEvent() {
+        const eventSettingProperties = ["baseUrl"];
+        return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+    }
     withSettings(additionalSettings) {
        return new OpenAITranscriptionModel(Object.assign({}, this.settings, additionalSettings));
     }
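
Taken together with the new `core/GlobalFunctionLogging` and `core/getFunctionCallLogger` files in the list above, `settingsForEvent` and `extractUsage` feed a built-in logging facility that replaces the removed `run/ConsoleLogger`. The exact exports are not shown in this excerpt; a hedged sketch, assuming the setter is named `setGlobalFunctionLogging` and accepts a log-format string:

```ts
import { setGlobalFunctionLogging } from "modelfusion"; // assumed export name

// assumed format value; model calls would then be logged with their
// settingsForEvent payload and normalized usage attached to each event
setGlobalFunctionLogging("detailed-object");
```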