modelfusion 0.40.0 → 0.41.0
This diff covers the publicly released contents of the two package versions as they appear in their public registry. It is provided for informational purposes only and reflects the changes between the published versions.
- package/README.md +14 -7
- package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.d.ts +3 -3
- package/core/FunctionEvent.d.ts +1 -1
- package/model-function/AsyncIterableResultPromise.d.ts +1 -1
- package/model-function/Delta.d.ts +8 -0
- package/model-function/ModelCallEvent.d.ts +1 -1
- package/model-function/ModelCallMetadata.d.ts +13 -0
- package/model-function/describe-image/ImageDescriptionEvent.d.ts +1 -1
- package/model-function/describe-image/ImageDescriptionModel.d.ts +6 -4
- package/model-function/describe-image/describeImage.cjs +7 -2
- package/model-function/describe-image/describeImage.d.ts +2 -2
- package/model-function/describe-image/describeImage.js +7 -2
- package/model-function/embed/EmbeddingEvent.d.ts +1 -1
- package/model-function/embed/EmbeddingModel.d.ts +6 -4
- package/model-function/embed/embed.cjs +16 -11
- package/model-function/embed/embed.d.ts +3 -3
- package/model-function/embed/embed.js +16 -11
- package/model-function/executeCall.cjs +26 -30
- package/model-function/executeCall.d.ts +19 -28
- package/model-function/executeCall.js +26 -30
- package/model-function/generate-image/ImageGenerationEvent.d.ts +1 -1
- package/model-function/generate-image/ImageGenerationModel.d.ts +6 -4
- package/model-function/generate-image/generateImage.cjs +7 -2
- package/model-function/generate-image/generateImage.d.ts +2 -2
- package/model-function/generate-image/generateImage.js +7 -2
- package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +6 -5
- package/model-function/generate-structure/StructureFromTextGenerationModel.d.ts +7 -5
- package/model-function/generate-structure/StructureFromTextGenerationModel.js +6 -5
- package/model-function/generate-structure/StructureGenerationEvent.d.ts +1 -1
- package/model-function/generate-structure/StructureGenerationModel.d.ts +15 -18
- package/model-function/generate-structure/StructureOrTextGenerationModel.d.ts +19 -17
- package/model-function/generate-structure/generateStructure.cjs +10 -8
- package/model-function/generate-structure/generateStructure.d.ts +2 -2
- package/model-function/generate-structure/generateStructure.js +10 -8
- package/model-function/generate-structure/generateStructureOrText.cjs +15 -8
- package/model-function/generate-structure/generateStructureOrText.d.ts +4 -4
- package/model-function/generate-structure/generateStructureOrText.js +15 -8
- package/model-function/generate-structure/streamStructure.cjs +4 -16
- package/model-function/generate-structure/streamStructure.d.ts +3 -7
- package/model-function/generate-structure/streamStructure.js +4 -16
- package/model-function/generate-text/TextGenerationEvent.d.ts +1 -1
- package/model-function/generate-text/TextGenerationModel.d.ts +18 -19
- package/model-function/generate-text/generateText.cjs +8 -9
- package/model-function/generate-text/generateText.d.ts +2 -2
- package/model-function/generate-text/generateText.js +8 -9
- package/model-function/generate-text/streamText.cjs +8 -21
- package/model-function/generate-text/streamText.d.ts +3 -7
- package/model-function/generate-text/streamText.js +8 -21
- package/model-function/index.cjs +2 -2
- package/model-function/index.d.ts +2 -2
- package/model-function/index.js +2 -2
- package/model-function/synthesize-speech/SpeechSynthesisEvent.d.ts +1 -1
- package/model-function/synthesize-speech/SpeechSynthesisModel.d.ts +3 -3
- package/model-function/synthesize-speech/synthesizeSpeech.cjs +7 -2
- package/model-function/synthesize-speech/synthesizeSpeech.d.ts +2 -2
- package/model-function/synthesize-speech/synthesizeSpeech.js +7 -2
- package/model-function/transcribe-speech/TranscriptionEvent.d.ts +1 -1
- package/model-function/transcribe-speech/TranscriptionModel.d.ts +6 -4
- package/model-function/transcribe-speech/transcribe.cjs +7 -2
- package/model-function/transcribe-speech/transcribe.d.ts +2 -2
- package/model-function/transcribe-speech/transcribe.js +7 -2
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +14 -18
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +11 -9
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +14 -18
- package/model-provider/cohere/CohereTextEmbeddingModel.cjs +13 -16
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +12 -10
- package/model-provider/cohere/CohereTextEmbeddingModel.js +13 -16
- package/model-provider/cohere/CohereTextGenerationModel.cjs +29 -29
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +24 -22
- package/model-provider/cohere/CohereTextGenerationModel.js +29 -29
- package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.cjs +10 -17
- package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.d.ts +2 -2
- package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.js +10 -17
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.cjs +13 -16
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.d.ts +9 -7
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.js +13 -16
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +19 -25
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +8 -6
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +19 -25
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +18 -24
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +10 -8
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +18 -24
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +13 -16
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +8 -6
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +13 -16
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +31 -34
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +62 -60
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +31 -34
- package/model-provider/lmnt/LmntSpeechSynthesisModel.cjs +7 -12
- package/model-provider/lmnt/LmntSpeechSynthesisModel.d.ts +2 -2
- package/model-provider/lmnt/LmntSpeechSynthesisModel.js +7 -12
- package/model-provider/openai/OpenAIImageGenerationModel.cjs +8 -16
- package/model-provider/openai/OpenAIImageGenerationModel.d.ts +11 -11
- package/model-provider/openai/OpenAIImageGenerationModel.js +8 -16
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +18 -24
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +18 -16
- package/model-provider/openai/OpenAITextEmbeddingModel.js +18 -24
- package/model-provider/openai/OpenAITextGenerationModel.cjs +23 -27
- package/model-provider/openai/OpenAITextGenerationModel.d.ts +31 -33
- package/model-provider/openai/OpenAITextGenerationModel.js +23 -27
- package/model-provider/openai/OpenAITranscriptionModel.cjs +19 -28
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +27 -7
- package/model-provider/openai/OpenAITranscriptionModel.js +19 -28
- package/model-provider/openai/chat/OpenAIChatModel.cjs +82 -86
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +127 -50
- package/model-provider/openai/chat/OpenAIChatModel.js +83 -87
- package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +4 -3
- package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts +2 -2
- package/model-provider/openai/chat/OpenAIChatStreamIterable.js +2 -1
- package/model-provider/stability/StabilityImageGenerationModel.cjs +16 -21
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +13 -11
- package/model-provider/stability/StabilityImageGenerationModel.js +16 -21
- package/package.json +1 -1
- package/prompt/PromptFormatTextGenerationModel.cjs +6 -19
- package/prompt/PromptFormatTextGenerationModel.d.ts +14 -10
- package/prompt/PromptFormatTextGenerationModel.js +6 -19
- package/prompt/PromptFormatTextStreamingModel.cjs +31 -0
- package/prompt/PromptFormatTextStreamingModel.d.ts +13 -0
- package/prompt/PromptFormatTextStreamingModel.js +27 -0
- package/prompt/chat/trimChatPrompt.d.ts +2 -2
- package/prompt/index.cjs +1 -0
- package/prompt/index.d.ts +1 -0
- package/prompt/index.js +1 -0
- package/retriever/Retriever.d.ts +3 -6
- package/retriever/retrieve.cjs +2 -2
- package/retriever/retrieve.d.ts +3 -3
- package/retriever/retrieve.js +2 -2
- package/tool/executeTool.cjs +2 -2
- package/tool/executeTool.js +2 -2
- package/tool/useTool.cjs +2 -4
- package/tool/useTool.d.ts +2 -2
- package/tool/useTool.js +2 -4
- package/tool/useToolOrGenerateText.d.ts +2 -2
- package/util/SafeResult.d.ts +1 -1
- package/util/runSafe.cjs +1 -1
- package/util/runSafe.js +1 -1
- package/vector-index/VectorIndexRetriever.cjs +0 -7
- package/vector-index/VectorIndexRetriever.d.ts +5 -5
- package/vector-index/VectorIndexRetriever.js +0 -7
- package/vector-index/upsertIntoVectorIndex.d.ts +4 -4
- package/model-function/DeltaEvent.d.ts +0 -7
- package/model-function/ModelFunctionOptions.d.ts +0 -4
- /package/model-function/{DeltaEvent.cjs → Delta.cjs} +0 -0
- /package/model-function/{DeltaEvent.js → Delta.js} +0 -0
- /package/model-function/{ModelFunctionOptions.cjs → ModelCallMetadata.cjs} +0 -0
- /package/model-function/{ModelFunctionOptions.js → ModelCallMetadata.js} +0 -0
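
Reading across the hunks below, the recurring change in 0.41.0 is that the per-call `ModelFunctionOptions<SETTINGS>` parameter is replaced by a plain `FunctionOptions` bag (with `ModelFunctionOptions.d.ts` removed and its `.cjs`/`.js` files renamed to `ModelCallMetadata`), and provider-level methods are consolidated into `do…` methods (`doGenerateText`, `doStreamText`, `doEmbedValues`, `doDescribeImage`) that return the raw provider response together with the already-extracted value. Below is a minimal sketch of that contract; the types and the `EchoTextGenerationModel` are simplified stand-ins for illustration, not the package's actual exports.

```ts
// Simplified stand-ins; the real FunctionOptions and TextStreamingModel
// types in modelfusion are richer than this.
type FunctionOptions = {
  run?: { abortSignal?: AbortSignal };
};

interface TextGenerationModelSketch {
  // 0.41.0 style: one method returns both the raw response and the text.
  doGenerateText(
    prompt: string,
    options?: FunctionOptions
  ): Promise<{ response: unknown; text: string }>;
}

// Hypothetical provider, used only to show the shape of the new contract.
class EchoTextGenerationModel implements TextGenerationModelSketch {
  async doGenerateText(prompt: string, options?: FunctionOptions) {
    if (options?.run?.abortSignal?.aborted) {
      throw new Error("aborted");
    }
    const response = { generations: [{ text: `echo: ${prompt}` }] };
    return { response, text: response.generations[0].text };
  }
}

async function main() {
  const { text, response } = await new EchoTextGenerationModel().doGenerateText(
    "Write a short story about a robot learning to love:\n\n"
  );
  console.log(text, response);
}

main().catch(console.error);
```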
package/model-provider/cohere/CohereTextEmbeddingModel.d.ts:

@@ -1,7 +1,7 @@
 import z from "zod";
+import { FunctionOptions } from "../../core/FunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
-import { ModelFunctionOptions } from "../../model-function/ModelFunctionOptions.js";
 import { EmbeddingModel, EmbeddingModelSettings } from "../../model-function/embed/EmbeddingModel.js";
 import { FullTokenizer } from "../../model-function/tokenize-text/Tokenizer.js";
 export declare const COHERE_TEXT_EMBEDDING_MODELS: {
@@ -38,7 +38,7 @@ export interface CohereTextEmbeddingModelSettings extends EmbeddingModelSettings
 * ]
 * );
 */
-export declare class CohereTextEmbeddingModel extends AbstractModel<CohereTextEmbeddingModelSettings> implements EmbeddingModel<string,
+export declare class CohereTextEmbeddingModel extends AbstractModel<CohereTextEmbeddingModelSettings> implements EmbeddingModel<string, CohereTextEmbeddingModelSettings>, FullTokenizer {
 constructor(settings: CohereTextEmbeddingModelSettings);
 readonly provider: "cohere";
 get modelName(): "embed-english-light-v2.0" | "embed-english-v2.0" | "embed-multilingual-v2.0";
@@ -52,19 +52,21 @@ export declare class CohereTextEmbeddingModel extends AbstractModel<CohereTextEm
 tokenTexts: string[];
 }>;
 detokenize(tokens: number[]): Promise<string>;
-callAPI(texts: Array<string>, options?:
+callAPI(texts: Array<string>, options?: FunctionOptions): Promise<CohereTextEmbeddingResponse>;
 get settingsForEvent(): Partial<CohereTextEmbeddingModelSettings>;
-
-
-
-
-
+doEmbedValues(texts: string[], options?: FunctionOptions): Promise<{
+response: {
+id: string;
+meta: {
+api_version: {
+version: string;
+};
 };
+texts: string[];
+embeddings: number[][];
 };
-texts: string[];
 embeddings: number[][];
 }>;
-extractEmbeddings(response: CohereTextEmbeddingResponse): number[][];
 withSettings(additionalSettings: Partial<CohereTextEmbeddingModelSettings>): this;
 }
 declare const cohereTextEmbeddingResponseSchema: z.ZodObject<{

package/model-provider/cohere/CohereTextEmbeddingModel.js:

@@ -91,18 +91,14 @@ export class CohereTextEmbeddingModel extends AbstractModel {
 if (texts.length > this.maxValuesPerCall) {
 throw new Error(`The Cohere embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
 }
-const run = options?.run;
-const settings = options?.settings;
-const callSettings = {
-...this.settings,
-...settings,
-abortSignal: run?.abortSignal,
-texts,
-};
 return callWithRetryAndThrottle({
-retry:
-throttle:
-call: async () => callCohereEmbeddingAPI(
+retry: this.settings.api?.retry,
+throttle: this.settings.api?.throttle,
+call: async () => callCohereEmbeddingAPI({
+...this.settings,
+abortSignal: options?.run?.abortSignal,
+texts,
+}),
 });
 }
 get settingsForEvent() {
@@ -110,11 +106,12 @@ export class CohereTextEmbeddingModel extends AbstractModel {
 truncate: this.settings.truncate,
 };
 }
-
-
-
-
-
+async doEmbedValues(texts, options) {
+const response = await this.callAPI(texts, options);
+return {
+response,
+embeddings: response.embeddings,
+};
 }
 withSettings(additionalSettings) {
 return new CohereTextEmbeddingModel(Object.assign({}, this.settings, additionalSettings));
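
Both hunks above, and the provider hunks that follow, move retry/throttle configuration onto the model's `api` settings (`this.settings.api?.retry`, `this.settings.api?.throttle`) and hand them to `callWithRetryAndThrottle` together with a single `call` closure. Below is a rough, standalone sketch of that call pattern; the helper is a stand-in rather than modelfusion's implementation, and the nesting of retry around throttle is an assumption not taken from this diff.

```ts
// Stand-in wrapper type; modelfusion's actual RetryFunction/ThrottleFunction
// signatures may differ.
type Wrap = <T>(fn: () => Promise<T>) => Promise<T>;

// Default: no retrying, no throttling.
const passThrough: Wrap = (fn) => fn();

async function callWithRetryAndThrottleSketch<T>(args: {
  retry?: Wrap;     // e.g. an exponential-backoff wrapper from settings.api?.retry
  throttle?: Wrap;  // e.g. a concurrency limiter from settings.api?.throttle
  call: () => Promise<T>;
}): Promise<T> {
  const retry = args.retry ?? passThrough;
  const throttle = args.throttle ?? passThrough;
  // Assumption: each retry attempt re-acquires a throttle slot.
  return retry(() => throttle(args.call));
}
```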
package/model-provider/cohere/CohereTextGenerationModel.cjs:

@@ -11,7 +11,7 @@ const postToApi_js_1 = require("../../core/api/postToApi.cjs");
 const AsyncQueue_js_1 = require("../../event-source/AsyncQueue.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
-const
+const PromptFormatTextStreamingModel_js_1 = require("../../prompt/PromptFormatTextStreamingModel.cjs");
 const CohereApiConfiguration_js_1 = require("./CohereApiConfiguration.cjs");
 const CohereError_js_1 = require("./CohereError.cjs");
 const CohereTokenizer_js_1 = require("./CohereTokenizer.cjs");
@@ -81,27 +81,21 @@ class CohereTextGenerationModel extends AbstractModel_js_1.AbstractModel {
 return (0, countTokens_js_1.countTokens)(this.tokenizer, input);
 }
 async callAPI(prompt, options) {
-const { run, settings, responseFormat } = options;
-const combinedSettings = {
-...this.settings,
-settings,
-};
-const callSettings = {
-...combinedSettings,
-// use endSequences instead of stopSequences
-// to exclude stop tokens from the generated text
-endSequences: combinedSettings.stopSequences,
-maxTokens: combinedSettings.maxCompletionTokens,
-// mapped name because of conflict with stopSequences:
-stopSequences: combinedSettings.cohereStopSequences,
-abortSignal: run?.abortSignal,
-prompt,
-responseFormat,
-};
 return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
-retry:
-throttle:
-call: async () => callCohereTextGenerationAPI(
+retry: this.settings.api?.retry,
+throttle: this.settings.api?.throttle,
+call: async () => callCohereTextGenerationAPI({
+...this.settings,
+// use endSequences instead of stopSequences
+// to exclude stop tokens from the generated text
+endSequences: this.settings.stopSequences,
+maxTokens: this.settings.maxCompletionTokens,
+// mapped name because of conflict with stopSequences:
+stopSequences: this.settings.cohereStopSequences,
+abortSignal: options.run?.abortSignal,
+responseFormat: options.responseFormat,
+prompt,
+}),
 });
 }
 get settingsForEvent() {
@@ -121,16 +115,17 @@ class CohereTextGenerationModel extends AbstractModel_js_1.AbstractModel {
 ];
 return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
 }
-
-
+async doGenerateText(prompt, options) {
+const response = await this.callAPI(prompt, {
 ...options,
 responseFormat: exports.CohereTextGenerationResponseFormat.json,
 });
+return {
+response,
+text: response.generations[0].text,
+};
 }
-
-return response.generations[0].text;
-}
-generateDeltaStreamResponse(prompt, options) {
+doStreamText(prompt, options) {
 return this.callAPI(prompt, {
 ...options,
 responseFormat: exports.CohereTextGenerationResponseFormat.deltaIterable,
@@ -140,9 +135,12 @@ class CohereTextGenerationModel extends AbstractModel_js_1.AbstractModel {
 return fullDelta.delta;
 }
 withPromptFormat(promptFormat) {
-return new
+return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
 model: this.withSettings({
-stopSequences:
+stopSequences: [
+...(this.settings.stopSequences ?? []),
+...promptFormat.stopSequences,
+],
 }),
 promptFormat,
 });
@@ -218,6 +216,7 @@ async function createCohereTextGenerationFullDeltaIterableQueue(stream) {
 isComplete: true,
 delta: "",
 },
+valueDelta: "",
 });
 }
 else {
@@ -229,6 +228,7 @@ async function createCohereTextGenerationFullDeltaIterableQueue(stream) {
 isComplete: false,
 delta: event.text,
 },
+valueDelta: event.text,
 });
 }
 }
package/model-provider/cohere/CohereTextGenerationModel.d.ts:

@@ -1,12 +1,12 @@
 import { z } from "zod";
+import { FunctionOptions } from "../../core/FunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { ResponseHandler } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
-import {
-import {
-import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
+import { Delta } from "../../model-function/Delta.js";
+import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
 import { PromptFormat } from "../../prompt/PromptFormat.js";
-import {
+import { PromptFormatTextStreamingModel } from "../../prompt/PromptFormatTextStreamingModel.js";
 import { CohereTokenizer } from "./CohereTokenizer.js";
 export declare const COHERE_TEXT_GENERATION_MODELS: {
 command: {
@@ -54,7 +54,7 @@ export interface CohereTextGenerationModelSettings extends TextGenerationModelSe
 * "Write a short story about a robot learning to love:\n\n"
 * );
 */
-export declare class CohereTextGenerationModel extends AbstractModel<CohereTextGenerationModelSettings> implements
+export declare class CohereTextGenerationModel extends AbstractModel<CohereTextGenerationModelSettings> implements TextStreamingModel<string, CohereTextGenerationModelSettings> {
 constructor(settings: CohereTextGenerationModelSettings);
 readonly provider: "cohere";
 get modelName(): "command" | "command-nightly" | "command-light" | "command-light-nightly";
@@ -63,26 +63,28 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
 countPromptTokens(input: string): Promise<number>;
 callAPI<RESPONSE>(prompt: string, options: {
 responseFormat: CohereTextGenerationResponseFormatType<RESPONSE>;
-} &
+} & FunctionOptions): Promise<RESPONSE>;
 get settingsForEvent(): Partial<CohereTextGenerationModelSettings>;
-
-
-
-generations: {
-text: string;
+doGenerateText(prompt: string, options?: FunctionOptions): Promise<{
+response: {
+prompt: string;
 id: string;
-
-
-
-
-
-
-
+generations: {
+text: string;
+id: string;
+finish_reason?: string | undefined;
+}[];
+meta?: {
+api_version: {
+version: string;
+};
+} | undefined;
+};
+text: string;
 }>;
-
-generateDeltaStreamResponse(prompt: string, options?: ModelFunctionOptions<CohereTextGenerationModelSettings>): Promise<AsyncIterable<DeltaEvent<CohereTextGenerationDelta>>>;
+doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
 extractTextDelta(fullDelta: CohereTextGenerationDelta): string | undefined;
-withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>):
+withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, CohereTextGenerationModelSettings, this>;
 withSettings(additionalSettings: Partial<CohereTextGenerationModelSettings>): this;
 }
 declare const cohereTextGenerationResponseSchema: z.ZodObject<{
@@ -184,7 +186,7 @@ export declare const CohereTextGenerationResponseFormat: {
 stream: true;
 handler: ({ response }: {
 response: Response;
-}) => Promise<AsyncIterable<
+}) => Promise<AsyncIterable<Delta<string>>>;
 };
 };
 export {};
package/model-provider/cohere/CohereTextGenerationModel.js:

@@ -5,7 +5,7 @@ import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postTo
 import { AsyncQueue } from "../../event-source/AsyncQueue.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
-import {
+import { PromptFormatTextStreamingModel } from "../../prompt/PromptFormatTextStreamingModel.js";
 import { CohereApiConfiguration } from "./CohereApiConfiguration.js";
 import { failedCohereCallResponseHandler } from "./CohereError.js";
 import { CohereTokenizer } from "./CohereTokenizer.js";
@@ -75,27 +75,21 @@ export class CohereTextGenerationModel extends AbstractModel {
 return countTokens(this.tokenizer, input);
 }
 async callAPI(prompt, options) {
-const { run, settings, responseFormat } = options;
-const combinedSettings = {
-...this.settings,
-settings,
-};
-const callSettings = {
-...combinedSettings,
-// use endSequences instead of stopSequences
-// to exclude stop tokens from the generated text
-endSequences: combinedSettings.stopSequences,
-maxTokens: combinedSettings.maxCompletionTokens,
-// mapped name because of conflict with stopSequences:
-stopSequences: combinedSettings.cohereStopSequences,
-abortSignal: run?.abortSignal,
-prompt,
-responseFormat,
-};
 return callWithRetryAndThrottle({
-retry:
-throttle:
-call: async () => callCohereTextGenerationAPI(
+retry: this.settings.api?.retry,
+throttle: this.settings.api?.throttle,
+call: async () => callCohereTextGenerationAPI({
+...this.settings,
+// use endSequences instead of stopSequences
+// to exclude stop tokens from the generated text
+endSequences: this.settings.stopSequences,
+maxTokens: this.settings.maxCompletionTokens,
+// mapped name because of conflict with stopSequences:
+stopSequences: this.settings.cohereStopSequences,
+abortSignal: options.run?.abortSignal,
+responseFormat: options.responseFormat,
+prompt,
+}),
 });
 }
 get settingsForEvent() {
@@ -115,16 +109,17 @@ export class CohereTextGenerationModel extends AbstractModel {
 ];
 return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
 }
-
-
+async doGenerateText(prompt, options) {
+const response = await this.callAPI(prompt, {
 ...options,
 responseFormat: CohereTextGenerationResponseFormat.json,
 });
+return {
+response,
+text: response.generations[0].text,
+};
 }
-
-return response.generations[0].text;
-}
-generateDeltaStreamResponse(prompt, options) {
+doStreamText(prompt, options) {
 return this.callAPI(prompt, {
 ...options,
 responseFormat: CohereTextGenerationResponseFormat.deltaIterable,
@@ -134,9 +129,12 @@ export class CohereTextGenerationModel extends AbstractModel {
 return fullDelta.delta;
 }
 withPromptFormat(promptFormat) {
-return new
+return new PromptFormatTextStreamingModel({
 model: this.withSettings({
-stopSequences:
+stopSequences: [
+...(this.settings.stopSequences ?? []),
+...promptFormat.stopSequences,
+],
 }),
 promptFormat,
 });
@@ -211,6 +209,7 @@ async function createCohereTextGenerationFullDeltaIterableQueue(stream) {
 isComplete: true,
 delta: "",
 },
+valueDelta: "",
 });
 }
 else {
@@ -222,6 +221,7 @@ async function createCohereTextGenerationFullDeltaIterableQueue(stream) {
 isComplete: false,
 delta: event.text,
 },
+valueDelta: event.text,
 });
 }
 }
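
One behavioral detail worth noting in the `withPromptFormat` hunks above: the wrapped `PromptFormatTextStreamingModel` now builds its `stopSequences` by combining the settings' existing `stopSequences` with the ones contributed by the prompt format. A tiny standalone illustration of that merge (the interface name below is illustrative, not the package's actual type):

```ts
interface PromptFormatStopSequences {
  stopSequences: string[];
}

// Same expression as in the diff: keep the model-level sequences (if any)
// and append the ones the prompt format requires.
function mergedStopSequences(
  modelStopSequences: string[] | undefined,
  promptFormat: PromptFormatStopSequences
): string[] {
  return [...(modelStopSequences ?? []), ...promptFormat.stopSequences];
}

// mergedStopSequences(["###"], { stopSequences: ["\nUser:"] })
// -> ["###", "\nUser:"]
```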
package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.cjs:

@@ -25,24 +25,17 @@ class ElevenLabsSpeechSynthesisModel extends AbstractModel_js_1.AbstractModel {
 return this.settings.voice;
 }
 async callAPI(text, options) {
-const run = options?.run;
-const settings = options?.settings;
-const combinedSettings = {
-...this.settings,
-...settings,
-};
-const callSettings = {
-api: combinedSettings.api,
-abortSignal: run?.abortSignal,
-text,
-voiceId: combinedSettings.voice,
-modelId: combinedSettings.model,
-voiceSettings: combinedSettings.voiceSettings,
-};
 return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
-retry:
-throttle:
-call: async () => callElevenLabsTextToSpeechAPI(
+retry: this.settings.api?.retry,
+throttle: this.settings.api?.throttle,
+call: async () => callElevenLabsTextToSpeechAPI({
+api: this.settings.api,
+abortSignal: options?.run?.abortSignal,
+text,
+voiceId: this.settings.voice,
+modelId: this.settings.model,
+voiceSettings: this.settings.voiceSettings,
+}),
 });
 }
 get settingsForEvent() {

package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.d.ts:

@@ -1,7 +1,7 @@
 /// <reference types="node" />
+import { FunctionOptions } from "../../core/FunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
-import { ModelFunctionOptions } from "../../model-function/ModelFunctionOptions.js";
 import { SpeechSynthesisModel, SpeechSynthesisModelSettings } from "../../model-function/synthesize-speech/SpeechSynthesisModel.js";
 export interface ElevenLabsSpeechSynthesisModelSettings extends SpeechSynthesisModelSettings {
 api?: ApiConfiguration;
@@ -25,6 +25,6 @@ export declare class ElevenLabsSpeechSynthesisModel extends AbstractModel<Eleven
 get modelName(): string;
 private callAPI;
 get settingsForEvent(): Partial<ElevenLabsSpeechSynthesisModelSettings>;
-generateSpeechResponse(text: string, options?:
+generateSpeechResponse(text: string, options?: FunctionOptions): Promise<Buffer>;
 withSettings(additionalSettings: Partial<ElevenLabsSpeechSynthesisModelSettings>): this;
 }

package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.js:

@@ -22,24 +22,17 @@ export class ElevenLabsSpeechSynthesisModel extends AbstractModel {
 return this.settings.voice;
 }
 async callAPI(text, options) {
-const run = options?.run;
-const settings = options?.settings;
-const combinedSettings = {
-...this.settings,
-...settings,
-};
-const callSettings = {
-api: combinedSettings.api,
-abortSignal: run?.abortSignal,
-text,
-voiceId: combinedSettings.voice,
-modelId: combinedSettings.model,
-voiceSettings: combinedSettings.voiceSettings,
-};
 return callWithRetryAndThrottle({
-retry:
-throttle:
-call: async () => callElevenLabsTextToSpeechAPI(
+retry: this.settings.api?.retry,
+throttle: this.settings.api?.throttle,
+call: async () => callElevenLabsTextToSpeechAPI({
+api: this.settings.api,
+abortSignal: options?.run?.abortSignal,
+text,
+voiceId: this.settings.voice,
+modelId: this.settings.model,
+voiceSettings: this.settings.voiceSettings,
+}),
 });
 }
 get settingsForEvent() {
package/model-provider/huggingface/HuggingFaceImageDescriptionModel.cjs:

@@ -35,28 +35,25 @@ class HuggingFaceImageDescriptionModel extends AbstractModel_js_1.AbstractModel
 return this.settings.model;
 }
 async callAPI(data, options) {
-const run = options?.run;
-const settings = options?.settings;
-const callSettings = {
-...this.settings,
-...settings,
-abortSignal: run?.abortSignal,
-data,
-};
 return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
-retry:
-throttle:
-call: async () => callHuggingFaceImageDescriptionAPI(
+retry: this.settings.api?.retry,
+throttle: this.settings.api?.throttle,
+call: async () => callHuggingFaceImageDescriptionAPI({
+...this.settings,
+abortSignal: options?.run?.abortSignal,
+data,
+}),
 });
 }
 get settingsForEvent() {
 return {};
 }
-
-
-
-
-
+async doDescribeImage(data, options) {
+const response = await this.callAPI(data, options);
+return {
+response,
+description: response[0].generated_text,
+};
 }
 withSettings(additionalSettings) {
 return new HuggingFaceImageDescriptionModel(Object.assign({}, this.settings, additionalSettings));

package/model-provider/huggingface/HuggingFaceImageDescriptionModel.d.ts:

@@ -2,7 +2,7 @@
 import z from "zod";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
-import {
+import { FunctionOptions } from "../../core/FunctionOptions.js";
 import { ImageDescriptionModel, ImageDescriptionModelSettings } from "../../model-function/describe-image/ImageDescriptionModel.js";
 export interface HuggingFaceImageDescriptionModelSettings extends ImageDescriptionModelSettings {
 api?: ApiConfiguration;
@@ -13,17 +13,19 @@ export interface HuggingFaceImageDescriptionModelSettings extends ImageDescripti
 *
 * @see https://huggingface.co/tasks/image-to-text
 */
-export declare class HuggingFaceImageDescriptionModel extends AbstractModel<HuggingFaceImageDescriptionModelSettings> implements ImageDescriptionModel<Buffer,
+export declare class HuggingFaceImageDescriptionModel extends AbstractModel<HuggingFaceImageDescriptionModelSettings> implements ImageDescriptionModel<Buffer, HuggingFaceImageDescriptionModelSettings> {
 constructor(settings: HuggingFaceImageDescriptionModelSettings);
 readonly provider = "huggingface";
 get modelName(): string;
-callAPI(data: Buffer, options?:
+callAPI(data: Buffer, options?: FunctionOptions): Promise<HuggingFaceImageDescriptionResponse>;
 get settingsForEvent(): Partial<HuggingFaceImageDescriptionModelSettings>;
 readonly countPromptTokens: undefined;
-
-
-
-
+doDescribeImage(data: Buffer, options?: FunctionOptions): Promise<{
+response: {
+generated_text: string;
+}[];
+description: string;
+}>;
 withSettings(additionalSettings: Partial<HuggingFaceImageDescriptionModelSettings>): this;
 }
 declare const huggingFaceImageDescriptionResponseSchema: z.ZodArray<z.ZodObject<{

package/model-provider/huggingface/HuggingFaceImageDescriptionModel.js:

@@ -29,28 +29,25 @@ export class HuggingFaceImageDescriptionModel extends AbstractModel {
 return this.settings.model;
 }
 async callAPI(data, options) {
-const run = options?.run;
-const settings = options?.settings;
-const callSettings = {
-...this.settings,
-...settings,
-abortSignal: run?.abortSignal,
-data,
-};
 return callWithRetryAndThrottle({
-retry:
-throttle:
-call: async () => callHuggingFaceImageDescriptionAPI(
+retry: this.settings.api?.retry,
+throttle: this.settings.api?.throttle,
+call: async () => callHuggingFaceImageDescriptionAPI({
+...this.settings,
+abortSignal: options?.run?.abortSignal,
+data,
+}),
 });
 }
 get settingsForEvent() {
 return {};
 }
-
-
-
-
-
+async doDescribeImage(data, options) {
+const response = await this.callAPI(data, options);
+return {
+response,
+description: response[0].generated_text,
+};
 }
 withSettings(additionalSettings) {
 return new HuggingFaceImageDescriptionModel(Object.assign({}, this.settings, additionalSettings));
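
The HuggingFace hunks end with the same pattern applied to image description: `doDescribeImage` returns the raw array response alongside the extracted `generated_text`. A small consumer-side sketch against that declared shape; the interface and function names here are illustrative stand-ins, not exports of the package.

```ts
// Reduced to what a caller needs from the .d.ts hunk above.
interface ImageDescriberSketch {
  doDescribeImage(
    data: Buffer,
    options?: { run?: { abortSignal?: AbortSignal } }
  ): Promise<{
    response: { generated_text: string }[];
    description: string;
  }>;
}

// Works against any model exposing the 0.41.0 contract, e.g. the
// HuggingFaceImageDescriptionModel changed in this diff.
async function describeWithLogging(
  model: ImageDescriberSketch,
  image: Buffer
): Promise<string> {
  const { response, description } = await model.doDescribeImage(image);
  // Per the diff, description is response[0].generated_text.
  console.log("raw response entries:", response.length);
  return description;
}
```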