modelfusion 0.58.0 → 0.60.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +3 -3
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +1 -0
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts +1 -1
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +1 -0
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +16 -2
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +17 -6
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +16 -2
- package/package.json +1 -1
- package/tool/NoSuchToolError.cjs +8 -0
- package/tool/NoSuchToolError.d.ts +6 -0
- package/tool/NoSuchToolError.js +8 -0
- package/tool/ToolExecutionError.cjs +10 -0
- package/tool/ToolExecutionError.d.ts +8 -0
- package/tool/ToolExecutionError.js +10 -0
- package/util/JSONParseError.cjs +1 -0
- package/util/JSONParseError.d.ts +1 -0
- package/util/JSONParseError.js +1 -0
package/README.md
CHANGED
@@ -39,8 +39,7 @@ You can provide API keys for the different [integrations](https://modelfusion.de
 
 ### [Generate Text](https://modelfusion.dev/guide/function/generate-text)
 
-Generate text using a language model and a prompt.
-You can stream the text if it is supported by the model.
+Generate text using a language model and a prompt. You can stream the text if it is supported by the model. You can use images for multi-modal prompting if the model supports it (e.g. with [llama.cpp](https://modelfusion.dev/guide/)).
 You can use [prompt formats](https://modelfusion.dev/guide/function/generate-text#prompt-format) to change the prompt format of a model.
 
 #### generateText
@@ -118,6 +117,7 @@ const textStream = await streamText(/* ... */);
 
 const speechStream = await streamSpeech(
   new ElevenLabsSpeechModel({
+    model: "eleven_turbo_v2",
     voice: "pNInz6obpgDQGcFmaJgB", // Adam
     optimizeStreamingLatency: 1,
     voiceSettings: { stability: 1, similarityBoost: 0.35 },
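The README now sets the new `model` option explicitly. A hedged restatement of the snippet in fuller form; passing the text stream as the second argument of `streamSpeech` and importing from the package root are assumptions, not shown in this hunk:

```ts
import { ElevenLabsSpeechModel, streamSpeech, streamText } from "modelfusion";

const textStream = await streamText(/* ... */);

const speechStream = await streamSpeech(
  new ElevenLabsSpeechModel({
    model: "eleven_turbo_v2", // newly supported model id in this release
    voice: "pNInz6obpgDQGcFmaJgB", // Adam
    optimizeStreamingLatency: 1,
    voiceSettings: { stability: 1, similarityBoost: 0.35 },
  }),
  textStream // assumption: the text stream is forwarded to the speech model
);
```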
@@ -447,7 +447,7 @@ const retrievedTexts = await retrieve(
 );
 ```
 
-Available Vector Stores: [Memory](https://modelfusion.dev/integration/vector-index/memory), [Pinecone](https://modelfusion.dev/integration/vector-index/pinecone)
+Available Vector Stores: [Memory](https://modelfusion.dev/integration/vector-index/memory), [SQLite VSS](https://modelfusion.dev/integration/vector-index/sqlite-vss), [Pinecone](https://modelfusion.dev/integration/vector-index/pinecone)
 
 ### Prompt Formats
 
package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts
CHANGED
@@ -4,7 +4,7 @@ import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { Delta } from "../../model-function/Delta.js";
 import { StreamingSpeechGenerationModel, SpeechGenerationModelSettings } from "../../model-function/generate-speech/SpeechGenerationModel.js";
-declare const elevenLabsModels: readonly ["eleven_multilingual_v2", "eleven_multilingual_v1", "eleven_monolingual_v1"];
+declare const elevenLabsModels: readonly ["eleven_multilingual_v2", "eleven_multilingual_v1", "eleven_monolingual_v1", "eleven_turbo_v2"];
 export interface ElevenLabsSpeechModelSettings extends SpeechGenerationModelSettings {
     api?: ApiConfiguration & {
         apiKey: string;
package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs
CHANGED
@@ -75,7 +75,7 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
     async countPromptTokens(prompt) {
-        const tokens = await this.tokenizer.tokenize(prompt);
+        const tokens = await this.tokenizer.tokenize(prompt.text);
         return tokens.length;
     }
     async doGenerateText(prompt, options) {
@@ -99,6 +99,14 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
             responseFormat: exports.LlamaCppTextGenerationResponseFormat.deltaIterable,
         });
     }
+    withTextPrompt() {
+        return this.withPromptFormat({
+            format(prompt) {
+                return { text: prompt };
+            },
+            stopSequences: [],
+        });
+    }
     withPromptFormat(promptFormat) {
         return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
             model: this.withSettings({
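`withTextPrompt()` (added above) wraps the model in a prompt format that turns a plain string into the structured `{ text }` prompt, so string-based callers keep working. A minimal usage sketch; the `streamText(model, prompt)` call shape and the package-root import are assumptions:

```ts
import { LlamaCppTextGenerationModel, streamText } from "modelfusion";

// Sketch only: withTextPrompt() converts the plain string into { text: prompt }
// before it reaches the llama.cpp API call.
const model = new LlamaCppTextGenerationModel().withTextPrompt();

const textStream = await streamText(model, "Write a haiku about autumn:");
```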
@@ -177,7 +185,7 @@ async function callLlamaCppTextGenerationAPI({ api = new LlamaCppApiConfiguratio
         headers: api.headers,
         body: {
             stream: responseFormat.stream,
-            prompt,
+            prompt: prompt.text,
             temperature,
             top_k: topK,
             top_p: topP,
@@ -195,6 +203,12 @@ async function callLlamaCppTextGenerationAPI({ api = new LlamaCppApiConfiguratio
             seed,
             ignore_eos: ignoreEos,
             logit_bias: logitBias,
+            image_data: prompt.images != null
+                ? Object.entries(prompt.images).map(([id, data]) => ({
+                    id: +id,
+                    data,
+                }))
+                : undefined,
         },
         failedResponseHandler: LlamaCppError_js_1.failedLlamaCppCallResponseHandler,
         successfulResponseHandler: responseFormat.handler,
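The API call now sends `prompt.text` as `prompt` and converts the optional `prompt.images` record into the `image_data` array expected by the llama.cpp server. A small standalone sketch of that mapping; the base64 string is a placeholder:

```ts
// Hypothetical multi-modal prompt; "<base64>" stands in for real base64 image data.
const prompt = {
  text: "[img-1] Describe the image in one sentence.",
  images: { 1: "<base64>" },
};

// Mirrors the mapping added above: Record<number, string> -> { id, data }[].
const image_data =
  prompt.images != null
    ? Object.entries(prompt.images).map(([id, data]) => ({ id: +id, data }))
    : undefined;
// image_data === [{ id: 1, data: "<base64>" }]
```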
package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts
CHANGED
@@ -31,18 +31,28 @@ export interface LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE extends
     ignoreEos?: boolean;
     logitBias?: Array<[number, number | false]>;
 }
-export
+export interface LlamaCppTextGenerationPrompt {
+    /**
+     * Text prompt. Images can be included through references such as `[img-ID]`, e.g. `[img-1]`.
+     */
+    text: string;
+    /**
+     * Maps image id to image base data.
+     */
+    images?: Record<number, string>;
+}
+export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> implements TextStreamingModel<LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> {
     constructor(settings?: LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>);
     readonly provider = "llamacpp";
     get modelName(): null;
     get contextWindowSize(): CONTEXT_WINDOW_SIZE;
     readonly tokenizer: LlamaCppTokenizer;
-    callAPI<RESPONSE>(prompt:
+    callAPI<RESPONSE>(prompt: LlamaCppTextGenerationPrompt, options: {
         responseFormat: LlamaCppTextGenerationResponseFormatType<RESPONSE>;
     } & FunctionOptions): Promise<RESPONSE>;
     get settingsForEvent(): Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>;
-    countPromptTokens(prompt:
-    doGenerateText(prompt:
+    countPromptTokens(prompt: LlamaCppTextGenerationPrompt): Promise<number>;
+    doGenerateText(prompt: LlamaCppTextGenerationPrompt, options?: FunctionOptions): Promise<{
         response: {
             model: string;
             prompt: string;
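The model's prompt type is now the structured `LlamaCppTextGenerationPrompt`, which is what enables multi-modal prompting. A hedged sketch of passing such a prompt; the `streamText(model, prompt)` shape, the package-root imports, and a llama.cpp server running a multi-modal (LLaVA-style) model are all assumptions:

```ts
import { LlamaCppTextGenerationModel, streamText } from "modelfusion";

// "<base64>" stands in for real base64-encoded image data.
const textStream = await streamText(new LlamaCppTextGenerationModel(), {
  text: "[img-1] Describe the image in detail.", // [img-1] references image id 1
  images: { 1: "<base64>" },
});

for await (const textPart of textStream) {
  process.stdout.write(textPart);
}
```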
@@ -99,8 +109,9 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
             totalTokens: number;
         };
     }>;
-    doStreamText(prompt:
-
+    doStreamText(prompt: LlamaCppTextGenerationPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
+    withTextPrompt(): PromptFormatTextStreamingModel<string, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, LlamaCppTextGenerationPrompt>): PromptFormatTextStreamingModel<INPUT_PROMPT, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
     withSettings(additionalSettings: Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
 }
 declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
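`withPromptFormat` is now typed to map an arbitrary input prompt to `LlamaCppTextGenerationPrompt`. As a hedged sketch, a custom instruction-style format could target the structured prompt; only the `format` and `stopSequences` members are taken from this diff, and any other required `TextGenerationPromptFormat` members are not shown here:

```ts
import { LlamaCppTextGenerationModel } from "modelfusion";

// Hypothetical custom prompt format targeting the new structured prompt type.
const instructionModel = new LlamaCppTextGenerationModel().withPromptFormat({
  format(instruction: string) {
    // Map the caller's string onto the llama.cpp prompt object.
    return { text: `### Instruction:\n${instruction}\n\n### Response:\n` };
  },
  stopSequences: ["\n### Instruction:"],
});
```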
package/model-provider/llamacpp/LlamaCppTextGenerationModel.js
CHANGED
@@ -72,7 +72,7 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
     async countPromptTokens(prompt) {
-        const tokens = await this.tokenizer.tokenize(prompt);
+        const tokens = await this.tokenizer.tokenize(prompt.text);
         return tokens.length;
     }
     async doGenerateText(prompt, options) {
@@ -96,6 +96,14 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
             responseFormat: LlamaCppTextGenerationResponseFormat.deltaIterable,
         });
     }
+    withTextPrompt() {
+        return this.withPromptFormat({
+            format(prompt) {
+                return { text: prompt };
+            },
+            stopSequences: [],
+        });
+    }
     withPromptFormat(promptFormat) {
         return new PromptFormatTextStreamingModel({
             model: this.withSettings({
@@ -173,7 +181,7 @@ async function callLlamaCppTextGenerationAPI({ api = new LlamaCppApiConfiguratio
         headers: api.headers,
         body: {
             stream: responseFormat.stream,
-            prompt,
+            prompt: prompt.text,
             temperature,
             top_k: topK,
             top_p: topP,
@@ -191,6 +199,12 @@ async function callLlamaCppTextGenerationAPI({ api = new LlamaCppApiConfiguratio
             seed,
             ignore_eos: ignoreEos,
             logit_bias: logitBias,
+            image_data: prompt.images != null
+                ? Object.entries(prompt.images).map(([id, data]) => ({
+                    id: +id,
+                    data,
+                }))
+                : undefined,
         },
         failedResponseHandler: failedLlamaCppCallResponseHandler,
         successfulResponseHandler: responseFormat.handler,
package/package.json
CHANGED
package/tool/NoSuchToolError.cjs
CHANGED
@@ -13,5 +13,13 @@ class NoSuchToolError extends Error {
         this.name = "NoSuchToolError";
         this.toolName = toolName;
     }
+    toJSON() {
+        return {
+            name: this.name,
+            toolName: this.toolName,
+            message: this.message,
+            stack: this.stack,
+        };
+    }
 }
 exports.NoSuchToolError = NoSuchToolError;
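The added `toJSON()` makes the error serializable, e.g. for structured logs. A minimal sketch; the single-string constructor argument is an assumption inferred from the `toolName` assignment above:

```ts
import { NoSuchToolError } from "modelfusion";

// Hypothetical error instance; the hunk above only shows that toolName is stored.
const error = new NoSuchToolError("calculator");

// JSON.stringify picks up the new toJSON() method:
console.log(JSON.stringify(error));
// -> {"name":"NoSuchToolError","toolName":"calculator","message":"...","stack":"..."}
```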
package/tool/NoSuchToolError.js
CHANGED
package/tool/ToolExecutionError.cjs
CHANGED
@@ -27,5 +27,15 @@ class ToolExecutionError extends Error {
         this.input = input;
         this.cause = cause;
     }
+    toJSON() {
+        return {
+            name: this.name,
+            toolName: this.toolName,
+            input: this.input,
+            cause: this.cause,
+            message: this.message,
+            stack: this.stack,
+        };
+    }
 }
 exports.ToolExecutionError = ToolExecutionError;
package/tool/ToolExecutionError.d.ts
CHANGED
@@ -8,4 +8,12 @@ export declare class ToolExecutionError extends Error {
         message: string | undefined;
         cause: unknown | undefined;
     });
+    toJSON(): {
+        name: string;
+        toolName: string;
+        input: unknown;
+        cause: unknown;
+        message: string;
+        stack: string | undefined;
+    };
 }
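With the typed `toJSON()` declaration, a caught `ToolExecutionError` can be serialized without a custom replacer. A short sketch of structured logging; the package-root import is an assumption:

```ts
import { ToolExecutionError } from "modelfusion";

// Sketch: serialize a caught ToolExecutionError for structured logging.
function logToolError(error: unknown) {
  if (error instanceof ToolExecutionError) {
    // Returns { name, toolName, input, cause, message, stack } per the declaration above.
    console.error(JSON.stringify(error.toJSON()));
  }
}
```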
package/tool/ToolExecutionError.js
CHANGED
@@ -24,4 +24,14 @@ export class ToolExecutionError extends Error {
         this.input = input;
         this.cause = cause;
     }
+    toJSON() {
+        return {
+            name: this.name,
+            toolName: this.toolName,
+            input: this.input,
+            cause: this.cause,
+            message: this.message,
+            stack: this.stack,
+        };
+    }
 }
package/util/JSONParseError.cjs
CHANGED
package/util/JSONParseError.d.ts
CHANGED