modelfusion 0.121.2 → 0.123.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +78 -1
- package/README.md +127 -85
- package/index.cjs +0 -1
- package/index.d.ts +0 -1
- package/index.js +0 -1
- package/model-function/ModelCallEvent.d.ts +6 -2
- package/model-function/classify/Classifier.cjs +2 -0
- package/model-function/classify/Classifier.d.ts +10 -0
- package/model-function/classify/Classifier.js +1 -0
- package/model-function/classify/ClassifyEvent.cjs +2 -0
- package/model-function/classify/ClassifyEvent.d.ts +20 -0
- package/model-function/classify/ClassifyEvent.js +1 -0
- package/model-function/classify/EmbeddingSimilarityClassifier.cjs +97 -0
- package/model-function/classify/EmbeddingSimilarityClassifier.d.ts +40 -0
- package/model-function/classify/EmbeddingSimilarityClassifier.js +93 -0
- package/model-function/classify/classify.cjs +27 -0
- package/model-function/classify/classify.d.ts +17 -0
- package/model-function/classify/classify.js +23 -0
- package/{classifier → model-function/classify}/index.cjs +4 -1
- package/model-function/classify/index.d.ts +4 -0
- package/model-function/classify/index.js +4 -0
- package/model-function/embed/embed.cjs +14 -14
- package/model-function/embed/embed.d.ts +24 -18
- package/model-function/embed/embed.js +14 -14
- package/model-function/generate-image/generateImage.cjs +6 -6
- package/model-function/generate-image/generateImage.d.ts +12 -9
- package/model-function/generate-image/generateImage.js +6 -6
- package/model-function/generate-speech/generateSpeech.cjs +7 -7
- package/model-function/generate-speech/generateSpeech.d.ts +12 -9
- package/model-function/generate-speech/generateSpeech.js +7 -7
- package/model-function/generate-speech/streamSpeech.cjs +6 -6
- package/model-function/generate-speech/streamSpeech.d.ts +12 -8
- package/model-function/generate-speech/streamSpeech.js +6 -6
- package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +5 -3
- package/model-function/generate-structure/StructureFromTextGenerationModel.d.ts +1 -1
- package/model-function/generate-structure/StructureFromTextGenerationModel.js +5 -3
- package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +5 -1
- package/model-function/generate-structure/StructureFromTextStreamingModel.js +5 -1
- package/model-function/generate-structure/StructureGenerationModel.d.ts +1 -1
- package/model-function/generate-structure/generateStructure.cjs +8 -8
- package/model-function/generate-structure/generateStructure.d.ts +17 -10
- package/model-function/generate-structure/generateStructure.js +8 -8
- package/model-function/generate-structure/streamStructure.cjs +6 -6
- package/model-function/generate-structure/streamStructure.d.ts +16 -10
- package/model-function/generate-structure/streamStructure.js +6 -6
- package/model-function/generate-text/generateText.cjs +6 -6
- package/model-function/generate-text/generateText.d.ts +12 -9
- package/model-function/generate-text/generateText.js +6 -6
- package/model-function/generate-text/streamText.cjs +6 -6
- package/model-function/generate-text/streamText.d.ts +12 -8
- package/model-function/generate-text/streamText.js +6 -6
- package/model-function/generate-transcription/generateTranscription.cjs +3 -3
- package/model-function/generate-transcription/generateTranscription.d.ts +12 -9
- package/model-function/generate-transcription/generateTranscription.js +3 -3
- package/model-function/index.cjs +1 -0
- package/model-function/index.d.ts +1 -0
- package/model-function/index.js +1 -0
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +12 -12
- package/model-provider/cohere/CohereTextGenerationModel.test.cjs +7 -4
- package/model-provider/cohere/CohereTextGenerationModel.test.js +7 -4
- package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +10 -10
- package/model-provider/llamacpp/LlamaCppCompletionModel.test.cjs +4 -1
- package/model-provider/llamacpp/LlamaCppCompletionModel.test.js +4 -1
- package/model-provider/mistral/MistralChatModel.test.cjs +15 -8
- package/model-provider/mistral/MistralChatModel.test.js +15 -8
- package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +13 -13
- package/model-provider/ollama/OllamaChatModel.d.ts +9 -9
- package/model-provider/ollama/OllamaChatModel.test.cjs +6 -1
- package/model-provider/ollama/OllamaChatModel.test.js +6 -1
- package/model-provider/ollama/OllamaCompletionModel.test.cjs +31 -16
- package/model-provider/ollama/OllamaCompletionModel.test.js +31 -16
- package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.cjs +4 -4
- package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -1
- package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.js +4 -4
- package/model-provider/openai/OpenAIChatModel.test.cjs +21 -14
- package/model-provider/openai/OpenAIChatModel.test.js +21 -14
- package/model-provider/openai/OpenAICompletionModel.test.cjs +15 -9
- package/model-provider/openai/OpenAICompletionModel.test.js +15 -9
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +12 -12
- package/package.json +1 -1
- package/tool/execute-tool/executeTool.cjs +5 -5
- package/tool/execute-tool/executeTool.d.ts +8 -4
- package/tool/execute-tool/executeTool.js +5 -5
- package/tool/execute-tool/safeExecuteToolCall.cjs +1 -1
- package/tool/execute-tool/safeExecuteToolCall.js +1 -1
- package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +4 -2
- package/tool/generate-tool-call/TextGenerationToolCallModel.js +4 -2
- package/tool/generate-tool-call/generateToolCall.cjs +7 -7
- package/tool/generate-tool-call/generateToolCall.d.ts +11 -5
- package/tool/generate-tool-call/generateToolCall.js +7 -7
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.cjs +4 -2
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.js +4 -2
- package/tool/generate-tool-calls/generateToolCalls.cjs +3 -3
- package/tool/generate-tool-calls/generateToolCalls.d.ts +11 -5
- package/tool/generate-tool-calls/generateToolCalls.js +3 -3
- package/tool/use-tool/useTool.cjs +2 -2
- package/tool/use-tool/useTool.d.ts +5 -1
- package/tool/use-tool/useTool.js +2 -2
- package/tool/use-tools/useTools.cjs +8 -2
- package/tool/use-tools/useTools.d.ts +5 -1
- package/tool/use-tools/useTools.js +8 -2
- package/vector-index/VectorIndexRetriever.cjs +5 -1
- package/vector-index/VectorIndexRetriever.js +5 -1
- package/vector-index/upsertIntoVectorIndex.cjs +5 -1
- package/vector-index/upsertIntoVectorIndex.js +5 -1
- package/classifier/SemanticClassifier.cjs +0 -75
- package/classifier/SemanticClassifier.d.ts +0 -25
- package/classifier/SemanticClassifier.js +0 -71
- package/classifier/index.d.ts +0 -1
- package/classifier/index.js +0 -1
package/CHANGELOG.md
CHANGED
@@ -1,10 +1,87 @@
|
|
1
1
|
# Changelog
|
2
2
|
|
3
|
+
## v0.123.0 - 2024-01-13
|
4
|
+
|
5
|
+
### Added
|
6
|
+
|
7
|
+
- `classify` model function ([docs](https://modelfusion.dev/guide/function/classify)) for classifying values. The `SemanticClassifier` has been renamed to `EmbeddingSimilarityClassifier` and can be used in conjunction with `classify`:
|
8
|
+
|
9
|
+
```ts
|
10
|
+
import { classify, EmbeddingSimilarityClassifier, openai } from "modelfusion";
|
11
|
+
|
12
|
+
const classifier = new EmbeddingSimilarityClassifier({
|
13
|
+
embeddingModel: openai.TextEmbedder({ model: "text-embedding-ada-002" }),
|
14
|
+
similarityThreshold: 0.82,
|
15
|
+
clusters: [
|
16
|
+
{
|
17
|
+
name: "politics" as const,
|
18
|
+
values: [
|
19
|
+
"they will save the country!",
|
20
|
+
// ...
|
21
|
+
],
|
22
|
+
},
|
23
|
+
{
|
24
|
+
name: "chitchat" as const,
|
25
|
+
values: [
|
26
|
+
"how's the weather today?",
|
27
|
+
// ...
|
28
|
+
],
|
29
|
+
},
|
30
|
+
],
|
31
|
+
});
|
32
|
+
|
33
|
+
// strongly typed result:
|
34
|
+
const result = await classify({
|
35
|
+
model: classifier,
|
36
|
+
value: "don't you love politics?",
|
37
|
+
});
|
38
|
+
```
|
39
|
+
|
40
|
+
## v0.122.0 - 2024-01-13
|
41
|
+
|
42
|
+
### Changed
|
43
|
+
|
44
|
+
- **breaking change**: Switch from positional parameters to named parameters (parameter object) for all model and tool functions. The parameter object is the first and only parameter of the function. Additional options (previously the last parameter) are now part of the parameter object. Example:
|
45
|
+
|
46
|
+
```ts
|
47
|
+
// old:
|
48
|
+
const text = await generateText(
|
49
|
+
openai
|
50
|
+
.ChatTextGenerator({
|
51
|
+
model: "gpt-3.5-turbo",
|
52
|
+
maxGenerationTokens: 1000,
|
53
|
+
})
|
54
|
+
.withTextPrompt(),
|
55
|
+
|
56
|
+
"Write a short story about a robot learning to love",
|
57
|
+
|
58
|
+
{
|
59
|
+
functionId: "example-function",
|
60
|
+
}
|
61
|
+
);
|
62
|
+
|
63
|
+
// new:
|
64
|
+
const text = await generateText({
|
65
|
+
model: openai
|
66
|
+
.ChatTextGenerator({
|
67
|
+
model: "gpt-3.5-turbo",
|
68
|
+
maxGenerationTokens: 1000,
|
69
|
+
})
|
70
|
+
.withTextPrompt(),
|
71
|
+
|
72
|
+
prompt: "Write a short story about a robot learning to love",
|
73
|
+
|
74
|
+
functionId: "example-function",
|
75
|
+
});
|
76
|
+
```
|
77
|
+
|
78
|
+
This change was made to make the API more flexible and to allow for future extensions.
|
79
|
+
|
3
80
|
## v0.121.2 - 2024-01-11
|
4
81
|
|
5
82
|
### Fixed
|
6
83
|
|
7
|
-
- Ollama response schema for repeated calls with Ollama 0.1.19 completion models. Thanks [@
|
84
|
+
- Ollama response schema for repeated calls with Ollama 0.1.19 completion models. Thanks [@Necmttn](https://github.com/Necmttn) for the bugfix!
|
8
85
|
|
9
86
|
## v0.121.1 - 2024-01-10
|
10
87
|
|
package/README.md
CHANGED
@@ -22,7 +22,7 @@
|
|
22
22
|
- **Built for production**: ModelFusion is fully tree-shakeable, can be used in serverless environments, and only uses a minimal set of dependencies.
|
23
23
|
|
24
24
|
> [!NOTE]
|
25
|
-
> ModelFusion is getting closer to a stable v1, which is expected in
|
25
|
+
> ModelFusion is getting closer to a stable v1, which is expected in Q2/2024. The main API is now mostly stable, but until version 1.0 there may be breaking changes. Feedback and suggestions are welcome.
|
26
26
|
|
27
27
|
## Quick Install
|
28
28
|
|
@@ -53,10 +53,10 @@ You can use [prompt templates](https://modelfusion.dev/guide/function/generate-t
|
|
53
53
|
```ts
|
54
54
|
import { generateText, openai } from "modelfusion";
|
55
55
|
|
56
|
-
const text = await generateText(
|
57
|
-
openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }),
|
58
|
-
"Write a short story about a robot learning to love:\n\n"
|
59
|
-
);
|
56
|
+
const text = await generateText({
|
57
|
+
model: openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }),
|
58
|
+
prompt: "Write a short story about a robot learning to love:\n\n",
|
59
|
+
});
|
60
60
|
```
|
61
61
|
|
62
62
|
Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere)
|
@@ -66,10 +66,10 @@ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai),
|
|
66
66
|
```ts
|
67
67
|
import { streamText, openai } from "modelfusion";
|
68
68
|
|
69
|
-
const textStream = await streamText(
|
70
|
-
openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }),
|
71
|
-
"Write a short story about a robot learning to love:\n\n"
|
72
|
-
);
|
69
|
+
const textStream = await streamText({
|
70
|
+
model: openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }),
|
71
|
+
prompt: "Write a short story about a robot learning to love:\n\n",
|
72
|
+
});
|
73
73
|
|
74
74
|
for await (const textPart of textStream) {
|
75
75
|
process.stdout.write(textPart);
|
@@ -88,15 +88,15 @@ import { readFileSync } from "fs";
|
|
88
88
|
|
89
89
|
const image = readFileSync("./image.png").toString("base64");
|
90
90
|
|
91
|
-
const textStream = await streamText(
|
92
|
-
openai.ChatTextGenerator({ model: "gpt-4-vision-preview" }),
|
93
|
-
[
|
91
|
+
const textStream = await streamText({
|
92
|
+
model: openai.ChatTextGenerator({ model: "gpt-4-vision-preview" }),
|
93
|
+
prompt: [
|
94
94
|
openai.ChatMessage.user([
|
95
95
|
{ type: "text", text: "Describe the image in detail:" },
|
96
96
|
{ type: "image", base64Image: image, mimeType: "image/png" },
|
97
97
|
]),
|
98
|
-
]
|
99
|
-
);
|
98
|
+
],
|
99
|
+
});
|
100
100
|
|
101
101
|
for await (const textPart of textStream) {
|
102
102
|
process.stdout.write(textPart);
|
@@ -121,9 +121,8 @@ import {
|
|
121
121
|
jsonStructurePrompt,
|
122
122
|
} from "modelfusion";
|
123
123
|
|
124
|
-
const sentiment = await generateStructure(
|
125
|
-
|
126
|
-
ollama
|
124
|
+
const sentiment = await generateStructure({
|
125
|
+
model: ollama
|
127
126
|
.ChatTextGenerator({
|
128
127
|
model: "openhermes2.5-mistral",
|
129
128
|
maxGenerationTokens: 1024,
|
@@ -131,8 +130,7 @@ const sentiment = await generateStructure(
|
|
131
130
|
})
|
132
131
|
.asStructureGenerationModel(jsonStructurePrompt.instruction()),
|
133
132
|
|
134
|
-
|
135
|
-
zodSchema(
|
133
|
+
schema: zodSchema(
|
136
134
|
z.object({
|
137
135
|
sentiment: z
|
138
136
|
.enum(["positive", "neutral", "negative"])
|
@@ -140,16 +138,15 @@ const sentiment = await generateStructure(
|
|
140
138
|
})
|
141
139
|
),
|
142
140
|
|
143
|
-
|
144
|
-
{
|
141
|
+
prompt: {
|
145
142
|
system:
|
146
143
|
"You are a sentiment evaluator. " +
|
147
144
|
"Analyze the sentiment of the following product review:",
|
148
145
|
instruction:
|
149
146
|
"After I opened the package, I was met by a very unpleasant smell " +
|
150
147
|
"that did not disappear even after washing. Never again!",
|
151
|
-
}
|
152
|
-
);
|
148
|
+
},
|
149
|
+
});
|
153
150
|
```
|
154
151
|
|
155
152
|
Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp)
|
@@ -161,8 +158,8 @@ Stream a structure that matches a schema. Partial structures before the final pa
|
|
161
158
|
```ts
|
162
159
|
import { zodSchema, openai, streamStructure } from "modelfusion";
|
163
160
|
|
164
|
-
const structureStream = await streamStructure(
|
165
|
-
openai
|
161
|
+
const structureStream = await streamStructure({
|
162
|
+
model: openai
|
166
163
|
.ChatTextGenerator(/* ... */)
|
167
164
|
.asFunctionCallStructureGenerationModel({
|
168
165
|
fnName: "generateCharacter",
|
@@ -170,7 +167,7 @@ const structureStream = await streamStructure(
|
|
170
167
|
})
|
171
168
|
.withTextPrompt(),
|
172
169
|
|
173
|
-
zodSchema(
|
170
|
+
schema: zodSchema(
|
174
171
|
z.object({
|
175
172
|
characters: z.array(
|
176
173
|
z.object({
|
@@ -184,8 +181,8 @@ const structureStream = await streamStructure(
|
|
184
181
|
})
|
185
182
|
),
|
186
183
|
|
187
|
-
"Generate 3 character descriptions for a fantasy role playing game."
|
188
|
-
);
|
184
|
+
prompt: "Generate 3 character descriptions for a fantasy role playing game.",
|
185
|
+
});
|
189
186
|
|
190
187
|
for await (const part of structureStream) {
|
191
188
|
if (!part.isComplete) {
|
@@ -207,10 +204,11 @@ Generate an image from a prompt.
|
|
207
204
|
```ts
|
208
205
|
import { generateImage, openai } from "modelfusion";
|
209
206
|
|
210
|
-
const image = await generateImage(
|
211
|
-
openai.ImageGenerator({ model: "dall-e-3", size: "1024x1024" }),
|
212
|
-
|
213
|
-
|
207
|
+
const image = await generateImage({
|
208
|
+
model: openai.ImageGenerator({ model: "dall-e-3", size: "1024x1024" }),
|
209
|
+
prompt:
|
210
|
+
"the wicked witch of the west in the style of early 19th century painting",
|
211
|
+
});
|
214
212
|
```
|
215
213
|
|
216
214
|
Providers: [OpenAI (Dall·E)](https://modelfusion.dev/integration/model-provider/openai), [Stability AI](https://modelfusion.dev/integration/model-provider/stability), [Automatic1111](https://modelfusion.dev/integration/model-provider/automatic1111)
|
@@ -227,15 +225,16 @@ Synthesize speech (audio) from text. Also called TTS (text-to-speech).
|
|
227
225
|
import { generateSpeech, lmnt } from "modelfusion";
|
228
226
|
|
229
227
|
// `speech` is a Buffer with MP3 audio data
|
230
|
-
const speech = await generateSpeech(
|
231
|
-
lmnt.SpeechGenerator({
|
228
|
+
const speech = await generateSpeech({
|
229
|
+
model: lmnt.SpeechGenerator({
|
232
230
|
voice: "034b632b-df71-46c8-b440-86a42ffc3cf3", // Henry
|
233
231
|
}),
|
234
|
-
|
232
|
+
text:
|
233
|
+
"Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
|
235
234
|
"as The Rolling Stones unveil 'Hackney Diamonds,' their first collection of " +
|
236
235
|
"fresh tunes in nearly twenty years, featuring the illustrious Lady Gaga, the " +
|
237
|
-
"magical Stevie Wonder, and the final beats from the late Charlie Watts."
|
238
|
-
);
|
236
|
+
"magical Stevie Wonder, and the final beats from the late Charlie Watts.",
|
237
|
+
});
|
239
238
|
```
|
240
239
|
|
241
240
|
Providers: [Eleven Labs](https://modelfusion.dev/integration/model-provider/elevenlabs), [LMNT](https://modelfusion.dev/integration/model-provider/lmnt), [OpenAI](https://modelfusion.dev/integration/model-provider/openai)
|
@@ -249,8 +248,8 @@ import { streamSpeech, elevenlabs } from "modelfusion";
|
|
249
248
|
|
250
249
|
const textStream: AsyncIterable<string>;
|
251
250
|
|
252
|
-
const speechStream = await streamSpeech(
|
253
|
-
elevenlabs.SpeechGenerator({
|
251
|
+
const speechStream = await streamSpeech({
|
252
|
+
model: elevenlabs.SpeechGenerator({
|
254
253
|
model: "eleven_turbo_v2",
|
255
254
|
voice: "pNInz6obpgDQGcFmaJgB", // Adam
|
256
255
|
optimizeStreamingLatency: 1,
|
@@ -259,8 +258,8 @@ const speechStream = await streamSpeech(
|
|
259
258
|
chunkLengthSchedule: [50, 90, 120, 150, 200],
|
260
259
|
},
|
261
260
|
}),
|
262
|
-
textStream
|
263
|
-
);
|
261
|
+
text: textStream,
|
262
|
+
});
|
264
263
|
|
265
264
|
for await (const part of speechStream) {
|
266
265
|
// each part is a Buffer with MP3 audio data
|
@@ -276,13 +275,13 @@ Transcribe speech (audio) data into text. Also called speech-to-text (STT).
|
|
276
275
|
```ts
|
277
276
|
import { generateTranscription, openai } from "modelfusion";
|
278
277
|
|
279
|
-
const transcription = await generateTranscription(
|
280
|
-
openai.Transcriber({ model: "whisper-1" }),
|
281
|
-
{
|
278
|
+
const transcription = await generateTranscription({
|
279
|
+
model: openai.Transcriber({ model: "whisper-1" }),
|
280
|
+
data: {
|
282
281
|
type: "mp3",
|
283
282
|
data: await fs.promises.readFile("data/test.mp3"),
|
284
|
-
}
|
285
|
-
);
|
283
|
+
},
|
284
|
+
});
|
286
285
|
```
|
287
286
|
|
288
287
|
Providers: [OpenAI (Whisper)](https://modelfusion.dev/integration/model-provider/openai), [Whisper.cpp](https://modelfusion.dev/integration/model-provider/whispercpp)
|
@@ -292,24 +291,63 @@ Providers: [OpenAI (Whisper)](https://modelfusion.dev/integration/model-provider
|
|
292
291
|
Create embeddings for text and other values. Embeddings are vectors that represent the essence of the values in the context of the model.
|
293
292
|
|
294
293
|
```ts
|
294
|
+
import { embed, embedMany, openai } from "modelfusion";
|
295
|
+
|
295
296
|
// embed single value:
|
296
|
-
const embedding = await embed(
|
297
|
-
openai.TextEmbedder({ model: "text-embedding-ada-002" }),
|
298
|
-
"At first, Nox didn't know what to do with the pup."
|
299
|
-
);
|
297
|
+
const embedding = await embed({
|
298
|
+
model: openai.TextEmbedder({ model: "text-embedding-ada-002" }),
|
299
|
+
value: "At first, Nox didn't know what to do with the pup.",
|
300
|
+
});
|
300
301
|
|
301
302
|
// embed many values:
|
302
|
-
const embeddings = await embedMany(
|
303
|
-
openai.TextEmbedder({ model: "text-embedding-ada-002" }),
|
304
|
-
[
|
303
|
+
const embeddings = await embedMany({
|
304
|
+
model: openai.TextEmbedder({ model: "text-embedding-ada-002" }),
|
305
|
+
values: [
|
305
306
|
"At first, Nox didn't know what to do with the pup.",
|
306
307
|
"He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
|
307
|
-
]
|
308
|
-
);
|
308
|
+
],
|
309
|
+
});
|
309
310
|
```
|
310
311
|
|
311
312
|
Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere)
|
312
313
|
|
314
|
+
### [Classify Value](https://modelfusion.dev/guide/function/classify)
|
315
|
+
|
316
|
+
Classifies a value into a category.
|
317
|
+
|
318
|
+
```ts
|
319
|
+
import { classify, EmbeddingSimilarityClassifier, openai } from "modelfusion";
|
320
|
+
|
321
|
+
const classifier = new EmbeddingSimilarityClassifier({
|
322
|
+
embeddingModel: openai.TextEmbedder({ model: "text-embedding-ada-002" }),
|
323
|
+
similarityThreshold: 0.82,
|
324
|
+
clusters: [
|
325
|
+
{
|
326
|
+
name: "politics" as const,
|
327
|
+
values: [
|
328
|
+
"they will save the country!",
|
329
|
+
// ...
|
330
|
+
],
|
331
|
+
},
|
332
|
+
{
|
333
|
+
name: "chitchat" as const,
|
334
|
+
values: [
|
335
|
+
"how's the weather today?",
|
336
|
+
// ...
|
337
|
+
],
|
338
|
+
},
|
339
|
+
],
|
340
|
+
});
|
341
|
+
|
342
|
+
// strongly typed result:
|
343
|
+
const result = await classify({
|
344
|
+
model: classifier,
|
345
|
+
value: "don't you love politics?",
|
346
|
+
});
|
347
|
+
```
|
348
|
+
|
349
|
+
Classifiers: [EmbeddingSimilarityClassifier](https://modelfusion.dev/guide/function/classify#embeddingsimilarityclassifier)
|
350
|
+
|
313
351
|
### [Tokenize Text](https://modelfusion.dev/guide/function/tokenize-text)
|
314
352
|
|
315
353
|
Split text into tokens and reconstruct the text from tokens.
|
@@ -339,11 +377,11 @@ ModelFusion offers several tools out-of-the-box: [Math.js](https://modelfusion.d
|
|
339
377
|
With `useTool`, you can ask a tool-compatible language model (e.g. OpenAI chat) to invoke a single tool. `useTool` first generates a tool call and then executes the tool with the arguments.
|
340
378
|
|
341
379
|
```ts
|
342
|
-
const { tool, toolCall, args, ok, result } = await useTool(
|
343
|
-
openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
|
344
|
-
calculator,
|
345
|
-
[openai.ChatMessage.user("What's fourteen times twelve?")]
|
346
|
-
);
|
380
|
+
const { tool, toolCall, args, ok, result } = await useTool({
|
381
|
+
model: openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
|
382
|
+
tool: calculator,
|
383
|
+
prompt: [openai.ChatMessage.user("What's fourteen times twelve?")],
|
384
|
+
});
|
347
385
|
|
348
386
|
console.log(`Tool call:`, toolCall);
|
349
387
|
console.log(`Tool:`, tool);
|
@@ -357,11 +395,11 @@ console.log(`Result or Error:`, result);
|
|
357
395
|
With `useTools`, you can ask a language model to generate several tool calls as well as text. The model will choose which tools (if any) should be called with which arguments. Both the text and the tool calls are optional. This function executes the tools.
|
358
396
|
|
359
397
|
```ts
|
360
|
-
const { text, toolResults } = await useTools(
|
361
|
-
openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
|
362
|
-
[calculator /* ... */],
|
363
|
-
[openai.ChatMessage.user("What's fourteen times twelve?")]
|
364
|
-
);
|
398
|
+
const { text, toolResults } = await useTools({
|
399
|
+
model: openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
|
400
|
+
tools: [calculator /* ... */],
|
401
|
+
prompt: [openai.ChatMessage.user("What's fourteen times twelve?")],
|
402
|
+
});
|
365
403
|
```
|
366
404
|
|
367
405
|
#### [Agent Loop](https://modelfusion.dev/guide/tools/agent-loop)
|
@@ -411,21 +449,22 @@ Prompt templates let you use higher level prompt structures (such as text, instr
|
|
411
449
|
#### Text Prompt Example
|
412
450
|
|
413
451
|
```ts
|
414
|
-
const text = await generateText(
|
415
|
-
openai
|
452
|
+
const text = await generateText({
|
453
|
+
model: openai
|
416
454
|
.ChatTextGenerator({
|
417
455
|
// ...
|
418
456
|
})
|
419
457
|
.withTextPrompt(),
|
420
|
-
|
421
|
-
|
458
|
+
|
459
|
+
prompt: "Write a short story about a robot learning to love",
|
460
|
+
});
|
422
461
|
```
|
423
462
|
|
424
463
|
#### Instruction Prompt Example
|
425
464
|
|
426
465
|
```ts
|
427
|
-
const text = await generateText(
|
428
|
-
llamacpp
|
466
|
+
const text = await generateText({
|
467
|
+
model: llamacpp
|
429
468
|
.CompletionTextGenerator({
|
430
469
|
// run https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF with llama.cpp
|
431
470
|
promptTemplate: llamacpp.prompt.Llama2, // Set prompt template
|
@@ -433,11 +472,12 @@ const text = await generateText(
|
|
433
472
|
maxGenerationTokens: 512,
|
434
473
|
})
|
435
474
|
.withInstructionPrompt(),
|
436
|
-
|
475
|
+
|
476
|
+
prompt: {
|
437
477
|
system: "You are a story writer.",
|
438
478
|
instruction: "Write a short story about a robot learning to love.",
|
439
|
-
}
|
440
|
-
);
|
479
|
+
},
|
480
|
+
});
|
441
481
|
```
|
442
482
|
|
443
483
|
They can also be accessed through the shorthand methods `.withTextPrompt()`, `.withChatPrompt()` and `.withInstructionPrompt()` for many models:
|
@@ -445,13 +485,14 @@ They can also be accessed through the shorthand methods `.withTextPrompt()`, `.w
|
|
445
485
|
#### Chat Prompt Example
|
446
486
|
|
447
487
|
```ts
|
448
|
-
const textStream = await streamText(
|
449
|
-
openai
|
488
|
+
const textStream = await streamText({
|
489
|
+
model: openai
|
450
490
|
.ChatTextGenerator({
|
451
491
|
model: "gpt-3.5-turbo",
|
452
492
|
})
|
453
493
|
.withChatPrompt(),
|
454
|
-
|
494
|
+
|
495
|
+
prompt: {
|
455
496
|
system: "You are a celebrated poet.",
|
456
497
|
messages: [
|
457
498
|
{
|
@@ -467,8 +508,8 @@ const textStream = await streamText(
|
|
467
508
|
content: "Write a short story about Robbie learning to love",
|
468
509
|
},
|
469
510
|
],
|
470
|
-
}
|
471
|
-
);
|
511
|
+
},
|
512
|
+
});
|
472
513
|
```
|
473
514
|
|
474
515
|
| Prompt Template | Text Prompt | Instruction Prompt | Chat Prompt |
|
@@ -504,19 +545,19 @@ const image = await generateImage(
|
|
504
545
|
|
505
546
|
### Metadata and original responses
|
506
547
|
|
507
|
-
ModelFusion model functions return rich responses that include the raw (original) response and metadata when you set the `fullResponse`
|
548
|
+
ModelFusion model functions return rich responses that include the raw (original) response and metadata when you set the `fullResponse` argument to `true`.
|
508
549
|
|
509
550
|
```ts
|
510
551
|
// access the raw response (needs to be typed) and the metadata:
|
511
|
-
const { text, rawResponse, metadata } = await generateText(
|
512
|
-
openai.CompletionTextGenerator({
|
552
|
+
const { text, rawResponse, metadata } = await generateText({
|
553
|
+
model: openai.CompletionTextGenerator({
|
513
554
|
model: "gpt-3.5-turbo-instruct",
|
514
555
|
maxGenerationTokens: 1000,
|
515
556
|
n: 2, // generate 2 completions
|
516
557
|
}),
|
517
|
-
"Write a short story about a robot learning to love:\n\n",
|
518
|
-
|
519
|
-
);
|
558
|
+
prompt: "Write a short story about a robot learning to love:\n\n",
|
559
|
+
fullResponse: true,
|
560
|
+
});
|
520
561
|
|
521
562
|
console.log(metadata);
|
522
563
|
|
@@ -550,6 +591,7 @@ modelfusion.setLogFormat("detailed-object"); // log full events
|
|
550
591
|
- [Generate transcription](https://modelfusion.dev/guide/function/generate-transcription)
|
551
592
|
- [Tokenize Text](https://modelfusion.dev/guide/function/tokenize-text)
|
552
593
|
- [Embed Value](https://modelfusion.dev/guide/function/embed)
|
594
|
+
- [Classify Value](https://modelfusion.dev/guide/function/classify)
|
553
595
|
- [Tools](https://modelfusion.dev/guide/tools)
|
554
596
|
- [Use Tool](https://modelfusion.dev/guide/tools/use-tool)
|
555
597
|
- [Use Tools](https://modelfusion.dev/guide/tools/use-tools)
|
package/index.cjs
CHANGED
@@ -14,7 +14,6 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
|
|
14
14
|
for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
|
15
15
|
};
|
16
16
|
Object.defineProperty(exports, "__esModule", { value: true });
|
17
|
-
__exportStar(require("./classifier/index.cjs"), exports);
|
18
17
|
__exportStar(require("./core/index.cjs"), exports);
|
19
18
|
__exportStar(require("./model-function/index.cjs"), exports);
|
20
19
|
__exportStar(require("./model-provider/index.cjs"), exports);
|
package/index.d.ts
CHANGED
package/index.js
CHANGED
package/model-function/ModelCallEvent.d.ts
CHANGED
@@ -2,6 +2,7 @@ import { BaseFunctionFinishedEvent, BaseFunctionStartedEvent } from "../core/Fun
|
|
2
2
|
import { ToolCallGenerationFinishedEvent, ToolCallGenerationStartedEvent } from "../tool/generate-tool-call/ToolCallGenerationEvent.js";
|
3
3
|
import { ToolCallsGenerationFinishedEvent, ToolCallsGenerationStartedEvent } from "../tool/generate-tool-calls/ToolCallsGenerationEvent.js";
|
4
4
|
import { ModelInformation } from "./ModelInformation.js";
|
5
|
+
import { ClassifyFinishedEvent, ClassifyStartedEvent } from "./classify/ClassifyEvent.js";
|
5
6
|
import { EmbeddingFinishedEvent, EmbeddingStartedEvent } from "./embed/EmbeddingEvent.js";
|
6
7
|
import { ImageGenerationFinishedEvent, ImageGenerationStartedEvent } from "./generate-image/ImageGenerationEvent.js";
|
7
8
|
import { SpeechGenerationFinishedEvent, SpeechGenerationStartedEvent, SpeechStreamingFinishedEvent, SpeechStreamingStartedEvent } from "./generate-speech/SpeechGenerationEvent.js";
|
@@ -22,6 +23,9 @@ export interface BaseModelCallStartedEvent extends BaseFunctionStartedEvent {
|
|
22
23
|
}
|
23
24
|
export type BaseModelCallFinishedEventResult = {
|
24
25
|
status: "success";
|
26
|
+
/**
|
27
|
+
* The original model response.
|
28
|
+
*/
|
25
29
|
rawResponse: unknown;
|
26
30
|
value: unknown;
|
27
31
|
/**
|
@@ -49,5 +53,5 @@ export interface BaseModelCallFinishedEvent extends BaseFunctionFinishedEvent {
|
|
49
53
|
*/
|
50
54
|
result: BaseModelCallFinishedEventResult;
|
51
55
|
}
|
52
|
-
export type ModelCallStartedEvent = EmbeddingStartedEvent | ImageGenerationStartedEvent | SpeechGenerationStartedEvent | SpeechStreamingStartedEvent | StructureGenerationStartedEvent | StructureStreamingStartedEvent | TextGenerationStartedEvent | TextStreamingStartedEvent | ToolCallGenerationStartedEvent | ToolCallsGenerationStartedEvent | TranscriptionStartedEvent;
|
53
|
-
export type ModelCallFinishedEvent = EmbeddingFinishedEvent | ImageGenerationFinishedEvent | SpeechGenerationFinishedEvent | SpeechStreamingFinishedEvent | StructureGenerationFinishedEvent | StructureStreamingFinishedEvent | TextGenerationFinishedEvent | TextStreamingFinishedEvent | ToolCallGenerationFinishedEvent | ToolCallsGenerationFinishedEvent | TranscriptionFinishedEvent;
|
56
|
+
export type ModelCallStartedEvent = ClassifyStartedEvent | EmbeddingStartedEvent | ImageGenerationStartedEvent | SpeechGenerationStartedEvent | SpeechStreamingStartedEvent | StructureGenerationStartedEvent | StructureStreamingStartedEvent | TextGenerationStartedEvent | TextStreamingStartedEvent | ToolCallGenerationStartedEvent | ToolCallsGenerationStartedEvent | TranscriptionStartedEvent;
|
57
|
+
export type ModelCallFinishedEvent = ClassifyFinishedEvent | EmbeddingFinishedEvent | ImageGenerationFinishedEvent | SpeechGenerationFinishedEvent | SpeechStreamingFinishedEvent | StructureGenerationFinishedEvent | StructureStreamingFinishedEvent | TextGenerationFinishedEvent | TextStreamingFinishedEvent | ToolCallGenerationFinishedEvent | ToolCallsGenerationFinishedEvent | TranscriptionFinishedEvent;
|
@@ -0,0 +1,10 @@
|
|
1
|
+
import { FunctionCallOptions } from "../../core/FunctionOptions.js";
|
2
|
+
import { Model, ModelSettings } from "../Model.js";
|
3
|
+
export interface ClassifierSettings extends ModelSettings {
|
4
|
+
}
|
5
|
+
export interface Classifier<VALUE, CLASS extends string | null, SETTINGS extends ClassifierSettings = ClassifierSettings> extends Model<SETTINGS> {
|
6
|
+
doClassify(value: VALUE, options: FunctionCallOptions): PromiseLike<{
|
7
|
+
rawResponse: unknown | undefined;
|
8
|
+
class: CLASS;
|
9
|
+
}>;
|
10
|
+
}
|
@@ -0,0 +1 @@
|
|
1
|
+
export {};
|
@@ -0,0 +1,20 @@
|
|
1
|
+
import { BaseModelCallFinishedEvent, BaseModelCallStartedEvent } from "../ModelCallEvent.js";
|
2
|
+
export interface ClassifyStartedEvent extends BaseModelCallStartedEvent {
|
3
|
+
functionType: "classify";
|
4
|
+
input: unknown | Array<unknown>;
|
5
|
+
}
|
6
|
+
export type ClassifyFinishedEventResult = {
|
7
|
+
status: "success";
|
8
|
+
rawResponse: unknown;
|
9
|
+
value: unknown;
|
10
|
+
} | {
|
11
|
+
status: "error";
|
12
|
+
error: unknown;
|
13
|
+
} | {
|
14
|
+
status: "abort";
|
15
|
+
};
|
16
|
+
export interface ClassifyFinishedEvent extends BaseModelCallFinishedEvent {
|
17
|
+
functionType: "classify";
|
18
|
+
input: unknown;
|
19
|
+
result: ClassifyFinishedEventResult;
|
20
|
+
}
|
@@ -0,0 +1 @@
|
|
1
|
+
export {};
|