modelfusion 0.75.0 → 0.76.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +15 -1
- package/model-provider/anthropic/AnthropicPromptFormat.cjs +2 -5
- package/model-provider/anthropic/AnthropicPromptFormat.js +2 -5
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +4 -1
- package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +4 -1
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +4 -1
- package/package.json +1 -1
package/README.md
CHANGED
@@ -50,6 +50,8 @@ You can use [prompt formats](https://modelfusion.dev/guide/function/generate-tex
|
|
50
50
|
#### generateText
|
51
51
|
|
52
52
|
```ts
|
53
|
+
import { generateText, openai } from "modelfusion";
|
54
|
+
|
53
55
|
const text = await generateText(
|
54
56
|
openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }),
|
55
57
|
"Write a short story about a robot learning to love:\n\n"
|
@@ -61,6 +63,8 @@ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai),
|
|
61
63
|
#### streamText
|
62
64
|
|
63
65
|
```ts
|
66
|
+
import { streamText, openai } from "modelfusion";
|
67
|
+
|
64
68
|
const textStream = await streamText(
|
65
69
|
openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }),
|
66
70
|
"Write a short story about a robot learning to love:\n\n"
|
@@ -78,6 +82,8 @@ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai),
|
|
78
82
|
Multi-modal vision models such as GPT 4 Vision can process images as part of the prompt.
|
79
83
|
|
80
84
|
```ts
|
85
|
+
import { streamText, openai } from "modelfusion";
|
86
|
+
|
81
87
|
const textStream = await streamText(
|
82
88
|
openai.ChatTextGenerator({ model: "gpt-4-vision-preview" }),
|
83
89
|
[
|
@@ -95,6 +101,8 @@ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai),
|
|
95
101
|
Generate an image from a prompt.
|
96
102
|
|
97
103
|
```ts
|
104
|
+
import { generateImage, openai } from "modelfusion";
|
105
|
+
|
98
106
|
const image = await generateImage(
|
99
107
|
openai.ImageGenerator({ model: "dall-e-3", size: "1024x1024" }),
|
100
108
|
"the wicked witch of the west in the style of early 19th century painting"
|
@@ -112,6 +120,8 @@ Synthesize speech (audio) from text. Also called TTS (text-to-speech).
|
|
112
120
|
`generateSpeech` synthesizes speech from text.
|
113
121
|
|
114
122
|
```ts
|
123
|
+
import { generateSpeech, lmnt } from "modelfusion";
|
124
|
+
|
115
125
|
// `speech` is a Buffer with MP3 audio data
|
116
126
|
const speech = await generateSpeech(
|
117
127
|
lmnt.Speech({
|
@@ -131,7 +141,9 @@ Providers: [Eleven Labs](https://modelfusion.dev/integration/model-provider/elev
|
|
131
141
|
`streamSpeech` generates a stream of speech chunks from text or from a text stream. Depending on the model, this can be fully duplex.
|
132
142
|
|
133
143
|
```ts
|
134
|
-
|
144
|
+
import { streamSpeech, elevenlabs } from "modelfusion";
|
145
|
+
|
146
|
+
const textStream: AsyncIterable<string>;
|
135
147
|
|
136
148
|
const speechStream = await streamSpeech(
|
137
149
|
elevenlabs.Speech({
|
@@ -158,6 +170,8 @@ Providers: [Eleven Labs](https://modelfusion.dev/integration/model-provider/elev
|
|
158
170
|
Transcribe speech (audio) data into text. Also called speech-to-text (STT).
|
159
171
|
|
160
172
|
```ts
|
173
|
+
import { generateTranscription, openai } from "modelfusion";
|
174
|
+
|
161
175
|
const transcription = await generateTranscription(
|
162
176
|
openai.Transcription({ model: "whisper-1" }),
|
163
177
|
{
|
@@ -24,10 +24,7 @@ exports.text = text;
|
|
24
24
|
function instruction() {
|
25
25
|
return {
|
26
26
|
format: (instruction) => {
|
27
|
-
let text = "";
|
28
|
-
if (instruction.system != null) {
|
29
|
-
text += `${instruction.system}`;
|
30
|
-
}
|
27
|
+
let text = instruction.system ?? "";
|
31
28
|
text += "\n\nHuman:";
|
32
29
|
text += instruction.instruction;
|
33
30
|
text += "\n\nAssistant:";
|
@@ -46,7 +43,7 @@ function chat() {
|
|
46
43
|
return {
|
47
44
|
format: (chatPrompt) => {
|
48
45
|
(0, validateChatPrompt_js_1.validateChatPrompt)(chatPrompt);
|
49
|
-
let text = chatPrompt.system
|
46
|
+
let text = chatPrompt.system ?? "";
|
50
47
|
for (const { role, content } of chatPrompt.messages) {
|
51
48
|
switch (role) {
|
52
49
|
case "user": {
|
@@ -20,10 +20,7 @@ export function text() {
|
|
20
20
|
export function instruction() {
|
21
21
|
return {
|
22
22
|
format: (instruction) => {
|
23
|
-
let text = "";
|
24
|
-
if (instruction.system != null) {
|
25
|
-
text += `${instruction.system}`;
|
26
|
-
}
|
23
|
+
let text = instruction.system ?? "";
|
27
24
|
text += "\n\nHuman:";
|
28
25
|
text += instruction.instruction;
|
29
26
|
text += "\n\nAssistant:";
|
@@ -41,7 +38,7 @@ export function chat() {
|
|
41
38
|
return {
|
42
39
|
format: (chatPrompt) => {
|
43
40
|
validateChatPrompt(chatPrompt);
|
44
|
-
let text = chatPrompt.system
|
41
|
+
let text = chatPrompt.system ?? "";
|
45
42
|
for (const { role, content } of chatPrompt.messages) {
|
46
43
|
switch (role) {
|
47
44
|
case "user": {
|
@@ -21,11 +21,14 @@ exports.ANTHROPIC_TEXT_GENERATION_MODELS = {
|
|
21
21
|
contextWindowSize: 100000,
|
22
22
|
},
|
23
23
|
"claude-2": {
|
24
|
-
contextWindowSize: 100000,
|
24
|
+
contextWindowSize: 200000,
|
25
25
|
},
|
26
26
|
"claude-2.0": {
|
27
27
|
contextWindowSize: 100000,
|
28
28
|
},
|
29
|
+
"claude-2.1": {
|
30
|
+
contextWindowSize: 200000,
|
31
|
+
},
|
29
32
|
};
|
30
33
|
/**
|
31
34
|
* Create a text generation model that calls the Anthropic API.
|
@@ -20,6 +20,9 @@ export declare const ANTHROPIC_TEXT_GENERATION_MODELS: {
|
|
20
20
|
"claude-2.0": {
|
21
21
|
contextWindowSize: number;
|
22
22
|
};
|
23
|
+
"claude-2.1": {
|
24
|
+
contextWindowSize: number;
|
25
|
+
};
|
23
26
|
};
|
24
27
|
export type AnthropicTextGenerationModelType = keyof typeof ANTHROPIC_TEXT_GENERATION_MODELS;
|
25
28
|
export interface AnthropicTextGenerationModelSettings extends TextGenerationModelSettings {
|
@@ -38,7 +41,7 @@ export interface AnthropicTextGenerationModelSettings extends TextGenerationMode
|
|
38
41
|
export declare class AnthropicTextGenerationModel extends AbstractModel<AnthropicTextGenerationModelSettings> implements TextStreamingModel<string, AnthropicTextGenerationModelSettings> {
|
39
42
|
constructor(settings: AnthropicTextGenerationModelSettings);
|
40
43
|
readonly provider: "anthropic";
|
41
|
-
get modelName(): "claude-instant-1" | "claude-instant-1.2" | "claude-2" | "claude-2.0";
|
44
|
+
get modelName(): "claude-instant-1" | "claude-instant-1.2" | "claude-2" | "claude-2.0" | "claude-2.1";
|
42
45
|
readonly contextWindowSize: number;
|
43
46
|
readonly tokenizer: undefined;
|
44
47
|
readonly countPromptTokens: undefined;
|
@@ -18,11 +18,14 @@ export const ANTHROPIC_TEXT_GENERATION_MODELS = {
|
|
18
18
|
contextWindowSize: 100000,
|
19
19
|
},
|
20
20
|
"claude-2": {
|
21
|
-
contextWindowSize: 100000,
|
21
|
+
contextWindowSize: 200000,
|
22
22
|
},
|
23
23
|
"claude-2.0": {
|
24
24
|
contextWindowSize: 100000,
|
25
25
|
},
|
26
|
+
"claude-2.1": {
|
27
|
+
contextWindowSize: 200000,
|
28
|
+
},
|
26
29
|
};
|
27
30
|
/**
|
28
31
|
* Create a text generation model that calls the Anthropic API.
|