modelfusion 0.74.1 → 0.76.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +55 -33
- package/guard/fixStructure.cjs +1 -1
- package/guard/fixStructure.d.ts +1 -1
- package/guard/fixStructure.js +1 -1
- package/model-function/embed/EmbeddingModel.d.ts +1 -1
- package/model-function/embed/embed.cjs +1 -1
- package/model-function/embed/embed.d.ts +2 -2
- package/model-function/embed/embed.js +1 -1
- package/model-function/generate-image/generateImage.d.ts +1 -1
- package/model-function/generate-speech/generateSpeech.d.ts +1 -1
- package/model-function/generate-speech/streamSpeech.d.ts +1 -1
- package/model-function/generate-structure/generateStructure.d.ts +1 -1
- package/model-function/generate-structure/streamStructure.d.ts +1 -1
- package/model-function/generate-text/generateText.d.ts +1 -1
- package/model-function/generate-text/streamText.d.ts +1 -1
- package/model-function/generate-transcription/generateTranscription.d.ts +1 -1
- package/model-provider/anthropic/AnthropicFacade.cjs +15 -0
- package/model-provider/anthropic/AnthropicFacade.d.ts +9 -0
- package/model-provider/anthropic/AnthropicFacade.js +11 -0
- package/model-provider/anthropic/AnthropicPromptFormat.cjs +2 -5
- package/model-provider/anthropic/AnthropicPromptFormat.js +2 -5
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +4 -1
- package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +4 -1
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +4 -1
- package/model-provider/anthropic/index.cjs +2 -1
- package/model-provider/anthropic/index.d.ts +1 -0
- package/model-provider/anthropic/index.js +1 -0
- package/model-provider/automatic1111/Automatic1111Facade.cjs +15 -0
- package/model-provider/automatic1111/Automatic1111Facade.d.ts +9 -0
- package/model-provider/automatic1111/Automatic1111Facade.js +11 -0
- package/model-provider/automatic1111/index.cjs +14 -1
- package/model-provider/automatic1111/index.d.ts +1 -0
- package/model-provider/automatic1111/index.js +1 -0
- package/model-provider/cohere/CohereFacade.cjs +71 -0
- package/model-provider/cohere/CohereFacade.d.ts +59 -0
- package/model-provider/cohere/CohereFacade.js +65 -0
- package/model-provider/cohere/CohereTextEmbeddingModel.cjs +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.js +1 -1
- package/model-provider/cohere/index.cjs +14 -1
- package/model-provider/cohere/index.d.ts +1 -0
- package/model-provider/cohere/index.js +1 -0
- package/model-provider/elevenlabs/ElevenLabsFacade.cjs +18 -0
- package/model-provider/elevenlabs/ElevenLabsFacade.d.ts +12 -0
- package/model-provider/elevenlabs/ElevenLabsFacade.js +14 -0
- package/model-provider/elevenlabs/index.cjs +14 -0
- package/model-provider/elevenlabs/index.d.ts +1 -0
- package/model-provider/elevenlabs/index.js +1 -0
- package/model-provider/huggingface/HuggingFaceFacade.cjs +55 -0
- package/model-provider/huggingface/HuggingFaceFacade.d.ts +46 -0
- package/model-provider/huggingface/HuggingFaceFacade.js +50 -0
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +1 -1
- package/model-provider/huggingface/index.cjs +14 -2
- package/model-provider/huggingface/index.d.ts +1 -1
- package/model-provider/huggingface/index.js +1 -1
- package/model-provider/llamacpp/LlamaCppFacade.cjs +19 -0
- package/model-provider/llamacpp/LlamaCppFacade.d.ts +7 -0
- package/model-provider/llamacpp/LlamaCppFacade.js +13 -0
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +2 -2
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +2 -2
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +2 -2
- package/model-provider/llamacpp/index.cjs +2 -1
- package/model-provider/llamacpp/index.d.ts +1 -0
- package/model-provider/llamacpp/index.js +1 -0
- package/model-provider/lmnt/LmntFacade.cjs +15 -0
- package/model-provider/lmnt/LmntFacade.d.ts +9 -0
- package/model-provider/lmnt/LmntFacade.js +11 -0
- package/model-provider/lmnt/index.cjs +14 -0
- package/model-provider/lmnt/index.d.ts +1 -0
- package/model-provider/lmnt/index.js +1 -0
- package/model-provider/ollama/OllamaFacade.cjs +13 -0
- package/model-provider/ollama/OllamaFacade.d.ts +4 -0
- package/model-provider/ollama/OllamaFacade.js +8 -0
- package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +2 -2
- package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +2 -2
- package/model-provider/ollama/OllamaTextEmbeddingModel.js +2 -2
- package/model-provider/ollama/index.cjs +14 -1
- package/model-provider/ollama/index.d.ts +1 -0
- package/model-provider/ollama/index.js +1 -0
- package/model-provider/openai/OpenAIFacade.cjs +148 -0
- package/model-provider/openai/OpenAIFacade.d.ts +124 -0
- package/model-provider/openai/OpenAIFacade.js +138 -0
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.js +1 -1
- package/model-provider/openai/TikTokenTokenizer.cjs +2 -2
- package/model-provider/openai/TikTokenTokenizer.d.ts +4 -3
- package/model-provider/openai/TikTokenTokenizer.js +2 -2
- package/model-provider/openai/index.cjs +2 -1
- package/model-provider/openai/index.d.ts +1 -0
- package/model-provider/openai/index.js +1 -0
- package/model-provider/stability/StabilityFacade.cjs +32 -0
- package/model-provider/stability/StabilityFacade.d.ts +26 -0
- package/model-provider/stability/StabilityFacade.js +28 -0
- package/model-provider/stability/index.cjs +14 -1
- package/model-provider/stability/index.d.ts +1 -0
- package/model-provider/stability/index.js +1 -0
- package/package.json +1 -1
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.cjs +0 -94
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.d.ts +0 -44
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.js +0 -90
package/README.md
CHANGED
@@ -50,8 +50,10 @@ You can use [prompt formats](https://modelfusion.dev/guide/function/generate-tex
 #### generateText

 ```ts
+import { generateText, openai } from "modelfusion";
+
 const text = await generateText(
-
+openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }),
 "Write a short story about a robot learning to love:\n\n"
 );
 ```
@@ -61,8 +63,10 @@ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai),
 #### streamText

 ```ts
+import { streamText, openai } from "modelfusion";
+
 const textStream = await streamText(
-
+openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }),
 "Write a short story about a robot learning to love:\n\n"
 );

@@ -78,8 +82,10 @@ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai),
 Multi-modal vision models such as GPT 4 Vision can process images as part of the prompt.

 ```ts
+import { streamText, openai } from "modelfusion";
+
 const textStream = await streamText(
-
+openai.ChatTextGenerator({ model: "gpt-4-vision-preview" }),
 [
 OpenAIChatMessage.user("Describe the image in detail:", {
 image: { base64Content: image, mimeType: "image/png" },
@@ -95,8 +101,10 @@ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai),
 Generate an image from a prompt.

 ```ts
+import { generateImage, openai } from "modelfusion";
+
 const image = await generateImage(
-
+openai.ImageGenerator({ model: "dall-e-3", size: "1024x1024" }),
 "the wicked witch of the west in the style of early 19th century painting"
 );
 ```
@@ -112,9 +120,11 @@ Synthesize speech (audio) from text. Also called TTS (text-to-speech).
 `generateSpeech` synthesizes speech from text.

 ```ts
+import { generateSpeech, lmnt } from "modelfusion";
+
 // `speech` is a Buffer with MP3 audio data
 const speech = await generateSpeech(
-
+lmnt.Speech({
 voice: "034b632b-df71-46c8-b440-86a42ffc3cf3", // Henry
 }),
 "Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
@@ -131,10 +141,12 @@ Providers: [Eleven Labs](https://modelfusion.dev/integration/model-provider/elev
 `generateSpeech` generates a stream of speech chunks from text or from a text stream. Depending on the model, this can be fully duplex.

 ```ts
-
+import { streamSpeech, elevenlabs } from "modelfusion";
+
+const textStream: AsyncIterable<string>;

 const speechStream = await streamSpeech(
-
+elevenlabs.Speech({
 model: "eleven_turbo_v2",
 voice: "pNInz6obpgDQGcFmaJgB", // Adam
 optimizeStreamingLatency: 1,
@@ -158,8 +170,10 @@ Providers: [Eleven Labs](https://modelfusion.dev/integration/model-provider/elev
 Transcribe speech (audio) data into text. Also called speech-to-text (STT).

 ```ts
+import { generateTranscription, openai } from "modelfusion";
+
 const transcription = await generateTranscription(
-
+openai.Transcription({ model: "whisper-1" }),
 {
 type: "mp3",
 data: await fs.promises.readFile("data/test.mp3"),
@@ -179,7 +193,7 @@ Generate a structure that matches a schema.

 ```ts
 const sentiment = await generateStructure(
-
+openai.ChatTextGenerator({
 model: "gpt-3.5-turbo",
 temperature: 0,
 maxCompletionTokens: 50,
@@ -214,7 +228,7 @@ Stream a structure that matches a schema. Partial structures before the final pa

 ```ts
 const structureStream = await streamStructure(
-
+openai.ChatTextGenerator({
 model: "gpt-3.5-turbo",
 temperature: 0,
 maxCompletionTokens: 2000,
@@ -261,13 +275,13 @@ Create embeddings for text and other values. Embeddings are vectors that represe
 ```ts
 // embed single value:
 const embedding = await embed(
-
+openai.TextEmbedder({ model: "text-embedding-ada-002" }),
 "At first, Nox didn't know what to do with the pup."
 );

 // embed many values:
 const embeddings = await embedMany(
-
+openai.TextEmbedder({ model: "text-embedding-ada-002" }),
 [
 "At first, Nox didn't know what to do with the pup.",
 "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
@@ -282,7 +296,7 @@ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai),
 Split text into tokens and reconstruct the text from tokens.

 ```ts
-const tokenizer =
+const tokenizer = openai.Tokenizer({ model: "gpt-4" });

 const text = "At first, Nox didn't know what to do with the pup.";

@@ -305,7 +319,7 @@ Guard functions can be used to implement retry on error, redacting and changing 
 const result = await guard(
 (input, options) =>
 generateStructure(
-
+openai.ChatTextGenerator({
 // ...
 }),
 new ZodStructureDefinition({
@@ -379,7 +393,7 @@ With `generateToolCall`, you can generate a tool call for a specific tool with a

 ```ts
 const { id, name, args } = await generateToolCall(
-
+openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
 calculator,
 [OpenAIChatMessage.user("What's fourteen times twelve?")]
 );
@@ -391,7 +405,7 @@ With `generateToolCallsOrText`, you can ask a language model to generate several

 ```ts
 const { text, toolCalls } = await generateToolCallsOrText(
-
+openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
 [toolA, toolB, toolC],
 [OpenAIChatMessage.user(query)]
 );
@@ -415,7 +429,7 @@ With `useTool`, you can use a tool with a language model that supports tools cal

 ```ts
 const { tool, toolCall, args, ok, result } = await useTool(
-
+openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
 calculator,
 [OpenAIChatMessage.user("What's fourteen times twelve?")]
 );
@@ -433,7 +447,7 @@ With `useToolsOrGenerateText`, you can ask a language model to generate several 

 ```ts
 const { text, toolResults } = await useToolsOrGenerateText(
-
+openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
 [calculator /* ... */],
 [OpenAIChatMessage.user("What's fourteen times twelve?")]
 );
@@ -453,7 +467,7 @@ const texts = [
 ];

 const vectorIndex = new MemoryVectorIndex<string>();
-const embeddingModel =
+const embeddingModel = openai.TextEmbedder({
 model: "text-embedding-ada-002",
 });

@@ -487,9 +501,11 @@ Prompt formats let you use higher level prompt structures (such as text, instruc

 ```ts
 const text = await generateText(
-
-
-
+anthropic
+.TextGenerator({
+model: "claude-instant-1",
+})
+.withTextPrompt(),
 "Write a short story about a robot learning to love"
 );
 ```
@@ -499,10 +515,12 @@ const text = await generateText(
 ```ts
 // example assumes you are running https://huggingface.co/TheBloke/Llama-2-7B-GGUF with llama.cpp
 const text = await generateText(
-
-
-
-
+llamacpp
+.TextGenerator({
+contextWindowSize: 4096, // Llama 2 context window size
+maxCompletionTokens: 1000,
+})
+.withTextPromptFormat(Llama2PromptFormat.instruction()),
 {
 system: "You are a story writer.",
 instruction: "Write a short story about a robot learning to love.",
@@ -516,9 +534,11 @@ They can also be accessed through the shorthand methods `.withTextPrompt()`, `.w

 ```ts
 const textStream = await streamText(
-
-
-
+openai
+.ChatTextGenerator({
+model: "gpt-3.5-turbo",
+})
+.withChatPrompt(),
 {
 system: "You are a celebrated poet.",
 messages: [
@@ -555,9 +575,11 @@ You an use prompt formats with image models as well, e.g. to use a basic text pr

 ```ts
 const image = await generateImage(
-
-
-
+stability
+.ImageGenerator({
+//...
+})
+.withBasicPrompt(),
 "the wicked witch of the west in the style of early 19th century painting"
 );
 ```
@@ -574,7 +596,7 @@ ModelFusion model functions return rich results that include the original respon
 ```ts
 // access the full response (needs to be typed) and the metadata:
 const { value, response, metadata } = await generateText(
-
+openai.CompletionTextGenerator({
 model: "gpt-3.5-turbo-instruct",
 maxCompletionTokens: 1000,
 n: 2, // generate 2 completions
package/guard/fixStructure.cjs
CHANGED
@@ -29,7 +29,7 @@ const StructureValidationError_js_1 = require("../model-function/generate-struct
 * const result = await guard(
 * (input) =>
 * generateStructure(
-*
+* openai.ChatTextGenerator({
 * // ...
 * }),
 * new ZodStructureDefinition({
package/guard/fixStructure.d.ts
CHANGED
package/guard/fixStructure.js
CHANGED
@@ -26,7 +26,7 @@ import { StructureValidationError } from "../model-function/generate-structure/S
 * const result = await guard(
 * (input) =>
 * generateStructure(
-*
+* openai.ChatTextGenerator({
 * // ...
 * }),
 * new ZodStructureDefinition({
package/model-function/embed/EmbeddingModel.d.ts
CHANGED
@@ -15,7 +15,7 @@ export interface EmbeddingModel<VALUE, SETTINGS extends EmbeddingModelSettings =
 /**
 * True if the model can handle multiple embedding calls in parallel.
 */
-readonly
+readonly isParallelizable: boolean;
 doEmbedValues(values: VALUE[], options?: FunctionOptions): PromiseLike<{
 response: unknown;
 embeddings: Vector[];
package/model-function/embed/embed.cjs
CHANGED
@@ -22,7 +22,7 @@ async function embedMany(model, values, options) {
 }
 // call the model for each group:
 let responses;
-if (model.
+if (model.isParallelizable) {
 responses = await Promise.all(valueGroups.map((valueGroup) => model.doEmbedValues(valueGroup, options)));
 }
 else {
package/model-function/embed/embed.d.ts
CHANGED
@@ -9,7 +9,7 @@ import { EmbeddingModel, EmbeddingModelSettings } from "./EmbeddingModel.js";
 *
 * @example
 * const embeddings = await embedMany(
-*
+* openai.TextEmbedder(...),
 * [
 * "At first, Nox didn't know what to do with the pup.",
 * "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
@@ -39,7 +39,7 @@ export declare function embedMany<VALUE>(model: EmbeddingModel<VALUE, EmbeddingM
 *
 * @example
 * const embedding = await embed(
-*
+* openai.TextEmbedder(...),
 * "At first, Nox didn't know what to do with the pup."
 * );
 *
package/model-function/embed/embed.js
CHANGED
@@ -19,7 +19,7 @@ export async function embedMany(model, values, options) {
 }
 // call the model for each group:
 let responses;
-if (model.
+if (model.isParallelizable) {
 responses = await Promise.all(valueGroups.map((valueGroup) => model.doEmbedValues(valueGroup, options)));
 }
 else {
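As the two `embedMany` hunks above show, the implementation now branches on the model's `isParallelizable` flag when it processes value groups. A simplified, standalone sketch of that control flow; the helper name and types here are illustrative, not part of the package API:

```ts
// Standalone sketch of the branching shown above: parallelizable models get all
// value groups embedded concurrently, others are processed one group at a time.
type EmbedGroupFn = (values: string[]) => Promise<number[][]>;

async function embedGroups(
  valueGroups: string[][],
  doEmbedValues: EmbedGroupFn,
  isParallelizable: boolean
): Promise<number[][][]> {
  if (isParallelizable) {
    // one request per group, all in flight at once
    return Promise.all(valueGroups.map((group) => doEmbedValues(group)));
  }
  // sequential: wait for each group before starting the next
  const responses: number[][][] = [];
  for (const group of valueGroups) {
    responses.push(await doEmbedValues(group));
  }
  return responses;
}
```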
package/model-function/generate-image/generateImage.d.ts
CHANGED
@@ -12,7 +12,7 @@ import { ImageGenerationModel, ImageGenerationModelSettings } from "./ImageGener
 *
 * @example
 * const image = await generateImage(
-*
+* stability.ImageGenerator(...),
 * [
 * { text: "the wicked witch of the west" },
 * { text: "style of early 19th century painting", weight: 0.5 },
package/model-function/generate-speech/generateSpeech.d.ts
CHANGED
@@ -9,7 +9,7 @@ import { SpeechGenerationModel, SpeechGenerationModelSettings } from "./SpeechGe
 *
 * @example
 * const speech = await generateSpeech(
-*
+* lmnt.Speech(...),
 * "Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
 * "as The Rolling Stones unveil 'Hackney Diamonds.'
 * );
package/model-function/generate-speech/streamSpeech.d.ts
CHANGED
@@ -12,7 +12,7 @@ import { SpeechGenerationModelSettings, StreamingSpeechGenerationModel } from ".
 * const textStream = await streamText(...);
 *
 * const speechStream = await streamSpeech(
-*
+* elevenlabs.Speech(...),
 * textStream
 * );
 *
package/model-function/generate-structure/generateStructure.d.ts
CHANGED
@@ -12,7 +12,7 @@ import { StructureGenerationModel, StructureGenerationModelSettings } from "./St
 *
 * @example
 * const sentiment = await generateStructure(
-*
+* openai.ChatTextGenerator(...),
 * new ZodStructureDefinition({
 * name: "sentiment",
 * description: "Write the sentiment analysis",
package/model-function/generate-text/generateText.d.ts
CHANGED
@@ -12,7 +12,7 @@ import { TextGenerationModel, TextGenerationModelSettings } from "./TextGenerati
 *
 * @example
 * const text = await generateText(
-*
+* openai.CompletionTextGenerator(...),
 * "Write a short story about a robot learning to love:\n\n"
 * );
 *
package/model-function/generate-text/streamText.d.ts
CHANGED
@@ -12,7 +12,7 @@ import { TextStreamingModel } from "./TextGenerationModel.js";
 *
 * @example
 * const textStream = await streamText(
-*
+* openai.CompletionTextGenerator(...),
 * "Write a short story about a robot learning to love:\n\n"
 * );
 *
package/model-function/generate-transcription/generateTranscription.d.ts
CHANGED
@@ -10,7 +10,7 @@ import { TranscriptionModel, TranscriptionModelSettings } from "./TranscriptionM
 * const data = await fs.promises.readFile("data/test.mp3");
 *
 * const transcription = await generateTranscription(
-*
+* openai.Transcription({ model: "whisper-1" }),
 * { type: "mp3", data }
 * );
 *
package/model-provider/anthropic/AnthropicFacade.cjs
ADDED
@@ -0,0 +1,15 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.TextGenerator = void 0;
+const AnthropicTextGenerationModel_js_1 = require("./AnthropicTextGenerationModel.cjs");
+/**
+* Create a text generation model that calls the Anthropic API.
+*
+* @see https://docs.anthropic.com/claude/reference/complete_post
+*
+* @return A new instance of {@link AnthropicTextGenerationModel}.
+*/
+function TextGenerator(settings) {
+return new AnthropicTextGenerationModel_js_1.AnthropicTextGenerationModel(settings);
+}
+exports.TextGenerator = TextGenerator;
package/model-provider/anthropic/AnthropicFacade.d.ts
ADDED
@@ -0,0 +1,9 @@
+import { AnthropicTextGenerationModel, AnthropicTextGenerationModelSettings } from "./AnthropicTextGenerationModel.js";
+/**
+* Create a text generation model that calls the Anthropic API.
+*
+* @see https://docs.anthropic.com/claude/reference/complete_post
+*
+* @return A new instance of {@link AnthropicTextGenerationModel}.
+*/
+export declare function TextGenerator(settings: AnthropicTextGenerationModelSettings): AnthropicTextGenerationModel;
package/model-provider/anthropic/AnthropicFacade.js
ADDED
@@ -0,0 +1,11 @@
+import { AnthropicTextGenerationModel, } from "./AnthropicTextGenerationModel.js";
+/**
+* Create a text generation model that calls the Anthropic API.
+*
+* @see https://docs.anthropic.com/claude/reference/complete_post
+*
+* @return A new instance of {@link AnthropicTextGenerationModel}.
+*/
+export function TextGenerator(settings) {
+return new AnthropicTextGenerationModel(settings);
+}
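The new facade forwards its settings straight to the model constructor, so it can be combined with the `generateText` / prompt-format usage shown in the README changes above. A minimal sketch, assuming both names are re-exported from the package root (as the README examples do for `openai` and `lmnt`; the index changes below add the provider-level re-export); the prompt text is illustrative:

```ts
import { generateText, anthropic, AnthropicTextGenerationModel } from "modelfusion";

// These two calls build the same model (the facade only wraps the constructor):
const viaFacade = anthropic.TextGenerator({ model: "claude-instant-1" });
const viaConstructor = new AnthropicTextGenerationModel({ model: "claude-instant-1" });

// Used with the text prompt format, as in the README example in this diff:
const text = await generateText(
  viaFacade.withTextPrompt(),
  "Write a short story about a robot learning to love"
);
```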
package/model-provider/anthropic/AnthropicPromptFormat.cjs
CHANGED
@@ -24,10 +24,7 @@ exports.text = text;
 function instruction() {
 return {
 format: (instruction) => {
-let text = "";
-if (instruction.system != null) {
-text += `${instruction.system}`;
-}
+let text = instruction.system ?? "";
 text += "\n\nHuman:";
 text += instruction.instruction;
 text += "\n\nAssistant:";
@@ -46,7 +43,7 @@ function chat() {
 return {
 format: (chatPrompt) => {
 (0, validateChatPrompt_js_1.validateChatPrompt)(chatPrompt);
-let text = chatPrompt.system
+let text = chatPrompt.system ?? "";
 for (const { role, content } of chatPrompt.messages) {
 switch (role) {
 case "user": {
package/model-provider/anthropic/AnthropicPromptFormat.js
CHANGED
@@ -20,10 +20,7 @@ export function text() {
 export function instruction() {
 return {
 format: (instruction) => {
-let text = "";
-if (instruction.system != null) {
-text += `${instruction.system}`;
-}
+let text = instruction.system ?? "";
 text += "\n\nHuman:";
 text += instruction.instruction;
 text += "\n\nAssistant:";
@@ -41,7 +38,7 @@ export function chat() {
 return {
 format: (chatPrompt) => {
 validateChatPrompt(chatPrompt);
-let text = chatPrompt.system
+let text = chatPrompt.system ?? "";
 for (const { role, content } of chatPrompt.messages) {
 switch (role) {
 case "user": {
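For reference, a standalone sketch of what the simplified `instruction()` formatter produces after this change; the function name and example values are illustrative, not part of the package:

```ts
// Mirrors the instruction() format logic shown in the hunks above.
function formatAnthropicInstruction(prompt: {
  system?: string;
  instruction: string;
}): string {
  let text = prompt.system ?? "";
  text += "\n\nHuman:";
  text += prompt.instruction;
  text += "\n\nAssistant:";
  return text;
}

formatAnthropicInstruction({ system: "You are a poet.", instruction: "Write a haiku." });
// => "You are a poet.\n\nHuman:Write a haiku.\n\nAssistant:"
```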
package/model-provider/anthropic/AnthropicTextGenerationModel.cjs
CHANGED
@@ -21,11 +21,14 @@ exports.ANTHROPIC_TEXT_GENERATION_MODELS = {
 contextWindowSize: 100000,
 },
 "claude-2": {
-contextWindowSize:
+contextWindowSize: 200000,
 },
 "claude-2.0": {
 contextWindowSize: 100000,
 },
+"claude-2.1": {
+contextWindowSize: 200000,
+},
 };
 /**
 * Create a text generation model that calls the Anthropic API.
package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts
CHANGED
@@ -20,6 +20,9 @@ export declare const ANTHROPIC_TEXT_GENERATION_MODELS: {
 "claude-2.0": {
 contextWindowSize: number;
 };
+"claude-2.1": {
+contextWindowSize: number;
+};
 };
 export type AnthropicTextGenerationModelType = keyof typeof ANTHROPIC_TEXT_GENERATION_MODELS;
 export interface AnthropicTextGenerationModelSettings extends TextGenerationModelSettings {
@@ -38,7 +41,7 @@ export interface AnthropicTextGenerationModelSettings extends TextGenerationMode
 export declare class AnthropicTextGenerationModel extends AbstractModel<AnthropicTextGenerationModelSettings> implements TextStreamingModel<string, AnthropicTextGenerationModelSettings> {
 constructor(settings: AnthropicTextGenerationModelSettings);
 readonly provider: "anthropic";
-get modelName(): "claude-instant-1" | "claude-instant-1.2" | "claude-2" | "claude-2.0";
+get modelName(): "claude-instant-1" | "claude-instant-1.2" | "claude-2" | "claude-2.0" | "claude-2.1";
 readonly contextWindowSize: number;
 readonly tokenizer: undefined;
 readonly countPromptTokens: undefined;
package/model-provider/anthropic/AnthropicTextGenerationModel.js
CHANGED
@@ -18,11 +18,14 @@ export const ANTHROPIC_TEXT_GENERATION_MODELS = {
 contextWindowSize: 100000,
 },
 "claude-2": {
-contextWindowSize:
+contextWindowSize: 200000,
 },
 "claude-2.0": {
 contextWindowSize: 100000,
 },
+"claude-2.1": {
+contextWindowSize: 200000,
+},
 };
 /**
 * Create a text generation model that calls the Anthropic API.
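With the entries above, "claude-2.1" becomes an accepted model name, registered with a 200,000-token context window. A minimal usage sketch, assuming the root re-exports used in the README examples; the prompt is illustrative:

```ts
import { generateText, anthropic } from "modelfusion";

// "claude-2.1" is now part of the AnthropicTextGenerationModelType union.
const text = await generateText(
  anthropic.TextGenerator({ model: "claude-2.1" }).withTextPrompt(),
  "Write a short story about a robot learning to love"
);
```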
package/model-provider/anthropic/index.cjs
CHANGED
@@ -26,10 +26,11 @@ var __importStar = (this && this.__importStar) || function (mod) {
 return result;
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.AnthropicPromptFormat = exports.anthropicErrorDataSchema = exports.AnthropicError = void 0;
+exports.AnthropicPromptFormat = exports.anthropic = exports.anthropicErrorDataSchema = exports.AnthropicError = void 0;
 __exportStar(require("./AnthropicApiConfiguration.cjs"), exports);
 var AnthropicError_js_1 = require("./AnthropicError.cjs");
 Object.defineProperty(exports, "AnthropicError", { enumerable: true, get: function () { return AnthropicError_js_1.AnthropicError; } });
 Object.defineProperty(exports, "anthropicErrorDataSchema", { enumerable: true, get: function () { return AnthropicError_js_1.anthropicErrorDataSchema; } });
+exports.anthropic = __importStar(require("./AnthropicFacade.cjs"));
 exports.AnthropicPromptFormat = __importStar(require("./AnthropicPromptFormat.cjs"));
 __exportStar(require("./AnthropicTextGenerationModel.cjs"), exports);
package/model-provider/anthropic/index.d.ts
CHANGED
@@ -1,4 +1,5 @@
 export * from "./AnthropicApiConfiguration.js";
 export { AnthropicError, anthropicErrorDataSchema } from "./AnthropicError.js";
+export * as anthropic from "./AnthropicFacade.js";
 export * as AnthropicPromptFormat from "./AnthropicPromptFormat.js";
 export * from "./AnthropicTextGenerationModel.js";
package/model-provider/anthropic/index.js
CHANGED
@@ -1,4 +1,5 @@
 export * from "./AnthropicApiConfiguration.js";
 export { AnthropicError, anthropicErrorDataSchema } from "./AnthropicError.js";
+export * as anthropic from "./AnthropicFacade.js";
 export * as AnthropicPromptFormat from "./AnthropicPromptFormat.js";
 export * from "./AnthropicTextGenerationModel.js";
package/model-provider/automatic1111/Automatic1111Facade.cjs
ADDED
@@ -0,0 +1,15 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.ImageGenerator = void 0;
+const Automatic1111ImageGenerationModel_js_1 = require("./Automatic1111ImageGenerationModel.cjs");
+/**
+* Create an image generation model that calls the AUTOMATIC1111 Stable Diffusion Web UI API.
+*
+* @see https://github.com/AUTOMATIC1111/stable-diffusion-webui
+*
+* @return A new instance of ${@link Automatic1111ImageGenerationModel}.
+*/
+function ImageGenerator(settings) {
+return new Automatic1111ImageGenerationModel_js_1.Automatic1111ImageGenerationModel(settings);
+}
+exports.ImageGenerator = ImageGenerator;
package/model-provider/automatic1111/Automatic1111Facade.d.ts
ADDED
@@ -0,0 +1,9 @@
+import { Automatic1111ImageGenerationModel, Automatic1111ImageGenerationSettings } from "./Automatic1111ImageGenerationModel.js";
+/**
+* Create an image generation model that calls the AUTOMATIC1111 Stable Diffusion Web UI API.
+*
+* @see https://github.com/AUTOMATIC1111/stable-diffusion-webui
+*
+* @return A new instance of ${@link Automatic1111ImageGenerationModel}.
+*/
+export declare function ImageGenerator(settings: Automatic1111ImageGenerationSettings): Automatic1111ImageGenerationModel;
package/model-provider/automatic1111/Automatic1111Facade.js
ADDED
@@ -0,0 +1,11 @@
+import { Automatic1111ImageGenerationModel, } from "./Automatic1111ImageGenerationModel.js";
+/**
+* Create an image generation model that calls the AUTOMATIC1111 Stable Diffusion Web UI API.
+*
+* @see https://github.com/AUTOMATIC1111/stable-diffusion-webui
+*
+* @return A new instance of ${@link Automatic1111ImageGenerationModel}.
+*/
+export function ImageGenerator(settings) {
+return new Automatic1111ImageGenerationModel(settings);
+}
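In the same pattern as the other facades, a sketch of the equivalence this wrapper provides; the settings are left abstract because their shape is defined by `Automatic1111ImageGenerationSettings`, and root re-exports are assumed as above:

```ts
import {
  automatic1111,
  Automatic1111ImageGenerationModel,
  Automatic1111ImageGenerationSettings,
} from "modelfusion";

// Concrete values omitted; see Automatic1111ImageGenerationSettings for the shape.
declare const settings: Automatic1111ImageGenerationSettings;

// Both lines construct the same model; the facade only wraps the constructor.
const viaFacade = automatic1111.ImageGenerator(settings);
const viaConstructor = new Automatic1111ImageGenerationModel(settings);
```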
package/model-provider/automatic1111/index.cjs
CHANGED
@@ -10,13 +10,26 @@ var __createBinding = (this && this.__createBinding) || (Object.create ? (functi
 if (k2 === undefined) k2 = k;
 o[k2] = m[k];
 }));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+o["default"] = v;
+});
 var __exportStar = (this && this.__exportStar) || function(m, exports) {
 for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
+var __importStar = (this && this.__importStar) || function (mod) {
+if (mod && mod.__esModule) return mod;
+var result = {};
+if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+__setModuleDefault(result, mod);
+return result;
+};
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.Automatic1111Error = void 0;
+exports.automatic1111 = exports.Automatic1111Error = void 0;
 __exportStar(require("./Automatic1111ApiConfiguration.cjs"), exports);
 var Automatic1111Error_js_1 = require("./Automatic1111Error.cjs");
 Object.defineProperty(exports, "Automatic1111Error", { enumerable: true, get: function () { return Automatic1111Error_js_1.Automatic1111Error; } });
+exports.automatic1111 = __importStar(require("./Automatic1111Facade.cjs"));
 __exportStar(require("./Automatic1111ImageGenerationModel.cjs"), exports);
 __exportStar(require("./Automatic1111ImageGenerationPrompt.cjs"), exports);
package/model-provider/automatic1111/index.d.ts
CHANGED
@@ -1,4 +1,5 @@
 export * from "./Automatic1111ApiConfiguration.js";
 export { Automatic1111Error, Automatic1111ErrorData, } from "./Automatic1111Error.js";
+export * as automatic1111 from "./Automatic1111Facade.js";
 export * from "./Automatic1111ImageGenerationModel.js";
 export * from "./Automatic1111ImageGenerationPrompt.js";