modelfusion 0.6.0 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +14 -13
- package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.cjs +1 -1
- package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js +1 -1
- package/composed-function/use-tool/useTool.cjs +4 -1
- package/composed-function/use-tool/useTool.js +4 -1
- package/model-function/embed-text/embedText.cjs +16 -30
- package/model-function/embed-text/embedText.d.ts +14 -4
- package/model-function/embed-text/embedText.js +16 -30
- package/model-function/generate-image/generateImage.cjs +7 -20
- package/model-function/generate-image/generateImage.d.ts +7 -2
- package/model-function/generate-image/generateImage.js +7 -20
- package/model-function/generate-json/generateJson.cjs +7 -5
- package/model-function/generate-json/generateJson.d.ts +6 -1
- package/model-function/generate-json/generateJson.js +7 -5
- package/model-function/generate-json/generateJsonOrText.cjs +7 -5
- package/model-function/generate-json/generateJsonOrText.d.ts +10 -1
- package/model-function/generate-json/generateJsonOrText.js +7 -5
- package/model-function/generate-text/generateText.cjs +7 -17
- package/model-function/generate-text/generateText.d.ts +7 -2
- package/model-function/generate-text/generateText.js +7 -17
- package/model-function/generate-text/streamText.cjs +6 -4
- package/model-function/generate-text/streamText.d.ts +9 -1
- package/model-function/generate-text/streamText.js +6 -4
- package/model-function/transcribe-audio/transcribe.cjs +7 -19
- package/model-function/transcribe-audio/transcribe.d.ts +7 -2
- package/model-function/transcribe-audio/transcribe.js +7 -19
- package/model-provider/cohere/CohereTextEmbeddingModel.cjs +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.js +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.cjs +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.js +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +1 -1
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +1 -1
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +1 -1
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +1 -1
- package/model-provider/openai/OpenAIImageGenerationModel.cjs +1 -1
- package/model-provider/openai/OpenAIImageGenerationModel.d.ts +1 -1
- package/model-provider/openai/OpenAIImageGenerationModel.js +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.js +1 -1
- package/model-provider/openai/OpenAITextGenerationModel.cjs +1 -1
- package/model-provider/openai/OpenAITextGenerationModel.d.ts +1 -1
- package/model-provider/openai/OpenAITextGenerationModel.js +1 -1
- package/model-provider/openai/OpenAITranscriptionModel.cjs +1 -1
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +1 -1
- package/model-provider/openai/OpenAITranscriptionModel.js +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.cjs +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.js +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.cjs +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.js +1 -1
- package/package.json +1 -1
- package/text-chunk/SimilarTextChunksFromVectorIndexRetriever.cjs +1 -1
- package/text-chunk/SimilarTextChunksFromVectorIndexRetriever.js +1 -1
- package/text-chunk/upsertTextChunks.cjs +1 -1
- package/text-chunk/upsertTextChunks.js +1 -1
package/README.md
CHANGED
@@ -10,9 +10,8 @@
 
 [Introduction](#introduction) | [Quick Install](#quick-install) | [Usage](#usage-examples) | [Features](#features) | [Integrations](#integrations) | [Documentation](#documentation) | [Examples](#more-examples) | [modelfusion.dev](https://modelfusion.dev)
 
-
-
-ModelFusion is in its initial development phase. Until version 1.0 there may be breaking changes.
+> [!NOTE]
+> ModelFusion is in its initial development phase. Until version 1.0 there may be breaking changes, because we are still exploring the API design. We welcome your feedback and suggestions.
 
 ## Introduction
 
@@ -49,7 +48,7 @@ You can use [prompt mappings](https://modelfusion.dev/guide/function/generate-te
 #### generateText
 
 ```ts
-const { text } = await generateText(
+const text = await generateText(
   new OpenAITextGenerationModel({ model: "text-davinci-003" }),
   "Write a short story about a robot learning to love:\n\n"
 );
@@ -58,7 +57,7 @@ const { text } = await generateText(
 #### streamText
 
 ```ts
-const { textStream } = await streamText(
+const textStream = await streamText(
   new OpenAIChatModel({ model: "gpt-3.5-turbo", maxTokens: 1000 }),
   [
     OpenAIChatMessage.system("You are a story writer."),
@@ -76,7 +75,7 @@ for await (const textFragment of textStream) {
 [Prompt mapping](https://modelfusion.dev/guide/function/generate-text/prompt-mapping) lets you use higher level prompt structures (such as instruction or chat prompts) for different models.
 
 ```ts
-const { text } = await generateText(
+const text = await generateText(
   new LlamaCppTextGenerationModel({
     contextWindowSize: 4096, // Llama 2 context window size
     nPredict: 1000,
@@ -89,7 +88,7 @@ const { text } = await generateText(
 ```
 
 ```ts
-const { textStream } = await streamText(
+const textStream = await streamText(
   new OpenAIChatModel({
     model: "gpt-3.5-turbo",
   }).mapPrompt(ChatToOpenAIChatPromptMapping()),
@@ -104,14 +103,15 @@ const { textStream } = await streamText(
 
 #### Metadata and original responses
 
-
+ModelFusion model functions return rich results that include the original response and metadata when you set the `fullResponse` option to `true`.
 
 ```ts
 const { text, response, metadata } = await generateText(
   new OpenAITextGenerationModel({
     model: "text-davinci-003",
   }),
-  "Write a short story about a robot learning to love:\n\n"
+  "Write a short story about a robot learning to love:\n\n",
+  { fullResponse: true }
 );
 ```
 
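The change above is the crux of the 0.7.0 API: model functions now return the output value directly, and only return the rich `{ ..., response, metadata }` object when `fullResponse: true` is passed. A minimal sketch of the two call modes, assuming the standard `modelfusion` package exports used throughout this README:

```ts
import { generateText, OpenAITextGenerationModel } from "modelfusion";

const model = new OpenAITextGenerationModel({ model: "text-davinci-003" });

// Default mode: resolves directly to the generated string.
const text = await generateText(
  model,
  "Write a short story about a robot learning to love:\n\n"
);

// Full-response mode: resolves to { text, response, metadata }.
const { text: fullText, response, metadata } = await generateText(
  model,
  "Write a short story about a robot learning to love:\n\n",
  { fullResponse: true }
);
```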
@@ -120,7 +120,7 @@ const { text, response, metadata } = await generateText(
 Generate JSON value that matches a schema.
 
 ```ts
-const { value } = await generateJson(
+const value = await generateJson(
   new OpenAIChatModel({
     model: "gpt-3.5-turbo",
     temperature: 0,
@@ -249,7 +249,7 @@ const { tool, parameters, result, text } = await useToolOrGenerateText(
 Turn audio (voice) into text.
 
 ```ts
-const { transcription } = await transcribe(
+const transcription = await transcribe(
   new OpenAITranscriptionModel({ model: "whisper-1" }),
   {
     type: "mp3",
@@ -263,7 +263,7 @@ const { transcription } = await transcribe(
 Generate a base64-encoded image from a prompt.
 
 ```ts
-const { image } = await generateImage(
+const image = await generateImage(
   new OpenAIImageGenerationModel({ size: "512x512" }),
   "the wicked witch of the west in the style of early 19th century painting"
 );
@@ -274,7 +274,7 @@ const { image } = await generateImage(
 Create embeddings for text. Embeddings are vectors that represent the meaning of the text.
 
 ```ts
-const { embeddings } = await embedTexts(
+const embeddings = await embedTexts(
   new OpenAITextEmbeddingModel({ model: "text-embedding-ada-002" }),
   [
     "At first, Nox didn't know what to do with the pup.",
@@ -399,6 +399,7 @@ Use higher level prompts that are mapped into model specific prompt formats.
 - [Examples & Tutorials](https://modelfusion.dev/tutorial)
 - [Integrations](https://modelfusion.dev/integration/model-provider)
 - [API Reference](https://modelfusion.dev/api/modules)
+- [Blog](https://modelfusion.dev/api/blog)
 
 ## More Examples
 
package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.cjs
CHANGED
@@ -18,7 +18,7 @@ async function summarizeRecursivelyWithTextGenerationAndTokenSplitting({ text, m
         maxTokensPerChunk: tokenLimit - emptyPromptTokens,
     }),
     summarize: async (input) => {
-        const { text } = await (0, generateText_js_1.generateText)(model, await prompt(input), options);
+        const text = await (0, generateText_js_1.generateText)(model, await prompt(input), options);
         return text;
     },
     join,
package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js
CHANGED
@@ -15,7 +15,7 @@ export async function summarizeRecursivelyWithTextGenerationAndTokenSplitting({
         maxTokensPerChunk: tokenLimit - emptyPromptTokens,
     }),
     summarize: async (input) => {
-        const { text } = await generateText(model, await prompt(input), options);
+        const text = await generateText(model, await prompt(input), options);
         return text;
     },
     join,
package/composed-function/use-tool/useTool.cjs
CHANGED
@@ -19,7 +19,10 @@ async function useTool(model, tool, prompt, options) {
         name: tool.name,
         description: tool.description,
         schema: tool.inputSchema,
-    }, () => prompt(tool), options);
+    }, () => prompt(tool), {
+        ...(options ?? {}),
+        fullResponse: true,
+    });
     return {
         tool: tool.name,
         parameters: value,
package/composed-function/use-tool/useTool.js
CHANGED
@@ -16,7 +16,10 @@ export async function useTool(model, tool, prompt, options) {
         name: tool.name,
         description: tool.description,
         schema: tool.inputSchema,
-    }, () => prompt(tool), options);
+    }, () => prompt(tool), {
+        ...(options ?? {}),
+        fullResponse: true,
+    });
     return {
         tool: tool.name,
         parameters: value,
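Note how `useTool` adapts to the new default: it spreads the caller's options and forces `fullResponse: true` on the inner `generateJson` call, so it can keep destructuring `value` internally while its own return shape (`{ tool, parameters, ... }`) stays unchanged. A small self-contained sketch of that forwarding idiom (the stub function is illustrative, not part of the package):

```ts
// Hypothetical stand-in for the generateJson call that useTool makes internally.
async function generateJsonStub(options: {
  functionId?: string;
  fullResponse: true;
}): Promise<{ value: unknown }> {
  return { value: { query: "example" } };
}

// The idiom from the diff: spread the caller's options, then force
// fullResponse so the wrapper can always destructure the rich result.
async function callWithFullResponse(options?: { functionId?: string }) {
  const { value } = await generateJsonStub({
    ...(options ?? {}),
    fullResponse: true, // overrides whatever the caller passed
  });
  return value;
}
```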
package/model-function/embed-text/embedText.cjs
CHANGED
@@ -2,18 +2,6 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.embedText = exports.embedTexts = void 0;
 const executeCall_js_1 = require("../executeCall.cjs");
-/**
- * Generate embeddings for multiple texts.
- *
- * @example
- * const { embeddings } = await embedTexts(
- *   new OpenAITextEmbeddingModel(...),
- *   [
- *     "At first, Nox didn't know what to do with the pup.",
- *     "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
- *   ]
- * );
- */
 async function embedTexts(model, texts, options) {
     const result = await (0, executeCall_js_1.executeCall)({
         model,
@@ -65,26 +53,24 @@ async function embedTexts(model, texts, options) {
            generatedEmbeddings: output,
        }),
    });
-    return {
-        embeddings: result.output,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            embeddings: result.output,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
 exports.embedTexts = embedTexts;
-/**
- * Generate an embedding for a single text.
- *
- * @example
- * const { embedding } = await embedText(
- *   new OpenAITextEmbeddingModel(...),
- *   "At first, Nox didn't know what to do with the pup."
- * );
- */
 async function embedText(model, text, options) {
-    const result = await embedTexts(model, [text], options);
-    return {
-        embedding: result.embeddings[0],
-        metadata: result.metadata,
-    };
+    const result = await embedTexts(model, [text], {
+        ...(options ?? {}),
+        fullResponse: true,
+    });
+    return options?.fullResponse === true
+        ? {
+            embedding: result.embeddings[0],
+            metadata: result.metadata,
+        }
+        : result.embeddings[0];
 }
 exports.embedText = embedText;
package/model-function/embed-text/embedText.d.ts
CHANGED
@@ -6,7 +6,7 @@ import { TextEmbeddingModel, TextEmbeddingModelSettings } from "./TextEmbeddingM
  * Generate embeddings for multiple texts.
  *
  * @example
- * const { embeddings } = await embedTexts(
+ * const embeddings = await embedTexts(
  *   new OpenAITextEmbeddingModel(...),
  *   [
  *     "At first, Nox didn't know what to do with the pup.",
@@ -14,20 +14,30 @@ import { TextEmbeddingModel, TextEmbeddingModelSettings } from "./TextEmbeddingM
  *   ]
  * );
  */
-export declare function embedTexts<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, texts: string[], options?: FunctionOptions<SETTINGS>): Promise<{
+export declare function embedTexts<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, texts: string[], options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<{
     embeddings: Array<Vector>;
     metadata: CallMetadata<TextEmbeddingModel<RESPONSE, SETTINGS>>;
 }>;
+export declare function embedTexts<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, texts: string[], options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<Array<Vector>>;
 /**
  * Generate an embedding for a single text.
  *
  * @example
- * const { embedding } = await embedText(
+ * const embedding = await embedText(
  *   new OpenAITextEmbeddingModel(...),
  *   "At first, Nox didn't know what to do with the pup."
  * );
 */
-export declare function embedText<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, text: string, options?: FunctionOptions<SETTINGS>): Promise<{
+export declare function embedText<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, text: string, options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<{
     embedding: Vector;
     metadata: CallMetadata<TextEmbeddingModel<RESPONSE, SETTINGS>>;
 }>;
+export declare function embedText<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, text: string, options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<Vector>;
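With these overloads, the compiler picks the return type from the `fullResponse` flag. A usage sketch, assuming the public `modelfusion` exports match the declarations above:

```ts
import { embedText, embedTexts, OpenAITextEmbeddingModel } from "modelfusion";

const model = new OpenAITextEmbeddingModel({ model: "text-embedding-ada-002" });

// Second overload (fullResponse omitted or false): Promise<Array<Vector>>.
const embeddings = await embedTexts(model, [
  "At first, Nox didn't know what to do with the pup.",
]);

// First overload (fullResponse: true): Promise<{ embeddings, metadata }>.
const { embeddings: vectors, metadata } = await embedTexts(
  model,
  ["At first, Nox didn't know what to do with the pup."],
  { fullResponse: true }
);

// The single-text variant behaves the same way: Vector vs. { embedding, metadata }.
const embedding = await embedText(
  model,
  "At first, Nox didn't know what to do with the pup."
);
```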
package/model-function/embed-text/embedText.js
CHANGED
@@ -1,16 +1,4 @@
 import { executeCall } from "../executeCall.js";
-/**
- * Generate embeddings for multiple texts.
- *
- * @example
- * const { embeddings } = await embedTexts(
- *   new OpenAITextEmbeddingModel(...),
- *   [
- *     "At first, Nox didn't know what to do with the pup.",
- *     "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
- *   ]
- * );
- */
 export async function embedTexts(model, texts, options) {
     const result = await executeCall({
         model,
@@ -62,24 +50,22 @@ export async function embedTexts(model, texts, options) {
            generatedEmbeddings: output,
        }),
    });
-    return {
-        embeddings: result.output,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            embeddings: result.output,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
-/**
- * Generate an embedding for a single text.
- *
- * @example
- * const { embedding } = await embedText(
- *   new OpenAITextEmbeddingModel(...),
- *   "At first, Nox didn't know what to do with the pup."
- * );
- */
 export async function embedText(model, text, options) {
-    const result = await embedTexts(model, [text], options);
-    return {
-        embedding: result.embeddings[0],
-        metadata: result.metadata,
-    };
+    const result = await embedTexts(model, [text], {
+        ...(options ?? {}),
+        fullResponse: true,
+    });
+    return options?.fullResponse === true
+        ? {
+            embedding: result.embeddings[0],
+            metadata: result.metadata,
+        }
+        : result.embeddings[0];
 }
package/model-function/generate-image/generateImage.cjs
CHANGED
@@ -2,21 +2,6 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.generateImage = void 0;
 const executeCall_js_1 = require("../executeCall.cjs");
-/**
- * Generates a base64-encoded image using a prompt.
- * The prompt format depends on the model.
- * For example, OpenAI image models expect a string prompt,
- * and Stability AI models expect an array of text prompts with optional weights.
- *
- * @example
- * const { image } = await generateImage(
- *   new StabilityImageGenerationModel(...),
- *   [
- *     { text: "the wicked witch of the west" },
- *     { text: "style of early 19th century painting", weight: 0.5 },
- *   ]
- * );
- */
 async function generateImage(model, prompt, options) {
     const result = await (0, executeCall_js_1.executeCall)({
         model,
@@ -54,10 +39,12 @@ async function generateImage(model, prompt, options) {
            generatedImage: output,
        }),
    });
-    return {
-        image: result.output,
-        response: result.response,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            image: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
 exports.generateImage = generateImage;
package/model-function/generate-image/generateImage.d.ts
CHANGED
@@ -8,7 +8,7 @@ import { ImageGenerationModel, ImageGenerationModelSettings } from "./ImageGener
  * and Stability AI models expect an array of text prompts with optional weights.
  *
  * @example
- * const { image } = await generateImage(
+ * const image = await generateImage(
  *   new StabilityImageGenerationModel(...),
  *   [
  *     { text: "the wicked witch of the west" },
@@ -16,8 +16,13 @@ import { ImageGenerationModel, ImageGenerationModelSettings } from "./ImageGener
  *   ]
  * );
  */
-export declare function generateImage<PROMPT, RESPONSE, SETTINGS extends ImageGenerationModelSettings>(model: ImageGenerationModel<PROMPT, RESPONSE, SETTINGS>, prompt: PROMPT, options?: FunctionOptions<SETTINGS>): Promise<{
+export declare function generateImage<PROMPT, RESPONSE, SETTINGS extends ImageGenerationModelSettings>(model: ImageGenerationModel<PROMPT, RESPONSE, SETTINGS>, prompt: PROMPT, options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<{
     image: string;
     response: RESPONSE;
     metadata: CallMetadata<ImageGenerationModel<PROMPT, RESPONSE, SETTINGS>>;
 }>;
+export declare function generateImage<PROMPT, RESPONSE, SETTINGS extends ImageGenerationModelSettings>(model: ImageGenerationModel<PROMPT, RESPONSE, SETTINGS>, prompt: PROMPT, options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<string>;
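The same overload pattern applies here: the prompt type is model-specific, and the `fullResponse` flag selects between the bare base64 string and the rich result. A sketch with the OpenAI prompt format from the README:

```ts
import { generateImage, OpenAIImageGenerationModel } from "modelfusion";

const model = new OpenAIImageGenerationModel({ size: "512x512" });

// Default: resolves to the base64-encoded image string.
const image = await generateImage(
  model,
  "the wicked witch of the west in the style of early 19th century painting"
);

// fullResponse: true: resolves to { image, response, metadata }.
const { image: base64Image, response, metadata } = await generateImage(
  model,
  "the wicked witch of the west in the style of early 19th century painting",
  { fullResponse: true }
);
```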
package/model-function/generate-image/generateImage.js
CHANGED
@@ -1,19 +1,4 @@
 import { executeCall } from "../executeCall.js";
-/**
- * Generates a base64-encoded image using a prompt.
- * The prompt format depends on the model.
- * For example, OpenAI image models expect a string prompt,
- * and Stability AI models expect an array of text prompts with optional weights.
- *
- * @example
- * const { image } = await generateImage(
- *   new StabilityImageGenerationModel(...),
- *   [
- *     { text: "the wicked witch of the west" },
- *     { text: "style of early 19th century painting", weight: 0.5 },
- *   ]
- * );
- */
 export async function generateImage(model, prompt, options) {
     const result = await executeCall({
         model,
@@ -51,9 +36,11 @@ export async function generateImage(model, prompt, options) {
            generatedImage: output,
        }),
    });
-    return {
-        image: result.output,
-        response: result.response,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            image: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
package/model-function/generate-json/generateJson.cjs
CHANGED
@@ -52,10 +52,12 @@ async function generateJson(model, schemaDefinition, prompt, options) {
            generatedJson: output,
        }),
    });
-    return {
-        value: result.output,
-        response: result.response,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            value: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
 exports.generateJson = generateJson;
package/model-function/generate-json/generateJson.d.ts
CHANGED
@@ -2,8 +2,13 @@ import { FunctionOptions } from "../FunctionOptions.js";
 import { CallMetadata } from "../executeCall.js";
 import { GenerateJsonModel, GenerateJsonModelSettings, GenerateJsonPrompt } from "./GenerateJsonModel.js";
 import { SchemaDefinition } from "./SchemaDefinition.js";
-export declare function generateJson<STRUCTURE, PROMPT, RESPONSE, NAME extends string, SETTINGS extends GenerateJsonModelSettings>(model: GenerateJsonModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinition: SchemaDefinition<NAME, STRUCTURE>, prompt: (schemaDefinition: SchemaDefinition<NAME, STRUCTURE>) => PROMPT & GenerateJsonPrompt<RESPONSE>, options?: FunctionOptions<SETTINGS>): Promise<{
+export declare function generateJson<STRUCTURE, PROMPT, RESPONSE, NAME extends string, SETTINGS extends GenerateJsonModelSettings>(model: GenerateJsonModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinition: SchemaDefinition<NAME, STRUCTURE>, prompt: (schemaDefinition: SchemaDefinition<NAME, STRUCTURE>) => PROMPT & GenerateJsonPrompt<RESPONSE>, options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<{
     value: STRUCTURE;
     response: RESPONSE;
     metadata: CallMetadata<GenerateJsonModel<PROMPT, RESPONSE, SETTINGS>>;
 }>;
+export declare function generateJson<STRUCTURE, PROMPT, RESPONSE, NAME extends string, SETTINGS extends GenerateJsonModelSettings>(model: GenerateJsonModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinition: SchemaDefinition<NAME, STRUCTURE>, prompt: (schemaDefinition: SchemaDefinition<NAME, STRUCTURE>) => PROMPT & GenerateJsonPrompt<RESPONSE>, options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<STRUCTURE>;
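How these two declarations resolve at a call site, as a type-level sketch (`model`, `schemaDefinition`, and `prompt` are placeholders for brevity, not a runnable model setup):

```ts
import { generateJson } from "modelfusion";

// Placeholders standing in for a real model, schema definition, and prompt mapper.
declare const model: any;
declare const schemaDefinition: any;
declare const prompt: any;

// Second overload: resolves directly to the parsed STRUCTURE.
const value = await generateJson(model, schemaDefinition, prompt);

// First overload: resolves to { value, response, metadata }.
const { value: parsed, response, metadata } = await generateJson(
  model,
  schemaDefinition,
  prompt,
  { fullResponse: true }
);
```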
package/model-function/generate-json/generateJson.js
CHANGED
@@ -49,9 +49,11 @@ export async function generateJson(model, schemaDefinition, prompt, options) {
            generatedJson: output,
        }),
    });
-    return {
-        value: result.output,
-        response: result.response,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            value: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
package/model-function/generate-json/generateJsonOrText.cjs
CHANGED
@@ -65,10 +65,12 @@ async function generateJsonOrText(model, schemaDefinitions, prompt, options) {
            generatedJson: output,
        }),
    });
-    return {
-        ...result.output,
-        response: result.response,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            ...result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
 exports.generateJsonOrText = generateJsonOrText;
package/model-function/generate-json/generateJsonOrText.d.ts
CHANGED
@@ -14,7 +14,9 @@ type ToSchemaUnion<T> = {
 } : never;
 }[keyof T];
 type ToOutputValue<SCHEMAS extends SchemaDefinitionArray<SchemaDefinition<any, any>[]>> = ToSchemaUnion<ToSchemaDefinitionsMap<SCHEMAS>>;
-export declare function generateJsonOrText<SCHEMAS extends SchemaDefinition<any, any>[], PROMPT, RESPONSE, SETTINGS extends GenerateJsonOrTextModelSettings>(model: GenerateJsonOrTextModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinitions: SCHEMAS, prompt: (schemaDefinitions: SCHEMAS) => PROMPT & GenerateJsonOrTextPrompt<RESPONSE>, options?: FunctionOptions<SETTINGS>): Promise<({
+export declare function generateJsonOrText<SCHEMAS extends SchemaDefinition<any, any>[], PROMPT, RESPONSE, SETTINGS extends GenerateJsonOrTextModelSettings>(model: GenerateJsonOrTextModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinitions: SCHEMAS, prompt: (schemaDefinitions: SCHEMAS) => PROMPT & GenerateJsonOrTextPrompt<RESPONSE>, options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<({
     schema: null;
     value: null;
     text: string;
@@ -22,4 +24,11 @@ export declare function generateJsonOrText<SCHEMAS extends SchemaDefinition<any,
     response: RESPONSE;
     metadata: CallMetadata<GenerateJsonOrTextModel<PROMPT, RESPONSE, SETTINGS>>;
 }>;
+export declare function generateJsonOrText<SCHEMAS extends SchemaDefinition<any, any>[], PROMPT, RESPONSE, SETTINGS extends GenerateJsonOrTextModelSettings>(model: GenerateJsonOrTextModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinitions: SCHEMAS, prompt: (schemaDefinitions: SCHEMAS) => PROMPT & GenerateJsonOrTextPrompt<RESPONSE>, options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<{
+    schema: null;
+    value: null;
+    text: string;
+} | ToOutputValue<SCHEMAS>>;
 export {};
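In the default overload, the result is a union that can be narrowed on `schema`: `null` means the model answered with plain text, otherwise `value` carries the JSON for the matched schema definition. A type-level sketch (placeholders again, not a runnable setup):

```ts
import { generateJsonOrText } from "modelfusion";

declare const model: any;
declare const schemaDefinitions: any;
declare const prompt: any;

const result = await generateJsonOrText(model, schemaDefinitions, prompt);

if (result.schema === null) {
  // The model answered with plain text.
  console.log(result.text);
} else {
  // The model produced JSON matching one of the schema definitions.
  console.log(result.schema, result.value);
}
```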
package/model-function/generate-json/generateJsonOrText.js
CHANGED
@@ -62,9 +62,11 @@ export async function generateJsonOrText(model, schemaDefinitions, prompt, optio
            generatedJson: output,
        }),
    });
-    return {
-        ...result.output,
-        response: result.response,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            ...result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
package/model-function/generate-text/generateText.cjs
CHANGED
@@ -2,18 +2,6 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.generateText = void 0;
 const executeCall_js_1 = require("../executeCall.cjs");
-/**
- * Generates a text using a prompt.
- * The prompt format depends on the model.
- * For example, OpenAI text models expect a string prompt, and OpenAI chat models expect an array of chat messages.
- *
- * @example
- * const model = new OpenAITextGenerationModel(...);
- *
- * const { text } = await model.generateText(
- *   "Write a short story about a robot learning to love:\n\n"
- * );
- */
 async function generateText(
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 model, prompt, options) {
@@ -58,10 +46,12 @@ model, prompt, options) {
            generatedText: output,
        }),
    });
-    return {
-        text: result.output,
-        response: result.response,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            text: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
 exports.generateText = generateText;
package/model-function/generate-text/generateText.d.ts
CHANGED
@@ -9,12 +9,17 @@ import { TextGenerationModel, TextGenerationModelSettings } from "./TextGenerati
  * @example
  * const model = new OpenAITextGenerationModel(...);
  *
- * const { text } = await model.generateText(
+ * const text = await model.generateText(
  *   "Write a short story about a robot learning to love:\n\n"
  * );
 */
-export declare function generateText<PROMPT, RESPONSE, SETTINGS extends TextGenerationModelSettings>(model: TextGenerationModel<PROMPT, RESPONSE, any, SETTINGS>, prompt: PROMPT, options?: FunctionOptions<SETTINGS>): Promise<{
+export declare function generateText<PROMPT, RESPONSE, SETTINGS extends TextGenerationModelSettings>(model: TextGenerationModel<PROMPT, RESPONSE, any, SETTINGS>, prompt: PROMPT, options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<{
     text: string;
     response: RESPONSE;
     metadata: CallMetadata<TextGenerationModel<PROMPT, RESPONSE, unknown, SETTINGS>>;
 }>;
+export declare function generateText<PROMPT, RESPONSE, SETTINGS extends TextGenerationModelSettings>(model: TextGenerationModel<PROMPT, RESPONSE, any, SETTINGS>, prompt: PROMPT, options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<string>;
package/model-function/generate-text/generateText.js
CHANGED
@@ -1,16 +1,4 @@
 import { executeCall } from "../executeCall.js";
-/**
- * Generates a text using a prompt.
- * The prompt format depends on the model.
- * For example, OpenAI text models expect a string prompt, and OpenAI chat models expect an array of chat messages.
- *
- * @example
- * const model = new OpenAITextGenerationModel(...);
- *
- * const { text } = await model.generateText(
- *   "Write a short story about a robot learning to love:\n\n"
- * );
- */
 export async function generateText(
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 model, prompt, options) {
@@ -55,9 +43,11 @@ model, prompt, options) {
            generatedText: output,
        }),
    });
-    return {
-        text: result.output,
-        response: result.response,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            text: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
package/model-function/generate-text/streamText.cjs
CHANGED
@@ -107,9 +107,11 @@ async function streamText(model, prompt, options) {
        });
        throw result.error;
    }
-    return {
-        textStream: result.output,
-        metadata: startMetadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            textStream: result.output,
+            metadata: startMetadata,
+        }
+        : result.output;
 }
 exports.streamText = streamText;
package/model-function/generate-text/streamText.d.ts
CHANGED
@@ -5,7 +5,15 @@ import { TextGenerationModel, TextGenerationModelSettings } from "./TextGenerati
 export declare function streamText<PROMPT, FULL_DELTA, SETTINGS extends TextGenerationModelSettings>(model: TextGenerationModel<PROMPT, unknown, FULL_DELTA, SETTINGS> & {
     generateDeltaStreamResponse: (prompt: PROMPT, options: FunctionOptions<SETTINGS>) => PromiseLike<AsyncIterable<DeltaEvent<FULL_DELTA>>>;
     extractTextDelta: (fullDelta: FULL_DELTA) => string | undefined;
-}, prompt: PROMPT, options?: FunctionOptions<SETTINGS>): Promise<{
+}, prompt: PROMPT, options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<AsyncIterable<string>>;
+export declare function streamText<PROMPT, FULL_DELTA, SETTINGS extends TextGenerationModelSettings>(model: TextGenerationModel<PROMPT, unknown, FULL_DELTA, SETTINGS> & {
+    generateDeltaStreamResponse: (prompt: PROMPT, options: FunctionOptions<SETTINGS>) => PromiseLike<AsyncIterable<DeltaEvent<FULL_DELTA>>>;
+    extractTextDelta: (fullDelta: FULL_DELTA) => string | undefined;
+}, prompt: PROMPT, options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<{
     textStream: AsyncIterable<string>;
     metadata: Omit<CallMetadata<TextGenerationModel<PROMPT, unknown, FULL_DELTA, SETTINGS>>, "durationInMs">;
 }>;
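For `streamText` the default overload now resolves to the async iterable itself; `fullResponse: true` additionally exposes the call metadata (without `durationInMs`, per the `Omit` in the declaration, presumably because the stream has not finished when the promise resolves). A sketch using the chat model from the README examples:

```ts
import { streamText, OpenAIChatModel, OpenAIChatMessage } from "modelfusion";

const model = new OpenAIChatModel({ model: "gpt-3.5-turbo", maxTokens: 1000 });

// Default: resolves to AsyncIterable<string>.
const textStream = await streamText(model, [
  OpenAIChatMessage.system("You are a story writer."),
]);

for await (const textFragment of textStream) {
  process.stdout.write(textFragment);
}

// fullResponse: true: resolves to { textStream, metadata }.
const { textStream: stream, metadata } = await streamText(
  model,
  [OpenAIChatMessage.system("You are a story writer.")],
  { fullResponse: true }
);
```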
package/model-function/generate-text/streamText.js
CHANGED
@@ -104,8 +104,10 @@ export async function streamText(model, prompt, options) {
        });
        throw result.error;
    }
-    return {
-        textStream: result.output,
-        metadata: startMetadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            textStream: result.output,
+            metadata: startMetadata,
+        }
+        : result.output;
 }
package/model-function/transcribe-audio/transcribe.cjs
CHANGED
@@ -2,20 +2,6 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.transcribe = void 0;
 const executeCall_js_1 = require("../executeCall.cjs");
-/**
- * Transcribe audio data into text.
- *
- * @example
- * const data = await fs.promises.readFile("data/test.mp3");
- *
- * const { transcription } = await transcribe(
- *   new OpenAITranscriptionModel({ model: "whisper-1" }),
- *   {
- *     type: "mp3",
- *     data,
- *   }
- * );
- */
 async function transcribe(model, data, options) {
     const result = await (0, executeCall_js_1.executeCall)({
         model,
@@ -53,10 +39,12 @@ async function transcribe(model, data, options) {
            transcription: output,
        }),
    });
-    return {
-        transcription: result.output,
-        response: result.response,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            transcription: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
 exports.transcribe = transcribe;
package/model-function/transcribe-audio/transcribe.d.ts
CHANGED
@@ -7,7 +7,7 @@ import { TranscriptionModel, TranscriptionModelSettings } from "./TranscriptionM
  * @example
  * const data = await fs.promises.readFile("data/test.mp3");
  *
- * const { transcription } = await transcribe(
+ * const transcription = await transcribe(
  *   new OpenAITranscriptionModel({ model: "whisper-1" }),
  *   {
  *     type: "mp3",
@@ -15,8 +15,13 @@ import { TranscriptionModel, TranscriptionModelSettings } from "./TranscriptionM
  *   }
  * );
  */
-export declare function transcribe<DATA, RESPONSE, SETTINGS extends TranscriptionModelSettings>(model: TranscriptionModel<DATA, RESPONSE, SETTINGS>, data: DATA, options?: FunctionOptions<SETTINGS>): Promise<{
+export declare function transcribe<DATA, RESPONSE, SETTINGS extends TranscriptionModelSettings>(model: TranscriptionModel<DATA, RESPONSE, SETTINGS>, data: DATA, options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<{
     transcription: string;
     response: RESPONSE;
     metadata: CallMetadata<TranscriptionModel<DATA, RESPONSE, SETTINGS>>;
 }>;
+export declare function transcribe<DATA, RESPONSE, SETTINGS extends TranscriptionModelSettings>(model: TranscriptionModel<DATA, RESPONSE, SETTINGS>, data: DATA, options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<string>;
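And the corresponding usage for `transcribe`, following the README example:

```ts
import fs from "node:fs";
import { transcribe, OpenAITranscriptionModel } from "modelfusion";

const data = await fs.promises.readFile("data/test.mp3");

// Default: resolves to the transcription string.
const transcription = await transcribe(
  new OpenAITranscriptionModel({ model: "whisper-1" }),
  { type: "mp3", data }
);

// fullResponse: true: resolves to { transcription, response, metadata }.
const { transcription: text, response, metadata } = await transcribe(
  new OpenAITranscriptionModel({ model: "whisper-1" }),
  { type: "mp3", data },
  { fullResponse: true }
);
```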
package/model-function/transcribe-audio/transcribe.js
CHANGED
@@ -1,18 +1,4 @@
 import { executeCall } from "../executeCall.js";
-/**
- * Transcribe audio data into text.
- *
- * @example
- * const data = await fs.promises.readFile("data/test.mp3");
- *
- * const { transcription } = await transcribe(
- *   new OpenAITranscriptionModel({ model: "whisper-1" }),
- *   {
- *     type: "mp3",
- *     data,
- *   }
- * );
- */
 export async function transcribe(model, data, options) {
     const result = await executeCall({
         model,
@@ -50,9 +36,11 @@ export async function transcribe(model, data, options) {
            transcription: output,
        }),
    });
-    return {
-        transcription: result.output,
-        response: result.response,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            transcription: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
package/model-provider/cohere/CohereTextEmbeddingModel.cjs
CHANGED
@@ -30,7 +30,7 @@ exports.COHERE_TEXT_EMBEDDING_MODELS = {
  * @see https://docs.cohere.com/reference/embed
  *
  * @example
- * const { embeddings } = await embedTexts(
+ * const embeddings = await embedTexts(
  *   new CohereTextEmbeddingModel({ model: "embed-english-light-v2.0" }),
  *   [
  *     "At first, Nox didn't know what to do with the pup.",
package/model-provider/cohere/CohereTextEmbeddingModel.d.ts
CHANGED
@@ -38,7 +38,7 @@ export interface CohereTextEmbeddingModelSettings extends TextEmbeddingModelSett
  * @see https://docs.cohere.com/reference/embed
  *
  * @example
- * const { embeddings } = await embedTexts(
+ * const embeddings = await embedTexts(
  *   new CohereTextEmbeddingModel({ model: "embed-english-light-v2.0" }),
  *   [
  *     "At first, Nox didn't know what to do with the pup.",
package/model-provider/cohere/CohereTextEmbeddingModel.js
CHANGED
@@ -24,7 +24,7 @@ export const COHERE_TEXT_EMBEDDING_MODELS = {
  * @see https://docs.cohere.com/reference/embed
  *
  * @example
- * const { embeddings } = await embedTexts(
+ * const embeddings = await embedTexts(
  *   new CohereTextEmbeddingModel({ model: "embed-english-light-v2.0" }),
  *   [
  *     "At first, Nox didn't know what to do with the pup.",
package/model-provider/cohere/CohereTextGenerationModel.d.ts
CHANGED
@@ -59,7 +59,7 @@ export interface CohereTextGenerationModelSettings extends TextGenerationModelSe
  *   maxTokens: 500,
  * });
  *
- * const { text } = await generateText(
+ * const text = await generateText(
  *   model,
  *   "Write a short story about a robot learning to love:\n\n"
  * );
package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs
CHANGED
@@ -21,7 +21,7 @@ const HuggingFaceError_js_1 = require("./HuggingFaceError.cjs");
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
  * });
  *
- * const { embeddings } = await embedTexts(
+ * const embeddings = await embedTexts(
  *   model,
  *   [
  *     "At first, Nox didn't know what to do with the pup.",
package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts
CHANGED
@@ -29,7 +29,7 @@ export interface HuggingFaceTextEmbeddingModelSettings extends TextEmbeddingMode
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
  * });
  *
- * const { embeddings } = await embedTexts(
+ * const embeddings = await embedTexts(
  *   model,
  *   [
  *     "At first, Nox didn't know what to do with the pup.",
package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js
CHANGED
@@ -15,7 +15,7 @@ import { failedHuggingFaceCallResponseHandler } from "./HuggingFaceError.js";
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
  * });
  *
- * const { embeddings } = await embedTexts(
+ * const embeddings = await embedTexts(
  *   model,
  *   [
  *     "At first, Nox didn't know what to do with the pup.",
package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs
CHANGED
@@ -23,7 +23,7 @@ const PromptMappingTextGenerationModel_js_1 = require("../../prompt/PromptMappin
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
  * });
  *
- * const { text } = await generateText(
+ * const text = await generateText(
  *   model,
  *   "Write a short story about a robot learning to love:\n\n"
  * );
package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts
CHANGED
@@ -38,7 +38,7 @@ export interface HuggingFaceTextGenerationModelSettings extends TextGenerationMo
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
  * });
  *
- * const { text } = await generateText(
+ * const text = await generateText(
  *   model,
  *   "Write a short story about a robot learning to love:\n\n"
  * );
package/model-provider/huggingface/HuggingFaceTextGenerationModel.js
CHANGED
@@ -17,7 +17,7 @@ import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingText
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
  * });
  *
- * const { text } = await generateText(
+ * const text = await generateText(
  *   model,
  *   "Write a short story about a robot learning to love:\n\n"
  * );
package/model-provider/openai/OpenAIImageGenerationModel.cjs
CHANGED
@@ -22,7 +22,7 @@ exports.calculateOpenAIImageGenerationCostInMillicents = calculateOpenAIImageGen
  * @see https://platform.openai.com/docs/api-reference/images/create
  *
  * @example
- * const { image } = await generateImage(
+ * const image = await generateImage(
  *   new OpenAIImageGenerationModel({ size: "512x512" }),
  *   "the wicked witch of the west in the style of early 19th century painting"
  * );
package/model-provider/openai/OpenAIImageGenerationModel.d.ts
CHANGED
@@ -20,7 +20,7 @@ export interface OpenAIImageGenerationSettings extends ImageGenerationModelSetti
  * @see https://platform.openai.com/docs/api-reference/images/create
  *
  * @example
- * const { image } = await generateImage(
+ * const image = await generateImage(
  *   new OpenAIImageGenerationModel({ size: "512x512" }),
  *   "the wicked witch of the west in the style of early 19th century painting"
  * );
package/model-provider/openai/OpenAIImageGenerationModel.js
CHANGED
@@ -18,7 +18,7 @@ export const calculateOpenAIImageGenerationCostInMillicents = ({ settings, }) =>
  * @see https://platform.openai.com/docs/api-reference/images/create
  *
  * @example
- * const { image } = await generateImage(
+ * const image = await generateImage(
  *   new OpenAIImageGenerationModel({ size: "512x512" }),
  *   "the wicked witch of the west in the style of early 19th century painting"
  * );
package/model-provider/openai/OpenAITextEmbeddingModel.cjs
CHANGED
@@ -36,7 +36,7 @@ exports.calculateOpenAIEmbeddingCostInMillicents = calculateOpenAIEmbeddingCostI
  * @see https://platform.openai.com/docs/api-reference/embeddings
  *
  * @example
- * const { embeddings } = await embedTexts(
+ * const embeddings = await embedTexts(
  *   new OpenAITextEmbeddingModel({ model: "text-embedding-ada-002" }),
  *   [
  *     "At first, Nox didn't know what to do with the pup.",
package/model-provider/openai/OpenAITextEmbeddingModel.d.ts
CHANGED
@@ -32,7 +32,7 @@ export interface OpenAITextEmbeddingModelSettings extends TextEmbeddingModelSett
  * @see https://platform.openai.com/docs/api-reference/embeddings
  *
  * @example
- * const { embeddings } = await embedTexts(
+ * const embeddings = await embedTexts(
  *   new OpenAITextEmbeddingModel({ model: "text-embedding-ada-002" }),
  *   [
  *     "At first, Nox didn't know what to do with the pup.",
package/model-provider/openai/OpenAITextEmbeddingModel.js
CHANGED
@@ -28,7 +28,7 @@ export const calculateOpenAIEmbeddingCostInMillicents = ({ model, responses, })
  * @see https://platform.openai.com/docs/api-reference/embeddings
  *
  * @example
- * const { embeddings } = await embedTexts(
+ * const embeddings = await embedTexts(
  *   new OpenAITextEmbeddingModel({ model: "text-embedding-ada-002" }),
  *   [
  *     "At first, Nox didn't know what to do with the pup.",
package/model-provider/openai/OpenAITextGenerationModel.cjs
CHANGED
@@ -79,7 +79,7 @@ exports.calculateOpenAITextGenerationCostInMillicents = calculateOpenAITextGener
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
  * });
  *
- * const { text } = await generateText(
+ * const text = await generateText(
  *   model,
  *   "Write a short story about a robot learning to love:\n\n"
  * );
package/model-provider/openai/OpenAITextGenerationModel.d.ts
CHANGED
@@ -96,7 +96,7 @@ export interface OpenAITextGenerationModelSettings extends TextGenerationModelSe
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
  * });
  *
- * const { text } = await generateText(
+ * const text = await generateText(
  *   model,
  *   "Write a short story about a robot learning to love:\n\n"
  * );
package/model-provider/openai/OpenAITextGenerationModel.js
CHANGED
@@ -71,7 +71,7 @@ export const calculateOpenAITextGenerationCostInMillicents = ({ model, response,
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
  * });
  *
- * const { text } = await generateText(
+ * const text = await generateText(
  *   model,
  *   "Write a short story about a robot learning to love:\n\n"
  * );
package/model-provider/openai/OpenAITranscriptionModel.cjs
CHANGED
@@ -34,7 +34,7 @@ exports.calculateOpenAITranscriptionCostInMillicents = calculateOpenAITranscript
  * @example
  * const data = await fs.promises.readFile("data/test.mp3");
  *
- * const { transcription } = await transcribe(
+ * const transcription = await transcribe(
  *   new OpenAITranscriptionModel({ model: "whisper-1" }),
  *   {
  *     type: "mp3",
package/model-provider/openai/OpenAITranscriptionModel.d.ts
CHANGED
@@ -39,7 +39,7 @@ export type OpenAITranscriptionInput = {
  * @example
  * const data = await fs.promises.readFile("data/test.mp3");
  *
- * const { transcription } = await transcribe(
+ * const transcription = await transcribe(
  *   new OpenAITranscriptionModel({ model: "whisper-1" }),
  *   {
  *     type: "mp3",
package/model-provider/openai/OpenAITranscriptionModel.js
CHANGED
@@ -27,7 +27,7 @@ export const calculateOpenAITranscriptionCostInMillicents = ({ model, response,
  * @example
  * const data = await fs.promises.readFile("data/test.mp3");
  *
- * const { transcription } = await transcribe(
+ * const transcription = await transcribe(
  *   new OpenAITranscriptionModel({ model: "whisper-1" }),
  *   {
  *     type: "mp3",
package/model-provider/openai/chat/OpenAIChatModel.cjs
CHANGED
@@ -95,7 +95,7 @@ exports.calculateOpenAIChatCostInMillicents = calculateOpenAIChatCostInMillicent
  *   maxTokens: 500,
  * });
  *
- * const { text } = await generateText([
+ * const text = await generateText([
  *   model,
  *   OpenAIChatMessage.system(
  *     "Write a short story about a robot learning to love:"
package/model-provider/openai/chat/OpenAIChatModel.d.ts
CHANGED
@@ -109,7 +109,7 @@ export interface OpenAIChatSettings extends TextGenerationModelSettings, OpenAIM
  *   maxTokens: 500,
  * });
  *
- * const { text } = await generateText([
+ * const text = await generateText([
  *   model,
  *   OpenAIChatMessage.system(
  *     "Write a short story about a robot learning to love:"
package/model-provider/openai/chat/OpenAIChatModel.js
CHANGED
@@ -87,7 +87,7 @@ export const calculateOpenAIChatCostInMillicents = ({ model, response, }) => res
  *   maxTokens: 500,
  * });
  *
- * const { text } = await generateText([
+ * const text = await generateText([
  *   model,
  *   OpenAIChatMessage.system(
  *     "Write a short story about a robot learning to love:"
package/model-provider/stability/StabilityImageGenerationModel.cjs
CHANGED
@@ -12,7 +12,7 @@ const StabilityError_js_1 = require("./StabilityError.cjs");
  * @see https://api.stability.ai/docs#tag/v1generation/operation/textToImage
  *
  * @example
- * const { image } = await generateImage(
+ * const image = await generateImage(
  *   new StabilityImageGenerationModel({
  *     model: "stable-diffusion-512-v2-1",
  *     cfgScale: 7,
package/model-provider/stability/StabilityImageGenerationModel.d.ts
CHANGED
@@ -10,7 +10,7 @@ import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
  * @see https://api.stability.ai/docs#tag/v1generation/operation/textToImage
  *
  * @example
- * const { image } = await generateImage(
+ * const image = await generateImage(
  *   new StabilityImageGenerationModel({
  *     model: "stable-diffusion-512-v2-1",
  *     cfgScale: 7,
package/model-provider/stability/StabilityImageGenerationModel.js
CHANGED
@@ -9,7 +9,7 @@ import { failedStabilityCallResponseHandler } from "./StabilityError.js";
  * @see https://api.stability.ai/docs#tag/v1generation/operation/textToImage
  *
  * @example
- * const { image } = await generateImage(
+ * const image = await generateImage(
  *   new StabilityImageGenerationModel({
  *     model: "stable-diffusion-512-v2-1",
  *     cfgScale: 7,
package/package.json
CHANGED
-  "version": "0.6.0",
+  "version": "0.7.0",
package/text-chunk/SimilarTextChunksFromVectorIndexRetriever.cjs
CHANGED
@@ -36,7 +36,7 @@ class SimilarTextChunksFromVectorIndexRetriever {
                run: options.run,
            });
        }
        const embedding = await (0, embedText_js_1.embedText)(this.embeddingModel, query, {
-        const { embedding } = await (0, embedText_js_1.embedText)(this.embeddingModel, query, {
+        const embedding = await (0, embedText_js_1.embedText)(this.embeddingModel, query, {
            functionId: options?.functionId,
            run: options?.run,
        });
package/text-chunk/SimilarTextChunksFromVectorIndexRetriever.js
CHANGED
@@ -33,7 +33,7 @@ export class SimilarTextChunksFromVectorIndexRetriever {
                run: options.run,
            });
        }
-        const { embedding } = await embedText(this.embeddingModel, query, {
+        const embedding = await embedText(this.embeddingModel, query, {
            functionId: options?.functionId,
            run: options?.run,
        });
package/text-chunk/upsertTextChunks.cjs
CHANGED
@@ -5,7 +5,7 @@ const nanoid_1 = require("nanoid");
 const embedText_js_1 = require("../model-function/embed-text/embedText.cjs");
 async function upsertTextChunks({ vectorIndex, embeddingModel, generateId = nanoid_1.nanoid, chunks, ids, }, options) {
     // many embedding models support bulk embedding, so we first embed all texts:
-    const { embeddings } = await (0, embedText_js_1.embedTexts)(embeddingModel, chunks.map((chunk) => chunk.text), options);
+    const embeddings = await (0, embedText_js_1.embedTexts)(embeddingModel, chunks.map((chunk) => chunk.text), options);
     await vectorIndex.upsertMany(chunks.map((chunk, i) => ({
         id: ids?.[i] ?? generateId(),
         vector: embeddings[i],
package/text-chunk/upsertTextChunks.js
CHANGED
@@ -2,7 +2,7 @@ import { nanoid as createId } from "nanoid";
 import { embedTexts } from "../model-function/embed-text/embedText.js";
 export async function upsertTextChunks({ vectorIndex, embeddingModel, generateId = createId, chunks, ids, }, options) {
     // many embedding models support bulk embedding, so we first embed all texts:
-    const { embeddings } = await embedTexts(embeddingModel, chunks.map((chunk) => chunk.text), options);
+    const embeddings = await embedTexts(embeddingModel, chunks.map((chunk) => chunk.text), options);
     await vectorIndex.upsertMany(chunks.map((chunk, i) => ({
         id: ids?.[i] ?? generateId(),
         vector: embeddings[i],