modelfusion 0.5.0 → 0.7.0
This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
- package/README.md +22 -21
- package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.cjs +3 -3
- package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js +4 -4
- package/composed-function/use-tool/useTool.cjs +4 -1
- package/composed-function/use-tool/useTool.js +4 -1
- package/model-function/embed-text/embedText.cjs +16 -30
- package/model-function/embed-text/embedText.d.ts +14 -4
- package/model-function/embed-text/embedText.js +16 -30
- package/model-function/generate-image/generateImage.cjs +7 -20
- package/model-function/generate-image/generateImage.d.ts +7 -2
- package/model-function/generate-image/generateImage.js +7 -20
- package/model-function/generate-json/generateJson.cjs +7 -5
- package/model-function/generate-json/generateJson.d.ts +6 -1
- package/model-function/generate-json/generateJson.js +7 -5
- package/model-function/generate-json/generateJsonOrText.cjs +7 -5
- package/model-function/generate-json/generateJsonOrText.d.ts +10 -1
- package/model-function/generate-json/generateJsonOrText.js +7 -5
- package/model-function/generate-text/generateText.cjs +7 -17
- package/model-function/generate-text/generateText.d.ts +7 -2
- package/model-function/generate-text/generateText.js +7 -17
- package/model-function/generate-text/streamText.cjs +6 -4
- package/model-function/generate-text/streamText.d.ts +9 -1
- package/model-function/generate-text/streamText.js +6 -4
- package/model-function/transcribe-audio/transcribe.cjs +7 -19
- package/model-function/transcribe-audio/transcribe.d.ts +7 -2
- package/model-function/transcribe-audio/transcribe.js +7 -19
- package/model-provider/cohere/CohereTextEmbeddingModel.cjs +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.js +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.cjs +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.js +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +1 -1
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +1 -1
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +1 -1
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +1 -1
- package/model-provider/openai/OpenAIImageGenerationModel.cjs +1 -1
- package/model-provider/openai/OpenAIImageGenerationModel.d.ts +1 -1
- package/model-provider/openai/OpenAIImageGenerationModel.js +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.js +1 -1
- package/model-provider/openai/OpenAITextGenerationModel.cjs +1 -1
- package/model-provider/openai/OpenAITextGenerationModel.d.ts +1 -1
- package/model-provider/openai/OpenAITextGenerationModel.js +1 -1
- package/model-provider/openai/OpenAITranscriptionModel.cjs +1 -1
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +1 -1
- package/model-provider/openai/OpenAITranscriptionModel.js +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.cjs +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.js +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.cjs +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.js +1 -1
- package/package.json +2 -2
- package/text-chunk/SimilarTextChunksFromVectorIndexRetriever.cjs +1 -1
- package/text-chunk/SimilarTextChunksFromVectorIndexRetriever.js +1 -1
- package/text-chunk/split/splitOnSeparator.cjs +7 -9
- package/text-chunk/split/splitOnSeparator.d.ts +5 -6
- package/text-chunk/split/splitOnSeparator.js +6 -7
- package/text-chunk/split/splitRecursively.cjs +16 -7
- package/text-chunk/split/splitRecursively.d.ts +13 -4
- package/text-chunk/split/splitRecursively.js +13 -4
- package/text-chunk/split/splitTextChunks.cjs +10 -8
- package/text-chunk/split/splitTextChunks.d.ts +1 -0
- package/text-chunk/split/splitTextChunks.js +8 -7
- package/text-chunk/upsertTextChunks.cjs +1 -1
- package/text-chunk/upsertTextChunks.js +1 -1
package/README.md
CHANGED
````diff
@@ -10,9 +10,8 @@
 
 [Introduction](#introduction) | [Quick Install](#quick-install) | [Usage](#usage-examples) | [Features](#features) | [Integrations](#integrations) | [Documentation](#documentation) | [Examples](#more-examples) | [modelfusion.dev](https://modelfusion.dev)
 
-
-
-ModelFusion is in its initial development phase. Until version 1.0 there may be breaking changes.
+> [!NOTE]
+> ModelFusion is in its initial development phase. Until version 1.0 there may be breaking changes, because we are still exploring the API design. We welcome your feedback and suggestions.
 
 ## Introduction
 
@@ -49,7 +48,7 @@ You can use [prompt mappings](https://modelfusion.dev/guide/function/generate-te
 #### generateText
 
 ```ts
-const { text } = await generateText(
+const text = await generateText(
   new OpenAITextGenerationModel({ model: "text-davinci-003" }),
   "Write a short story about a robot learning to love:\n\n"
 );
@@ -58,7 +57,7 @@ const { text } = await generateText(
 #### streamText
 
 ```ts
-const { textStream } = await streamText(
+const textStream = await streamText(
   new OpenAIChatModel({ model: "gpt-3.5-turbo", maxTokens: 1000 }),
   [
     OpenAIChatMessage.system("You are a story writer."),
@@ -76,7 +75,7 @@ for await (const textFragment of textStream) {
 [Prompt mapping](https://modelfusion.dev/guide/function/generate-text/prompt-mapping) lets you use higher level prompt structures (such as instruction or chat prompts) for different models.
 
 ```ts
-const { text } = await generateText(
+const text = await generateText(
   new LlamaCppTextGenerationModel({
     contextWindowSize: 4096, // Llama 2 context window size
     nPredict: 1000,
@@ -89,7 +88,7 @@ const { text } = await generateText(
 ```
 
 ```ts
-const { textStream } = await streamText(
+const textStream = await streamText(
   new OpenAIChatModel({
     model: "gpt-3.5-turbo",
   }).mapPrompt(ChatToOpenAIChatPromptMapping()),
@@ -104,14 +103,15 @@ const { textStream } = await streamText(
 
 #### Metadata and original responses
 
-
+ModelFusion model functions return rich results that include the original response and metadata when you set the `fullResponse` option to `true`.
 
 ```ts
 const { text, response, metadata } = await generateText(
   new OpenAITextGenerationModel({
     model: "text-davinci-003",
   }),
-  "Write a short story about a robot learning to love:\n\n"
+  "Write a short story about a robot learning to love:\n\n",
+  { fullResponse: true }
 );
 ```
 
@@ -120,7 +120,7 @@ const { text, response, metadata } = await generateText(
 Generate JSON value that matches a schema.
 
 ```ts
-const { value } = await generateJson(
+const value = await generateJson(
   new OpenAIChatModel({
     model: "gpt-3.5-turbo",
     temperature: 0,
@@ -249,7 +249,7 @@ const { tool, parameters, result, text } = await useToolOrGenerateText(
 Turn audio (voice) into text.
 
 ```ts
-const { transcription } = await transcribe(
+const transcription = await transcribe(
   new OpenAITranscriptionModel({ model: "whisper-1" }),
   {
     type: "mp3",
@@ -263,7 +263,7 @@ const { transcription } = await transcribe(
 Generate a base64-encoded image from a prompt.
 
 ```ts
-const { image } = await generateImage(
+const image = await generateImage(
   new OpenAIImageGenerationModel({ size: "512x512" }),
   "the wicked witch of the west in the style of early 19th century painting"
 );
@@ -274,7 +274,7 @@ const { image } = await generateImage(
 Create embeddings for text. Embeddings are vectors that represent the meaning of the text.
 
 ```ts
-const { embeddings } = await embedTexts(
+const embeddings = await embedTexts(
   new OpenAITextEmbeddingModel({ model: "text-embedding-ada-002" }),
   [
     "At first, Nox didn't know what to do with the pup.",
@@ -343,9 +343,9 @@ const { chunks } = await retrieveTextChunks(
 - [Transcribe Audio](https://modelfusion.dev/guide/function/transcribe-audio)
 - [Generate images](https://modelfusion.dev/guide/function/generate-image)
 - Summarize text
-- Split text
 - [Tools](https://modelfusion.dev/guide/tools)
-- [Text Chunks](https://modelfusion.dev/guide/text-
+- [Text Chunks](https://modelfusion.dev/guide/text-chunk/)
+- [Split Text](https://modelfusion.dev/guide/text-chunk/split)
 - [Run abstraction](https://modelfusion.dev/guide/run/)
 - [Abort signals](https://modelfusion.dev/guide/run/abort)
 - [Cost calculation](https://modelfusion.dev/guide/run/cost-calculation)
@@ -399,6 +399,7 @@ Use higher level prompts that are mapped into model specific prompt formats.
 - [Examples & Tutorials](https://modelfusion.dev/tutorial)
 - [Integrations](https://modelfusion.dev/integration/model-provider)
 - [API Reference](https://modelfusion.dev/api/modules)
+- [Blog](https://modelfusion.dev/api/blog)
 
 ## More Examples
 
@@ -416,6 +417,12 @@ Examples for the individual functions and objects.
 
 A web chat with an AI assistant, implemented as a Next.js app.
 
+### [Chat with PDF](https://github.com/lgrammel/modelfusion/tree/main/examples/pdf-chat-terminal)
+
+> _terminal app_, _PDF parsing_, _in memory vector indices_, _retrieval augmented generation_, _hypothetical document embedding_
+
+Ask questions about a PDF document and get answers from the document.
+
 ### [Image generator (Next.js)](https://github.com/lgrammel/modelfusion/tree/main/examples/image-generator-next-js)
 
 > _Next.js app_, _Stability AI image generation_
@@ -440,12 +447,6 @@ TypeScript implementation of the classic [BabyAGI](https://github.com/yoheinakaj
 
 Small agent that solves middle school math problems. It uses a calculator tool to solve the problems.
 
-### [Chat with PDF](https://github.com/lgrammel/modelfusion/tree/main/examples/pdf-chat-terminal)
-
-> _terminal app_, _PDF parsing_, _in memory vector indices_, _retrieval augmented generation_, _hypothetical document embedding_
-
-Ask questions about a PDF document and get answers from the document.
-
 ### [PDF to Tweet](https://github.com/lgrammel/modelfusion/tree/main/examples/pdf-to-tweet)
 
 > _terminal app_, _PDF parsing_, _recursive information extraction_, _in memory vector index, \_style example retrieval_, _OpenAI GPT-4_, _cost calculation_
````
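
Taken together, the README changes above document the central 0.7.0 breaking change: model functions now return their output value directly, and the former rich result object is opt-in via `fullResponse: true`. A minimal migration sketch (model and prompt are illustrative):

```ts
import { OpenAITextGenerationModel, generateText } from "modelfusion";

const model = new OpenAITextGenerationModel({ model: "text-davinci-003" });
const prompt = "Write a short story about a robot learning to love:\n\n";

// 0.5.0 style (no longer the default shape):
// const { text } = await generateText(model, prompt);

// 0.7.0: the plain value is returned by default...
const text = await generateText(model, prompt);

// ...and the rich result is opt-in:
const { text: story, response, metadata } = await generateText(model, prompt, {
  fullResponse: true,
});
```
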
package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.cjs
CHANGED
```diff
@@ -13,12 +13,12 @@ async function summarizeRecursivelyWithTextGenerationAndTokenSplitting({ text, m
 (model.maxCompletionTokens ?? model.contextWindowSize / 4), join, }, options) {
     const emptyPromptTokens = await model.countPromptTokens(await prompt({ text: "" }));
     return (0, summarizeRecursively_js_1.summarizeRecursively)({
-        split: (0, splitRecursively_js_1.
+        split: (0, splitRecursively_js_1.splitAtToken)({
             tokenizer: model.tokenizer,
-
+            maxTokensPerChunk: tokenLimit - emptyPromptTokens,
         }),
         summarize: async (input) => {
-            const { text } = await (0, generateText_js_1.generateText)(model, await prompt(input), options);
+            const text = await (0, generateText_js_1.generateText)(model, await prompt(input), options);
             return text;
         },
         join,
```
package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js
CHANGED
```diff
@@ -1,5 +1,5 @@
 import { generateText } from "../../model-function/generate-text/generateText.js";
-import {
+import { splitAtToken } from "../../text-chunk/split/splitRecursively.js";
 import { summarizeRecursively } from "./summarizeRecursively.js";
 /**
  * Recursively summarizes a text using a text generation model, e.g. for summarization or text extraction.
@@ -10,12 +10,12 @@ export async function summarizeRecursivelyWithTextGenerationAndTokenSplitting({
 (model.maxCompletionTokens ?? model.contextWindowSize / 4), join, }, options) {
     const emptyPromptTokens = await model.countPromptTokens(await prompt({ text: "" }));
     return summarizeRecursively({
-        split:
+        split: splitAtToken({
             tokenizer: model.tokenizer,
-
+            maxTokensPerChunk: tokenLimit - emptyPromptTokens,
         }),
         summarize: async (input) => {
-            const { text } = await generateText(model, await prompt(input), options);
+            const text = await generateText(model, await prompt(input), options);
             return text;
         },
         join,
```
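
Both summarize files now use `splitAtToken` from `text-chunk/split/splitRecursively.js`, configured with `{ tokenizer, maxTokensPerChunk }`. A rough usage sketch; the assumption that the returned split function accepts `{ text }` and resolves to an array of string chunks is not confirmed by this diff:

```ts
import { splitAtToken } from "modelfusion";

// assumption: `tokenizer` comes from a model instance, e.g. `model.tokenizer`
const split = splitAtToken({
  tokenizer,
  maxTokensPerChunk: 128,
});

// assumption: split functions take `{ text }` and return string chunks
const chunks = await split({ text: longDocumentText });
```
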
package/composed-function/use-tool/useTool.cjs
CHANGED
```diff
@@ -19,7 +19,10 @@ async function useTool(model, tool, prompt, options) {
         name: tool.name,
         description: tool.description,
         schema: tool.inputSchema,
-    }, () => prompt(tool), options);
+    }, () => prompt(tool), {
+        ...(options ?? {}),
+        fullResponse: true,
+    });
     return {
         tool: tool.name,
         parameters: value,
```
package/composed-function/use-tool/useTool.js
CHANGED
```diff
@@ -16,7 +16,10 @@ export async function useTool(model, tool, prompt, options) {
         name: tool.name,
         description: tool.description,
         schema: tool.inputSchema,
-    }, () => prompt(tool), options);
+    }, () => prompt(tool), {
+        ...(options ?? {}),
+        fullResponse: true,
+    });
     return {
         tool: tool.name,
         parameters: value,
```
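
The change in both useTool files is the same: the internal `generateJson` call now spreads the caller's options and forces `fullResponse: true`, so `useTool` keeps receiving the rich `{ value, response, metadata }` result internally while its own return shape (`{ tool, parameters, ... }`) stays unchanged. A hedged call-site sketch, where `model`, `calculator`, and `promptForTool` are illustrative placeholders:

```ts
// assumption: `calculator` is a Tool and `promptForTool` builds a
// model-specific prompt from the tool definition
const { tool, parameters, result } = await useTool(
  model,
  calculator,
  (tool) => promptForTool(tool)
);
```
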
package/model-function/embed-text/embedText.cjs
CHANGED
```diff
@@ -2,18 +2,6 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.embedText = exports.embedTexts = void 0;
 const executeCall_js_1 = require("../executeCall.cjs");
-/**
- * Generate embeddings for multiple texts.
- *
- * @example
- * const { embeddings } = await embedTexts(
- *   new OpenAITextEmbeddingModel(...),
- *   [
- *     "At first, Nox didn't know what to do with the pup.",
- *     "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
- *   ]
- * );
- */
 async function embedTexts(model, texts, options) {
     const result = await (0, executeCall_js_1.executeCall)({
         model,
@@ -65,26 +53,24 @@ async function embedTexts(model, texts, options) {
         generatedEmbeddings: output,
     }),
     });
-    return {
-        embeddings: result.output,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            embeddings: result.output,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
 exports.embedTexts = embedTexts;
-/**
- * Generate an embedding for a single text.
- *
- * @example
- * const { embedding } = await embedText(
- *   new OpenAITextEmbeddingModel(...),
- *   "At first, Nox didn't know what to do with the pup."
- * );
- */
 async function embedText(model, text, options) {
-    const result = await embedTexts(model, [text], options);
-    return {
-        embedding: result.embeddings[0],
-        metadata: result.metadata,
-    };
+    const result = await embedTexts(model, [text], {
+        ...(options ?? {}),
+        fullResponse: true,
+    });
+    return options?.fullResponse === true
+        ? {
+            embedding: result.embeddings[0],
+            metadata: result.metadata,
+        }
+        : result.embeddings[0];
 }
 exports.embedText = embedText;
```
package/model-function/embed-text/embedText.d.ts
CHANGED
```diff
@@ -6,7 +6,7 @@ import { TextEmbeddingModel, TextEmbeddingModelSettings } from "./TextEmbeddingM
  * Generate embeddings for multiple texts.
  *
  * @example
- * const { embeddings } = await embedTexts(
+ * const embeddings = await embedTexts(
  *   new OpenAITextEmbeddingModel(...),
  *   [
  *     "At first, Nox didn't know what to do with the pup.",
@@ -14,20 +14,30 @@ import { TextEmbeddingModel, TextEmbeddingModelSettings } from "./TextEmbeddingM
  *   ]
  * );
  */
-export declare function embedTexts<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, texts: string[], options?: FunctionOptions<SETTINGS>): Promise<{
+export declare function embedTexts<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, texts: string[], options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<{
     embeddings: Array<Vector>;
     metadata: CallMetadata<TextEmbeddingModel<RESPONSE, SETTINGS>>;
 }>;
+export declare function embedTexts<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, texts: string[], options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<Array<Vector>>;
 /**
  * Generate an embedding for a single text.
  *
  * @example
- * const { embedding } = await embedText(
+ * const embedding = await embedText(
  *   new OpenAITextEmbeddingModel(...),
  *   "At first, Nox didn't know what to do with the pup."
  * );
  */
-export declare function embedText<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, text: string, options?: FunctionOptions<SETTINGS>): Promise<{
+export declare function embedText<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, text: string, options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<{
     embedding: Vector;
     metadata: CallMetadata<TextEmbeddingModel<RESPONSE, SETTINGS>>;
 }>;
+export declare function embedText<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, text: string, options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<Vector>;
```
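
These overloads make the return type depend on the `fullResponse` literal at the call site:

```ts
import { OpenAITextEmbeddingModel, embedText, embedTexts } from "modelfusion";

const model = new OpenAITextEmbeddingModel({ model: "text-embedding-ada-002" });

// second overload: Promise<Vector>
const embedding = await embedText(
  model,
  "At first, Nox didn't know what to do with the pup."
);

// first overload: Promise<{ embeddings: Array<Vector>; metadata: ... }>
const { embeddings, metadata } = await embedTexts(
  model,
  ["first text", "second text"],
  { fullResponse: true }
);
```
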
package/model-function/embed-text/embedText.js
CHANGED
```diff
@@ -1,16 +1,4 @@
 import { executeCall } from "../executeCall.js";
-/**
- * Generate embeddings for multiple texts.
- *
- * @example
- * const { embeddings } = await embedTexts(
- *   new OpenAITextEmbeddingModel(...),
- *   [
- *     "At first, Nox didn't know what to do with the pup.",
- *     "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
- *   ]
- * );
- */
 export async function embedTexts(model, texts, options) {
     const result = await executeCall({
         model,
@@ -62,24 +50,22 @@ export async function embedTexts(model, texts, options) {
         generatedEmbeddings: output,
     }),
     });
-    return {
-        embeddings: result.output,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            embeddings: result.output,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
-/**
- * Generate an embedding for a single text.
- *
- * @example
- * const { embedding } = await embedText(
- *   new OpenAITextEmbeddingModel(...),
- *   "At first, Nox didn't know what to do with the pup."
- * );
- */
 export async function embedText(model, text, options) {
-    const result = await embedTexts(model, [text], options);
-    return {
-        embedding: result.embeddings[0],
-        metadata: result.metadata,
-    };
+    const result = await embedTexts(model, [text], {
+        ...(options ?? {}),
+        fullResponse: true,
+    });
+    return options?.fullResponse === true
+        ? {
+            embedding: result.embeddings[0],
+            metadata: result.metadata,
+        }
+        : result.embeddings[0];
 }
```
package/model-function/generate-image/generateImage.cjs
CHANGED
```diff
@@ -2,21 +2,6 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.generateImage = void 0;
 const executeCall_js_1 = require("../executeCall.cjs");
-/**
- * Generates a base64-encoded image using a prompt.
- * The prompt format depends on the model.
- * For example, OpenAI image models expect a string prompt,
- * and Stability AI models expect an array of text prompts with optional weights.
- *
- * @example
- * const { image } = await generateImage(
- *   new StabilityImageGenerationModel(...),
- *   [
- *     { text: "the wicked witch of the west" },
- *     { text: "style of early 19th century painting", weight: 0.5 },
- *   ]
- * );
- */
 async function generateImage(model, prompt, options) {
     const result = await (0, executeCall_js_1.executeCall)({
         model,
@@ -54,10 +39,12 @@ async function generateImage(model, prompt, options) {
         generatedImage: output,
     }),
     });
-    return {
-        image: result.output,
-        response: result.response,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            image: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
 exports.generateImage = generateImage;
```
package/model-function/generate-image/generateImage.d.ts
CHANGED
```diff
@@ -8,7 +8,7 @@ import { ImageGenerationModel, ImageGenerationModelSettings } from "./ImageGener
  * and Stability AI models expect an array of text prompts with optional weights.
  *
  * @example
- * const { image } = await generateImage(
+ * const image = await generateImage(
  *   new StabilityImageGenerationModel(...),
  *   [
  *     { text: "the wicked witch of the west" },
@@ -16,8 +16,13 @@ import { ImageGenerationModel, ImageGenerationModelSettings } from "./ImageGener
  *   ]
  * );
  */
-export declare function generateImage<PROMPT, RESPONSE, SETTINGS extends ImageGenerationModelSettings>(model: ImageGenerationModel<PROMPT, RESPONSE, SETTINGS>, prompt: PROMPT, options?: FunctionOptions<SETTINGS>): Promise<{
+export declare function generateImage<PROMPT, RESPONSE, SETTINGS extends ImageGenerationModelSettings>(model: ImageGenerationModel<PROMPT, RESPONSE, SETTINGS>, prompt: PROMPT, options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<{
     image: string;
     response: RESPONSE;
     metadata: CallMetadata<ImageGenerationModel<PROMPT, RESPONSE, SETTINGS>>;
 }>;
+export declare function generateImage<PROMPT, RESPONSE, SETTINGS extends ImageGenerationModelSettings>(model: ImageGenerationModel<PROMPT, RESPONSE, SETTINGS>, prompt: PROMPT, options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<string>;
```
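
The call-site effect of the new declarations, using the README's example prompt:

```ts
// default overload: Promise<string> (base64-encoded image)
const image = await generateImage(
  new OpenAIImageGenerationModel({ size: "512x512" }),
  "the wicked witch of the west in the style of early 19th century painting"
);

// fullResponse overload also exposes the raw provider response and call metadata
const { image: base64Image, response, metadata } = await generateImage(
  new OpenAIImageGenerationModel({ size: "512x512" }),
  "the wicked witch of the west in the style of early 19th century painting",
  { fullResponse: true }
);
```
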
package/model-function/generate-image/generateImage.js
CHANGED
```diff
@@ -1,19 +1,4 @@
 import { executeCall } from "../executeCall.js";
-/**
- * Generates a base64-encoded image using a prompt.
- * The prompt format depends on the model.
- * For example, OpenAI image models expect a string prompt,
- * and Stability AI models expect an array of text prompts with optional weights.
- *
- * @example
- * const { image } = await generateImage(
- *   new StabilityImageGenerationModel(...),
- *   [
- *     { text: "the wicked witch of the west" },
- *     { text: "style of early 19th century painting", weight: 0.5 },
- *   ]
- * );
- */
 export async function generateImage(model, prompt, options) {
     const result = await executeCall({
         model,
@@ -51,9 +36,11 @@ export async function generateImage(model, prompt, options) {
         generatedImage: output,
     }),
     });
-    return {
-        image: result.output,
-        response: result.response,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            image: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
```
package/model-function/generate-json/generateJson.cjs
CHANGED
```diff
@@ -52,10 +52,12 @@ async function generateJson(model, schemaDefinition, prompt, options) {
         generatedJson: output,
     }),
     });
-    return {
-        value: result.output,
-        response: result.response,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            value: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
 exports.generateJson = generateJson;
```
package/model-function/generate-json/generateJson.d.ts
CHANGED
```diff
@@ -2,8 +2,13 @@ import { FunctionOptions } from "../FunctionOptions.js";
 import { CallMetadata } from "../executeCall.js";
 import { GenerateJsonModel, GenerateJsonModelSettings, GenerateJsonPrompt } from "./GenerateJsonModel.js";
 import { SchemaDefinition } from "./SchemaDefinition.js";
-export declare function generateJson<STRUCTURE, PROMPT, RESPONSE, NAME extends string, SETTINGS extends GenerateJsonModelSettings>(model: GenerateJsonModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinition: SchemaDefinition<NAME, STRUCTURE>, prompt: (schemaDefinition: SchemaDefinition<NAME, STRUCTURE>) => PROMPT & GenerateJsonPrompt<RESPONSE>, options?: FunctionOptions<SETTINGS>): Promise<{
+export declare function generateJson<STRUCTURE, PROMPT, RESPONSE, NAME extends string, SETTINGS extends GenerateJsonModelSettings>(model: GenerateJsonModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinition: SchemaDefinition<NAME, STRUCTURE>, prompt: (schemaDefinition: SchemaDefinition<NAME, STRUCTURE>) => PROMPT & GenerateJsonPrompt<RESPONSE>, options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<{
     value: STRUCTURE;
     response: RESPONSE;
     metadata: CallMetadata<GenerateJsonModel<PROMPT, RESPONSE, SETTINGS>>;
 }>;
+export declare function generateJson<STRUCTURE, PROMPT, RESPONSE, NAME extends string, SETTINGS extends GenerateJsonModelSettings>(model: GenerateJsonModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinition: SchemaDefinition<NAME, STRUCTURE>, prompt: (schemaDefinition: SchemaDefinition<NAME, STRUCTURE>) => PROMPT & GenerateJsonPrompt<RESPONSE>, options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<STRUCTURE>;
```
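
The same pattern applies to `generateJson`: the schema-typed `STRUCTURE` is returned directly unless `fullResponse: true` is passed. A sketch where `model`, `sentimentSchema` (a `SchemaDefinition`), and `promptForSchema` are illustrative placeholders:

```ts
// default overload: Promise<STRUCTURE>
const value = await generateJson(model, sentimentSchema, (schema) =>
  promptForSchema(schema)
);

// fullResponse overload: Promise<{ value, response, metadata }>
const { value: v, response, metadata } = await generateJson(
  model,
  sentimentSchema,
  (schema) => promptForSchema(schema),
  { fullResponse: true }
);
```
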
package/model-function/generate-json/generateJson.js
CHANGED
```diff
@@ -49,9 +49,11 @@ export async function generateJson(model, schemaDefinition, prompt, options) {
         generatedJson: output,
     }),
     });
-    return {
-        value: result.output,
-        response: result.response,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            value: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
```
package/model-function/generate-json/generateJsonOrText.cjs
CHANGED
```diff
@@ -65,10 +65,12 @@ async function generateJsonOrText(model, schemaDefinitions, prompt, options) {
         generatedJson: output,
     }),
     });
-    return {
-        ...result.output,
-        response: result.response,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            ...result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
 exports.generateJsonOrText = generateJsonOrText;
```
package/model-function/generate-json/generateJsonOrText.d.ts
CHANGED
```diff
@@ -14,7 +14,9 @@ type ToSchemaUnion<T> = {
 } : never;
 }[keyof T];
 type ToOutputValue<SCHEMAS extends SchemaDefinitionArray<SchemaDefinition<any, any>[]>> = ToSchemaUnion<ToSchemaDefinitionsMap<SCHEMAS>>;
-export declare function generateJsonOrText<SCHEMAS extends SchemaDefinition<any, any>[], PROMPT, RESPONSE, SETTINGS extends GenerateJsonOrTextModelSettings>(model: GenerateJsonOrTextModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinitions: SCHEMAS, prompt: (schemaDefinitions: SCHEMAS) => PROMPT & GenerateJsonOrTextPrompt<RESPONSE>, options?: FunctionOptions<SETTINGS>): Promise<({
+export declare function generateJsonOrText<SCHEMAS extends SchemaDefinition<any, any>[], PROMPT, RESPONSE, SETTINGS extends GenerateJsonOrTextModelSettings>(model: GenerateJsonOrTextModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinitions: SCHEMAS, prompt: (schemaDefinitions: SCHEMAS) => PROMPT & GenerateJsonOrTextPrompt<RESPONSE>, options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<({
     schema: null;
     value: null;
     text: string;
@@ -22,4 +24,11 @@ export declare function generateJsonOrText<SCHEMAS extends SchemaDefinition<any,
     response: RESPONSE;
     metadata: CallMetadata<GenerateJsonOrTextModel<PROMPT, RESPONSE, SETTINGS>>;
 }>;
+export declare function generateJsonOrText<SCHEMAS extends SchemaDefinition<any, any>[], PROMPT, RESPONSE, SETTINGS extends GenerateJsonOrTextModelSettings>(model: GenerateJsonOrTextModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinitions: SCHEMAS, prompt: (schemaDefinitions: SCHEMAS) => PROMPT & GenerateJsonOrTextPrompt<RESPONSE>, options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<{
+    schema: null;
+    value: null;
+    text: string;
+} | ToOutputValue<SCHEMAS>>;
 export {};
```
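
Since the default overload now returns the union value directly, call sites can narrow on `schema` without unwrapping a result object (`model`, `schemas`, and `promptForSchemas` are illustrative placeholders):

```ts
const result = await generateJsonOrText(model, schemas, (s) => promptForSchemas(s));

if (result.schema === null) {
  // the model answered with plain text
  console.log(result.text);
} else {
  // the model produced JSON matching one of the schema definitions
  console.log(result.schema, result.value);
}
```
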
package/model-function/generate-json/generateJsonOrText.js
CHANGED
```diff
@@ -62,9 +62,11 @@ export async function generateJsonOrText(model, schemaDefinitions, prompt, optio
         generatedJson: output,
     }),
     });
-    return {
-        ...result.output,
-        response: result.response,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            ...result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
```
package/model-function/generate-text/generateText.cjs
CHANGED
```diff
@@ -2,18 +2,6 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.generateText = void 0;
 const executeCall_js_1 = require("../executeCall.cjs");
-/**
- * Generates a text using a prompt.
- * The prompt format depends on the model.
- * For example, OpenAI text models expect a string prompt, and OpenAI chat models expect an array of chat messages.
- *
- * @example
- * const model = new OpenAITextGenerationModel(...);
- *
- * const { text } = await model.generateText(
- *   "Write a short story about a robot learning to love:\n\n"
- * );
- */
 async function generateText(
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 model, prompt, options) {
@@ -58,10 +46,12 @@ model, prompt, options) {
         generatedText: output,
     }),
     });
-    return {
-        text: result.output,
-        response: result.response,
-        metadata: result.metadata,
-    };
+    return options?.fullResponse === true
+        ? {
+            text: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
 exports.generateText = generateText;
```
package/model-function/generate-text/generateText.d.ts
CHANGED
```diff
@@ -9,12 +9,17 @@ import { TextGenerationModel, TextGenerationModelSettings } from "./TextGenerati
  * @example
  * const model = new OpenAITextGenerationModel(...);
  *
- * const { text } = await model.generateText(
+ * const text = await model.generateText(
  *   "Write a short story about a robot learning to love:\n\n"
  * );
  */
-export declare function generateText<PROMPT, RESPONSE, SETTINGS extends TextGenerationModelSettings>(model: TextGenerationModel<PROMPT, RESPONSE, any, SETTINGS>, prompt: PROMPT, options?: FunctionOptions<SETTINGS>): Promise<{
+export declare function generateText<PROMPT, RESPONSE, SETTINGS extends TextGenerationModelSettings>(model: TextGenerationModel<PROMPT, RESPONSE, any, SETTINGS>, prompt: PROMPT, options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<{
     text: string;
     response: RESPONSE;
     metadata: CallMetadata<TextGenerationModel<PROMPT, RESPONSE, unknown, SETTINGS>>;
 }>;
+export declare function generateText<PROMPT, RESPONSE, SETTINGS extends TextGenerationModelSettings>(model: TextGenerationModel<PROMPT, RESPONSE, any, SETTINGS>, prompt: PROMPT, options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<string>;
```
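
As with the other model functions, overload selection hinges on the literal type of `fullResponse`: `{ fullResponse: true }` picks the rich-result declaration, while omitting the option (or passing `fullResponse: false`) resolves to `Promise<string>` (`model` and `prompt` are stand-ins):

```ts
const text = await generateText(model, prompt); // string

const rich = await generateText(model, prompt, { fullResponse: true });
// { text: string; response: RESPONSE; metadata: ... }
```
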